text stringlengths 957 885k |
|---|
<gh_stars>0
#!/usr/bin/python3
import sys
sys.path.insert(0, '..') # make '..' first in the lib search path
from collections import Counter, defaultdict
import nltk
import spacy
from MiscUtils import loadNLTKCorpus, ProgressBar
# Create an empty overrides file before importing pyinflect.
# Normally the overrides file loads when importing so it needs to be there but
# any overrides in will be used and so will mess up this script.
# Fix this issue by creating an empty file
overrides_fn = '../pyinflect/overrides.csv'
open(overrides_fn, 'w').close()
import pyinflect
# This script creates an overrides file that allows the system to overcome issues with
# the way Spacy lemmatizes words and invalid data in the AGID.
# The created file is a mapping from lemma/tag to the "best" inflection. Note that
# this only overrides methods where the treebank tag is used, not ones where the
# simplified AGID tag (V, N or A) is used.
# Note that if the AGID version is changed this script should be re-run. Additionally
# if Spacy changes their lemmatizer or if a different lemmatizer is used consider re-running
# this script.
if __name__ == '__main__':
    # Configuration
    #corp_fns = ['austen-emma.txt'] # 7,491 sentences
    corp_fns = nltk.corpus.gutenberg.fileids() # 18 files with 94K sentences
    max_chars = int(1e9)
    req_count = 2 # require at least this many instances in corpus for an override
    # NOTE(review): the doubled '.txt.txt' extension looks like a typo -- confirm
    # the intended multiples-file name.
    multiples_fn = '../CorpMultiInfls.txt.txt'

    # Load Spacy
    print('Loading Spacy model')
    nlp = spacy.load('en_core_web_sm')
    print('Using spaCy version ', spacy.__version__)

    # Load the corpus to test with
    print('Loading corpus')
    sents = []
    for corp_fn in corp_fns:
        sents += loadNLTKCorpus(corp_fn, max_chars)
    print('Loaded {:,} test sentences'.format(len(sents)))
    print()

    # Loop through the sentences and count the instances of (lemma, tag, corpus_word)
    # corpus_word is considered the "correct" inflection for the lemma/tag
    print('Processing sentences')
    infl_ctr = Counter()
    pb = ProgressBar(len(sents))
    for i, sent in enumerate(sents):
        doc = nlp(sent)
        for word in doc:
            # words without a Penn tag cannot be inflected
            if not word.tag_:
                continue
            # Skip "be" since it's an oddball case where the inflection can't be determined
            # from the Penn tag alone.
            if word.lemma_.lower() == 'be':
                continue
            # Only inflect Nouns, Verbs, Adverbs and Adjectives (and not Particles)
            ptype = word.tag_[0]
            if ptype in ['N', 'V', 'R', 'J'] and word.tag_!='RP':
                infl = word._.inflect(word.tag_)
                # for now, don't add an override for anything not in the AGID, although that could
                # be done to make up for misspellings, etc..
                if infl is None:
                    continue
                #infl = infl if infl is not None else ''
                # key on lower-cased lemma/corpus word so counts merge across capitalization
                key = (word.lemma_.lower(), word.tag_, word.text.lower())
                infl_ctr[key] += 1
        pb.update(i)
    pb.clear()
    print('Completed. Loaded {:,} lemma/tag/infl keys'.format(len(infl_ctr)))
    print()

    # Now create a new dictionary that only uses the lemma and tag as the key
    # and keeps a list of (corpus_word, count)
    lemma_tag_dict = defaultdict(list)
    for (lemma, tag, word), count in infl_ctr.items():
        lemma_tag_dict[(lemma,tag)].append( (word, count) )

    # Sort through the new dictionary and decide which is the correct word to use for the inflection.
    # This is trivial when only one form exists but when there are multiple, choose the one
    # with the highest count (or alphabetical if the count is equal).
    # Save a list of the entries with multiple words for info / debug.
    print('Sorting through entries for overrides and multiple entries')
    overrides_f = open(overrides_fn, 'w')
    multiples_f = open(multiples_fn, 'w')
    for (lemma, tag), mappings in sorted(lemma_tag_dict.items()):
        assert mappings
        if len(mappings) == 1:
            best_infl_word = mappings[0][0] # mappings is a list of (word, count)
            best_infl_count = mappings[0][1]
        elif len(mappings) > 1:
            # Choose the one with the highest count. If equal, choose alphabetically.
            # Note that counts are rarely equal when using the entire corpus. This mostly occurs
            # for misspellings that only show up once and these will get filtered out by "req_count".
            # So, we won't be too concerned if alphabetical isn't the perfect fall-back heuristic.
            # Two stable sorts: alphabetical first, then by count, so ties stay alphabetical.
            mappings = sorted(mappings, key=lambda x:x[0]) # sort alphabetically
            mappings = sorted(mappings, key=lambda x:x[1], reverse=True) # sort highest count first
            best_infl_word = mappings[0][0]
            best_infl_count = mappings[0][1]
            # Write out for info / debug
            multiples_f.write(' %s/%s -> %s\n' % (lemma, tag, str(mappings)))
        # Skip overrides for cases where there's only a few instances in the corpus
        if best_infl_count < req_count:
            continue
        # Now that we know what we want the lemma/tag to inflect to, check with pyinflect to see
        # what it's actually doing and if it's different, write an override.
        infl_list = pyinflect.getInflection(lemma, tag)
        infl = infl_list[0] if infl_list else '' # choose form 0, the default
        if infl != best_infl_word:
            overrides_f.write('%s,%s,%s\n' % (lemma, tag, best_infl_word))
    multiples_f.close()
    overrides_f.close()
    print('Overrides file saved to: ', overrides_fn)
    print('Multiple entries saved to: ', multiples_fn)
|
import os
import time
import numpy as np
import cupy as cp
class pyMatrix:
    '''
    Pure-Python reference matrix used as the baseline for performance
    comparisons against the NumPy and CuPy implementations.

    Members
    ------------
    - arr : the matrix data, stored as a float32 NumPy array
    - nrows : number of rows (default: 50)
    - ncols : number of columns (default: 50)
    '''

    def __init__(self, arr=None, nrows=50, ncols=50):
        '''
        Parameters
        -----------
        - arr : optional custom array; cast to float32 (default: None)
        - nrows : number of rows, used when arr is None (default: 50)
        - ncols : number of columns, used when arr is None (default: 50)
        '''
        if arr is None:
            # no data supplied: allocate an nrows x ncols zero matrix
            self.nrows = nrows
            self.ncols = ncols
            self.arr = np.zeros((nrows, ncols), dtype=np.float32)
        else:
            self.arr = arr.astype(np.float32)
            self.nrows = arr.shape[0]
            self.ncols = arr.shape[1]

    def norm(self):
        '''Frobenius norm computed with explicit Python loops (intentionally naive).'''
        total = 0.
        for r in range(self.nrows):
            for c in range(self.ncols):
                entry = self.arr[r, c]
                total += entry * entry
        return np.sqrt(total)

    def matmul(self, mat, return_time=False):
        '''
        Naive triple-loop matrix product.

        Returns the product as a pyMatrix; when return_time is True, returns
        a (product, elapsed_seconds) tuple instead.
        Raises ValueError when the inner dimensions disagree.
        '''
        if self.ncols != mat.nrows:
            raise ValueError(
                "Dimensions {0} and {1} do not match.".format(self.ncols, mat.nrows)
            )
        prod = np.zeros((self.nrows, mat.ncols))
        # time only the multiplication itself
        start_ns = time.perf_counter_ns()
        for r in range(self.nrows):
            for inner in range(self.ncols):
                for c in range(mat.ncols):
                    prod[r, c] += self.arr[r, inner] * mat.arr[inner, c]
        elapsed = (time.perf_counter_ns() - start_ns) * (1e-9)
        result = pyMatrix(prod)
        return (result, elapsed) if return_time else result
class npMatrix(pyMatrix):
    '''
    NumPy-backed matrix used for performance comparison; same members and
    interface as pyMatrix, but norm and matmul delegate to NumPy.

    Members
    ------------
    - arr : the matrix data, a float32 NumPy array
    - nrows : number of rows (default: 50)
    - ncols : number of columns (default: 50)
    '''

    def norm(self):
        '''Frobenius norm via np.linalg.norm.'''
        return np.linalg.norm(self.arr)

    def matmul(self, mat, return_time=False):
        '''NumPy matrix product; optionally returns (product, elapsed_seconds).'''
        start_ns = time.perf_counter_ns()
        product = np.matmul(self.arr, mat.arr)
        elapsed = (time.perf_counter_ns() - start_ns) * (1e-9)
        wrapped = npMatrix(product)
        return (wrapped, elapsed) if return_time else wrapped
class cpMatrix(pyMatrix):
    '''
    CuPy-backed matrix used for GPU performance comparison; same interface
    as pyMatrix, with the data held on the device.

    Members
    ------------
    - arr : the matrix data, a float32 CuPy array (device memory)
    - nrows : number of rows (default: 50)
    - ncols : number of columns (default: 50)
    '''

    def __init__(self, arr=None, nrows=50, ncols=50):
        '''
        Parameters
        -----------
        - arr : optional host-side array to copy to the device (default: None)
        - nrows : number of rows, used when arr is None (default: 50)
        - ncols : number of columns, used when arr is None (default: 50)
        '''
        if arr is None:
            self.nrows = nrows
            self.ncols = ncols
            self.arr = cp.zeros((nrows, ncols), dtype=np.float32)
        else:
            # host->device copy is expensive, but it keeps the element values
            # identical across the matrix implementations being compared
            self.arr = cp.asarray(arr, dtype=np.float32)
            self.nrows = arr.shape[0]
            self.ncols = arr.shape[1]

    def norm(self):
        '''Frobenius norm computed on the device.'''
        return cp.linalg.norm(self.arr)

    def matmul(self, mat, return_time=False):
        '''CuPy matrix product; optionally returns (product, elapsed_seconds).'''
        start_ns = time.perf_counter_ns()
        device_prod = cp.matmul(self.arr, mat.arr)
        elapsed = (time.perf_counter_ns() - start_ns) * (1e-9)
        # copy the result back to the host before wrapping
        host_prod = cpMatrix(cp.asnumpy(device_prod))
        return (host_prod, elapsed) if return_time else host_prod
|
<reponame>Abdelrahman0W/lisa<gh_stars>0
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from dataclasses import dataclass, field
from pathlib import PurePosixPath
from typing import Any, Dict, List, Type, cast
from azure.mgmt.compute.models import GrantAccessData # type: ignore
from dataclasses_json import dataclass_json
from retry import retry
from lisa import schema
from lisa.environment import Environments, EnvironmentSpace
from lisa.feature import Features
from lisa.features import StartStop
from lisa.node import Node, RemoteNode, quick_connect
from lisa.parameter_parser.runbook import RunbookBuilder
from lisa.platform_ import load_platform_from_builder
from lisa.transformer import Transformer
from lisa.util import LisaException, constants, get_date_str, get_datetime_path
from .common import (
AZURE_SHARED_RG_NAME,
check_or_create_storage_account,
get_compute_client,
get_environment_context,
get_network_client,
get_node_context,
get_or_create_storage_container,
get_storage_account_name,
wait_copy_blob,
wait_operation,
)
from .platform_ import AzurePlatform
from .tools import Waagent
DEFAULT_EXPORTED_VHD_CONTAINER_NAME = "lisa-vhd-exported"
DEFAULT_VHD_SUBFIX = "exported"
@retry(tries=10, jitter=(1, 2))  # type: ignore
def _generate_vhd_path(container_client: Any, file_name_part: str = "") -> str:
    """
    Build a date-stamped blob path for the exported VHD and make sure it is
    unused; raises LisaException (and retries) when a blob already exists
    under that path.
    """
    path = PurePosixPath(
        f"{get_date_str()}/{get_datetime_path()}_"
        f"{DEFAULT_VHD_SUBFIX}_{file_name_part}.vhd"
    )
    existing = container_client.list_blobs(name_starts_with=path)
    # any blob under this prefix means a collision; retry with a fresh timestamp
    if any(True for _ in existing):
        raise LisaException(f"blob exists already: {path}")
    return str(path)
@dataclass_json
@dataclass
class VhdTransformerSchema(schema.Transformer):
    """Runbook schema for the azure_vhd transformer."""

    # resource group and vm name to be exported
    resource_group_name: str = field(
        default="", metadata=schema.metadata(required=True)
    )
    vm_name: str = "node-0"
    # values for SSH connection. public_address is optional, because it can be
    # retrieved from vm_name. Others can be retrieved from platform.
    public_address: str = ""
    public_port: int = 22
    username: str = constants.DEFAULT_USER_NAME
    password: str = ""
    private_key_file: str = ""
    # values for exported vhd. storage_account_name is optional, because it can
    # be the default storage of LISA.
    storage_account_name: str = ""
    container_name: str = DEFAULT_EXPORTED_VHD_CONTAINER_NAME
    # optional fragment appended to the generated blob file name
    file_name_part: str = ""
    # restore (restart) the source VM after the export finishes, or not
    restore: bool = False
@dataclass_json
@dataclass
class DeployTransformerSchema(schema.Transformer):
    """Runbook schema for the azure_deploy transformer."""

    # capability requirement for the single node to deploy
    requirement: schema.Capability = field(default_factory=schema.Capability)
    resource_group_name: str = ""
@dataclass_json
@dataclass
class DeleteTransformerSchema(schema.Transformer):
    """Runbook schema for the azure_delete transformer."""

    # resource group to delete; required
    resource_group_name: str = field(
        default="", metadata=schema.metadata(required=True)
    )
class VhdTransformer(Transformer):
    """
    convert an azure VM to VHD, which is ready to deploy.
    """

    __url_name = "url"

    @classmethod
    def type_name(cls) -> str:
        return "azure_vhd"

    @classmethod
    def type_schema(cls) -> Type[schema.TypedSchema]:
        return VhdTransformerSchema

    @property
    def _output_names(self) -> List[str]:
        # the single output is the URL of the exported VHD
        return [self.__url_name]

    def _internal_run(self) -> Dict[str, Any]:
        """Export the configured VM's OS disk to a VHD blob and return its URL."""
        runbook: VhdTransformerSchema = self.runbook
        platform = _load_platform(self._runbook_builder, self.type_name())
        compute_client = get_compute_client(platform)
        virtual_machine = compute_client.virtual_machines.get(
            runbook.resource_group_name, runbook.vm_name
        )
        node = self._prepare_virtual_machine(platform, virtual_machine)
        vhd_location = self._export_vhd(platform, virtual_machine)
        self._restore_vm(platform, virtual_machine, node)
        return {self.__url_name: vhd_location}

    def _prepare_virtual_machine(
        self, platform: AzurePlatform, virtual_machine: Any
    ) -> Node:
        """Connect to the VM, deprovision it, and stop it so the disk can be exported."""
        runbook: VhdTransformerSchema = self.runbook
        if not runbook.public_address:
            runbook.public_address = self._get_public_ip_address(
                platform, virtual_machine
            )
        platform_runbook: schema.Platform = platform.runbook
        # fall back to the platform admin credentials when not set in the runbook
        if not runbook.username:
            runbook.username = platform_runbook.admin_username
        if not runbook.password:
            # FIX: this line held a "<PASSWORD>" redaction placeholder and did
            # not parse; restore the evident intent of defaulting to the
            # platform's admin password, matching the username/key fallbacks.
            runbook.password = platform_runbook.admin_password
        if not runbook.private_key_file:
            runbook.private_key_file = platform_runbook.admin_private_key_file
        node_runbook = schema.RemoteNode(
            name=runbook.vm_name,
            public_address=runbook.public_address,
            port=runbook.public_port,
            username=runbook.username,
            # FIX: was also a "<PASSWORD>" placeholder; pass the resolved password.
            password=runbook.password,
            private_key_file=runbook.private_key_file,
        )
        node = quick_connect(node_runbook, f"{self.type_name()}_vm")
        node.features = Features(node, platform)
        node_context = get_node_context(node)
        node_context.vm_name = runbook.vm_name
        node_context.resource_group_name = runbook.resource_group_name
        # prepare vm for exporting: drop shell history and deprovision the agent
        wa = node.tools[Waagent]
        node.execute("export HISTSIZE=0", shell=True)
        wa.deprovision()
        # stop the vm so the OS disk is in a consistent state
        startstop = node.features[StartStop]
        startstop.stop()
        return node

    def _export_vhd(self, platform: AzurePlatform, virtual_machine: Any) -> str:
        """Grant read access to the OS disk and copy it into a storage container."""
        runbook: VhdTransformerSchema = self.runbook
        compute_client = get_compute_client(platform)
        # generate sas url from os disk, so it can be copied.
        self._log.debug("generating sas url...")
        location = virtual_machine.location
        os_disk_name = virtual_machine.storage_profile.os_disk.name
        # 86400 seconds = 24 hours of read access for the copy
        operation = compute_client.disks.begin_grant_access(
            resource_group_name=runbook.resource_group_name,
            disk_name=os_disk_name,
            grant_access_data=GrantAccessData(access="Read", duration_in_seconds=86400),
        )
        wait_operation(operation)
        sas_url = operation.result().access_sas
        assert sas_url, "cannot get sas_url from os disk"
        self._log.debug("getting or creating storage account and container...")
        # get vhd container; default to LISA's shared storage account
        if not runbook.storage_account_name:
            runbook.storage_account_name = get_storage_account_name(
                subscription_id=platform.subscription_id, location=location, type="t"
            )
        check_or_create_storage_account(
            credential=platform.credential,
            subscription_id=platform.subscription_id,
            account_name=runbook.storage_account_name,
            resource_group_name=AZURE_SHARED_RG_NAME,
            location=location,
            log=self._log,
        )
        container_client = get_or_create_storage_container(
            runbook.storage_account_name, runbook.container_name, platform.credential
        )
        path = _generate_vhd_path(container_client, runbook.file_name_part)
        vhd_path = f"{container_client.url}/{path}"
        self._log.info(f"copying vhd: {vhd_path}")
        blob_client = container_client.get_blob_client(path)
        blob_client.start_copy_from_url(sas_url, metadata=None, incremental_copy=False)
        wait_copy_blob(blob_client, vhd_path, self._log)
        return vhd_path

    def _restore_vm(
        self, platform: AzurePlatform, virtual_machine: Any, node: Node
    ) -> None:
        """Revoke the disk export access and optionally restart the VM."""
        runbook: VhdTransformerSchema = self.runbook
        self._log.debug("restoring vm...")
        # release the vhd export lock, so it can be started back
        compute_client = get_compute_client(platform)
        os_disk_name = virtual_machine.storage_profile.os_disk.name
        operation = compute_client.disks.begin_revoke_access(
            resource_group_name=runbook.resource_group_name,
            disk_name=os_disk_name,
        )
        wait_operation(operation)
        if runbook.restore:
            start_stop = node.features[StartStop]
            start_stop.start()

    def _get_public_ip_address(
        self, platform: AzurePlatform, virtual_machine: Any
    ) -> str:
        """Look up the public IP of the VM's primary network interface."""
        runbook: VhdTransformerSchema = self.runbook
        # NOTE(review): if no NIC is marked primary, network_interface_name is
        # never bound and the .get() below raises NameError -- confirm a primary
        # NIC is guaranteed for exported VMs.
        for (
            network_interface_reference
        ) in virtual_machine.network_profile.network_interfaces:
            if network_interface_reference.primary:
                network_interface_name = network_interface_reference.id.split("/")[-1]
                break
        network_client = get_network_client(platform)
        network_interface = network_client.network_interfaces.get(
            runbook.resource_group_name, network_interface_name
        )
        for ip_config in network_interface.ip_configurations:
            if ip_config.public_ip_address:
                public_ip_name = ip_config.public_ip_address.id.split("/")[-1]
                break
        public_ip_address: str = network_client.public_ip_addresses.get(
            runbook.resource_group_name, public_ip_name
        ).ip_address
        assert (
            public_ip_address
        ), "cannot find public IP address, make sure the VM is in running status."
        return public_ip_address
class DeployTransformer(Transformer):
    """
    deploy a node in transformer phase for further operations
    """

    __resource_group_name = "resource_group_name"

    @classmethod
    def type_name(cls) -> str:
        return "azure_deploy"

    @classmethod
    def type_schema(cls) -> Type[schema.TypedSchema]:
        return DeployTransformerSchema

    @property
    def _output_names(self) -> List[str]:
        # outputs: the resource group plus the node's SSH connection fields
        return [
            self.__resource_group_name,
            constants.ENVIRONMENTS_NODES_REMOTE_ADDRESS,
            constants.ENVIRONMENTS_NODES_REMOTE_PORT,
            constants.ENVIRONMENTS_NODES_REMOTE_USERNAME,
            constants.ENVIRONMENTS_NODES_REMOTE_PASSWORD,
            constants.ENVIRONMENTS_NODES_REMOTE_PRIVATE_KEY_FILE,
        ]

    def _internal_run(self) -> Dict[str, Any]:
        """Deploy a single-node environment and return its connection details."""
        platform = _load_platform(self._runbook_builder, self.type_name())
        runbook: DeployTransformerSchema = self.runbook
        # build an environment requirement holding the one requested node
        envs = Environments()
        environment_requirement = EnvironmentSpace()
        environment_requirement.nodes.append(runbook.requirement)
        environment = envs.from_requirement(environment_requirement)
        assert environment
        platform.prepare_environment(environment=environment)
        platform.deploy_environment(environment)
        resource_group_name = get_environment_context(environment).resource_group_name
        # generate return results
        results = {
            self.__resource_group_name: resource_group_name,
        }
        node: RemoteNode = cast(RemoteNode, environment.default_node)
        connection_info = node.connection_info
        assert connection_info
        results.update(connection_info)
        return results
class DeleteTransformer(Transformer):
    """
    delete an environment
    """

    @classmethod
    def type_name(cls) -> str:
        return "azure_delete"

    @classmethod
    def type_schema(cls) -> Type[schema.TypedSchema]:
        return DeleteTransformerSchema

    @property
    def _output_names(self) -> List[str]:
        # deletion produces no outputs
        return []

    def _internal_run(self) -> Dict[str, Any]:
        """Delete the resource group named in the runbook via a mock environment."""
        platform = _load_platform(self._runbook_builder, self.type_name())
        runbook: DeleteTransformerSchema = self.runbook
        # mock up environment for deletion
        envs = Environments()
        environment_requirement = EnvironmentSpace()
        environment_requirement.nodes.append(schema.NodeSpace())
        environment = envs.from_requirement(environment_requirement)
        assert environment
        # point the mock environment at the target resource group and mark it
        # as created so the platform is willing to delete it
        environment_context = get_environment_context(environment)
        environment_context.resource_group_name = runbook.resource_group_name
        environment_context.resource_group_is_created = True
        platform.delete_environment(environment)
        return {}
def _load_platform(
    runbook_builder: RunbookBuilder, transformer_name: str
) -> AzurePlatform:
    """Load the platform from the runbook, require it to be Azure, and initialize it."""
    loaded = load_platform_from_builder(runbook_builder)
    # these transformers only make sense against the Azure platform
    assert isinstance(
        loaded, AzurePlatform
    ), f"'{transformer_name}' support only Azure platform"
    loaded.initialize()
    return loaded
|
#!/usr/bin/env python3
import os
import sys
import argparse
import numpy as np
import scipy.optimize
from pyscf import gto,df,dft
import pyscf.data
from functions import *
def energy(x):
    """Objective function: total fitting error over all molecules.

    x holds log-exponents; they are exponentiated, folded into the basis for
    the elements being optimized, and the per-molecule errors are summed.
    """
    newbasis = exp2basis(np.exp(x), myelements, basis)
    return sum(energy_mol(newbasis, m) for m in moldata)
def gradient(x):
    """Return (E, dE/dx) for the log-exponent vector x.

    Prints per-molecule errors and an iteration line as a side effect, and
    advances the module-level iteration counter `it`.
    """
    exponents = np.exp(x)
    newbasis = exp2basis(exponents, myelements, basis)
    global it
    E = 0.0
    dE_da = np.zeros(nexp)
    # accumulate energy and exponent-space gradient over all molecules
    for m,name in zip(moldata,args.molecules):
        E_, dE_da_ = gradient_mol(nexp, newbasis, m)
        E += E_
        dE_da += dE_da_
        print(os.path.basename(name), 'e =', E_, '(', E_/m['self']*100.0, '%)')
    print('it:', it, E, max(abs(dE_da)))
    # restrict the gradient to the elements being optimized
    dE_da = cut_myelements(dE_da, myelements, bf_bounds)
    it+=1
    print(flush=True)
    # chain rule for x = log(a): dE/dx = dE/da * a
    # NOTE(review): this multiply assumes cut_myelements keeps the full-length
    # vector (e.g. zeroing entries) rather than slicing it -- confirm.
    dE_dx = dE_da * exponents
    return E, dE_dx
def gradient_only(x):
    """Gradient-only wrapper around gradient(), for use as scipy's jac=."""
    _, grad = gradient(x)
    return grad
def read_bases(basis_files):
    """Read per-element basis-set files and merge them into one dict.

    Each file must evaluate to a dict mapping an element symbol to its basis.
    Exits with a non-zero status when two files define the same element.
    """
    basis = {}
    for path in basis_files:
        with open(path, "r") as f:
            # WARNING: eval() executes arbitrary code from the basis file --
            # only use trusted files. ast.literal_eval would be safer if the
            # files are guaranteed to be plain literals.
            addbasis = eval(f.read())
        # NOTE: only the first key is checked for duplicates; files are
        # expected to define a single element each.
        q = next(iter(addbasis))
        if q in basis:
            print('error: several sets for element', q)
            # exit with failure status (the bare exit() returned status 0)
            sys.exit(1)
        basis.update(addbasis)
    return basis
def make_bf_start():
    """Map each element to its [start, end) range of basis-function indices.

    Ranges follow the order of the module-level `elements` list, with one
    index per shell in `basis[q]`.
    """
    bf_bounds = {}
    offset = 0
    for q in elements:
        count = len(basis[q])
        bf_bounds[q] = [offset, offset + count]
        offset += count
    return bf_bounds
def make_moldata(fname):
    """Load a saved density file and precompute per-molecule data for fitting.

    Returns a dict with the pyscf molecule, the density grid data, the
    density self-overlap, basis-function index/center maps, and squared
    atom-to-gridpoint distances.
    """
    rho_data = np.load(fname)
    molecule = rho_data['atom' ]
    rho = rho_data['rho' ]
    coords = rho_data['coords' ]
    weights = rho_data['weights']
    # self-overlap of the density: sum_p w_p * rho_p^2
    # NOTE(review): `self` as a variable name shadows the conventional method
    # receiver name; it is just a local here.
    self = np.einsum('p,p,p->',weights,rho,rho)
    mol = gto.M(atom=str(molecule), basis=basis)
    # map every AO (counting 2l+1 components per shell) to its global
    # basis-function index and its atom center
    idx = []
    centers = []
    for iat in range(mol.natm):
        q = mol._atom[iat][0]
        ib0 = bf_bounds[q][0]
        for ib,b in enumerate(mol._basis[q]):
            l = b[0]
            idx += [ib+ib0] * (2*l+1)
            centers += [iat] * (2*l+1)
    idx = np.array(idx)
    # squared distance from each atom to each grid point
    distances = np.zeros((mol.natm, len(rho)))
    for iat in range(mol.natm):
        center = mol.atom_coord(iat)
        distances[iat] = np.sum((coords - center)**2, axis=1)
    return {
        'mol' : mol ,
        'rho' : rho ,
        'coords' : coords ,
        'weights' : weights ,
        'self' : self ,
        'idx' : idx ,
        'centers' : centers ,
        'distances' : distances
    }
###################################################################
# Driver: parse arguments, load bases and molecule data, then minimize the
# fitting error over the log-exponents of the selected elements.
parser = argparse.ArgumentParser(description='Optimize basis')
parser.add_argument('-e', '--elements', metavar='elements', type=str, nargs='+', help='elements for optimization')
parser.add_argument('-b', '--basis', metavar='basis', type=str, nargs='+', help='initial df bases', required=True)
parser.add_argument('--molecules', metavar='molecules', type=str, nargs='+', help='molecules', required=True) # cannot use '-m' because pyscf treats it as memory
parser.add_argument('-g', '--gtol', metavar='gtol', type=float, default=1e-7, help='tolerance')
parser.add_argument('--method', metavar='method', type=str, default='CG', help='minimization algoritm')
parser.add_argument('--check', dest='check', default=False, action='store_true')
args = parser.parse_args()

print(args.basis)
basis = read_bases(args.basis)
elements = sorted(basis.keys(), key=pyscf.data.elements.charge)
# elements actually being optimized (default: all loaded elements)
if args.elements:
    myelements = args.elements
    myelements.sort(key=pyscf.data.elements.charge)
else:
    myelements = elements
print(myelements, '/', elements)

# flatten all shells; record each shell's angular momentum and first exponent
basis_list = [ i for q in elements for i in basis[q]]
angular_momenta = np.array([ i[0] for i in basis_list ])
exponents = np.array([ i[1][0] for i in basis_list ])
nexp = len(basis_list)
bf_bounds = make_bf_start()

print(args.molecules)
moldata = []
for fname in args.molecules:
    moldata.append(make_moldata(fname))
print()
for l,a in zip(angular_momenta, exponents):
    print('l =', l, 'a = ', a)
print()

# optimize in log space so exponents stay positive
x0 = np.log(exponents)
x1 = cut_myelements(x0, myelements, bf_bounds)
angular_momenta = cut_myelements(angular_momenta, myelements, bf_bounds)

# --check: compare the analytical gradient against finite differences and exit
if args.check:
    it = 0
    gr1 = scipy.optimize.approx_fprime(x1, energy, 1e-4)
    gr2 = gradient_only(x1)
    print()
    print('anal')
    print(gr2)
    print('num')
    print(gr1)
    print('diff')
    print(gr1-gr2)
    print('rel diff')
    print((gr1-gr2)/gr1)
    print()
    exit()

print(args.method, 'tol =', args.gtol)
it = 0
xopt = scipy.optimize.minimize(energy, x1, method=args.method, jac=gradient_only, options={ 'gtol':args.gtol,'disp':True}).x
exponents = np.exp(xopt)
newbasis = exp2basis(exponents, myelements, basis)
printbasis(newbasis, sys.stdout)
|
<gh_stars>0
"""Base backend for Arduino Uno and its derivatives."""
from typing import Mapping, Optional, Type
from j5.backends import Backend
from j5.backends.console import Console
from j5.components import GPIOPinInterface, GPIOPinMode, LEDInterface
class PinData:
    """Mutable record of a single pin: its mode and last digital state."""

    # hardware mode the pin is configured in
    mode: GPIOPinMode
    # last digital value written to the pin
    digital_state: bool

    def __init__(self, *, mode: GPIOPinMode, digital_state: bool):
        """Store the initial mode and digital state (keyword-only)."""
        self.digital_state = digital_state
        self.mode = mode
class ArduinoConsoleBackend(
    GPIOPinInterface,
    LEDInterface,
    Backend,
):
    """An abstract class to create console backends for different Arduinos."""

    def __init__(self, serial: str, console_class: Type[Console] = Console) -> None:
        self._serial = serial
        # initial state: every pin is a digital output driven low
        self._pins: Mapping[int, PinData] = {
            i: PinData(mode=GPIOPinMode.DIGITAL_OUTPUT, digital_state=False)
            for i in range(2, 20)
            # Digital 2 - 13
            # Analogue 14 - 19
        }
        # Setup console helper
        # NOTE(review): `self.board` is not defined in this class; presumably a
        # class attribute supplied by concrete subclasses -- confirm.
        self._console = console_class(f"{self.board.__name__}({self._serial})")

    @property
    def firmware_version(self) -> Optional[str]:
        """The firmware version reported by the board."""
        return None  # Console, so no firmware

    def set_gpio_pin_mode(self, identifier: int, pin_mode: GPIOPinMode) -> None:
        """Set the hardware mode of a GPIO pin."""
        self._console.info(f"Set pin {identifier} to {pin_mode.name}")
        self._pins[identifier].mode = pin_mode

    def get_gpio_pin_mode(self, identifier: int) -> GPIOPinMode:
        """Get the hardware mode of a GPIO pin."""
        return self._pins[identifier].mode

    def write_gpio_pin_digital_state(self, identifier: int, state: bool) -> None:
        """Write to the digital state of a GPIO pin."""
        if self._pins[identifier].mode is not GPIOPinMode.DIGITAL_OUTPUT:
            raise ValueError(f"Pin {identifier} mode needs to be DIGITAL_OUTPUT "
                             f"in order to set the digital state.")
        self._console.info(f"Set pin {identifier} state to {state}")
        self._pins[identifier].digital_state = state

    def get_gpio_pin_digital_state(self, identifier: int) -> bool:
        """Get the last written state of the GPIO pin."""
        if self._pins[identifier].mode is not GPIOPinMode.DIGITAL_OUTPUT:
            raise ValueError(f"Pin {identifier} mode needs to be DIGITAL_OUTPUT "
                             f"in order to read the digital state.")
        return self._pins[identifier].digital_state

    def read_gpio_pin_digital_state(self, identifier: int) -> bool:
        """Read the digital state of the GPIO pin."""
        # reading requires one of the digital-input modes
        if self._pins[identifier].mode not in [
            GPIOPinMode.DIGITAL_INPUT_PULLUP,
            GPIOPinMode.DIGITAL_INPUT,
            GPIOPinMode.DIGITAL_INPUT_PULLDOWN,
        ]:
            raise ValueError(f"Pin {identifier} mode needs to be DIGITAL_INPUT_* "
                             f"in order to read the digital state.")
        # console backend: ask the user for the simulated pin state
        return self._console.read(f"Pin {identifier} digital state [true/false]", bool)

    def read_gpio_pin_analogue_value(self, identifier: int) -> float:
        """Read the scaled analogue value of the GPIO pin."""
        if self._pins[identifier].mode is not GPIOPinMode.ANALOGUE_INPUT:
            raise ValueError(f"Pin {identifier} mode needs to be ANALOGUE_INPUT "
                             f"in order to read the analogue value.")
        return self._console.read(f"Pin {identifier} ADC state [float]", float)

    def write_gpio_pin_dac_value(self, identifier: int, scaled_value: float) -> None:
        """Write a scaled analogue value to the DAC on the GPIO pin."""
        # Uno doesn't have any of these.
        raise NotImplementedError

    def write_gpio_pin_pwm_value(self, identifier: int, duty_cycle: float) -> None:
        """Write a scaled analogue value to the PWM on the GPIO pin."""
        # Not implemented on derivative boards yet.
        raise NotImplementedError

    def get_led_state(self, identifier: int) -> bool:
        """Get the state of an LED."""
        # the on-board LED shares digital pin 13
        if identifier != 0:
            raise ValueError("Arduino Uno only has LED 0 (digital pin 13).")
        return self.get_gpio_pin_digital_state(13)

    def set_led_state(self, identifier: int, state: bool) -> None:
        """Set the state of an LED."""
        if identifier != 0:
            raise ValueError("Arduino Uno only has LED 0 (digital pin 13)")
        self.write_gpio_pin_digital_state(13, state)
|
<gh_stars>10-100
# THIS SCRIPT IS SUPPOSED TO RUN IN A JUPYTER NOTEBOOK (WE USED VS CODE)
# %%
import pandas as pd
import numpy as np
from sklearn.preprocessing import PowerTransformer
from sklearn.covariance import MinCovDet
from scipy.stats import chi2
import seaborn as sb
import matplotlib.pyplot as plt
# %%
def add_is_outlier_IQR(data, col_name):
    """Flag IQR outliers of `col_name` in a new boolean column.

    Adds `is_<col_name>_outlier` (spaces replaced by underscores) marking
    values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]. Mutates and returns `data`.
    """
    values = data[col_name]
    q1 = values.quantile(0.25)
    q3 = values.quantile(0.75)
    iqr = q3 - q1
    lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    flag_col = f'is_{col_name.replace(" ", "_")}_outlier'
    data[flag_col] = (values < lower) | (values > upper)
    return data
def boxPlot(data, varx, vary, title, xlab, ylab, hue = None):
    """Draw a seaborn boxplot of `vary` against `varx` with styled labels.

    Returns the matplotlib Axes produced by seaborn.
    """
    # Pass x/y as keywords: positional x/y were deprecated in seaborn 0.12
    # and raise a TypeError in current releases; the keyword form also works
    # on older seaborn versions.
    hplot = sb.boxplot(x=varx, y=vary, hue=hue, data=data)
    plt.title(title, fontsize=18)
    plt.xlabel(xlab, fontsize=16)
    plt.ylabel(ylab, fontsize=16)
    return hplot
def yeo_johnson_transf(data):
    """Fit a standardized Yeo-Johnson transform to `data`.

    Returns (transformed DataFrame with the original column names,
    fitted lambda array).
    """
    transformer = PowerTransformer(method='yeo-johnson', standardize=True)
    transformer.fit(data)
    transformed = pd.DataFrame(
        transformer.transform(data), columns=data.columns.values
    )
    return transformed, transformer.lambdas_
# %%
# Load red wine data
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv',
                 sep=';')
# Get numeric column names but the quality
numeric_col_names = df.drop('quality', axis=1).columns.values
df
# %%
sb.set(style="whitegrid")
# Let's plot sulphates boxplot in order to see if
# there are univariate outliers
boxPlot(df, varx='sulphates', vary=None,
        title='Sulphates distribution',
        xlab='sulphates', ylab=None)
# In case you're not using a Jupyter notebook run also the following:
# plt.show()
# %%
# As you see there are outliers, let's add a boolean
# column to the dataframe indicating which row
# has a sulphate outlier
add_is_outlier_IQR(df, 'sulphates')
# Let's plot the boxplot removing the initial outliers
df_no_outliers = df.loc[~df['is_sulphates_outlier']]
boxPlot(df_no_outliers, varx='sulphates', vary=None,
        title='Sulphates distribution without outliers',
        xlab='sulphates', ylab=None)
# In case you're not using a Jupyter notebook run also the following:
# plt.show()
# %%
# Let's now plot boxplots for each quality vote,
# removing the initial outliers
boxPlot(df_no_outliers, varx='quality', vary='sulphates',
        title='Sulphates distribution without outliers by Quality',
        xlab='quality', ylab='sulphates')
# In case you're not using a Jupyter notebook run also the following:
# plt.show()
# %%
# MULTIVARIATE ANALYSIS
#-----------------------
# Let's now plot an histogram for all the variables
# using the dataset without outliers
df_no_outliers.drop('quality', axis=1).hist(figsize=(10,10))
plt.tight_layout()
plt.show()
# %%
# Let's apply Yeo-Johnson transformations
# in order to remove skewness
df_transf, lambda_arr = yeo_johnson_transf(df_no_outliers[numeric_col_names])
# Let's plot an histogram for all the transformed variables
# in order to check if skewness is decreased
df_transf.hist(figsize=(10,10))
plt.tight_layout()
plt.show()
# %%
# # WARNING: The following plots take some minutes to get plotted.
# #
# # If you want to check also the density plots of each variable and
# # the scatter plots between all of them two by two, grouped by quality,
# # you can use the pairplot. This one is using the original dataframe...
# sb.pairplot(df, hue='quality', diag_kind = 'kde',
#             plot_kws = {'alpha': 0.6, 's': 80, 'edgecolor': 'k'})
# # %%
# # ... and this one is generated using the transformed dataframe.
# df_transf_qual = df_transf.copy()
# df_transf_qual['quality'] = df['quality']
# sb.pairplot(df_transf_qual, hue='quality', diag_kind = 'kde',
#             plot_kws = {'alpha': 0.6, 's': 80, 'edgecolor': 'k'})
# # In case you're not using a Jupyter notebook run also the following:
# # plt.show()
# %%
# Let's compute the squared Mahalanobis distances using
# the Minimum Covariance Determinant to calculate a
# robust covariance matrix
robust_cov = MinCovDet(support_fraction=0.7).fit(df_transf)
center = robust_cov.location_
D = robust_cov.mahalanobis(df_transf - center)
D
# %%
# The squared Mahalanobis distance (D) follows a Chi-Square distribution
# (https://markusthill.github.io/mahalanbis-chi-squared/#the-squared-mahalanobis-distance-follows-a-chi-square-distribution-more-formal-derivation)
#
# Given a cutoff value associated with the statistical significance
# with which we want to determine outliers, we obtain the corresponding
# threshold value above which to consider an observation an outlier
cutoff = 0.98
degrees_of_freedom = df_transf.shape[1] # given by the number of variables (columns)
cut = chi2.ppf(cutoff, degrees_of_freedom) # threshold value
# Squared Mahalanobis distance values of outliers
D[D > cut]
# %%
# Calculate the probability that the distance D[5]
# is an outlier
chi2.cdf(D[5], degrees_of_freedom)
# %%
# Calculate if the observation is an outlier given the cutoff
is_outlier_arr = (D > cut)
# Calculate the probability that an observation is an outlier not by chance
outliers_stat_proba = np.zeros(len(is_outlier_arr))
for i in range(len(is_outlier_arr)):
    outliers_stat_proba[i] = chi2.cdf(D[i], degrees_of_freedom)
# How many outliers with statistical significance greater than the cutoff
len(outliers_stat_proba[outliers_stat_proba > cutoff])
# %%
# Adding outliers info to the dataframe according to
# the squared Mahalanobis distance
df['is_mahalanobis_outlier'] = is_outlier_arr
df['mahalanobis_outlier_proba'] = outliers_stat_proba
df[df['is_mahalanobis_outlier']]
# %%
|
import sys
from decimal import *
class Convertor:
    """Abstract base class for score convertors.

    Subclasses (MIDI, MusicXML, Lilypond, ...) override convert() to turn a
    parsed score dictionary into an output string.
    """
    def convert(self,score,settings):
        # Base implementation intentionally does nothing; subclasses override.
        pass
class MidiConvertor(Convertor):
    """Convert a parsed score into textual MIDI ("MFile") event data for a
    generic drumline soundfont.

    The output is the text form consumed by MIDI text-to-binary tools: a
    header line, one tempo track, then one track per instrument containing
    note-on events.  Note-off events are currently disabled (see the
    commented-out "Off" lines in convert()).
    """
    # does noteOff affect the output at all? test again, because i might need it for cymbals
    # Fraction of the instrument's track volume used for each dynamic marking.
    volumeMap = {
        "P": .20,
        "MP": .40, # pianissimo
        "MF": .60, # mezzo-forte
        "F": .80, # forte
        "FF": 1.00 # ff
    }
    # will need to hard-code volume levels for crescendos and decrescendos
    def dynamicRanges(self, score):
        """Annotate every note with dynamic (volume) information, in place.

        For each instrument present in the score:
        - tags each note with its 'beatIndex' and 'noteIndex',
        - gives notes outside a crescendo/decrescendo an absolute
          'volumePercentage' taken from volumeMap,
        - when a dynamic change ends, stores the per-beat volume delta as
          'perBeat' on the note that started the change.

        Returns the (mutated) score.
        """
        # annotate with crescendo/decrescndo rise/fall data
        # keep track of beats and notes between change start and stopping point
        # annotate each note with its beat/note index
        # helps us with calculating volume rise and fall per note
        # along the way, annotate notes with volume percentage that aren't within a dynamic change
        # keep track of the previous dynamic change
        for instrument in score['instruments']:
            if not instrument in score:
                continue
            music = score[ instrument ]
            start = {}  # note that opened the current dynamic change ({} = none open)
            startDynamic = 'MF'
            dynamic = 'MF'
            beatCount = 0
            for measure in music:
                for beat in measure['beats']:
                    noteCount = 0
                    for note in beat:
                        note['beatIndex'] = beatCount
                        note['noteIndex'] = noteCount
                        if 'dynamic' in note:
                            dynamic = note['dynamic']
                        if 'dynamicChangeEnd' in note:
                            # log what we're changing to
                            # this note should have an absolute dynamic
                            beats = note['beatIndex'] - start['beatIndex']
                            diff = self.volumeMap[ dynamic ] - self.volumeMap[ startDynamic ]
                            perBeat = diff / beats
                            start['perBeat'] = perBeat # yikes, floating point sucks
                            start = {}
                        if len(start) == 0:
                            note['volumePercentage'] = self.volumeMap[ dynamic ]
                        if 'dynamicChange' in note:
                            start = note
                            startDynamic = dynamic
                        noteCount += 1
                    beatCount += 1
        return score
    def cymbals(self, score):
        """Rewrite cymbal-note annotations ('tap', 'hihat', raw surface codes)
        into the single-character surface codes used by the note map.
        Mutates and returns the score; non-cymbal instruments are untouched."""
        # convert cymbal notes with special annotations to different surfaces
        for instrument in score['instruments']:
            if not instrument in score:
                continue
            music = score[ instrument ]
            if not instrument == 'cymbal':
                continue
            for measure in music:
                for beat in measure['beats']:
                    for note in beat:
                        if 'tap' in note:
                            note['surface'] = '^'
                        elif 'hihat' in note:
                            note['surface'] = '*'
                        elif 'surface' in note:
                            if note['surface'] == 'c' or note['surface'] == 'd':
                                note['surface'] = '@'
                            else:
                                note['surface'] = '!'
        return score
    def convert(self, score, settings):
        """Render the score as MFile-format MIDI text and return it.

        `settings` is accepted for interface compatibility but unused here.
        Raises KeyError when an instrument or surface code is missing from
        the internal maps.
        """
        score = self.dynamicRanges(score)
        score = self.cymbals(score)
        #print( repr(score) )
        #return ''
        # MIDI program number per instrument track.
        instrumentProgramMap = {
            "bass": "0",
            "cymbal": "3",
            "snare": "2",
            "tenor": "1"
        }
        # Controller-7 (main volume) value per instrument, 0-127.
        instrumentVolumeMap = {
            "bass": 127,
            "cymbal": 80,
            "snare": 100,
            "tenor": 100
        }
        # Controller-10 (pan) value per instrument, 0-127.
        instrumentPanMap = {
            "bass": "64", # 30
            "cymbal": "64",
            "snare": "64",
            "tenor": "64" # 98
        }
        # Surface code -> MIDI note name.
        noteMap = {
            # snare
            "h": "c6",
            "z": "c6",
            "x": "d6",
            # bass and tenor
            "a": "e6",
            "b": "c6",
            "c": "a5",
            "d": "f5",
            "e": "d5",
            "f": "e6",
            # cymbal
            "!": "e6",
            "@": "c6",
            "^": "e6",
            "*": "a5"
        }
        if 'tempo' in score:
            scoreTempo = int(score['tempo'])
            tempo = 60000000 / scoreTempo  # microseconds per quarter note
        else:
            scoreTempo = 120
            tempo = 500000
        # MFile format tracks division
        out = "MFile 1 " + str(len(score['instruments']) + 1) + " 384\n" # +1 tracks because of tempo track
        # tempo track
        out += "MTrk\n"
        out += "0 Tempo " + str(tempo) + "\n"
        out += "0 TimeSig 4/4 18 8\n"
        out += "TrkEnd\n"
        # set counter a second into the future for blank space padding
        channel = 1
        flamPosition = -20 # calculate based on tempo
        accentIncrease = 2 * int(127/5)
        perBeat = 384  # MIDI ticks per beat (matches the header division)
        startingCounter = 30 #(scoreTempo / 60) * 30 # calculate how much time would yield a second
        for instrument in score['instruments']:
            if not instrument in score:
                continue
            music = score[ instrument ]
            instrumentVolume = instrumentVolumeMap[ instrument ]
            volume = self.volumeMap['F'] # start at forte
            volumePerBeat = 0
            volumePerNote = 0
            counter = startingCounter
            nextBeat = counter + perBeat
            channelString = str(channel)
            out += "MTrk\n"
            # map instrument to a channel
            out += "0 PrCh ch=" + channelString + " prog=" + instrumentProgramMap[instrument] + "\n"
            # set main track volume
            out += "0 Par ch=" + channelString + " con=7 val=" + str(instrumentVolume) + "\n"
            out += "0 Par ch=" + channelString + " con=10 val=" + instrumentPanMap[instrument] + "\n"
            for measure in music:
                for beat in measure['beats']:
                    c1 = counter
                    volume += volumePerBeat
                    notes = len(beat) # need to only count actual notes, not rests
                    for note in beat:
                        c2 = str(c1)
                        if 'rest' in note:
                            pass
                        else:
                            if 'flam' in note:
                                # if surface is shot, flams should be on the drum head
                                # annotate notes with proper flam surface
                                #go back a bit, from current counter value
                                tempVolume = int(instrumentVolume * self.volumeMap['MP'])
                                out += str(c1 - 13) + " On ch=" + channelString + " n=" + noteMap[ note['flam'] ] + " v=" + str(tempVolume) + "\n"
                                #out += str(c1 - 5) + " Off ch=" + channelString + " n=" + noteMap[ note['surface'] ] + " v=0\n"
                            # prepare volume
                            if 'volumePercentage' in note:
                                volume = note['volumePercentage']
                            tempVolume = volume
                            # "<>" (Python 2 only) replaced with "!=".
                            if volumePerNote != 0:
                                tempVolume += volumePerNote
                            if 'perBeat' in note:
                                volumePerBeat = note['perBeat']
                                volumePerNote = volumePerBeat / notes
                            if 'accent' in note:
                                actualVolume = int(instrumentVolume * tempVolume) + accentIncrease
                            else:
                                actualVolume = int(instrumentVolume * tempVolume)
                            if actualVolume > 127:
                                actualVolume = 127  # clamp to the MIDI velocity range
                            # bass unisons
                            if note['surface'] == 'u':
                                note['surface'] = 'abcde'
                            for surface in note['surface']:
                                out += c2 + " On ch=" + channelString + " n=" + noteMap[ surface ] + " v=" + str(actualVolume) + "\n"
                                # expand diddle/tremolo
                                # add the second note
                                if 'diddle' in note:
                                    # don't think diddle should be same volume!
                                    c3 = str(c1 + (perBeat / (note['duration'] * 2)))
                                    out += c3 + " On ch=" + channelString + " n=" + noteMap[ surface ] + " v=" + str(actualVolume) + "\n"
                                if 'fours' in note:
                                    c3 = perBeat / (note['duration'] * 4)
                                    c4 = str(c1 + (c3))
                                    out += c4 + " On ch=" + channelString + " n=" + noteMap[ surface ] + " v=" + str(actualVolume) + "\n"
                                    c4 = str(c1 + c3 + c3)
                                    out += c4 + " On ch=" + channelString + " n=" + noteMap[ surface ] + " v=" + str(actualVolume) + "\n"
                                    c4 = str(c1 + c3 + c3 + c3)
                                    out += c4 + " On ch=" + channelString + " n=" + noteMap[ surface ] + " v=" + str(actualVolume) + "\n"
                            # when do we turn off
                            # divide
                            c3 = str(c1 + (perBeat / note['duration']))
                            for surface in note['surface']:
                                # why do i sometimes see the note off volume at 64?
                                #out += c3 + " Off ch=" + channelString + " n=" + noteMap[ surface ] + " v=0\n"
                                pass
                        # i bet some cymbal notes we'll have to avoid turning off until we get an explicit choke note
                        c1 += (perBeat / note['duration']) # how long does this note last?
                    nextBeat += perBeat
                    counter += perBeat
                # end note loop
                # end beat loop
            # end measure loop
            out += "TrkEnd\n"
            channel += 1
        # end instrument loop
        return out
class VDLMidiConvertor(Convertor):
    """Convert a parsed score into textual MIDI ("MFile") event data targeting
    the Virtual Drumline (VDL) sample library.

    Differs from MidiConvertor in its dynamic scale, per-instrument maps and
    in that every surface maps to a [right, left] pair of MIDI note numbers
    chosen by the playing hand.
    """
    # does noteOff affect the output at all? test again, because i might need it for cymbals
    # tried 7% gain with each level, not significant enough
    # but .15 leaves soft part inaudible, with bass parts boomy on mp3 sample playing through speakers. ugh.
    # Fraction of the instrument's track volume used for each dynamic marking.
    volumeMap = {
        "P": .50, # pianissimo
        "MP": .60, # pianissimo
        # should be mezzo-piano in here, i think
        "MF": .70, # mezzo-forte
        "F": .80, # forte
        "FF": .90 # ff
    }
    # will need to hard-code volume levels for crescendos and decrescendos
    def dynamicRanges(self, score):
        """Annotate every note with dynamic (volume) information, in place.

        Same algorithm as MidiConvertor.dynamicRanges, but using this class's
        volumeMap: tags notes with 'beatIndex'/'noteIndex', assigns
        'volumePercentage' outside dynamic changes, and stores the per-beat
        delta 'perBeat' on the note that opened a crescendo/decrescendo.

        Returns the (mutated) score.
        """
        # annotate with crescendo/decrescndo rise/fall data
        # keep track of beats and notes between change start and stopping point
        # annotate each note with its beat/note index
        # helps us with calculating volume rise and fall per note
        # along the way, annotate notes with volume percentage that aren't within a dynamic change
        # keep track of the previous dynamic change
        for instrument in score['instruments']:
            if not instrument in score:
                continue
            music = score[ instrument ]
            start = {}  # note that opened the current dynamic change ({} = none open)
            startDynamic = 'MF'
            dynamic = 'MF'
            beatCount = 0
            for measure in music:
                for beat in measure['beats']:
                    noteCount = 0
                    for note in beat:
                        note['beatIndex'] = beatCount
                        note['noteIndex'] = noteCount
                        if 'dynamic' in note:
                            dynamic = note['dynamic']
                        if 'dynamicChangeEnd' in note:
                            # log what we're changing to
                            # this note should have an absolute dynamic
                            beats = note['beatIndex'] - start['beatIndex']
                            diff = self.volumeMap[ dynamic ] - self.volumeMap[ startDynamic ]
                            perBeat = diff / beats
                            start['perBeat'] = perBeat # yikes, floating point sucks
                            start = {}
                        if len(start) == 0:
                            note['volumePercentage'] = self.volumeMap[ dynamic ]
                        if 'dynamicChange' in note:
                            start = note
                            startDynamic = dynamic
                        noteCount += 1
                    beatCount += 1
        return score
    # tweak surfaces
    def surfaces(self, score):
        """Annotate each note with its playing hand ('hand2': 0=right, 1=left,
        alternating, including over rests) and rewrite surface codes for
        shots, rim clicks, bass unison rims and the cymbal articulations.
        Mutates and returns the score."""
        # convert cymbal notes with special annotations to different surfaces
        for instrument in score['instruments']:
            if not instrument in score:
                continue
            music = score[ instrument ]
            hand = 0
            for measure in music:
                for beat in measure['beats']:
                    for note in beat:
                        if 'hand' in note:
                            # explicit sticking overrides the alternation
                            if note['hand'] == 'l':
                                hand = 1
                            else:
                                hand = 0
                        note['hand2'] = hand
                        # change surface for shots
                        if instrument == 'tenor':
                            # but simultaneous
                            if 'shot' in note:
                                note['surface'] = note['surface'] + '!'
                        # change surface for rim clicks
                        if instrument == 'tenor' or instrument == 'bass':
                            if 'rim' in note:
                                note['surface'] = note['surface'].upper()
                                if 'cymbal' in note and note['cymbal'] == 'slide':
                                    note['surface'] = chr(21 + ord(note['surface']))
                        if instrument == 'bass':
                            if 'shot' in note: # unison bass rims
                                note['surface'] = 'U'
                                del(note['shot'])
                        if instrument == 'cymbal' and 'surface' in note and not 'rest' in note:
                            if note['surface'] == 'u':
                                # unison cymbal hit, possibly specialized below
                                note['surface'] = 'a'
                                if 'cymbal' in note:
                                    if note['cymbal'] == 'slide':
                                        note['surface'] = 'b'
                                    if note['cymbal'] == 'hihat':
                                        note['surface'] = 'd'
                                    if note['cymbal'] == 'tap':
                                        note['surface'] = 'e'
                                    if note['cymbal'] == 'crashchoke':
                                        note['surface'] = 'c'
                            else:
                                # solo cymbal hit
                                note['surface'] = 's'
                                if 'cymbal' in note:
                                    if note['cymbal'] == 'slide':
                                        note['surface'] = 't'
                                    if note['cymbal'] == 'hihat':
                                        note['surface'] = 'v'
                                    if note['cymbal'] == 'tap':
                                        note['surface'] = 'w'
                                    if note['cymbal'] == 'crashchoke':
                                        note['surface'] = 'u'
                        # but doing this gets clipping
                        #if instrument == 'bass' and 'surface' in note and note['surface'] == 'u':
                        #    note['surface'] = 'abcde'
                        # we still alternate on rests, right? we should.
                        if hand == 0:
                            hand = 1
                        else:
                            hand = 0
        return score
    def convert(self, score, settings):
        """Render the score as MFile-format MIDI text for VDL and return it.

        `settings` is accepted for interface compatibility but unused here.
        """
        score = self.dynamicRanges(score)
        score = self.surfaces(score)
        # annotate sticking
        #print( repr(score) )
        #return ''
        instrumentProgramMap = {
            "bass": "2",
            "cymbal": "3",
            "snare": "0",
            "tenor": "1"
        }
        # integers so we can add and subtract
        # are tenors too soft on Bananaton?
        instrumentVolumeMap = {
            "bass": 110, # accented bass unisons clip at Forte
            "cymbal": 127,
            "snare": 127,
            "tenor": 127
        }
        instrumentPanMap = {
            "bass": "94", # 30
            "cymbal": "64",
            "snare": "64",
            "tenor": "34" # 98
        }
        # these map to manual VirtualDrumline instruments
        # surface -> [right-hand MIDI note, left-hand MIDI note]
        noteMap = {
            "snare": {
                # actually, snares seem to be up another octave
                "h": [68,66], # g#4 f#4
                "x": [67,65], # shot g4 f4
                "y": [75,75], # crushes
                "z": [73,73], # crushes
                "a": [68,66], # head, center
                #"b": [], # head, midway
                #"c": [], # head, edge
                "d": [63,61], # rim
                "e": [28,28], # stick click
                "f": [71,70] # back stick
            },
            "tenor": {
                # heads
                "a": [60,59], # c4 b3
                "b": [58,57], # a#3 a3
                "c": [56,55], # g#3 g3
                "d": [54,53], # f#3 f3
                "e": [64,63], # e4 d#4
                "f": [62,61], # d4 c#4
                # shots
                # rims should be upper, like basses, but oh well
                "A": [36,35], # c2 b1
                "B": [34,33], # a#1 a1
                "C": [32,31], # g#1 g1
                "D": [30,29], # f#1 f1
                "E": [40,39], # e2 d#2
                "F": [38,37], # d2 c#2
                #crushes
                "u": [84,84], # c2 b1
                "v": [82,82], # a#1 a1
                "w": [80,80], # g#1 g1
                "x": [78,78], # f#1 f1
                "y": [88,88], # e2 d#2
                "z": [86,86] # d2 c#2
            },
            "bass": {
                "a": [64,63], # e4 d#4
                "b": [62,61], # d4 c#4
                "c": [60,59], # c4 b3
                "d": [58,57], # a#3 a3
                "e": [56,55], # g#3 g3
                "u": [52,52], # used two 52s to avoid buzzing on left. e3 d#3
                "x": [50,49],
                # rims
                "A": [40,39], # e2 d#2
                "B": [38,37], # d2 c#2
                "C": [36,35], # c2 b1
                "D": [34,33], # a#1 a1
                "E": [32,31], # g#1 g1
                "U": [50,49]# d3 c#3
            },
            "cymbal": {
                # unisons
                "a": [54,54], # was flat crash, now port
                "b": [64,64], # slide-choke
                "c": [57,57], # slam-choke
                "d": [70,70], # hihat
                "e": [66,66], # edge tap
                "f": [], # mute for a crash?
                # solo
                "s": [30,30], # crash
                "t": [40,40], # slide-choke
                "u": [33,33], # crash choke
                "v": [46,46], # hihat
                "w": [42,42] # edge tap
            }
        }
        if 'tempo' in score:
            scoreTempo = int(score['tempo'])
            tempo = 60000000 / scoreTempo  # microseconds per quarter note
        else:
            scoreTempo = 120
            tempo = 500000
        # MFile format tracks division
        tracks = 1  # tempo track, plus one per instrument actually present
        for instrument in score['instruments']:
            if instrument in score:
                tracks = tracks + 1
        out = "MFile 1 " + str(tracks) + " 384\n" # +1 tracks because of tempo track
        # tempo track
        out += "MTrk\n"
        out += "0 Tempo " + str(tempo) + "\n"
        out += "0 TimeSig 4/4 18 8\n"
        out += "TrkEnd\n"
        # set counter a second into the future for blank space padding
        flamPosition = -25 # calculate based on tempo
        accentIncrease = 20 # we go up 10% (12.7x2=25) each dynamic level
        perBeat = 384
        startingCounter = 30 #(scoreTempo / 60) * 30 # calculate how much time would yield a second
        channel = 1
        transpose = 12
        # 12 for making midi file to play through Kontact
        for instrument in score['instruments']:
            if not instrument in score:
                channel += 1
                continue
            music = score[ instrument ]
            instrumentVolume = instrumentVolumeMap[ instrument ]
            volume = self.volumeMap['F'] # start at forte
            volumePerBeat = 0
            volumePerNote = 0
            # Initialize tempVolume: previously the "something = tempVolume"
            # save below raised UnboundLocalError when the very first non-rest
            # note of an instrument carried a flam.
            tempVolume = volume
            counter = startingCounter
            nextBeat = counter + perBeat
            channelString = str(channel)
            out += "MTrk\n"
            # map instrument to a channel
            out += "0 PrCh ch=" + channelString + " prog=" + instrumentProgramMap[instrument] + "\n"
            # set main track volume
            out += "0 Par ch=" + channelString + " con=7 val=" + str(instrumentVolume) + "\n"
            out += "0 Par ch=" + channelString + " con=10 val=" + instrumentPanMap[instrument] + "\n"
            for measure in music:
                for beat in measure['beats']:
                    c1 = counter
                    volume += volumePerBeat
                    notes = len(beat) # need to only count actual notes, not rests
                    for note in beat:
                        c2 = str(c1)
                        if 'rest' in note:
                            pass
                        else:
                            hand = note['hand2']
                            if 'flam' in note:
                                # grace note goes on the opposite hand
                                if hand == 0:
                                    hand2 = 1
                                else:
                                    hand2 = 0
                                # if surface is shot, flams should be on the drum head
                                # annotate notes with proper flam surface
                                #go back a bit, from current counter value
                                something = tempVolume
                                tempVolume = int(instrumentVolume * self.volumeMap['P'])
                                out += str(c1 - 13) + " On ch=" + channelString + " n=" + str( transpose + noteMap[ instrument ][ note['flam'] ][ hand2 ]) + " v=" + str(tempVolume) + "\n"
                                #out += str(c1 - 5) + " Off ch=" + channelString + " n=" + noteMap[ note['surface'] ][ hand ] + " v=0\n"
                                tempVolume = something
                            # prepare volume
                            if 'volumePercentage' in note:
                                volume = note['volumePercentage']
                            tempVolume = volume
                            # "<>" (Python 2 only) replaced with "!=".
                            if volumePerNote != 0:
                                tempVolume += volumePerNote
                            if 'perBeat' in note:
                                volumePerBeat = note['perBeat']
                                volumePerNote = volumePerBeat / notes
                            if 'accent' in note:
                                actualVolume = int(instrumentVolume * tempVolume) + accentIncrease
                            else:
                                actualVolume = int(instrumentVolume * tempVolume)
                            if actualVolume > 127:
                                actualVolume = 127  # clamp to the MIDI velocity range
                            if 'stop' in note:
                                onoff = 'Off'
                                actualVolume = 0
                            else:
                                onoff = 'On'
                            for surface in note['surface']:
                                # if tenor and shot, adjust mod wheel
                                out += c2 + " " + onoff + " ch=" + channelString + " n=" + str(transpose + noteMap[ instrument ][ surface ][ hand ]) + " v=" + str(actualVolume) + "\n"
                                # expand diddle/tremolo
                                # add the second note
                                if 'diddle' in note:
                                    # don't think diddle should be same volume!
                                    c3 = str(c1 + (perBeat / (note['duration'] * 2)))
                                    out += c3 + " On ch=" + channelString + " n=" + str(transpose + noteMap[ instrument ][ surface ][ hand ]) + " v=" + str(actualVolume) + "\n"
                                if 'fours' in note:
                                    c3 = perBeat / (note['duration'] * 4)
                                    c4 = str(c1 + (c3))
                                    if hand == 0:
                                        hand = 1
                                    else:
                                        hand = 0
                                    out += c4 + " On ch=" + channelString + " n=" + str(transpose + noteMap[ instrument ][ surface ][ hand ]) + " v=" + str(actualVolume) + "\n"
                                    c4 = str(c1 + c3 + c3)
                                    if hand == 0:
                                        hand = 1
                                    else:
                                        hand = 0
                                    out += c4 + " On ch=" + channelString + " n=" + str(transpose + noteMap[ instrument ][ surface ][ hand ]) + " v=" + str(actualVolume) + "\n"
                                    c4 = str(c1 + c3 + c3 + c3)
                                    if hand == 0:
                                        hand = 1
                                    else:
                                        hand = 0
                                    out += c4 + " On ch=" + channelString + " n=" + str(transpose + noteMap[ instrument ][ surface ][ hand ]) + " v=" + str(actualVolume) + "\n"
                            # when do we turn off
                            # divide
                            c3 = str(c1 + (perBeat / note['duration']))
                            for surface in note['surface']:
                                # why do i sometimes see the note off volume at 64?
                                #out += c3 + " Off ch=" + channelString + " n=" + noteMap[ surface ][ hand ] + " v=0\n"
                                pass
                        # i bet some cymbal notes we'll have to avoid turning off until we get an explicit choke note
                        c1 += (perBeat / note['duration']) # how long does this note last?
                    nextBeat += perBeat
                    counter += perBeat
                # end note loop
                # end beat loop
            # end measure loop
            out += "TrkEnd\n"
            channel += 1
        # end instrument loop
        return out
# for trying out another soundfont with a different instrument layout
class MidiConvertor2(Convertor):
    """Convert a parsed score into textual MIDI ("MFile") event data for an
    alternative soundfont with a different instrument layout.

    Structure mirrors MidiConvertor, but the note map is nested per
    instrument and different program/volume/pan values are used.
    """
    # does noteOff affect the output at all? test again, because i might need it for cymbals
    # Fraction of the instrument's track volume used for each dynamic marking.
    volumeMap = {
        "P": .20,
        "MP": .40, # pianissimo
        "MF": .60, # mezzo-forte
        "F": .80, # forte
        "FF": 1.00 # ff
    }
    # will need to hard-code volume levels for crescendos and decrescendos
    def dynamicRanges(self, score):
        """Annotate every note with dynamic (volume) information, in place.

        Same algorithm as MidiConvertor.dynamicRanges: tags notes with
        'beatIndex'/'noteIndex', assigns 'volumePercentage' outside dynamic
        changes, and stores the per-beat delta 'perBeat' on the note that
        opened a crescendo/decrescendo.  Returns the (mutated) score.
        """
        # annotate with crescendo/decrescndo rise/fall data
        # keep track of beats and notes between change start and stopping point
        # annotate each note with its beat/note index
        # helps us with calculating volume rise and fall per note
        # along the way, annotate notes with volume percentage that aren't within a dynamic change
        # keep track of the previous dynamic change
        for instrument in score['instruments']:
            if not instrument in score:
                continue
            music = score[ instrument ]
            start = {}  # note that opened the current dynamic change ({} = none open)
            startDynamic = 'MF'
            dynamic = 'MF'
            beatCount = 0
            for measure in music:
                for beat in measure['beats']:
                    noteCount = 0
                    for note in beat:
                        note['beatIndex'] = beatCount
                        note['noteIndex'] = noteCount
                        if 'dynamic' in note:
                            dynamic = note['dynamic']
                        if 'dynamicChangeEnd' in note:
                            # log what we're changing to
                            # this note should have an absolute dynamic
                            beats = note['beatIndex'] - start['beatIndex']
                            diff = self.volumeMap[ dynamic ] - self.volumeMap[ startDynamic ]
                            perBeat = diff / beats
                            start['perBeat'] = perBeat # yikes, floating point sucks
                            start = {}
                        if len(start) == 0:
                            note['volumePercentage'] = self.volumeMap[ dynamic ]
                        if 'dynamicChange' in note:
                            start = note
                            startDynamic = dynamic
                        noteCount += 1
                    beatCount += 1
        return score
    def convert(self, score, settings):
        """Render the score as MFile-format MIDI text and return it.

        `settings` is accepted for interface compatibility but unused here.
        """
        score = self.dynamicRanges(score)
        #print( repr(score) )
        #return ''
        instrumentProgramMap = {
            "bass": "0",
            "cymbal": "0",
            "snare": "0",
            "tenor": "0"
        }
        instrumentVolumeMap = {
            "bass": 127,
            "cymbal": 127,
            "snare": 100,
            "tenor": 110
        }
        instrumentPanMap = {
            "bass": "30",
            "cymbal": "64",
            "snare": "64",
            "tenor": "98"
        }
        # instrument -> surface code -> MIDI note number (as a string)
        noteMap = {
            'snare': {
                "h": "77",
                "x": "79",
                "s": "81" # rim/ping
            },
            'bass': {
                "a": "69",
                "b": "66",
                "c": "62",
                "d": "57",
                "e": "53" # should be 50
            },
            'tenor': {
                "a": "83", # 83
                "b": "82", # 82
                "c": "80", # 80
                "d": "78", # 78
                "e": "85" # 85
            },
            "cymbal": {
                "a": "111",
                "b": "113",
                "c": "111",
                "d": "113",
                "^": "111", # taps
                "=": "113", # slam-choke
                "h": "121", # hihat
            }
        }
        if 'tempo' in score:
            scoreTempo = int(score['tempo'])
            tempo = 60000000 / scoreTempo  # microseconds per quarter note
        else:
            scoreTempo = 120
            tempo = 500000
        # MFile format tracks division
        out = "MFile 1 " + str(len(score['instruments']) + 1) + " 384\n" # +1 tracks because of tempo track
        # tempo track
        out += "MTrk\n"
        out += "0 Tempo " + str(tempo) + "\n"
        out += "0 TimeSig 4/4 18 8\n"
        out += "TrkEnd\n"
        # set counter a second into the future for blank space padding
        channel = 1
        flamPosition = -20 # calculate based on tempo
        accentIncrease = int(127/4)
        perBeat = 384
        startingCounter = 30 #(scoreTempo / 60) * 30 # calculate how much time would yield a second
        for instrument in score['instruments']:
            if not instrument in score:
                continue
            music = score[ instrument ]
            instrumentVolume = instrumentVolumeMap[ instrument ]
            volume = self.volumeMap['F'] # start at forte
            volumePerBeat = 0
            volumePerNote = 0
            counter = startingCounter
            nextBeat = counter + perBeat
            channelString = str(channel)
            out += "MTrk\n"
            # map instrument to a channel
            out += "0 PrCh ch=" + channelString + " prog=" + instrumentProgramMap[instrument] + "\n"
            # set main track volume
            out += "0 Par ch=" + channelString + " con=7 val=" + str(instrumentVolume) + "\n"
            out += "0 Par ch=" + channelString + " con=10 val=" + instrumentPanMap[instrument] + "\n"
            for measure in music:
                for beat in measure['beats']:
                    c1 = counter
                    volume += volumePerBeat
                    notes = len(beat) # need to only count actual notes, not rests
                    for note in beat:
                        c2 = str(c1)
                        if 'rest' in note:
                            pass
                        else:
                            if 'flam' in note:
                                # if surface is shot, flams should be on the drum head
                                # annotate notes with proper flam surface
                                #go back a bit, from current counter value
                                tempVolume = int(instrumentVolume * self.volumeMap['P'])
                                out += str(c1 - 13) + " On ch=" + channelString + " n=" + noteMap[instrument][ note['flam'] ] + " v=" + str(tempVolume) + "\n"
                                #out += str(c1 - 5) + " Off ch=" + channelString + " n=" + noteMap[instrument][ note['surface'] ] + " v=0\n"
                            # prepare volume
                            if 'volumePercentage' in note:
                                volume = note['volumePercentage']
                            tempVolume = volume
                            # "<>" (Python 2 only) replaced with "!=".
                            if volumePerNote != 0:
                                tempVolume += volumePerNote
                            if 'perBeat' in note:
                                volumePerBeat = note['perBeat']
                                volumePerNote = volumePerBeat / notes
                            if 'accent' in note:
                                actualVolume = int(instrumentVolume * tempVolume) + accentIncrease
                                if actualVolume > 127:
                                    actualVolume = 127  # clamp to the MIDI velocity range
                            else:
                                actualVolume = int(instrumentVolume * tempVolume)
                            for surface in note['surface']:
                                # SHIT looks like i might need to make simultaneous notes for cymbals be separate notes! because "a" might need to choke while "b" taps
                                # if 'choke' in note:
                                # elif 'slide' in note:
                                # elif 'tap' in note:
                                out += c2 + " On ch=" + channelString + " n=" + noteMap[instrument][ surface ] + " v=" + str(actualVolume) + "\n"
                                # expand diddle/tremolo
                                # add the second note
                                if 'diddle' in note:
                                    c3 = str(c1 + (perBeat / (note['duration'] * 2)))
                                    out += c3 + " On ch=" + channelString + " n=" + noteMap[instrument][ surface ] + " v=" + str(actualVolume) + "\n"
                                if 'fours' in note:
                                    c3 = perBeat / (note['duration'] * 4)
                                    c4 = str(c1 + (c3))
                                    out += c4 + " On ch=" + channelString + " n=" + noteMap[instrument][ surface ] + " v=" + str(actualVolume) + "\n"
                                    c4 = str(c1 + c3 + c3)
                                    out += c4 + " On ch=" + channelString + " n=" + noteMap[instrument][ surface ] + " v=" + str(actualVolume) + "\n"
                                    c4 = str(c1 + c3 + c3 + c3)
                                    out += c4 + " On ch=" + channelString + " n=" + noteMap[instrument][ surface ] + " v=" + str(actualVolume) + "\n"
                            # when do we turn off
                            # divide
                            c3 = str(c1 + (perBeat / note['duration']))
                            for surface in note['surface']:
                                # why do i sometimes see the note off volume at 64?
                                #out += c3 + " Off ch=" + channelString + " n=" + noteMap[instrument][ surface ] + " v=0\n"
                                pass
                        # i bet some cymbal notes we'll have to avoid turning off until we get an explicit choke note
                        c1 += (perBeat / note['duration']) # how long does this note last?
                    nextBeat += perBeat
                    counter += perBeat
                # end note loop
                # end beat loop
            # end measure loop
            out += "TrkEnd\n"
            channel += 1
        # end instrument loop
        return out
class MusicXMLConvertor(Convertor):
    """Convert a parsed score into a MusicXML 2.0 score-partwise document."""
    # note['duration'] subdivision -> MusicXML note <type> name
    durationMap = {
        1: 'quarter',
        2: 'eighth',
        3: 'eighth',
        4: '16th',
        5: '16th',
        6: '16th',
        7: '16th',
        8: '32nd',
        9: '64th',
        10: '64th',
        11: '64th',
        12: '64th'
    }
    # subdivision -> beam number (currently unused; see commented beam attr)
    beamMap = {
        2: '1',
        3: '1',
        4: '2',
        5: '2',
        6: '2',
        7: '2',
        8: '3',
    }
    # score dynamic marking -> MusicXML dynamics element name
    dynamicMap = {
        'P': 'p',
        'MP': 'mp',
        'MF': 'mf',
        'F': 'f',
        'FF': 'ff'
    }
    # surface code -> MusicXML notehead shape
    noteHeads = {
        'x': 'x'
    }
    def convert(self, score, settings):
        """Render the score as a MusicXML document string.

        `settings` is accepted for interface compatibility but unused here.
        """
        nl = "\n"
        t = "\t"
        t2 = t + t
        t3 = t + t + t
        t4 = t + t + t + t
        # surface code -> display pitch, e.g. "C5" (step 'C', octave '5')
        noteMap = {
            # snare
            "h": "C5",
            "x": "C5",
            "y": "C5",
            "z": "C5",
            "f": "C5", #backstick
            # bass and tenor
            "a": "E5",
            "b": "C5",
            "c": "A4",
            "d": "F4",
            "e": "D4",
            "u": "B4"
        }
        out = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>' + nl + '<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 2.0 Partwise//EN" "http://www.musicxml.org/dtds/partwise.dtd">' + nl
        out += '<score-partwise version="2.0">' + nl
        if 'title' in score:
            out += '<work>' + nl
            out += t + '<work-title>' + score['title'] + '</work-title>' + nl
            out += '</work>' + nl
        if 'author' in score:
            out += '<identification>' + nl
            out += t + '<creator type="composer">' + score['author'] + '</creator>' + nl
            out += t + '<rights>Copyright 2010 ' + score['author'] + '</rights>' + nl
            out += '</identification>' + nl
        out += '<part-list>' + nl
        i = 1
        for instrument in score['instruments']:
            if not instrument in score:
                continue
            music = score[ instrument ]
            out += t + '<score-part id="P' + str(i) + '">' + nl
            # upper case first letters
            out += t2 + '<part-name>' + instrument + '</part-name>' + nl
            #out += t2 + '<midi-instrument id="P' + str(i) + 'i">' + nl
            #out += t3 + '<midi-channel></midi-channel>' + nl
            #out += t3 + '<midi-program></midi-program>' + nl
            #out += t3 + '<midi-unpitched></midi-unpitched>' + nl
            #out += t2 + '</midi-instrument>' + nl
            out += t + '</score-part>' + nl
            i += 1
        out += '</part-list>' + nl
        divisions = 24  # MusicXML divisions per quarter note
        i = 1
        for instrument in score['instruments']:
            if not instrument in score:
                continue
            music = score[ instrument ]
            out += '<part id="P' + str(i) + '">' + nl
            prevTimeSignature = ''
            iMeasure = 1
            for measure in music:
                if not 'timesignature' in measure:
                    measure['timesignature'] = '4/4'
                ts = measure['timesignature'].split('/')
                out += t + '<measure number="' + str(iMeasure) + '">' + nl
                out += t2 + '<attributes>' + nl
                # divisions per quarter note for 1 duration
                out += t3 + '<divisions>' + str(divisions) + '</divisions>' + nl
                if iMeasure == 1:
                    out += t3 + '<key><fifths>0</fifths><mode>major</mode></key>' + nl
                # "<>" (Python 2 only) replaced with "!=":
                # only emit a <time> element when the signature changes.
                if prevTimeSignature != measure['timesignature']:
                    out += t3 + '<time symbol="common"><beats>' + str(ts[0]) + '</beats><beat-type>' + str(ts[1]) + '</beat-type></time>' + nl
                if iMeasure == 1:
                    out += t3 + '<clef><sign>percussion</sign></clef>' + nl
                out += t2 + '</attributes>' + nl
                for beat in measure['beats']:
                    iNote = 1
                    for note in beat:
                        if 'rest' in note:
                            duration = note['duration']
                            out += t2 + '<note>' + nl
                            out += t3 + '<rest />' + nl
                            out += t3 + '<duration>' + str(divisions / note['duration']) + '</duration>' + nl
                            out += t3 + '<type>' + str(self.durationMap[ note['duration'] ]) + '</type>' + nl
                            if duration == 3:
                                out += t3 + '<time-modification><actual-notes>3</actual-notes><normal-notes>2</normal-notes></time-modification>' + nl
                            elif duration == 6:
                                out += t3 + '<time-modification><actual-notes>6</actual-notes><normal-notes>4</normal-notes></time-modification>' + nl
                            if note['duration'] > 1:
                                out += t3 + '<beam>'
                                if iNote == 1:
                                    out += 'begin'
                                elif iNote == note['duration']:
                                    out += 'end'
                                else:
                                    out += 'continue'
                                out += '</beam>' + nl
                            if (duration == 3 or duration == 6) and (iNote == 1 or iNote == note['duration']):
                                out += t4 + '<notations><tuplet type="'
                                if iNote == 1:
                                    out += 'start'
                                elif iNote == note['duration']:
                                    out += 'stop'
                                out += '" number="1" bracket="yes" show-number="actual" show-type="none" line-shape="straight" placement="above" /></notations>' + nl
                            out += t2 + '</note>' + nl
                        else:
                            iSurface = 1
                            for surface in note['surface']:
                                if 'dynamicChange' in note or 'dynamicChangeEnd' in note:
                                    out += t2 + '<direction><direction-type><wedge type="'
                                    if 'dynamicChange' in note:
                                        if note['dynamicChange'] == '<':
                                            out += 'crescendo'
                                        else:
                                            out += 'diminuendo'
                                    else:
                                        out += 'stop'
                                    out += '">'
                                    out += '</wedge></direction-type></direction>' + nl
                                if 'flam' in note:
                                    # grace note tied into the main note
                                    noteMapped = noteMap[ note['flam'] ]
                                    out += t2 + '<note>' + nl
                                    out += t3 + '<grace slash="no" steal-time-previous="0" steal-time-following="0" />' + nl
                                    out += t3 + '<unpitched><display-step>' + noteMapped[0] + '</display-step><display-octave>' + noteMapped[1] + '</display-octave></unpitched>' + nl
                                    out += t3 + '<tie type="start" />' + nl
                                    out += t2 + '</note>' + nl
                                duration = note['duration']
                                out += t2 + '<note>' + nl
                                if iSurface > 1: # simultaneous notes
                                    # this note is in a chord with the previous
                                    out += t3 + '<chord />' + nl
                                noteMapped = noteMap[ surface ]
                                out += t3 + '<unpitched><display-step>' + noteMapped[0] + '</display-step><display-octave>' + noteMapped[1] + '</display-octave></unpitched>' + nl
                                out += t3 + '<duration>' + str(divisions / note['duration']) + '</duration>' + nl
                                if 'flam' in note: # close tie
                                    out += t3 + '<tie type="stop" />' + nl
                                #out += t3 + '<voice>' + str(iSurface) + '</voice>' + nl
                                out += t3 + '<type>' + str(self.durationMap[ note['duration'] ]) + '</type>' + nl
                                if duration == 3:
                                    out += t3 + '<time-modification><actual-notes>3</actual-notes><normal-notes>2</normal-notes></time-modification>' + nl
                                elif duration == 6:
                                    out += t3 + '<time-modification><actual-notes>6</actual-notes><normal-notes>4</normal-notes></time-modification>' + nl
                                pass
                                out += t3 + '<stem>up</stem>' + nl
                                if instrument == 'cymbal':
                                    out += t3 + '<notehead>x</notehead>' + nl
                                else:
                                    if 'shot' in note:
                                        out += t3 + '<notehead>x</notehead>' + nl
                                    elif surface in self.noteHeads:
                                        out += t3 + '<notehead>' + self.noteHeads[ surface ] + '</notehead>' + nl
                                if note['duration'] > 1:
                                    out += t3 + '<beam>' # number="' + self.beamMap[ note['duration'] ] + '">'
                                    if iNote == 1:
                                        out += 'begin'
                                    elif iNote == note['duration']:
                                        out += 'end'
                                    else:
                                        out += 'continue'
                                    out += '</beam>' + nl
                                if duration == 3 or duration == 6 or 'accent' in note or 'staccato' in note or 'diddle' in note or 'dynamic' in note:
                                    out += t3 + '<notations>' + nl
                                # for tuplet bracket, need to know whether first or last note in tuplet
                                if (duration == 3 or duration == 6) and (iNote == 1 or iNote == note['duration']):
                                    out += t4 + '<tuplet type="'
                                    if iNote == 1:
                                        out += 'start'
                                    elif iNote == note['duration']:
                                        out += 'stop'
                                    out += '" number="1" bracket="yes" show-number="actual" show-type="none" line-shape="straight" placement="above" />' + nl
                                if 'diddle' in note:
                                    out += t4 + '<ornaments><tremolo type="single">1</tremolo></ornaments>' + nl
                                if 'accent' in note or 'staccato' in note:
                                    out += t4 + '<articulations>'
                                    if 'accent' in note:
                                        out += '<accent placement="above"></accent>'
                                    if 'staccato' in note:
                                        out += '<staccato placement="below"></staccato>'
                                    out += '</articulations>' + nl
                                if 'dynamic' in note:
                                    out += t4 + '<dynamics placement="below"><' + self.dynamicMap[ note['dynamic'] ] + ' /></dynamics>' + nl
                                if duration == 3 or duration == 6 or 'accent' in note or 'staccato' in note or 'diddle' in note or 'dynamic' in note:
                                    out += t3 + '</notations>' + nl
                                if 'hand' in note:
                                    # sticking shown as lyric text; upper-case when accented
                                    if 'accent' in note:
                                        out += t3 + '<lyric placement="below"><text>' + note['hand'].upper() + '</text></lyric>' + nl
                                    else:
                                        out += t3 + '<lyric placement="below"><text>' + note['hand'] + '</text></lyric>' + nl
                                out += t2 + '</note>' + nl
                                iSurface += 1
                        iNote += 1
                    # end note loop
                # end beat loop
                iMeasure += 1
                prevTimeSignature = measure['timesignature']
                out += t + '</measure>' + nl
            # end measure loop
            out += '</part>' + nl
            i += 1
        # end instrument loop
        out += '</score-partwise>'
        return out
class LilypondConvertor(Convertor):
# lilypond specific
# inserts flam spacers in the rest of the score
# fucking hate that i have to do this
    # lilypond specific
    # inserts flam spacers in the rest of the score
    # fucking hate that i have to do this
    def flams(self, parsed):
        """Scan the parsed score for flam notes, walking all instruments in
        lock-step measure/beat, and collect their positions as
        (instrument, measure, beat, note-index) tuples.

        NOTE(review): this method unconditionally returns {} after printing
        the collected positions; the `return parsed` below it is unreachable,
        and the "fill in flam spacers" step is not implemented.  Looks like a
        debugging stub left in place -- confirm which return is intended
        before relying on it.
        """
        instruments = parsed['instruments'].keys()
        measure = 0
        beat = 0
        note = 0
        done = False
        flams = []
        while not done:
            measureNotFound = False
            beatNotFound = False
            foundFlam = False
            for instrument in instruments:
                print(instrument)
                # if we've gone past beats
                # if we've gone past measures
                if measure >= len(parsed['instruments'][ instrument ] ):
                    measureNotFound = True
                    print('measure not found: ' + str(measure) )
                    continue
                if beat >= len( parsed['instruments'][ instrument ][ measure ]['beats'] ):
                    beatNotFound = True
                    print('beat not found: ' + str(beat) )
                    continue
                notes = parsed['instruments'][ instrument ][ measure ]['beats'][ beat ]
                i = 0
                z = len(notes)
                while i < z:
                    note = notes[ i ]
                    if type(note) == dict:
                        if 'flam' in note:
                            foundFlam = True
                            flams.append( (instrument, measure, beat, i) )
                    i += 1
            # somehow fill in flam spacers
            if measureNotFound and beatNotFound:
                done = True
            elif measureNotFound:
                done = True
            elif beatNotFound:
                # end of this measure's beats: advance to the next measure
                beat = 0
                measure += 1
            else:
                beat += 1
        print( repr( flams ))
        return {}
        return parsed
# or fixDurations
def fixDurations(self, score):
# fix durations and set tuplet flags
# tuplet flags may already be set in some cases by left/right square brackets
# 1=4, 2=8, 3=8tuplet, 4=16, 5=16tuplet, 6=16tuplet, 7=16tuplet, 8=32
for instrument in score['instruments']:
if not instrument in score:
continue
music = score[ instrument ]
for measure in music:
for beat in measure['beats']:
for note in beat:
# if tuplet and (last note in a beat, or no longer tuplet), close tuplet
# is last beat?
if note['duration'] == 3:
duration = 8
elif note['duration'] > 4 and note['duration'] < 8:
duration = 16
elif note['duration'] == 1 or note['duration'] == 2 or note['duration'] % 4 == 0:
duration = note['duration'] * 4
note['duration'] = duration
if instrument == 'tenor' and 'surface' in note and note['surface'] == 'e':
note['surface'] = 'E'
if instrument == 'bass' and 'surface' in note and note['surface'] == 'x':
note['surface'] = 'u'
note['shot'] = True
# should i set shot flag here too?
return score
def convert(self, score, settings={}):
#a = self.flams(parsed)
a = self.fixDurations(score)
ret = '\\version "2.12.3"\n'
ret += '#(set-default-paper-size "a4" \'portrait)\n'
# this doesn't do enough. lilypond feels like a waste of effort.
#ret += '\t\\paper {\n'
#ret += '\t\tbetween-system-padding = #0.1\n'
#ret += '\t\tbetween-system-space = #0.1\n'
#ret += '\t\tragged-last-bottom = ##f\n'
#ret += '\t\tragged-bottom = ##f\n'
#ret += '\t}\n'
ret += '\\header {\n'
if 'title' in a:
ret += '\ttitle="' + a['title'] + '"\n'
if 'subtitle' in a:
ret += '\tsubtitle="' + a['subtitle'] + '"\n'
if 'author' in a:
ret += '\tcomposer="' + a['author'] + '"\n'
ret += '\tcopyright = \\markup {"Copyright" \\char ##x00A9 "' + a['author'] + '"}\n'
ret += '}\n\n'
ret += '\\score {\n'
ret += '\t<<\n'
mapping = {
'h': 'c\'\'',
'x': 'c\'\'', # rim shot
'a': 'e\'\'',
'b': 'c\'\'',
'c': 'a\'',
'd': 'f\'',
'e': 'd\'',
'E': 'g\'\'', # spocks
'f': 'b',
'r': 'r', # rest
'u': 'b\'', # unison b'
'U': 'b\'', # unison rim
'z': 'e\'\'',
# dynamics
'P': '\\p',
'MP': '\\mp',
'MF': '\\mf',
'F': '\\f',
'FF': '\\ff',
'<': '\\<',
'>': '\\>',
':': '\\!'
}
# set this flag when we encounter one of them
# unset it when we encounter the first dynamic that's not one
crescendoDecrescendo = False
dynamic = 'MF'
for instrument in score['instruments']:
if not instrument in score:
continue
music = score[ instrument ]
if instrument == 'snare':
ret += '\t% Snare\n'
ret += '\t\\new Staff {\n'
ret += '\t\t\\numericTimeSignature\n'
if 'timesignature' in a:
ret += '\t\t\\time ' + a['timesignature'] + '\n'
ret += '\t\t\\set Staff.clefGlyph = #"clefs.percussion"\n'
ret += '\t\t\\set Staff.clefPosition = #0\n'
ret += '\t\t\\set Staff.instrumentName = #"Snare "\n'
#self.beaming()
elif instrument == 'tenor':
ret += '\t% Tenor\n'
ret += '\t\\new Staff {\n'
#ret += '\t\t\\numericTimeSignature\n'
#if 'timesignature' in a:
# ret += '\t\t\\time ' + a['timesignature'] + '\n'
#ret += '\t\t\\set Staff.clefGlyph = #"clefs.percussion"\n'
#ret += '\t\t\\set Staff.clefPosition = #0\n'
ret += '\t\t\\set Staff.instrumentName = #"Tenor "\n'
elif instrument == 'bass':
ret += '\t% Bass\n'
ret += '\t\\new Staff {\n'
#ret += '\t\t\\numericTimeSignature\n'
#if 'timesignature' in a:
# ret += '\t\t\\time ' + a['timesignature'] + '\n'
#ret += '\t\t\\set Staff.clefGlyph = #"clefs.percussion"\n'
#ret += '\t\t\\set Staff.clefPosition = #0\n'
ret += '\t\t\\set Staff.instrumentName = #"Bass "\n'
elif instrument == 'cymbal':
ret += '\t% Cymbals\n'
ret += '\t\\new Staff {\n'
#ret += '\t\t\\set Staff.clefGlyph = #"clefs.percussion"\n'
#ret += '\t\t\\set Staff.clefPosition = #0\n'
#ret += '\t\t\\numericTimeSignature\n'
ret += '\t\t\\set Staff.instrumentName = #"Cymbals "\n'
#if 'timesignature' in a:
# ret += '\t\t\\time ' + a['timesignature'] + '\n'
ret += '\t\t\\stemUp\n'
iMeasure = 1
for measure in music:
beats = len(measure['beats'])
# if measure has a time signature, print it
# but it's a bitch when doing it when a crescendo hasn't ended
for beat in measure['beats']:
for note in beat:
if 'tupletStart' in note:
if note['tupletStart'] == 3:
ret += '\\times 2/3 { '
elif note['tupletStart'] == '5':
ret += '\\times 4/5 { '
else:
ret += '\\times 4/' + str(note['tupletStart']) + ' { '
if 'flam' in note:
if instrument == 'snare':
ret += '\\override Stem #\'length = #4 \\appoggiatura c\'\'8 \\revert Stem #\'length \stemUp '
else: # tenor and bass flam element has surface
ret += '\\override Stem #\'length = #4 \\appoggiatura ' + mapping[ note['flam'] ] + '8 \\revert Stem #\'length \stemUp '
# note or rest?
if 'rest' in note:
ret += 'r' + str(note['duration'])
else:
if len(note['surface']) > 1:
ret += ' <<'
for surface in note['surface']:
if instrument == 'cymbal':
if not 'stop' in note:
ret += '\\once \\override NoteHead #\'style = #\'cross '
else:
if 'shot' in note:
ret += '\\override NoteHead #\'style = #\'cross '
ret += mapping[ surface ] + str(note['duration'])
# diddle?
# check flag for whether to expand tremolos
if 'diddle' in note:
ret += ':' + str(note['duration'] * 2)
# fours?
if 'fours' in note:
ret += ':' + str(note['duration'] * 4)
if 'hand' in note:
ret += ' _"' + note['hand'] + '"'
if 'dynamicChangeEnd' in note:
ret += '\! '
if 'dynamic' in note:
ret += mapping[ note['dynamic'] ] + ' '
if 'dynamicChange' in note:
ret += mapping[ note['dynamicChange'] ] + ' '
# should note be accented?
if 'accent' in note:
#ret += ' \\accent'
ret += ' ^>'
if 'staccato' in note:
ret += ' \staccato'
if 'shot' in note:
ret += ' \\revert NoteHead #\'style'
if instrument == 'cymbal':
if 'cymbal' in note:
if note['cymbal'] == 'slide' and not 'stop' in note:
ret += '( '
if note['cymbal'] == 'crashchoke':
ret += ' ^^'
if 'stop' in note:
ret += ') \\slurDown '
ret += ' '
if len(note['surface']) > 1:
ret += ' >>'
if 'tupletStop' in note: # last note in a tuplet
ret += '} '
ret += ' '
# end note loop
# end beat loop
if len(measure['beats']) > 1:
ret += ' | '
ret += ' \n '
if iMeasure == 4:
#if len(measure['beats']) > 1:
# ret += ' \\break \n '
iMeasure = 1
else:
iMeasure += 1
# end measure loop
ret += '}\n'
# end instrument loop
ret += '>>\n'
ret += '\t\\layout {\n'
#print('\t\tindent = 0')
ret += '\t}\n'
ret += '}\n'
return ret
|
from datetime import datetime
from bs4 import BeautifulSoup
from config import *
'''
https://www.crummy.com/software/BeautifulSoup/bs4/doc/
https://docs.python.org/3/library/random.html
'''
#import pdfkit
# Template of Worksheets (#1, #2) on either side of a section
# format() placeholders, in order: section number, exercise name, left sheet
# number, exercise name, right sheet number.
section_template = '''<div id=section-{} class="container"><div class="columns">
<div class="column"><h1 class="title">{} - Sheet {}</h1>
<div class="table-container">
<table class="left-pane table is-fullwidth">
<tbody class="left-pane"></tbody>
</table></div></div>
<div class="column"><h1 class="title">{} - Sheet {}</h1>
<div class="table-container">
<table class="right-pane table is-fullwidth">
<tbody class="right-pane"></tbody>
</table></div></div>
</div></div>
<div class="sheetbreak"></div>
'''
# Open the 'template2.html' - placeholder of our Worksheets
with open('template2.html', 'r') as raw_doc:
    templ_doc = raw_doc.read()
    raw_doc.close()  # NOTE(review): redundant — the `with` block closes the file
# Date stamp used in the output file name, e.g. 'Jan_01_2024'
save_date = datetime.strftime(datetime.now(),"%b_%d_%Y")
template = BeautifulSoup(templ_doc, 'html.parser')
# easy_pairs / random_pairs presumably come from the `config` wildcard import
# — verify against config.py
easy_size = len(easy_pairs)
random_size = len(random_pairs)
'''
HTML Table Generator
'''
tbody_template = '''<tbody></tbody>'''
# NOTE(review): `template` is re-created here with identical arguments; the
# assignment above appears redundant.
template = BeautifulSoup(templ_doc, 'html.parser')
all_sections = BeautifulSoup('', 'html.parser')
sheet_break = BeautifulSoup("<div class=\"sheetbreak\"></div>", 'html.parser')
def tablePopulator(tbody, number_pairs, operator):
    '''
    Fill the <tbody> of *tbody* with one <tr> per number pair.
    tbody: table Body <tbody> <bs4>
    number_pairs: list of number pairs <list(list)>
    operator: Operator Symbol defining Worksheet <str> : '+','-'

    Each row is laid out as: number, operator, blank answer cell,
    number, blank answer cell.  Returns the mutated *tbody*.
    '''
    body = tbody.tbody
    for pair in number_pairs:
        # <tr></tr>
        row = tbody.new_tag("tr")
        for position, number in enumerate(pair):
            # <td>Number</td>
            cell = tbody.new_tag("td")
            cell.string = str(number)
            row.append(cell)
            # the operator cell goes right after the first number
            if position == 0:
                op_cell = tbody.new_tag("td")
                op_cell.string = operator
                row.append(op_cell)
            # blank cell where the answer gets written in
            blank_cell = tbody.new_tag("td")
            blank_cell.string = '________'
            row.append(blank_cell)
        # row complete — attach it to the body
        body.append(row)
    return tbody
def SectionGenerator(pairs, sheet_length, section_template,
    tbody_template,all_sections, sheet_number, safety_pairs, operator, exercise_name):
    '''
    Generates <section></section> Each containing Two Worksheets
    pairs : Pairs to Generate Sheets <list(list)>
    sheet_length: Default 20 per worksheet <int>
    section_template: HTML Template of Section <str (in HTML format)>
    tbody_template: HTML Template of tbody <str (in HTML format)>
    all_sections: Soup HTML Parser Object Placeholder for Sheets <bs4>
    sheet_number: Sheet Numbering Default: 3 <int>
    safety_pairs: Filling in the remaining side of a section <list>
    operator: Exercise numericals with operands <str> [+, - , x, /]
    exercise_name: Sheet Name <str> ['Addition' ...]

    Returns (all_sections, sheet_number) so numbering can continue
    across calls.
    '''
    # NOTE(review): the stride of 20 is hard-coded; `sheet_length` is unused.
    for idx, looper in enumerate(range(0, len(pairs), 20)):
        sect_number = (idx + 1)
        # first 10 pairs fill the left worksheet, next 10 the right one
        number_pairs_left = pairs[looper+0:looper+10]
        number_pairs_right = pairs[looper+10:looper+20]
        section = BeautifulSoup(
            section_template.format(sect_number, exercise_name, sheet_number-2 , exercise_name, sheet_number-1)
            ,'html.parser')
        sheet_number += 2
        # Forced Replace
        # NOTE(review): if number_pairs_left were empty, `left_section` below
        # would be unbound — unreachable with the stride-20 loop, since the
        # left slice is never empty inside the loop body.
        if number_pairs_left != []:
            tbody = BeautifulSoup(tbody_template, 'html.parser')
            tbody_tag = tbody.tbody
            generated_easy_body = tablePopulator(tbody, number_pairs_left, operator)
            # splice the populated body into the left pane via string replace
            left_section = BeautifulSoup(str(section).replace("<tbody class=\"left-pane\"></tbody>", str(generated_easy_body)), 'html.parser')
        if number_pairs_right != []:
            tbody = BeautifulSoup(tbody_template, 'html.parser')
            tbody_tag = tbody.tbody
            generated_easy_body = tablePopulator(tbody, number_pairs_right, operator)
            right_section = BeautifulSoup(str(left_section).replace("<tbody class=\"right-pane\"></tbody>", str(generated_easy_body)), 'html.parser')
        else:
            # pairs ran out mid-section: pad the right-hand sheet from safety_pairs
            tbody = BeautifulSoup(tbody_template, 'html.parser')
            tbody_tag = tbody.tbody
            generated_easy_body = tablePopulator(tbody, safety_pairs[:10], operator)
            right_section = BeautifulSoup(str(left_section).replace("<tbody class=\"right-pane\"></tbody>", str(generated_easy_body)), 'html.parser')
        all_sections.append(right_section)
    return all_sections, sheet_number
# Build every section from the exercise pairs; `all_pairs`, `sheet_number`,
# `safety_pairs`, `operator` and `exercise_name` presumably come from the
# `config` wildcard import — verify against config.py.
all_sections,sheet_number = SectionGenerator(all_pairs,
    20,
    section_template,
    tbody_template,all_sections,
    sheet_number,
    safety_pairs,
    operator,
    exercise_name
    )
# Splice the generated sections into the page template in place of the empty
# <section> placeholder.
final_template = BeautifulSoup(str(template).replace("<section class=\"section\"></section>", str(all_sections)), 'html.parser')
# Remove every other sheet break (the 1st, 3rd, ...) so a break remains only
# after each full two-sheet section.
for idx, sheet in enumerate(final_template.select('.sheetbreak')):
    if ( (idx+1)%2 != 0):
        sheet.decompose()
#print(final_template.prettify())
# Write the finished worksheet HTML, date-stamped
with open('Sheet ' + save_date + '.html', 'w') as raw_doc:
    raw_doc.write(str(final_template))
    raw_doc.close()  # NOTE(review): redundant — the `with` block closes the file
print('Done ... Sheet ' + save_date)
# https://pypi.org/project/pdfkit/
# https://wkhtmltopdf.org/usage/wkhtmltopdf.txt
'''print_options = {
'margin-top': '0.1in',
'margin-right': '0.25in',
'margin-bottom': '0.1in',
'margin-left': '0.25in'
}
#time.sleep(3)
pdfkit.from_file('Sheet ' + save_date + '.html', 'sample.pdf', options=print_options)'''
<reponame>ministryofjustice/opg-ansible-roles
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# DOCUMENTATION / EXAMPLES / RETURN are the standard Ansible module
# documentation blocks consumed by `ansible-doc`.
DOCUMENTATION = '''
module: cloudfront
short_description: Create, delete and update actions for the main AWS
Cloudfront actions.
description:
- Read the AWS documentation for Cloudfront for the correct json values
as there are too many to list here.
- Creates distributions, streaming_distributions, invalidations,
origin_access_ids
- Deletes distributions, streaming_distributions, origin_access_ids
- Updates distributions, streaming_distributions
version_added: "2.2"
requirements: [ boto3 ]
options:
type:
description:
- specifies the resource to take action upon, streaming is
streaming_distribution
required: True
choices: [
'distribution',
'origin_access_id',
'invalidation',
'streaming',
]
default: None
policy:
description:
- The path to the properly json formatted policy file or a
properly formatted json policy as string, see
https://github.com/ansible/ansible/issues/7005#issuecomment-42894813
on how to use it properly
- Used for creation and updating of resources
required: false
default: None
resource_id:
description:
- Required when creating an invalidation against a distribution
- Required for removal of distributions and origin_access_ids
required: false
state:
description:
- present to ensure resource is created or updated.
- absent to remove resource
required: false
default: present
choices: [ "present", "absent"]
wait_for_deployed:
description:
- distributions and streaming_distributions need to be disabled
before you can remove them.
- Setting this to yes will allow this module to disable the
distribution on your behalf, wait
- until the status has changed to "Deployed" before removing your
distribution. This has a timeout of 15 mins which is
the recommended value from AWS.
- this setting can also be used ensure a distribution,
invalidation, origin_access_id is created or updated
required: false
choices: ['yes', 'no']
default: no
wait_for_retries:
description:
- used in conjunction with 'wait_for_deployed'. This value is multiplied
by the polling value of 30 seconds. The default wait_for_retries is 30
which gives a total of 15 mins of wait time.
required: false
default: 30
author: <NAME>(@Etherdaemon)
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple example of creating a new distribution with a json file
- name: Create a new distribution
cloudfront:
type: distribution
state: present
policy: "{{ role_path }}/files/distribution.json"
- name: Create a new distribution and wait for deployed
cloudfront:
type: distribution
state: present
wait_for_deployed: yes
policy: "{{ role_path }}/files/distribution.json"
- name: Create a new origin access identity
cloudfront:
type: origin_access_id
state: present
policy: "{{ role_path }}/files/origin_access.json"
#Disable and delete distribution
- name: Disable and wait for status to change to "Deployed" and delete
distribution
cloudfront:
type: distribution
state: absent
resource_id: EEFF123DDFF
wait_for_deployed: yes
- name: Create a new invalidation using policy template
cloudfront:
type: invalidation
resource_id: EEFF123DDFF
state: present
policy: " {{ lookup( 'template', 'invalidation.json.j2') }} "
'''
RETURN = '''
result:
description: The result of the create, delete or update action.
See http://boto3.readthedocs.io/en/latest/reference/services/cloudfront.html for all the relevant responses
returned: success
type: dictionary or a list of dictionaries
'''
# boto3 is an optional dependency: its absence is reported via
# module.fail_json() in main() rather than a raw ImportError at load time.
try:
    import json
    import boto3
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
import time
def date_handler(obj):
    """json.dumps ``default=`` hook: render datetime-like objects via
    isoformat(); anything without an isoformat() method passes through
    unchanged."""
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
def get_existing_distributions(client, module):
    """Return a summary for every existing CloudFront distribution.

    Each entry is a dict with 'Config', 'ETag', 'Id' and 'DomainName', so
    callers can match on CallerReference and update with the right ETag.
    """
    existing = []
    listing = client.list_distributions()['DistributionList']
    # 'Items' is absent when there are no distributions at all
    for item in listing.get('Items', []):
        details = client.get_distribution_config(Id=item['Id'])
        existing.append({
            'Config': details['DistributionConfig'],
            'ETag': details['ETag'],
            'Id': item['Id'],
            'DomainName': item['DomainName'],
        })
    return existing
def get_existing_origin_access_id(client, module):
    """Return a summary for every existing origin access identity.

    Each entry is a dict with 'Config', 'ETag' and 'Id'.
    """
    existing = []
    listing = client.list_cloud_front_origin_access_identities()
    identity_list = listing['CloudFrontOriginAccessIdentityList']
    # 'Items' is absent when no identities exist
    for item in identity_list.get('Items', []):
        details = client.get_cloud_front_origin_access_identity_config(Id=item['Id'])
        existing.append({
            'Config': details['CloudFrontOriginAccessIdentityConfig'],
            'ETag': details['ETag'],
            'Id': item['Id'],
        })
    return existing
def get_existing_invalidations(client, module):
    """Return a summary for every invalidation on the given distribution.

    The distribution id is taken from the module's 'resource_id' parameter;
    the module fails if it is missing.  Each entry is a dict with 'Config'
    (the InvalidationBatch), an empty 'ETag' placeholder, and 'Id'.
    """
    distribution_id = module.params.get('resource_id')
    if not distribution_id:
        module.fail_json(msg="distribution_id is required for invalidations")
    found = []
    listing = client.list_invalidations(DistributionId=distribution_id)
    # 'Items' is absent when the distribution has no invalidations
    for invalidation in listing['InvalidationList'].get('Items', []):
        details = client.get_invalidation(
            DistributionId=distribution_id, Id=invalidation['Id'])
        found.append({
            'Config': details['Invalidation']['InvalidationBatch'],
            'ETag': "",
            'Id': invalidation['Id'],
        })
    return found
def get_existing_streaming_distributions(client, module):
    """Return a summary for every existing streaming distribution.

    Each entry is a dict with 'Config', 'ETag', 'Id' and 'DomainName',
    mirroring get_existing_distributions().
    """
    distributions = client.list_streaming_distributions()
    dist_configs = []
    # Guard on 'Items': the key is absent when no streaming distributions
    # exist, which previously raised KeyError (the sibling
    # get_existing_distributions already has this check).
    if 'Items' in distributions['StreamingDistributionList']:
        for dist in distributions['StreamingDistributionList']['Items']:
            temp_object = dict()
            config = client.get_streaming_distribution_config(
                Id=dist['Id'])
            temp_object['Config'] = config['StreamingDistributionConfig']
            temp_object['ETag'] = config['ETag']
            temp_object['Id'] = dist['Id']
            temp_object['DomainName'] = dist['DomainName']
            dist_configs.append(temp_object)
    return dist_configs
def creation_setup(client, module):
    """Create or update a resource based on the module's 'policy'.

    Loads the policy either from a file path or from an inline JSON string,
    then matches it against existing resources by CallerReference:
    - exact config match -> no change, return current details
    - same CallerReference, different config -> update (not for invalidations)
    - no match -> create

    Returns (changed, results).
    """
    policy = None
    changed = False
    if module.params.get('policy'):
        try:
            # try the value as a path to a JSON file first
            with open(module.params.get('policy')) as json_data:
                try:
                    policy = json.load(json_data)
                    json_data.close()
                except ValueError as e:
                    module.fail_json(msg=str(e))
        except (OSError, IOError) as e:
            # not a readable file: treat the value as an inline JSON string
            try:
                policy = json.loads(module.params.get('policy'))
            except ValueError as e:
                module.fail_json(msg=str(e))
        except Exception as e:
            module.fail_json(msg=str(e))
    # BUG FIX: without this guard, a missing 'policy' parameter crashed
    # below with "argument of type 'NoneType' is not iterable" instead of a
    # clear module failure.
    if policy is None:
        module.fail_json(msg="policy is required when state is present")
    # dispatch table: resource type -> how to list what already exists
    invocations = {
        'distribution': {
            'get_config_method': get_existing_distributions,
        },
        'origin_access_id': {
            'get_config_method': get_existing_origin_access_id,
        },
        'invalidation': {
            'get_config_method': get_existing_invalidations,
        },
        'streaming': {
            'get_config_method': get_existing_streaming_distributions
        }
    }
    if 'CallerReference' not in policy:
        module.fail_json(msg='CallerReference is required in your policy')
    else:
        caller_reference = policy['CallerReference']
    resource_type = invocations[module.params.get('type')]
    existing = resource_type['get_config_method'](client, module)
    caller_reference_exists = False
    for item in existing:
        if caller_reference == item['Config']['CallerReference']:
            caller_reference_exists = True
            if policy == item['Config']:
                # identical config: nothing to do, report current state
                temp_results = item
                results = return_resource_details(client, module, temp_results)
            elif module.params.get('type') == "invalidation":
                module.fail_json(msg="AWS does not support updating"
                                     " invalidation configs, please"
                                     " create a new invalidation"
                                     " with your new config and"
                                     " use a new caller reference id instead")
            else:
                changed, results = update(client, module, policy, item['Id'], item['ETag'])
    if not caller_reference_exists:
        changed, results = creation(client, module, policy)
    return changed, results
def return_resource_details(client, module, temp_results):
    """Fetch the full details of an existing resource and return them as a
    plain JSON-safe dict (datetimes stringified via date_handler)."""
    getters = {
        'distribution': client.get_distribution,
        'origin_access_id': client.get_cloud_front_origin_access_identity,
        'invalidation': client.get_invalidation,
        'streaming': client.get_streaming_distribution,
    }
    resource_kind = module.params.get('type')
    call_kwargs = {'Id': temp_results['Id']}
    # invalidations are addressed relative to their parent distribution
    if resource_kind == "invalidation":
        call_kwargs['DistributionId'] = module.params.get('resource_id')
    raw = getters[resource_kind](**call_kwargs)
    # round-trip through json so datetime values become strings
    return json.loads(json.dumps(raw, default=date_handler))
def creation(client, module, policy):
    """Create a new resource of the module's 'type' from *policy*.

    Optionally waits (wait_for_deployed) for the resource to reach the
    Deployed/Completed status.  Returns (changed, result) where result is
    the JSON-safe create response.
    """
    changed = False
    params = dict()
    args = dict()
    # dispatch table: resource type -> create method, the request key that
    # carries the config, and the response key that carries the result
    invocations = {
        'distribution': {
            'method': client.create_distribution,
            'config_param': "DistributionConfig",
            'result_key': "Distribution",
        },
        'origin_access_id': {
            'method': client.create_cloud_front_origin_access_identity,
            'config_param': "CloudFrontOriginAccessIdentityConfig",
            'result_key': "CloudFrontOriginAccessIdentity",
        },
        'invalidation': {
            'method': client.create_invalidation,
            'config_param': "InvalidationBatch",
            'result_key': "Invalidation",
        },
        'streaming': {
            'method': client.create_streaming_distribution,
            'config_param': "StreamingDistributionConfig",
            'result_key': "StreamingDistribution",
        },
    }
    resource_type = invocations[module.params.get('type')]
    params[resource_type['config_param']] = policy
    # invalidations are created against their parent distribution
    if module.params.get('type') == "invalidation":
        params['DistributionId'] = module.params.get('resource_id')
        args['DistributionId'] = module.params.get('resource_id')
    invocation = resource_type['method']
    # round-trip through json so datetime values become strings
    result = json.loads(json.dumps(invocation(**params), default=date_handler))
    if module.params.get('wait_for_deployed'):
        args['Id'] = result[resource_type['result_key']]['Id']
        status_achieved = wait_for_deployed_status(client, module, **args)
        if not status_achieved:
            module.fail_json(msg="Timed out waiting for the resource to finish"
                                 " deploying, please check the AWS console for"
                                 " the latest status.")
    changed = True
    return (changed, result)
def update(client, module, policy, resource_id, etag):
    """Update an existing resource with *policy*.

    resource_id: the AWS id of the resource to update
    etag: the ETag from the matching get_*_config call (required by AWS
          as the IfMatch precondition)

    Optionally waits (wait_for_deployed) for the change to finish
    deploying.  Returns (changed, result).
    """
    changed = False
    params = dict()
    invocations = {
        'distribution': {
            'method': client.update_distribution,
            'config_param': "DistributionConfig"
        },
        'origin_access_id': {
            'method': client.update_cloud_front_origin_access_identity,
            'config_param': "CloudFrontOriginAccessIdentityConfig"
        },
        'streaming': {
            # BUG FIX: this dispatched to client.create_streaming_distribution,
            # which would attempt to create a new resource instead of
            # updating the existing one.
            'method': client.update_streaming_distribution,
            'config_param': "StreamingDistributionConfig",
        },
    }
    params[invocations[module.params.get('type')]['config_param']] = policy
    params['Id'] = resource_id
    params['IfMatch'] = etag
    invocation = invocations[module.params.get('type')]['method']
    # round-trip through json so datetime values become strings
    result = json.loads(json.dumps(invocation(**params), default=date_handler))
    if module.params.get('wait_for_deployed'):
        args = dict()
        args['Id'] = params['Id']
        status_achieved = wait_for_deployed_status(client, module, **args)
        if not status_achieved:
            module.fail_json(msg="Timed out waiting for the resource to finish"
                                 " deploying, please check the AWS console for"
                                 " the latest status.")
    changed = True
    return (changed, result)
def disable_distribution(client, module):
    """Disable a (streaming) distribution and wait until the change is
    fully deployed, as required by AWS before deletion.

    Returns (status_achieved, updated_config) where updated_config is the
    current get_*_config response for the resource.
    """
    params = dict()
    invocations = {
        'distribution': {
            'method': client.get_distribution,
            'config_method': client.get_distribution_config,
            'config_param': "DistributionConfig",
            'dist_key': 'Distribution',
        },
        'streaming': {
            'method': client.get_streaming_distribution,
            'config_method': client.get_streaming_distribution_config,
            'config_param': "StreamingDistributionConfig",
            'dist_key': 'StreamingDistribution',
        },
    }
    resource_type = invocations[module.params.get('type')]
    resource_id = module.params.get('resource_id')
    params['Id'] = resource_id
    get_result = resource_type['method'](**params)
    if get_result[resource_type['dist_key']][resource_type['config_param']]['Enabled']:
        # flip Enabled off and push the change, then poll until Deployed
        new_policy = get_result[resource_type['dist_key']][resource_type['config_param']]
        new_policy['Enabled'] = False
        update(client, module, new_policy, resource_id, get_result['ETag'])
        status_achieved = wait_for_deployed_status(client, module, **params)
        if not status_achieved:
            module.fail_json(msg="Timed out disabling the resource, please try again")
    else:
        # BUG FIX: previously an already-disabled resource fell through with
        # status_achieved and updated_result unbound, raising NameError.
        status_achieved = True
    # BUG FIX: previously this always called get_distribution_config, which
    # is wrong for the 'streaming' type; use the type-appropriate method.
    updated_result = resource_type['config_method'](**params)
    return status_achieved, updated_result
def wait_for_deployed_status(client, module, **args):
    """Poll the resource every 30 seconds until its Status becomes
    'Deployed' or 'Completed'.

    The number of polls comes from the module's 'wait_for_retries'
    parameter.  Returns True if the status was reached, False on timeout.
    """
    poll_interval_secs = 30
    # resource type -> (getter, response key holding the Status)
    getters = {
        'distribution': (client.get_distribution, 'Distribution'),
        'origin_access_id': (client.get_cloud_front_origin_access_identity,
                             'CloudFrontOriginAccessIdentity'),
        'invalidation': (client.get_invalidation, 'Invalidation'),
        'streaming': (client.get_streaming_distribution, 'StreamingDistribution'),
    }
    method, result_key = getters[module.params.get('type')]
    for _attempt in range(module.params.get('wait_for_retries')):
        details = method(**args)[result_key]
        if details['Status'] in ('Deployed', 'Completed'):
            return True
        time.sleep(poll_interval_secs)
    return False
def removal_setup(client, module):
    """Fetch (and optionally disable) a resource, then delete it.

    Invalidations cannot be removed.  Distributions must already be
    disabled unless 'wait_for_deployed' is set, in which case they are
    disabled first via disable_distribution().

    Returns (changed, result) from removal().
    """
    changed = False
    # resource type -> config getter and the response key carrying the config
    invocations = {
        'distribution': {
            'method': client.get_distribution_config,
            'config_param': "DistributionConfig"
        },
        'origin_access_id': {
            'method': client.get_cloud_front_origin_access_identity_config,
            'config_param': "CloudFrontOriginAccessIdentityConfig",
        },
        'streaming': {
            'method': client.get_streaming_distribution_config,
            'config_param': "StreamingDistributionConfig",
        },
    }
    if module.params.get('type') == "invalidation":
        module.fail_json(msg="Invalidations cannot be updated or removed")
    elif not module.params.get('resource_id'):
        # BUG FIX: typo in the error message ("requried" -> "required")
        module.fail_json(msg="resource_id is required for removing a resource")
    elif module.params.get('wait_for_deployed'):
        # NOTE(review): disable_distribution only handles 'distribution' and
        # 'streaming'; wait_for_deployed with origin_access_id raises
        # KeyError there (pre-existing behavior).
        status_achieved, config = disable_distribution(client, module)
    else:
        invocation = invocations[module.params.get('type')]['method']
        config = invocation(Id=module.params.get('resource_id'))
    # BUG FIX: use .get('Enabled') — origin access identity configs have no
    # 'Enabled' key, so the previous ['Enabled'] lookup raised KeyError and
    # identities could never be removed.
    if not config[invocations[module.params.get('type')]['config_param']].get('Enabled'):
        resource_id = module.params.get('resource_id')
        etag = config['ETag']
        changed, result = removal(client, module, resource_id, etag)
    else:
        module.fail_json(msg="Resource must be disabled before you can remove it from AWS")
    return changed, result
def removal(client, module, resource_id, etag):
    """Delete the resource identified by *resource_id*.

    etag: the current ETag, passed to AWS as the IfMatch precondition.
    Returns (changed, result) with the JSON-safe delete response.
    """
    delete_methods = {
        'distribution': client.delete_distribution,
        'origin_access_id': client.delete_cloud_front_origin_access_identity,
        'streaming': client.delete_streaming_distribution
    }
    delete = delete_methods[module.params.get('type')]
    raw_response = delete(Id=resource_id, IfMatch=etag)
    # round-trip through json so datetime values become strings
    result = json.loads(json.dumps(raw_response, default=date_handler))
    return (True, result)
def main():
    """Module entry point: parse arguments, connect to CloudFront, and
    dispatch to creation_setup / removal_setup based on `state`."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        type=dict(choices=[
            'distribution',
            'origin_access_id',
            'invalidation',
            'streaming',
        ], required=True),
        policy=dict(),
        resource_id=dict(),
        state=dict(default='present', choices=['present', 'absent']),
        wait_for_deployed=dict(type='bool', default=False),
        wait_for_retries=dict(type='int', default=30),
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='json and botocore/boto3 is required.')
    state = module.params.get('state').lower()
    #Cloudfront is non-region specific - default global region to us-east-1/US Standard
    module.params['region'] = 'us-east-1'
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        cloudfront = boto3_conn(module, conn_type='client', resource='cloudfront', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except Exception as e:
        module.fail_json(msg="Can't authorize connection - "+str(e))
    #Ensure resource is present
    if state == 'present':
        (changed, results) = creation_setup(cloudfront, module)
    else:
        (changed, results) = removal_setup(cloudfront, module)
    module.exit_json(changed=changed, result=camel_dict_to_snake_dict(results))
# import module snippets
# NOTE: Ansible's historical module convention places these wildcard imports
# at the bottom of the file; AnsibleModule, ec2_argument_spec, boto3_conn,
# get_aws_connection_info and camel_dict_to_snake_dict come from here.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
    main()
|
<reponame>utkarsh512/Ad-hominem-fallacies
# create a distance matrix between threads
from datetime import datetime
import os
from nltk import TreebankWordTokenizer
import unicodedata
import multiprocessing
import pickle
from multiprocessing import Manager
import math
from RedditThread import RedditThread
from SemanticSimilarityHelper import SemanticSimilarityHelper
from vocabulary import Vocabulary
from embeddings import WordEmbeddings
from AnnotatedRedditComment import AnnotatedRedditComment
def chunks(l: list, n: int):
    """
    Partition *l* into consecutive chunks of at most *n* items each.
    :param l: the list to split
    :param n: chunk size
    :return: list of lists; the final chunk may be shorter than n
    """
    chunk_starts = range(0, len(l), n)
    return [l[start:start + n] for start in chunk_starts]
def compute_parallel(input_file_name: str, output_file_prefix: str):
# run parallel as jobs; command line parameter determines which part of the batch is processed
# 605 x 6 jobs; each 32 cores
if int(os.sys.argv[1]) not in range(0, 4):
raise Exception("You must specify a number between 0-5 as a command line parameter")
# which split of the data are we using
current_split_number = int(os.sys.argv[1])
manager = Manager()
with open(input_file_name, "rb") as f:
positive_instances_all, negative_instances = pickle.load(f)
# sort again to make sure the chunks are all the same
# positive_instances_all = sorted(positive_instances_all)
# negative_instances = sorted(negative_instances)
print(len(positive_instances_all))
positive_instances_by_ids = dict()
for _ in positive_instances_all:
assert isinstance(_, RedditThread)
positive_instances_by_ids[_.get_last_comment_name()] = _
assert len(positive_instances_all) == len(positive_instances_by_ids)
# split positive instances into chunks (330 each) for 4 jobs
# positive_instances_chunks = chunks(positive_instances_all, 330)
positive_instances_chunks = chunks(positive_instances_all, 425)
print(type(positive_instances_chunks))
print(len(positive_instances_chunks))
# make sure all chunks sum up to the number of instances
assert len(positive_instances_all) == sum([len(_) for _ in positive_instances_chunks])
# and that they are all unique
_ = set()
for c in positive_instances_chunks:
for cm in c:
_.add(cm)
assert len(_) == len(positive_instances_all)
# make sure there are no intersections between chunks
ids_of_chunks = set()
for i, chunk in enumerate(positive_instances_chunks):
for _ in chunk:
assert isinstance(_, RedditThread)
if _.get_last_comment_name() in ids_of_chunks:
raise Exception("%s from chunk %d already in other chunk!" % (_.get_last_comment_name(), i))
ids_of_chunks.add(_.get_last_comment_name())
assert len(positive_instances_all) == len(ids_of_chunks)
# and assign the current positive instances
positive_instances = positive_instances_chunks[current_split_number]
print("Length of current split positive instances", len(positive_instances))
# project all instances to average word embeddings and lengths
similarity_helper = SemanticSimilarityHelper()
positive_instances_emb_vectors = dict()
negative_instances_emb_vectors = dict()
positive_instances_lengths = dict()
negative_instances_lengths = dict()
for instance in positive_instances:
assert isinstance(instance, RedditThread)
# we ignore the last comment here (the actual AH)
positive_instances_emb_vectors[
instance.get_last_comment_name()] = similarity_helper.average_embeddings_vector_thread(instance, True)
positive_instances_lengths[instance.get_last_comment_name()] = sum([len(c.body) for c in instance.comments[:-1]])
for instance in negative_instances:
assert isinstance(instance, RedditThread)
negative_instances_emb_vectors[
instance.get_last_comment_name()] = similarity_helper.average_embeddings_vector_thread(instance)
negative_instances_lengths[instance.get_last_comment_name()] = sum([len(c.body) for c in instance.comments])
print("Pre-processing done, all average embeddings computed")
print("Positive instances", len(positive_instances_emb_vectors))
print("Negative instances", len(negative_instances_emb_vectors))
def do_job(job_id, _positive_instance_id, _negative_instances_keys, _result_dict):
    """Worker: fill `_result_dict` with the distance from one positive
    instance to every negative instance in `_negative_instances_keys`.

    Reads the embedding/length lookup tables from the enclosing scope;
    `_result_dict` is a multiprocessing manager dict shared with the parent.
    """
    # Hoist the (loop-invariant) positive-side lookups out of the loop.
    pos_vec = positive_instances_emb_vectors[_positive_instance_id]
    pos_len = positive_instances_lengths[_positive_instance_id]
    for neg_id in _negative_instances_keys:
        _result_dict[neg_id] = SemanticSimilarityHelper.distance_vec(
            pos_vec,
            negative_instances_emb_vectors[neg_id],
            pos_len,
            negative_instances_lengths[neg_id])
# we have positive_instances and negative_instances
positive_to_negative_distances = dict()
print("Need to compute %d distances" % len(positive_instances_emb_vectors))
for counter, positive_instance_id in enumerate(positive_instances_emb_vectors):
print("Counter: %d" % counter)
start = datetime.now()
temp_parallel_dict = manager.dict()
job_number = 8
# pool.map()
total = len(negative_instances_emb_vectors.keys())
chunk_size = int(math.ceil(total / job_number))
current_slice = chunks(list(negative_instances_emb_vectors.keys()), chunk_size)
# print(slice)
jobs = []
for i, negative_instances_keys in enumerate(current_slice):
j = multiprocessing.Process(target=do_job,
args=(
i, positive_instance_id, negative_instances_keys,
temp_parallel_dict))
jobs.append(j)
for j in jobs:
j.start()
for j in jobs:
j.join()
new_dict = dict(temp_parallel_dict)
positive_to_negative_distances[positive_instance_id] = new_dict
delta = datetime.now() - start
# print(d)
print("[%s] Computed all distances for %s (size: %d)" % (delta, positive_instance_id, len(new_dict)))
print("Current size of the resulting dict: %d" % len(positive_to_negative_distances))
# for key in positive_to_negative_distances:
# print(key)
# print(len(positive_to_negative_distances[key]))
output_file_name = "%s_%d.pkl" % (output_file_prefix, current_split_number)
with open(output_file_name, "wb") as f:
pickle.dump(positive_to_negative_distances, f)
f.close()
if __name__ == "__main__":
    # this first one was for ah/non-ah sampling
    # compute_parallel("ah-positive-negative-instances-all.pkl", "distance_dict")
    # now for threads 3
    # compute_parallel("threads-with-ah-threads-with-delta-context3.pkl",
    # "threads-with-ah-threads-with-delta-context3-distances")
    # now for threads 2
    # Compute all positive->negative distances for the context-2 dataset and
    # pickle the result under the given output file prefix (one file per split).
    compute_parallel("threads-with-ah-threads-with-delta-context2.pkl",
                     "threads-with-ah-threads-with-delta-context2-distances")
|
<filename>files/management/commands/check_files.py
from django.core.management.base import BaseCommand, CommandError
from files.models import File,VCF
import os
from django.conf import settings
import time
from django.core.exceptions import ObjectDoesNotExist
import glob
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
import datetime
import json
from subprocess import run
import gzip
class Command(BaseCommand):
    """Scan all stored ``File`` records and, for each gzipped VCF, create a
    ``VCF`` record capturing header size, variant count, sample count and the
    reference genome build detected from the VCF header lines.
    """

    help = 'Check Files'

    def handle(self, *args, **options):
        start_time = time.time()
        print('Check Files')
        files = File.objects.all()
        vcfs = []
        for file in files:
            if not file.name.endswith('.vcf.gz'):
                continue
            with gzip.open(file.location, 'rt') as f:
                build = ''
                count_line = 0
                count_header = 0
                # Default to 0 so files without a '#CHROM' line no longer
                # raise NameError when the VCF record is built below.
                n_samples = 0
                for line in f:
                    if line.startswith('#'):
                        count_header += 1
                        # Generic build clues anywhere in a header line.
                        if 'Homo_sapiens_assembly18' in line:
                            build = 'hg18'
                        if 'hg19' in line:
                            build = 'hg19'
                        if 'human_g1k_v37' in line:
                            build = 'b37'
                        if line.startswith('##reference'):
                            reference = line
                            # Sequential ifs: a later match deliberately
                            # overrides an earlier one.
                            if 'hg19' in reference:
                                build = 'hg19'
                            if 'b37' in reference:
                                build = 'b37'
                            if 'GRCh38' in reference:
                                build = 'GRCh38'
                            if 'NCBI37' in reference:
                                build = 'NCBI37'
                            if 'ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/dna/' in reference:
                                build = 'GRCh37'
                            if 'GRCh37' in reference:
                                build = 'GRCh37'
                            if 'NCBI36' in reference:
                                build = 'NCBI36'
                        if line.startswith('##commandline'):
                            if 'hg19' in line:
                                build = 'hg19'
                            if 'b37' in line:
                                build = 'b37'
                        if line.startswith('##contig'):
                            if 'hg19' in line:
                                build = 'hg19'
                            if 'b37' in line:
                                build = 'b37'
                            # BUG FIX: previously this tested the literal
                            # string itself (always truthy), so *every*
                            # ##contig line forced build to 'b37,chrM'.
                            if '##contig=<ID=chrM,length=16571>' in line:
                                build = 'b37,chrM'
                        if line.startswith('#CHROM'):
                            # Sample columns start at index 9 of the header row.
                            n_samples = len(line.split('\t')[9:])
                    else:
                        count_line += 1
                if build == '':
                    print('Could not find for ', count_header, file.location, file.name)
                vcfs.append(VCF(
                    file=file,
                    n_header=count_header,
                    n_variants=count_line,
                    build=build,
                    n_samples=n_samples,
                ))
        # Single bulk insert instead of one query per VCF.
        VCF.objects.bulk_create(vcfs)
        elapsed_time = time.time() - start_time
        print('Finished checking files, it took {}'.format(elapsed_time))
|
<gh_stars>1-10
#!/usr/bin/env python3
#
# Short description of the program/script's operation/function.
#
import sys
import argparse
import subprocess
import os
FILENAME = sys.argv[0]
class ArgumentParserUsage(argparse.ArgumentParser):
    """ArgumentParser variant that, on an argument error, prints the full
    help text (not just the usage line) to stderr before exiting with
    status 2."""

    def error(self, message):
        """Report `message` plus the help text on stderr, then exit(2)."""
        sys.stderr.write("error: {}\n".format(message))
        self.print_help(sys.stderr)
        sys.exit(2)
def usage(exit_code):
    """Print the parser's help text and terminate with `exit_code`.

    Help goes to stdout on success (0) and to stderr otherwise.
    """
    pipe = sys.stdout if exit_code == 0 else sys.stderr
    parser.print_help(pipe)
    sys.exit(exit_code)
def log_message(message, pipe=sys.stdout):
    """Write `message` to `pipe` (stdout by default), prefixed with the
    script name."""
    print("{}: {}".format(FILENAME, message), file=pipe)
def log(message):
    """Emit `message` via log_message, but only when --verbose was given."""
    if args.verbose:
        log_message(message)
def error(message, exit_code=None):
    """Log an error to stderr. If `exit_code` is given, exit the process
    with it as the status code."""
    log_message("error: " + message, sys.stderr)
    # Use `is not None` rather than truthiness so an explicit exit_code of 0
    # still terminates; previously `error(msg, 0)` silently fell through.
    if exit_code is not None:
        sys.exit(exit_code)
def run_command(args):
    """Run a shell command string, returning ``(stdout, success)``.

    `stdout` is the decoded, stripped standard output; `success` is True
    iff the command exited with status 0. stderr is not captured and passes
    through to the terminal.
    """
    # Execute through a shell so callers can use piping & redirection.
    proc = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)
    out, _ = proc.communicate()  # stderr is not piped, so the second item is None
    return out.decode("utf-8").strip(), proc.returncode == 0
def start_game(game_cmd, pre_cmd=None):
    """Switch to the games i3 workspace and launch `game_cmd`, detached.

    If `pre_cmd` is given, it is chained (via ``&&``) into the same shell
    invocation so it runs once, immediately before the game.
    """
    workspace_num = "9"
    cmd_switch_workspace = 'i3-msg "workspace ' + workspace_num + '"'
    # nohup + backgrounding detaches the game from this script's lifetime.
    cmd_start_game = "nohup %s &> /dev/null &" % game_cmd
    if pre_cmd:
        # BUG FIX: pre_cmd used to be executed twice -- once standalone via
        # run_command(pre_cmd) and again as part of the chained command.
        # It now runs only as part of the chained command.
        cmd_start_game = pre_cmd + " && " + cmd_start_game
    log("Full game command: " + cmd_start_game)
    run_command(cmd_switch_workspace)
    run_command(cmd_start_game)
parser = ArgumentParserUsage(description="Description of the program's function (identical if you'd like).")

# add arguments
parser.add_argument("-v", "--verbose", help="be verbose",
                    action="store_true")
parser.add_argument("game", help="unique string of game to play")

# parse arguments
args = parser.parse_args()

# Map of short game identifiers to the shell command that launches them.
# A value may also be a [pre_cmd, game_cmd] pair (handled below).
game_commands = {
    "higurashi": "wine ~/media/games/higurashi-when-they-cry/BGI.exe",
    "tome4": "tome4",
    "final-fantasy-japan": "retroarch -L /lib/libretro/nestopia_libretro.so ~/media/games/nes/Final\ Fantasy\ \(Japan\).nes",
    "chrono-trigger": "retroarch -L /lib/libretro/snes9x_libretro.so ~/media/games/snes/Chrono\ Trigger\ \(USA\).sfc",
}

game = args.game

# get possible matches: every known game whose name starts with the query
matches = [k for k in game_commands if k.startswith(game)]

if len(matches) > 1:
    # BUG FIX: the query was never interpolated into the message before
    # (the literal "%s" was printed).
    error("query '%s' not specific enough (more than 1 match)" % game, 3)
elif len(matches) < 1:
    error("no matching games for query '%s'" % game, 4)
else:
    matched_game = matches[0]
    log("Game matched: " + matched_game)

    if isinstance(game_commands[matched_game], list):
        pre_cmd = game_commands[matched_game][0]
        game_cmd = game_commands[matched_game][1]
        log("Game pre command: " + pre_cmd)
    else:
        pre_cmd = None
        game_cmd = game_commands[matched_game]

    log("Game command: " + game_cmd)
    start_game(game_cmd, pre_cmd)
|
<filename>src/pipeformer/internal/templates/codepipeline.py<gh_stars>1-10
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Logic for building the CodePipeline stack template."""
from collections import OrderedDict
from typing import Dict
from troposphere import GetAtt, Parameter, Ref, Tags, Template, cloudformation, codepipeline, iam, s3
from pipeformer.identifiers import VALUE_SEPARATOR
from pipeformer.internal.resolve import InputResolver
from pipeformer.internal.structures import Config, Pipeline
from pipeformer.internal.util import reference_name, resource_name
from . import codebuild as codebuild_template, project_tags
__all__ = ("build",)
# Mapping from PipeFormer action provider name to the CodePipeline
# ActionTypeId that CloudFormation expects for that provider.
_ACTION_TYPE_IDS = {
    "GitHub": codepipeline.ActionTypeId(Category="Source", Owner="ThirdParty", Provider="GitHub", Version="1"),
    "CodeBuild": codepipeline.ActionTypeId(Category="Build", Owner="AWS", Provider="CodeBuild", Version="1"),
    "CloudFormation": codepipeline.ActionTypeId(Category="Deploy", Owner="AWS", Provider="CloudFormation", Version="1"),
}
# Parameters present in every generated pipeline template: the artifact and
# project-resource bucket names plus the role ARNs for each service.
_DEFAULT_PARAMETERS = (
    Parameter(reference_name(resource_name(s3.Bucket, "Artifacts"), "Name"), Type="String"),
    Parameter(reference_name(resource_name(s3.Bucket, "ProjectResources"), "Name"), Type="String"),
    Parameter(reference_name(resource_name(iam.Role, "CodePipeline"), "Arn"), Type="String"),
    Parameter(reference_name(resource_name(iam.Role, "CodeBuild"), "Arn"), Type="String"),
    Parameter(reference_name(resource_name(iam.Role, "CloudFormation"), "Arn"), Type="String"),
)
def _action_configuration(action: InputResolver, stage_name: str, action_number: int) -> Dict[str, str]:
    """Compile a CloudFormation CodePipeline action configuration.

    :param action: PipeFormer action definition
    :param stage_name: Stage name
    :param action_number: Action counter
    :return: CloudFormation action configuration
    """
    codebuild_output = reference_name(codebuild_template.project_name(action_number), "Name")

    # Provider-specific defaults; unknown providers start from an empty config.
    provider = action.provider
    if provider == "GitHub":
        config = dict(PollForSourceChanges=True)
    elif provider == "CodeBuild":
        config = dict(ProjectName=GetAtt(_codebuild_stage_name(stage_name), f"Outputs.{codebuild_output}"))
    elif provider == "CloudFormation":
        config = dict(RoleArn=Ref(reference_name(resource_name(iam.Role, "CloudFormation"), "Arn")))
    else:
        config = {}

    # expand and re-cast the user-supplied configuration to resolve references
    config.update(dict(**action.configuration))
    return config
def _stage_action(stage_name: str, action_number: int, action: InputResolver) -> codepipeline.Actions:
    """Construct a CodePipeline action resource.

    :param stage_name: Stage name
    :param action_number: Action counter
    :param action: PipeFormer action definition
    :return: CloudFormation action definition
    :raises ValueError: if the action's provider is not a known type
    """
    if action.provider not in _ACTION_TYPE_IDS:
        raise ValueError(
            f'Unknown action provider "{action.provider}". Supported providers are: {list(_ACTION_TYPE_IDS.keys())!r}'
        )

    kwargs = dict(
        Name=f"{stage_name}-{action_number}",
        RunOrder=action.run_order,
        ActionTypeId=_ACTION_TYPE_IDS[action.provider],
        Configuration=_action_configuration(action, stage_name, action_number),
    )
    # Artifact lists are only attached when non-empty.
    if action.inputs:
        kwargs["InputArtifacts"] = [codepipeline.InputArtifacts(Name=artifact) for artifact in action.inputs]
    if action.outputs:
        kwargs["OutputArtifacts"] = [codepipeline.OutputArtifacts(Name=artifact) for artifact in action.outputs]

    return codepipeline.Actions(**kwargs)
def _stage(stage: InputResolver) -> codepipeline.Stages:
    """Construct a CodePipeline stage resource.

    :param stage: PipeFormer stage definition
    :return: CloudFormation stage definition
    """
    actions = [
        _stage_action(stage.name, position, entry)
        for position, entry in enumerate(stage.actions)
    ]
    return codepipeline.Stages(Name=stage.name, Actions=actions)
def _url_reference(stage_name) -> str:
    """Build a stage stack template URL reference logical resource name.

    :param stage_name: Stage name
    :return: Logical resource name
    """
    logical_name = VALUE_SEPARATOR.join(("Template", "CodeBuild", "Stage", stage_name))
    return reference_name(logical_name, "Url")
def _codebuild_stage_name(stage_name) -> str:
    """Build a CodeBuild stage logical resource name.

    :param stage_name: Stage name
    :return: Logical resource name
    """
    logical_name = VALUE_SEPARATOR.join(("CodeBuild", "Stage", stage_name))
    return resource_name(cloudformation.Stack, logical_name)
def _stack(
    project: Config, stage: InputResolver, stage_name: str, default_tags: Tags
) -> (cloudformation.Stack, Parameter):
    """Construct a nested CloudFormation stack resource for one stage.

    :param project: PipeFormer project
    :param stage: Pipeline stage definition
    :param stage_name: Stage name
    :param default_tags: Default tags to add to resources
    :return: Constructed stack resource and a parameter to add to the parent template.
    """
    # Parameters passed through to every stage stack, followed by one
    # parameter per input the stage requires.
    shared_names = [
        reference_name(resource_name(s3.Bucket, "ProjectResources"), "Name"),
        reference_name(resource_name(iam.Role, "CodeBuild"), "Arn"),
    ]
    input_names = [project.inputs[key].reference_name() for key in stage.required_inputs]
    parameters = {name: Ref(name) for name in shared_names + input_names}

    url_reference = _url_reference(stage_name)
    stack = cloudformation.Stack(
        _codebuild_stage_name(stage_name),
        TemplateURL=Ref(url_reference),
        Parameters=parameters,
        Tags=default_tags,
    )
    return stack, Parameter(url_reference, Type="String")
def build(project: Config) -> Pipeline:
    """Construct CodePipeline templates for a project.

    :param project: PipeFormer project
    :return: Constructed templates (parent pipeline template plus one nested
        CodeBuild template per stage that produced resources)
    """
    pipeline_template = Template(Description=f"CodePipeline resources for pipeformer-managed project: {project.name}")

    # Add resource parameters shared by all generated pipelines.
    for param in _DEFAULT_PARAMETERS:
        pipeline_template.add_parameter(param)

    required_inputs = set()
    default_tags = project_tags(project)

    # OrderedDict keeps stage templates in pipeline order.
    stage_templates = OrderedDict()
    pipeline_stages = []
    for stage_name, stage in project.pipeline.items():
        stage_loader = InputResolver(wrapped=stage, inputs=project.inputs)
        stage_resources_template = codebuild_template.build(project, stage_loader)
        pipeline_stages.append(_stage(stage_loader))
        stack_resource, stack_parameter = _stack(project, stage_loader, stage_name, default_tags)
        # required_inputs is read *after* _stage/_stack have walked the
        # stage, so it reflects every input those resolutions touched.
        required_inputs.update(stage_loader.required_inputs)
        # Only emit the nested CodeBuild stack (and its template-URL
        # parameter) when the stage actually produced resources.
        if stage_resources_template.resources:
            pipeline_template.add_resource(stack_resource)
            stage_templates[stage_name] = stage_resources_template
            pipeline_template.add_parameter(stack_parameter)

    # Add one parameter per input referenced anywhere in the pipeline.
    for name in required_inputs:
        pipeline_template.add_parameter(Parameter(project.inputs[name].reference_name(), Type="String"))

    # Add the pipeline resource itself.
    pipeline_resource = codepipeline.Pipeline(
        resource_name(codepipeline.Pipeline, project.name),
        ArtifactStore=codepipeline.ArtifactStore(
            Type="S3", Location=Ref(reference_name(resource_name(s3.Bucket, "Artifacts"), "Name"))
        ),
        RoleArn=Ref(reference_name(resource_name(iam.Role, "CodePipeline"), "Arn")),
        Stages=pipeline_stages,
    )
    pipeline_template.add_resource(pipeline_resource)

    return Pipeline(template=pipeline_template, stage_templates=stage_templates)
|
# ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the StringTypeInfo object"""
import os
import textwrap
import CommonEnvironment
from CommonEnvironment import Interface
from Plugins.SharedLibraryPluginImpl.TypeInfo import TypeInfo
# ----------------------------------------------------------------------
# Absolute path of this file plus its directory/name split, per the
# CommonEnvironment plugin conventions used throughout this codebase.
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
@Interface.staticderived
class StringTypeInfo(TypeInfo):
    """Type information for ``string`` members.

    Maps the featurizer ``string`` type to C++ ``std::string`` and generates
    the C/C++ marshalling code (validation, invocation, and output/cleanup
    statements) used at the shared-library boundary.
    """

    # ----------------------------------------------------------------------
    # |
    # |  Public Types
    # |
    # ----------------------------------------------------------------------
    TypeName = Interface.DerivedProperty("string")
    CppType = Interface.DerivedProperty("std::string")

    # ----------------------------------------------------------------------
    # |
    # |  Public Methods
    # |
    # ----------------------------------------------------------------------
    def __init__(
        self,
        *args,
        member_type=None,
        create_type_info_func=None,
        **kwargs
    ):
        # NOTE(review): when `member_type` is None the base-class initializer
        # is deliberately skipped -- presumably placeholder construction used
        # by the plugin framework; confirm before changing.
        if member_type is None:
            return

        super(StringTypeInfo, self).__init__(*args, **kwargs)

    # ----------------------------------------------------------------------
    @Interface.override
    def GetInputInfo(self, arg_name, invocation_template):
        """Return marshalling info for a scalar string input parameter.

        The C interface receives ``char const *``; optional strings map a
        null pointer to an empty ``nonstd::optional``, while required strings
        are null-checked and wrapped in ``std::string``.
        """
        if self.IsOptional:
            # No validation: a null pointer is a legal "no value" marker.
            validation = ""
            invocation = invocation_template.format(
                "{name} ? std::string({name}) : nonstd::optional<std::string>()".format(
                    name=arg_name,
                ),
            )
        else:
            validation = """if({name} == nullptr) throw std::invalid_argument("'{name}' is null");""".format(
                name=arg_name,
            )
            invocation = invocation_template.format("std::string({})".format(arg_name))

        return self.Result(
            [self.Type("char const *", arg_name)],
            validation,
            invocation,
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetInputBufferInfo(
        self,
        arg_name,
        invocation_template,
        items_var_name=None,
    ):
        """Return marshalling info for an array-of-strings input parameter.

        The C interface receives a ``char const * const *`` pointer plus an
        item count; the generated validation copies the items into a
        ``std::vector`` buffer before invocation.
        """
        parameters = [
            self.Type("char const * const *", "{}_ptr".format(arg_name)),
        ]

        if items_var_name is None:
            items_var_name = "{}_items".format(arg_name)
            parameters.append(self.Type("size_t", items_var_name))

        if self.IsOptional:
            # Null entries become empty optionals in the buffer.
            buffer_type = "std::vector<nonstd::optional<std::string>>"

            validation_suffix = textwrap.dedent(
                """\
                {buffer_type} {name}_buffer;

                {name}_buffer.reserve({items_var_name});

                while({name}_buffer.size() < {items_var_name}) {{
                    {name}_buffer.emplace_back(*{name}_ptr ? *{name}_ptr : nonstd::optional<std::string>());
                    ++{name}_ptr;
                }}
                """,
            ).format(
                name=arg_name,
                items_var_name=items_var_name,
                buffer_type=buffer_type,
            )
        else:
            # In theory, this could be `std::vector<char const *>`, however there is a lot of code that
            # expects std::string-like functionality. This is an area of potential optimization in the future.
            buffer_type = "std::vector<std::string>"

            validation_suffix = textwrap.dedent(
                """\
                {buffer_type} {name}_buffer;

                {name}_buffer.reserve({items_var_name});

                while({name}_buffer.size() < {items_var_name}) {{
                    {name}_buffer.emplace_back(*{name}_ptr);
                    ++{name}_ptr;
                }}
                """,
            ).format(
                name=arg_name,
                items_var_name=items_var_name,
                buffer_type=buffer_type,
            )

        return self.Result(
            parameters,
            textwrap.dedent(
                """\
                if({name}_ptr == nullptr) throw std::invalid_argument("'{name}_ptr' is null");
                if({items_var_name} == 0) throw std::invalid_argument("'{items_var_name}' is 0");
                {validation_suffix}
                """,
            ).format(
                name=arg_name,
                items_var_name=items_var_name,
                validation_suffix="\n{}\n".format(
                    validation_suffix,
                ) if validation_suffix else "",
            ),
            invocation_template.format(
                "{name}_buffer.data(), {name}_buffer.size()".format(
                    name=arg_name,
                ),
            ),
            input_buffer_type=self.Type(buffer_type, "{}_buffer".format(arg_name)),
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetOutputInfo(
        self,
        arg_name,
        result_name="result",
        suppress_pointer=False,
    ):
        """Return marshalling info for a string output parameter.

        The generated statement heap-allocates a null-terminated copy of the
        result (released later by GetDestroyOutputInfo). Empty/absent results
        are reported as a null pointer.
        """
        if self.IsOptional:
            statement = textwrap.dedent(
                """\
                if(!{result_name}) {{
                    {pointer}{name} = nullptr;
                }}
                else {{
                    std::string const & {result_name}_temp(*{result_name});
                    char * string_buffer(new char[{result_name}_temp.size() + 1]);

                    std::copy({result_name}_temp.begin(), {result_name}_temp.end(), string_buffer);
                    string_buffer[{result_name}_temp.size()] = 0;

                    {pointer}{name} = string_buffer;
                }}
                """,
            ).format(
                name=arg_name,
                result_name=result_name,
                pointer="" if suppress_pointer else "*",
            )
        else:
            statement = textwrap.dedent(
                """\
                if({result_name}.empty()) {{
                    {pointer}{name} = nullptr;
                }}
                else {{
                    char * string_buffer(new char[{result_name}.size() + 1]);

                    std::copy({result_name}.begin(), {result_name}.end(), string_buffer);
                    string_buffer[{result_name}.size()] = 0;

                    {pointer}{name} = string_buffer;
                }}
                """,
            ).format(
                name=arg_name,
                result_name=result_name,
                pointer="" if suppress_pointer else "*",
            )

        return self.Result(
            [self.Type("char const *{}".format("" if suppress_pointer else "*"), arg_name),],
            textwrap.dedent(
                """\
                if({name} == nullptr) throw std::invalid_argument("'{name}' is null");
                """,
            ).format(
                name=arg_name,
            ),
            statement,
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetDestroyOutputInfo(
        self,
        arg_name="result",
    ):
        """Return marshalling info that frees a string previously allocated
        by the GetOutputInfo statement (``delete []`` on a non-null pointer).
        """
        return self.Result(
            [self.Type("char const *", arg_name)],
            "",
            textwrap.dedent(
                """\
                if({name})
                    delete [] {name};
                """,
            ).format(
                name=arg_name,
            ),
        )
|
#!/usr/bin/python
# coding=utf8
"""
================================================================================
LingSync-to-OLD Migrator
================================================================================
This is a command-line utility that migrates a LingSync corpus to an Online
Linguistic Database (OLD). Both the source LingSync corpus and the destination
OLD must be accessible at URLs (possibly local) via HTTP.
Warnings/disclaimers
--------------------------------------------------------------------------------
- DEPENDENCY: requires that the Python Requests library be installed. All other
imports are from the standard library.
- It is assumed that the destination OLD is empty. Migrating a LingSync corpus
to an OLD that already has data in it may result in errors or corrupted data.
- Some LingSync data points (entire documents or specific fields/attributes)
are purposefully not migrated. You will need to check the resulting OLD to
verify that the conversion is satisfactory.
Usage
--------------------------------------------------------------------------------
Just run `lingsync2old.py` and you will be prompted for the required arguments::
$ ./lingsync2old.py
You can also supply the required arguments as options::
$ ./lingsync2old.py \
--ls-url=https://corpus.lingsync.org \
--ls-corpus=my-lingsync-corpus-name \
--ls-username=my-lingsync-username \
--ls-password=<PASSWORD> \
--old-url=my-old-url \
--old-username=my-old-username \
--old-password=<PASSWORD>
Full param/option listing:
--force-download: boolean that, when `True`, forces the downloading of the
LingSync/CouchDB data, even if we have already downloaded it. Default
is `False`.
--force-convert: boolean that, when `True`, forces the converting of the
LingSync JSON data to OLD JSON data, even if we have already converted
it. Default is `False`.
--force-file-download: boolean that, when `True`, forces the downloading of
a LingSync file (e.g., audio), even if we have already downloaded and
saved it.
--verbose: boolean that makes this script say more about what it's doing.
--ls-url: The LingSync CouchDB URL that we can make requests to for
extracting the LingSync data. Defaults to 'https://corpus.lingsync.org'.
--ls-corpus: The name of the LingSync corpus that we want to
migrate.
--ls-username: The username of a user who has sufficient privileges to
request the LingSync corpus' data from the CouchDB API.
--ls-password: The password corresponding to the LingSync
username.
--old-url: The OLD URL that we will upload the converted LingSync
data to.
--old-username: The username of a user on the destination OLD who
has sufficient privileges to make create, update and delete requests,
i.e., an admin or a contributor.
--old-password: The password corresponding to the OLD username.
Algorithm
--------------------------------------------------------------------------------
It's essentially a three-step algorithm:
1. Download. Request LingSync data as JSON using the CouchDB API (and save it
locally).
2. Convert. Build a JSON structure (from 1) that the OLD can digest (and save it
locally).
3. Upload. Use the output of (2) to send JSON/REST POST requests to the relevant
OLD web service.
Here is the general mapping from LingSync documents (or implicit entities) to
OLD resources:
LingSync OLD
tags => tags
users => users
speakers => speakers
files => files
datums => forms
datalists => corpora
sessions => collections
Questions
--------------------------------------------------------------------------------
1. Are there tags in LingSync sessions?
2. Are there files in LingSync sessions?
3. Should we fill in empty values with the values of other attributes. E.g., if
the morpheme_break value is empty, should the transcription value be copied
to it?
TODOs
--------------------------------------------------------------------------------
- large file (> 20MB) upload to OLD still not implemented.
- downloading LingSync image files still not implemented.
- make this script sensitive to OLD versions, and maybe to LingSync ones too.
"""
from fielddb_client import FieldDBClient
from old_client import OLDClient
import requests
import string
import json
import optparse
import getpass
import sys
import os
import shutil
import re
import pprint
import copy
import datetime
import urlparse
import base64
import mimetypes
import codecs
import random
# Shorthand for debug pretty-printing.
p = pprint.pprint

# Temporary directories
LINGSYNC_DIR = '_ls2old_lingsyncjson'
OLD_DIR = '_ls2old_oldjson'
FILES_DIR = '_ls2old_files'

# Placeholder credentials assigned to migrated users (anonymized).
DEFAULT_PASSWORD = '<PASSWORD>'
FAKE_EMAIL = u'<EMAIL>'

# Any file over 20MB is considered "big".
BIG_FILE_SIZE = 20000000

# If we have more than 200MB of file data, this script considers that "big
# data".
BIG_DATA = 200000000

# ANSI escape sequences for formatting command-line output.
ANSI_HEADER = '\033[95m'
ANSI_OKBLUE = '\033[94m'
ANSI_OKGREEN = '\033[92m'
ANSI_WARNING = '\033[93m'
ANSI_FAIL = '\033[91m'
ANSI_ENDC = '\033[0m'
ANSI_BOLD = '\033[1m'
ANSI_UNDERLINE = '\033[4m'

# Name of the tag applied to all migrated resources; set during the run.
migration_tag_name = None

# WARNING: this should be set to `False`. However, when debugging the script,
# setting it to `True` will prevent the accumulation of conversion warnings so
# you can focus on those that you want to.
QUIET = False

# This accumulates the lengths of the field values that overflow the maximum
# length allowed by the OLD. This gives the user a rough idea of how many
# values were too long and what their lengths were.
OVERFLOWS = set()

# Global used to accumulated original tags and the datum ids that reference
# them so that they can be fixed later ...
TAGSTOFIX = {}
def flush(string):
    """Print `string` immediately, and with no carriage return.
    """
    # Python 2 `print x,` suppresses the trailing newline; the explicit
    # flush makes progress text appear right away.
    print string,
    sys.stdout.flush()
def download_lingsync_json(config_dict, database_name):
"""Download the LingSync data in `database_name` using the CouchDB API.
Save the returned JSON to a local file.
"""
c = FieldDBClient(config_dict)
# Login to the LingSync CouchDB.
couchdb_login_resp = c.login_couchdb()
try:
assert couchdb_login_resp['ok'] is True
print 'Logged in to CouchDB.'
except:
print 'Unable to log in to CouchDB.'
return None
# Get the JSON from CouchDB
flush('Downloading all documents from %s' % database_name)
all_docs = c.get_all_docs_list(database_name)
if type(all_docs) is type({}) and all_docs.get('error') == 'unauthorized':
print (u'%sUser %s is not authorized to access the LingSync corpus'
u' %s.%s' % (ANSI_FAIL, config_dict['admin_username'],
database_name, ANSI_ENDC))
return None
print 'Downloaded all documents from %s' % database_name
# Write the LingSync/CouchDB JSON to a local file
fname = get_lingsync_json_filename(database_name)
with open(fname, 'w') as outfile:
json.dump(all_docs, outfile)
print 'Wrote all documents JSON file to %s' % fname
return fname
def get_lingsync_json_filename(database_name):
    """Return the path under LINGSYNC_DIR where the downloaded JSON for the
    LingSync corpus `database_name` is cached."""
    filename = '{}.json'.format(database_name)
    return os.path.join(LINGSYNC_DIR, filename)
def add_optparser_options(parser):
    """Register this script's command-line options on `parser`.

    The options cover the LingSync source (`--ls-url`, `--ls-corpus`,
    `--ls-username`, `--ls-password`), the destination OLD (`--old-url`,
    `--old-username`, `--old-password`), three `--force-*` switches that
    redo work even when cached results already exist, and `--verbose`.
    """
    # (flags, keyword-arguments) pairs, registered in order below.
    option_specs = [
        (("--ls-url",),
         dict(dest="ls_url", default='https://corpus.lingsync.org',
              metavar="LS_URL",
              help="The LingSync CouchDB URL that we can make requests to for"
              " extracting the LingSync data. Defaults to"
              " 'https://corpus.lingsync.org'.")),
        (("--ls-corpus",),
         dict(dest="ls_corpus", metavar="LS_CORPUS",
              help="The name of the LingSync corpus that we want to migrate.")),
        (("--ls-username",),
         dict(dest="ls_username", metavar="LS_USERNAME",
              help="The username of a user who has sufficient"
              " privileges to request the LingSync corpus' data from the CouchDB API.")),
        (("--ls-password",),
         dict(dest="ls_password", metavar="LS_PASSWORD",
              help="The password corresponding to the LingSync"
              " username.")),
        (("--old-url",),
         dict(dest="old_url", metavar="OLD_URL",
              help="The OLD URL that we will upload the converted LingSync data to.")),
        (("--old-username",),
         dict(dest="old_username", metavar="OLD_USERNAME",
              help="The username of a user on the destination"
              " OLD who has sufficient privileges to make create, update and delete"
              " requests, i.e., an admin or a contributor.")),
        (("--old-password",),
         dict(dest="old_password", metavar="OLD_PASSWORD",
              help="The password corresponding to the OLD"
              " username.")),
        (("-d", "--force-download"),
         dict(dest="force_download", action="store_true", default=False,
              metavar="FORCEDOWNLOAD",
              help="Use this option if you want to download the LingSync data,"
              " even if it has already been downloaded.")),
        (("-c", "--force-convert"),
         dict(dest="force_convert", action="store_true", default=False,
              metavar="FORCECONVERT",
              help="Use this option if you want to convert the LingSync data"
              " to OLD format, even if it has already been converted.")),
        (("-f", "--force-file-download"),
         dict(dest="force_file_download", action="store_true", default=False,
              metavar="FORCEFILEDOWNLOAD",
              help="Use this option if you want to download LingSync"
              " audio/video/image files, even if they have already been"
              " downloaded.")),
        (("-v", "--verbose"),
         dict(dest="verbose", action="store_true", default=False,
              metavar="VERBOSE",
              help="Make this script say more about what it's doing.")),
    ]
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
################################################################################
# OLD resource schemata
################################################################################
# This holds dicts that contain default OLD resources. These are deep-copied
# elsewhere in the script when OLD resources-as-dicts are created.
old_schemata = {
    'corpus': {
        'name': u'', # required, unique among corpus names, max 255 chars
        'description': u'', # string description
        'content': u'', # string containing form references
        'tags': [], # OLD sends this as an array of objects (attributes: `id`, `name`) but receives it as an array of integer relational ids, all of which must be valid tag ids.
        'form_search': None # OLD sends this as an object (attributes: `id`, `name`) but receives it as a relational integer id; must be a valid form search id.
    },
    'file': {
        'description': u'', # A description of the file.
        'utterance_type': u'', # If the file represents a recording of an utterance, then a value here may be appropriate; possible values accepted by the OLD currently are 'None', 'Object Language Utterance', 'Metalanguage Utterance', and 'Mixed Utterance'.
        'speaker': None, # A reference to the OLD speaker who was the speaker of this file, if appropriate.
        'elicitor': None, # A reference to the OLD user who elicited this file, if appropriate.
        'tags': [], # An array of OLD tags assigned to the file.
        'forms': [], # An array of forms associated to this file.
        'date_elicited': u'', # When this file was elicited, if appropriate.
        'base64_encoded_file': u'', # When creating a file, this attribute may contain a base-64 encoded string representation of the file data, so long as the file size does not exceed 20MB.
        'filename': u'', # The filename; cannot be empty, max 255 chars. Note: the OLD will remove quotation marks and replace spaces with underscores, and will not allow the file to be created if the MIME type guessed from the filename differs from that guessed from the file data.
        'name': u'', # The name of the file, max 255 chars; only valid when the file is created as a subinterval-referencing file or as a file whose data are stored elsewhere, i.e., at the provided URL.
        'MIME_type': u'' # a string representing the MIME type.
    },
    'form': {
        'transcription': u'', # = ValidOrthographicTranscription(max=510)
        'phonetic_transcription': u'', # = ValidBroadPhoneticTranscription(max=510)
        'narrow_phonetic_transcription': u'', # = ValidNarrowPhoneticTranscription(max=510)
        'morpheme_break': u'', # = ValidMorphemeBreakTranscription(max=510)
        'grammaticality': u'', # = ValidGrammaticality(if_empty='')
        'morpheme_gloss': u'', # = UnicodeString(max=510)
        'translations': [], # = ValidTranslations(not_empty=True)
        'comments': u'', # = UnicodeString()
        'speaker_comments': u'', # = UnicodeString()
        'syntax': u'', # = UnicodeString(max=1023)
        'semantics': u'', # = UnicodeString(max=1023)
        'status': u'', # = OneOf(h.form_statuses)
        'elicitation_method': None, # = ValidOLDModelObject(model_name='ElicitationMethod')
        'syntactic_category': None, # = ValidOLDModelObject(model_name='SyntacticCategory')
        'speaker': None, # = ValidOLDModelObject(model_name='Speaker')
        'elicitor': None, # = ValidOLDModelObject(model_name='User')
        'verifier': None, # = ValidOLDModelObject(model_name='User')
        'source': None, # = ValidOLDModelObject(model_name='Source')
        'tags': [], # = ForEach(ValidOLDModelObject(model_name='Tag'))
        'files': [], # = ForEach(ValidOLDModelObject(model_name='File'))
        'date_elicited': u'' # = DateConverter(month_style='mm/dd/yyyy')
    },
    'collection': {
        'title': u'',
        'type': u'',
        'url': u'',
        'description': u'',
        'markup_language': u'',
        'contents': u'',
        'contents_unpacked': u'',
        'speaker': None,
        'source': None,
        'elicitor': None,
        'date_elicited': u'',
        'tags': [],
        'files': []
    },
    'user': {
        'username': u'', # = UnicodeString(max=255)
        'password': u'', # = UnicodeString(max=255)
        'password_confirm': u'', # = UnicodeString(max=255)
        'first_name': u'', # = UnicodeString(max=255, not_empty=True)
        'last_name': u'', # = UnicodeString(max=255, not_empty=True)
        'email': u'', # = Email(max=255, not_empty=True)
        'affiliation': u'', # = UnicodeString(max=255)
        'role': u'', # = OneOf(h.user_roles, not_empty=True)
        'markup_language': u'', # = OneOf(h.markup_languages, if_empty='reStructuredText')
        'page_content': u'', # = UnicodeString()
        'input_orthography': None,
        'output_orthography': None
    },
    'speaker': {
        'first_name': u'', # = UnicodeString(max=255, not_empty=True)
        'last_name': u'', # = UnicodeString(max=255, not_empty=True)
        'dialect': u'', # = UnicodeString(max=255)
        'page_content': u'', # = UnicodeString()
        'markup_language': u'', # = OneOf(h.markup_languages, if_empty='reStructuredText')
    },
    'tag': {
        'name': u'',
        'description': u''
    },
    'applicationsettings': {
        'id': None,
        'object_language_name': u'', # 255 chrs max
        'object_language_id': u'', # 3 chrs max, ISO 639-3 3-char Id code
        'metalanguage_name': u'', # 255 chrs max
        'metalanguage_id': u'', # 3 chrs max, ISO 639-3 3-char Id code
        'metalanguage_inventory': u'', # long text; Don't think this is really used for any OLD-side logic.
        'orthographic_validation': u'None', # one of 'None', 'Warning', or 'Error'
        'narrow_phonetic_inventory': u'', # long text; should be comma-delimited graphemes
        'narrow_phonetic_validation': u'None', # one of 'None', 'Warning', or 'Error'
        'broad_phonetic_inventory': u'', # long text; should be comma-delimited graphemes
        'broad_phonetic_validation': u'None', # one of 'None', 'Warning', or 'Error'
        'morpheme_break_is_orthographic': False, # boolean
        'morpheme_break_validation': u'None', # one of 'None', 'Warning', or 'Error'
        'phonemic_inventory': u'', # long text; should be comma-delimited graphemes
        'morpheme_delimiters': u'', # 255 chars max; should be COMMA-DELIMITED single chars...
        'punctuation': u'', # long text; should be punctuation chars
        'grammaticalities': u'', # 255 chars max ...
        'storage_orthography': None, # id of an orthography
        'input_orthography': None, # id of an orthography
        'output_orthography': None, # id of an orthography
        'unrestricted_users': [] # an array of users who are "unrestricted". In the OLD this is a m2m relation, I think.
    }
}
def get_collection_for_lingsync_doc(doc):
    """Return the collection name for a LingSync document.

    Documents normally carry a `collection` attribute (e.g. 'sessions',
    'datums'). When that is absent, some documents instead carry a
    `fieldDBtype` attribute with a capitalized singular analog (e.g.
    'Session', 'Datum'), which is mapped back to the collection name here.
    """
    type2collection = {
        'Session': 'sessions',
        'Corpus': 'private_corpuses', # or 'corpuses'?
        'Datum': 'datums'
    }
    collection = doc.get('collection')
    if collection:
        return collection
    fielddb_type = doc.get('fieldDBtype')
    if fielddb_type:
        return type2collection.get(fielddb_type)
    return collection
def lingsync2old(fname, lingsync_db_name, force_file_download):
    """Convert the LingSync database (named `lingsync_db_name`, whose data are
    stored in the JSON file `fname`) to an OLD-compatible JSON file. This is
    the primary "convert" function that represents Step 2.

    Returns the path of the OLD-format JSON file written to disk.
    """
    # Maps names of OLD resources (pluralized) to lists of dicts, where each
    # such dict is a valid payload for an OLD POST request.
    old_data = {}
    # Holds warning messages accrued via the transformation of LingSync data
    # structures to OLD ones.
    warnings = {}
    # This holds all of the `language` values from the LingSync sessions that
    # we process. Since the OLD assumes a single language, we will arbitrarily
    # choose the first one when creating the OLD's application settings.
    languages = set()
    lingsync_data = json.load(open(fname))
    try:
        rows = lingsync_data['rows']
    except KeyError:
        # Pretty-print the unexpected payload to aid debugging, then abort.
        p(lingsync_data)
        sys.exit(u'%sUnable to load LingSync data. Aborting.%s' % (ANSI_FAIL,
            ANSI_ENDC))
    # - LingSync sessions are turned into OLD collections.
    # - LingSync datums are turned into OLD forms.
    # - LingSync corpuses are not used.
    # - LingSync private_corpuses are not used.
    # - LingSync users are turned into OLD users.
    # - LingSync datalists are turned into OLD corpora.
    # - LingSync documents with no `collection` value are logic, not data; i.e.,
    #   mapreduces or something else.
    # Note: we don't necessarily need to loop through all rows for each
    # collection type. We may need to process the sessions first, because the
    # datums refer to them. However, it seems that every datum redundantly
    # holds a copy of its session anyway, so this may not be necessary.
    # LS-Session to OLD-Collection.
    # Deal with LingSync sessions first, since they contain data that will
    # be needed for datums-come-forms later on.
    # if r.get('doc', {}).get('collection') == 'sessions':
    for r in rows:
        if get_collection_for_lingsync_doc(r.get('doc', {})) == 'sessions':
            old_object = process_lingsync_session(r['doc'])
            if old_object:
                old_data, warnings = update_state(old_object, old_data,
                    warnings)
                # Add any language extracted from the session.
                if old_object.get('language'):
                    languages.add(old_object['language'])
    # LS-Datum to OLD-Form.
    # NOTE(review): `old_data['collections']` raises KeyError when the dump
    # contains datums but no sessions — confirm that cannot happen in
    # practice, or use old_data.get('collections', []).
    for r in rows:
        if get_collection_for_lingsync_doc(r.get('doc', {})) == 'datums':
            old_object = process_lingsync_datum(r['doc'],
                old_data['collections'], lingsync_db_name)
            if old_object:
                old_data, warnings = update_state(
                    old_object, old_data, warnings)
    # Note: LingSync corpus and private_corpus documents don't appear to
    # contain any data that need to be migrated to the OLD. They contain
    # metadata about the corpus, including licensing information and basic info
    # about what datum and session fields to expect.
    # Uncomment the following block to inspect the corpus/private_corpus
    # documents in the JSON dump being analyzed.
    # LS-User to OLD-User
    for r in rows:
        if get_collection_for_lingsync_doc(r.get('doc', {})) == 'users':
            old_object = process_lingsync_user(r['doc'])
            old_data, warnings = update_state(old_object, old_data, warnings)
    # LS-Datalist to OLD-Corpus
    for r in rows:
        if get_collection_for_lingsync_doc(r.get('doc', {})) == 'datalists':
            old_object = process_lingsync_datalist(r['doc'])
            old_data, warnings = update_state(old_object, old_data, warnings)
    # Merge/consolidate duplicate users, speakers and tags.
    old_data, warnings = consolidate_resources(old_data, warnings)
    # Get an OLD application settings, using the language(s) and
    # grammaticalities extracted from the LingSync corpus.
    old_application_settings, warnings = get_old_application_settings(old_data,
        languages, warnings)
    old_data['applicationsettings'] = [old_application_settings]
    # Download audio, video or image files from the LingSync application, if
    # necessary.
    old_data, warnings, exit_status = download_lingsync_media_files(old_data,
        warnings, lingsync_db_name, force_file_download)
    if exit_status == 'aborted':
        print ('You chose not to migrate audio/video/image files from LingSync'
            ' to OLD because they were too large.')
    # Tell the user what we've accomplished.
    print_summary(lingsync_db_name, rows, old_data, warnings)
    # Save our OLD data to a JSON file in OLD_DIR/
    old_data_fname = write_old_data_to_disk(old_data, lingsync_db_name)
    return old_data_fname
def create_files_directory_safely(lingsync_db_name):
    """Return the path of the directory that holds this corpus's media files,
    creating the directory first if it does not already exist.
    """
    target = os.path.join(FILES_DIR, lingsync_db_name)
    if os.path.isdir(target):
        return target
    os.makedirs(target)
    return target
def human_bytes(num_bytes):
    """Return an integer byte count in human-readable form, e.g. 1536 ->
    '1.5 KiB'.

    Returns 'File size unavailable.' when `num_bytes` is None; counts of
    1024 bytes or fewer are reported as plain bytes.
    """
    if num_bytes is None:
        return 'File size unavailable.'
    # Use a float base so the divisions below are true divisions under
    # Python 2 as well: with integer operands `num_bytes / factor` truncated,
    # so e.g. 1536 bytes was reported as '1 KiB' instead of '1.5 KiB'.
    KiB = 1024.0
    units = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    # Walk from the largest unit down; the first factor strictly smaller
    # than the count wins (same strict `>` thresholds as before).
    factor = KiB ** len(units)
    for unit in reversed(units):
        if num_bytes > factor:
            return '%.3g %s' % (num_bytes / factor, unit)
        factor /= KiB
    return '%d bytes' % num_bytes
def download_lingsync_media_files(old_data, warnings, lingsync_db_name, force_file_download):
    """If `old_data` contains OLD file resources generated from LingSync files,
    then we need to download their file data and save them for later upload to
    the OLD.

    Returns an (old_data, warnings, exit_status) triple, where `exit_status`
    is 'ok' or 'aborted' (the user declined a large download).
    """
    # Nothing to download.
    if len(old_data.get('files', [])) == 0:
        return (old_data, warnings, 'ok')
    files = old_data['files']
    file_count = len(files)
    # Sizes reported by LingSync, with missing/zero sizes filtered out.
    # NOTE(review): relies on Python 2 `filter` returning a list; on Python 3
    # the `sum` below would exhaust the iterator before `big_files` is built.
    file_sizes = filter(None,
        [f.get('__lingsync_file_size') for f in files])
    total_files_size = sum(file_sizes)
    total_files_size_human = human_bytes(total_files_size)
    big_file_size_human = human_bytes(BIG_FILE_SIZE)
    big_files = [s for s in file_sizes if s > BIG_FILE_SIZE]
    we_have_big_files = bool(big_files)
    we_have_big_data = total_files_size > BIG_DATA
    # Warn the user and ask for confirmation before pulling a lot of data.
    if we_have_big_files or we_have_big_data:
        if we_have_big_files and we_have_big_data:
            msg = (u'Your LingSync corpus contains at least %s worth of'
                u' (audio/video/image) file data, including at least one'
                u' file bigger than %s.' % (total_files_size_human,
                big_file_size_human))
        elif we_have_big_files:
            msg = (u'Your LingSync corpus contains audio/video/image files,'
                u' some of which are bigger than %s.' % (
                big_file_size_human,))
        elif we_have_big_data:
            msg = (u'Your LingSync corpus contains at least %s worth of'
                u' (audio/video/image) file data.' % (
                total_files_size_human,))
        # `raw_input` is Python 2-only (py3: `input`).
        response = raw_input(u'%s%s Enter \'y\'/\'Y\' if you want this'
            u' script to download all of those files from LingSync and'
            u' migrate them to your OLD. Enter \'n\'/\'N\' (or anything'
            u' else) to skip the migrating of files:%s ' % (ANSI_WARNING,
            msg, ANSI_ENDC))
        if response not in ['y', 'Y']:
            # NOTE(review): assumes warnings['general'] already exists as a
            # set; other code paths create it via setdefault — confirm.
            warnings['general'].add(u'You have lots of file data (i.e.,'
                u' audio, video, or images) in your LingSync corpus and you'
                u' chose not to migrate them using this script.')
            old_data['files'] = []
            return (old_data, warnings, 'aborted')
    dirpath = create_files_directory_safely(lingsync_db_name)
    downloaded_files = []
    for file in old_data['files']:
        url = file.get('__lingsync_file_url')
        fname = file.get('filename')
        fsize = file.get('__lingsync_file_size')
        if not fname:
            # Fall back to the last path component of the URL.
            try:
                fname = os.path.split(url)[1]
            except:
                fname = None
        if url and fname:
            filepath = os.path.join(dirpath, fname)
            outcome, warnings = download_lingsync_file(url, filepath,
                fsize, warnings, force_file_download)
            if outcome:
                # Remember where the bytes were saved for the later upload.
                file['__local_file_path'] = filepath
                downloaded_files.append(file)
            else:
                warnings['general'].add(u'We were unable to download the'
                    u' file data for a file associated to LingSync datum'
                    u' %s; download and/or local write failed.' % (
                    file['__lingsync_datum_id'],))
        else:
            warnings['general'].add(u'We were unable to download the file'
                u' data for a file associated to LingSync datum %s; URL or'
                u' filename was not retrievable.' % (
                file['__lingsync_datum_id'],))
    # Only files whose data were actually fetched survive in `old_data`.
    old_data['files'] = downloaded_files
    return (old_data, warnings, 'ok')
def download_lingsync_file(url, filepath, fsize, warnings, force_file_download):
    """Download a LingSync file at `url` and save it to `filepath`.

    Skips the download when the file already exists locally, unless
    `force_file_download` is set. Returns an (outcome, warnings) pair where
    `outcome` is True on success.
    """
    if os.path.isfile(filepath) and (not force_file_download):
        return (True, warnings)
    # Large files are streamed in chunks rather than held in memory.
    file_is_big = bool(fsize and fsize > BIG_FILE_SIZE)
    response = requests.get(url, stream=file_is_big, verify=False)
    if not response.ok:
        # BUG FIX: previously the output file was opened (created/truncated)
        # before the request, so a failed download left an empty file behind
        # that later runs treated as already downloaded.
        warnings['general'].add(u'Attempt to download LingSync file at %s'
            u' failed.' % (url,))
        return (False, warnings)
    with open(filepath, 'wb') as handle:
        if file_is_big:
            for block in response.iter_content(1024):
                handle.write(block)
        else:
            handle.write(response.content)
    return (os.path.isfile(filepath), warnings)
def get_old_application_settings(old_data, languages, warnings):
    """Return an OLD application settings dict, given a set of (object)
    language names and the grammaticalities (in the forms in `old_data`).

    Returns an (appset, warnings) pair.
    """
    appset = copy.deepcopy(old_schemata['applicationsettings'])
    if languages:
        # The OLD assumes a single object language: arbitrarily take the
        # first and warn when others had to be discarded.
        languages = list(languages)
        language = languages[0]
        appset['object_language_name'] = language
        if len(languages) > 1:
            # Use setdefault so this works even when no 'general' warnings
            # have been accrued yet.
            warnings.setdefault('general', set()).add(u'Arbitrarily chose'
                u' \u2018%s\u2019 as the'
                u' OLD object language when the following languages were listed'
                u' in the LingSync corpus: \u2018%s\u2019.' % (language,
                u'\u2019, \u2018'.join(languages)))
    grammaticalities = set()
    # BUG FIX: default to an empty list; `old_data.get('forms')` returns
    # `None` when no forms were converted, and iterating None raises
    # TypeError.
    for form in old_data.get('forms', []):
        grammaticalities.add(form.get('grammaticality', u''))
    grammaticalities = u','.join([g for g in list(grammaticalities) if g])
    appset['grammaticalities'] = grammaticalities
    return (appset, warnings)
def consolidate_users(duplicates):
    """Given an array of duplicate user objects `duplicates` (same
    `username`), return a single (consolidated) user and an array of warning
    strings.

    For each non-username attribute the first truthy value found wins; a
    warning records any discarded conflicting values.
    """
    return_user = {'username': duplicates[0]['username']}
    user_warnings = []
    for attr in duplicates[0]:
        if attr != 'username':
            # Distinct truthy values for this attribute across duplicates.
            # NOTE: set ordering is arbitrary, so which value "wins" when
            # there is a conflict is not deterministic.
            vals = list(set([u[attr] for u in duplicates if u[attr]]))
            try:
                new_val = vals[0]
            except IndexError:
                # No duplicate had a truthy value for this attribute.
                # (Was a bare `except:`, which masked unrelated errors.)
                new_val = u''
            if len(vals) > 1:
                user_warnings.append(u'Lost data when consolidating users: we'
                    u' chose \u2018%s\u2019 as the val for \u2018%s\u2019 and'
                    u' the following values were discarded: \u2018%s\u2019.' % (
                    new_val, attr, u'\u2019, \u2018'.join(vals[1:])))
            return_user[attr] = new_val
    return (return_user, user_warnings)
def consolidate_speakers(duplicates):
    """Given an array of duplicate speaker objects `duplicates` (same
    `first_name`/`last_name`), return a single (consolidated) speaker and an
    array of warning strings.

    The name attributes are the deduplication key and are taken from the
    first duplicate; for every other attribute the first truthy value wins
    and a warning records any discarded conflicting values.
    """
    return_speaker = copy.deepcopy(old_schemata['speaker'])
    speaker_warnings = []
    for attr in return_speaker:
        if attr in ['first_name', 'last_name']:
            # Shared by all duplicates, by definition of "duplicate".
            return_speaker[attr] = duplicates[0][attr]
        else:
            # NOTE: set ordering is arbitrary, so the "winning" value among
            # conflicting ones is not deterministic.
            vals = list(set([s[attr] for s in duplicates if s[attr]]))
            try:
                new_val = vals[0]
            except IndexError:
                # No duplicate supplied a truthy value for this attribute.
                # (Was a bare `except:`; narrowed for consistency with
                # consolidate_users.)
                new_val = u''
            if len(vals) > 1:
                speaker_warnings.append(u'Lost data when consolidating'
                    u' speakers: we chose \u2018%s\u2019 as the val for'
                    u' \u2018%s\u2019 and the following values were discarded:'
                    u' \u2018%s\u2019.' % (new_val, attr,
                    u'\u2019, \u2018'.join(vals[1:])))
            return_speaker[attr] = new_val
    return (return_speaker, speaker_warnings)
def consolidate_resources(old_data, warnings):
    """Look for duplicate users, speakers and tags in `old_data` and merge the
    duplicates into a single resource of the relevant type.

    Users are duplicates when they share a `username`; speakers when they
    share both `first_name` and `last_name`; tags when they share a `name`.
    Returns the modified (old_data, warnings) pair.
    """
    # Consolidate users.
    # If multiple user objects have the same `username` value, we merge them
    # into one user.
    if len(old_data.get('users', [])) > 1:
        users = old_data['users']
        consolidated_users = []
        consolidate_users_warnings = []
        processed = []
        for user in users:
            if user not in processed:
                username = user['username']
                duplicates = [u for u in users if u['username'] == username]
                processed += duplicates
                if len(duplicates) > 1:
                    new_user, user_warnings = consolidate_users(duplicates)
                    consolidate_users_warnings += user_warnings
                    consolidated_users.append(new_user)
                else:
                    consolidated_users.append(user)
        old_data['users'] = consolidated_users
        for warning in consolidate_users_warnings:
            warnings.setdefault('general', set()).add(warning)
    # Consolidate speakers.
    # If multiple speaker objects have the same `first_name` and `last_name`
    # values, we merge them into one speaker.
    if len(old_data.get('speakers', [])) > 1:
        speakers = old_data['speakers']
        consolidated_speakers = []
        consolidate_speakers_warnings = []
        processed = []
        for speaker in speakers:
            if speaker not in processed:
                first_name = speaker['first_name']
                last_name = speaker['last_name']
                duplicates = [u for u in speakers if
                    u['first_name'] == first_name and
                    u['last_name'] == last_name]
                processed += duplicates
                if len(duplicates) > 1:
                    new_speaker, speaker_warnings = consolidate_speakers(
                        duplicates)
                    consolidate_speakers_warnings += speaker_warnings
                    consolidated_speakers.append(new_speaker)
                else:
                    consolidated_speakers.append(speaker)
        old_data['speakers'] = consolidated_speakers
        for warning in consolidate_speakers_warnings:
            warnings.setdefault('general', set()).add(warning)
    # Consolidate tags.
    # If multiple tag objects have the same `name` value, we merge them into
    # one tag whose description joins all non-empty duplicate descriptions.
    if len(old_data.get('tags', [])) > 1:
        tags = old_data['tags']
        consolidated_tags = []
        consolidate_tags_warnings = []
        processed = []
        for tag in tags:
            if tag not in processed:
                name = tag['name']
                description = tag['description']
                duplicates = [t for t in tags if t['name'] == name]
                processed += duplicates
                if len(duplicates) > 1:
                    new_tag = tag
                    new_description = u'\n\n'.join([t['description'] for t in
                        duplicates if t['description']])
                    # BUG FIX: the original assigned the *old* description
                    # here, so the merge (and the warning's claim below)
                    # never took effect.
                    new_tag['description'] = new_description
                    if new_description != description:
                        consolidate_tags_warnings.append(u'Changed description'
                            u' of tag \u2018%s\u2019 from \u2018%s\u2019 to'
                            u' \u2018%s\u2019' % (name, description,
                            new_description))
                    consolidated_tags.append(new_tag)
                else:
                    consolidated_tags.append(tag)
        old_data['tags'] = consolidated_tags
        for warning in consolidate_tags_warnings:
            warnings.setdefault('general', set()).add(warning)
    return old_data, warnings
def get_old_json_filename(database_name):
    """Return the relative path of the JSON file that holds the LingSync data
    converted to a format the OLD can ingest.
    """
    basename = '%s.json' % database_name
    return os.path.join(OLD_DIR, basename)
def write_old_data_to_disk(old_data, database_name):
    """Serialize the OLD data extracted from the LingSync corpus to a JSON
    file (so re-runs can skip the conversion step) and return its path.
    """
    target_path = get_old_json_filename(database_name)
    with open(target_path, 'w') as fp:
        json.dump(old_data, fp, indent=4)
    return target_path
def get_lingsync_corpus_summary(rows):
    """Return a string summarizing the LingSync documents that we downloaded,
    as per-collection counts (documents with no collection are counted under
    'NOT DATA').
    """
    doc_counts = {}
    for row in rows:
        label = get_collection_for_lingsync_doc(row.get('doc', {}))
        if label is None:
            label = u'NOT DATA'
        doc_counts[label] = doc_counts.get(label, 0) + 1
    summary_lines = [u'\nLingSync documents downloaded.']
    for label in sorted(doc_counts.keys()):
        summary_lines.append(u'  %s: %d' % (label, doc_counts[label]))
    return u'\n'.join(summary_lines)
def get_summary_of_old_data(old_data):
    """Return a string summarizing the OLD resources that will be created,
    as per-resource counts sorted by resource name.
    """
    lines = [u'\nOLD resources to be created.']
    for name in sorted(old_data):
        lines.append(u'  %s: %d' % (name, len(old_data[name])))
    return u'\n'.join(lines)
def print_summary(lingsync_db_name, rows, old_data, warnings):
    """Print a summary of the OLD data and warnings generated.
    Also save to disk the summaries of downloaded LingSync data and converted
    OLD data. We save these so that the --verbose option can work consistently.
    """
    # Summary of what was downloaded from LingSync.
    lingsync_summary = get_lingsync_corpus_summary(rows)
    path = os.path.join(LINGSYNC_DIR, '%s-summary.txt' % lingsync_db_name)
    with codecs.open(path, mode='w', encoding='utf-8') as f:
        f.write(lingsync_summary)
    print lingsync_summary
    # Summary of the OLD resources that will be created.
    old_summary = get_summary_of_old_data(old_data)
    path = os.path.join(OLD_DIR, '%s-summary.txt' % lingsync_db_name)
    with codecs.open(path, mode='w', encoding='utf-8') as f:
        f.write(old_summary)
    print old_summary
    warnings_text = []
    if warnings:
        # Total warning count across all loci.
        # NOTE: `iteritems` is Python 2-only (py3: `items`).
        warnings_count = 0
        for warning_locus, warnings_set in warnings.iteritems():
            warnings_count += len(warnings_set)
        if warnings_count == 1:
            warnings_text.append(u'\n%s%d Conversion Warning.%s' % (ANSI_WARNING,
                warnings_count, ANSI_ENDC))
        else:
            warnings_text.append(u'\n%s%d Conversion Warnings.%s' % (
                ANSI_WARNING, warnings_count, ANSI_ENDC))
        # General warnings first, then per-document warnings sorted by locus;
        # `index` numbers warnings continuously across both groups.
        index = 0
        if warnings.get('general'):
            warnings_text.append(u'\n General warnings:')
            for warning in warnings['general']:
                index += 1
                warnings_text.append(u' %d. %s' % (index, warning))
        for warning_locus in sorted(warnings.keys()):
            if warning_locus != 'general':
                warnings_set = warnings[warning_locus]
                warnings_text.append(u'\n Warning(s) for %s:' % warning_locus)
                for warning in sorted(warnings_set):
                    index += 1
                    warnings_text.append(u' %d. %s' % (index, warning))
    else:
        warnings_text.append(u'\nNo warnings.')
    warnings_text = u'\n'.join(warnings_text)
    # Persist the warnings so --verbose can re-display them later.
    path = os.path.join(OLD_DIR, '%s-conversion-warnings.txt' %
        lingsync_db_name)
    with codecs.open(path, mode='w', encoding='utf-8') as f:
        f.write(warnings_text)
    print warnings_text
def update_state(old_object, old_data, warnings):
    """Fold one converted OLD object (and its warnings) into the running
    `old_data` / `warnings` accumulators and return both.

    `old_object` is the OLD resource-as-dict derived from a LingSync
    document.
    """
    # Primary resource: append its payload, skipping exact duplicates.
    resource_name = old_object['old_resource']
    if resource_name:
        payload = old_object['old_value']
        bucket = old_data.setdefault(resource_name, [])
        if payload not in bucket:
            bucket.append(payload)
    # Auxiliary resources (e.g. tags, speakers) ride along with the object.
    for aux_name, aux_items in old_object['old_auxiliary_resources'].items():
        aux_bucket = old_data.setdefault(aux_name, [])
        for aux_item in aux_items:
            if aux_item not in aux_bucket:
                aux_bucket.append(aux_item)
    # Document-specific warnings are keyed by a string naming the source doc.
    doc_warnings = old_object['warnings']['docspecific']
    if doc_warnings:
        warnings_key = u'OLD %s resource generated from LingSync %s %s' % (
            old_object['old_resource'], old_object['lingsync_type'],
            old_object['originaldoc']['_id'])
        warnings.setdefault(warnings_key, set()).update(doc_warnings)
    # General warnings are pooled under the 'general' key.
    general_warnings = old_object['warnings']['general']
    if general_warnings:
        warnings.setdefault('general', set()).update(general_warnings)
    return (old_data, warnings)
def timestamp2human(timestamp):
    """Return a LingSync millisecond epoch timestamp as a local-time
    u'YYYY-MM-DD HH:MM' string, or `None` if the value cannot be parsed.
    """
    try:
        # LingSync stores epoch milliseconds; * 0.001 converts to seconds.
        return datetime.datetime\
            .fromtimestamp(int(timestamp) * 0.001).strftime("%Y-%m-%d %H:%M")
    except (TypeError, ValueError, OverflowError, OSError):
        # Non-numeric input, or a timestamp out of range for the platform.
        # (Was a bare `except:`, which also masked e.g. KeyboardInterrupt.)
        return None
def process_lingsync_comments_val(ls_comments, warnings):
    """Process the value of a LingSync datum comments attribute or comments
    datum field.

    `ls_comments` may be a list of comment objects (dicts with `text` and
    optional `username`, `dateCreated`, `timestampModified` keys) or a bare
    string. Returns a (comments, warnings) pair where `comments` is a list
    of formatted unicode comment strings.
    """
    comments_to_return = []
    if ls_comments and isinstance(ls_comments, list):
        for comment_obj in ls_comments:
            if isinstance(comment_obj, dict) and comment_obj.get('text'):
                author = u''
                if comment_obj.get('username'):
                    author = u' by %s' % comment_obj['username']
                created = u''
                if comment_obj.get('dateCreated'):
                    human_date_created = timestamp2human(
                        comment_obj['dateCreated'])
                    if human_date_created:
                        created = u' on %s' % human_date_created
                    else:
                        print(u'WARNING: unable to parse timestamp %s' % (
                            comment_obj['dateCreated'],))
                modified = u''
                if comment_obj.get('timestampModified'):
                    human_date_modified = timestamp2human(
                        comment_obj['timestampModified'])
                    if human_date_modified:
                        modified = u' (last modified %s)' % human_date_modified
                    else:
                        print(u'WARNING: unable to parse timestamp %s' % (
                            comment_obj['timestampModified'],))
                comment_ = u'Comment %s%s%s: %s' % (author, created, modified,
                    punctuate_period_safe(comment_obj['text']))
                comments_to_return.append(comment_)
            else:
                # A comment whose text value is just an empty string is just
                # ignored; anything else unprocessable gets a warning.
                # BUG FIX: the original called `.strip()` on
                # `comment_obj.get('text')`, which crashed when the comment
                # was not a dict or had no/None `text`.
                text = (comment_obj.get('text')
                    if isinstance(comment_obj, dict) else None)
                if text is None or text.strip() != u'':
                    warnings['docspecific'].append(u'Unable to process the'
                        ' following comment: \u2018%s\u2019' % (
                        unicode(comment_obj)))
    else:
        # A bare (byte or unicode) string becomes a single comment.
        if isinstance(ls_comments, (str, unicode)):
            if ls_comments.strip():
                comments_to_return.append(u'Comment: %s' % (
                    punctuate_period_safe(ls_comments),))
    return (comments_to_return, warnings)
def process_lingsync_datalist(doc):
    """Convert a LingSync datalist document to an OLD corpus dict.

    Returns a dict holding the original document, the target OLD resource
    type ('corpora'), the OLD corpus value, any auxiliary resources, and the
    accumulated warnings.

    QUESTIONS:
    1. Can a datalist have tags?
    2. What does a datalist's `audioVideo` value look like?
    """
    # Pretty-print the user document, for inspection.
    # p(doc)
    datalist_id = doc.get('_id')
    auxiliary_resources = {}
    # These are the LingSync datalist attrs that we know how to deal with for
    # to-OLD conversion.
    known_attrs = [
        '_id', # u'0d69a355b63fa165273111aa739802c1',
        '_rev', # u'1-285126da331e5b2c7df7552599de2960',
        'audioVideo', # [],
        'collection', # u'datalists',
        'comments', # [],
        'dateCreated', # u'"2014-11-10T02:29:25.168Z"',
        'dateModified', # u'"2014-11-10T02:29:25.309Z"',
        'datumIds', # [u'a8b939f86a76109b121100e944b6a758', ...],
        'description', # u'This is the result of searching for : morphemes:#nit- In Blackfoot on Sun Nov 09 2014 18:29:24 GMT-0800 (PST)',
        'pouchname',
        'timestamp', # 1415586565309,
        'title', # u'All Data as of Sun Nov 09 2014 18:29:25 GMT-0800 (PST)'}
        'trashed', # ignoring this attribute; only value I've seen is "deleted1428640331225" ...
        'dbname' # Same as pouchname above, it's the name of the LingSync corpus.
    ]
    # Add warnings to this.
    warnings = {
        'general': [],
        'docspecific': []
    }
    # Warn about any attributes we do not know how to convert.
    for k in doc:
        if k not in known_attrs:
            warnings['docspecific'].append(u'\u2018%s\u2019 not a recognized'
                u' attribute in datalist %s' % (k, datalist_id))
    # This will be our return value.
    oldobj = {
        'originaldoc': doc,
        'lingsync_type': 'datalist',
        'old_resource': 'corpora',
        'old_value': {}, # Valuate this with `old_corpus`
        'old_auxiliary_resources': {}, # Valuate this with `auxiliary_resources`
        'warnings': []
    }
    # This dict will be used to create the OLD corpus.
    old_corpus = copy.deepcopy(old_schemata['corpus'])
    # Description.
    old_description = []
    # Datalist description -> corpus description
    ls_description = doc.get('description')
    if ls_description:
        old_description.append(ls_description)
    # Datalist metadata (id, timestamps, etc) -> corpus description
    datalist_metadata = []
    datalist_metadata.append(u'This corpus was generated from LingSync datalist'
        u' %s.' % datalist_id)
    ls_dateCreated = doc.get('dateCreated')
    if ls_dateCreated:
        datalist_metadata.append(u'It was created in LingSync on %s.' % (
            ls_dateCreated,))
    ls_dateModified = doc.get('dateModified')
    if ls_dateModified:
        datalist_metadata.append(u'It was last modified in LingSync on %s.' % (
            ls_dateModified,))
    datalist_metadata = u' '.join(datalist_metadata)
    old_description.append(datalist_metadata)
    # Datalist comments -> corpus description
    ls_comments = doc.get('comments')
    if ls_comments:
        processed_comments, warnings = process_lingsync_comments_val(
            ls_comments, warnings)
        if processed_comments:
            old_description += processed_comments
    old_description = u'\n\n'.join(old_description).strip()
    if old_description:
        old_corpus['description'] = old_description
    # Name. Not empty, max 255 chars. From LingSync datalist title.
    # (The unused `title_too_long` flag the original set here has been
    # removed; the too-long case is fully handled by the warning below.)
    ls_title = doc.get('title')
    if not ls_title:
        old_name = u'Corpus from LingSync datalist %s' % datalist_id
        warnings['docspecific'].append(u'Datalist %s has no title value; the corpus generated'
            ' from it has "%s" as its name value.' % (datalist_id, old_name))
    elif len(ls_title) > 255:
        warnings['docspecific'].append('The title "%s" of datalist %s is too long and will be'
            ' truncated.' % (ls_title, datalist_id))
        old_name = ls_title[:255]
    else:
        old_name = ls_title
    old_corpus['name'] = old_name
    # Store "private" keys for later datum-id -> form-id resolution.
    old_corpus['__lingsync_datalist_id'] = datalist_id
    old_corpus['__lingsync_datalist_datum_ids'] = doc.get('datumIds', [])
    oldobj['old_value'] = old_corpus
    oldobj['old_auxiliary_resources'] = auxiliary_resources
    oldobj['warnings'] = warnings
    return oldobj
def my_strip(thing):
    """Safely strip `thing`.

    Returns `thing` with leading/trailing whitespace removed when it is a
    string; non-string values (None, bools, dicts, ...) are returned
    unchanged.
    """
    try:
        return thing.strip()
    except AttributeError:
        # Narrowed from a bare `except:`: only the "no .strip method" case
        # should be silently tolerated.
        return thing
def process_lingsync_user(doc):
    """Convert a LingSync user document to an OLD user dict.

    Returns a dict holding the original document, the target OLD resource
    type ('users'), the OLD user value (None when the LingSync user has no
    username), any auxiliary resources, and the accumulated warnings.
    """
    # Pretty-print the user document, for inspection.
    # p(doc)
    user_id = doc.get('_id')
    auxiliary_resources = {}
    # These are the LingSync user attrs that we know how to deal with for
    # to-OLD conversion.
    known_attrs = [
        '_id',
        '_rev',
        'authUrl',
        'collection',
        'gravatar',
        'id',
        'username',
        'firstname',
        'lastname',
        'description',
        'markAsNeedsToBeSaved',
        'researchInterest',
        'email',
        'subtitle',
        'affiliation',
        # Ignoring the following:
        'api',
        'fieldDBtype',
        'version',
        # New from weisskircherisch. Ignored.
        'appbrand',
        'corpora',
        'dateCreated'
    ]
    # Add warnings to this.
    warnings = {
        'general': [],
        'docspecific': []
    }
    # Warn about any attributes we do not know how to convert.
    for k in doc:
        if k not in known_attrs:
            warnings['docspecific'].append(u'\u2018%s\u2019 not a recognized'
                u' attribute in user %s' % (k, user_id))
    # This will be our return value.
    oldobj = {
        'originaldoc': doc,
        'lingsync_type': 'user',
        'old_resource': 'users',
        'old_value': {}, # Valuate this with `old_user`
        'old_auxiliary_resources': {}, # Valuate this with `auxiliary_resources`
        'warnings': []
    }
    # This dict will be used to create the OLD user.
    old_user = copy.deepcopy(old_schemata['user'])
    # Attested value: "lingsync". Ignoring.
    ls_appbrand = doc.get('appbrand')
    # if ls_appbrand:
    #     print 'User has appbrand: %s' % ls_appbrand
    # Attested value: array of corpus objects. Ignoring.
    ls_corpora = doc.get('corpora')
    # if ls_corpora:
    #     print 'User has corpora: %s' % ls_corpora
    # Attested value: Unix timestamp, assumedly when user was created. Ignoring.
    ls_dateCreated = doc.get('dateCreated')
    # if ls_dateCreated:
    #     print 'User has dateCreated: %s' % ls_dateCreated
    ls_username = my_strip(doc.get('username'))
    ls_firstname = my_strip(doc.get('firstname'))
    ls_lastname = my_strip(doc.get('lastname'))
    ls_description = my_strip(doc.get('description'))
    ls_markAsNeedsToBeSaved = my_strip(doc.get('markAsNeedsToBeSaved'))
    ls_researchInterest = my_strip(doc.get('researchInterest'))
    ls_email = my_strip(doc.get('email'))
    ls_subtitle = my_strip(doc.get('subtitle'))
    ls_affiliation = my_strip(doc.get('affiliation'))
    if ls_username:
        # Fall back to the username for missing first/last names, since the
        # OLD requires both.
        if ls_firstname:
            old_first_name = ls_firstname
        else:
            old_first_name = ls_username
        if ls_lastname:
            old_last_name = ls_lastname
        else:
            old_last_name = ls_username
        if ls_email:
            old_email = ls_email
        else:
            old_email = FAKE_EMAIL
            warnings['general'].append(u'Created a user (with username %s) with a fake'
                u' email: %s. Please fix manually, i.e., from within the'
                u' Dative/OLD interface.' % (ls_username, FAKE_EMAIL))
        old_page_content = []
        if ls_description:
            old_page_content.append(ls_description)
        if ls_researchInterest:
            old_page_content.append(u'Research interest: %s' % (
                punctuate_period_safe(ls_researchInterest),))
        if ls_affiliation:
            old_page_content.append(u'Affiliation: %s' % (
                punctuate_period_safe(ls_affiliation),))
        old_page_content = u'\n\n'.join(old_page_content).strip()
        if old_page_content:
            old_user['page_content'] = old_page_content
        old_user['username'] = ls_username
        old_user['first_name'] = old_first_name
        old_user['last_name'] = old_last_name
        old_user['email'] = old_email
        old_user['role'] = u'administrator'
    else:
        old_user = None
        warnings['docspecific'].append(u'User %s has no username value; no'
            u' OLD user was generated from this document.' % user_id)
    # Sometimes, the value of a LingSync user's username is itself an object,
    # so we turn it into a string here.
    # BUGFIX: the original performed these subscript assignments even when
    # old_user was None (no username), raising a TypeError; guard them.
    if old_user is not None:
        old_user['username'] = fix_user_name(old_user['username'])
        old_user['first_name'] = fix_user_name(old_user['first_name'])
        old_user['last_name'] = fix_user_name(old_user['last_name'])
    oldobj['old_value'] = old_user
    oldobj['old_auxiliary_resources'] = auxiliary_resources
    oldobj['warnings'] = warnings
    return oldobj
def process_lingsync_datum(doc, collections, lingsync_db_name):
"""Process a LingSync datum. This will be encoded as an OLD form.
"""
# Pretty-print the datum document, for inspection.
# p(doc)
datum_id = doc['_id']
datum_fields = doc.get('datumFields', doc.get('fields'))
if datum_fields is None:
print 'Warning: unable to retrieve datumFields for datum %s' % datum_id
p(doc)
print '\n'
return None
# These are the LingSync datum fields that we know how to deal with for
# to-OLD conversion.
known_fields = [
'judgement',
'morphemes',
'allomorphs', # gina-nuktitut uses this and it is sometimes different from 'morphemes'?
'utterance',
'gloss',
'translation',
'another_translation', # in gina-inuktitut
'context_translation', # in gina-inuktitut
'validationStatus',
'tags',
'syntacticCategory',
'syntacticTreeLatex',
'enteredByUser', # Can contain a dict in its 'user' attribute (see below)
'modifiedByUser', # Can contain an array in its 'users' attribute. Forget what, exactly, this array can contain.
'comments',
'markAsNeedsToBeSaved', # Ignoring this. Strangely, there can be multiple fields with this label in a datimFields array ...
'checked', # Ignoring this. It can evaluate to `true`, but and may be relevant to `validationStatus` and the OLD form's `status`, but I think it's safe to ignore it.
'notes', # non-standard but attested
'phonetic', # non-standard but attested
'itemNumber', # added to comments field, when content-ful
'speaker', # added to OLD form.speaker (and possibly also comments field)
'context', # added to comments field, when content-ful
'documentation', # Never seen this not empty, so ignoring it for now.
'links', # added to comments field, when content-ful
'audio', # Name of .wav audio file, but don't know how to get URL (TODO)
'contextFile', # Name of .wav audio file, but don't know how to get URL (TODO)
'consultant', # Ignoring this for now, it's just a string of digits, e.g., '1' or '15'
'contextTranslation', # Adding this to OLD form's translations list
'housekeeping', # This field appears to always be empty ...
'orthography', # This field appears to always be empty ...
'spanish', # This field appears to always be empty ...
'consultants', # This field appears to always be empty ...
'dataelicited', # This field appears to always be empty ...
'dialect', # This field appears to always be empty ...
'judgment', # This trumps the standard "judgement" field (see above).
'language', # This field appears to always be empty ...
'morpheme', # This field trumps the standard "morphemes" field (see above).
'undefined', # Ignorable: never has a real value; only values ever seen are sequences of question marks.
'chapter', # Used in gina-inuktitut for chapter of Genesis.
'dateSEntered', # Timestamp that is ignored.
'verse', # Used in gina-inuktitut for verse of Genesis.
# New fields from weisskircherisch-firstcorpus
'audioFileName',
'begintimehh:mm:ssms',
'begintimehhMmSsms',
'endTime',
'fields',
'genDach',
'german',
'modality',
'relatedData',
'rudi',
'startTime',
'tier',
'ursula'
]
known_attrs = [
'_id', # u'c297e5ceecafe6b340876e07ac477736',
'_rev', # u'2-63e6d77f0e9f834000b77ff59fa7abd2',
'audioVideo', # [],
'collection', # u'datums',
'comments', # [],
'dateEntered', # u'2015-04-01T16:50:30.852Z',
'dateModified', # u'2015-04-01T16:50:30.852Z',
'datumFields', # []
'fields', # [] Sometimes used instead of 'datumFields'
'datumTags', # [],
'images', # [],
'jsonType', # u'Datum',
'pouchname',
'session', # {...} redundantly stores the session of each datum ...
'timestamp', # 1427907030852,
'trashed', # u'deleted'
'api', # Ignorable
'dateCreated', # Unix timestamp; Is this different value from `dateEntered`? Doesn't really matter for this migration script.
'dbname', # Ignorable
'fieldDBtype', # Ignorable
'version', # Ignorable
'datumStates', # Ignoring this. It's only found in gina-inuktitut.
'lastModifiedBy', # This is ignored because you can't write modifiers to OLD forms, only the OLD does that, server-side.
'enteredByUser', # This attribute appears to be redundant, given the enteredByUser-labelled field (see above). Ignoring.
'_attachments', # May contain references to audio files, but I don't know how to get their URLs. TODO.
'attachmentInfo' # Ignorable: never has a real value.
]
# Fill this with OLD resources that are implicit in the LingSync datum.
auxiliary_resources = {}
# Add warnings to this.
warnings = {
'general': [],
'docspecific': []
}
for k in doc:
if k not in known_attrs:
warnings['docspecific'].append(u'\u2018%s\u2019 not a recognized'
u' attribute in datum %s' % (k, datum_id))
for obj in datum_fields:
if obj['label'] and obj['label'] not in known_fields:
warnings['docspecific'].append(u'\u2018%s\u2019 not a recognized'
u' label in fields for datum %s' % (obj['label'], datum_id))
# This will be our return value.
oldobj = {
'originaldoc': doc,
'lingsync_type': 'datum',
'old_resource': 'forms',
'old_value': {}, # Valuate this with `old_form`
'old_auxiliary_resources': {}, # Valuate this with `auxiliary_resources`
'warnings': []
}
# This dict will be used to create the OLD collection.
old_form = copy.deepcopy(old_schemata['form'])
old_form['status'] = u'tested'
# LingSync datum metadata, as well as truncated or invalid values, and
# LingSync comments will all be placed in this value, which is stringified,
# ultimately.
old_comments = []
# Some datums have an `itemNumber` field.
ls_itemNumber = get_val_from_datum_fields('itemNumber', datum_fields)
if ls_itemNumber:
old_comments.append('Item number: %s' % (
punctuate_period_safe(ls_itemNumber),))
# Some datums have a `context` field.
ls_context = get_val_from_datum_fields('context', datum_fields)
if ls_context:
old_comments.append('Context: %s' % (
punctuate_period_safe(ls_context),))
# Some datums have a `links` field, which contains references to other
# datums.
# The `links` field appears to consistently be a string of comma-separated
# expressions of the form "similarTo:4f868ba9a79e57479ddbe4f62ae671c8"
# where the string after the colon is a datum id. That datum id should be
# transformed into a form id, if possible.
# TODO: post-process links; i.e., once the forms have been entered, update
# those with links so that the links reference the ids of the corresponding
# forms in the new OLD.
ls_links = get_val_from_datum_fields('links', datum_fields)
if ls_links:
old_comments.append('Links: %s' % (
punctuate_period_safe(ls_links),))
# Certain LingSync values may also be made into tags.
old_tags = []
# NOTE: these values cannot be valuated by LingSync datum values.
# 'elicitation_method': None, # = ValidOLDModelObject(model_name='ElicitationMethod')
# 'syntactic_category': None, # = ValidOLDModelObject(model_name='SyntacticCategory')
# 'source': None, # = ValidOLDModelObject(model_name='Source')
# These values (from LingSync datum fields) are used elsewhere.
ls_judgement = get_val_from_datum_fields('judgement', datum_fields)
ls_morphemes = get_val_from_datum_fields('morphemes', datum_fields)
ls_allomorphs = get_val_from_datum_fields('allomorphs', datum_fields)
ls_utterance = get_val_from_datum_fields('utterance', datum_fields)
ls_gloss = get_val_from_datum_fields('gloss', datum_fields)
ls_translation = get_val_from_datum_fields('translation', datum_fields)
ls_another_translation = get_val_from_datum_fields('another_translation',
datum_fields)
ls_context_translation = get_val_from_datum_fields('context_translation',
datum_fields)
ls_validationStatus = get_val_from_datum_fields('validationStatus', datum_fields)
ls_tags = get_val_from_datum_fields('tags', datum_fields)
ls_syntacticTreeLatex = get_val_from_datum_fields('syntacticTreeLatex', datum_fields)
ls_chapter = get_val_from_datum_fields('chapter', datum_fields)
ls_verse = get_val_from_datum_fields('verse', datum_fields)
ls_datumTags = doc.get('datumTags')
ls_session = doc.get('session')
ls_datumStates = doc.get('datumStates')
if ls_datumStates:
print 'datumStates: %s\n' % ls_datumStates
# Some datums have a 'documentation' field; adding it to the comments field.
ls_documentation = get_val_from_datum_fields('documentation', datum_fields)
if ls_documentation:
ls_documentation = ls_documentation.strip()
old_comments.append(
u'Documentation: \u2018%s\u2019' % punctuate_period_safe(
ls_documentation))
# Date Elicited. Date in 'MM/DD/YYYY' format. From
# datum.session.sessionFields.dateElicited.
# Attempt to create a MM/DD/YYYY string from `date_session_elicited`. At
# present, we are only recognizing date strings in MM/DD/YYYY and
# YYYY-MM-DD formats.
date_datum_elicited_unparseable = False
if ls_session:
session_fields = ls_session.get('sessionFields', [])
date_session_elicited = get_val_from_session_fields('dateElicited', session_fields)
if date_session_elicited:
date_elicited = None
try:
datetime_inst = datetime.datetime.strptime(date_session_elicited,
'%Y-%m-%d')
except Exception, e:
try:
datetime_inst = datetime.datetime.strptime(
date_session_elicited, '%m/%d/%Y')
except Exception, e:
datetime_inst = None
date_datum_elicited_unparseable = True
if date_session_elicited != 'none' and not QUIET:
warnings['docspecific'].append(u'Unable to parse %s to an OLD-compatible date'
u' in MM/DD/YYYY format for datum %s.' % (
date_session_elicited, datum_id))
if datetime_inst:
y = datetime_inst.year
m = datetime_inst.month
d = datetime_inst.day
date_elicited = u'%s/%s/%s' % (str(m).zfill(2),
str(d).zfill(2), str(y))
else:
date_elicited = None
if date_elicited:
old_form['date_elicited'] = date_elicited
# TODO: this datum field ("audio") can sometimes name an audio file.
# However, I have not been able to discover how to get my hands on the URL of
# that file.
ls_audio = get_val_from_datum_fields('audio', datum_fields)
# if ls_audio:
# print '\naudio field:'
# pprint.pprint(ls_audio)
# print
# TODO: this datum field ("contextFile") can sometimes also name an audio
# file. As with the "audio" field mentioned above, I have not been able to
# discover how to get my hands on the URL of that file. Note, these are all
# .wav file names.
ls_contextFile = get_val_from_datum_fields('contextFile', datum_fields)
if ls_contextFile and ls_contextFile != 'contextFile' and ('.wav' not in ls_contextFile):
print '\ncontextFile field:'
pprint.pprint(ls_contextFile)
print
# TODO: this datum attribute ("_attachments") can sometimes also name an audio file, of sorts.
ls_attachments = doc.get('_attachments')
# The only value I have seen is this object:
# {
# u'1398457871136.wav': {u'stub': True, u'length': 81964,
# u'content_type': u'audio/wav', u'revpos': 2, u'digest':
# u'md5-vl3deBSesSf4uWsn6Ctf5g=='},
# u'1398457871477.wav': {u'stub': True, u'length': 81964,
# u'content_type': u'audio/wav', u'revpos': 3, u'digest':
# u'md5-vl3deBSesSf4uWsn6Ctf5g=='}
# }
# Files. Array of OLD file objects. The `audioVideo` attribute holds an
# array of objects, each of which has 'URL' and 'type' attributes. The
# `images` attribute holds ...
ls_audioVideo = doc.get('audioVideo')
ls_images = doc.get('images')
if ls_audioVideo and (type(ls_audioVideo) is type([])):
for av in ls_audioVideo:
if (type(av) is type({})) and av.get('URL') and \
(av.get('trashed') != 'deleted'):
# We're guessing the MIME type based on the extension, not the
# file contents, cuz we're lazy right now...
mime_type = mimetypes.guess_type(av['URL'])[0]
print 'MIME type of audioVideo object: %s' % mime_type
if (not mime_type) or (mime_type not in old_allowed_file_types):
continue
old_file = copy.deepcopy(old_schemata['file'])
old_file['MIME_type'] = mime_type
file_description = [(u'This file was generated from the LingSync'
u' audio/video file stored at %s.' % av['URL'])]
if av.get('description'):
file_description.append(av['description'].strip())
if av.get('dateCreated'):
file_description.append(u'This file was created on LingSync'
u' at %s.' % av['dateCreated'])
old_file['description'] = u'\n\n'.join(file_description)
if av.get('filename'):
old_file['filename'] = av['filename'].strip()
# The only value I've seen here is "304 Not Modified", i.e., an
# HTTP status code. Ignoring this.
ls_av_uploadStatus = av.get('uploadStatus', u'')
# if ls_av_uploadStatus:
# print 'audioVideo uploadStatus: %s' % ls_av_uploadStatus
# Loop through all of the A/V attributes that are "known"
# and issue warnings when unknown ones are encountered.
for attr in av:
if attr not in known_audio_video_attrs:
warnings['docspecific'].append(u'Attribute'
u' \u2018%s\u2019 is not recognized in the'
u' `audioVideo` value of datum %s' % (attr,
datum_id))
# Store these "private" keys for possible use during file data
# download.
old_file['__lingsync_datum_id'] = datum_id
old_file['__lingsync_file_url'] = av['URL']
if av.get('size'):
old_file['__lingsync_file_size'] = av['size']
# LingSync's `type` attr is OLD's MIME_type. We probably want
# to programmatically extract this value from the filename
# and/or the file data though.
if av.get('type'):
old_file['__lingsync_MIME_type'] = av['type']
old_form['files'].append(old_file)
auxiliary_resources.setdefault('files', []).append(old_file)
# Files -- Images. Add `datum.images` to `form.files`, once we know what is
# in a LingSync datum's images attribute.
if ls_images:
warnings['docspecific'].append(u'Datum %s has an `images` attribute that has been'
u' ignored.' % datum_id)
# Tags. [] or a list of OLD tags.
if ls_tags:
if type(ls_tags) is type(u''):
# print (u'Processing datum field "tags" %s by splitting it into these'
# ' %d tags: "%s"' % (ls_tags, len(ls_tags.split()),
# ', '.join(ls_tags.split())))
global TAGSTOFIX
TAGSTOFIX.setdefault(ls_tags, {})
TAGSTOFIX[ls_tags].setdefault('datum_ids', []).append(datum_id)
TAGSTOFIX[ls_tags]['tags_created'] = ls_tags.split()
for tag in ls_tags.split():
old_tag = copy.deepcopy(old_schemata['tag'])
old_tag['name'] = tag
old_tags.append(old_tag)
else:
warnings['docspecific'].append(u'Unable to use value \u2018%s\u2019'
u' from datumField tags of datum %s' % (unicode(ls_tags),
datum_id))
if ls_datumTags:
if type(ls_datumTags) is type([]):
for tag in ls_datumTags:
if type(tag) is type({}):
if tag.get('tag'):
# print ('Processing datum attribute "datumTags" and'
# ' getting tag named %s' % tag['tag'])
global TAGSTOFIX
TAGSTOFIX.setdefault(tag['tag'], {})
TAGSTOFIX[tag['tag']].setdefault('datum_ids', []).append(datum_id)
TAGSTOFIX[tag['tag']]['tags_created'] = [tag['tag']]
old_tag = copy.deepcopy(old_schemata['tag'])
old_tag['name'] = tag['tag']
old_tags.append(old_tag)
else:
if not QUIET:
warnings['docspecific'].append(u'Tag object \u2018%s\u2019'
u' from datum.datumTags of datum %s has no `tag`'
u' attribute and cannot be used.' % (
unicode(tag), datum_id))
else:
warnings['docspecific'].append(u'Unable to use tag \u2018%s\u2019'
u' from datum.datumTags of datum %s' %
(unicode(tag), datum_id))
else:
warnings['docspecific'].append(u'Unable to use value \u2018%s\u2019'
u' from datum.datumTags of datum %s' %
(unicode(ls_datumTags), datum_id))
# If `ls_trashed == 'deleted'` then we mark the to-be-uploaded form as such
# and we will delete it in the OLD after creating it.
ls_trashed = doc.get('trashed')
if ls_trashed == 'deleted':
old_form['__lingsync_deleted'] = True
# TODO: do these validation statuses really mean the datum has been
# deleted? WAITING FOR gina.
if ls_validationStatus and ls_validationStatus in ['Deleted', 'Deleted, Checked']:
old_form['__lingsync_deleted'] = True
# TODO: some datums have a field labelled "consultant". However, from what
# I have seen, the value of this field is just a string containing digits,
# like '1' or '15' so I am ignoring it for now.
ls_consultant = get_val_from_datum_fields('consultant', datum_fields)
if ls_consultant:
non_digits = []
for ch in ls_consultant:
if ch not in '0123456789':
non_digits.append(ch)
if non_digits and ls_consultant != 'participant':
print '\nconsultant field:'
pprint.pprint(ls_consultant)
print
# Speaker. Null or a valid speaker resource. From datum.session.consultants.
# WARNING: it's not practical to try to perfectly parse free-form
# consultants values.
speakers = []
if ls_session:
session_fields = ls_session.get('sessionFields')
if not session_fields:
session_fields = ls_session.get('fields', [])
consultants = get_val_from_session_fields('consultants', session_fields)
dialect = get_val_from_session_fields('dialect', session_fields)
if not dialect:
dialect = ls_session.get('dialect')
if consultants:
consultants_list = consultants.split()
# If consultants is two capitalized words, e.g., <NAME>, then
# we assume we have a first name/ last name situation.
if len(consultants_list) == 2 and \
consultants_list[0] == consultants_list[0].lower().capitalize() and \
consultants_list[1] == consultants_list[1].lower().capitalize():
old_speaker = copy.deepcopy(old_schemata['speaker'])
old_speaker['first_name'] = consultants_list[0]
old_speaker['last_name'] = consultants_list[1]
speakers.append(old_speaker)
# Otherwise, we assume we have an initials situation (e.g., DS).
else:
for consultant in consultants_list:
old_speaker = copy.deepcopy(old_schemata['speaker'])
# If consultant is all-caps, we assume it is initials, where the
# first char is the first name initial and the remaining char(s)
# is/are the last name initial(s).
if consultant.upper() == consultant:
old_speaker['first_name'] = consultant[0]
old_speaker['last_name'] = consultant[1:]
else:
old_speaker['first_name'] = consultant
old_speaker['last_name'] = consultant
if dialect:
old_speaker['dialect'] = dialect
speakers.append(old_speaker)
# Aside from the datum's session's consultants field, some datums can have
# a 'speaker' field. This speaker value seems to consistently be a
# one-character string.
ls_speaker = get_val_from_datum_fields('speaker', datum_fields)
if ls_speaker:
old_speaker = copy.deepcopy(old_schemata['speaker'])
old_speaker['first_name'] = ls_speaker
old_speaker['last_name'] = ls_speaker
speakers.append(old_speaker)
if len(speakers) >= 1:
old_form['speaker'] = speakers[0]
if len(speakers) > 1:
if not QUIET:
warnings['docspecific'].append('Datum %s has more than one'
' consultant listed. Since OLD forms only allow one speaker, we'
' are just going to associate the first speaker to the OLD form'
' created form this LingSync datum. The additional LingSync'
' speakers will still be created as OLD speakers, however, and'
' ALL LingSync consultants will be documented in the form\'s'
' comments field.' % datum_id)
speaker_strs = [u'%s %s' % (s['first_name'], s['last_name']) for s
in speakers]
old_comments.append(punctuate_period_safe(
'Consultants: %s' % u', '.join(speaker_strs)))
for speaker in speakers:
auxiliary_resources.setdefault('speakers', []).append(speaker)
# Elicitor. Null or a valid user resource. From datum enteredByUser.
ls_enteredByUser = get_val_from_datum_fields('enteredByUser', datum_fields)
if ls_enteredByUser:
warnings['general'].append(u'Form elicitor values are being supplied by'
u' datum.session.enteredByUser values. This may be inaccurate. Change'
u' as needed in the Dative/OLD interface.')
old_elicitor = copy.deepcopy(old_schemata['user'])
old_elicitor['username'] = fix_user_name(ls_enteredByUser)
old_elicitor['first_name'] = fix_user_name(ls_enteredByUser)
old_elicitor['last_name'] = fix_user_name(ls_enteredByUser)
warnings['general'].append(u'Created a user (with username %s) with a'
u' fake email: %s. Please fix manually, i.e., from within the'
u' Dative/OLD interface.' % (ls_enteredByUser, FAKE_EMAIL))
old_elicitor['email'] = FAKE_EMAIL
old_elicitor['role'] = u'administrator'
old_form['elicitor'] = old_elicitor
auxiliary_resources.setdefault('users', []).append(old_elicitor)
# Status. Must be 'tested' or 'requires testing'. LingSync's
# validationStatus is similar. A common value is 'Checked'. The LingSync
# value 'toBeChecked' is taken here to mean that the OLD's value should be
# 'requires testing'.
# We ignore the validation status 'Deleted' here and use it later on to
# mark the form/datum as deleted.j
# TODO: does validationStatus=Deleted mean that the datum has been deleted?
# Ask Gina.
if ls_validationStatus:
if ls_validationStatus == 'toBeChecked':
old_form['status'] = 'requires testing'
elif ls_validationStatus not in ['Checked', 'Deleted', 'Deleted, Checked']:
old_tags.append({
'name': u'validation status: %s' % ls_validationStatus,
'description': u''
})
"""
Commenting this out for now: it's saved in comments field anyway.
warnings['docspecific'].append(u'Unrecognized validationStatus'
u' \u2018%s\u2019 in datum %s' % (ls_validationStatus,
datum_id))
"""
# Transcription. Not empty, max 510 chars. From LingSync utterance.
ls_utterance_too_long = False
if not ls_utterance:
old_transcription = u'PLACEHOLDER'
# warnings['docspecific'].append(u'Datum %s has no utterance value; the form generated'
# ' from it has "PLACEHOLDER" as its transcription value.' % datum_id)
elif len(ls_utterance) > 510:
ls_utterance_too_long = True
if not QUIET:
OVERFLOWS.add(len(ls_utterance))
warnings['docspecific'].append('The utterance "%s" of datum %s is too long (%d chars) and will be'
' truncated.' % (ls_utterance, datum_id, len(ls_utterance)))
old_transcription = ls_utterance[:510]
else:
old_transcription = ls_utterance
old_form['transcription'] = old_transcription
# Morpheme Break. Max 510 chars. From LingSync morphemes.
ls_morphemes_too_long = False
# The idiosyncratic "morpheme" field trumps the standard "morpheme" one.
# I've only seen this field rarely, but when it is present it appears to
# contain more information than the "morphemes", hence the trump.
ls_morpheme = get_val_from_datum_fields('morpheme', datum_fields)
if ls_morpheme:
ls_morphemes = ls_morpheme
if ls_morphemes:
if len(ls_morphemes) > 510:
ls_morphemes_too_long = True
if not QUIET:
OVERFLOWS.add(len(ls_morphemes))
warnings['docspecific'].append('The morphemes "%s" of datum %s is too long and'
' will be truncated.' % (ls_morphemes, datum_id))
old_form['morpheme_break'] = ls_morphemes[:510]
else:
old_form['morpheme_break'] = ls_morphemes
# If we have allomorphs and they are different from our morphemes, then put
# them in the comments field.
if ls_allomorphs and ls_allomorphs != ls_morphemes:
old_comments.append(u'Allomorphs: \u201c%s\u201d' % (
punctuate_period_safe(ls_allomorphs)))
# Phonetic Transcription. Max 510 chars. From the non-standard LingSync
# field "phonetic".
ls_phonetic = get_val_from_datum_fields('phonetic', datum_fields)
ls_phonetic_too_long = False
if ls_phonetic:
if len(ls_phonetic) > 510:
ls_phonetic_too_long = True
if not QUIET:
OVERFLOWS.add(len(ls_phonetic))
warnings['docspecific'].append('The phonetic value "%s" of datum %s'
' is too long and will be truncated.' % (ls_phonetic, datum_id))
old_form['phonetic_transcription'] = ls_phonetic[:510]
else:
old_form['phonetic_transcription'] = ls_phonetic
############################################################################
# START New datum fields from weisskircherisch-firstcorpus
############################################################################
# german field (in weisskircherisch) is an (assumedly Standard German)
# rendering/translation of the Transylvanian Saxon utterance. Putting it in
# the comments field.
ls_german = get_val_from_datum_fields('german', datum_fields)
if ls_german:
old_comments.append(
u'German: \u2018%s\u2019' % punctuate_period_safe(ls_german))
# rudi field (in weisskircherisch) is like the German field, an alternative
# transcription of some kind.
ls_rudi = get_val_from_datum_fields('rudi', datum_fields)
if ls_rudi:
old_comments.append(
u'Rudi: \u2018%s\u2019' % punctuate_period_safe(ls_rudi))
# ursula field (in weisskircherisch) is like the German field, an
# alternative transcription of some kind.
ls_ursula = get_val_from_datum_fields('ursula', datum_fields)
if ls_ursula:
old_comments.append(
u'Ursula: \u2018%s\u2019' % punctuate_period_safe(ls_ursula))
# audioFileName. Ignoring this: no value attested yet.
ls_audioFileName = get_val_from_datum_fields('audioFileName', datum_fields)
if ls_audioFileName:
print 'Datum has ls_audioFileName: ', ls_audioFileName
# begintimehh:mm:ssms. Assumedly the time in an audio/video file that the
# utterance comes from. Format is (hh:)mm:ss.ms, e.t., "49:37.9".
ls_begintimehhmmssms = get_val_from_datum_fields('begintimehh:mm:ssms', datum_fields)
if ls_begintimehhmmssms:
old_comments.append(
u'Begin time (hh:mm:ss.ms): %s' % punctuate_period_safe(
str(ls_begintimehhmmssms)))
# begintimehhMmSsms. Ignoring this: no value attested. Assumedly the time
# in an audio/video file that the utterance comes from.
ls_begintimehhMmSsms = get_val_from_datum_fields(
'begintimehhMmSsms', datum_fields)
if ls_begintimehhMmSsms:
print 'Datum has ls_begintimehhMmSsms: ', ls_begintimehhMmSsms
# endTime. Ignoring this: no value attested.
ls_endTime = get_val_from_datum_fields('endTime', datum_fields)
if ls_endTime:
print 'Datum has ls_endTime: ', ls_endTime
# fields. Ignoring this: no value attested.
ls_fields = get_val_from_datum_fields('fields', datum_fields)
if ls_fields:
print 'Datum has ls_fields: ', ls_fields
# genDach. Ignoring this: no value attested.
ls_genDach = get_val_from_datum_fields('genDach', datum_fields)
if ls_genDach:
print 'Datum has ls_genDach: ', ls_genDach
# modality. Only one token attested ("spoken"). Creating it as an OLD tag.
ls_modality = get_val_from_datum_fields('modality', datum_fields)
if ls_modality:
old_tags.append({
'name': u'modality: %s' % ls_modality,
'description': u''
})
# relatedData. Ignoring this: no value attested.
ls_relatedData = get_val_from_datum_fields('relatedData', datum_fields)
if ls_relatedData:
if (isinstance(ls_relatedData, dict) and
isinstance(ls_relatedData.get('relatedData'), list) and
len(ls_relatedData['relatedData']) == 0):
pass
else:
print 'Datum has ls_relatedData: ', ls_relatedData
# startTime. Ignoring this: no value attested.
ls_startTime = get_val_from_datum_fields('startTime', datum_fields)
if ls_startTime:
print 'Datum has ls_startTime: ', ls_startTime
# tier. Ignoring this: no value attested.
ls_tier = get_val_from_datum_fields('tier', datum_fields)
if ls_tier:
print 'Datum has ls_tier: ', ls_tier
############################################################################
# END New datum fields from weisskircherisch-firstcorpus
############################################################################
# Grammaticality. From LingSync judgement (or from LingSync judgment, note spelling)
# The LingSync field named "judgment" is non-standard. However, in the
# migrations I have made, it appears to contain roughly the same
# information as "judgement", only better formatted. Therefore, if judgment
# exists, we use it instead of judgement.
ls_judgment = get_val_from_datum_fields('judgment', datum_fields)
if ls_judgment:
ls_judgement = ls_judgment
if ls_judgement:
# In some LingSync corpora, users added comments into the
# grammaticality field. We try to detect and repair that here.
if len(ls_judgement) > 3:
warnings['general'].append(u'You have some grammaticality values'
' that contain more than three characters, suggesting that'
' these values are comments and not true grammaticalities. We'
' have tried to separate the true grammaticalities from the'
' comments. Search for "Comment from LingSync judgement field:"'
' in the comments field of forms in the resulting OLD database.')
grammaticality_prefix = []
for char in ls_judgement:
if char in (u'*', u'?', u'#', u'!'):
grammaticality_prefix.append(char)
else:
break
old_form['grammaticality'] = u''.join(grammaticality_prefix)
comment = punctuate_period_safe(u'Comment from LingSync judgement'
u' field: %s' % ls_judgement)
old_comments.append(comment)
else:
old_form['grammaticality'] = ls_judgement
# Morpheme Gloss. Max 510 chars. From LingSync gloss.
ls_gloss_too_long = False
if ls_gloss:
if len(ls_gloss) > 510:
ls_gloss_too_long = True
if not QUIET:
OVERFLOWS.add(len(ls_gloss))
warnings['docspecific'].append('The gloss "%s" of datum %s is too long and'
' will be truncated.' % (ls_gloss, datum_id))
old_form['morpheme_gloss'] = ls_gloss[:510]
else:
old_form['morpheme_gloss'] = ls_gloss
# Translations. Has to be at least one. From LingSync translation. Also
# potentially from LingSync "contextTranslation".
translations = []
if ls_translation:
# old_translation_transcription = ls_translation
translations.append(ls_translation)
# "contextTranslation", an idiosyncratic field. TODO: does this belong in
# OLD translations?
ls_contextTranslation = get_val_from_datum_fields('contextTranslation', datum_fields)
if ls_contextTranslation:
translations.append(ls_contextTranslation)
if ls_another_translation:
translations.append(ls_another_translation)
# This is another "context translation". This is how it's spelt in
# gina-inuktitut.
if ls_context_translation:
translations.append(ls_context_translation)
old_comments.append(u'Context translation: \u201c%s\u201d' % (
punctuate_period_safe(ls_context_translation)))
if len(translations) == 0:
old_form['translations'] = [{
'transcription': u'PLACEHOLDER',
'grammaticality': u''
}]
else:
old_form['translations'] = []
for t in translations:
old_form['translations'].append({
'transcription': t,
'grammaticality': u''
})
# Chapter and verse. Only used in gina-inuktitut, a Genesis translation.
if ls_chapter:
old_comments.append(u'Chapter %s' % punctuate_period_safe(ls_chapter))
if ls_verse:
old_comments.append(u'Verse %s' % punctuate_period_safe(ls_verse))
# Syntax. Max 1023 chars. From LingSync syntacticTreeLatex.
ls_syntacticTreeLatex_too_long = False
if ls_syntacticTreeLatex:
if len(ls_syntacticTreeLatex) > 1023:
ls_syntacticTreeLatex_too_long = True
if not QUIET:
warnings['docspecific'].append('The syntacticTreeLatex "%s" of datum %s is too'
' long and will be truncated.' % (ls_syntacticTreeLatex, datum_id))
old_form['syntax'] = ls_syntacticTreeLatex[:1023]
else:
old_form['syntax'] = ls_syntacticTreeLatex
# Comments.
# This is a text grab-bag of data points. I use this to summarize the
# LingSync datum that this form was derived from. Certain attributes are
# added here only if they were found invalid or too long above.
# All of these values should go into prose in the OLD's form.comments value.
# Datum metadata -> form comments
ls_modifiedByUser = get_val_from_datum_fields('modifiedByUser', datum_fields)
ls_dateModified = doc.get('dateModified')
ls_dateEntered = doc.get('dateEntered')
# We remember the date entered so that we can get the correct sort order for
# forms in collections (since datums are sorted in their sessions according
# to date entered).
if ls_dateEntered:
old_form['date_entered'] = ls_dateEntered
else:
print '%sWarning: no date entered value.%s' % (ANSI_WARNING, ANSI_ENDC)
old_form_creation_metadata = []
if ls_enteredByUser and ls_dateEntered:
old_form_creation_metadata.append(u'This form was created from LingSync'
u' datum %s (in corpus %s), which was created by %s on %s.' % (
datum_id, lingsync_db_name, ls_enteredByUser, ls_dateEntered))
if ls_modifiedByUser and ls_dateModified:
old_form_creation_metadata.append(u'The datum was last modified in'
u' LingSync by %s on %s.' % (ls_modifiedByUser, ls_dateModified))
if date_datum_elicited_unparseable:
date_session_elicited
old_form_creation_metadata.append(u'The datum was elicited on %s.' % (
date_session_elicited))
old_form_creation_metadata = ' '.join(old_form_creation_metadata).strip()
if old_form_creation_metadata:
old_comments.append(old_form_creation_metadata)
# Datum comments field -> form comments
ls_comments = get_val_from_datum_fields('comments', datum_fields)
if ls_comments:
processed_comments, warnings = process_lingsync_comments_val(
ls_comments, warnings)
if processed_comments:
old_comments += processed_comments
# Datum comments attribute -> form comments
ls_comments_attr = doc.get('comments')
if ls_comments_attr:
processed_comments, warnings = process_lingsync_comments_val(
ls_comments_attr, warnings)
if processed_comments:
old_comments += processed_comments
# Datum notes field -> form comments. (Some LingSync corpora have the
# non-standard "notes" field in their datums.)
ls_notes = get_val_from_datum_fields('notes', datum_fields)
if ls_notes:
old_comments.append('LingSync notes: %s' % punctuate_period_safe(ls_notes))
# Datum errored fields -> put them (redundantly) into a paragraph in form
# comments.
# The datum.syntacticCategory string can't be used to specify the OLD
# forms' syntactic_category_string field since that field is read-only. We
# can add it to the comments prose though.
old_form_errored_data = []
ls_syntacticCategory = get_val_from_datum_fields('syntacticCategory',
datum_fields)
if ls_utterance_too_long:
old_form_errored_data.append(u'LingSync datum utterance value without'
u' truncation: \u2018%s\u2019' %
(punctuate_period_safe(ls_utterance),))
if ls_morphemes_too_long:
old_form_errored_data.append(u'LingSync morphemes value without'
u' truncation: \u2018%s\u2019' % (
punctuate_period_safe(ls_morphemes),))
if ls_phonetic_too_long:
old_form_errored_data.append(u'LingSync phonetic value without'
u' truncation: \u2018%s\u2019' % (
punctuate_period_safe(ls_phonetic),))
if ls_gloss_too_long:
old_form_errored_data.append(u'LingSync datum gloss value without'
u' truncation: \u2018%s\u2019' % punctuate_period_safe(ls_gloss))
if ls_syntacticCategory:
old_form_errored_data.append(u'LingSync syntacticCategory value:'
u' \u2018%s\u2019' % ( ls_syntacticCategory))
if ls_syntacticTreeLatex_too_long:
old_form_errored_data.append(u'LingSync datum syntacticTreeLatex value'
u' without truncation: \u2018%s\u2019' % (
punctuate_period_safe(ls_syntacticTreeLatex),))
old_form_errored_data = ' '.join(old_form_errored_data).strip()
if old_form_errored_data:
old_comments.append(old_form_errored_data)
old_comments = u'\n\n'.join(old_comments).strip()
if old_comments:
old_form['comments'] = old_comments
# Process accumulated tags. They can come from various datum values, so we
# add them to the old_form at the end.
old_form['tags'] = old_tags
for tag in old_tags:
auxiliary_resources.setdefault('tags', []).append(tag)
if ls_session:
session_id = ls_session['_id']
old_form['__lingsync_session_id'] = session_id
else:
print '%sWarning: no LingSync session for datum %s.%s' % (
ANSI_WARNING, datum_id, ANSI_ENDC)
old_form['__lingsync_datum_id'] = datum_id
oldobj['old_value'] = old_form
oldobj['old_auxiliary_resources'] = auxiliary_resources
oldobj['warnings'] = warnings
return oldobj
# These are the attributes of a LingSync Datum's AudioVideo object attribute
# that we know about.
# NOTE(review): presumably used (like `known_fields`/`known_attrs` for
# sessions) to warn about unrecognized attrs when processing datum
# audio/video objects -- confirm against the datum-processing code.
known_audio_video_attrs = [
    '_id',
    'dateCreated',
    'URL',
    'api',
    'checksum',
    'dbname',
    'description',
    'fieldDBtype',
    'fileBaseName',
    'filename',
    'mtime',
    'name',
    'pouchname',
    'praatAudioExtension',
    'resultInfo',
    'resultStatus',
    'script',
    'serviceVersion',
    'size',
    'syllablesAndUtterances',
    'textGridInfo',
    'textGridStatus',
    'textgrid',
    'trashed',
    'type',
    'uploadInfo',
    'version',
    'webResultInfo',
    'webResultStatus',
    # New from weisskircherisch corpus
    'uploadStatus'
]
def process_lingsync_session(doc):
    """Process a LingSync session. This will be encoded as an OLD collection.

    Note: the LingSync session fields/attributes dateSEntered and other
    datetime/timestamp entered/modified values are *not* migrated. The OLD
    has its own creation/modification timestamps and these values cannot be
    user-specified.

    :param doc: a LingSync session document (a dict decoded from CouchDB
        JSON).
    :returns: a dict with keys 'originaldoc', 'lingsync_type',
        'old_resource', 'old_value' (the OLD collection), 'warnings' and
        'old_auxiliary_resources' (implicit speakers/users); or ``None``
        when the session is trashed or has no recognizable fields attribute.
    """
    # Pretty-print the session document, for inspection.
    # p(doc)
    session_id = doc['_id']
    # Older session docs store their fields under `sessionFields`; newer
    # ones use `fields`. Try both before giving up.
    session_fields = doc.get('sessionFields')
    if not session_fields:
        session_fields = doc.get('fields')
    if not session_fields:
        print (u'ERROR: unable to find `sessionFields` or `fields` attr in'
            u' session:')
        p(doc)
        return None
    # If a session is marked as deleted, we don't add it to the OLD.
    if doc.get('trashed') == 'deleted':
        return None
    # These are the LingSync session fields that we know how to deal with for
    # to-OLD conversion.
    known_fields = [
        'goal',
        'consultants',
        'dialect',
        'language',
        'dateElicited',
        'user',
        'dateSEntered',
        'participants', # Sometimes a field with this label. I'm ignoring it. It seems to consitently be an empty string.
        'DateSessionEntered', # Sometimes a field with this label. I'm ignoring it. It seems to consistently be an empty string.
        'dateSessionEntered', # Sometimes a field with this label. I'm ignoring it. It seems to be the same date as the date_created datetime, just in a different format.
        # The following idiosyncratic session fields have never been observed with values, so they are being ignored.
        'annotationDate', # I've never seen this field with a value, so I'm ignoring it for now.
        'annotationsFundedBy', # I've never seen this field with a value, so I'm ignoring it for now.
        'attributionInfo', # I've never seen this field with a value, so I'm ignoring it for now.
        'collection', # I've never seen this field with a value, so I'm ignoring it for now.
        'originalTranscriber', # I've never seen this field with a value, so I'm ignoring it for now.
        'publisher', # I've never seen this field with a value, so I'm ignoring it for now.
        # new fields from weisskircherisch
        'device',
        'location',
        'register',
        'source'
    ]
    # Session document attributes (as opposed to labelled fields) that we
    # recognize. Anything else triggers a doc-specific warning below.
    known_attrs = [
        '_id',
        '_rev',
        'collection',
        'comments',
        'dateCreated',
        'dateModified',
        'lastModifiedBy',
        'pouchname',
        'sessionFields',
        'title', # This attr occurs in some sessions. I am ignoring this attr in sessions; I think it holds 'Change this session'.
        'timestamp', # This attr also occurs only sometimes. I am ignoring it. It appears to be the same value as the dateModified.
        'api', # Ignorable
        'dbname', # Ignorable
        'fieldDBtype', # Ignorable
        'fields', # Ignorable
        'modifiedByUser', # NOTE: this should maybe be migrated, but its `value` value is just a string of usernames and its `json.users` value is an array of objects whose only relevant attribute appears to be `username`, which is redundant with the aforementioned `value`. No modification timestamp for each modification.
        'version', # Ignorable
        'dialect', # Note: this attr appears to be valuated when its corresponding field is not, and vice versa.
        'language', # Note: this attr appears to be valuated when its corresponding field is not, and vice versa.
        'trashed', # May be set to 'deleted'. Note: we are ignoring deleted sessions and will not create and then delete OLD corpora to simulate them (as we do with forms).
        'trashedReason' # Optional text describing why the session was deleted.
    ]
    # Fill this with OLD resources that are implicit in the LingSync session.
    auxiliary_resources = {}
    # Add warnings to this.
    warnings = {
        'general': [],
        'docspecific': []
    }
    # Warn about any attributes or field labels we do not recognize.
    for k in doc:
        if k not in known_attrs:
            warnings['docspecific'].append(u'\u2018%s\u2019 not a recognized'
                u' attribute in session %s' % (k, session_id))
    for obj in session_fields:
        if obj['label'] not in known_fields:
            warnings['docspecific'].append(u'\u2018%s\u2019 not a recognized'
                u' label in fields for session %s' % (obj['label'], session_id))
    # This will be our return value.
    oldobj = {
        'originaldoc': doc,
        'lingsync_type': 'session',
        'old_resource': 'collections',
        'old_value': {}, # Valuate this with `old_collection`
        'old_auxiliary_resources': [], # Valuate this with `auxiliary_resources`
        'warnings': []
    }
    # This dict will be used to create the OLD collection.
    old_collection = copy.deepcopy(old_schemata['collection'])
    old_collection['type'] = u'elicitation'
    # Get the values of the LingSync session fields.
    goal = get_val_from_session_fields('goal', session_fields)
    consultants = get_val_from_session_fields('consultants', session_fields)
    date_session_elicited = get_val_from_session_fields('dateElicited',
        session_fields)
    user = get_val_from_session_fields('user', session_fields)
    date_created = doc.get('dateCreated')
    date_modified = doc.get('dateModified')
    last_modified_by = doc.get('lastModifiedBy')
    ############################################################################
    # START New session fields from weisskircherisch corpus
    ############################################################################
    # Device. Ignoring this because I've never seen it not empty.
    ls_device = get_val_from_session_fields('device',
        session_fields)
    if ls_device:
        print 'Session has device: %s' % ls_device
    # Location. Ignoring this because I've never seen it not empty.
    ls_location = get_val_from_session_fields('location',
        session_fields)
    if ls_location:
        print 'Session has location: %s' % ls_location
    # Register. Ignoring this because I've never seen it not empty.
    ls_register = get_val_from_session_fields('register',
        session_fields)
    if ls_register:
        print 'Session has register: %s' % ls_register
    # Source. Ignoring this because I've never seen it not empty.
    # ('XY' and 'Unknown' are treated as empty/placeholder values.)
    ls_source = get_val_from_session_fields('source',
        session_fields)
    if ls_source and ls_source not in ('XY', 'Unknown'):
        print 'Session has source: %s' % ls_source
    ############################################################################
    # END New session fields from weisskircherisch corpus
    ############################################################################
    # Annotation Date. Ignoring this because I've never seen it not empty.
    ls_annotationDate = get_val_from_session_fields('annotationDate',
        session_fields)
    if ls_annotationDate:
        print 'Session has annotationDate: %s' % ls_annotationDate
    # Annotations Funded By. Ignoring this because I've never seen it not empty.
    ls_annotationsFundedBy = get_val_from_session_fields('annotationsFundedBy',
        session_fields)
    if ls_annotationsFundedBy:
        print 'Session has annotationsFundedBy: %s' % ls_annotationsFundedBy
    # Attribution Info. Ignoring this because I've never seen it not empty.
    ls_attributionInfo = get_val_from_session_fields('attributionInfo',
        session_fields)
    if ls_attributionInfo:
        print 'Session has ls_attributionInfo: %s' % ls_attributionInfo
    # Collection. Ignoring this because I've never seen it not empty.
    ls_collection = get_val_from_session_fields('collection', session_fields)
    if ls_collection:
        print 'Session has ls_collection: %s' % ls_collection
    # Original Transcriber. Ignoring this because I've never seen it not empty.
    ls_originalTranscriber = get_val_from_session_fields('originalTranscriber',
        session_fields)
    if ls_originalTranscriber:
        print 'Session has ls_originalTranscriber: %s' % ls_originalTranscriber
    # Publisher. Ignoring this because I've never seen it not empty.
    ls_publisher = get_val_from_session_fields('publisher', session_fields)
    if ls_publisher:
        print 'Session has ls_publisher: %s' % ls_publisher
    # We use the dialect and language fields if present. If not, we try to get
    # these values from the corresponding attributes.
    dialect = get_val_from_session_fields('dialect', session_fields)
    if not dialect:
        dialect = doc.get('dialect')
    language = get_val_from_session_fields('language', session_fields)
    if not language:
        language = doc.get('language')
    # Title. Get the OLD collection's title value.
    # Preference order: goal (truncated to 255 chars if needed), then
    # "Elicitation Session on <date>", then "Elicitation Session <id>".
    if (not goal) or len(goal) == 0:
        if not QUIET:
            warnings['docspecific'].append('Session %s has no goal so its date'
                ' elicited is being used for title of the the OLD collection'
                ' built from it.' % (session_id,))
        if date_session_elicited and len(date_session_elicited) > 0:
            title = u'Elicitation Session on %s' % date_session_elicited
        else:
            if not QUIET:
                warnings['docspecific'].append('Session %s has no date elicited so its id is being'
                    ' used for the title of the OLD collection built from it.' % (
                    session_id,))
            title = u'Elicitation Session %s' % session_id
    elif len(goal) > 255:
        if not QUIET:
            warnings['docspecific'].append('The goal "%s" of session %s is too long'
                ' and will be truncated. However, its non-truncated form is in the'
                ' collection\'s description field.' % (goal, session_id))
        title = goal[:255]
    else:
        title = goal
    old_collection['title'] = title
    # Description.
    # Get the OLD collection's description value. This will contain most of the
    # metadata from the LingSync session. It's redundant, but it's informative,
    # so that's fine.
    description = []
    description.append(u'This collection was created from a LingSync session with id'
        ' %s.' % session_id)
    if goal:
        description.append(u'Goal: %s' % punctuate_period_safe(goal))
    if consultants:
        description.append(u'Consultants: %s' % (
            punctuate_period_safe(consultants),))
    if language:
        description.append(u'Language: %s' % (
            punctuate_period_safe(language),))
    if dialect:
        description.append(u'Dialect: %s' % (
            punctuate_period_safe(dialect),))
    if date_session_elicited:
        description.append(u'Elicitation session date: %s' % (
            punctuate_period_safe(date_session_elicited),))
    creation_metadata = []
    if user and date_created:
        creation_metadata.append(u'Session created in LingSync by %s on %s.' % (
            user, date_created))
    if last_modified_by and date_modified:
        creation_metadata.append(u'Session last modified in LingSync by %s on'
            ' %s.' % (last_modified_by, date_modified))
    creation_metadata = ' '.join(creation_metadata).strip()
    if creation_metadata:
        description.append(creation_metadata)
    ls_comments = doc.get('comments')
    if ls_comments:
        comments_string = lingsync_comments2old_description(doc['comments'])
        if comments_string:
            description.append(comments_string)
    description = u'\n\n'.join(description)
    old_collection['description'] = description
    # Speaker.
    # Use the LingSync `consultants` and `dialect` fields to create one or more
    # OLD speakers. `consultants` is assumed to be whitespace-delimited.
    speakers = []
    if consultants:
        for consultant in consultants.split():
            old_speaker = copy.deepcopy(old_schemata['speaker'])
            # If consultant is all-caps, we assume it is initials where the
            # first char is the first name initial and the remaining char(s)
            # is/are the last name initial(s).
            if consultant.upper() == consultant:
                old_speaker['first_name'] = consultant[0]
                old_speaker['last_name'] = consultant[1:]
            else:
                old_speaker['first_name'] = consultant
                old_speaker['last_name'] = consultant
            if dialect:
                old_speaker['dialect'] = dialect
            speakers.append(old_speaker)
    # An OLD collection has at most one speaker; extra consultants become
    # stand-alone OLD speakers via `auxiliary_resources`.
    if len(speakers) >= 1:
        old_collection['speaker'] = speakers[0]
    if len(speakers) > 1:
        if not QUIET:
            warnings['docspecific'].append('Session %s has more than one consultant listed. Since'
                ' OLD collections only allow one speaker, we are just going to'
                ' associate the first speaker to the OLD collection created form this'
                ' LingSync session. The additional LingSync speakers will still be'
                ' created as OLD speakers, however, and all OLD collections'
                ' will list all of the consultants from their source'
                ' LingSync sessions in their description values.' % session_id)
    for speaker in speakers:
        auxiliary_resources.setdefault('speakers', []).append(speaker)
    # Elicitor.
    # Use the LingSync session's user to valuate the OLD collection's elicitor.
    # We stupidly just set the username, first_name, and lastname attributes to
    # the LingSync user value.
    if user:
        old_elicitor = copy.deepcopy(old_schemata['user'])
        old_elicitor['username'] = user
        old_elicitor['first_name'] = user
        old_elicitor['last_name'] = user
        old_elicitor['email'] = FAKE_EMAIL
        old_elicitor['role'] = u'administrator'
        old_collection['elicitor'] = old_elicitor
        auxiliary_resources.setdefault('users', []).append(old_elicitor)
    # Date elicited.
    # Attempt to create a MM/DD/YYYY string from `date_session_elicited`. At
    # present, we are only recognizing date strings in MM/DD/YYYY and
    # YYYY-MM-DD formats.
    if date_session_elicited:
        date_elicited = None
        try:
            datetime_inst = datetime.datetime.strptime(date_session_elicited,
                '%Y-%m-%d')
        except Exception, e:
            try:
                datetime_inst = datetime.datetime.strptime(
                    date_session_elicited, '%m/%d/%Y')
            except Exception, e:
                datetime_inst = None
                # No point in warning about this. Any unparseable dateElicited
                # values will be put in the text of the description anyway.
                # warnings['docspecific'].append(u'Unable to parse %s to an'
                #     u' OLD-compatible date in MM/DD/YYYY format.' % (
                #     date_session_elicited,))
        if datetime_inst:
            y = datetime_inst.year
            m = datetime_inst.month
            d = datetime_inst.day
            # Re-serialize as zero-padded MM/DD/YYYY, the OLD's expected
            # input format.
            date_elicited = u'%s/%s/%s' % (str(m).zfill(2),
                str(d).zfill(2), str(y))
        else:
            date_elicited = None
        if date_elicited:
            old_collection['date_elicited'] = date_elicited
    # Remember the source session id so datums/forms can be linked back to
    # the collection later in the migration.
    old_collection['__lingsync_session_id'] = session_id
    oldobj['old_value'] = old_collection
    oldobj['old_auxiliary_resources'] = auxiliary_resources
    oldobj['warnings'] = warnings
    if language:
        oldobj['language'] = language
    # return (old_collection, auxiliary_resources, warnings)
    return oldobj
def punctuate_period_safe(string):
    """Add a period at the end of `string` if no sentence-final punctuation is
    there already.

    :param string: the text to punctuate.
    :returns: `string` unchanged if it is empty or already ends in '?', '.'
        or '!'; otherwise `string` with a '.' appended.
    """
    # BUG FIX: guard the empty string -- the original indexed string[-1]
    # unconditionally and raised IndexError on u''.
    if not string:
        return string
    if string[-1] in ('?', '.', '!'):
        return string
    return '%s.' % string
def get_dict_from_session_fields(attr, session_fields):
    """Given a list of dicts (`session_fields`), return the first one whose
    'label' value is `attr`.

    :param attr: the field label to search for.
    :param session_fields: list of field dicts, each with a 'label' key.
    :returns: the first matching dict, or ``None`` when there is no match.
        When multiple matches exist, warn and return the first.
    """
    matches = [f for f in session_fields if f['label'] == attr]
    if not matches:
        # IDIOM FIX: the original compared `len(...) is 0` / `is 1`, i.e.
        # identity comparison on ints, which is a CPython-caching accident;
        # use truthiness/equality instead.
        return None
    if len(matches) > 1:
        print('WARNING: more than one %s in field list!' % attr)
    return matches[0]
def get_val_from_session_fields(attr, session_fields):
    """Look up the session field labelled `attr` and return its 'value'.

    Delegates the search to `get_dict_from_session_fields`; when no usable
    field dict comes back, the (falsy) lookup result is returned unchanged.
    """
    field = get_dict_from_session_fields(attr, session_fields)
    return field.get('value') if field else field
def get_dict_from_datum_fields(attr, datum_fields):
    """Given a list of dicts (`datum_fields`), return the first one whose
    'label' value is `attr`.

    :param attr: the field label to search for.
    :param datum_fields: list of field dicts, each with a 'label' key.
    :returns: the first matching dict, or ``None`` when there is no match.
        When multiple matches carry *conflicting* 'value's, warn and dump
        them, then still return the first.
    """
    matches = [f for f in datum_fields if f['label'] == attr]
    if not matches:
        # IDIOM FIX: the original compared `len(...) is 0` / `is 1`, i.e.
        # identity comparison on ints, which is a CPython-caching accident;
        # use truthiness/equality instead.
        return None
    if len(matches) > 1 and len(set([x.get('value') for x in matches])) != 1:
        # Duplicates that all agree on 'value' are silently tolerated.
        print('WARNING: more than one %s in field list!' % attr)
        p(matches)
    return matches[0]
def get_val_from_datum_fields(attr, datum_fields):
    """Look up the datum field labelled `attr` and return its 'value'.

    Delegates the search to `get_dict_from_datum_fields`; when no usable
    field dict comes back, the (falsy) lookup result is returned unchanged.
    """
    field = get_dict_from_datum_fields(attr, datum_fields)
    return field.get('value') if field else field
def lingsync_comments2old_description(comments_list):
    """Return a LingSync session comments array as a string of text that can be
    put into the description of an OLD collection. Each comment should be its
    own paragraph.

    :param comments_list: expected to be a list of comment dicts, each with
        a 'text' and optionally a 'username' key; any other input yields ''.
    :returns: a unicode string of '\\n\\n'-joined paragraphs, or '' when
        nothing usable is found.
    """
    description = []
    # IDIOM FIX: use isinstance() rather than `type(x) is list/dict`, which
    # needlessly rejects subclasses.
    if isinstance(comments_list, list):
        for comment in comments_list:
            if isinstance(comment, dict) and comment.get('text'):
                if comment.get('username'):
                    description.append('User %s made this comment: %s' % (
                        comment['username'],
                        punctuate_period_safe(comment['text'])))
                else:
                    description.append('Comment: %s' % (
                        punctuate_period_safe(comment['text'])))
    if description:
        return u'\n\n'.join(description)
    return ''
def main():
    """This function performs the conversion.

    Pipeline: get_params -> download -> convert -> upload, followed by a
    report on tag-name fix-ups (`TAGSTOFIX` is a module-level accumulator
    populated during the migration) and interactive cleanup of the work
    directories.
    """
    options, lingsync_config, lingsync_db_name = get_params()
    lingsync_data_fname = download(options, lingsync_config, lingsync_db_name)
    old_data_fname = convert(options, lingsync_data_fname, lingsync_db_name)
    upload(options, old_data_fname)
    # pprint.pprint(TAGSTOFIX)
    # Persist the tag-fix report for offline inspection.
    with open('tag-fix-data.json', 'w') as outfile:
        json.dump(TAGSTOFIX, outfile)
    all_tags_created = set()
    good_tags = set()
    print '\n\n\n'
    for tagorig, meta in TAGSTOFIX.iteritems():
        all_tags_created.add(tagorig)
        # A tag is "good" when the first created tag matches the original
        # name exactly; otherwise show the bad/good split on stdout.
        if tagorig == meta['tags_created'][0]:
            good_tags.add(tagorig)
        else:
            good = [x.strip() for x in tagorig.split(',')]
            print '"%s" =>\n BAD: "%s"\n GOOD: "%s"' % (
                tagorig, '", "'.join(meta['tags_created']),
                '", "'.join(good))
            print
    print '\n\n\n'
    print 'GOOD TAGS'
    print '\n'.join(sorted(list(good_tags)))
    print '\n\n\n'
    cleanup()
def createdirs():
    """Ensure the working directories for this migration exist, creating any
    that are missing in the current folder.
    """
    for target in (LINGSYNC_DIR, OLD_DIR, FILES_DIR):
        if not os.path.isdir(target):
            os.makedirs(target)
def cleanup():
    """Offer to keep the migration work files; unless the user answers
    'y'/'Y', remove the directories holding the downloaded LingSync JSON,
    the converted OLD JSON, and the downloaded media files.
    """
    prompt = ('Save migration files? Enter \'y\'/\'Y\' to save the JSON'
        u' from the LingSync download and the OLD-compatible JSON from the OLD'
        u' download. Otherwise, these files will be destroyed.')
    answer = raw_input(prompt)
    if answer in ('y', 'Y'):
        return
    for dirpath in (LINGSYNC_DIR, OLD_DIR, FILES_DIR):
        if os.path.isdir(dirpath):
            shutil.rmtree(dirpath)
def get_params():
    """Get the parameters and options entered at the command line.

    Return them in their raw form, as well as a config dict that
    `FieldDBClient` can init on and the name of the LingSync corpus.

    :returns: a 3-tuple ``(options, lingsync_config, lingsync_db_name)``.
        Exits the process when a required credential is not supplied.
    """
    createdirs()
    usage = "usage: ./%prog [options]"
    parser = optparse.OptionParser(usage)
    add_optparser_options(parser)
    (options, args) = parser.parse_args()
    lingsync_url = getattr(options, 'ls_url', None)
    lingsync_db_name = lingsync_corpus = getattr(options, 'ls_corpus', None)
    lingsync_username = getattr(options, 'ls_username', None)
    lingsync_password = getattr(options, 'ls_password', None)
    old_url = getattr(options, 'old_url', None)
    old_username = getattr(options, 'old_username', None)
    old_password = getattr(options, 'old_password', None)
    # If the required params haven't been supplied as options, we prompt the
    # user for them.
    # NOTE(review): lingsync_url is counted here but never prompted for
    # below; presumably the ls_url option carries a default -- confirm in
    # add_optparser_options.
    if len(filter(None, [lingsync_url, lingsync_corpus, lingsync_username,
            lingsync_password, old_url, old_username, old_password])) < 7:
        if not lingsync_db_name:
            lingsync_db_name = lingsync_corpus = raw_input(u'%sPlease enter the'
                u' name of the LingSync corpus to migrate:%s ' % (ANSI_WARNING,
                ANSI_ENDC))
            if lingsync_corpus:
                options.ls_corpus = lingsync_corpus
            else:
                sys.exit(u'%sYou must provide a LingSync corpus name.'
                    u' Aborting.%s' % (ANSI_FAIL, ANSI_ENDC))
        if not lingsync_username:
            lingsync_username = raw_input(u'%sPlease enter the username of a'
                u' LingSync user with sufficient privileges to fetch data from'
                u' corpus %s:%s ' % (ANSI_WARNING, lingsync_corpus, ANSI_ENDC))
            if lingsync_username:
                options.ls_username = lingsync_username
            else:
                sys.exit(u'%sYou must provide a LingSync username. Aborting.%s'
                    % (ANSI_FAIL, ANSI_ENDC))
        if not lingsync_password:
            # getpass keeps the password off the terminal echo.
            lingsync_password = getpass.getpass(u'%sPlease enter the password'
                u' for LingSync user %s:%s ' % (ANSI_WARNING, lingsync_username,
                ANSI_ENDC))
            if lingsync_password:
                options.ls_password = lingsync_password
            else:
                sys.exit(u'%sYou must provide the password for your LingSync'
                    u' user. Aborting.%s' % (ANSI_FAIL, ANSI_ENDC))
        if not old_url:
            old_url = raw_input(u'%sPlease enter the URL of the destination'
                u' OLD:%s ' % (ANSI_WARNING, ANSI_ENDC))
            if old_url:
                options.old_url = old_url
            else:
                sys.exit(u'%sYou must provide a destination OLD URL.'
                    u' Aborting.%s' % (ANSI_FAIL, ANSI_ENDC))
        if not old_username:
            old_username = raw_input(u'%sPlease enter the username of an'
                u' OLD user with sufficient privileges to fetch, add, update'
                u' and delete data from the OLD at %s:%s ' % (ANSI_WARNING,
                old_url, ANSI_ENDC))
            if old_username:
                options.old_username = old_username
            else:
                sys.exit(u'%sYou must provide an OLD username. Aborting.%s'
                    % (ANSI_FAIL, ANSI_ENDC))
        if not old_password:
            old_password = getpass.getpass(u'%sPlease enter the password for'
                u' OLD user %s:%s ' % (ANSI_WARNING, old_username,
                ANSI_ENDC))
            if old_password:
                options.old_password = old_password
            else:
                sys.exit(u'%sYou must provide the password for your OLD user.'
                    u' Aborting.%s' % (ANSI_FAIL, ANSI_ENDC))
    # The FieldDBClient class constructor requires a strange dict param where
    # the URLs are split into scheme, host and port.
    parsed_lingsync_url = urlparse.urlparse(lingsync_url)
    parsed_old_url = urlparse.urlparse(old_url)
    lingsync_config = {
        "auth_protocol": parsed_lingsync_url.scheme,
        "auth_host": parsed_lingsync_url.hostname,
        "auth_port": parsed_lingsync_url.port,
        "corpus_protocol": parsed_lingsync_url.scheme,
        "corpus_host": parsed_lingsync_url.hostname,
        "corpus_port": parsed_lingsync_url.port,
        "couch_protocol": parsed_lingsync_url.scheme,
        "couch_host": parsed_lingsync_url.hostname,
        "couch_port": parsed_lingsync_url.port,
        "username": lingsync_username,
        # BUG FIX: the two password entries contained a literal <PASSWORD>
        # redaction placeholder (a syntax error); restore the variable that
        # was clearly intended here.
        "password": lingsync_password,
        "admin_username": lingsync_username,
        "admin_password": lingsync_password,
        "server_code": "local",
        "app_version_when_created": "unknown"
    }
    print('\n%sLingSync-to-OLD Migration.%s' % (ANSI_HEADER, ANSI_ENDC))
    print (u'We are going to move the data in the LingSync corpus %s at %s to'
        u' an OLD database at %s' % (lingsync_db_name, lingsync_url, old_url))
    return (options, lingsync_config, lingsync_db_name)
def download(options, lingsync_config, lingsync_db_name):
    """Step 1: obtain the LingSync JSON data.

    Fetches the corpus JSON unless a previously downloaded copy already
    exists on disk (and --force-download was not given); returns the path
    to the local JSON file, exiting the process on failure.
    """
    print('\n%sStep 1. Download the LingSync data.%s' % (ANSI_HEADER, ANSI_ENDC))
    if options.force_download:
        flush('Downloading the LingSync data...')
        lingsync_data_fname = download_lingsync_json(lingsync_config,
            lingsync_db_name)
    else:
        # Reuse a cached download when one is present on disk.
        lingsync_data_fname = get_lingsync_json_filename(lingsync_db_name)
        if os.path.isfile(lingsync_data_fname):
            print('We already have the LingSync data in %s.' % (
                lingsync_data_fname,))
        else:
            print ('The LingSync data have not been downloaded; downloading them'
                u' now')
            lingsync_data_fname = download_lingsync_json(lingsync_config,
                lingsync_db_name)
    if lingsync_data_fname is None:
        sys.exit('Unable to download the LingSync JSON data.\nAborting.')
    return lingsync_data_fname
def convert(options, lingsync_data_fname, lingsync_db_name):
    """Step 2: Process the LingSync JSON data and write a new JSON file that
    holds an OLD-compatible data structure.

    Returns the path to the converted JSON file, or exits the process when
    conversion fails. With --force-convert the conversion is redone even if
    a converted file already exists; otherwise a cached result is reused.
    """
    print ('\n%sStep 2. Convert the LingSync data to an OLD-compatible'
        u' structure.%s' % (ANSI_HEADER, ANSI_ENDC))
    if options.force_convert:
        flush('Converting the LingSync data to an OLD-compatible format...')
        old_data_fname = lingsync2old(lingsync_data_fname, lingsync_db_name,
            options.force_file_download)
    else:
        old_data_fname = get_old_json_filename(lingsync_db_name)
        if os.path.isfile(old_data_fname):
            print 'We already have the converted OLD data in %s.' % (
                old_data_fname,)
            if options.verbose:
                # Echo any conversion summaries/warnings written by a
                # previous run so the user can review them again.
                path = os.path.join(LINGSYNC_DIR,
                    '%s-summary.txt' % lingsync_db_name)
                if os.path.isfile(path):
                    print open(path).read()
                path = os.path.join(OLD_DIR,
                    '%s-summary.txt' % lingsync_db_name)
                if os.path.isfile(path):
                    print open(path).read()
                path = os.path.join(OLD_DIR,
                    '%s-conversion-warnings.txt' % lingsync_db_name)
                if os.path.isfile(path):
                    print open(path).read()
        else:
            print ('The LingSync data have not yet been converted; doing that'
                u' now.')
            old_data_fname = lingsync2old(lingsync_data_fname,
                lingsync_db_name, options.force_file_download)
    if old_data_fname is None:
        sys.exit('Unable to convert the LingSync JSON data to an OLD-compatible'
            ' format.\nAborting.')
    # NOTE(review): OVERFLOWS is a module-level collection populated during
    # conversion (not visible in this chunk) -- confirm it is always defined
    # before this point.
    print ', '.join(map(str, sorted(list(OVERFLOWS))))
    return old_data_fname
def create_old_application_settings(old_data, c):
    """Create the application settings in `old_data` on the OLD that the client
    `c` is connected to. Return the `relational_map`.

    NOTE(review): despite the docstring above, this function has no return
    statement (it returns None); confirm callers do not rely on a
    `relational_map` return value.
    """
    appsett = old_data['applicationsettings'][0]
    # Only set new grammaticalities if the existing grammaticalities doesn't
    # contain all of the grammaticality values we need.
    # NOTE(review): when the needed values are NOT a subset, the existing
    # grammaticalities are replaced, not unioned with the new ones; confirm
    # that dropping pre-existing values is intended.
    existing_appsett = c.get('applicationsettings')[-1]
    existing_grammaticalities = existing_appsett\
        .get('grammaticalities', u'').split(u',')
    to_add_grammaticalities = appsett.get('grammaticalities', u'').split(u',')
    if set(to_add_grammaticalities).issubset(set(existing_grammaticalities)):
        existing_appsett['grammaticalities'] = \
            u','.join(existing_grammaticalities)
    else:
        existing_appsett['grammaticalities'] = \
            u','.join(to_add_grammaticalities)
    existing_appsett['object_language_name'] = appsett['object_language_name']
    r = c.create('applicationsettings', existing_appsett)
    try:
        assert r['object_language_name'] == appsett['object_language_name']
        print 'Created the OLD application settings.'
    except:
        # Dump the raw server response before aborting so the failure can
        # be diagnosed.
        print r
        sys.exit(u'%sSomething went wrong when attempting to create an OLD'
            u' application settings using the LingSync data. Aborting.%s' % (
            ANSI_FAIL, ANSI_ENDC))
def upload(options, old_data_fname):
    """Step 3: Upload the generated OLD data to the OLD at the specified URL.
    Sub-steps:
    1. Create application settings.
    2. Create users, speakers and tags.
    3. Create files.
    4. Create forms.
    5. Create corpora and collections.

    Exits the process when the converted JSON file cannot be read or when
    login to the OLD fails. Prints a human-readable summary of the created
    and updated resources at the end.
    """
    # Keys will be OLD resource names. Values will be dicts that map LingSync
    # identifiers (i.e., ids, usernames, tagnames) to OLD identifiers (ids).
    relational_map = {}
    print (u'\n%sStep 3. Upload the converted data to the OLD web service.%s' % (
        ANSI_HEADER, ANSI_ENDC))
    # Get converted JSON data.
    try:
        old_data = json.load(open(old_data_fname))
    except:
        sys.exit(u'%sUnable to locate file %s. Aborting.%s' % (ANSI_FAIL,
            old_data_fname, ANSI_ENDC))
    # Get an OLD client.
    old_url = getattr(options, 'old_url', None)
    old_username = getattr(options, 'old_username', None)
    old_password = getattr(options, 'old_password', None)
    lingsync_corpus_name = getattr(options, 'ls_corpus', None)
    c = OLDClient(old_url)
    # Log in to the OLD.
    logged_in = c.login(old_username, old_password)
    if not logged_in:
        sys.exit(u'%sUnable to log in to %s with username %s and password %s.'
            u' Aborting.%s' % (ANSI_FAIL, old_url, old_username, old_password,
            ANSI_ENDC))
    # Create the resources. Order matters: tags/users/speakers/files must
    # exist before the forms that reference them, and forms before the
    # corpora and collections that reference the forms.
    create_old_application_settings(old_data, c)
    relational_map, users_created = create_old_users(old_data, c, old_url,
        relational_map)
    relational_map, speakers_created = create_old_speakers(old_data, c,
        old_url, relational_map)
    relational_map, tags_created = create_old_tags(old_data, c, old_url,
        lingsync_corpus_name, relational_map)
    relational_map, files_created = create_old_files(old_data, c, old_url,
        relational_map)
    relational_map, forms_created = create_old_forms(old_data, c, old_url,
        relational_map)
    relational_map, corpora_created = create_old_corpora(old_data, c, old_url,
        relational_map)
    relational_map, collections_created = create_old_collections(old_data, c,
        old_url, relational_map)
    # Alert the user about the results of the upload.
    # NOTE: from here on `c` is re-bound to integer counts, shadowing the
    # OLD client; the client is no longer needed below.
    print u'\n%sSummary.%s' % (ANSI_HEADER, ANSI_ENDC)
    if users_created.get('created'):
        c = len(users_created['created'])
        print u'%d OLD %s created.' % (c, pluralize_by_count('user', c))
        # NOTE(review): the format string below appears to have lost a %s
        # placeholder to a redaction (\<PASSWORD>): it has fewer slots than
        # arguments and would raise TypeError at runtime -- confirm/restore.
        print (u'%sAll created OLD users are administrators and have the'
            u' password \<PASSWORD>; some may also have the fake email'
            u' address %s.%s' % (ANSI_WARNING, DEFAULT_PASSWORD, FAKE_EMAIL,
            ANSI_ENDC))
    if users_created.get('updated'):
        c = len(users_created['updated'])
        print (u'%d pre-existing and LingSync-matching OLD %s updated or'
            u' left unaltered.' % (c, pluralize_by_count('user', c)))
    if speakers_created.get('created'):
        c = len(speakers_created['created'])
        print u'%d OLD %s created.' % (c, pluralize_by_count('speaker', c))
    if speakers_created.get('updated'):
        c = len(speakers_created['updated'])
        print (u'%d pre-existing and LingSync-matching OLD %s updated or'
            u' left unaltered.' % (c, pluralize_by_count('speaker', c)))
    if tags_created:
        c = len(tags_created)
        print u'%d OLD %s created.' % (c, pluralize_by_count('tag', c))
    if files_created:
        c = len(files_created)
        print u'%d OLD %s created.' % (c, pluralize_by_count('file', c))
    if forms_created.get('created'):
        c = len(forms_created['created'])
        print u'%d OLD %s created.' % (c, pluralize_by_count('form', c))
    if forms_created.get('deleted'):
        c = len(forms_created['deleted'])
        print (u'%d OLD %s created and then deleted (to simulate trashed'
            u' LingSync forms).' % (c, pluralize_by_count('form', c)))
    if corpora_created:
        c = len(corpora_created)
        print u'%d OLD %s created.' % (c, pluralize_by_count('corpus', c))
    if collections_created:
        c = len(collections_created)
        print u'%d OLD %s created.' % (c, pluralize_by_count('collection', c))
def pluralize_by_count(noun, count):
    """Return `noun` untouched when there is exactly one of them;
    otherwise defer to `pluralize` for the plural form.
    """
    return noun if count == 1 else pluralize(noun)
def pluralize(noun):
    """Return the plural of `noun` (extremely domain-specific rule).

    Nouns ending in 'pus' take the Latin '-ora' plural (corpus ->
    corpora); every other noun simply gets an 's' appended.
    """
    if noun.endswith('pus'):
        stem = noun[:-2]
        return u'%sora' % stem
    return u'%ss' % noun
def create_old_collections(old_data, c, old_url, relational_map):
    """Create the collections in `old_data` on the OLD that the client `c` is
    connected to.

    Each OLD collection is generated from a LingSync session; its contents
    are form references gathered from the migrated forms that belonged to
    that session. Returns (relational_map, resources_created) where
    resources_created is a list of new OLD collection ids.
    """
    resources_created = []
    if old_data.get('collections'):
        flush('Creating OLD collections...')
        relational_map.setdefault('collections', {})
        # Get the "migration tag" id.
        migration_tag_id = relational_map.get('tags',
            {}).get(migration_tag_name)
        if not migration_tag_id:
            sys.exit(u'%sFailed to get the OLD id for the migration tag.'
                u' Aborting.%s' % (ANSI_FAIL, ANSI_ENDC))
        # Issue the create (POST) requests.
        for collection in old_data['collections']:
            session_id = collection.get('__lingsync_session_id')
            # Convert arrays of tag objects to arrays of OLD tag ids.
            if collection.get('tags'):
                new_tags = []
                for tag in collection['tags']:
                    tag_id = relational_map.get('tags', {}).get(tag['name'])
                    if tag_id:
                        new_tags.append(tag_id)
                    else:
                        print (u'%sWarning: unable to find id for OLD tag'
                            u' "%s".%s' % (ANSI_WARNING, tag['name'],
                            ANSI_ENDC))
                collection['tags'] = new_tags
            # Convert speaker objects to OLD speaker ids.
            if collection.get('speaker'):
                speakerobj = collection['speaker']
                key = u'%s %s' % (speakerobj['first_name'],
                    speakerobj['last_name'])
                speaker_id = relational_map.get('speakers', {}).get(key)
                if speaker_id:
                    collection['speaker'] = speaker_id
                else:
                    collection['speaker'] = None
                    print (u'%sWarning: unable to find id for OLD speaker'
                        u' "%s".%s' % (ANSI_WARNING, key, ANSI_ENDC))
            # Convert elicitor objects to OLD elicitor ids.
            if collection.get('elicitor'):
                elicitorobj = collection['elicitor']
                key = elicitorobj['username']
                elicitor_id = relational_map.get('users', {}).get(key)
                if elicitor_id:
                    collection['elicitor'] = elicitor_id
                else:
                    collection['elicitor'] = None
                    print (u'%sWarning: unable to find id for OLD elicitor'
                        u' "%s".%s' % (ANSI_WARNING, key, ANSI_ENDC))
            # Get the `contents` value as a bunch of references to form ids.
            # TODO: something is going wrong here. Some OLD collections are
            # being created without any forms in them even though the LingSync
            # sessions that they are derived from do have datums in them.
            contents = []
            for form in old_data.get('forms', []):
                if not form.get('__lingsync_deleted'):
                    form_s_id = form.get('__lingsync_session_id')
                    form_d_id = form['__lingsync_datum_id']
                    if form_s_id == session_id:
                        form_id = relational_map.get('forms', {}).get(form_d_id)
                        if form_id:
                            # Pair with date_entered so the references can
                            # be sorted chronologically below.
                            contents.append((form['date_entered'], form_id))
                        else:
                            print (u'%sWarning: unable to find id for OLD form'
                                u' generated from LingSync datum %s.%s' % (
                                ANSI_WARNING, form['__lingsync_datum_id'],
                                ANSI_ENDC))
            if not contents:
                print '%sWARNING: collection "%s" has no contents.%s' % (
                    ANSI_WARNING, collection['title'], ANSI_ENDC)
            collection['contents'] = u'\n'.join([u'form[%d]' % t[1] for t in
                sorted(contents)])
            # Create the collection on the OLD
            collection['tags'].append(migration_tag_id)
            r = c.create('collections', collection)
            try:
                assert r.get('id')
                relational_map['collections'][session_id] = r['id']
                resources_created.append(r['id'])
            except:
                p(r)
                sys.exit(u'%sFailed to create an OLD collection for the LingSync'
                    u' session \u2018%s\u2019. Aborting.%s' % (ANSI_FAIL,
                    session_id, ANSI_ENDC))
        print 'Done.'
    return (relational_map, resources_created)
def create_old_corpora(old_data, c, old_url, relational_map):
    """Create the corpora in `old_data` on the OLD that the client `c` is
    connected to.

    Each OLD corpus is generated from a LingSync datalist; its content is a
    comma-delimited string of migrated OLD form ids. Returns
    (relational_map, resources_created) where resources_created is a list
    of new OLD corpus ids.
    """
    resources_created = []
    if old_data.get('corpora'):
        flush('Creating OLD corpora...')
        relational_map.setdefault('corpora', {})
        # Get the "migration tag" id.
        migration_tag_id = relational_map.get('tags',
            {}).get(migration_tag_name)
        if not migration_tag_id:
            sys.exit(u'%sFailed to get the OLD id for the migration tag.'
                u' Aborting.%s' % (ANSI_FAIL, ANSI_ENDC))
        # Issue the create (POST) requests.
        for corpus in old_data['corpora']:
            datalist_id = corpus.get('__lingsync_datalist_id')
            datum_ids_array = corpus.get('__lingsync_datalist_datum_ids', [])
            # Convert arrays of tag objects to arrays of OLD tag ids.
            if corpus.get('tags'):
                new_tags = []
                for tag in corpus['tags']:
                    tag_id = relational_map.get('tags', {}).get(tag['name'])
                    if tag_id:
                        new_tags.append(tag_id)
                    else:
                        print (u'%sWarning: unable to find id for OLD tag'
                            u' "%s".%s' % (ANSI_WARNING, tag['name'],
                            ANSI_ENDC))
                corpus['tags'] = new_tags
            # Get the `content` value as a comma-delimited list of form ids.
            content = []
            for d_id in datum_ids_array:
                f_id = relational_map.get('forms', {}).get(d_id)
                if f_id:
                    content.append(f_id)
                else:
                    print (u'%sWarning: unable to find OLD form id'
                        u' corresponding to LingSync datum %s. Corpus %s will not'
                        u' contain all of the data that it did as a datalist in'
                        u' LingSync.%s' % (ANSI_WARNING, d_id, corpus['name'],
                        ANSI_ENDC))
            corpus['content'] = u', '.join([unicode(id) for id in content])
            # Create the corpus on the OLD
            corpus['tags'].append(migration_tag_id)
            r = c.create('corpora', corpus)
            try:
                assert r.get('id')
                relational_map['corpora'][datalist_id] = r['id']
                resources_created.append(r['id'])
            except:
                # Corpus names must be unique on the OLD; retry once with a
                # random suffix before giving up.
                if r.get('errors', {}).get('name') == u'The submitted value for Corpus.name is not unique.':
                    corpus['name'] = '%s-%s' % (corpus['name'], randstr())
                    r = c.create('corpora', corpus)
                try:
                    assert r.get('id')
                    relational_map['corpora'][datalist_id] = r['id']
                    resources_created.append(r['id'])
                except:
                    p(r)
                    sys.exit(u'%sFailed to create an OLD corpus for the LingSync'
                        u' datalist \u2018%s\u2019. Aborting.%s' % (ANSI_FAIL,
                        datalist_id, ANSI_ENDC))
        print 'Done.'
    return (relational_map, resources_created)
def randstr(length=8):
    """Return a random string of `length` ASCII letters (default 8).

    Used to de-duplicate OLD corpus names. Uses `string.ascii_letters`
    rather than the Python-2-only, locale-dependent `string.letters`, so
    the helper also runs under Python 3 with an identical alphabet under
    the default locale.
    """
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))
def fix_morphemes(morphemes):
    """Break up runs of question marks in morpheme_break and morpheme_gloss
    values ('??' becomes '?-?'). Such runs cause Internal Server Errors in
    the OLD.

    A hyphen is inserted between every adjacent pair of question marks;
    the lookahead makes one pass equivalent to repeatedly replacing '??'
    with '?-?' until no pair remains.
    """
    return re.sub(r'\?(?=\?)', '?-', morphemes)
def create_old_forms(old_data, c, old_url, relational_map):
    """Create the forms in `old_data` on the OLD that the client `c` is
    connected to.

    Returns (relational_map, resources_created) where resources_created
    maps 'created'/'deleted' to lists of OLD form ids. Forms flagged
    __lingsync_deleted are created and then immediately deleted so that
    the OLD's trash mirrors LingSync's trashed datums. After creation, a
    second pass rewrites LingSync "similarTo:<datum id>" links in form
    comments into OLD form(id) references.
    """
    resources_created = {
        'created': [],
        'deleted': [],
    }
    if old_data.get('forms'):
        flush('Creating OLD forms...')
        relational_map.setdefault('forms', {})
        # Get the "migration tag" id.
        migration_tag_id = relational_map.get('tags',
            {}).get(migration_tag_name)
        if not migration_tag_id:
            sys.exit(u'%sFailed to get the OLD id for the migration tag.'
                u' Aborting.%s' % (ANSI_FAIL, ANSI_ENDC))
        # Issue the create (POST) requests.
        last_form = None
        for form in old_data['forms']:
            datum_id = form.get('__lingsync_datum_id')
            # Convert arrays of tag objects to arrays of OLD tag ids.
            if form.get('tags'):
                new_tags = []
                for tag in form['tags']:
                    tag_id = relational_map.get('tags', {}).get(tag['name'])
                    if tag_id:
                        new_tags.append(tag_id)
                    else:
                        print (u'%sWarning: unable to find id for OLD tag'
                            u' "%s".%s' % (ANSI_WARNING, tag['name'],
                            ANSI_ENDC))
                form['tags'] = new_tags
            # Convert speaker objects to OLD speaker ids.
            if form.get('speaker'):
                speakerobj = form['speaker']
                key = u'%s %s' % (speakerobj['first_name'],
                    speakerobj['last_name'])
                speaker_id = relational_map.get('speakers', {}).get(key)
                if speaker_id:
                    form['speaker'] = speaker_id
                else:
                    form['speaker'] = None
                    print (u'%sWarning: unable to find id for OLD speaker'
                        u' "%s".%s' % (ANSI_WARNING, key, ANSI_ENDC))
            # Convert elicitor objects to OLD elicitor ids.
            if form.get('elicitor'):
                elicitorobj = form['elicitor']
                key = elicitorobj['username']
                elicitor_id = relational_map.get('users', {}).get(key)
                if elicitor_id:
                    form['elicitor'] = elicitor_id
                else:
                    form['elicitor'] = None
                    print (u'%sWarning: unable to find id for OLD elicitor'
                        u' "%s".%s' % (ANSI_WARNING, key, ANSI_ENDC))
            # Convert arrays of file objects to arrays of OLD file ids.
            if form.get('files'):
                file_id_array = relational_map.get('files', {}).get(datum_id)
                if file_id_array and (type(file_id_array) is type([])):
                    form['files'] = file_id_array
                else:
                    form['files'] = []
                    print (u'%sWarning: unable to get the array of OLD file ids'
                        u' for the OLD form generated from the LingSync datum'
                        u' with id %s.%s' % (ANSI_WARNING, datum_id, ANSI_ENDC))
            # Create the form on the OLD
            form['tags'].append(migration_tag_id)
            form['morpheme_break'] = fix_morphemes(form['morpheme_break'])
            form['morpheme_gloss'] = fix_morphemes(form['morpheme_gloss'])
            try:
                r = c.create('forms', form)
            except requests.exceptions.SSLError:
                # Retry with certificate verification disabled.
                print ('%sWarning: SSLError; probably'
                    ' CERTIFICATE_VERIFY_FAILED.%s' % (ANSI_WARNING, ANSI_ENDC))
                r = c.create('forms', form, False)
            try:
                assert r.get('id')
                form['id'] = r['id']
                resources_created['created'].append(r['id'])
                # We don't want to map datum ids to form ids for
                # trashed/deleted datums/forms.
                if not form.get('__lingsync_deleted'):
                    relational_map['forms'][datum_id] = r['id']
            except Exception, e:
                # This shouldn't happen, but sometimes the grammaticality value
                # isn't recognized by the OLD's application settings. If so,
                # remove it and print a warning for the user to fix it later.
                if r.get('errors', {}).get('grammaticality') == u'The grammaticality submitted does not match any of the available options.':
                    old_grammaticality = form['grammaticality']
                    form['grammaticality'] = u''
                    r = c.create('forms', form, False)
                    try:
                        assert r.get('id')
                        form['id'] = r['id']
                        resources_created['created'].append(r['id'])
                        # We don't want to map datum ids to form ids for
                        # trashed/deleted datums/forms.
                        if not form.get('__lingsync_deleted'):
                            relational_map['forms'][datum_id] = r['id']
                        print ('WARNING: OLD form %d should have the'
                            ' grammaticality value `%s`; however, that value was'
                            ' not permitted so we created it with no'
                            ' grammaticality value (i.e., as grammatical). Please'
                            ' fix manually.' % (form['id'], old_grammaticality))
                    except Exception, e:
                        # Dump the failing form (and the previous, successful
                        # one) to help diagnose server-side 500s.
                        p(r)
                        print e
                        if r.get('error') == u'Internal Server Error':
                            print '\n\nInternal Server Error when trying to create this form:'
                            p(form)
                            print 'No error when trying to create this form:'
                            p(last_form)
                            print '\n\n\n'
                        else:
                            sys.exit(u'%sFailed to create an OLD form for the LingSync'
                                u' datum \u2018%s\u2019. Aborting.%s' % (ANSI_FAIL,
                                datum_id, ANSI_ENDC))
                else:
                    p(r)
                    print e
                    if r.get('error') == u'Internal Server Error':
                        print '\n\nInternal Server Error when trying to create this form:'
                        p(form)
                        print 'No error when trying to create this form:'
                        p(last_form)
                        print '\n\n\n'
                    else:
                        sys.exit(u'%sFailed to create an OLD form for the LingSync'
                            u' datum \u2018%s\u2019. Aborting.%s' % (ANSI_FAIL,
                            datum_id, ANSI_ENDC))
            # Delete migrated OLD forms that were previously trashed in
            # LingSync.
            if form.get('__lingsync_deleted'):
                r = c.delete('forms/%s' % r['id'], {})
                try:
                    assert r.get('id')
                    resources_created['deleted'].append(r['id'])
                except:
                    p(r)
                    sys.exit(u'%sFailed to delete on the OLD the trashed'
                        u' LingSync form %s that was migrated.%s' % (ANSI_FAIL,
                        datum_id, ANSI_ENDC))
            last_form = form
        # If the form has "Links: " in it, then we convert the LingSync ids to
        # OLD id references.
        # The LingSync `links` field appears to consistently be a string of
        # comma-separated expressions of the form "similarTo:4f868ba9a79e57479ddbe4f62ae671c8"
        # where the string after the colon is a datum id.
        patt = re.compile('similarTo:([a-f0-9]{32})')
        def fix(m):
            # re.sub callback: replace a LingSync datum reference with an
            # OLD form reference when that datum was migrated.
            datum_id = m.group(1)
            print 'in fix'
            if relational_map:
                print 'we have relational map'
                form_id = relational_map['forms'].get(datum_id)
                if form_id:
                    return 'form(%d)' % form_id
                else:
                    return 'similar to LingSync datum %s' % datum_id
        for form in old_data['forms']:
            if u'Links: ' in form['comments']:
                print 'requesting form for linking ...'
                form_r = c.get('forms/%d' % form['id'])
                print 'form_r is '
                print form_r
                if form_r.get('error'):
                    print ('Form %d has LingSync links but we could not retrieve'
                        ' it.' % form['id'])
                    continue
                form_r['comments'] = patt.sub(fix, form_r['comments'])
                # The GET returns related resources as objects, but the
                # update (PUT) expects bare ids, so flatten them first.
                if form_r['elicitation_method']:
                    form_r['elicitation_method'] = form_r['elicitation_method']['id']
                if form_r['syntactic_category']:
                    form_r['syntactic_category'] = form_r['syntactic_category']['id']
                if form_r['speaker']:
                    form_r['speaker'] = form_r['speaker']['id']
                if form_r['elicitor']:
                    form_r['elicitor'] = form_r['elicitor']['id']
                if form_r['verifier']:
                    form_r['verifier'] = form_r['verifier']['id']
                if form_r['source']:
                    form_r['source'] = form_r['source']['id']
                if form_r['tags']:
                    form_r['tags'] = [t['id'] for t in form_r['tags']]
                if form_r['files']:
                    form_r['files'] = [t['id'] for t in form_r['files']]
                if form_r['date_elicited']:
                    # Convert ISO yyyy-mm-dd to the mm/dd/yyyy the OLD
                    # expects on input.
                    x = form_r['date_elicited']
                    if len(x.split('-')) == 3:
                        y, m, d = x.split('-')
                        form_r['date_elicited'] = u'%s/%s/%s' % (m, d, y)
                r = c.update('forms/%d' % form_r['id'], form_r)
        print 'Done.'
    return (relational_map, resources_created)
def create_old_files(old_data, c, old_url, relational_map):
    """Create the files in `old_data` on the OLD that the client `c` is
    connected to.

    Files are read from their previously-downloaded local path and sent
    base64-encoded. Returns (relational_map, resources_created);
    relational_map['files'] maps a LingSync datum id to the list of OLD
    file ids attached to it.
    """
    resources_created = []
    if old_data.get('files'):
        relational_map.setdefault('files', {})
        flush('Creating OLD files...')
        # Issue the create (POST) requests.
        for file in old_data['files']:
            #p(file)
            path = file.get('__local_file_path')
            if not path:
                print u'%sNo file path.%s' % (ANSI_WARNING, ANSI_ENDC)
                continue
            if not os.path.isfile(path):
                print u'%sNo file at %s.%s' % (ANSI_WARNING, path, ANSI_ENDC)
                continue
            size = os.path.getsize(path)
            # Files bigger than 20MB have to be uploaded using Multipart
            # form-data, not as base64-encoded JSON.
            if size > 20971520:
                # NOTE: app.lingsync.org is failing when I attempt to upload a
                # 50MB file. So I'll have to simulate this case ... TODO
                # Large files are currently skipped entirely.
                print 'PASSING ON MULTIPART FORM DATA!!!!'
                pass
            else:
                with open(path, 'rb') as f:
                    file['base64_encoded_file'] = base64.b64encode(f.read())
                r = c.create('files', file)
                try:
                    assert r.get('id')
                    resources_created.append(r['id'])
                    # Note: we map the LingSync id of the datum that the file
                    # was associated to to a list of OLD file ids. This way,
                    # when we create the OLD forms, we can use their datum ids
                    # to get the list of OLD file ids that should be in their
                    # `files` attribute.
                    relational_map['files']\
                        .setdefault(file['__lingsync_datum_id'], [])\
                        .append(r['id'])
                except:
                    sys.exit(u'%sFailed to create an OLD file \u2018%s\u2019.'
                        u' Aborting.%s' % (ANSI_FAIL, file['filename'],
                        ANSI_ENDC))
        print 'Done.'
    return (relational_map, resources_created)
# These are the MIME types that the OLD currently allows for uploaded files.
# NOTE(review): not referenced in this chunk -- presumably consulted by the
# conversion code that decides which LingSync files to migrate; confirm.
old_allowed_file_types = (
    u'application/pdf',
    u'image/gif',
    u'image/jpeg',
    u'image/png',
    u'audio/mpeg',
    u'audio/ogg',
    u'audio/x-wav',
    u'video/mpeg',
    u'video/mp4',
    u'video/ogg',
    u'video/quicktime',
    u'video/x-ms-wmv'
)
def create_old_tags(old_data, c, old_url, lingsync_corpus_name, relational_map):
"""Create the tags in `old_data` on the OLD that the client `c` is
connected to.
"""
resources_created = []
relational_map.setdefault('tags', {})
flush('Creating OLD tags...')
# Create a tag for this migration
global migration_tag_name
migration_tag_name = u'Migrated from LingSync corpus %s on %s' % (
lingsync_corpus_name, datetime.datetime.utcnow().isoformat())
migration_tag_description = (u'This resource was generated during an'
u' automated migration from the LingSync corpus %s.' % (
lingsync_corpus_name,))
migration_tag = {
'name': migration_tag_name,
'description': migration_tag_description
}
r = c.create('tags', migration_tag)
try:
assert r.get('id')
resources_created.append(r['id'])
relational_map['tags'][r['name']] = r['id']
except:
sys.exit(u'%sFailed to create the migration tag on the OLD.'
u' Aborting.%s' % (ANSI_FAIL, migration_tag['name'], ANSI_ENDC))
if old_data.get('tags'):
tags_to_create = []
tags = old_data.get('tags')
tag_names = [t['name'] for t in tags]
# Retrieve the existing tags from the OLD. This may affect what
# tags we create.
existing_tags = c.get('tags')
existing_tag_names = [t['name'] for t in existing_tags]
# Populate our lists of tags to create and update. If a tag
# already exists, we may just use it instead of creating or even
# updating.
for tag in tags:
if tag['name'] in existing_tag_names:
counterpart = [t for t in existing_tags if
t['name'] == tag['name']][0]
relational_map['tags'][counterpart['name']] = counterpart['id']
else:
tags_to_create.append(tag)
# Issue the create (POST) requests.
for tag in tags_to_create:
r = c.create('tags', tag)
try:
assert r.get('id')
resources_created.append(r['id'])
relational_map['tags'][tag['name']] = r['id']
except:
p(r)
sys.exit(u'%sFailed to create an OLD tag \u2018%s\u2019.'
u' Aborting.%s' % (ANSI_FAIL, tag['name'], ANSI_ENDC))
print 'Done.'
return (relational_map, resources_created)
def create_old_speakers(old_data, c, old_url, relational_map):
"""Create the speakers in `old_data` on the OLD that the client `c` is
connected to.
"""
resources_created = {
'created': [],
'updated': []
}
if old_data.get('speakers'):
flush('Creating OLD speakers...')
relational_map.setdefault('speakers', {})
speakers_to_create = []
speakers_to_update = []
speakers = old_data.get('speakers')
speaker_names = [(s['first_name'], s['last_name']) for s in speakers]
# Retrieve the existing speakers from the OLD. This may affect what
# speakers we create.
existing_speakers = c.get('speakers')
existing_speaker_names = [(s['first_name'], s['last_name']) for s in
existing_speakers]
duplicates = list(set(existing_speaker_names) & set(speaker_names))
ls_speaker_overwrites_old = False
if len(duplicates) > 0:
duplicates_string = u'", "'.join([u'%s %s' % (s[0], s[1]) for s in
duplicates])
response = raw_input(u'%sUpdate existing speakers? The OLD at %s'
u' already contains the speaker(s) "%s". Enter \'y\'/\'Y\' to'
u' update these OLD speakers with the data from LingSync. Enter'
u' \'n\'/\'N\' (or anything else) to use the existing OLD'
u' speakers without modification.%s' % (ANSI_WARNING, old_url,
duplicates_string, ANSI_ENDC))
if response in ['y', 'Y']:
ls_speaker_overwrites_old = True
# Populate our lists of speakers to create and update. If a speaker
# already exists, we may just use it instead of creating or even
# updating.
for speaker in speakers:
if (speaker['first_name'], speaker['last_name']) in \
existing_speaker_names:
counterpart_original = [s for s in existing_speakers if
s['first_name'] == speaker['first_name'] and
s['last_name'] == speaker['last_name']][0]
if ls_speaker_overwrites_old:
counterpart = copy.deepcopy(counterpart_original)
if speaker['dialect'] != counterpart['dialect']:
counterpart['dialect'] = speaker['dialect']
if speaker['page_content'] and \
speaker['page_content'] != counterpart['page_content']:
counterpart['page_content'] = speaker['page_content']
if counterpart_original != counterpart:
speakers_to_update.append(counterpart)
else:
key = u'%s %s' % (counterpart_original['first_name'],
counterpart_original['last_name'])
relational_map['speakers'][key] = counterpart_original['id']
else:
speakers_to_create.append(speaker)
# Issue the create (POST) and update (PUT) requests.
for speaker in speakers_to_create:
if (not speaker['first_name']) or (not speaker['last_name']):
continue
r = c.create('speakers', speaker)
key = u'%s %s' % (speaker['first_name'], speaker['last_name'])
try:
assert r.get('id')
relational_map['speakers'][key] = r['id']
resources_created['created'].append(r['id'])
except:
print r
sys.exit(u'%sFailed to create an OLD speaker \u2018%s\u2019.'
u' Aborting.%s' % (ANSI_FAIL, key, ANSI_ENDC))
for speaker in speakers_to_update:
resources_created['updated'].append(r['id'])
r = c.update('speakers/%s' % speaker['id'], speaker)
key = u'%s %s' % (speaker['first_name'], speaker['last_name'])
if r.get('error') == (u'The update request failed because the'
u' submitted data were not new.'):
relational_map['speakers'][key] = speaker['id']
else:
try:
assert r.get('id')
relational_map['speakers'][key] = speaker['id']
except:
sys.exit(u'%sFailed to update OLD speaker %s'
u' (\u2018%s\u2019). Aborting.%s' % (ANSI_FAIL,
speaker['id'], key, speaker['speakername'], ANSI_ENDC))
print 'Done.'
return (relational_map, resources_created)
def fix_user_name(name):
    """Coerce a LingSync name value to a string, or None if impossible.

    LingSync sometimes stores a dict (with a 'username' key) where a plain
    string is expected. Returns the dict's 'username' value (which may be
    None), the string itself, or None for any other type.

    The original used the Python-2-only `basestring`; `(str, type(u''))`
    covers str/unicode on Python 2 and plain str on Python 3, preserving
    behavior on both.
    """
    if type(name) is dict:
        return name.get('username')
    elif not isinstance(name, (str, type(u''))):
        return None
    else:
        return name
def reconcile_users(users_list):
    """Collapse several user dicts (sharing one username) into one dict.

    For every attribute, the value from the earliest dict that defines it
    wins; attributes present only in later dicts are still included.
    TODO: properly merge conflicting values if a migration ever needs it.
    """
    merged = {}
    for user in users_list:
        for attr, val in user.items():
            if attr not in merged:
                merged[attr] = val
    return merged
def create_old_users(old_data, c, old_url, relational_map):
"""Create the users in `old_data` on the OLD that the client `c` is
connected to.
"""
users_created = {
'created': [],
'updated': [],
}
if old_data.get('users'):
flush('Creating OLD users...')
relational_map.setdefault('users', {})
users_to_create = []
users_to_update = []
users = old_data.get('users')
# LingSync users may have objects/dicts as values for their 'username'
# fields. Therefore we transform these to strings here.
new_users = []
for user in users:
user['username'] = fix_user_name(user['username'])
user['first_name'] = fix_user_name(user['first_name'])
user['last_name'] = fix_user_name(user['last_name'])
if (not user['username']) or (not user['first_name']) or (not user['last_name']):
print 'WARNING: unable to create user ...'
pprint.pprint(user)
continue
else:
new_users.append(user)
users = new_users
# Because of the above processing, we may end up with multiple users
# with the same username. Since the OLD doesn't allow this, we have to
# fix it here.
new_users = []
usersdict = {}
for user in users:
usersdict.setdefault(user['username'], []).append(user)
for username, users_list in usersdict.iteritems():
if len(users_list) == 1:
new_users.append(users_list[0])
else:
new_users.append(reconcile_users(users_list))
users = new_users
# Retrieve the existing users from the OLD. This may affect what users
# we create.
usernames = [u['username'] for u in users]
existing_users = c.get('users')
existing_usernames = filter(None, [u.get('username') for u in
existing_users])
duplicates = list(set(existing_usernames) & set(usernames))
ls_user_overwrites_old = False
if len(duplicates) > 0:
duplicates_string = u'", "'.join(duplicates)
response = raw_input(u'%sUpdate existing users? The OLD at %s'
u' already contains the user(s) "%s". Enter \'y\'/\'Y\' to'
u' update these OLD users with the data from LingSync. Enter'
u' \'n\'/\'N\' (or anything else) to use the existing OLD users'
u' without modification.%s' % (ANSI_WARNING, old_url,
duplicates_string, ANSI_ENDC))
if response in ['y', 'Y']:
ls_user_overwrites_old = True
# BEGIN GAP
# Populate our lists of users to create and update. If a user already
# exists, we may just use it instead of creating or even updating.
for user in users:
if user['username'] in existing_usernames:
counterpart_original = [u for u in existing_users if
u['username'] == user['username']][0]
if ls_user_overwrites_old:
counterpart = copy.deepcopy(counterpart_original)
# Don't change an existing user's password
# counterpart['password'] = user['password']
# counterpart['password_confirm'] = user['password']
if user['first_name'] != counterpart['username']:
counterpart['first_name'] = user['first_name']
if user['last_name'] != counterpart['username']:
counterpart['last_name'] = user['last_name']
if user['email'] != FAKE_EMAIL:
counterpart['email'] = user['email']
if user['affiliation']:
counterpart['affiliation'] = user['affiliation']
counterpart['role'] = user['role']
if user['page_content']:
counterpart['page_content'] = user['page_content']
counterpart['password'] = u''
counterpart['password_confirm'] = u''
users_to_update.append(counterpart)
else:
relational_map['users'][counterpart_original['username']] = \
counterpart_original['id']
else:
user['password'] = <PASSWORD>
user['password_confirm'] = <PASSWORD>
# If the LingSync username is OLD-invalid, we make it valid
# here.
p = re.compile('[^\w]+')
if p.search(user['username']):
print u'WARNING: username %s is OLD-invalid.' % user['username']
new_username = []
for char in user['username']:
if not p.search(char):
new_username.append(char)
new_username = u''.join(new_username)
if new_username:
user['__original_username'] = user['username']
user['username'] = new_username
print (u'%sWarning: we have changed the LingSync'
u' username %s to the OLD-valid username %s.%s' % (
ANSI_WARNING, user['__original_username'],
user['username'], ANSI_ENDC))
else:
sys.exit(u'%sError: unable to create a valid OLD'
u' username for LingSync user with username %s.%s' % (
ANSI_FAIL, user['username'], ANSI_ENDC))
users_to_create.append(user)
# print 'Users to create:'
# pprint.pprint([u['username'] for u in users_to_create])
# Issue the create (POST) and update (PUT) requests.
for user in users_to_create:
r = c.create('users', user)
try:
assert r.get('id')
if user.get('__original_username'):
key = user['__original_username']
else:
key = user['username']
relational_map['users'][key] = r['id']
users_created['created'].append(key)
except:
print 'failed to create this user'
pprint.pprint(user)
pprint.pprint(r)
sys.exit(u'%sFailed to create an OLD user with username'
u' \u2018%s\u2019. Aborting.%s' % (ANSI_FAIL,
user['username'], ANSI_ENDC))
# END GAP
for user in users_to_update:
r = c.update('users/%s' % user['id'], user)
users_created['updated'].append(user['username'])
if r.get('error') == (u'The update request failed because the'
u' submitted data were not new.'):
relational_map['users'][user['username']] = user['id']
else:
try:
assert r.get('id')
relational_map['users'][user['username']] = user['id']
except:
p(r)
sys.exit(u'%sFailed to update OLD user %s (\u2018%s\u2019).'
u' Aborting.%s' % (ANSI_FAIL, user['id'], user['username'],
ANSI_ENDC))
print 'Done.'
return (relational_map, users_created)
# Script entry point: run the interactive LingSync-to-OLD migration pipeline
# (main is defined elsewhere in this file).
if __name__ == '__main__':
    main()
|
# coding: utf-8
# # 欢迎来到线性回归项目
#
# 若项目中的题目有困难没完成也没关系,我们鼓励你带着问题提交项目,评审人会给予你诸多帮助。
#
# 所有选做题都可以不做,不影响项目通过。如果你做了,那么项目评审会帮你批改,也会因为选做部分做错而判定为不通过。
#
# 其中非代码题可以提交手写后扫描的 pdf 文件,或使用 Latex 在文档中直接回答。
# ### 目录:
# [1 矩阵运算](#1-矩阵运算)
# [2 Gaussian Jordan 消元法](#2-Gaussian-Jordan-消元法)
# [3 线性回归](#3-线性回归)
# In[40]:
# Pick any integer you like; a fixed seed keeps the results reproducible.
seed = 99
# # 1 矩阵运算
#
# ## 1.1 创建一个 4*4 的单位矩阵
# In[41]:
# This project is designed to familiarize you with Python lists and linear
# algebra. You may not call NumPy or any related scientific-computing
# library for this assignment.
# Matrices must be represented uniformly as 2-D lists, as follows:
A = [[1,2,3],
     [2,3,3],
     [1,2,5]]
B = [[1,2,3,5],
     [2,3,3,5],
     [1,2,5,1]]
# Vectors are also represented as 2-D (single-column) lists
C = [[1],
     [2],
     [3]]
# TODO: create a 4*4 identity matrix
I = [[1,0,0,0],
     [0,1,0,0],
     [0,0,1,0],
     [0,0,0,1]]
# ## 1.2 返回矩阵的行数和列数
# In[42]:
# TODO 返回矩阵的行数和列数
def shape(M):
    """Return (row count, column count) of the 2-D list matrix M."""
    rows = len(M)
    cols = len(M[0])
    return rows, cols
# Sample 3x4 matrix used to sanity-check shape().
M = [[1,2,3,5],
     [2,3,3,5],
     [1,2,5,1]]
# Fix: `print shape(M)` is Python-2-only statement syntax (SyntaxError on
# Python 3). The parenthesized call form prints the same tuple on both.
print(shape(M))
# In[43]:
# 运行以下代码测试你的 shape 函数
get_ipython().magic(u'run -i -e test.py LinearRegressionTestCase.test_shape')
# ## 1.3 每个元素四舍五入到特定小数数位
# In[44]:
# TODO 每个元素四舍五入到特定小数数位
# 直接修改参数矩阵,无返回值
def matxRound(M, decPts=4):
    """Round every entry of matrix M to decPts decimal places, in place.

    Returns None (mutator convention).
    """
    for row in M:
        for col, value in enumerate(row):
            row[col] = round(value, decPts)
# In[45]:
# 运行以下代码测试你的 matxRound 函数
get_ipython().magic(u'run -i -e test.py LinearRegressionTestCase.test_matxRound')
# ## 1.4 计算矩阵的转置
# In[46]:
# TODO 计算矩阵的转置
def transpose(M):
    """Return the transpose of matrix M as a new 2-D list (M is unchanged)."""
    return [list(column) for column in zip(*M)]
# In[47]:
# 运行以下代码测试你的 transpose 函数
get_ipython().magic(u'run -i -e test.py LinearRegressionTestCase.test_transpose')
# ## 1.5 计算矩阵乘法 AB
# In[48]:
# TODO 计算矩阵乘法 AB,如果无法相乘则raise ValueError
def matxMultiply(A, B):
    """Return the matrix product AB of two 2-D list matrices.

    Args:
        A: matrix of size r x k (list of rows)
        B: matrix of size k x c (list of rows)
    Returns:
        New r x c matrix AB.
    Raises:
        ValueError: when A's column count differs from B's row count.

    Cleanup: the original wrapped the body in a try/except that only
    re-raised the ValueError it had itself thrown, and shadowed the loop
    bound `p` with the loop variable; both removed.
    """
    rows_a, cols_a = len(A), len(A[0])
    rows_b, cols_b = len(B), len(B[0])
    if cols_a != rows_b:
        raise ValueError("Matrix A's column number doesn't equal to Matrix b's row number")
    # AB[r][c] is the dot product of row r of A with column c of B.
    AB = [[0] * cols_b for _ in range(rows_a)]
    for r in range(rows_a):
        for c in range(cols_b):
            AB[r][c] = sum(A[r][k] * B[k][c] for k in range(cols_a))
    return AB
# In[49]:
# 运行以下代码测试你的 matxMultiply 函数
get_ipython().magic(u'run -i -e test.py LinearRegressionTestCase.test_matxMultiply')
# ---
#
# # 2 Gaussian Jordan 消元法
#
# ## 2.1 构造增广矩阵
#
# $ A = \begin{bmatrix}
# a_{11} & a_{12} & ... & a_{1n}\\
# a_{21} & a_{22} & ... & a_{2n}\\
# a_{31} & a_{22} & ... & a_{3n}\\
# ... & ... & ... & ...\\
# a_{n1} & a_{n2} & ... & a_{nn}\\
# \end{bmatrix} , b = \begin{bmatrix}
# b_{1} \\
# b_{2} \\
# b_{3} \\
# ... \\
# b_{n} \\
# \end{bmatrix}$
#
# 返回 $ Ab = \begin{bmatrix}
# a_{11} & a_{12} & ... & a_{1n} & b_{1}\\
# a_{21} & a_{22} & ... & a_{2n} & b_{2}\\
# a_{31} & a_{22} & ... & a_{3n} & b_{3}\\
# ... & ... & ... & ...& ...\\
# a_{n1} & a_{n2} & ... & a_{nn} & b_{n} \end{bmatrix}$
# In[50]:
# TODO 构造增广矩阵,假设A,b行数相同
def augmentMatrix(A, b):
    """Return the augmented matrix [A|b]: each row of A extended with the
    matching row of column vector b. A and b must have the same height."""
    return [row + extra for row, extra in zip(A, b)]
# In[51]:
# 运行以下代码测试你的 augmentMatrix 函数
get_ipython().magic(u'run -i -e test.py LinearRegressionTestCase.test_augmentMatrix')
# ## 2.2 初等行变换
# - 交换两行
# - 把某行乘以一个非零常数
# - 把某行加上另一行的若干倍:
# In[52]:
# TODO r1 <---> r2
# 直接修改参数矩阵,无返回值
def swapRows(M, r1, r2):
    """Exchange rows r1 and r2 of matrix M, in place."""
    temp = M[r1]
    M[r1] = M[r2]
    M[r2] = temp
# In[53]:
# 运行以下代码测试你的 swapRows 函数
get_ipython().magic(u'run -i -e test.py LinearRegressionTestCase.test_swapRows')
# In[54]:
# TODO r1 <--- r1 * scale
# scale为0是非法输入,要求 raise ValueError
# 直接修改参数矩阵,无返回值
def scaleRow(M, r, scale):
    """Multiply row r of matrix M by scale, in place.

    Raises:
        ValueError: if scale is 0 (the row operation would not be invertible).

    Cleanup: the original raised inside a try block whose only handler
    re-raised the same exception; the wrapper added nothing and is removed.
    """
    if scale == 0:
        raise ValueError("Scale can't be 0")
    for i in range(len(M[0])):
        M[r][i] *= scale
# In[55]:
# 运行以下代码测试你的 scaleRow 函数
get_ipython().magic(u'run -i -e test.py LinearRegressionTestCase.test_scaleRow')
# In[56]:
# TODO r1 <--- r1 + r2*scale
# 直接修改参数矩阵,无返回值
def addScaledRow(M, r1, r2, scale):
    """In place: add scale times row r2 of M onto row r1."""
    for col in range(len(M[0])):
        M[r1][col] = M[r1][col] + scale * M[r2][col]
# In[57]:
# 运行以下代码测试你的 addScaledRow 函数
get_ipython().magic(u'run -i -e test.py LinearRegressionTestCase.test_addScaledRow')
# ## 2.3 Gaussian Jordan 消元法求解 Ax = b
# ### 2.3.1 算法
#
# 步骤1 检查A,b是否行数相同
#
# 步骤2 构造增广矩阵Ab
#
# 步骤3 逐列转换Ab为化简行阶梯形矩阵 [中文维基链接](https://zh.wikipedia.org/wiki/%E9%98%B6%E6%A2%AF%E5%BD%A2%E7%9F%A9%E9%98%B5#.E5.8C.96.E7.AE.80.E5.90.8E.E7.9A.84-.7Bzh-hans:.E8.A1.8C.3B_zh-hant:.E5.88.97.3B.7D-.E9.98.B6.E6.A2.AF.E5.BD.A2.E7.9F.A9.E9.98.B5)
#
# 对于Ab的每一列(最后一列除外)
# 当前列为列c
# 寻找列c中 对角线以及对角线以下所有元素(行 c~N)的绝对值的最大值
# 如果绝对值最大值为0
# 那么A为奇异矩阵,返回None (你可以在选做问题2.4中证明为什么这里A一定是奇异矩阵)
# 否则
# 使用第一个行变换,将绝对值最大值所在行交换到对角线元素所在行(行c)
# 使用第二个行变换,将列c的对角线元素缩放为1
# 多次使用第三个行变换,将列c的其他元素消为0
#
# 步骤4 返回Ab的最后一列
#
# **注:** 我们并没有按照常规方法先把矩阵转化为行阶梯形矩阵,再转换为化简行阶梯形矩阵,而是一步到位。如果你熟悉常规方法的话,可以思考一下两者的等价性。
# ### 2.3.2 算法推演
#
# 为了充分了解Gaussian Jordan消元法的计算流程,请根据Gaussian Jordan消元法,分别手动推演矩阵A为***可逆矩阵***,矩阵A为***奇异矩阵***两种情况。
# #### 推演示例
#
#
# $Ab = \begin{bmatrix}
# -7 & 5 & -1 & 1\\
# 1 & -3 & -8 & 1\\
# -10 & -2 & 9 & 1\end{bmatrix}$
#
# $ --> $
# $\begin{bmatrix}
# 1 & \frac{1}{5} & -\frac{9}{10} & -\frac{1}{10}\\
# 0 & -\frac{16}{5} & -\frac{71}{10} & \frac{11}{10}\\
# 0 & \frac{32}{5} & -\frac{73}{10} & \frac{3}{10}\end{bmatrix}$
#
# $ --> $
# $\begin{bmatrix}
# 1 & 0 & -\frac{43}{64} & -\frac{7}{64}\\
# 0 & 1 & -\frac{73}{64} & \frac{3}{64}\\
# 0 & 0 & -\frac{43}{4} & \frac{5}{4}\end{bmatrix}$
#
# $ --> $
# $\begin{bmatrix}
# 1 & 0 & 0 & -\frac{3}{16}\\
# 0 & 1 & 0 & -\frac{59}{688}\\
# 0 & 0 & 1 & -\frac{5}{43}\end{bmatrix}$
#
#
# #### 推演有以下要求:
# 1. 展示每一列的消元结果, 比如3*3的矩阵, 需要写三步
# 2. 用分数来表示
# 3. 分数不能再约分
# 4. 我们已经给出了latex的语法,你只要把零改成你要的数字(或分数)即可
# 5. 可以用[这个页面](http://www.math.odu.edu/~bogacki/cgi-bin/lat.cgi?c=sys)检查你的答案(注意只是答案, 推演步骤两者算法不一致)
#
# _你可以用python的 [fractions](https://docs.python.org/2/library/fractions.html) 模块辅助你的约分_
# #### 分数的输入方法
# (双击这个区域就能看到语法啦)
#
# 示例一: $\frac{n}{m}$
#
# 示例二: $-\frac{a}{b}$
# #### 以下开始你的尝试吧!
# In[58]:
# 不要修改这里!
from helper import *
A = generateMatrix(3,seed,singular=False)
b = np.ones(shape=(3,1),dtype=int) # it doesn't matter
Ab = augmentMatrix(A.tolist(),b.tolist()) # 请确保你的增广矩阵已经写好了
printInMatrixFormat(Ab,padding=3,truncating=0)
# 请按照算法的步骤3,逐步推演***可逆矩阵***的变换。
#
# 在下面列出每一次循环体执行之后的增广矩阵(注意使用[分数语法](#分数的输入方法))
#
# $ Ab = \begin{bmatrix}
# -9 & -7 & -2 & 1 \\
# -1 & -2 & 8 & 1 \\
# -6 & -5 & -9 & 1 \end{bmatrix}$
#
# $ --> \begin{bmatrix}
# 1 & \frac{7}{9} & \frac{2}{9} & -\frac{1}{9} \\
# 0 & -\frac{11}{9} & \frac{74}{9} & \frac{8}{9} \\
# 0 & -\frac{1}{3} & -\frac{23}{3} & \frac{1}{3} \end{bmatrix}$
#
# $ -->\begin{bmatrix}
# 1 & 0 & \frac{60}{11} & \frac{5}{11} \\
# 0 & 1 & -\frac{74}{11} & -\frac{8}{11} \\
# 0 & 0 & -\frac{109}{11} & \frac{1}{11} \end{bmatrix}$
#
# $ --> \begin{bmatrix}
# 1 & 0 & 0 & \frac{55}{109} \\
# 0 & 1 & 0 & -\frac{86}{109} \\
# 0 & 0 & 1 & -\frac{1}{109} \end{bmatrix}$
#
# $...$
# In[59]:
# 不要修改这里!
A = generateMatrix(3,seed,singular=True)
b = np.ones(shape=(3,1),dtype=int)
Ab = augmentMatrix(A.tolist(),b.tolist()) # 请确保你的增广矩阵已经写好了
printInMatrixFormat(Ab,padding=3,truncating=0)
# 请按照算法的步骤3,逐步推演***奇异矩阵***的变换。
#
# 在下面列出每一次循环体执行之后的增广矩阵(注意使用[分数语法](#分数的输入方法))
#
# $ Ab = \begin{bmatrix}
# -9 & -9 & 0 & 1 \\
# 1 & -8 & -9 & 1 \\
# -7 & -10 & -3 & 1 \end{bmatrix}$
#
# $ --> \begin{bmatrix}
# 1 & 1 & 0 & -\frac{1}{9} \\
# 0 & -9 & -9 & \frac{10}{9} \\
# 0 & -3 & -3 & \frac{2}{9} \end{bmatrix}$
#
# $ --> \begin{bmatrix}
# 1 & 0 & -1 & 0 \\
# 0 & 1 & 1 & 0 \\
# 0 & 0 & 0 & 1 \end{bmatrix}$
#
# $...$
# ### 2.3.3 实现 Gaussian Jordan 消元法
# In[60]:
# TODO 实现 方法求解 Ax = b
""" Gaussian Jordan 方法求解 Ax = b.
参数
A: 方阵
b: 列向量
decPts: 四舍五入位数,默认为4
epsilon: 判读是否为0的阈值,默认 1.0e-16
返回列向量 x 使得 Ax = b
返回None,如果 A,b 高度不同
返回None,如果 A 为奇异矩阵
"""
def gj_Solve(A, b, decPts=4, epsilon=1.0e-16):
    """Solve Ax = b by Gauss-Jordan elimination with partial pivoting.

    Args:
        A: square matrix as a 2-D list
        b: column vector as a 2-D list (one-element row per equation)
        decPts: decimal places used to round the solution (default 4)
        epsilon: threshold below which a pivot is treated as zero
    Returns:
        Column vector x (2-D list) with Ax = b, or None when A and b have
        different heights or A is singular.

    Cleanup: the pivot search iterated every row and `continue`d past
    rows above the diagonal, and a redundant `!= 0` guard re-checked a
    pivot already known to exceed epsilon; both simplified.
    """
    # Step 1: A and b must have the same number of rows.
    if len(A) != len(b):
        return None
    # Step 2: build the augmented matrix [A|b].
    Ab = augmentMatrix(A, b)
    n = len(Ab)
    # Step 3: reduce column by column (last column is the right-hand side).
    for c in range(len(Ab[0]) - 1):
        # Partial pivoting: largest |entry| on or below the diagonal in column c
        # (ties resolve to the earliest row, matching the original scan order).
        pivot_row = max(range(c, n), key=lambda i: abs(Ab[i][c]))
        if abs(Ab[pivot_row][c]) < epsilon:
            return None  # no usable pivot: A is singular
        swapRows(Ab, c, pivot_row)
        # Pivot magnitude is >= epsilon > 0 here, so scaling is safe.
        scaleRow(Ab, c, 1.0 / Ab[c][c])
        # Eliminate column c from every other row.
        for i in range(n):
            if i == c:
                continue
            factor = -Ab[i][c]
            if abs(factor) <= epsilon:
                continue  # already (numerically) zero
            addScaledRow(Ab, i, c, factor)
    # Step 4: the last column now holds the solution, rounded for output.
    return [[round(row[-1], decPts)] for row in Ab]
# In[61]:
# 运行以下代码测试你的 gj_Solve 函数
get_ipython().magic(u'run -i -e test.py LinearRegressionTestCase.test_gj_Solve')
# ## (选做) 2.4 算法正确判断了奇异矩阵:
#
# 在算法的步骤3 中,如果发现某一列对角线和对角线以下所有元素都为0,那么则断定这个矩阵为奇异矩阵。
#
# 我们用正式的语言描述这个命题,并证明为真。
#
# 证明下面的命题:
#
# **如果方阵 A 可以被分为4个部分: **
#
# $ A = \begin{bmatrix}
# I & X \\
# Z & Y \\
# \end{bmatrix} , \text{其中 I 为单位矩阵,Z 为全0矩阵,Y 的第一列全0}$,
#
# **那么A为奇异矩阵。**
#
# 提示:从多种角度都可以完成证明
# - 考虑矩阵 Y 和 矩阵 A 的秩
# - 考虑矩阵 Y 和 矩阵 A 的行列式
# - 考虑矩阵 A 的某一列是其他列的线性组合
# TODO 证明:
# # 3 线性回归
# ## 3.1 随机生成样本点
# In[62]:
# 不要修改这里!
get_ipython().magic(u'matplotlib notebook')
from helper import *
X,Y = generatePoints2D(seed)
vs_scatter_2d(X, Y)
# ## 3.2 拟合一条直线
#
# ### 3.2.1 猜测一条直线
# In[63]:
#TODO 请选择最适合的直线 y = mx + b
m1 = 2.3950
b1 = 13.6646
# 不要修改这里!
vs_scatter_2d(X, Y, m1, b1)
# ### 3.2.2 计算平均平方误差 (MSE)
# 我们要编程计算所选直线的平均平方误差(MSE), 即数据集中每个点到直线的Y方向距离的平方的平均数,表达式如下:
# $$
# MSE = \frac{1}{n}\sum_{i=1}^{n}{(y_i - mx_i - b)^2}
# $$
# In[64]:
# TODO 实现以下函数并输出所选直线的MSE
def calculateMSE2D(X,Y,m,b):
    """Return the mean squared error of the line y = m*x + b over the
    paired samples in X and Y.

    Bug fix: the accumulator was re-initialized to 0 inside the loop, so
    only the LAST point's squared residual ever contributed to the result.
    It is now initialized once before the loop.
    """
    n = len(X)
    s_sum = 0
    for i in range(n):
        s_sum += (Y[i] - m * X[i] - b) ** 2
    MSE = 1.0 / n * s_sum
    return MSE
# TODO 检查这里的结果, 如果你上面猜测的直线准确, 这里的输出会在1.5以内
print(calculateMSE2D(X,Y,m1,b1))
# ### 3.2.3 调整参数 $m, b$ 来获得最小的平方平均误差
#
# 你可以调整3.2.1中的参数 $m1,b1$ 让蓝点均匀覆盖在红线周围,然后微调 $m1, b1$ 让MSE最小。
# ## 3.3 (选做) 找到参数 $m, b$ 使得平方平均误差最小
#
# **这一部分需要简单的微积分知识( $ (x^2)' = 2x $ )。因为这是一个线性代数项目,所以设为选做。**
#
# 刚刚我们手动调节参数,尝试找到最小的平方平均误差。下面我们要精确得求解 $m, b$ 使得平方平均误差最小。
#
# 定义目标函数 $E$ 为
# $$
# E = \frac{1}{2}\sum_{i=1}^{n}{(y_i - mx_i - b)^2}
# $$
#
# 因为 $E = \frac{n}{2}MSE$, 所以 $E$ 取到最小值时,$MSE$ 也取到最小值。要找到 $E$ 的最小值,即要找到 $m, b$ 使得 $E$ 相对于 $m$, $E$ 相对于 $b$ 的偏导数等于0.
#
# 因此我们要解下面的方程组。
#
# $$
# \begin{cases}
# \displaystyle
# \frac{\partial E}{\partial m} =0 \\
# \\
# \displaystyle
# \frac{\partial E}{\partial b} =0 \\
# \end{cases}
# $$
#
# ### 3.3.1 计算目标函数相对于参数的导数
# 首先我们计算两个式子左边的值
#
# 证明/计算:
# $$
# \frac{\partial E}{\partial m} = \sum_{i=1}^{n}{-x_i(y_i - mx_i - b)}
# $$
#
# $$
# \frac{\partial E}{\partial b} = \sum_{i=1}^{n}{-(y_i - mx_i - b)}
# $$
# TODO 证明:
# ### 3.3.2 实例推演
#
# 现在我们有了一个二元二次方程组
#
# $$
# \begin{cases}
# \displaystyle
# \sum_{i=1}^{n}{-x_i(y_i - mx_i - b)} =0 \\
# \\
# \displaystyle
# \sum_{i=1}^{n}{-(y_i - mx_i - b)} =0 \\
# \end{cases}
# $$
#
# 为了加强理解,我们用一个实际例子演练。
#
# 我们要用三个点 $(1,1), (2,2), (3,2)$ 来拟合一条直线 y = m*x + b, 请写出
#
# - 目标函数 $E$,
# - 二元二次方程组,
# - 并求解最优参数 $m, b$
# TODO 写出目标函数,方程组和最优参数
# ### 3.3.3 将方程组写成矩阵形式
#
# 我们的二元二次方程组可以用更简洁的矩阵形式表达,将方程组写成矩阵形式更有利于我们使用 Gaussian Jordan 消元法求解。
#
# 请证明
# $$
# \begin{bmatrix}
# \frac{\partial E}{\partial m} \\
# \frac{\partial E}{\partial b}
# \end{bmatrix} = X^TXh - X^TY
# $$
#
# 其中向量 $Y$, 矩阵 $X$ 和 向量 $h$ 分别为 :
# $$
# Y = \begin{bmatrix}
# y_1 \\
# y_2 \\
# ... \\
# y_n
# \end{bmatrix}
# ,
# X = \begin{bmatrix}
# x_1 & 1 \\
# x_2 & 1\\
# ... & ...\\
# x_n & 1 \\
# \end{bmatrix},
# h = \begin{bmatrix}
# m \\
# b \\
# \end{bmatrix}
# $$
# TODO 证明:
# 至此我们知道,通过求解方程 $X^TXh = X^TY$ 来找到最优参数。这个方程十分重要,他有一个名字叫做 **Normal Equation**,也有直观的几何意义。你可以在 [子空间投影](http://open.163.com/movie/2010/11/J/U/M6V0BQC4M_M6V2AJLJU.html) 和 [投影矩阵与最小二乘](http://open.163.com/movie/2010/11/P/U/M6V0BQC4M_M6V2AOJPU.html) 看到更多关于这个方程的内容。
# ### 3.4 求解 $X^TXh = X^TY$
#
# 在3.3 中,我们知道线性回归问题等价于求解 $X^TXh = X^TY$ (如果你选择不做3.3,就勇敢的相信吧,哈哈)
# In[65]:
# TODO 实现线性回归
'''
参数:X, Y 存储着一一对应的横坐标与纵坐标的两个一维数组
返回:线性回归的系数(如上面所说的 m, b)
'''
def linearRegression2D(X,Y):
    """Fit y = m*x + b by solving the normal equation X^T X h = X^T Y.

    Args:
        X, Y: one-dimensional sequences of equal length.
    Returns:
        (m, b) as floats; (0.0, 0.0) when the system could not be solved.
    """
    # Design matrix with a constant column for the intercept term.
    design = [[x, 1] for x in X]
    target = [[y] for y in Y]
    # Normal-equation left- and right-hand sides.
    lhs = matxMultiply(transpose(design), design)
    rhs = matxMultiply(transpose(design), target)
    h = gj_Solve(lhs, rhs, decPts=4, epsilon=1.0e-16)
    m, b = 0.0, 0.0
    if not h:
        return m, b
    if len(h) > 0:
        m = h[0][0]
    if len(h) > 1:
        b = h[1][0]
    return m, b
m2,b2 = linearRegression2D(X,Y)
assert isinstance(m2,float),"m is not a float"
assert isinstance(b2,float),"b is not a float"
print(m2,b2)
# # 请不要修改下面的代码
# m2,b2 = linearRegression2D(X,Y)
# assert isinstance(m2,float),"m is not a float"
# assert isinstance(b2,float),"b is not a float"
# print(m2,b2)
# 你求得的回归结果是什么?
# 请使用运行以下代码将它画出来。
# In[66]:
## 请不要修改下面的代码
vs_scatter_2d(X, Y, m2, b2)
print(calculateMSE2D(X,Y,m2,b2))
# ## Bonus !!!
# 如果你的高斯约当消元法通过了单元测试, 那么它将能够解决多维的回归问题
# 你将会在更高维度考验你的线性回归实现
# In[67]:
# 生成三维的数据点
X_3d, Y_3d = generatePoints3D(seed)
vs_scatter_3d(X_3d, Y_3d)
# 你的线性回归是否能够对付三维的情况?
# In[68]:
def linearRegression(X,Y):
    """Least-squares fit for points of any dimension via X^T X h = X^T Y.

    Implements the previously-empty stub using the helpers defined above,
    generalizing linearRegression2D to k-dimensional inputs.

    Args:
        X: list of sample points, each an iterable of k coordinates.
        Y: list of target values, one per sample point.
    Returns:
        Flat coefficient list [m1, ..., mk, b] (intercept last), or None
        when the normal equation is singular.
    """
    # Append a constant 1 to every sample so the intercept is a coefficient.
    design = [list(point) + [1] for point in X]
    target = [[y] for y in Y]
    lhs = matxMultiply(transpose(design), design)
    rhs = matxMultiply(transpose(design), target)
    h = gj_Solve(lhs, rhs)
    if not h:
        return None
    return [row[0] for row in h]
# In[69]:
coeff = linearRegression(X_3d, Y_3d)
vs_scatter_3d(X_3d, Y_3d, coeff)
|
# Published Jan 2013
# Author : <NAME>, <EMAIL>
# Multiple contributors : see https://github.com/philippelt/netatmo-api-python
# License : GPL V3
"""
This API provides access to the Netatmo weather station or/and the Welcome camera
This package can be used with Python2 or Python3 applications and do not
require anything else than standard libraries
PythonAPI Netatmo REST data access
coding=utf-8
"""
import warnings
if __name__ == "__main__": warnings.filterwarnings("ignore") # For installation test only
from sys import version_info
from os import getenv
from os.path import expanduser, exists
import platform
import json, time
import imghdr
import warnings
import logging
# Just in case method could change
PYTHON3 = (version_info.major > 2)
# HTTP libraries depends upon Python 2 or 3
if PYTHON3 :
import urllib.parse, urllib.request
else:
from urllib import urlencode
import urllib2
######################## AUTHENTICATION INFORMATION ######################
# To be able to have a program accessing your netatmo data, you have to register your program as
# a Netatmo app in your Netatmo account. All you have to do is to give it a name (whatever) and you will be
# returned a client_id and secret that your app has to supply to access netatmo servers.
# To ease Docker packaging of your application, you can setup your authentication parameters through env variables
# Authentication use :
# 1 - Values hard coded in the library
# 2 - The .netatmo.credentials file in JSON format in your home directory
# 3 - Values defined in environment variables : CLIENT_ID, CLIENT_SECRET, USERNAME, PASSWORD
# Note: The USERNAME environment variable may interfer with the envvar used by Windows for login name
# if you have this issue, do not forget to "unset USERNAME" before running your program
# Each level override values defined in the previous level. You could define CLIENT_ID and CLIENT_SECRET hard coded in the library
# and username/password in .netatmo.credentials or environment variables
# 1 : Embedded credentials
cred = { # You can hard code authentication information in the following lines
"CLIENT_ID" : "", # Your client ID from Netatmo app registration at http://dev.netatmo.com/dev/listapps
"CLIENT_SECRET" : "", # Your client app secret ' '
"USERNAME" : "", # Your netatmo account username
"PASSWORD" : "" # Your netatmo account password
}
# Other authentication setup management (optionals)
CREDENTIALS = expanduser("~/.netatmo.credentials")
def getParameter(key, default):
    """Return the environment variable `key`, or default[key] when unset."""
    fallback = default[key]
    return getenv(key, fallback)
# 2 : Override hard coded values with credentials file if any
if exists(CREDENTIALS) :
with open(CREDENTIALS, "r") as f:
cred.update({k.upper():v for k,v in json.loads(f.read()).items()})
# 3 : Override final value with content of env variables if defined
# Warning, for Windows user, USERNAME contains by default the windows logged user name
# This usually lead to an authentication error
if platform.system() == "Windows" and getenv("USERNAME", None):
warnings.warn("You are running on Windows and the USERNAME env var is set. " \
"Be sure this env var contains Your Netatmo username " \
"or clear it with <SET USERNAME=> before running your program\n", RuntimeWarning, stacklevel=3)
_CLIENT_ID = getParameter("CLIENT_ID", cred)
_CLIENT_SECRET = getParameter("CLIENT_SECRET", cred)
_USERNAME = getParameter("USERNAME", cred)
_PASSWORD = getParameter("PASSWORD", cred)
#########################################################################
# Common definitions
_BASE_URL = "https://api.netatmo.com/"
_AUTH_REQ = _BASE_URL + "oauth2/token"
_GETMEASURE_REQ = _BASE_URL + "api/getmeasure"
_GETSTATIONDATA_REQ = _BASE_URL + "api/getstationsdata"
_GETTHERMOSTATDATA_REQ = _BASE_URL + "api/getthermostatsdata"
_GETHOMEDATA_REQ = _BASE_URL + "api/gethomedata"
_GETCAMERAPICTURE_REQ = _BASE_URL + "api/getcamerapicture"
_GETEVENTSUNTIL_REQ = _BASE_URL + "api/geteventsuntil"
#TODO# Undocumented (but would be very usefull) API : Access currently forbidden (403)
_POST_UPDATE_HOME_REQ = _BASE_URL + "/api/updatehome"
# For presence setting (POST BODY):
# _PRES_BODY_REC_SET = "home_id=%s&presence_settings[presence_record_%s]=%s" # (HomeId, DetectionKind, DetectionSetup.index)
_PRES_DETECTION_KIND = ("humans", "animals", "vehicles", "movements")
_PRES_DETECTION_SETUP = ("ignore", "record", "record & notify")
# _PRES_BODY_ALERT_TIME = "home_id=%s&presence_settings[presence_notify_%s]=%s" # (HomeID, "from"|"to", "hh:mm")
# Regular (documented) commands (both cameras)
_PRES_CDE_GET_SNAP = "/live/snapshot_720.jpg"
#TODO# Undocumented (taken from https://github.com/KiboOst/php-NetatmoCameraAPI/blob/master/class/NetatmoCameraAPI.php)
# Work with local_url only (undocumented scope control probably)
# For Presence camera
_PRES_CDE_GET_LIGHT = "/command/floodlight_get_config"
# Not working yet, probably due to scope restriction
#_PRES_CDE_SET_LIGHT = "/command/floodlight_set_config?config=mode:%s" # "auto"|"on"|"off"
# For all cameras
_CAM_CHANGE_STATUS = "/command/changestatus?status=%s" # "on"|"off"
# Not working yet
#_CAM_FTP_ACTIVE = "/command/ftp_set_config?config=on_off:%s" # "on"|"off"
# UNITS used by Netatmo services
UNITS = {
"unit" : {
0: "metric",
1: "imperial"
},
"windunit" : {
0: "kph",
1: "mph",
2: "ms",
3: "beaufort",
4: "knot"
},
"pressureunit" : {
0: "mbar",
1: "inHg",
2: "mmHg"
}
}
# Logger context
logger = logging.getLogger("lnetatmo")
# Raised when the account exposes no device of the requested kind.
class NoDevice( Exception ):
    pass
# Raised when the requested home name/id is unknown to the account.
class NoHome( Exception ):
    pass
# Raised when the OAuth2 authentication request is rejected.
class AuthFailure( Exception ):
    pass
class ClientAuth:
    """
    Authenticate against the Netatmo OAuth2 token endpoint and expose a
    valid access token through the `accessToken` property, renewing it
    transparently with the refresh token once it has expired.

    Args:
        clientId (str): application client id from dev.netatmo.com
        clientSecret (str): application secret key from dev.netatmo.com
        username (str): Netatmo account username
        password (str): Netatmo account password
        scope (str): space-separated OAuth scopes requested, e.g.
            'read_station read_camera'
    """
    def __init__(self, clientId=_CLIENT_ID,
                       clientSecret=_CLIENT_SECRET,
                       username=_USERNAME,
                       password=_PASSWORD,
                       scope="read_station read_camera access_camera write_camera " \
                             "read_presence access_presence write_presence read_thermostat write_thermostat"):
        authPayload = {
                "grant_type" : "password",
                "client_id" : clientId,
                "client_secret" : clientSecret,
                "username" : username,
                "password" : password,
                "scope" : scope
                }
        resp = postRequest(_AUTH_REQ, authPayload)
        if not resp:
            raise AuthFailure("Authentication request rejected")
        self._clientId = clientId
        self._clientSecret = clientSecret
        self._accessToken = resp['access_token']
        self.refreshToken = resp['refresh_token']
        self._scope = resp['scope']
        # Absolute expiry time (epoch seconds); the API returns a relative TTL.
        self.expiration = int(resp['expire_in'] + time.time())

    @property
    def accessToken(self):
        # Renew through the refresh-token flow once the current token expired.
        if self.expiration < time.time():
            renewPayload = {
                    "grant_type" : "refresh_token",
                    "refresh_token" : self.refreshToken,
                    "client_id" : self._clientId,
                    "client_secret" : self._clientSecret
                    }
            resp = postRequest(_AUTH_REQ, renewPayload)
            self._accessToken = resp['access_token']
            self.refreshToken = resp['refresh_token']
            self.expiration = int(resp['expire_in'] + time.time())
        return self._accessToken
class User:
    """
    Basic information about the user account (deprecated upstream).

    Args:
        authData (ClientAuth): Authentication information with a working access Token
    """
    # NOTE: this warning fires once, at class-definition (import) time,
    # exactly as in the original implementation.
    warnings.warn("The 'User' class is no longer maintained by Netatmo",
            DeprecationWarning )
    def __init__(self, authData):
        requestBody = {
            "access_token" : authData.accessToken
            }
        resp = postRequest(_GETSTATIONDATA_REQ, requestBody)
        self.rawData = resp['body']
        self.devList = self.rawData['devices']
        self.ownerMail = self.rawData['user']['mail']
class UserInfo:
    """
    This class is populated with data from various Netatmo requests to provide
    complimentary data (eg Units for Weatherdata)
    """
    # Attributes are attached dynamically with setattr by the consumers
    # (see WeatherStationData.__init__), hence the empty body.
    pass
class ThermostatData:
    """
    List the Thermostat and temperature modules

    Args:
        authData (clientAuth): Authentication information with a working access Token
        home : Home name or id of the home who's thermostat belongs to
    """
    def __init__(self, authData, home=None):
        # I don't own a thermostat thus I am not able to test the Thermostat support
        warnings.warn("The Thermostat code is not tested due to the lack of test environment.\n" \
                      "As Netatmo is continuously breaking API compatibility, risk that current bindings are wrong is high.\n" \
                      "Please report found issues (https://github.com/philippelt/netatmo-api-python/issues)",
                      RuntimeWarning )
        self.getAuthToken = authData.accessToken
        postParams = {
                "access_token" : self.getAuthToken
                }
        resp = postRequest(_GETTHERMOSTATDATA_REQ, postParams)
        self.rawData = resp['body']['devices']
        if not self.rawData : raise NoDevice("No thermostat available")
        # Only the data of the selected (or single) home is retained.
        self.thermostatData = filter_home_data(self.rawData, home)
        if not self.thermostatData : raise NoHome("No home %s found" % home)
        self.thermostatData['name'] = self.thermostatData['home_name']
        for m in self.thermostatData['modules']:
            m['name'] = m['module_name']
        self.defaultThermostat = self.thermostatData['home_name']
        self.defaultThermostatId = self.thermostatData['_id']
        self.defaultModule = self.thermostatData['modules'][0]

    def getThermostat(self, name=None, tid=None):
        """Return the loaded thermostat data, or None when the requested
        name or id does not match it.

        Bug fix: the previous body compared the literal list ['name'] to
        the name argument (always unequal), then indexed the nonexistent
        attribute self.thermostat on an unreachable line, so it could
        never return a thermostat. It also lacked the `tid` keyword that
        moduleNamesList/getModuleByName pass, which raised TypeError.
        """
        if name is not None and self.thermostatData['name'] != name:
            return None
        if tid is not None and self.defaultThermostatId != tid:
            return None
        return self.thermostatData

    def moduleNamesList(self, name=None, tid=None):
        """Names of the modules attached to the matching thermostat, or None."""
        thermostat = self.getThermostat(name=name, tid=tid)
        return [m['name'] for m in thermostat['modules']] if thermostat else None

    def getModuleByName(self, name, thermostatId=None):
        """Return the module dict with the given name, or None when absent."""
        thermostat = self.getThermostat(tid=thermostatId)
        for m in thermostat['modules']:
            if m['name'] == name: return m
        return None
class WeatherStationData:
    """
    List the Weather Station devices (stations and modules)

    Args:
        authData (ClientAuth): Authentication information with a working access Token
    """
    def __init__(self, authData, home=None, station=None):
        # Snapshot of the (renewable) access token used for every request.
        self.getAuthToken = authData.accessToken
        postParams = {
            "access_token" : self.getAuthToken
        }
        resp = postRequest(_GETSTATIONDATA_REQ, postParams)
        self.rawData = resp['body']['devices']
        # Weather data
        if not self.rawData : raise NoDevice("No weather station in any homes")
        # Stations are no longer in the Netatmo API, keeping them for compatibility
        self.stations = { d['station_name'] : d for d in self.rawData }
        self.homes = { d['home_name'] : d["station_name"] for d in self.rawData }
        # Keeping the old behavior for default station name
        if home and home not in self.homes : raise NoHome("No home with name %s" % home)
        self.default_home = home or list(self.homes.keys())[0]
        if station and station not in self.stations: raise NoDevice("No station with name %s" % station)
        # First station belonging to the default home when none was requested.
        self.default_station = station or [v["station_name"] for k,v in self.stations.items() if v["home_name"] == self.default_home][0]
        self.modules = dict()
        self.default_station_data = self.stationByName(self.default_station)
        if 'modules' in self.default_station_data:
            for m in self.default_station_data['modules']:
                self.modules[ m['_id'] ] = m
        # User data
        userData = resp['body']['user']
        self.user = UserInfo()
        setattr(self.user, "mail", userData['mail'])
        # Translate unit codes into readable names where known (see UNITS).
        for k,v in userData['administrative'].items():
            if k in UNITS:
                setattr(self.user, k, UNITS[k][v])
            else:
                setattr(self.user, k, v)
    def modulesNamesList(self, station=None, home=None):
        # Module names of the default station plus the station's own module name.
        # NOTE(review): stationByName may return None for an unknown station,
        # which would raise TypeError here — confirm callers pass valid names.
        res = [m['module_name'] for m in self.modules.values()]
        res.append(self.stationByName(station)['module_name'])
        return res
    def stationByName(self, station=None):
        # Linear search by display name; defaults to the default station.
        if not station : station = self.default_station
        for i,s in self.stations.items():
            if s['station_name'] == station :
                return self.stations[i]
        return None
    def stationById(self, sid):
        # Direct id lookup; None when unknown.
        return self.stations.get(sid)
    def moduleByName(self, module):
        # Linear search of the default station's modules by display name.
        for m in self.modules:
            mod = self.modules[m]
            if mod['module_name'] == module :
                return mod
        return None
    def moduleById(self, mid):
        # Direct id lookup; None when unknown.
        return self.modules.get(mid)
    def lastData(self, exclude=0):
        # Return the latest dashboard measurements per module, keyed by
        # module name, skipping measurements older than `exclude` seconds.
        s = self.default_station_data
        # Breaking change from Netatmo : dashboard_data no longer available if station lost
        if not s or 'dashboard_data' not in s : return None
        lastD = dict()
        # Define oldest acceptable sensor measure event
        limit = (time.time() - exclude) if exclude else 0
        ds = s['dashboard_data']
        if ds.get('time_utc',limit+10) > limit :
            lastD[s['module_name']] = ds.copy()
            # Expose the measurement timestamp under the friendlier 'When' key.
            lastD[s['module_name']]['When'] = lastD[s['module_name']].pop("time_utc") if 'time_utc' in lastD[s['module_name']] else time.time()
            lastD[s['module_name']]['wifi_status'] = s['wifi_status']
        if 'modules' in s:
            for module in s["modules"]:
                # Skip lost modules that no longer have dashboard data available
                if 'dashboard_data' not in module : continue
                ds = module['dashboard_data']
                if ds.get('time_utc',limit+10) > limit :
                    # If no module_name has been setup, use _id by default
                    if "module_name" not in module : module['module_name'] = module["_id"]
                    lastD[module['module_name']] = ds.copy()
                    lastD[module['module_name']]['When'] = lastD[module['module_name']].pop("time_utc") if 'time_utc' in lastD[module['module_name']] else time.time()
                    # For potential use, add battery and radio coverage information to module data if present
                    for i in ('battery_vp', 'battery_percent', 'rf_status') :
                        if i in module : lastD[module['module_name']][i] = module[i]
        return lastD
    def checkNotUpdated(self, delay=3600):
        # Names of modules whose last measurement is older than `delay` seconds.
        # NOTE(review): lastData() can return None (lost station), which would
        # raise AttributeError on .items() — confirm intended.
        res = self.lastData()
        ret = []
        for mn,v in res.items():
            if time.time()-v['When'] > delay : ret.append(mn)
        return ret if ret else None
    def checkUpdated(self, delay=3600):
        # Names of modules whose last measurement is newer than `delay` seconds.
        res = self.lastData()
        ret = []
        for mn,v in res.items():
            if time.time()-v['When'] < delay : ret.append(mn)
        return ret if ret else None
    def getMeasure(self, device_id, scale, mtype, module_id=None, date_begin=None, date_end=None, limit=None, optimize=False, real_time=False):
        # Thin wrapper around the /api/getmeasure endpoint; optional arguments
        # are only sent when supplied (the API applies its own defaults).
        postParams = { "access_token" : self.getAuthToken }
        postParams['device_id'] = device_id
        if module_id : postParams['module_id'] = module_id
        postParams['scale'] = scale
        postParams['type'] = mtype
        if date_begin : postParams['date_begin'] = date_begin
        if date_end : postParams['date_end'] = date_end
        if limit : postParams['limit'] = limit
        postParams['optimize'] = "true" if optimize else "false"
        postParams['real_time'] = "true" if real_time else "false"
        return postRequest(_GETMEASURE_REQ, postParams)
    def MinMaxTH(self, module=None, frame="last24"):
        # Min/max Temperature and Humidity over a time frame, for the default
        # station or one of its modules.
        # NOTE(review): a frame value other than "last24"/"day" leaves
        # start/end undefined and raises NameError below — confirm intended.
        s = self.default_station_data
        if frame == "last24":
            end = time.time()
            start = end - 24*3600 # 24 hours ago
        elif frame == "day":
            start, end = todayStamps()
        if module and module != s['module_name']:
            m = self.moduleById(module) or self.moduleByName(module)
            if not m : raise NoDevice("Can't find module %s" % module)
            # retrieve module's data
            resp = self.getMeasure(
                    device_id = s['_id'],
                    module_id = m['_id'],
                    scale = "max",
                    mtype = "Temperature,Humidity",
                    date_begin = start,
                    date_end = end)
        else : # retrieve station's data
            resp = self.getMeasure(
                    device_id = s['_id'],
                    scale = "max",
                    mtype = "Temperature,Humidity",
                    date_begin = start,
                    date_end = end)
        if resp:
            # Each measure row is [temperature, humidity].
            T = [v[0] for v in resp['body'].values()]
            H = [v[1] for v in resp['body'].values()]
            return min(T), max(T), min(H), max(H)
        else:
            return None
class DeviceList(WeatherStationData):
    """
    This class is now deprecated. Use WeatherStationData directly instead
    """
    # NOTE: this warning fires once at import time (class-body execution),
    # not when DeviceList is instantiated.
    warnings.warn("The 'DeviceList' class was renamed 'WeatherStationData'",
            DeprecationWarning )
    pass
class HomeData:
"""
List the Netatmo home informations (Homes, cameras, events, persons)
Args:
authData (ClientAuth): Authentication information with a working access Token
"""
    def __init__(self, authData, home=None):
        # Snapshot of the (renewable) access token used for every request.
        self.getAuthToken = authData.accessToken
        postParams = {
            "access_token" : self.getAuthToken
            }
        resp = postRequest(_GETHOMEDATA_REQ, postParams)
        self.rawData = resp['body']
        # Collect homes
        self.homes = { d['id'] : d for d in self.rawData['homes'] }
        if not self.homes : raise NoDevice("No home available")
        # Default to the requested home name, else the first home returned.
        self.default_home = home or list(self.homes.values())[0]['name']
        # Split homes data by category
        self.persons = dict()
        self.events = dict()       # camera_id -> {event time -> event}
        self.cameras = dict()      # home name -> {camera id -> camera}
        self.lastEvent = dict()    # camera_id -> most recent event
        for i in range(len(self.rawData['homes'])):
            curHome = self.rawData['homes'][i]
            nameHome = curHome['name']
            if nameHome not in self.cameras:
                self.cameras[nameHome] = dict()
            if 'persons' in curHome:
                for p in curHome['persons']:
                    self.persons[ p['id'] ] = p
            if 'events' in curHome:
                for e in curHome['events']:
                    if e['camera_id'] not in self.events:
                        self.events[ e['camera_id'] ] = dict()
                    self.events[ e['camera_id'] ][ e['time'] ] = e
            if 'cameras' in curHome:
                for c in curHome['cameras']:
                    self.cameras[nameHome][ c['id'] ] = c
                    c["home_id"] = curHome['id']
        # Latest event per camera = event with the greatest timestamp key.
        for camera in self.events:
            self.lastEvent[camera] = self.events[camera][sorted(self.events[camera])[-1]]
        if not self.cameras[self.default_home] : raise NoDevice("No camera available in default home")
        self.default_camera = list(self.cameras[self.default_home].values())[0]
def homeById(self, hid):
return None if hid not in self.homes else self.homes[hid]
def homeByName(self, home=None):
if not home: home = self.default_home
for key,value in self.homes.items():
if value['name'] == home:
return self.homes[key]
def cameraById(self, cid):
for home,cam in self.cameras.items():
if cid in self.cameras[home]:
return self.cameras[home][cid]
return None
    def cameraByName(self, camera=None, home=None):
        # Resolve a camera by (name, home): no arguments -> default camera;
        # both -> exact match within that home; name only -> search all homes;
        # home only -> first camera of that home.
        if not camera and not home:
            return self.default_camera
        elif home and camera:
            if home not in self.cameras:
                return None
            for cam_id in self.cameras[home]:
                if self.cameras[home][cam_id]['name'] == camera:
                    return self.cameras[home][cam_id]
        elif not home and camera:
            for home, cam_ids in self.cameras.items():
                for cam_id in cam_ids:
                    if self.cameras[home][cam_id]['name'] == camera:
                        return self.cameras[home][cam_id]
        else:
            # NOTE(review): home given without camera — an unknown home name
            # raises KeyError here rather than returning None; confirm intended.
            return list(self.cameras[home].values())[0]
        return None
    def cameraUrls(self, camera=None, home=None, cid=None):
        """
        Return the vpn_url and the local_url (if available) of a given camera
        in order to access to its live feed
        Can't use the is_local property which is mostly false in case of operator
        dynamic IP change after presence start sequence
        """
        local_url = None
        vpn_url = None
        # Camera can be selected either by id or by (name, home).
        if cid:
            camera_data=self.cameraById(cid)
        else:
            camera_data=self.cameraByName(camera=camera, home=home)
        if camera_data:
            vpn_url = camera_data['vpn_url']
            # Ask the camera (through the VPN relay) for its LAN address.
            resp = postRequest(vpn_url + '/command/ping')
            temp_local_url=resp['local_url']
            try:
                # Probe the LAN address directly; only trust it when it answers
                # and reports the same local_url back.
                resp = postRequest(temp_local_url + '/command/ping',timeout=1)
                if resp and temp_local_url == resp['local_url']:
                    local_url = temp_local_url
            except: # On this particular request, vithout errors from previous requests, error is timeout
                local_url = None
        return vpn_url, local_url
def url(self, camera=None, home=None, cid=None):
vpn_url, local_url = self.cameraUrls(camera, home, cid)
# Return local if available else vpn
return local_url or vpn_url
def personsAtHome(self, home=None):
"""
Return the list of known persons who are currently at home
"""
if not home: home = self.default_home
home_data = self.homeByName(home)
atHome = []
for p in home_data['persons']:
#Only check known persons
if 'pseudo' in p:
if not p["out_of_sight"]:
atHome.append(p['pseudo'])
return atHome
    def getCameraPicture(self, image_id, key):
        """
        Download a specific image (of an event or user face) from the camera
        """
        postParams = {
            "access_token" : self.getAuthToken,
            "image_id" : image_id,
            "key" : key
            }
        # The endpoint returns raw image bytes, not JSON.
        resp = postRequest(_GETCAMERAPICTURE_REQ, postParams)
        # Sniff the image format from the bytes (filename argument is unused).
        image_type = imghdr.what('NONE.FILE',resp)
        return resp, image_type
def getProfileImage(self, name):
"""
Retrieve the face of a given person
"""
for p in self.persons:
if 'pseudo' in self.persons[p]:
if name == self.persons[p]['pseudo']:
image_id = self.persons[p]['face']['id']
key = self.persons[p]['face']['key']
return self.getCameraPicture(image_id, key)
return None, None
def updateEvent(self, event=None, home=None):
"""
Update the list of event with the latest ones
"""
if not home: home=self.default_home
if not event:
#If not event is provided we need to retrieve the oldest of the last event seen by each camera
listEvent = dict()
for cam_id in self.lastEvent:
listEvent[self.lastEvent[cam_id]['time']] = self.lastEvent[cam_id]
event = listEvent[sorted(listEvent)[0]]
home_data = self.homeByName(home)
postParams = {
"access_token" : <PASSWORD>.getAuthToken,
"home_id" : home_data['id'],
"event_id" : event['id']
}
resp = postRequest(_GETEVENTSUNTIL_REQ, postParams)
eventList = resp['body']['events_list']
for e in eventList:
self.events[ e['camera_id'] ][ e['time'] ] = e
for camera in self.events:
self.lastEvent[camera]=self.events[camera][sorted(self.events[camera])[-1]]
def personSeenByCamera(self, name, home=None, camera=None):
"""
Return True if a specific person has been seen by a camera
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)['id']
except TypeError:
logger.warning("personSeenByCamera: Camera name or home is unknown")
return False
#Check in the last event is someone known has been seen
if self.lastEvent[cam_id]['type'] == 'person':
person_id = self.lastEvent[cam_id]['person_id']
if 'pseudo' in self.persons[person_id]:
if self.persons[person_id]['pseudo'] == name:
return True
return False
def _knownPersons(self):
known_persons = dict()
for p_id,p in self.persons.items():
if 'pseudo' in p:
known_persons[ p_id ] = p
return known_persons
def someoneKnownSeen(self, home=None, camera=None):
"""
Return True if someone known has been seen
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)['id']
except TypeError:
logger.warning("personSeenByCamera: Camera name or home is unknown")
return False
#Check in the last event is someone known has been seen
if self.lastEvent[cam_id]['type'] == 'person':
if self.lastEvent[cam_id]['person_id'] in self._knownPersons():
return True
return False
def someoneUnknownSeen(self, home=None, camera=None):
"""
Return True if someone unknown has been seen
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)['id']
except TypeError:
logger.warning("personSeenByCamera: Camera name or home is unknown")
return False
#Check in the last event is someone known has been seen
if self.lastEvent[cam_id]['type'] == 'person':
if self.lastEvent[cam_id]['person_id'] not in self._knownPersons():
return True
return False
def motionDetected(self, home=None, camera=None):
"""
Return True if movement has been detected
"""
try:
cam_id = self.cameraByName(camera=camera, home=home)['id']
except TypeError:
logger.warning("personSeenByCamera: Camera name or home is unknown")
return False
if self.lastEvent[cam_id]['type'] == 'movement':
return True
return False
def presenceUrl(self, camera=None, home=None, cid=None, setting=None):
camera = self.cameraByName(home=home, camera=camera) or self.cameraById(cid=cid)
if camera["type"] != "NOC": return None # Not a presence camera
vpnUrl, localUrl = self.cameraUrls(cid=camera["id"])
return localUrl
def presenceLight(self, camera=None, home=None, cid=None, setting=None):
url = self.presenceUrl(home=home, camera=camera) or self.cameraById(cid=cid)
if not url or setting not in ("on", "off", "auto"): return None
if setting : return "Currently unsupported"
return cameraCommand(url, _PRES_CDE_GET_LIGHT)["mode"]
# Not yet supported
#if not setting: return cameraCommand(url, _PRES_CDE_GET_LIGHT)["mode"]
#else: return cameraCommand(url, _PRES_CDE_SET_LIGHT, setting)
def presenceStatus(self, mode, camera=None, home=None, cid=None):
url = self.presenceUrl(home=home, camera=camera) or self.cameraById(cid=cid)
if not url or mode not in ("on", "off") : return None
r = cameraCommand(url, _CAM_CHANGE_STATUS, mode)
return mode if r["status"] == "ok" else None
    def presenceSetAction(self, camera=None, home=None, cid=None,
                          eventType=_PRES_DETECTION_KIND[0], action=2):
        """
        Configure what a Presence camera records for a detection kind.

        NOTE: intentionally disabled -- the early return below short-circuits
        the draft implementation that follows (kept for future enablement).
        """
        return "Currently unsupported"
        # Unreachable draft implementation below (kept on purpose):
        if eventType not in _PRES_DETECTION_KIND or \
           action not in _PRES_DETECTION_SETUP : return None
        camera = self.cameraByName(home=home, camera=camera) or self.cameraById(cid=cid)
        postParams = { "access_token" : self.getAuthToken,
                       "home_id" : camera["home_id"],
                       "presence_settings[presence_record_%s]" % eventType : _PRES_DETECTION_SETUP.index(action)
                     }
        resp = postRequest(_POST_UPDATE_HOME_REQ, postParams)
        self.rawData = resp['body']
def getLiveSnapshot(self, camera=None, home=None, cid=None):
camera = self.cameraByName(home=home, camera=camera) or self.cameraById(cid=cid)
vpnUrl, localUrl = self.cameraUrls(cid=camera["id"])
url = localUrl or vpnUrl
return cameraCommand(url, _PRES_CDE_GET_SNAP)
class WelcomeData(HomeData):
    """
    This class is now deprecated. Use HomeData instead.
    Home can handle many devices, not only Welcome cameras.
    """
    # Emitted once at import time, when the class body is executed
    warnings.warn("The 'WelcomeData' class was renamed 'HomeData' to handle new Netatmo Home capabilities",
            DeprecationWarning )
    pass
# Utilities routines
def filter_home_data(rawData, home):
    """
    Select the home entry whose name or id matches *home*.

    Falls back to the first entry when no home is requested; returns None
    when a home was requested but not found.
    """
    if not home:
        # By default, the first home is returned
        return rawData[0]
    return next((entry for entry in rawData
                 if home in (entry["home_name"], entry["home_id"])), None)
def cameraCommand(cameraUrl, commande, parameters=None, timeout=3):
    """Send a command to a camera's embedded web server and return its response."""
    command_path = commande % parameters if parameters else commande
    return postRequest(cameraUrl + command_path, timeout=timeout)
def postRequest(url, params=None, timeout=10):
    """
    POST (or plain GET when params is None) to a Netatmo endpoint.

    Returns decoded JSON when the response is application/json, raw bytes
    otherwise (e.g. camera images), or None on HTTP error. Supports both
    Python 3 (urllib.request) and legacy Python 2 (urllib2) code paths.
    """
    if PYTHON3:
        req = urllib.request.Request(url)
        if params:
            req.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
            params = urllib.parse.urlencode(params).encode('utf-8')
        try:
            resp = urllib.request.urlopen(req, params, timeout=timeout) if params else urllib.request.urlopen(req, timeout=timeout)
        except urllib.error.HTTPError as err:
            logger.error("code=%s, reason=%s" % (err.code, err.reason))
            return None
    else:
        if params:
            params = urlencode(params)
            headers = {"Content-Type" : "application/x-www-form-urlencoded;charset=utf-8"}
        req = urllib2.Request(url=url, data=params, headers=headers) if params else urllib2.Request(url)
        try:
            resp = urllib2.urlopen(req, timeout=timeout)
        except urllib2.HTTPError as err:
            logger.error("code=%s, reason=%s" % (err.code, err.reason))
            return None
    # Read the body in 64 KiB chunks (camera images can be large)
    data = b""
    for buff in iter(lambda: resp.read(65535), b''): data += buff
    # Return values in bytes if not json data to handle properly camera images
    returnedContentType = resp.getheader("Content-Type") if PYTHON3 else resp.info()["Content-Type"]
    return json.loads(data.decode("utf-8")) if "application/json" in returnedContentType else data
def toTimeString(value):
    """Format an epoch timestamp as a local 'YYYY-MM-DD_HH:MM:SS' string."""
    local = time.localtime(int(value))
    return time.strftime("%Y-%m-%d_%H:%M:%S", local)
def toEpoch(value):
    """Parse a local 'YYYY-MM-DD_HH:MM:SS' string into epoch seconds (int)."""
    parsed = time.strptime(value, "%Y-%m-%d_%H:%M:%S")
    return int(time.mktime(parsed))
def todayStamps():
    """Return (start, end) epoch seconds bounding the current local day."""
    midnight = time.mktime(time.strptime(time.strftime("%Y-%m-%d"), "%Y-%m-%d"))
    start = int(midnight)
    return start, start + 3600 * 24
# Global shortcut
def getStationMinMaxTH(station=None, module=None, home=None):
    """
    Convenience shortcut returning current/min/max temperature and humidity.

    module=None  -> use the station's first module
    module="*"   -> dict of (min, current, max) per module seen in the last hour
    otherwise    -> [current T, current H, min, max] for that module,
                    or ["-", "-"] if its data is older than one hour.
    Raises NoDevice when a named module cannot be found.
    """
    authorization = ClientAuth()
    devList = WeatherStationData(authorization, station=station, home=home)
    if module == "*":
        pass
    elif module:
        requested = module  # keep the caller's identifier for error reporting
        module = devList.moduleById(module) or devList.moduleByName(module)
        # fix: the original raised with the rebound (None) value, producing
        # "No such module None" instead of echoing the requested name
        if not module: raise NoDevice("No such module %s" % requested)
        else: module = module["module_name"]
    else:
        module = list(devList.modules.values())[0]["module_name"]
    lastD = devList.lastData()
    if module == "*":
        result = dict()
        for m in lastD.keys():
            # Skip modules whose last report is stale (older than one hour)
            if time.time()-lastD[m]['When'] > 3600 : continue
            r = devList.MinMaxTH(module=m)
            result[m] = (r[0], lastD[m]['Temperature'], r[1])
    else:
        if time.time()-lastD[module]['When'] > 3600 : result = ["-", "-"]
        else :
            result = [lastD[module]['Temperature'], lastD[module]['Humidity']]
            result.extend(devList.MinMaxTH(module))
    return result
# auto-test when executed directly
if __name__ == "__main__":
    from sys import exit, stdout, stderr
    logging.basicConfig(format='%(name)s - %(levelname)s: %(message)s', level=logging.INFO)
    # Refuse to run without credentials: every request below needs them
    if not _CLIENT_ID or not _CLIENT_SECRET or not _USERNAME or not _PASSWORD :
        stderr.write("Library source missing identification arguments to check lnetatmo.py (user/password/etc...)")
        exit(1)
    authorization = ClientAuth()                            # Test authentication method
    try:
        weatherStation = WeatherStationData(authorization)  # Test DEVICELIST
    except NoDevice:
        logger.warning("No weather station available for testing")
    else:
        weatherStation.MinMaxTH()                           # Test GETMEASURE
    try:
        homes = HomeData(authorization)
    except NoDevice :
        logger.warning("No home available for testing")
    try:
        thermostat = ThermostatData(authorization)
    except NoDevice:
        # fix: "avaible" typo in the user-visible log message
        logger.warning("No thermostat available for testing")
    # If we reach this line, all is OK
    logger.info("OK")
    exit(0)
|
import datetime
import typing
import unittest.mock
from flask import Flask
from pytest import fixture, mark
from pytest_localserver.http import WSGIServer
from requests.exceptions import ConnectionError, Timeout
from requests_mock import Mocker, mock
from sqlalchemy.orm.scoping import scoped_session
from sqlalchemy.orm.session import Session
from typeguard import typechecked
from nekoyume.block import Block
from nekoyume.broadcast import (
broadcast_block,
broadcast_move,
broadcast_node,
multicast,
)
from nekoyume.move import Move
from nekoyume.node import Node
from nekoyume.user import User
@fixture
def fx_other_server(request, fx_other_app: Flask) -> WSGIServer:
    """Spin up a throwaway WSGI server for the secondary test app."""
    wsgi_server = WSGIServer(application=fx_other_app.wsgi_app)
    wsgi_server.start()
    # Ensure the server is torn down when the test finishes
    request.addfinalizer(wsgi_server.stop)
    return wsgi_server
@typechecked
def test_broadcast_node(
        fx_server: WSGIServer,
        fx_session: scoped_session,
        fx_other_server: WSGIServer,
        fx_other_session: Session,
):
    """broadcast_node propagates a node URL to peers and bumps last_connected_at."""
    now = datetime.datetime.utcnow()
    node = Node(url=fx_server.url,
                last_connected_at=now)
    node2 = Node(url=fx_other_server.url,
                 last_connected_at=datetime.datetime.utcnow())
    fx_session.add(node)
    fx_session.commit()
    fx_other_session.add(node2)
    fx_other_session.commit()
    # the other node is unknown locally before the broadcast
    assert not fx_session.query(Node).filter(Node.url == node2.url).first()
    multicast(serialized={'url': fx_other_server.url},
              broadcast=broadcast_node)
    assert fx_session.query(Node).filter(Node.url == node2.url).first()
    assert node.last_connected_at > now
@typechecked
def test_broadcast_node_same_url(fx_session: scoped_session):
    """Broadcasting a node back to itself must be a no-op (no HTTP call)."""
    url = 'http://test.neko'
    now = datetime.datetime.utcnow()
    node = Node(url=url, last_connected_at=now)
    fx_session.add(node)
    fx_session.commit()
    with Mocker() as m:
        multicast(serialized={'url': url}, sent_node=node,
                  broadcast=broadcast_node)
        assert not m.called
    assert node.last_connected_at == now
@typechecked
def test_broadcast_my_node(fx_session: scoped_session):
    """The broadcaster advertises itself via the 'sent_node' field."""
    url = 'http://test.neko'
    now = datetime.datetime.utcnow()
    node = Node(url=url, last_connected_at=now)
    fx_session.add(node)
    fx_session.commit()
    with Mocker() as m:
        m.post('http://test.neko/nodes', json={'result': 'success'})
        multicast({'url': url}, my_node=node, broadcast=broadcast_node)
        assert node.last_connected_at > now
        # check request.json value
        assert m.request_history[0].json() == {
            'url': 'http://test.neko',
            'sent_node': 'http://test.neko'
        }
@mark.parametrize('error', [ConnectionError, Timeout])
def test_broadcast_node_failed(fx_session: scoped_session,
                               fx_other_session: Session, error):
    """A network failure must neither register the peer nor bump last_connected_at.

    Fixes: missing ``test_`` prefix (pytest never collected this test) and a
    NameError on the undeclared ``fx_other_server`` fixture (use node2's URL).
    """
    now = datetime.datetime.utcnow()
    node = Node(url='http://test.neko',
                last_connected_at=now)
    node2 = Node(url='http://other.neko',
                 last_connected_at=datetime.datetime.utcnow())
    fx_session.add(node)
    fx_session.commit()
    fx_other_session.add(node2)
    fx_other_session.commit()
    assert not fx_session.query(Node).filter(Node.url == node2.url).first()
    with Mocker() as m:
        m.post('http://test.neko', exc=error)
        multicast(serialized={'url': node2.url},
                  broadcast=broadcast_node)
    assert not fx_session.query(Node).filter(Node.url == node2.url).first()
    assert node.last_connected_at == now
@typechecked
def test_broadcast_block(
        fx_server: WSGIServer,
        fx_session: scoped_session,
        fx_other_session: Session,
        fx_other_server: WSGIServer,
        fx_user: User
):
    """broadcast_block delivers a serialized block to a peer session."""
    now = datetime.datetime.utcnow()
    node = Node(url=fx_server.url,
                last_connected_at=now)
    node2 = Node(url=fx_other_server.url,
                 last_connected_at=datetime.datetime.utcnow())
    block = Block.create(fx_user, [])
    fx_session.add_all([node, node2, block])
    fx_session.flush()
    # the block exists locally but not on the peer before broadcasting
    assert fx_session.query(Block).get(block.id)
    assert not fx_other_session.query(Block).get(block.id)
    multicast(
        serialized=block.serialize(
            use_bencode=False,
            include_suffix=True,
            include_moves=True,
            include_hash=True
        ),
        broadcast=broadcast_block,
    )
    assert node.last_connected_at > now
    assert fx_session.query(Block).count() == 1
    assert fx_other_session.query(Block).get(block.id)
@typechecked
def test_broadcast_block_my_node(fx_session: scoped_session, fx_user: User):
    """Broadcasting with my_node adds 'sent_node' to the payload."""
    block = Block.create(fx_user, [])
    url = 'http://test.neko'
    now = datetime.datetime.utcnow()
    node = Node(url=url, last_connected_at=now)
    fx_session.add(node)
    fx_session.flush()
    with Mocker() as m:
        m.post('http://test.neko/blocks', text='success')
        expected = serialized = block.serialize(
            use_bencode=False,
            include_suffix=True,
            include_moves=True,
            include_hash=True
        )
        multicast(serialized=serialized, my_node=node,
                  broadcast=broadcast_block)
        expected['sent_node'] = url
        # fix: this assertion was accidentally duplicated on two lines
        assert node.last_connected_at > now
        # check request.json value
        assert m.request_history[0].json() == expected
@typechecked
def test_broadcast_block_same_node(fx_session: scoped_session, fx_user: User):
    """Broadcasting back to the node a block came from must be a no-op."""
    block = Block.create(fx_user, [])
    url = 'http://test.neko'
    now = datetime.datetime.utcnow()
    node = Node(url=url, last_connected_at=now)
    fx_session.add(node)
    fx_session.flush()
    multicast(
        serialized=block.serialize(
            use_bencode=False,
            include_suffix=True,
            include_moves=True,
            include_hash=True
        ),
        sent_node=node,
        broadcast=broadcast_block,
    )
    assert node.last_connected_at == now
@mark.parametrize('error', [ConnectionError, Timeout])
def test_broadcast_block_raise_exception(
        fx_session: scoped_session, fx_user: User,
        error: typing.Union[ConnectionError, Timeout]
):
    """Connection errors during a block broadcast leave last_connected_at untouched."""
    block = Block.create(fx_user, [])
    url = 'http://test.neko'
    now = datetime.datetime.utcnow()
    node = Node(url=url, last_connected_at=now)
    fx_session.add(node)
    fx_session.flush()
    with Mocker() as m:
        m.post('http://test.neko/blocks', exc=error)
        multicast(
            serialized=block.serialize(
                use_bencode=False,
                include_suffix=True,
                include_moves=True,
                include_hash=True
            ),
            broadcast=broadcast_block,
        )
        assert node.last_connected_at == now
@mark.parametrize('limit, blocks, expected', [
    (1, 2, 3),
    (2, 5, 6),
])
def test_broadcast_block_retry(
        fx_session: scoped_session,
        fx_user: User, limit: int, blocks: int, expected: int
):
    """A rejected block ("not our next block") triggers a bounded catch-up retry.

    BROADCAST_LIMIT is patched down so the retry loop stays small; the mock
    rejects the first POST with 403 and accepts subsequent ones.
    """
    for i in range(blocks):
        block = Block.create(fx_user, [])
    url = 'http://test.neko'
    now = datetime.datetime.utcnow()
    node = Node(url=url, last_connected_at=now)
    fx_session.add(node)
    fx_session.flush()
    patch = unittest.mock.patch('nekoyume.broadcast.BROADCAST_LIMIT', limit)
    with mock() as m, patch:
        m.register_uri('POST', 'http://test.neko/blocks', [
            {
                'json': {
                    'result': 'failed',
                    'block_id': 0,
                    'mesage': "new block isn't our next block."
                },
                'status_code': 403
            },
            {
                'json': {
                    'result': 'success',
                },
                'status_code': 200
            }
        ])
        multicast(
            serialized=block.serialize(
                use_bencode=False,
                include_suffix=True,
                include_moves=True,
                include_hash=True
            ),
            broadcast=broadcast_block,
        )
        assert m.call_count == expected
        assert node.last_connected_at > now
@typechecked
def test_broadcast_move(
        fx_server: WSGIServer,
        fx_session: scoped_session,
        fx_other_server: WSGIServer,
        fx_other_session: Session,
        fx_user: User,
        fx_novice_status: typing.Mapping[str, str],
):
    """broadcast_move delivers a serialized move to a peer session."""
    now = datetime.datetime.utcnow()
    node = Node(url=fx_server.url,
                last_connected_at=now)
    node2 = Node(url=fx_other_server.url,
                 last_connected_at=datetime.datetime.utcnow())
    move = fx_user.create_novice(fx_novice_status)
    fx_session.add_all([node, node2, move])
    fx_session.commit()
    # the move is unknown to the peer before the broadcast
    assert not fx_other_session.query(Move).get(move.id)
    serialized = move.serialize(
        use_bencode=False,
        include_signature=True,
        include_id=True,
    )
    multicast(serialized=serialized, broadcast=broadcast_move)
    assert fx_other_session.query(Move).get(move.id)
    assert node.last_connected_at > now
@typechecked
def test_broadcast_move_same_url(fx_session: scoped_session,
                                 fx_user: User,
                                 fx_novice_status: typing.Mapping[str, str]):
    """Broadcasting a move back to its sender must be a no-op (no HTTP call)."""
    url = 'http://test.neko'
    now = datetime.datetime.utcnow()
    node = Node(url=url, last_connected_at=now)
    move = fx_user.create_novice(fx_novice_status)
    fx_session.add_all([node, move])
    fx_session.commit()
    with Mocker() as m:
        serialized = move.serialize(
            use_bencode=False,
            include_signature=True,
            include_id=True,
        )
        multicast(serialized=serialized, sent_node=node,
                  broadcast=broadcast_move)
        assert not m.called
    assert node.last_connected_at == now
@typechecked
def test_broadcast_move_my_node(fx_session: scoped_session,
                                fx_user: User,
                                fx_novice_status: typing.Mapping[str, str]):
    """Broadcasting a move with my_node adds 'sent_node' to the payload."""
    url = 'http://test.neko'
    now = datetime.datetime.utcnow()
    node = Node(url=url, last_connected_at=now)
    move = fx_user.create_novice(fx_novice_status)
    fx_session.add_all([node, move])
    fx_session.commit()
    with Mocker() as m:
        m.post('http://test.neko/moves', json={'result': 'success'})
        expected = serialized = move.serialize(
            use_bencode=False,
            include_signature=True,
            include_id=True,
        )
        multicast(serialized=serialized, my_node=node,
                  broadcast=broadcast_move)
        expected['sent_node'] = 'http://test.neko'
        assert node.last_connected_at > now
        # check request.json value
        assert m.request_history[0].json() == expected
@mark.parametrize('error', [ConnectionError, Timeout])
def test_broadcast_move_failed(fx_session: scoped_session,
                               fx_user: User,
                               fx_novice_status: typing.Mapping[str, str],
                               error):
    """A network failure during a move broadcast leaves last_connected_at untouched.

    Fixes: missing ``test_`` prefix meant pytest never collected this test.
    """
    now = datetime.datetime.utcnow()
    move = fx_user.create_novice(fx_novice_status)
    node = Node(url='http://test.neko',
                last_connected_at=now)
    fx_session.add_all([node, move])
    fx_session.commit()
    with Mocker() as m:
        serialized = move.serialize(
            use_bencode=False,
            include_signature=True,
            include_id=True,
        )
        m.post('http://test.neko', exc=error)
        multicast(serialized=serialized, broadcast=broadcast_move)
        assert node.last_connected_at == now
|
# Example/regression script: dewarp a pair of opposite-phase rs-fMRI runs,
# register a T1 to the dewarped mean, extract a default-mode-network seed
# signal from the Powers ROI set, and map its GM-wide correlation.
import sys, os
import unittest
# Cap thread pools before TF/ITK are imported (they read these at import time)
os.environ["TF_NUM_INTEROP_THREADS"] = "8"
os.environ["TF_NUM_INTRAOP_THREADS"] = "8"
os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = "8"
import tempfile
import shutil
import tensorflow as tf
import antspymm
import antspyt1w
import antspynet
import ants
import numpy as np
# FIXME - need to return FD and other motion parameters from dewarp function
# then incorporate those parameters in this example
testingClass = unittest.TestCase( )
islocal = False
# LR / RL phase-encoded HCP-style rs-fMRI runs
id1 = "LS2001_3T_rfMRI_REST1_LR_gdc"
id2 = "LS2001_3T_rfMRI_REST1_RL_gdc"
img1 = ants.image_read( antspymm.get_data( id1, target_extension=".nii.gz") )
img2 = ants.image_read( antspymm.get_data( id2, target_extension=".nii.gz") )
# FIXME: - test that these are the same values
# NOTE: could run SR at this point - will take a long time - example here:
# mdlfn = antspymm.get_data( "brainSR", target_extension=".h5")
# mdl = tf.keras.models.load_model( mdlfn )
# srimg = antspymm.super_res_mcimage( img, mdl, verbose=False )
# Guard on globals() so an interactive re-run skips the expensive dewarp
if 'dwp' not in globals():
    dwp = antspymm.dewarp_imageset( [img1,img2], iterations=3, padding=0,
        target_idx = [10,11,12],
        syn_sampling = 20, syn_metric='mattes',
        type_of_transform = 'SyN',
        total_sigma = 0.0, random_seed=1,
        reg_iterations = [200,50,20] )
if islocal:
    print('rsfmri dewarp done')
    ants.image_write( dwp['dewarped'][0], './rsfmridewarped0.nii.gz' )
    ants.image_write( dwp['dewarped'][1], './rsfmridewarped1.nii.gz' )
# now process fMRI as usual --- do we concatenate the two dewarped images?
# for now, just processing dwp0
import pandas as pd
und = dwp['dewarpedmean']
bmask = antspynet.brain_extraction( und, 'bold' ).threshold_image( 0.3, 1.0 )
powers_areal_mni_itk = pd.read_csv(antspymm.get_data('powers_mni_itk', target_extension=".csv")) # power coordinates
t1fn = antspymm.get_data( 'LS2001_3T_T1w_MPR1_gdc' , target_extension='.nii.gz' )
t1 = ants.image_read( t1fn ).n3_bias_field_correction( 8 ).n3_bias_field_correction( 4 )
t1bxt = antspynet.brain_extraction( t1, 't1' ).threshold_image( 0.3, 1.0 )
t1seg = antspynet.deep_atropos( t1 )
t1reg = ants.registration( und * bmask, t1 * t1bxt, "SyN" ) # in practice use something different
# ants.plot( t1*t1bxt, t1reg['warpedfixout'] , axis=2, overlay_alpha=0.25, ncol=8, nslices=24 )
# ants.plot( und, t1reg['warpedmovout'], overlay_alpha = 0.25, axis=2, nslices=24, ncol=6 )
# Bring the T1 tissue segmentation into BOLD space
boldseg = ants.apply_transforms( und, t1seg['segmentation_image'],
  t1reg['fwdtransforms'], interpolator = 'nearestNeighbor' )
# ants.plot( und, boldseg, overlay_alpha = 0.25, axis=2, nslices=24, ncol=6 )
# CSF (label 1) + WM (label 3) mask, eroded, for compcor nuisance extraction
csfAndWM = ( ants.threshold_image( boldseg, 1, 1 ) +
             ants.threshold_image( boldseg, 3, 3 ) ).morphology("erode",1)
dwpind = 1
mycompcor = ants.compcor( dwp['dewarped'][dwpind],
  ncompcor=6, quantile=0.80, mask = csfAndWM,
  filter_type='polynomial', degree=4 )
nt = dwp['dewarped'][dwpind].shape[3]
import matplotlib.pyplot as plt
plt.plot( range( nt ), mycompcor['components'][:,0] )
# plt.show()
plt.plot( range( nt ), mycompcor['components'][:,1] )
# plt.show()
myvoxes = range(powers_areal_mni_itk.shape[0])
anat = powers_areal_mni_itk['Anatomy']
syst = powers_areal_mni_itk['SystemName']
Brod = powers_areal_mni_itk['Brodmann']
xAAL = powers_areal_mni_itk['AAL']
ch2 = ants.image_read( ants.get_ants_data( "ch2" ) )
# Guard on globals() so an interactive re-run skips the template registration
if 'treg' not in globals():
    treg = ants.registration( t1 * t1bxt, ch2, 'SyN' )
# Chain template->T1 and T1->BOLD inverse transforms to map MNI points to BOLD
concatx2 = treg['invtransforms'] + t1reg['invtransforms']
pts2bold = ants.apply_transforms_to_points( 3, powers_areal_mni_itk, concatx2,whichtoinvert = ( True, False, True, False ) )
locations = pts2bold.iloc[:,:3].values
ptImg = ants.make_points_image( locations, bmask, radius = 2 )
# ants.plot( und, ptImg, axis=2, nslices=24, ncol=8 )
tr = ants.get_spacing( dwp['dewarped'][dwpind] )[3]
# Censor threshold: frames with framewise displacement >= 0.5 mm
highMotionTimes = np.where( dwp['FD'][dwpind] >= 0.5 )
print( "highMotionTimes: " + str(highMotionTimes) )
goodtimes = np.where( dwp['FD'][dwpind] < 0.5 )
gmseg = ants.threshold_image( boldseg, 2, 2 )
spa, spt = 1.5, 0.0 # spatial, temporal - which we ignore b/c of frequency filtering
smth = ( spa, spa, spa, spt ) # this is for sigmaInPhysicalCoordinates = F
simg = ants.smooth_image(dwp['dewarped'][dwpind], smth, sigma_in_physical_coordinates = False )
# Nuisance matrix: compcor components + basis + framewise displacement
nuisance = mycompcor['components']
nuisance = np.c_[ nuisance, mycompcor['basis'] ]
nuisance = np.c_[ nuisance, dwp['FD'][dwpind] ]
gmmat = ants.timeseries_to_matrix( simg, gmseg )
gmmat = ants.bandpass_filter_matrix( gmmat, tr = tr, lowf=0.03, highf=0.08 ) # some would argue against this
gmmat = ants.regress_components( gmmat, nuisance )
postCing = powers_areal_mni_itk['AAL'].unique()[9]
networks = powers_areal_mni_itk['SystemName'].unique()
# networks[5] selects the default mode network rows -- TODO confirm index
ww = np.where( powers_areal_mni_itk['SystemName'] == networks[5] )[0]
dfnImg = ants.make_points_image(pts2bold.iloc[ww,:3].values, bmask, radius=1).threshold_image( 1, 400 )
# ants.plot( und, dfnImg, axis=2, nslices=24, ncol=8 )
dfnmat = ants.timeseries_to_matrix( simg, ants.threshold_image( dfnImg * gmseg, 1, dfnImg.max() ) )
dfnmat = ants.bandpass_filter_matrix( dfnmat, tr = tr, lowf=0.01, highf=0.09 )
dfnmat = ants.regress_components( dfnmat, nuisance )
dfnsignal = dfnmat.mean( axis = 1 )
from scipy.stats.stats import pearsonr
# Correlate the DMN seed signal against every GM voxel's time series
gmmatDFNCorr = np.zeros( gmmat.shape[1] )
for k in range( gmmat.shape[1] ):
  gmmatDFNCorr[ k ] = pearsonr( dfnsignal, gmmat[:,k] )[0]
corrImg = ants.make_image( gmseg, gmmatDFNCorr )
corrImgPos = corrImg * ants.threshold_image( corrImg, 0.25, 1 )
ants.plot( und, corrImgPos, axis=2, overlay_alpha = 0.6, cbar=False, nslices = 24, ncol=8, cbar_length=0.3, cbar_vertical=True )
|
import torch
import numpy as np
from torch import nn
class Fusion2(nn.Module):
    """
    Two-stream fusion MLP: an 11-d "ele" feature vector is embedded to 20-d,
    concatenated with a 9-d "poss" vector (inferred from the 29-d fusion
    input -- TODO confirm with caller), and mapped to 10 outputs.

    Recorded tuning notes:
    lr = 0.01 tanh 4.80%
    lr = 0.005 tanh 4.64%
    """
    def __init__(self):
        super(Fusion2, self).__init__()
        # Embedding branch: 11 -> 30 -> 40 -> 20
        self.elayer = nn.Sequential(nn.Linear(11, 30),
                                    nn.BatchNorm1d(30),
                                    nn.Tanh( ),
                                    nn.Linear(30, 40),
                                    nn.BatchNorm1d(40),
                                    nn.Tanh(),
                                    nn.Linear(40, 20),
                                    nn.BatchNorm1d(20),
                                    nn.Tanh())
        # Fusion head: cat(20 + 9) = 29 -> 30 -> 30 -> 10
        self.flayer1 = nn.Sequential(nn.Linear(29, 30),
                                     nn.BatchNorm1d(30),
                                     nn.Tanh( ))
        self.flayer2 = nn.Sequential(nn.Linear(30, 30),
                                     nn.BatchNorm1d(30),
                                     nn.Tanh())
        self.flayer3 = nn.Linear(30, 10)
    def forward(self, ele, poss):
        x1 = self.elayer(ele) # (batch, 20)
        z1 = torch.cat([x1,poss],1) # (batch, 29)
        z2 = self.flayer1(z1) # (batch, 30)
        z3 = self.flayer2(z2) # (batch, 30)
        z4 = self.flayer3(z3) # (batch, 10) logits
        return z4
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return the pairwise intersection-over-union (Jaccard index) of two box sets.

    Both inputs are (x1, y1, x2, y2) tensors: box1 is (N, 4), box2 is (M, 4).
    The result is an (N, M) tensor of IoU values.
    """
    def area(b):
        # b: (K, 4) -> (K,) box areas
        return (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])

    # Broadcasted pairwise intersection corners: (N, M, 2)
    lt = torch.max(box1[:, None, :2], box2[:, :2])
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])
    inter = (rb - lt).clamp(0).prod(2)  # (N, M) intersection areas
    return inter / (area(box1)[:, None] + area(box2) - inter)
def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:    True positives (nparray, nx1 or nx10).
        conf:  Objectness value from 0-1 (nparray).
        pred_cls:   Predicted object classes (nparray).
        target_cls: True object classes (nparray).
    # Returns
        (p, r, ap, f1, unique_classes): per-class precision, recall, AP,
        F1 and the class ids, as computed in py-faster-rcnn.
        Requires compute_ap() defined elsewhere in this module.
    """
    # Sort by objectness (descending confidence)
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
    # Find unique classes
    unique_classes = np.unique(target_cls)
    # Create Precision-Recall curve and compute AP for each class
    pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_gt = (target_cls == c).sum() # Number of ground truth objects
        n_p = i.sum() # Number of predicted objects
        if n_p == 0 or n_gt == 0:
            # No predictions or no ground truth: leave zeros for this class
            continue
        else:
            # Accumulate FPs and TPs (cumulative along confidence ranking)
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)
            # Recall
            recall = tpc / (n_gt + 1e-16) # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
            # Precision
            precision = tpc / (tpc + fpc) # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
            # AP from recall-precision curve, per IoU threshold column
            for j in range(tp.shape[1]):
                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])
            # Plot
            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
            # ax.plot(recall, precision)
            # ax.set_xlabel('Recall')
            # ax.set_ylabel('Precision')
            # ax.set_xlim(0, 1.01)
            # ax.set_ylim(0, 1.01)
            # fig.tight_layout()
            # fig.savefig('PR_curve.png', dpi=300)
    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)
    return p, r, ap, f1, unique_classes.astype('int32')
def compute_ap(recall, precision):
    """ Compute the average precision from recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list/array).
        precision: The precision curve (list/array).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # Sentinels: start the curve at recall 0 and close it just past the end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))
    # Precision envelope: make precision monotonically non-increasing
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        grid = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        return np.trapz(np.interp(grid, mrec, mpre), grid)
    # 'continuous': sum rectangle areas where recall changes
    changes = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changes + 1] - mrec[changes]) * mpre[changes + 1])
def spost_processing(conf_thresh, nms_thresh, output):
    """
    Filter raw detector output into per-image detection lists.

    output[0]: boxes, [batch, num, 1, 4] (coords clamped to [0, 1])
    output[1]: confs, [batch, num, num_classes]
    Returns one list per image; each detection is
    [x1, y1, x2, y2, per-class conf vector, max conf, class id].
    Requires nms_cpu() defined elsewhere in this module.
    """
    # [batch, num, 1, 4]
    box_array = output[0]
    box_array = torch.clamp(box_array, 0, 1)
    # [batch, num, num_classes]
    confs = output[1]
    if type(box_array).__name__ != 'ndarray':
        box_array = box_array.cpu().detach().numpy()
        confs = confs.cpu().detach().numpy()
    num_classes = confs.shape[2]
    # [batch, num, 4]
    box_array = box_array[:, :, 0]
    # [batch, num, num_classes] --> [batch, num]
    max_conf = np.max(confs, axis=2)
    max_id = np.argmax(confs, axis=2)
    bboxes_batch = []
    for i in range(box_array.shape[0]):
        argwhere = max_conf[i] > conf_thresh
        l_box_array = box_array[i, argwhere, :]
        l_max_conf = max_conf[i, argwhere]
        l_max_id = max_id[i, argwhere]
        confs_sim = confs[i, argwhere]
        bboxes = []
        # fix: the original rebound `confs = []` here, clobbering the
        # (batch, num, num_classes) array and crashing on `confs[i, argwhere]`
        # for every image after the first; the list was never used.
        # nms for each class
        for j in range(num_classes):
            cls_argwhere = l_max_id == j
            ll_box_array = l_box_array[cls_argwhere, :]
            ll_max_conf = l_max_conf[cls_argwhere]
            ll_max_id = l_max_id[cls_argwhere]
            keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh)
            if (keep.size > 0):
                ll_box_array = ll_box_array[keep, :]
                ll_max_conf = ll_max_conf[keep]
                ll_max_id = ll_max_id[keep]
                for k in range(ll_box_array.shape[0]):
                    bboxes.append([ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3],
                                   confs_sim[keep[k]], ll_max_conf[k], ll_max_id[k]])
        bboxes_batch.append(bboxes)
    return bboxes_batch
def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
    """
    Greedy non-maximum suppression on CPU.

    boxes: (N, 4) array of [x1, y1, x2, y2]; confs: (N,) scores.
    min_mode normalizes overlap by the smaller area instead of the union.
    Returns the kept indices, highest-score first.
    """
    lefts, tops = boxes[:, 0], boxes[:, 1]
    rights, bottoms = boxes[:, 2], boxes[:, 3]
    areas = (rights - lefts) * (bottoms - tops)
    # Candidates sorted by descending confidence
    candidates = confs.argsort()[::-1]
    kept = []
    while candidates.size > 0:
        best, rest = candidates[0], candidates[1:]
        kept.append(best)
        # Intersection of the best box with every remaining candidate
        ix1 = np.maximum(lefts[best], lefts[rest])
        iy1 = np.maximum(tops[best], tops[rest])
        ix2 = np.minimum(rights[best], rights[rest])
        iy2 = np.minimum(bottoms[best], bottoms[rest])
        inter = np.maximum(0.0, ix2 - ix1) * np.maximum(0.0, iy2 - iy1)
        if min_mode:
            overlap = inter / np.minimum(areas[best], areas[rest])
        else:
            overlap = inter / (areas[best] + areas[rest] - inter)
        # Keep only candidates that do not overlap the chosen box too much
        survivors = np.where(overlap <= nms_thresh)[0]
        candidates = rest[survivors]
    return np.array(kept)
|
<filename>MULTITASK_FILES/RETINANET_FILES/src/pytorch-retinanet/retinanet/dataloader.py
from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
import csv
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
from future.utils import raise_from
from pycocotools.coco import COCO
import skimage.io
import skimage.transform
import skimage.color
import skimage
from PIL import Image
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from . import augmentation_pipelines
class CocoDataset(Dataset):
    """Coco dataset."""

    def __init__(self, root_dir, set_name='train2017', transform=None):
        """
        Args:
            root_dir (string): COCO directory.
            set_name (string): split name used to locate images/annotations.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.root_dir = root_dir
        self.set_name = set_name
        self.transform = transform
        self.coco      = COCO(os.path.join(self.root_dir, 'annotations', 'instances_' + self.set_name + '.json'))
        self.image_ids = self.coco.getImgIds()
        self.load_classes()

    def load_classes(self):
        """Build contiguous label ids from (sorted) COCO category ids."""
        # load class names (name -> label)
        categories = self.coco.loadCats(self.coco.getCatIds())
        categories.sort(key=lambda x: x['id'])
        self.classes             = {}
        self.coco_labels         = {}
        self.coco_labels_inverse = {}
        for c in categories:
            self.coco_labels[len(self.classes)] = c['id']
            self.coco_labels_inverse[c['id']] = len(self.classes)
            self.classes[c['name']] = len(self.classes)
        # also load the reverse (label -> name)
        self.labels = {}
        for key, value in self.classes.items():
            self.labels[value] = key

    def __len__(self):
        return len(self.image_ids)

    def __getitem__(self, idx):
        """Return a {'img', 'annot'} sample, transformed when configured."""
        img = self.load_image(idx)
        annot = self.load_annotations(idx)
        sample = {'img': img, 'annot': annot}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def load_image(self, image_index):
        """Read an image, promote grayscale to RGB, scale to float32 [0, 1]."""
        image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
        path       = os.path.join(self.root_dir, 'images', self.set_name, image_info['file_name'])
        img = skimage.io.imread(path)
        if len(img.shape) == 2:
            img = skimage.color.gray2rgb(img)
        return img.astype(np.float32) / 255.0

    def load_annotations(self, image_index):
        """Return (N, 5) [x1, y1, x2, y2, label] annotations for an image."""
        # get ground truth annotations
        annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
        annotations     = np.zeros((0, 5))
        # some images appear to miss annotations (like image with id 257034)
        if len(annotations_ids) == 0:
            return annotations
        # parse annotations
        coco_annotations = self.coco.loadAnns(annotations_ids)
        for idx, a in enumerate(coco_annotations):
            # some annotations have basically no width / height, skip them
            if a['bbox'][2] < 1 or a['bbox'][3] < 1:
                continue
            annotation        = np.zeros((1, 5))
            annotation[0, :4] = a['bbox']
            annotation[0, 4]  = self.coco_label_to_label(a['category_id'])
            annotations       = np.append(annotations, annotation, axis=0)
        # transform from [x, y, w, h] to [x1, y1, x2, y2]
        annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
        annotations[:, 3] = annotations[:, 1] + annotations[:, 3]
        return annotations

    def coco_label_to_label(self, coco_label):
        """Map a raw COCO category id to the contiguous training label."""
        return self.coco_labels_inverse[coco_label]

    def label_to_coco_label(self, label):
        """Map a contiguous training label back to the raw COCO category id."""
        return self.coco_labels[label]

    def image_aspect_ratio(self, image_index):
        """Return width/height for the image (used by aspect-ratio samplers)."""
        image = self.coco.loadImgs(self.image_ids[image_index])[0]
        return float(image['width']) / float(image['height'])

    def num_classes(self):
        # COCO detection has 80 object categories
        return 80
class CSVDataset(Dataset):
    """Detection dataset backed by two CSV files (annotations + class list)."""
    def __init__(self, train_file, class_list, transform=None, augment=0, pipeline=None, blacken=False, keep_name=True):
        """
        Args:
            train_file (string): CSV with rows img_path,x1,y1,x2,y2,class_name
                (or img_path,,,,, for an image with no annotations).
            class_list (string): CSV with rows class_name,class_id.
            transform (callable, optional): transform applied to each sample.
            augment (float): probability in [0, 1] of running the augmentation
                pipeline on a sample in __getitem__.
            pipeline (string, optional): attribute name of an augmenter in the
                augmentation_pipelines module (looked up when augmenting).
            blacken (bool): zero out all pixels outside the annotation boxes.
            keep_name (bool): attach filename/augmented/index keys to samples.
        """
        self.augment = augment
        self.pipeline = pipeline
        self.train_file = train_file
        self.class_list = class_list
        self.transform = transform
        self.blacken = blacken
        self.keep_name = keep_name
        # parse the provided class file
        try:
            with self._open_for_csv(self.class_list) as file:
                self.classes = self.load_classes(csv.reader(file, delimiter=','))
        except ValueError as e:
            raise_from(ValueError('invalid CSV class file: {}: {}'.format(self.class_list, e)), None)
        self.labels = {}
        for key, value in self.classes.items():
            self.labels[value] = key
        # csv with img_path, x1, y1, x2, y2, class_name
        try:
            with self._open_for_csv(self.train_file) as file:
                self.image_data = self._read_annotations(csv.reader(file, delimiter=','), self.classes)
        except ValueError as e:
            raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(self.train_file, e)), None)
        self.image_names = list(self.image_data.keys())
    def _parse(self, value, function, fmt):
        """
        Parse a string into a value, and format a nice ValueError if it fails.
        Returns `function(value)`.
        Any `ValueError` raised is catched and a new `ValueError` is raised
        with message `fmt.format(e)`, where `e` is the caught `ValueError`.
        """
        try:
            return function(value)
        except ValueError as e:
            raise_from(ValueError(fmt.format(e)), None)
    def _open_for_csv(self, path):
        """
        Open a file with flags suitable for csv.reader.
        This is different for python2 it means with mode 'rb',
        for python3 this means 'r' with "universal newlines".
        """
        if sys.version_info[0] < 3:
            return open(path, 'rb')
        else:
            return open(path, 'r', newline='')
    def load_classes(self, csv_reader):
        """Parse class_name,class_id rows into {name: id}; duplicates are an error."""
        result = {}
        for line, row in enumerate(csv_reader):
            line += 1  # error messages use 1-based line numbers
            try:
                class_name, class_id = row
            except ValueError:
                raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
            class_id = self._parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
            if class_name in result:
                raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
            result[class_name] = class_id
        return result
    def __len__(self):
        return len(self.image_names)
    def get(self, idx):
        """Return only the (possibly blackened) image for *idx*; no transform/augment."""
        annot = self.load_annotations(idx)
        img = self.load_image(idx, annot)
        return img
    def getfilenameindex(self, filename):
        """Return the dataset index of *filename* (None when not present)."""
        if filename in self.image_names:
            index = self.image_names.index(filename)
            return index
    def __getitem__(self, idx):
        """Return {'img', 'annot'} (plus filename/augmented/index when keep_name),
        applying the named augmentation pipeline with probability self.augment."""
        annot = self.load_annotations(idx)
        img = self.load_image(idx, annot)
        filename = self.image_names[idx]
        img = (img * 255.0).astype(np.uint8) # Added: augmenters operate on uint8 pixels
        aug = 0
        if random.uniform(0, 1) < self.augment:
            aug = 1
            # look the pipeline up by name and run it on image + boxes together
            pipeline = getattr(augmentation_pipelines, self.pipeline)
            bbs = convert_bounding_boxes(img.shape, annot)
            img, bbs = pipeline(image=img, bounding_boxes=bbs)
            annot = revert_bounding_boxes(bbs)
        img = img.astype(np.float32) / 255.0 # Added: back to [0, 1] floats
        sample = {'img': img, 'annot': annot}
        if self.transform:
            sample = self.transform(sample)
        if self.keep_name:
            sample['filename'] = filename
            sample['augmented'] = aug
            sample['index'] = idx
        return sample
    def load_image(self, image_index, annot=None):
        """Load image *image_index* as HxWx3 float32 in [0, 1]; optionally blacken background."""
        img = skimage.io.imread(self.image_names[image_index])
        if annot is not None and self.blacken:
            img = blacken(img, annot)
        if len(img.shape) == 2:
            # promote grayscale to 3-channel
            img = skimage.color.gray2rgb(img)
        return img.astype(np.float32) / 255.0
    def load_annotations(self, image_index):
        """Return an Nx5 array [x1, y1, x2, y2, label] for the given image."""
        # get ground truth annotations
        annotation_list = self.image_data[self.image_names[image_index]]
        annotations = np.zeros((0, 5))
        # some images appear to miss annotations (like image with id 257034)
        if len(annotation_list) == 0:
            return annotations
        # parse annotations
        for idx, a in enumerate(annotation_list):
            # some annotations have basically no width / height, skip them
            x1 = a['x1']
            x2 = a['x2']
            y1 = a['y1']
            y2 = a['y2']
            if (x2 - x1) < 1 or (y2 - y1) < 1:
                continue
            annotation = np.zeros((1, 5))
            annotation[0, 0] = x1
            annotation[0, 1] = y1
            annotation[0, 2] = x2
            annotation[0, 3] = y2
            annotation[0, 4] = self.name_to_label(a['class'])
            annotations = np.append(annotations, annotation, axis=0)
        return annotations
    def _read_annotations(self, csv_reader, classes):
        """Parse annotation rows into {img_file: [{'x1','x2','y1','y2','class'}, ...]}."""
        result = {}
        for line, row in enumerate(csv_reader):
            line += 1
            try:
                img_file, x1, y1, x2, y2, class_name = row[:6]
            except ValueError:
                raise_from(ValueError(
                    'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line)),
                    None)
            if img_file not in result:
                result[img_file] = []
            # If a row contains only an image path, it's an image without annotations.
            if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
                continue
            x1 = self._parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
            y1 = self._parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
            x2 = self._parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
            y2 = self._parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
            # Check that the bounding box is valid.
            if x2 <= x1:
                raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
            if y2 <= y1:
                raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
            # check if the current class name is correctly present
            if class_name not in classes:
                raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
            result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
        return result
    def name_to_label(self, name):
        """Map a class name to its integer label."""
        return self.classes[name]
    def label_to_name(self, label):
        """Map an integer label back to its class name."""
        return self.labels[label]
    def num_classes(self):
        # assumes class ids are contiguous from 0
        return max(self.classes.values()) + 1
    def image_aspect_ratio(self, image_index):
        """width / height of the image, read via PIL."""
        image = Image.open(self.image_names[image_index])
        return float(image.width) / float(image.height)
class ContextDataset(CSVDataset):
    """CSVDataset extended with a rolling memory of (img, annot) context frames.

    __getitem__ serves, in index order: the CSV dataset images, then
    learned_context (long-term memory), then the current context, then the
    seed frame. Context frames are stored as uint8-range images and scaled
    to [0, 1] on access — presumably uint8; TODO confirm at call sites.
    """
    def __init__(self, train_file, class_list, memory=None, transform=None, augment=False, blacken=False, keep_name=False, aug_rate=0.3):
        super().__init__(train_file, class_list, transform=transform, augment=augment, blacken=blacken, keep_name=keep_name)
        self.context = [] # (img, annotation) tuples
        self.learned_context = [] # (img, annotation) tuples
        self.init_frame = []
        self.augment_rate = aug_rate
        # cap on long-term memory; defaults to the original dataset's size
        if memory is None:
            self.memory = len(self.image_names)
        else:
            self.memory = memory
    def __len__(self):
        # dataset images + remembered frames + current context + seed frame
        return len(self.image_names) + len(self.learned_context) + len(self.context) + len(self.init_frame)
    def __getitem__(self, idx):
        if idx < len(self.image_names):
            return super().__getitem__(idx)
        idx -= len(self.image_names)
        if idx < len(self.learned_context):
            img, annot = self.learned_context[idx]
        else:
            idx -= len(self.learned_context)
            if len(self.context) > 0 and idx < len(self.context):
                img, annot = self.context[idx]
            else:
                # anything past the context pool falls back to the seed frame
                img, annot = self.init_frame[0]
        # copy so augmentation never mutates the stored frame
        img = img.copy().astype(np.float32) / 255.0
        annot = annot.copy()
        if self.augment:
            if random.uniform(0, 1) < self.augment_rate:
                bbs = convert_bounding_boxes(img.shape, annot)
                img, bbs = seq(image=img, bounding_boxes=bbs)
                annot = revert_bounding_boxes(bbs)
        sample = {'img': img, 'annot': annot}
        if self.transform:
            sample = self.transform(sample)
        return sample
    def context_len(self):
        """Number of frames currently in the short-term context pool."""
        return len(self.context)
    def add_context(self, new_context, remember_rate=0.1):
        """Replace the context pool with *new_context* (None entries dropped);
        with probability *remember_rate* the old pool is promoted to memory."""
        valid_context = []
        for img, bbs in new_context:
            if img is not None and bbs is not None:
                valid_context.append((img, bbs))
        # We don't want to remember every frame - could get overwhelming
        if random.uniform(0, 1) < remember_rate:
            self.learned_context += self.context
        self.context = valid_context
        # Make sure our learned context doesn't overwhelm our original dataset
        if len(self.learned_context) > self.memory:
            self.learned_context = random.sample(self.learned_context, self.memory // 2)
    def set_init_frame(self, first_frame):
        """Install a new seed frame; the old one is retired into learned memory."""
        self.learned_context += self.init_frame
        self.init_frame = first_frame
class InferenceDataset(Dataset):
    """In-memory dataset for inference, compatible with retinanet.

    Wraps a pre-loaded list of images; each sample carries an empty (0, 5)
    annotation array and the image scaled from [0, 255] to [0, 1].
    """
    def __init__(self, images, transform=None):
        """
        images: list of images (array-like, uint8 pixel range)
        transform: optional callable applied to each {'img', 'annot'} sample
        """
        self.images = images
        self.transform = transform
    def __len__(self):
        return len(self.images)
    def __getitem__(self, idx):
        scaled = self.images[idx].copy().astype(np.float32) / 255.0
        sample = {'img': scaled, 'annot': np.zeros((0, 5))}
        return self.transform(sample) if self.transform else sample
def collater(data):
    """Collate a list of samples into a padded batch dict for the DataLoader.

    Images are zero-padded to the largest spatial extent in the batch and
    returned as a (B, 3, H, W) tensor; annotations are padded with -1 rows
    up to the per-batch maximum count.
    """
    imgs = [s['img'] for s in data]
    annots = [s['annot'] for s in data]
    scales = [s['scale'] for s in data]
    pad_ws = [s['pad_w'] for s in data]
    pad_hs = [s['pad_h'] for s in data]
    filenames = [s['filename'] for s in data if 'filename' in s]
    augflags = [s['augmented'] for s in data] # Added
    idxs = [s['index'] for s in data] # Added
    # NOTE(review): "widths" holds shape[0] (rows) and "heights" shape[1]
    # (cols) — swapped relative to image convention, but used consistently
    # below, so the padding is still correct.
    widths = [int(s.shape[0]) for s in imgs]
    heights = [int(s.shape[1]) for s in imgs]
    batch_size = len(imgs)
    max_width = np.array(widths).max()
    max_height = np.array(heights).max()
    # zero-pad every image into the common batch canvas (NHWC at this point)
    padded_imgs = torch.zeros(batch_size, max_width, max_height, 3)
    for i in range(batch_size):
        img = imgs[i]
        padded_imgs[i, :int(img.shape[0]), :int(img.shape[1]), :] = img
    max_num_annots = max(annot.shape[0] for annot in annots)
    if max_num_annots > 0:
        # pad annotation lists with -1 rows up to the per-batch maximum
        annot_padded = torch.ones((len(annots), max_num_annots, 5)) * -1
        if max_num_annots > 0:
            for idx, annot in enumerate(annots):
                if annot.shape[0] > 0:
                    annot_padded[idx, :annot.shape[0], :] = annot
    else:
        # no sample has annotations: emit a single all -1 row per sample
        annot_padded = torch.ones((len(annots), 1, 5)) * -1
    padded_imgs = padded_imgs.permute(0, 3, 1, 2)  # NHWC -> NCHW
    result = {'img': padded_imgs, 'annot': annot_padded, 'scale': scales, 'pad_w': pad_ws, 'pad_h': pad_hs}
    if len(filenames) > 0:
        result['filename'] = filenames
    result['augmented'] = augflags # Added
    result['index'] = idxs # Added
    return result
class Resizer(object):
    """Resize a sample so its short side is ~min_side (capped by max_side),
    zero-pad each spatial dim up to a multiple of 32, and convert to tensors."""
    #def __call__(self, sample, min_side=608-4*32, max_side=1024-4*32):
    def __call__(self, sample, min_side=416-3*32, max_side=1056):
        image, annots = sample['img'], sample['annot']
        rows, cols, cns = image.shape
        smallest_side = min(rows, cols)
        # rescale the image so the smallest side is min_side
        scale = min_side / smallest_side
        # check if the largest side is now greater than max_side, which can happen
        # when images have a large aspect ratio
        largest_side = max(rows, cols)
        if largest_side * scale > max_side:
            scale = max_side / largest_side
        # resize the image with the computed scale
        image = skimage.transform.resize(image, (int(round(rows * scale)), int(round((cols * scale)))))
        rows, cols, cns = image.shape
        # NOTE(review): when a side is already a multiple of 32 this still pads
        # a full extra 32 pixels ((32 - n % 32) == 32) — confirm intended.
        pad_w = 32 - rows % 32
        pad_h = 32 - cols % 32
        new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
        new_image[:rows, :cols, :] = image.astype(np.float32)
        # annotation coordinates scale with the image
        annots[:, :4] *= scale
        result = {'img': torch.from_numpy(new_image), 'annot': torch.from_numpy(annots), 'scale': scale, 'pad_w': pad_w, 'pad_h': pad_h}
        if 'filename' in sample:
            result['filename'] = sample['filename']
        return result
class Augmenter(object):
    """Randomly mirror a sample horizontally.

    With probability ``flip_x`` the image is flipped left-right and the box
    x-coordinates are mirrored to match. ``gauss`` is accepted for interface
    compatibility but unused.
    """
    def __call__(self, sample, flip_x=0.5, gauss=0.3):
        if np.random.rand() < flip_x:
            image = sample['img'][:, ::-1, :]
            annots = sample['annot']
            _, cols, _ = image.shape
            # mirror x1/x2 around the image width (and swap their roles)
            left = annots[:, 0].copy()
            right = annots[:, 2].copy()
            annots[:, 0] = cols - right
            annots[:, 2] = cols - left
            sample['img'] = image
            sample['annot'] = annots
        return sample
class Normalizer(object):
    """Standardize image channels with ImageNet mean/std; annotations pass through."""
    def __init__(self):
        self.mean = np.array([[[0.485, 0.456, 0.406]]])
        self.std = np.array([[[0.229, 0.224, 0.225]]])
    def __call__(self, sample):
        normed = (sample['img'].astype(np.float32) - self.mean) / self.std
        result = {'img': normed, 'annot': sample['annot']}
        # keep the filename key when the upstream transform attached one
        if 'filename' in sample:
            result['filename'] = sample['filename']
        return result
class UnNormalizer(object):
    """Invert ImageNet-style per-channel normalization on a (C, H, W) tensor."""
    def __init__(self, mean=None, std=None):
        # Use `is None` (identity) instead of `== None`: equality can be
        # overloaded (e.g. numpy arrays return element-wise arrays), which
        # makes `== None` both un-idiomatic and potentially incorrect.
        if mean is None:
            self.mean = [0.485, 0.456, 0.406]
        else:
            self.mean = mean
        if std is None:
            self.std = [0.229, 0.224, 0.225]
        else:
            self.std = std
    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be un-normalized.
        Returns:
            Tensor: Un-normalized copy (channel * std + mean); input untouched.
        """
        tens = tensor.clone()
        for t, m, s in zip(tens, self.mean, self.std):
            t.mul_(s).add_(m)
        return tens
class ContextSampler(Sampler):
    """Batch sampler for ContextDataset: mixes original dataset images with
    context / learned-context / seed-frame indices (which live past
    len(data_source.image_names) in the dataset's index space)."""
    def __init__(self, data_source, batch_size=1, crystalize=False):
        self.data_source = data_source
        self.batch_size = batch_size
        # crystalize: train exclusively on the learned memory pool
        self.crystalize = crystalize
        self.groups = self.group_images()
    def __iter__(self):
        random.shuffle(self.groups)
        for group in self.groups:
            yield group
    def __len__(self):
        # count of "real" items: dataset images + learned context, i.e. the
        # dataset total minus the current context pool and the seed frame
        return len(self.data_source) - len(self.data_source.context) - len(self.data_source.init_frame)
    def group_images(self):
        if self.crystalize: # We no longer infer new bounding boxes, rather train exclusively on our learned memory
            order = list(range(len(self.data_source.learned_context)))
            random.shuffle(order)
            # wrap around so every batch is full; offset by len(image_names)
            # because learned-context indices start there in the dataset
            return [[order[x % len(order)] + len(self.data_source.image_names) for x in range(i, i + self.batch_size)] for i in
                    range(0, len(order), self.batch_size)]
        c_len = self.data_source.context_len()
        if self.data_source.context_len() == 0:
            # no context yet: plain one-image batches
            return [[i] for i in range(len(self))]
        groups = []
        for i in range(len(self)):
            group = [i % len(self.data_source.image_names), len(self) + i % c_len, len(self) + (i + 1) % c_len] # Include an image from the original dataset
            if len(self.data_source.init_frame) > 0 and random.uniform(0,1) < 0.15: # 15% we include the seed frame
                group.append(len(self) + c_len)
            if len(self.data_source.learned_context) > 0:
                group.append(len(self.data_source.image_names) + i % len(self.data_source.learned_context)) # Include a memory frame
            groups.append(group)
        return groups
    # return [[i, len(self) + i % c_len] + [i + c_len] if len(self.data_source.init_frame) > 0 and random.uniform(0,1) < 0.1 else [] for i in range(len(self))]
class AspectRatioBasedSampler(Sampler):
    """Batch sampler that groups images of similar aspect ratio together,
    yielding the batches in random order each epoch."""
    def __init__(self, data_source, batch_size, drop_last):
        self.data_source = data_source
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.groups = self.group_images()
    def __iter__(self):
        random.shuffle(self.groups)
        yield from self.groups
    def __len__(self):
        total = len(self.data_source)
        if self.drop_last:
            return total // self.batch_size
        return (total + self.batch_size - 1) // self.batch_size
    def group_images(self):
        # order the indices by aspect ratio, then cut into batch-sized chunks,
        # wrapping around so the final chunk is always full
        order = sorted(range(len(self.data_source)),
                       key=self.data_source.image_aspect_ratio)
        total = len(order)
        return [[order[x % total] for x in range(start, start + self.batch_size)]
                for start in range(0, total, self.batch_size)]
# New
class BalancedSampler(Sampler):
    """Batch sampler that balances batches across class labels: each batch
    draws one image per label bucket (plus, optionally, the no-annotation
    bucket, pseudo-label -1)."""
    def __init__(self, data_source, batch_size, drop_last, ignore_negatives=False):
        # NOTE(review): batch_size/drop_last are accepted for interface
        # parity with the other samplers, but batch_size is overwritten in
        # group_images() and drop_last is never used.
        self.data_source = data_source
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.ignore_negatives = ignore_negatives
        self.groups = self.group_images()
    def __iter__(self):
        random.shuffle(self.groups)
        for group in self.groups:
            yield group
    def __len__(self):
        return len(self.groups)
    def group_images(self):
        # Bucket every image index under the least-populated label it
        # contains; images with no annotations go to pseudo-label -1.
        groups_by_label = {}
        for label in self.data_source.labels:
            groups_by_label[label] = []
        groups_by_label[-1] = []
        indices = list(range(len(self.data_source)))
        random.shuffle(indices)
        for index in indices:
            annotations = self.data_source.load_annotations(index)
            if annotations.shape[0] == 0:
                groups_by_label[-1].append(index)
                continue
            best_label = -1
            group_size = float('inf')
            for label in groups_by_label:
                if label in annotations[:, 4] and len(groups_by_label[label]) < group_size:
                    best_label = label
                    group_size = len(groups_by_label[label])
            groups_by_label[best_label].append(index)
        # keep similar aspect ratios adjacent inside each bucket
        for key in groups_by_label:
            groups_by_label[key].sort(key=lambda x: self.data_source.image_aspect_ratio(x))
        if self.ignore_negatives:
            del groups_by_label[-1]
        # We have built up a dict that maps labels to images that contain that label
        largest = max([len(group) for group in groups_by_label.values()])
        groups = []
        for i in range(largest):
            # sweep proportionally through each bucket so every batch holds
            # one representative per (non-empty) label
            groups.append([group[int(float(i) / float(largest) * len(group))] for group in groups_by_label.values() if len(group) > 0])
        self.batch_size = len(groups[0])
        return groups
# Default imgaug augmentation pipeline (used by ContextDataset.__getitem__).
seq = iaa.Sequential([
    iaa.Fliplr(0.5), # horizontal flips
    iaa.Flipud(0.6), # vertical flips; NOTE(review): 0.6 is unusually high — confirm intended
    iaa.Crop(percent=(0, 0.2)), # random crops
    # Small gaussian blur with random sigma between 0 and 0.25.
    # But we only blur about 50% of all images.
    iaa.Sometimes(
        0.5,
        iaa.GaussianBlur(sigma=(0, 0.25)),
    ),
    iaa.Sometimes(
        0.5,
        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.5),
    ),
    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
    iaa.LinearContrast((0.75, 1.5)),
    iaa.Multiply((0.8, 1.3), per_channel=0.5),
    iaa.Affine(
        scale={"x": (0.8, 1.25), "y": (0.8, 1.25)},
        translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
        rotate=(-15, 15),
        order=[0,1],
        shear=(-5,5),
    ),
    iaa.PiecewiseAffine(scale=(0.01, 0.05)),
    iaa.SimplexNoiseAlpha(iaa.OneOf([
        iaa.EdgeDetect(alpha=(0.5, 1.0)),
        iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),])),
], random_order=True) # apply augmenters in random order
def convert_bounding_boxes(img_shape, annotations):
    """Wrap an Nx5 [x1, y1, x2, y2, label] array as an imgaug BoundingBoxesOnImage."""
    boxes = [BoundingBox(x1=row[0], y1=row[1], x2=row[2], y2=row[3], label=row[4])
             for row in annotations]
    return BoundingBoxesOnImage(boxes, shape=img_shape)
def revert_bounding_boxes(bbs):
    """Convert imgaug-style bounding boxes back to an Nx5 annotation array.

    Args:
        bbs: iterable of objects exposing x1, y1, x2, y2 and label attributes
            (e.g. an imgaug BoundingBoxesOnImage).
    Returns:
        numpy float64 array of shape (N, 5) with rows [x1, y1, x2, y2, label];
        shape (0, 5) when there are no boxes.
    """
    # Collect all rows first and build the array once: the previous
    # np.append-per-box loop copied the whole array on every iteration,
    # which is quadratic in the number of boxes.
    rows = [[bb.x1, bb.y1, bb.x2, bb.y2, bb.label] for bb in bbs]
    if not rows:
        return np.zeros((0, 5))
    return np.asarray(rows, dtype=np.float64)
# Black out all background -
def blacken(img, annots):
    """Zero every pixel outside the annotated [x1, y1, x2, y2, ...] boxes, in place."""
    # Boolean mask the same shape as the image: True marks pixels to black out.
    background = np.greater_equal(img, 0)
    for box in annots:
        x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
        background[y1:y2, x1:x2, :] = False
    img[background] = 0
    return img
|
<filename>RESOURCES/search.py
import sys
import copy
import re
import elist.elist as elel
import tlist.tlist as tltl
import estring.estring as eses
import edict.edict as eded
import xdict.CrtableLib.crtable as xcr
from xdict.jprint import pdir
from xdict.jprint import pobj
import navegador5.file_toolset as nvft
#http://doc.chacuo.net/ascii
# Spanish special characters, referenced by Unicode/Latin-1 code point:
DD_U = chr(252)    # ü (u with diaeresis)
AC_A = chr(225)    # á (a acute)
AC_E = chr(233)    # é
AC_O = chr(243)    # ó
AC_I = chr(237)    # í
AC_U = chr(250)    # ú
ENE = chr(241)     # ñ
R_QM = chr(191)    # ¿ (inverted question mark)
R_EXCM = chr(161)  # ¡ (inverted exclamation mark)
ARRS = {}  # scratch dict of intermediate word lists, filled by split_words_repo()
def no_num_cond_func(ele):
    """Return True when the string *ele* contains no ASCII digit."""
    # re.search already answers the question; no need to build a pattern
    # object and branch on the Match by hand (re caches compiled patterns).
    return re.search(r"[0-9]", ele) is None
def split_words_repo():
    """Filter the raw Granada Spanish word dump down to single words that
    contain no whitespace, punctuation marks, or digits, and write the
    result to granada_es.all.single.nomark.arr.
    Each intermediate stage is kept in the module-level ARRS dict (keys 0-10)
    for inspection/debugging.
    """
    ARRS[0] = nvft.read_json(fn='granada_es.all.arr',op='r+')
    ARRS[1] = elel.array_map(ARRS[0],lambda ele:str.strip(ele," "))
    ARRS[2] = elel.cond_select_values_all(ARRS[1],cond_func=lambda ele:not(" " in ele))
    ARRS[3] = elel.cond_select_values_all(ARRS[2],cond_func=lambda ele:not("!" in ele))
    ARRS[4] = elel.cond_select_values_all(ARRS[3],cond_func=lambda ele:not(R_EXCM in ele))
    ARRS[5] = elel.cond_select_values_all(ARRS[4],cond_func=lambda ele:not("?" in ele))
    ARRS[6] = elel.cond_select_values_all(ARRS[5],cond_func=lambda ele:not(R_QM in ele))
    ARRS[7] = elel.cond_select_values_all(ARRS[6],cond_func=lambda ele:not("-" in ele))
    ARRS[8] = elel.cond_select_values_all(ARRS[7],cond_func=lambda ele:not("," in ele))
    ARRS[9] = elel.cond_select_values_all(ARRS[8],cond_func=lambda ele:not("_" in ele))
    ARRS[10] = elel.cond_select_values_all(ARRS[9],cond_func=no_num_cond_func)
    nvft.write_json(fn='granada_es.all.single.nomark.arr',json=ARRS[10],op='w+')
def acenize(s):
    """Replace each plain vowel in *s* with its acute-accented counterpart."""
    return s.translate(str.maketrans({'a': AC_A, 'e': AC_E, 'o': AC_O, 'i': AC_I, 'u': AC_U}))
def unacenize(s):
    """Replace each acute-accented vowel in *s* with its plain counterpart."""
    return s.translate(str.maketrans({AC_A: 'a', AC_E: 'e', AC_O: 'o', AC_I: 'i', AC_U: 'u'}))
def cond_search(ele, cond, **kwargs):
    """Substring test with optional Spanish-insensitive folding.

    Args:
        ele: string to search in.
        cond: substring to look for.
    Keyword Args (all default True):
        ignore_case: compare case-insensitively.
        ignore_acen: treat accented vowels (á é í ó ú) as plain vowels.
        ignore_ddu: treat ü/Ü as u.
        ignore_ene: treat ñ/Ñ as n.
    Returns:
        True when the folded *cond* occurs inside the folded *ele*.
    """
    # dict.get with a default replaces the four repeated if/else blocks
    ignore_case = kwargs.get('ignore_case', True)
    ignore_acen = kwargs.get('ignore_acen', True)
    ignore_ddu = kwargs.get('ignore_ddu', True)
    ignore_ene = kwargs.get('ignore_ene', True)
    if ignore_ddu:
        ele = ele.replace('ü','u').replace('Ü','u')
        cond = cond.replace('ü','u').replace('Ü','u')
    if ignore_ene:
        # Fold both cases, mirroring the ü/Ü handling above. The original
        # only replaced lowercase 'ñ'; since this replacement runs BEFORE
        # lower(), an uppercase 'Ñ' was never folded even with
        # ignore_case=True (e.g. 'NIÑO' failed to match 'nino').
        ele = ele.replace('ñ','n').replace('Ñ','n')
        cond = cond.replace('ñ','n').replace('Ñ','n')
    if ignore_case:
        ele = ele.lower()
        cond = cond.lower()
    if ignore_acen:
        ele = unacenize(ele)
        cond = unacenize(cond)
    return cond in ele
def search(arr,s):
    """Print (uncolored) and return every entry of *arr* containing *s*,
    using the accent/case-insensitive cond_search matcher."""
    indexes = elel.cond_select_indexes_all(arr,cond_func = cond_search,cond_func_args=[s])
    values = elel.select_indexes(arr,indexes)
    pobj(values,with_color=0)
    return(values)
# CLI entry: `python search.py <term> [wordlist]`.
# If a second argument is given it is passed directly as the word "array";
# on any failure (e.g. missing argv[2]) fall back to searching the
# pre-built single-word list. NOTE(review): the bare except also hides
# genuine errors raised inside search() itself — consider narrowing to
# IndexError.
try:
    search(sys.argv[2],sys.argv[1])
except:
    esd = nvft.read_json(fn='granada_es.all.single.nomark.arr',op='r+')
    search(esd,sys.argv[1])
else:
    pass
|
# 14. sls/remove_list
# Method is GET (but ought to be POST or DELETE)
# I can't tell from the old documentation what this service is supposed to do.
import sys, unittest, json
sys.path.append('./')
sys.path.append('../')
import webapp, lists
# Service under test and the HTTP method exercised by these tests.
service = webapp.get_service(5005, 'sls/remove_list')
http_method = 'POST'
# Old doc says method is GET; that's wrong. Issue?
# Works with GET; that's also wrong. TBD: Issue, definitely.
class TestSlsRemoveList(webapp.WebappTestCase):
    """Tests for the sls/remove_list service (removes a user's list)."""
    @classmethod
    def get_service(cls):
        # Renamed parameter `self` -> `cls`: this is a classmethod, so the
        # first argument is the class, not an instance.
        return cls is not None and service
    list_id = None  # id of the sample list created in setUpClass
    @classmethod
    def setUpClass(cls):
        webapp.WebappTestCase.setUpClass()
        cls.list_id = lists.insert_sample_list()
    def test_get_should_fail(self):
        """As of 2017-11-05, we get either 400
        "Error: Missing parameter 'user_id'" which is incorrect in two ways,
        or Error: Missing parameter 'access_token' (if access token works)"""
        user_id = webapp.config('user_id')
        list_id = self.__class__.list_id
        params = {u'user_id': user_id, u'list_id': list_id}
        x = service.get_request('GET', params).exchange() #fails, but how?
        mess = x.json().get(u'message')
        # This is not a 'safe' HTTP exchange, so cannot use GET to do it
        # (according to HTTP spec).
        self.assert_response_status(x, 405, mess)
    def test_no_parameter(self):
        """ What if we fail to give it any parameters?"""
        x = service.get_request(http_method, None).exchange()
        self.assert_response_status(x, 400)
        mess = x.json().get(u'message')
        self.assertTrue(u'list_id' in mess or u'user_id' in mess, mess)
    def test_bad_token(self):
        """What is we give it a bad access token?
        It says "400 Error: Missing parameter 'list_id'" - which is
        WRONG In two ways.
        tbd: issue"""
        user_id = webapp.config('user_id')
        list_id = self.__class__.list_id
        example_32 = service.get_request(http_method,
                                         {u'user_id': user_id,
                                          u'access_token': u'invalid token',
                                          u'list_id': list_id})
        x = self.start_request_tests(example_32)
        # Insert: whether result is what it should be according to docs
        # 401 means unathorized
        # Informative error message?  Hard to say what is should be.
        # Something about a token maybe.
        if x.status_code != 401:
            mess = x.json().get(u'message')
            self.assertTrue(u'oken' in mess)
            json.dump(x.to_dict(), sys.stdout, indent=2)
    # Insert here: edge case tests
    # Insert here: inputs out of range, leading to error or long delay
    # Insert here: error-generating conditions
    #  (See ../README.md)
    def test_example_32(self):
        """I'm getting "Error: Missing parameter 'user_id'"
        which isn't right since I'm supplying it...
        Issue"""
        (user_id, access_token) = self.user_credentials()
        list_id = self.__class__.list_id
        example_32 = service.get_request(http_method,
                                         {u'user_id': user_id,
                                          u'access_token': access_token,
                                          u'list_id': list_id})
        x = self.start_request_tests(example_32)
        mess = x.json().get(u'message')
        self.assert_success(x, mess)
        # Insert: whether result is what it should be according to docs
    @classmethod
    def tearDownClass(cls):
        # Function-call print is valid in both Python 2 and 3; the original
        # `print 'cleaning up'` statement is a SyntaxError under Python 3.
        print('cleaning up')
        lists.cleanup()
        webapp.WebappTestCase.tearDownClass()
# Delegate to the shared webapp test runner when invoked directly.
if __name__ == '__main__':
    webapp.main()
|
import operator
from datetime import date, timedelta
from functools import reduce
from django.contrib.postgres.search import SearchQuery, SearchVector
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .forms import SearchForm
from .models import Listing, ParsedListing, SearchLog
# Home Page/Index View
def index(request):
    """Render the home page: the 10 newest /r/hardwareswap listings plus an
    empty search form pre-filled with today's date."""
    context = {}
    # Ensure the visitor has a session (its key is logged with searches).
    if not request.session.exists(request.session.session_key):
        request.session.create()
    listings = Listing.objects.filter(subreddit__iexact='hardwareswap').order_by('-created_utc')
    # parsed_listings_locations = ParsedListing.objects.values('location') \
    #     .distinct().order_by('location')
    search_form = SearchForm(initial={
        'date': date.today().strftime('%Y-%m-%d'),
        'number_of_trades': 0})
    context['listings'] = listings[0:10]
    # context['locations'] = parsed_listings_locations
    context['search_form'] = search_form
    return render(request, 'home/index.html', context)
# Search View
def search(request):
    """Handle a listing search: validate the GET form, log the query, build
    the filtered queryset, and render the first 25 results.
    Redirects to the home page when the form is invalid."""
    context = {}
    if not request.session.exists(request.session.session_key):
        request.session.create()
    search_form = SearchForm(request.GET)
    search_params = {}
    if search_form.is_valid():
        search_params = {
            'search': search_form.cleaned_data['search'],
            'search_title_only': search_form.cleaned_data['search_title_only'],
            'location': search_form.cleaned_data['location'],
            'listing_type': search_form.cleaned_data['listing_type'],
            'payment_type': search_form.cleaned_data['payment_type'],
            'date': search_form.cleaned_data['date'],
            'date_within': int(search_form.cleaned_data['date_within']),
            'number_of_trades': int(search_form.cleaned_data['number_of_trades']),
            'number_of_trades_filter': search_form.cleaned_data['number_of_trades_filter'],
        }
        # Persist the query for analytics before running it.
        logged_search = SearchLog()
        logged_search.ip_address = request.META['REMOTE_ADDR']
        # HTTP_USER_AGENT is absent when the client sends no User-Agent
        # header; direct indexing raised KeyError (an HTTP 500) for such
        # clients, so fall back to an empty string.
        logged_search.user_agent = request.META.get('HTTP_USER_AGENT', '')
        logged_search.query_string = request.META['QUERY_STRING']
        logged_search.session_id = request.session.session_key
        logged_search.query_search_text = search_params['search']
        logged_search.query_search_title_only = search_params['search_title_only']
        logged_search.query_location = search_params['location']
        logged_search.query_date = search_params['date']
        logged_search.query_date_within = search_params['date_within']
        logged_search.query_trade_amount = search_params['number_of_trades']
        logged_search.query_trade_sort = search_params['number_of_trades_filter']
        logged_search.query_listing_type = search_params['listing_type']
        logged_search.query_payment_type = search_params['payment_type']
        logged_search.save()
    else:
        return HttpResponseRedirect('/')
    # parsed_listings_locations = ParsedListing.objects.values('location') \
    #     .distinct().order_by('location')
    # OR together the choices inside each multi-select filter, AND the
    # filters together, and restrict to the requested date window.
    listings = Listing.objects.filter(subreddit__iexact='hardwareswap') \
        .filter(reduce(operator.or_,
                       (Q(link_flair_text__icontains=x) for x in search_params['listing_type']))) \
        .filter(reduce(operator.or_,
                       (Q(title__icontains=x) for x in search_params['payment_type']))) \
        .filter(reduce(operator.or_, (Q(title__icontains=x) for x in search_params['location']))) \
        .filter(created_utc__gte=search_params['date'] - timedelta(days=search_params['date_within']),
                created_utc__lte=search_params['date'] + timedelta(days=search_params['date_within']))
    if search_params['search']:
        # Full-text search over the title (and optionally the self-text).
        if search_params['search_title_only']:
            vector = SearchVector('title')
            query = SearchQuery(search_params['search'])
            listings = listings.annotate(search=vector).filter(search=query)
        else:
            vector = SearchVector('title', 'selftext')
            query = SearchQuery(search_params['search'])
            listings = listings.annotate(search=vector).filter(search=query)
    # NOTE(review): number_of_trades/number_of_trades_filter are logged but
    # never applied to the queryset — confirm whether that is intentional.
    listings = listings.order_by('-created_utc')
    context['SEARCH_PARAMS'] = search_params
    context['search_form'] = search_form
    context['listings'] = listings[0:25]
    return render(request, 'search/index.html', context)
|
import datetime
import json
import os
import sys
import random
import time
from subprocess import call
import uuid
import threading
import requests
import platform
import re
# Delay that announces itself before sleeping
def waitingSeconds(sleepNumber):
    """Print a "please wait N s" notice (Chinese) and block for that many seconds."""
    message = "请等待{}s".format(sleepNumber)
    print(message)
    time.sleep(sleepNumber)
# With power=0.001 this is a millisecond-scale delay
def delayRandom(downTime, upTime, power):
    """Sleep for a random integer drawn from [downTime, upTime], scaled by *power*."""
    ticks = random.randint(downTime, upTime)
    time.sleep(ticks * power)
# Create a directory (and any missing parents)
def makeDir(dirPath):
    """Create *dirPath* if it does not exist yet.

    os.makedirs(..., exist_ok=True) is race-free (no exists-then-mkdir
    window where another thread could create the directory first) and also
    creates missing intermediate directories, generalizing the old
    single-level os.mkdir.
    """
    os.makedirs(dirPath, exist_ok=True)
# Append a log entry to today's run-log text file
def logWrite(logMessage):
    """Append *logMessage*, timestamped and tagged with the run's uuid, to
    <rootDir>/runLog/runLog_YYYY-MM-DD.txt (the directory is created on demand).
    NOTE(review): rootDir and logUUID are module globals defined elsewhere in
    this script — confirm they are initialized before the first call.
    """
    nowTime = datetime.datetime.now()
    logDir = rootDir + "runLog" + os.sep
    makeDir(logDir)
    # one file per calendar day
    logFileName = logDir + nowTime.strftime("runLog_%Y-%m-%d.txt")
    content = "\n日志时间:{}\n程序uuid:{}\n日志内容:{}\n".format(nowTime, logUUID, logMessage)
    with open(logFileName,"a",encoding = 'utf-8') as logFile:
        logFile.writelines(content)
# Issue a network request
def downloadFile(url):
    """Fetch *url* as UTF-8 text and return a 2-element list [payload, ok_flag].

    ok_flag: 1 = success (payload is the response body);
             0 = failure (payload is an error message; the failure increments
                 the global requestErrorTimes counter and is logged).
    A small random delay runs before each request to throttle API calls.
    """
    global requestErrorTimes
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'}
    requestsMinTime = 200  # min random pre-request delay toward bilibili, ms
    requestsMaxTime = 500  # max random pre-request delay toward bilibili, ms
    delayRandom(requestsMinTime, requestsMaxTime, 0.001)
    try:
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()  # raises on HTTP error status
        r.encoding = 'utf-8'
        response = [r.text, 1]  # second element flags success
    except Exception:
        # Was a bare `except:`: narrowed to Exception so KeyboardInterrupt /
        # SystemExit still propagate while network and HTTP errors keep the
        # original best-effort fallback behavior.
        requestErrorTimes += 1
        errorMessage = "【{}】请求出错".format(url)
        response = [errorMessage, 0]
        logWrite(errorMessage)
    return response
#直播下载对象
class UPUP(object):
    def __init__(self,upMessage):
        """Build a live-stream downloader for one uploader; settings come from
        the *upMessage* config dict (see refreshParam)."""
        self.upMessage = upMessage
        self.live = {'name': '【未初始化】', 'roomid': '【未初始化】', 'title': '【未初始化】', 'status': 0, 'streamUrl': '【未初始化】'} # status: 1 = currently live; streamUrl: direct stream URL; the placeholder strings mean "uninitialized"
        self.downloadDir = ""
        self.refreshTimes = 0  # how many times the live status has been polled
        self.tempNumber = 0
        self.nextTime = datetime.datetime.now()
        self.mid = 0  # bilibili member (uploader) id
        self.downTime = "18:30" # poll the live status at high frequency in the daily window [downTime, upTime]
        self.upTime = "22:00" # if upTime is earlier than downTime it is taken to be on the next day
        self.minTime = 20 # minimum random interval between status refreshes, seconds
        self.maxTime = 30 # maximum random interval between status refreshes, seconds
        self.addTime = 120 # extra interval added outside the busy window; may be negative, not recommended
        self.qualityLive = 10000 # stream quality: 20000=4K, 10000=original, 401=Dolby, 400=Blu-ray, 250=super clear, 150=HD, 80=smooth
        self.refreshParam()
    # Data (re)initialization
    def refreshParam(self):
        """Load per-uploader settings from self.upMessage, fetch the live-room
        info, create the download directory, and start the daemon worker
        threads (scheduler, stream downloader, danmu fetcher)."""
        DATA = self.upMessage
        mid = DATA['mid']
        print('{}正在初始化基本信息'.format(mid))
        down2up = DATA['down2up'].split('-')  # "HH:MM-HH:MM" busy window
        min2max = DATA['min2max'].split('-')  # "min-max" refresh interval bounds, seconds
        self.addTime = DATA['addTime']
        self.qualityLive = DATA['qualityLive']
        self.mid = mid
        self.downTime= down2up[0]
        self.upTime= down2up[1]
        self.minTime = int(min2max[0])
        self.maxTime = int(min2max[1])
        self.getUserInfo()  # fills self.live (name/roomid/title/status)
        self.downloadDir = rootDir + self.live['name'] + "_"+ str(self.mid)
        makeDir(self.downloadDir)
        threading.Thread(target=self.taskListening, daemon=True).start()
        threading.Thread(target=self.liveDownload, daemon=True).start()
        threading.Thread(target=self.getDanmu, daemon=True).start()
        logMessage = '【{}初始化完成】'.format(mid)
        logWrite(logMessage)
# 根据ID获取up基本信息,也是刷新直播状态
def getUserInfo(self):
self.refreshTimes = self.refreshTimes + 1
url = 'https://api.bilibili.com/x/space/acc/info?mid={}&jsonp=jsonp'.format(self.mid)
#https://api.bilibili.com/x/space/acc/info?mid=23191782&jsonp=jsonp
r = downloadFile(url)
if r[1]:
response = json.loads(r[0])
self.live['name'] = response['data']['name']
self.live['roomid'] = response['data']['live_room']['roomid']
self.live['title'] = response['data']['live_room']['title']
self.live['status'] = response['data']['live_room']['liveStatus']
return self.live['status']
# 获取直播流url
def getStreamUrl(self):
url = 'https://api.live.bilibili.com/room/v1/Room/playUrl?cid={}&qn={}&platform=web'.format(
self.live['roomid'], self.qualityLive)
r = downloadFile(url)
if r[1]:
response = json.loads(r[0])
self.live['streamUrl'] = response['data']['durl'][0]['url']
return self.live['streamUrl']
# 线程:监听时间变化和改变直播刷新频率
def taskListening(self):
changeFlage = 0 #是否设置分段刷新
if self.downTime and self.upTime:
changeFlage = 1
while changeFlage:
currentTime = datetime.datetime.now()
nowDate = datetime.datetime.now().strftime("%Y-%m-%d_")
downTime = datetime.datetime.strptime(nowDate + self.downTime,"%Y-%m-%d_%H:%M")
upTime = datetime.datetime.strptime(nowDate + self.upTime,"%Y-%m-%d_%H:%M")
#upTime为次日
if upTime < downTime:
upTime = upTime + datetime.timedelta(days=1)
if currentTime > downTime and currentTime < upTime:
self.tempNumber = 0
else:
self.tempNumber = self.addTime
time.sleep(10)
# 线程:下载直播弹幕
def getDanmu(self):
DanmuURL = 'https://api.live.bilibili.com/xlive/web-room/v1/dM/gethistory'
DanmuData = {'roomid':self.live['roomid'],'csrf_token':'','csrf':'','visit_id':'',}
DanmuHeaders = {'Host':'api.live.bilibili.com','User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0',}
lines_seen = set() #存储已写入弹幕信息用于去重
while runFlag:
DanmuFileName = self.downloadDir + os.sep + datetime.datetime.now().strftime("{}_%Y-%m-%d.txt".format(self.live['name']))
try:
if self.live['status']:
html = requests.post(url=DanmuURL,headers=DanmuHeaders,data=DanmuData).json()
# 解析弹幕列表
for content in html['data']['room']:
msg = content['timeline'] +' '+ content['nickname'] + ': ' + content['text'] + '\n' # 记录发言
if msg not in lines_seen:
lines_seen.add(msg)
with open(DanmuFileName,"a",encoding = 'utf-8') as logFile:
logFile.writelines(msg)
if len(lines_seen)>10000:
lines_seen.clear()
except Exception as ex:
logWrite(ex)
time.sleep(1)
# 线程:监听当前对象直播与下载
def liveDownload(self):
while runFlag:
try:
DelayTime = random.randint(self.minTime+self.tempNumber, self.maxTime+self.tempNumber)
self.nextTime = datetime.datetime.now() + datetime.timedelta(seconds=DelayTime)
if self.getUserInfo() == 1:
streamUrl = self.getStreamUrl()
liveTitle = re.sub('[\/:*?"<>|]','_',self.live['title'])
liveFileName = datetime.datetime.now().strftime("{}_%Y-%m-%d_%H-%M-%S_{}.flv".format(self.live['name'],liveTitle))
#下载线程会自己卡在这里
call([aria2cDir, streamUrl, '-d', self.downloadDir, '-o', liveFileName])
else:
time.sleep(DelayTime-4)
except Exception as ex:
logWrite(ex)
waitingSeconds(4)
# Thread: create the UP objects and keep refreshing the status display.
def liveListening(upIDlist):
    """Spin up one UPUP per enabled config entry, then loop forever refreshing
    the console display and the liveStatus.txt summary file."""
    upObjectList = []
    global allMessage
    liveStatusFileName = rootDir + "liveStatus.txt"
    # Start a worker set for each enabled UP.
    for i in upIDlist:
        if i['isOpen']:
            liveVideo = UPUP(upMessage=i)
            upObjectList.append(liveVideo)
            waitingSeconds(4)
    # Continuously refresh the on-screen information.
    while runFlag:
        allMessage = ""
        liveOn = [] # currently streaming
        liveWaiting = [] # waiting to go live
        for i in upObjectList:
            upName = i.live['name']
            upID = upObjectList.index(i)+1
            upNameID = "{}-{}".format(upID,i.live['name'])
            title = "\n\t直播标题:{}".format(i.live['title'])
            refreshTimes = "\n\t刷新次数:{}".format(i.refreshTimes)
            streamUrl = "\n\t下载链接:{}".format(i.live['streamUrl'])
            nextFreshTime = i.nextTime.strftime("\n\t下次刷新:%Y-%m-%d %H:%M:%S")
            roomURl = '\n\t房间链接:https://live.bilibili.com/{}'.format(i.live['roomid'])
            if i.live['status']:
                liveOn.append(upNameID)
                tempMessage = "\n\n\tUP主:{}{}{}{}{}".format(upName, refreshTimes, title, roomURl, streamUrl)
            else:
                liveWaiting.append(upNameID)
                tempMessage = "\n\n\t未开播:{}{}{}{}".format(upName, refreshTimes, nextFreshTime, roomURl)
            allMessage += tempMessage
        liveStatusMessage = "\n开播状态:\n\t正在直播{}\n\t等待开播{}\n\t请求出错次数:{}".format(liveOn, liveWaiting, requestErrorTimes)
        allMessage = liveStatusMessage + allMessage
        # Clear the terminal before re-printing the status board.
        if isWindows:
            os.system('cls')
        else:
            os.system('clear')
        print(allMessage)
        # Mirror the status board into a text file.
        liveStatusFile = open(liveStatusFileName,"w",encoding = 'utf-8')
        liveStatusFile.write(allMessage)
        liveStatusFile.close()
        time.sleep(2)
if __name__ == '__main__':
    # --- Global runtime state (read by the worker threads above) ---
    aria2cDir = r"aria2c"
    isBrowser = 0 # whether to download the live stream with a browser (not used below)
    runFlag = 1
    isWindows = 0
    requestErrorTimes = 0
    logUUID = uuid.uuid1()  # unique id for this run, written to every log entry
    rootDir = os.getcwd() + os.sep
    version = "BilibiliLive_V1.1"
    allMessage = "程序初始化完成\n程序版本:{}".format(version)
    if platform.system() == "Windows":
        isWindows = 1
    # Optional JSON config file passed as argv[1]; overrides directories and user list.
    if len(sys.argv)>1:
        userFile = sys.argv[1]
        f = open(userFile,"r",encoding="utf-8")
        data = json.load(f)
        f.close()
        uplist = data['user']
        sysConfig = data['sysConfig'][0]
        rootDir = sysConfig['globalDownloadDir'] + os.sep
        aria2cDir = sysConfig['aria2cDir']
        makeDir(rootDir)
    else:
        uplist = []
    # All real work happens in daemon threads; the main thread just idles.
    threadLive = threading.Thread(target=liveListening, args=(uplist,), daemon=True)
    threadLive.start()
    while runFlag:
        time.sleep(100)
|
<reponame>micferr/ADM_HW3
import csv
import os
from datetime import datetime
from typing import Tuple, Optional
import bs4
import re
from bs4 import BeautifulSoup
from pathlib import Path
from constants import ANIMES_DIRECTORY, PARSED_ANIMES_DIRECTORY
from utils import Anime, prepare_to_download, parsed_anime_filename
def get_tag_text(tag: Optional[bs4.element.Tag]) -> Optional[str]:
    """Utility to retrieve and strip the text of a tag, defaulting to None if missing."""
    if not tag:
        return None
    direct_texts = tag.find_all(text=True, recursive=False)
    return ''.join(direct_texts).strip()
def extract_title(soup: BeautifulSoup) -> str:
    """Extract an anime's title."""
    title_tag = soup.find("div", class_="h1-title").div.h1.strong
    return title_tag.get_text().strip()
def _parse_airing_dates(dates: Optional[str]) -> Tuple[Optional[datetime], Optional[datetime]]:
    """Parse and return the airing date(s) of the anime.

    Handles "X to Y", "X - Y" and single-date strings; missing data and
    unrecognised formats yield (None, None).
    """
    if not dates:
        # Guard None/empty input (the annotation allows it; previously
        # re.search(..., None) raised TypeError).
        return None, None

    def to_datetime(d: str):
        if not any(char.isdigit() for char in d):  # To account for missing data (Unknown, Not Available, ?)
            return None
        return datetime.strptime(d, "%b %d, %Y")

    for separator in (" to ", " - "):
        if separator in dates:
            date1, date2 = dates.split(separator)
            return to_datetime(date1), to_datetime(date2)
    if re.match("[A-Za-z]+ [0-9]+, [0-9]+", dates):  # Only one date
        date = to_datetime(dates)
        return date, date
    # Fixed: the function previously fell through and implicitly returned a
    # bare None, which made callers crash when unpacking the result pair.
    return None, None
def extract_type_episodes_and_dates(soup: BeautifulSoup) -> Tuple[str, int, Optional[datetime], Optional[datetime]]:
    """Extract an anime's type, number of episodes and start/end dates.

    Returns (type, episodes, start_date, end_date); each field falls back to
    None when its sidebar entry is missing or unparsable.
    """
    type_tag = soup.find("span", text=lambda text: text and "Type:" in text)
    _type = get_tag_text(type_tag.parent) if type_tag else None

    episodes = None
    episodes_tag = soup.find("span", text=lambda text: text and "Episodes:" in text)
    if episodes_tag:
        try:
            episodes = int(get_tag_text(episodes_tag.parent))
        except (TypeError, ValueError):
            # "Unknown" episode count or missing text; previously a bare
            # except that also hid programming errors.
            pass

    start_date, end_date = None, None
    dates_tag = soup.find("span", text=lambda text: text and "Aired:" in text)
    if dates_tag:
        try:
            start_date, end_date = _parse_airing_dates(get_tag_text(dates_tag.parent))
        except (TypeError, ValueError):
            # Unparsable date string, or _parse_airing_dates returned a
            # non-tuple for an unrecognised format.
            pass
    return _type, episodes, start_date, end_date
def extract_score_info(soup: BeautifulSoup, rank: int) -> Tuple[int, float, int, int, int]:
    """Extract an anime's members, score, users, rank and popularity.

    ``rank`` is passed through unchanged; every scraped field becomes None
    when missing or unparsable.
    """
    members, score, users, popularity = 0, 0.0, 0, 0
    try:
        members_text = get_tag_text(soup.find("span", class_="numbers members"))
        if members_text:
            members = int(members_text.replace(",", ""))
    except (TypeError, ValueError):
        members = None
    try:
        # Fixed: parse the score as float — MAL scores are decimals like
        # "8.75", so the previous int() always raised and left score = None.
        score = float(get_tag_text(soup.find("span", {"itemprop": "ratingValue"})))
    except (TypeError, ValueError):
        score = None
    try:
        # Fixed: the attribute filter was the set {"itemprop", "ratingCount"}
        # instead of a dict, so the intended tag was never matched.
        users = int(get_tag_text(soup.find("span", {"itemprop": "ratingCount"})))
    except (TypeError, ValueError):
        users = None
    try:
        popularity_text = get_tag_text(soup.find("span", class_="numbers popularity"))
        if popularity_text:
            popularity = int(popularity_text.replace(",", ""))
    except (TypeError, ValueError):
        popularity = None
    return members, score, users, rank, popularity
def extract_description(soup: BeautifulSoup) -> Optional[str]:
    """Extract an anime's synopsis."""
    description_tag = soup.find("p", {"itemprop": "description"})
    if description_tag:
        return get_tag_text(description_tag)
    return None
def extract_related_anime(soup: BeautifulSoup) -> list[str]:
    """Extract an anime's list of related animes (deduplicated)."""
    related_anime_table = soup.find("table", class_="anime_detail_related_anime")
    if not related_anime_table:
        return []
    related_anime_names = set()
    # Related animes must have a link (and the link must start with /anime, otherwise it's a manga).
    # Guard against href=None — bs4 calls the filter with None for <a> tags
    # without an href attribute, and re.match(pattern, None) raises TypeError.
    for anime_link in related_anime_table.find_all(
        "a", href=lambda href: href is not None and re.match("/anime", href)
    ):
        related_anime_names.add(get_tag_text(anime_link))
    return list(related_anime_names)
def extract_characters_and_voices(soup: BeautifulSoup) -> tuple[list[str], list[str]]:
    """Extract an anime's characters and their voice actors."""
    characters_div = soup.find("div", class_="detail-characters-list")  # First one is characters/VAs, second is staff
    if not characters_div:
        return [], []
    characters = [get_tag_text(tag.a) for tag in characters_div.find_all("h3", class_="h3_characters_voice_actors") or []]
    # Voice actors are links into /people/ that carry visible text.  Guard
    # against href=None (tags without the attribute) before re.search, which
    # raises TypeError on None.
    voices = [
        get_tag_text(tag)
        for tag
        in characters_div.find_all(
            "a",
            href=lambda href: href is not None and re.search("/people/", href) is not None,
            text=lambda text: text,
        ) or []
    ]
    return characters, voices
def extract_anime_staff(soup: BeautifulSoup) -> list[list[str]]:
    """Extract an anime's staff as [name, role] pairs."""
    divs = list(soup.find_all("div", class_="detail-characters-list"))
    if len(divs) < 2:
        # The second detail-characters-list div holds the staff; without it
        # there is nothing to extract.
        return []
    staff_div = divs[1]
    staff = []
    for a_tag in staff_div.find_all("a"):
        if a_tag.img:  # Skip image-only links
            continue
        try:
            role_tag = a_tag.parent.div.small
            staff.append([get_tag_text(a_tag), get_tag_text(role_tag)])
        except AttributeError:
            # Link without the expected parent/div/small structure — not a
            # staff entry.  Previously a bare except that hid real errors.
            pass
    return staff
if __name__ == "__main__":
    """
    This scripts iterates the individual anime's pages. For each anime, it parses the required information
    and saves them to a TSV file.
    """
    files_to_parse = sorted(Path(ANIMES_DIRECTORY).glob("*.html"))
    # Start index returned by prepare_to_download, so a previous run can resume.
    start_file = prepare_to_download(PARSED_ANIMES_DIRECTORY)
    try:
        for i, path in enumerate(files_to_parse[start_file:], start=start_file):
            with open(path, "r") as fin:
                soup = BeautifulSoup(fin, "html.parser")
            # NOTE(review): 'animeTtle' looks like a typo of 'animeTitle', but it
            # is used consistently below — renaming requires checking utils.Anime.
            anime = Anime()
            anime.animeTtle = extract_title(soup)
            anime.animeType, anime.animeNumEpisode, anime.releaseDate, anime.endDate = extract_type_episodes_and_dates(soup)
            # Rank is the 1-based position of the file in the sorted listing.
            (
                anime.animeNumMembers,
                anime.animeScore,
                anime.animeUsers,
                anime.animeRank,
                anime.animePopularity
            ) = extract_score_info(soup, i+1)
            anime.animeDescription = extract_description(soup)
            anime.animeRelated = extract_related_anime(soup)
            anime.animeCharacters, anime.animeVoices = extract_characters_and_voices(soup)
            anime.animeStaff = extract_anime_staff(soup)
            # Write the TSV file for this anime (header row + one data row per file)
            with open(os.path.join(PARSED_ANIMES_DIRECTORY, parsed_anime_filename(i)), "w") as tsv_file:
                tsv_writer = csv.writer(tsv_file, delimiter='\t')
                tsv_writer.writerow([
                    "animeTitle", "animeType", "animeNumEpisode", "releaseDate", "endDate",
                    "animeNumMembers", "animeScore", "animeUsers", "animeRank", "animePopularity",
                    "animeDescription", "animeRelated", "animeCharacters", "animeVoices", "animeStaff"
                ])
                tsv_writer.writerow([
                    anime.animeTtle, anime.animeType, anime.animeNumEpisode, anime.releaseDate, anime.endDate,
                    anime.animeNumMembers, anime.animeScore, anime.animeUsers, anime.animeRank, anime.animePopularity,
                    anime.animeDescription, anime.animeRelated, anime.animeCharacters, anime.animeVoices, anime.animeStaff
                ])
            # Progress indicator every 100 files.
            if i%100 == 0:
                print(f"{i}/{len(files_to_parse)}...")
    except Exception as e:
        print(str(path)) # Log which file caused the error
        raise e
|
#my colormap
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
import splat
import pandas as pd
import splat.empirical as spem
import statsmodels.nonparametric.kernel_density as kde
import numba
from astropy.io import ascii
import matplotlib
from astropy import stats as astrostats
from scipy import stats
import bisect
import os
#from . import *
#################
# Initialise splat's spectral standards once at import time (used by the
# typeToNum / classification helpers below).
splat.initializeStandards()
###############
from wisps.utils import memoize_func
# Root of the wisps data tree, derived from the WISP_CODE_PATH env variable.
WISP_PATH=os.environ['WISP_CODE_PATH']
DATA_FILES=os.path.dirname(WISP_PATH.split('wisps')[0]+('wisps')+'//data//')
# External drive holding the large pre-computed libraries (see get_big_file).
LIBRARIES='/volumes/LaCie/wispsdata/libraries/'
# Kirkpatrick et al. (2019) absolute-magnitude polynomial (evaluated at spt-30
# by absolute_mag_kirkpatrick below), with its scatter and spt validity range.
kirkpa2019pol={'pol':np.poly1d(np.flip([36.9714, -8.66856, 1.05122 ,-0.0344809])),
               'scatter':.67, 'range':[36, 44]}
def plot_annotated_heatmap(ax, data, gridpoints, columns, cmap='viridis',
                           annotate=False, vmin=0.0, vmax=1.0, textsize=14):
    """Plot an annotated heatmap of binned means on ``ax``.

    data      : dataframe; rows with NaN are dropped first
    gridpoints: number of grid points per axis
    columns   : (xcol, ycol, zcol) — x/y define the bins, z is averaged per cell
    annotate  : 'third_value' writes the cell mean, 'number' writes the cell count
    Empty cells are masked out of the pcolormesh; the y-axis is inverted at the end.
    """
    data= data.dropna()
    xcol, ycol, zcol= columns
    # Bin widths along each axis.
    step1= np.ptp(data[xcol])/gridpoints
    step2= np.ptp(data[ycol])/gridpoints
    #print (step1)
    xgrid= np.linspace(data[xcol].min(), data[xcol].max(), gridpoints)
    ygrid= np.linspace(data[ycol].min(), data[ycol].max(), gridpoints)
    # mask flags empty cells; values holds the per-cell mean of zcol.
    mask = np.zeros((len(xgrid), len(ygrid)))
    values = np.zeros((len(xgrid), len(ygrid)))
    #for annotation
    for i in range(len(xgrid)):
        #loop over matrix
        for j in range(len(ygrid)):
            # The last grid point along either axis has no bin to its right/top.
            if (i == len(xgrid)-1) | (j == len(ygrid)-1) :
                pass
            else:
                maskx= np.logical_and(data[xcol] > xgrid[i], data[xcol] <= xgrid[i]+step1)
                masky=np.logical_and(data[ycol] > ygrid[j], data[ycol] <=ygrid[j]+step2)
                zmedian= np.nanmean(data[zcol][np.logical_and(maskx, masky)])
                lenz= len(data[np.logical_and.reduce([maskx, masky])])
                if lenz == 0:
                    # Empty cell: mask it so pcolormesh leaves it blank.
                    values[j][i] = np.nan
                    mask[j][i] = 1
                else:
                    # Note the [j][i] (row = y, column = x) indexing for pcolormesh.
                    values[j][i] = zmedian
                    if annotate == 'third_value':
                        ax.text(xgrid[i]+step1/2., ygrid[j]+step2/2., f'{zmedian:.0f}',
                                ha='center', va='center', fontsize=textsize, color='#111111')
                    if annotate== 'number':
                        ax.text(xgrid[i]+step1/2., ygrid[j]+step2/2., f'{lenz:.0f}',
                                ha='center', va='center', fontsize=textsize, color='#111111')
    values2 = np.ma.array(values, mask=mask)
    cax = ax.pcolormesh(xgrid, ygrid, values2, vmin=vmin, vmax=vmax, cmap=cmap)
    #plt.axis('tight')
    # Flip the y-axis (ymax at the bottom).
    ymin, ymax = plt.ylim()
    ax.minorticks_on()
    ax.set_ylim(ymax, ymin)
    return
class Annotator(object):
    """
    Contains static methods to manipulate index-index tables.
    """
    @staticmethod
    def group_by_spt(df, **kwargs):
        """
        Label each row with the spectral-type range it falls in.

        Args:
            df (pandas dataframe): table with a numeric spectral-type column
        Keyword Args:
            spt_label (str): name of the spectral-type column (default 'Spts')
            assign_middle (bool): label bins with representative numeric types
            assign_number (bool): label bins with integer indices 0-5
            add_subdwarfs (bool): append the table passed as ``subdwarfs``
        Returns:
            the same table with a 'spt_range' column filled in
        """
        spt = kwargs.get('spt_label', 'Spts')
        # Bins start at M7 (spt=17); anything earlier is labelled 'trash'.
        df['spt_range'] = ''
        classes = ['trash', 'M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9']
        if kwargs.get('assign_middle', False):
            # Representative numeric type per bin.
            # NOTE(review): this list has only 5 entries, so the T5-T9 bin
            # below indexes classes[5] and raises IndexError; the intended
            # sixth value is unknown, so the data is left as-is — TODO confirm.
            classes = [20, 22, 27, 32, 37]
        if kwargs.get('assign_number', False):
            classes = [0, 1, 2, 3, 4, 5]
        if not 'data_type' in df.columns:
            df['data_type'] = 'templates'
        # Fixed: use df.loc[mask, col] instead of the chained
        # df[col].loc[mask] = ..., which assigns into a temporary and is
        # silently dropped under pandas copy-on-write.
        templates = df['data_type'] == 'templates'
        df.loc[(df[spt] < 17.0) & templates, 'spt_range'] = classes[0]
        df.loc[(df[spt] >= 17.0) & (df[spt] <= 20.0) & templates, 'spt_range'] = classes[1]
        df.loc[(df[spt] >= 20.1) & (df[spt] <= 25.0) & templates, 'spt_range'] = classes[2]
        df.loc[(df[spt] >= 25.1) & (df[spt] <= 30.0) & templates, 'spt_range'] = classes[3]
        df.loc[(df[spt] >= 30.1) & (df[spt] <= 35.0) & templates, 'spt_range'] = classes[4]
        df.loc[(df[spt] >= 35.1) & (df[spt] <= 40.0) & templates, 'spt_range'] = classes[5]
        df.loc[df['data_type'] == 'subdwarf', 'spt_range'] = 'subdwarf'
        if kwargs.get('add_subdwarfs', False):
            # Optionally append an externally supplied subdwarf table.
            sds = kwargs.get('subdwarfs', None)
            sds['spt_range'] = 'subdwarfs'
            df = pd.concat([df, sds], ignore_index=True, join="inner")
        return df

    @staticmethod
    def color_from_spts(spts, **kwargs):
        """
        Map spectral types to RGBA colors via a normalized colormap.

        ``spts`` should be numeric; strings are converted with float() or,
        failing that, splat.typeToNum.  The colormap (default YlOrBr) is
        normalized over the finite range of the values.
        """
        if isinstance(spts[0], str):
            try:
                spts = [float(x) for x in spts]
            except ValueError:
                # Not plain numbers — treat them as spectral-type strings.
                spts = [splat.typeToNum(x) for x in spts]
        cmap = kwargs.get('cmap', matplotlib.cm.YlOrBr)
        maxi = np.nanmax(spts)
        mini = np.nanmin(spts)
        norm = matplotlib.colors.Normalize(vmin=mini, vmax=maxi, clip=True)
        mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
        return [mapper.to_rgba(c) for c in spts]

    @staticmethod
    def reformat_table(df):
        """
        Split (value, uncertainty) tuple columns into two flat columns
        (``col`` and ``col_er``); other columns are copied unchanged.
        """
        new_df = pd.DataFrame()
        for k in df.columns:
            if isinstance(df[k].iloc[0], tuple):
                pairs = np.array(np.apply_along_axis(list, 0, df[k].values))
                new_df[k] = pairs[:, 0]
                new_df[k + '_er'] = pairs[:, 1]
            else:
                new_df[k] = df[k].values
        return new_df
@np.vectorize
def splat_teff_to_spt(teff):
    """Draw a noisy Teff (108 K Gaussian scatter) and convert it to a
    spectral-type number via the Pecaut SpT-Teff relation (re-sorted so the
    interpolation abscissa is monotonic)."""
    relation = splat.SPT_TEFF_RELATIONS['pecaut']
    order = np.argsort(relation['values'])
    noisy_teff = np.random.normal(teff, 108)
    return np.interp(noisy_teff, np.array(relation['values'])[order], np.array(relation['spt'])[order])
@np.vectorize
def splat_teff_from_spt(spt):
    """Convert a spectral-type number to Teff via the Pecaut relation, adding
    108 K of Gaussian scatter to the interpolated value."""
    relation = splat.SPT_TEFF_RELATIONS['pecaut']
    mean_teff = np.interp(spt, np.array(relation['spt']), np.array(relation['values']))
    return np.random.normal(mean_teff, 108)
@numba.jit
def make_spt_number(spt):
    """Return the numeric spectral type; string inputs are converted via splat."""
    if isinstance(spt, str):
        return splat.typeToNum(spt)
    return spt
#def get_abs_mag_contants():
#need to wrap these into a function to avoid overloading memory
#mamjk=ascii.read(DATA_FILES+'/mamajek_relations.txt').to_pandas().replace('None', np.nan)
#pec_js=mamjk.M_J.apply(float).values
#pec_jminush=mamjk['J-H'].apply(float).values
#pec_hs=pec_js-pec_jminush
#pec_spts=mamjk.SpT.apply(make_spt_number).apply(float).values
#pec_hsortedindex=np.argsort(pec_hs)
#pec_jsortedindex=np.argsort(pec_js)
# Absolute 2MASS J/H magnitudes and their rms scatter, tabulated per
# spectral-type number 16-39.
# NOTE(review): the commented-out block above defined the pec_* arrays that
# absolute_magnitude_jh() below still references; with it disabled those
# names are undefined at call time — verify the intended data source.
best_dict={'2MASS J': {\
'spt': [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39], \
'values': [10.36,10.77,11.15,11.46,11.76,12.03,12.32,12.77,13.51,13.69,14.18,14.94,14.90,14.46,14.56,15.25,14.54,14.26,13.89,14.94,15.53,16.78,17.18,17.75],\
'rms': [0.30,0.30,0.42,0.34,0.18,0.15,0.21,0.24,0.28,0.25,0.60,0.20,0.13,0.71,0.5,0.12,0.06,0.16,0.36,0.12,0.27,0.76,0.51,0.5]},
'2MASS H': {\
'spt': [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39], \
'values': [9.76,10.14,10.47,10.74,11.00,11.23,11.41,11.82,12.45,12.63,13.19,13.82,13.77,13.39,13.62,14.39,13.73,13.67,13.57,14.76,15.48,16.70,17.09,17.51],\
'rms': [0.30,0.31,0.43,0.35,0.23,0.21,0.25,0.29,0.3,0.30,0.62,0.31,0.20,0.73,0.5,0.18,0.15,0.24,0.40,0.24,0.37,0.78,0.5,0.5]}}
def absolute_mag_kirkpatrick(spt, filt):
    """Absolute magnitude from the Kirkpatrick et al. (2019) polynomial.

    Defined only for filt == '2MASS H' and 36 < spt < 44; everything else
    returns NaN.  The value is the mean of 1000 Gaussian draws around the
    polynomial prediction (the polynomial takes spt-30 as its argument).
    """
    if filt != '2MASS H':
        return np.nan
    if not (36 < spt < 44):
        return np.nan
    polynomial = kirkpa2019pol['pol']
    scatter = kirkpa2019pol['scatter']
    return np.random.normal(polynomial(spt - 30), scatter, 1000).mean()
@np.vectorize
def absolute_magnitude_jh(spt):
    """
    Return (J, H) absolute magnitudes for a spectral-type number by
    interpolating the Pecaut (2013) relations; for spt > 37 the H magnitude
    falls back to the Kirkpatrick (2019) polynomial and J stays NaN.

    NOTE(review): pec_spts / pec_hs / pec_js and their sort indices are only
    defined in the commented-out block above, so the spt <= 37 branch raises
    NameError as the module stands — confirm the intended data source.
    """
    jval, hval=(np.nan, np.nan)
    #[SHOULD ADD VEGA TO AB CONVERSION FACTOR]
    if spt <=37:
        hval=np.interp(spt, pec_spts[pec_hsortedindex], pec_hs[pec_hsortedindex])
        jval=np.interp(spt, pec_spts[pec_jsortedindex], pec_js[pec_jsortedindex])
    else:
        hval=absolute_mag_kirkpatrick(spt, '2MASS H')
    return jval, hval
def k_clip_fit(x, y, sigma_y, sigma = 5, n=6):
    '''Fit a degree-n polynomial to y vs. x and k-sigma clip until convergence.

    Residuals are normalized by sigma_y; points with normalized residual
    >= sigma are dropped and the fit repeated until nothing else is removed.
    Returns the boolean mask of points that survived clipping.
    '''
    keep = np.ones_like(y).astype(bool)
    removed = 1
    while removed > 0:
        current_fit = np.poly1d(np.polyfit(x[keep], y[keep], n))
        normalized_residuals = np.abs(y - current_fit(x)) / sigma_y
        to_drop = np.logical_and(normalized_residuals >= sigma, keep == 1)
        removed = sum(to_drop)
        keep[to_drop] = 0
    return keep
def fit_with_nsigma_clipping(x, y, y_unc, n, sigma=3.):
    """k-sigma clip the data, then fit a degree-n polynomial to the survivors.

    Returns (mask, poly1d) where mask marks the points kept by the clipping.
    """
    mask = k_clip_fit(x, y, y_unc, sigma=sigma)
    coefficients = np.polyfit(x[mask], y[mask], n)
    return mask, np.poly1d(coefficients)
@numba.vectorize("float64(float64, float64)", target='cpu')
def get_distance(absmag, rel_mag):
    """Distance in parsecs from the distance modulus: d = 10**((m - M)/5 + 1)."""
    exponent = (rel_mag - absmag) / 5. + 1.
    return 10. ** exponent
def my_color_map():
    """Build two custom blended colormaps.

    Returns a pair of LinearSegmentedColormaps: the first averages BuGn with
    Purples, the second averages cool with Greens, each over 256 samples.

    NOTE: the previous @numba.jit decorator was removed — numba cannot compile
    matplotlib colormap calls (object-mode fallback with warnings at best; a
    hard error under modern nopython-default numba).  Plain Python produces
    identical results.
    """
    bugn = plt.cm.BuGn(np.linspace(0., 1, 256))
    purples = plt.cm.Purples(np.linspace(0., 1, 256))
    cool = plt.cm.cool(np.linspace(0., 1, 256))
    greens = plt.cm.Greens(np.linspace(0., 1, 256))
    colors = np.vstack((bugn + purples) / 2)
    colorsx = np.vstack((cool + greens) / 2)
    return (mcolors.LinearSegmentedColormap.from_list('my_colormap', colors),
            mcolors.LinearSegmentedColormap.from_list('my_other_colormap', colorsx))
MYCOLORMAP, MYCOLORMAP2 = my_color_map()
@memoize_func
def stats_kde(x, **kwargs):
    """Gaussian KDE of a 1-D sample.

    Returns (grid, cdf, pdf) where the grid spans the finite range of x in
    unit steps and the cdf/pdf are evaluated on it (normal-reference bandwidth).
    """
    lo = np.nanmin(x)
    hi = np.nanmax(x)
    support = np.arange(lo, hi)
    estimator = kde.KDEMultivariate(x, bw='normal_reference', var_type='c')
    return support, estimator.cdf(support), estimator.pdf(support)
def drop_nan(x):
    """Return x as an array with NaN and infinite entries removed."""
    values = np.array(x)
    finite = np.isfinite(values)
    return values[finite]
def custom_histogram(things, grid, binsize=1):
    """Count, for each grid point g, how many entries fall in [g, g+binsize)."""
    return np.array([
        len(things[np.logical_and(g <= things, things < g + binsize)])
        for g in grid
    ])
@numba.jit
def is_in_that_classification(spt, subclass):
    """Return True when spt falls inside the named spectral subclass.

    M/L/T ranges like 'M7-L0' are bounded by their two endpoint types; a
    'y...' label matches spt >= 38; any 'subd...' label never matches.
    """
    label = subclass.lower()
    result = False
    if label[0] in ('m', 'l', 't'):
        low = splat.typeToNum(subclass[:2])
        high = splat.typeToNum(subclass[-2:])
        if low <= make_spt_number(spt) <= high:
            result = True
    if label.startswith('y') and make_spt_number(spt) >= 38:
        result = True
    if label.startswith('subd'):
        result = False
    return result
def random_draw(xvals, cdfvals, nsample=10):
    """
    Randomly draw nsample values from a discrete distribution by inverting
    its CDF with a bisection lookup.
    """
    @numba.vectorize("int32(float64)")
    def invert_cdf(u):
        return bisect.bisect(cdfvals, u)-1
    uniforms = np.random.rand(nsample)
    indices = invert_cdf(uniforms)
    return np.array(xvals)[indices]
def fit_polynomial(x, y, n=2, y_unc=None, sigma_clip=False, sigma=None):
    """
    Fit a degree-n polynomial to (x, y), optionally sigma-clipping first and
    optionally weighting by 1/y_unc.  NaN x-values are always excluded.
    """
    if sigma_clip:
        stacked = np.array([x, y]).T
        cleaned = pd.DataFrame(stacked).dropna().values
        clipped = astrostats.sigma_clip(cleaned, sigma=sigma)
        x = clipped[:, 0]
        y = clipped[:, 1]
    bad = np.isnan(x)
    if y_unc is None:
        return np.poly1d(np.polyfit(x[~bad], y[~bad], n))
    return np.poly1d(np.polyfit(x[~bad], y[~bad], n, w=1./y_unc[~bad]))
def kernel_density(distr, **kwargs):
    """
    1D kernel-density estimation: returns a scipy.stats.gaussian_kde fitted
    to ``distr`` (extra keyword arguments are forwarded to gaussian_kde).
    """
    return stats.gaussian_kde(distr, **kwargs)
def get_big_file():
    """Load the combined photometric + spectroscopic master dataset from the
    external libraries directory (HDF5, key 'new')."""
    master_path = LIBRARIES + '/master_dataset.h5'
    return pd.read_hdf(master_path, key='new')
<filename>filters/pandoc_listof.py
#!/usr/bin/env python
"""
Pandoc filter to create lists of all kinds
"""
from pandocfilters import toJSONFilters, walk, Str, Plain, Link, BulletList, Para, RawInline
from functools import reduce
from copy import deepcopy
import io
import sys
import codecs
import json
import re
import unicodedata
import subprocess
# Registry of collected items keyed by collection name (and by the
# section-qualified alternate names such as "fig:1.2") built by collect().
collections = {}
# Numbered-header counters for levels 1-6, maintained by collect().
headers = [0, 0, 0, 0, 0, 0]
# Independent header counters for the second pass (listof()).
headers2 = [0, 0, 0, 0, 0, 0]
def stringify(x, format):
    """Walks the tree x and returns concatenated string content,
    leaving out all formatting.
    """
    pieces = []
    def visit(key, val, format, meta):
        if key in ['Str', 'MetaString']:
            pieces.append(val)
        elif key == 'Code':
            pieces.append(val[1])
        elif key == 'Math':
            # Modified from the stringify function in the pandocfilter package:
            # LaTeX output keeps the math delimiters, other formats drop them.
            if format == 'latex':
                pieces.append('$' + val[1] + '$')
            else:
                pieces.append(val[1])
        elif key in ('LineBreak', 'Space'):
            pieces.append(' ')
        elif key == 'Note':
            # Do not stringify value from Note node
            del val[:]
    walk(x, visit, format, {})
    return ''.join(pieces)
def collect(key, value, format, meta):
    """First filter pass: track numbered headers and record every tagged Span
    (anchor of the form "name:identifier") into the global collections map,
    both under its plain name and under section-qualified alternate names."""
    global headers
    # Is it a header? Keep the correct numbered headers in the headers array
    if key == 'Header':
        [level, [id, classes, attributes], content] = value
        if 'unnumbered' not in classes:
            headers[level - 1] = headers[level - 1] + 1
            # Reset the counters of all deeper levels.
            for index in range(level, 6):
                headers[index] = 0
    # Is it a link with a right tag?
    elif key == 'Span':
        # Get the Span
        [[anchor, classes, other], text] = value
        # Is the anchor correct?
        result = re.match('^([a-zA-Z][\w.-]*):([\w.-]+)$', anchor)
        if result:
            global collections
            # Compute the name
            name = result.group(1)
            # Compute the identifier
            identifier = result.group(2)
            # Store the new item
            string = stringify(deepcopy(text), format)
            # Prepare the names
            names = []
            # Add the atomic name to the list
            names.append(name)
            # Prepare the latex output
            if format == 'latex':
                latex = '\\phantomsection\\addcontentsline{' + name + '}{figure}{' + string + '}'
            # Loop on all the headers
            for i in [0, 1, 2, 3, 4, 5]:
                if headers[i] > 0:
                    # Add an alternate name to the list (e.g. "fig:1.2")
                    altName = name + ':' + '.'.join(map(str, headers[:i+1]))
                    names.append(altName)
                    if format == 'latex':
                        # Complete the latex output
                        latex = latex + '\\phantomsection\\addcontentsline{' + altName + '}{figure}{' + string + '}'
                        latex = latex + '\\phantomsection\\addcontentsline{' + altName + '_}{figure}{' + string + '}'
                else:
                    break
            for name in names:
                # Prepare the new collections if needed
                if name not in collections:
                    collections[name] = []
                collections[name].append({'identifier': identifier, 'text': string})
            # Special case for LaTeX output
            if format == 'latex':
                text.insert(0, RawInline('tex', latex))
                value[1] = text
def listof(key, value, format, meta):
    """Second filter pass: replace a paragraph containing only "{name}" or
    "{name:#.#}" with a bullet list (or a LaTeX \\@starttoc) of the items
    collected for that name; also re-tracks header numbering independently."""
    global headers2
    # Is it a header?
    if key == 'Header':
        [level, [id, classes, attributes], content] = value
        if 'unnumbered' not in classes:
            headers2[level - 1] = headers2[level - 1] + 1
            # Reset the counters of all deeper levels.
            for index in range(level, 6):
                headers2[index] = 0
    # Is it a paragraph with only one string?
    if key == 'Para' and len(value) == 1 and value[0]['t'] == 'Str':
        # Is it {tag}?
        result = re.match('^{(?P<name>(?P<prefix>[a-zA-Z][\w.-]*)(?P<section>\:((?P<sharp>#(\.#)*)|(\d+(\.\d+)*)))?)}$', value[0]['c'])
        if result:
            prefix = result.group('prefix')
            # Get the collection name
            if result.group('sharp') == None:
                name = result.group('name')
            else:
                # "#.#..." refers to the current section at the given depth.
                level = (len(result.group('sharp')) - 1) // 2 + 1
                name = prefix + ':' + '.'.join(map(str, headers2[:level]))
            # Is it an existing collection
            if name in collections:
                if format == 'latex':
                    # Special case for LaTeX output
                    if 'toccolor' in meta:
                        linkcolor = '\\hypersetup{linkcolor=' + stringify(meta['toccolor']['c'], format) + '}'
                    else:
                        linkcolor = '\\hypersetup{linkcolor=black}'
                    if result.group('sharp') == None:
                        suffix = ''
                    else:
                        suffix = '_'
                    return Para([RawInline('tex', linkcolor + '\\makeatletter\\@starttoc{' + name + suffix + '}\\makeatother')])
                else:
                    # Prepare the list
                    elements = []
                    # Loop on the collection
                    for value in collections[name]:
                        # Add an item to the list (Link signature changed in pandoc 1.16)
                        if pandocVersion() < '1.16':
                            # pandoc 1.15
                            link = Link([Str(value['text'])], ['#' + prefix + ':' + value['identifier'], ''])
                        else:
                            # pandoc 1.16
                            link = Link(['', [], []], [Str(value['text'])], ['#' + prefix + ':' + value['identifier'], ''])
                        elements.append([Plain([link])])
                    # Return a bullet list
                    return BulletList(elements)
        # Special case where the paragraph start with '{{...' — unescape it
        elif re.match('^{{[a-zA-Z][\w.-]*}$', value[0]['c']):
            value[0]['c'] = value[0]['c'][1:]
def pandocVersion():
    """Return the installed pandoc version string, caching it on the function
    object after the first subprocess call."""
    if not hasattr(pandocVersion, 'value'):
        process = subprocess.Popen(['pandoc', '-v'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, _ = process.communicate()
        match = re.search(b'pandoc (?P<version>.*)', stdout)
        pandocVersion.value = match.group('version').decode('utf-8')
    return pandocVersion.value
def main():
    """Entry point: run both passes in order — collect() records tagged spans,
    then listof() expands the list placeholders."""
    toJSONFilters([collect, listof])
if __name__ == '__main__':
    main()
|
<reponame>igkins/beeswithmachineguns
"""
"""
from collections import namedtuple
import logging
import re
class Tester(object):
    """
    Abstract base class for tester implementations.

    Concrete subclasses assemble the command line for a load-testing tool
    and scrape its output into a L{TesterResult}.
    """
    def get_command(self, num_requests, concurrent_requests, is_keepalive, url):
        """
        Generate a command line to run a test using this tester.
        @param num_requests: number of requests this tester should issue
        @type num_requests: int
        @param concurrent_requests: how many requests to issue at a time
        @type concurrent_requests: int
        @param is_keepalive: whether to use http keepalive
        @type is_keepalive: boolean
        @param url: the url to issue requests to
        @type url: str
        @return: the assembled command line
        @rtype: str
        """
        raise NotImplementedError

    def parse_output(self, output):
        """
        Create a L{TesterResult} by extracting values from the output of the
        tester command; returns None when the output lacks sufficient data
        for a meaningful result.
        @param output: the captured output (stdout) from the tester command
        @type output: str
        @return: L{TesterResult} with the extracted data, or None
        """
        raise NotImplementedError

    def _parse_measure(self, expression, content, default=''):
        """
        Scrape a single value from ``content`` with a regular expression.
        @param expression: regular expression with one capture group
        @param content: the output to scrape
        @param default: returned when nothing (or an empty string) is captured
        """
        found = re.search(expression, content)
        if found is not None and found.group(1):
            return found.group(1)
        return default
# Field names shared by TesterResult and the aggregation logic below; they
# map one-to-one onto the measures scraped from the tester's output.
_result_keys = [
    'concurrency'
    , 'time_taken'
    , 'complete_requests'
    , 'failed_requests'
    , 'non_2xx_responses'
    , 'total_transferred'
    , 'requests_per_second'
    , 'ms_per_request'
    , 'pctile_50'
    , 'pctile_75'
    , 'pctile_90'
    , 'pctile_95'
    , 'pctile_99'
]
class TesterResult(namedtuple('TesterResult', _result_keys)):
    """
    Test result container, which works for both individual and aggregated
    results. The individual fields map directly to ab results. All values
    are stored as floats.
    """
    def print_text(self, out):
        """
        Print summarized load-testing result to console.
        @param out: file-like, open for writing, into which output will be printed.
        """
        # NOTE: 'print >> file' chevron syntax — this module targets Python 2.
        print >> out, 'Concurrency Level:\t%i' % self.concurrency
        print >> out, 'Complete requests:\t%i' % self.complete_requests
        print >> out, 'Failed requests:\t%i' % self.failed_requests
        print >> out, 'Non-2xx responses:\t%i' % self.non_2xx_responses
        print >> out, 'Total Transferred:\t%i bytes' % self.total_transferred
        print >> out, 'Requests per second:\t%.2f [#/sec] (mean)' % self.requests_per_second
        print >> out, 'Time per request:\t%.3f [ms] (mean)' % self.ms_per_request
        print >> out, '50%% response time:\t%i [ms] (mean)' % self.pctile_50
        print >> out, '75%% response time:\t%i [ms] (mean)' % self.pctile_75
        print >> out, '90%% response time:\t%i [ms] (mean)' % self.pctile_90
        print >> out, '95%% response time:\t%i [ms] (mean)' % self.pctile_95
        print >> out, '99%% response time:\t%i [ms] (mean)' % self.pctile_99
def get_aggregate_result(results):
    """
    Given a sequence of TestResults, generate a single aggregate TestResult.

    Latency fields are combined as a mean weighted by completed requests,
    time_taken as the max (runs overlap in wall-clock time), everything
    else as a plain sum.
    """
    total_complete = sum([r.complete_requests for r in results])
    aggregated = {}
    for key in _result_keys:
        if key == 'ms_per_request' or key.startswith('pctile'):
            # weighted mean.
            weighted_total = sum([getattr(r, key) * r.complete_requests for r in results])
            aggregated[key] = weighted_total / total_complete
        elif key == 'time_taken':
            # time taken using max instead of sum...not precise.
            # do not use in further aggregation.
            aggregated[key] = max([getattr(r, key) for r in results])
        else:
            # everything else is just summed
            aggregated[key] = sum([getattr(r, key) for r in results])
    return TesterResult(**aggregated)
class ABTester(Tester):
    """
    Tester implementation for ab (apache benchmarking tool).
    """
    def get_command(self, num_requests, concurrent_requests, is_keepalive, url):
        """
        Assemble the ab command line: -r keeps ab running on socket receive
        errors, -n/-c set request count and concurrency, -k enables HTTP
        keep-alive when requested.
        """
        cmd = []
        cmd.append('ab')
        cmd.append('-r')
        cmd.append('-n %s' % num_requests)
        cmd.append('-c %s' % concurrent_requests)
        if is_keepalive:
            cmd.append('-k')
        cmd.append('"%s"' % url)
        cmd_line = ' '.join(cmd)
        return cmd_line
    def parse_output(self, output):
        """
        Scrape ab's stdout into a TesterResult.  Returns None when the mean
        time-per-request line is absent (failed or empty run); fields other
        than non_2xx_responses are expected to be present.
        """
        trd = {}
        m = self._parse_measure
        trd['ms_per_request'] = \
            float(m('Time\ per\ request:\s+([0-9.]+)\ \[ms\]\ \(mean\)', output))
        if not trd['ms_per_request']:
            # problem with results...return None
            return None
        trd['concurrency'] = \
            float(m('Concurrency\ Level:\s+([0-9]+)', output))
        trd['requests_per_second'] = \
            float(m('Requests\ per\ second:\s+([0-9.]+)\ \[#\/sec\]\ \(mean\)', output))
        trd['time_taken'] = \
            float(m('Time\ taken\ for\ tests:\s+([0-9.]+)\ seconds', output))
        for pctile in (50, 75, 90, 95, 99):
            trd['pctile_%s' % pctile] = \
                float(m('\s+%s\%%\s+([0-9]+)' % pctile, output)) # e.g. '\s+50\%\s+([0-9]+)'
        trd['complete_requests'] = \
            float(m('Complete\ requests:\s+([0-9]+)', output))
        trd['failed_requests'] = \
            float(m('Failed\ requests:\s+([0-9]+)', output))
        # note - may not be present - default to 0
        trd['non_2xx_responses'] = \
            float(m('Non-2xx\ responses:\s+([0-9]+)', output, 0))
        trd['total_transferred'] = \
            float(m('Total\ transferred:\s+([0-9]+)', output))
        return TesterResult(**trd)
class SiegeTester(Tester):
    """
    Tester implementation for siege. Percentile extraction depends on the
    companion ./siege_calc script being piped after siege on the worker.
    """
    def get_command(self, num_requests, concurrent_requests, is_keepalive, url, time):
        """
        is_keepalive is currently ignored here, you instead have to specify
        it in 'bees up'.

        When *time* is given, run for that duration (-t); otherwise run a
        per-client repeat count derived from num_requests. With no URL, fall
        back to a urls.txt file on the worker.
        """
        cmd = []
        cmd.append('siege')
        # -v verbose, -i internet mode (random URL from file), -b benchmark
        # mode (no delay between requests).
        cmd.append('-v')
        cmd.append('-i')
        cmd.append('-b')
        cmd.append('-c %s' % concurrent_requests)
        if time:
            cmd.append('-t%s' % time)
        else:
            # siege multiplies the number of reqs you want by the concurrency,
            # which is different from how ab works, so we divide them pre-emptively
            # NOTE(review): integer division under Python 2; under Python 3
            # this would become a float argument -- confirm if ported.
            cmd.append('-r %s' % max(1, (num_requests / concurrent_requests)))
        if url:
            cmd.append('"%s"' % url)
        else:
            cmd.append('-f urls.txt')
        cmd_line = ' '.join(cmd) + ' | ./siege_calc'
        return cmd_line
    def parse_output(self, output):
        """
        Parse siege (plus siege_calc) text output into a TesterResult.
        Returns None when no response-time figure can be found.
        """
        trd = {}
        m = self._parse_measure
        rt_secs = m('Response\ time:\s+([0-9.]+)\ secs', output)
        # give up now if we couldnt find the data
        if not rt_secs: return None
        # siege reports seconds; normalize to milliseconds.
        trd['ms_per_request'] = \
            float(float(rt_secs) * 1000.0)
        trd['concurrency'] = \
            float(m('Concurrency:\s+([0-9.]+)', output))
        trd['requests_per_second'] = \
            float(m('Transaction rate:\s+([0-9.]+)\ trans/sec', output))
        trd['time_taken'] = \
            float(m('Elapsed\ time:\s+([0-9.]+)\ secs', output))
        # this bit requires siege_calc to be available on the worker bees
        for pctile in (50, 75, 90, 95, 99):
            trd['pctile_%s' % pctile] = \
                float(m('\s+%s\%%\s+([0-9]+)' % pctile, output, 0)) # e.g. '\s+50\%\s+([0-9]+)'
        trd['complete_requests'] = \
            float(m('Transactions:\s+([0-9]+)\ hits', output))
        trd['failed_requests'] = \
            float(m('Failed\ transactions:\s+([0-9]+)', output))
        # note - may not be present - default to 0
        # this isnt implemented yet either
        trd['non_2xx_responses'] = 0.0
        # siege reports megabytes; convert to bytes.
        xferred_mb = m('Data\ transferred:\s+([0-9.]+) MB', output)
        trd['total_transferred'] = \
            float(float(xferred_mb) * 1024.0 * 1024.0)
        return TesterResult(**trd)
class WideloadTester(Tester):
    """
    Tester implementation for wideload.
    """
    def get_command(self, num_requests, concurrent_requests, is_keepalive, url):
        """
        Build the wideload_wrap command line. Only URLs-file mode is
        supported; passing an explicit URL raises. is_keepalive is ignored.
        """
        cmd = []
        cmd.append('wideload_wrap')
        # wideload multiplies the number of reqs you want by the concurrency,
        # which is different from how ab works, so we divide them pre-emptively
        cmd.append('-r %s' % max(1, (num_requests / concurrent_requests)))
        cmd.append('-c %s' % concurrent_requests)
        # -f 85: option forwarded to wideload_wrap -- meaning not documented
        # here; confirm against the wrapper script.
        cmd.append('-f 85')
        if url:
            raise Exception("wideload only works with a URLs file")
        else:
            cmd.append('urls.txt')
        cmd_line = ' '.join(cmd)
        return cmd_line
    def parse_output(self, output):
        """
        Parse wideload output into a TesterResult: every result key is
        reported as '<key>: <number>', so extraction is uniform.
        """
        trd = {}
        m = self._parse_measure
        for key in _result_keys:
            pattern = '%s:\s+([0-9\.]+)' % key
            trd[key] = float(m(pattern, output))
        return TesterResult(**trd)
if __name__=='__main__':
    import sys
    # Manual smoke test: parse timing output piped in on stdin.
    # parse_timings is presumably provided by the Tester base class
    # (not shown in this file section) -- TODO confirm.
    SiegeTester().parse_timings(sys.stdin)
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enhanced configuration file parser."""
from __future__ import with_statement
import logging
import os.path
# Module-level logger, named after this module so it participates in
# hierarchical logging configuration.
LOGGER_NAME = __name__
LOG = logging.getLogger(LOGGER_NAME)
class ConfigParser(object):
    """Convenience wrapper around a stdlib-style configuration parser.

    Preserves option-name case, remembers the associated file path, and adds
    forgiving accessors (safe_get, lazy_get) plus helpers for seeding
    default options.
    """

    def __init__(self, config_parser_class):
        """Initializes the object.

        Args:
          config_parser_class: Class that acts as a configuration file parser.
        """
        self.parser = config_parser_class()
        # The default ConfigParser lower-cases option names; override the
        # transform so names are stored verbatim. Not every parser class
        # exposes optionxform, so failure is deliberately tolerated.
        try:
            self.parser.optionxform = str
        except Exception:  # narrowed from a bare 'except:'; still best-effort
            pass
        # Path of the associated config file; set by associate().
        self.path = None

    def associate(self, config_file_path):
        """Associates parser with a config file.

        Config file is read from config_file_path as well, when it exists;
        otherwise the parser simply starts out empty.
        """
        if os.path.exists(config_file_path):
            LOG.debug('Reading configuration from %s', config_file_path)
            self.parser.read(config_file_path)
        else:
            LOG.debug('Config file does not exist, starting with empty parser')
        self.path = config_file_path

    def ensure_basic_options(self, basic_options):
        """Sets options if they are missing.

        Args:
          basic_options: Nested dictionary in the form of
              {section header: {option: value, option: value},
               section_header: {option: value, option: value}
               ...}

        Returns:
          True if some of the options in basic_options were not set already,
          False otherwise.
        """
        made_changes = False
        # .items() instead of Python-2-only .iteritems(): identical behavior
        # on Python 2 and forward-compatible with Python 3.
        for section_name, section_options in basic_options.items():
            if not self.parser.has_section(section_name):
                self.parser.add_section(section_name)
            missing_options = (set(section_options.keys()) -
                               set(self.parser.options(section_name)))
            for option in missing_options:
                self.set(section_name, option, section_options[option])
            if missing_options:
                made_changes = True
        return made_changes

    def get(self, section, option):
        """Returns option in section.

        No backup sections or defaults are returned by this function. If the
        section or option does not exist, the config parser will raise an
        error.

        Returns:
          String from config file.
        """
        return self.parser.get(section, option)

    def lazy_get(self, section, option, default=None, option_type=None,
                 backup_section='GENERAL'):
        """Returns option from config file.

        Tries to retrieve <option> from the given section. If that fails,
        tries to retrieve the same option from the backup section. If that
        fails, returns value of <default> parameter.

        Args:
          section: Name of the section to initially try to retrieve the
              option from.
          option: Name of the option to retrieve.
          default: Value to return if the option does not exist in a searched
              section.
          option_type: Conversion function to use on the string, or None to
              leave as string. For example, if you want an integer value
              returned, this should be set to int. Not applied to the
              <default> parameter.
          backup_section: Section to check if option does not exist in given
              section. Default 'GENERAL'.

        Returns:
          Value of the option if it exists in the config file, or value of
          "default" if option does not exist.
        """
        value = self.safe_get(section, option)
        if value is None:
            value = self.safe_get(backup_section, option)
        if value is None:
            return default
        if option_type:
            # bool() doesn't parse strings ('False' is truthy), so booleans
            # are compared textually instead.
            if option_type == bool:
                return value.lower() == 'true'
            else:
                return option_type(value)
        else:
            return value

    def safe_get(self, section, option):
        """Returns option if section and option exist, None if they do not."""
        if (self.parser.has_section(section) and
                self.parser.has_option(section, option)):
            return self.parser.get(section, option)
        else:
            return None

    def set(self, section, option, value):
        """Sets option in a section."""
        return self.parser.set(section, option, value)

    def set_missing_default(self, section, option, value):
        """Sets the option for a section if not defined already.

        If the section does not exist, it is created.

        Args:
          section: Title of the section to set the option in.
          option: Option to set.
          value: Value to give the option.
        """
        # Coerce non-string values to text before storage.
        # NOTE: 'unicode' makes this method Python-2-only.
        if type(value) not in [unicode, str]:
            value = unicode(value)
        existing_value = self.safe_get(section, option)
        if existing_value is None:
            if not self.parser.has_section(section):
                self.parser.add_section(section)
            self.set(section, option, value)

    def write_out_parser(self, path=None):
        """Writes options in config parser to file.

        Args:
          path: Path to write to. Default None for path associated with
              instance.

        Raises:
          IOError: No path given and instance is not associated with a path.
        """
        if not path:
            if self.path:
                path = self.path
            else:
                raise IOError('No path given or associated')
        with open(path, 'w') as config_file:
            self.parser.write(config_file)
|
<filename>Scripts/uniq_strain_tax_2_jsons.py
#!/usr/bin/python
from __future__ import print_function
import re,sys,os
import json
# Copyright {2019} <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#This script will read in the uniq_tax of a database and turn it into 7 json files as dictionary for the primer coverage analysis
#This script starts from The _uniq.tax (such as SILVA_138_SSURef_tax_silva_uniq.tax)
#Library file requirement: python 2.7 and up
#Usage: python uniq_strain_tax_2_jsons.py database_tax output_folder
#########check files and folders
# Validate CLI arguments: the input tax file must exist and the output
# directory is created on demand.
if not os.path.isfile(sys.argv[1]):
    # Fixed: exit with a non-zero status on error (the original exit(0)
    # signalled success to callers) and a grammatical message.
    print('Input tax does not exist; exiting now.')
    sys.exit(1)
if not os.path.exists(sys.argv[2]):
    os.mkdir(sys.argv[2])
#species dic
# D<k>_sp: number of distinct species observed under each ';'-joined
# taxonomy prefix of depth k+1 (D6 keys end in '_<species>').
D0_sp={}
D1_sp={}
D2_sp={}
D3_sp={}
D4_sp={}
D5_sp={}
D6_sp={}
#strain dic
# D<k>_str: number of sequence records (strains) under each prefix.
D0_str={}
D1_str={}
D2_str={}
D3_str={}
D4_str={}
D5_str={}
#contradiction dic
# D<k>_contr: level-k name -> {parent lineage -> [sequence ids]}; used to
# detect the same name appearing under more than one parent.
D1_contr={}
D2_contr={}
D3_contr={}
D4_contr={}
D5_contr={}
# Output tax file with per-strain '::strN' suffixes; closed after the loop.
out_strain_tax = open('%s_uniq_strain.tax' % (sys.argv[1].split("_uniq.tax")[0]), 'w')
with open(sys.argv[1]) as infile:
    # One taxonomy record per line: "<sequence_id> <level1;level2;...>".
    for i in infile:
        isplit = i.rstrip().split(' ')
        bsplit = isplit[1].split(';')
        len_tax=len(bsplit)
        #deal with tax with different levels, the 7-level rule was applied
        # D<k>_index is the ';'-joined prefix of the first k+1 levels; the
        # species index (D6) appends the species label with '_'.
        if len_tax==7:
            D0_index=bsplit[0]
            D1_index=";".join(bsplit[0:2])
            D2_index=";".join(bsplit[0:3])
            D3_index=";".join(bsplit[0:4])
            D4_index=";".join(bsplit[0:5])
            D5_index=";".join(bsplit[0:6])
            D6_sp_index=";".join(bsplit[0:6])+"_"+bsplit[6]
        elif len_tax>7:
            # Deeper than 7 levels: everything past level 6 is folded into
            # the species label with '_' joins.
            D0_index=bsplit[0]
            D1_index=";".join(bsplit[0:2])
            D2_index=";".join(bsplit[0:3])
            D3_index=";".join(bsplit[0:4])
            D4_index=";".join(bsplit[0:5])
            D5_index=";".join(bsplit[0:6])
            D6_sp_index=";".join(bsplit[0:6])+"_"+"_".join(bsplit[6:])
        else:
            # Shallower than 7 levels: pad the missing levels with "NA".
            index_count=0
            index_list=[]
            for index_count in range(0,6):
                if index_count<(len_tax):
                    index_list.append(";".join(bsplit[0:(index_count+1)]))
                else:
                    index_list.append(index_list[index_count-1]+";"+"NA")
            index_list.append(index_list[5]+"_"+"NA")
            D0_index=index_list[0]
            D1_index=index_list[1]
            D2_index=index_list[2]
            D3_index=index_list[3]
            D4_index=index_list[4]
            D5_index=index_list[5]
            D6_sp_index=index_list[6]
        #assign into the dictionary, and check whether a strain has duplicate
        if D6_sp_index in D6_sp:
            # Species seen before: only bump the per-level strain counters.
            D0_str[D0_index]+=1
            D1_str[D1_index]+=1
            D2_str[D2_index]+=1
            D3_str[D3_index]+=1
            D4_str[D4_index]+=1
            D5_str[D5_index]+=1
            D6_sp[D6_sp_index]+=1
        else:
            # New species: bump both strain and species counters, creating
            # entries on first sight of each prefix.
            if D0_index in D0_str:
                D0_str[D0_index]+=1
                D0_sp[D0_index]+=1
            else:
                D0_str[D0_index]=1
                D0_sp[D0_index]=1
            if D1_index in D1_str:
                D1_str[D1_index]+=1
                D1_sp[D1_index]+=1
            else:
                D1_str[D1_index]=1
                D1_sp[D1_index]=1
            if D2_index in D2_str:
                D2_str[D2_index]+=1
                D2_sp[D2_index]+=1
            else:
                D2_str[D2_index]=1
                D2_sp[D2_index]=1
            if D3_index in D3_str:
                D3_str[D3_index]+=1
                D3_sp[D3_index]+=1
            else:
                D3_str[D3_index]=1
                D3_sp[D3_index]=1
            if D4_index in D4_str:
                D4_str[D4_index]+=1
                D4_sp[D4_index]+=1
            else:
                D4_str[D4_index]=1
                D4_sp[D4_index]=1
            if D5_index in D5_str:
                D5_str[D5_index]+=1
                D5_sp[D5_index]+=1
            else:
                D5_str[D5_index]=1
                D5_sp[D5_index]=1
            D6_sp[D6_sp_index]=1
        #print into the strain tax
        # NOTE(review): print() already appends a newline, so the explicit
        # "\n" produces a blank line between records -- confirm intended.
        print (i.rstrip()+"::str"+str(D6_sp[D6_sp_index])+"\n", file = out_strain_tax)
        #check contradiction
        # A "contradiction" is the same level-k name occurring under more
        # than one parent lineage; record sequence ids per parent. "NA"
        # padding levels are skipped (continue abandons deeper levels too).
        contri_list = D6_sp_index.split(';')
        if contri_list[1]=="NA":
            continue
        else:
            if contri_list[1] in D1_contr:
                if contri_list[0] in D1_contr[contri_list[1]]:
                    D1_contr[contri_list[1]][contri_list[0]].append(isplit[0])
                else:
                    D1_contr[contri_list[1]][contri_list[0]]=[isplit[0]]
            else:
                D1_contr[contri_list[1]]={}
                D1_contr[contri_list[1]][contri_list[0]]=[isplit[0]]
        contri_lv2 = ";".join(contri_list[0:2])
        if contri_list[2]=="NA":
            continue
        else:
            if contri_list[2] in D2_contr:
                if contri_lv2 in D2_contr[contri_list[2]]:
                    D2_contr[contri_list[2]][contri_lv2].append(isplit[0])
                else:
                    D2_contr[contri_list[2]][contri_lv2]=[isplit[0]]
            else:
                D2_contr[contri_list[2]]={}
                D2_contr[contri_list[2]][contri_lv2]=[isplit[0]]
        contri_lv3 = ";".join(contri_list[0:3])
        if contri_list[3]=="NA":
            continue
        else:
            if contri_list[3] in D3_contr:
                if contri_lv3 in D3_contr[contri_list[3]]:
                    D3_contr[contri_list[3]][contri_lv3].append(isplit[0])
                else:
                    D3_contr[contri_list[3]][contri_lv3]=[isplit[0]]
            else:
                D3_contr[contri_list[3]]={}
                D3_contr[contri_list[3]][contri_lv3]=[isplit[0]]
        contri_lv4 = ";".join(contri_list[0:4])
        if contri_list[4]=="NA":
            continue
        else:
            if contri_list[4] in D4_contr:
                if contri_lv4 in D4_contr[contri_list[4]]:
                    D4_contr[contri_list[4]][contri_lv4].append(isplit[0])
                else:
                    D4_contr[contri_list[4]][contri_lv4]=[isplit[0]]
            else:
                D4_contr[contri_list[4]]={}
                D4_contr[contri_list[4]][contri_lv4]=[isplit[0]]
        contri_lv5 = ";".join(contri_list[0:5])
        # Element 5 carries the '_'-joined species suffix, hence "NA_NA".
        if contri_list[5]=="NA_NA":
            continue
        else:
            if contri_list[5] in D5_contr:
                if contri_lv5 in D5_contr[contri_list[5]]:
                    D5_contr[contri_list[5]][contri_lv5].append(isplit[0])
                else:
                    D5_contr[contri_list[5]][contri_lv5]=[isplit[0]]
            else:
                D5_contr[contri_list[5]]={}
                D5_contr[contri_list[5]][contri_lv5]=[isplit[0]]
out_strain_tax.close()
#summary contradictions into one file
# A level-k name is contradictory when it was recorded under more than one
# distinct parent lineage; one "[[Dk]]" section is written per level.
out_contr = open('%s/%s_contradict_sum.txt' % (sys.argv[2], sys.argv[1]), 'w')
for level, contr_dict in ((1, D1_contr), (2, D2_contr), (3, D3_contr),
                          (4, D4_contr), (5, D5_contr)):
    print("[[D%d]]" % level, file=out_contr)
    for name in contr_dict:
        # More than one parent prefix for this name => contradiction.
        if len(contr_dict[name]) > 1:
            print(name + ":" + "\t".join(contr_dict[name].keys()), file=out_contr)
out_contr.close()
#write the dictionaries into json files
def _write_json(dic, path):
    # Serialize one counter dictionary to a JSON file.
    # Using 'with' guarantees the handle is closed, and avoids the
    # original's shadowing of the builtin name 'file'.
    with open(path, 'w') as handle:
        handle.write(json.dumps(dic))

#species dicts (levels D0..D6)
for level, dic in enumerate((D0_sp, D1_sp, D2_sp, D3_sp, D4_sp, D5_sp, D6_sp)):
    _write_json(dic, '%s/D%d_species_dic.json' % (sys.argv[2], level))
#strain dicts (levels D0..D5)
for level, dic in enumerate((D0_str, D1_str, D2_str, D3_str, D4_str, D5_str)):
    _write_json(dic, '%s/D%d_strain_dic.json' % (sys.argv[2], level))
#contradiction dicts (levels D1..D5 only)
for level, dic in enumerate((D1_contr, D2_contr, D3_contr, D4_contr, D5_contr), 1):
    _write_json(dic, '%s/D%d_contradictions_dic.json' % (sys.argv[2], level))
|
import pytest
from postcodes.uk import PostCodeUK
def test_postcodes_uk_uppercase_normalization():
    """Lower-case input is upper-cased in full_postcode; raw input is kept."""
    raw_postcode = 'aa9a 9aa'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.raw_postcode == raw_postcode
    assert postcode.full_postcode == 'AA9A 9AA'
def test_postcodes_uk_outward_and_inward_validations():
    """The postcode splits on the space into outward and inward halves."""
    raw_postcode = 'AA9A 9AA'
    outward, inward = raw_postcode.split(' ')
    postcode = PostCodeUK(raw_postcode)
    assert postcode.outward == outward
    assert postcode.inward == inward
@pytest.mark.parametrize('raw_postcode, area, district, sector, unit', [['AA9A 9AA', 'AA', '9A', '9', 'AA'],
                                                                        ['A9A 9AA', 'A', '9A', '9', 'AA'],
                                                                        ['A9 9AA', 'A', '9', '9', 'AA'],
                                                                        ['A99 9AA', 'A', '99', '9', 'AA'],
                                                                        ['AA9 9AA', 'AA', '9', '9', 'AA'],
                                                                        ['AA99 9AA', 'AA', '99', '9', 'AA']])
def test_postcodes_uk_valid_formats(raw_postcode, area, district, sector, unit):
    """All six canonical UK formats parse into the expected components."""
    postcode = PostCodeUK(raw_postcode)
    assert postcode.raw_postcode == raw_postcode
    assert postcode.full_postcode == raw_postcode
    assert postcode.area == area
    assert postcode.district == district
    assert postcode.sector == sector
    assert postcode.unit == unit
    assert postcode.is_valid is True
@pytest.mark.parametrize('raw_postcode', ['9 9AA', '9A 9AA', '99 9AA',
                                          'AAA9A 9AA', 'AAAA9A 9AA', 'AAAAA9A 9AA',
                                          'AAA99 9AA', 'AAAA99 9AA', 'AAAAA99 9AA', ])
def test_postcodes_uk_invalid_area(raw_postcode):
    """Missing or over-long leading letters yield an area error only."""
    postcode = PostCodeUK(raw_postcode)
    assert postcode.raw_postcode == raw_postcode
    assert postcode.full_postcode == raw_postcode
    assert postcode.is_valid is False
    assert postcode.errors == {'area': 'Invalid area format.'}
@pytest.mark.parametrize('raw_postcode', ['A 9AA', 'AA 9AA'])
def test_postcodes_uk_invalid_district(raw_postcode):
    """An outward code without digits yields a district error only."""
    postcode = PostCodeUK(raw_postcode)
    assert postcode.raw_postcode == raw_postcode
    assert postcode.full_postcode == raw_postcode
    assert postcode.is_valid is False
    assert postcode.errors == {'district': 'Invalid district format.'}
@pytest.mark.parametrize('raw_postcode', ['AA9A AA', 'A9A AA', 'A9 AA', 'A99 AA', 'AA9 AA', 'AA99 AA'])
def test_postcodes_uk_invalid_sector(raw_postcode):
    """An inward code without a leading digit yields a sector error only."""
    postcode = PostCodeUK(raw_postcode)
    assert postcode.raw_postcode == raw_postcode
    assert postcode.full_postcode == raw_postcode
    assert postcode.is_valid is False
    assert postcode.errors == {'sector': 'Invalid sector format.'}
@pytest.mark.parametrize('raw_postcode', ['AA9A 9', 'A9A 9', 'A9 9', 'A99 9', 'AA9 9', 'AA99 9',
                                          'AA9A 9AAA', 'A9A 9AAA', 'A9 9AAA', 'A99 9AAA', 'AA9 9AAA', 'AA99 9AAA',
                                          'AA9A 9AAAA', 'A9A 9AAAA', 'A9 9AAAA', 'A99 9AAAA', 'AA9 9AAAA',
                                          'AA99 9AAAA'])
def test_postcodes_uk_invalid_unit(raw_postcode):
    """An inward code without exactly two trailing letters yields a unit error."""
    postcode = PostCodeUK(raw_postcode)
    assert postcode.raw_postcode == raw_postcode
    assert postcode.full_postcode == raw_postcode
    assert postcode.is_valid is False
    assert postcode.errors == {'unit': 'Invalid unit format.'}
def test_postcodes_uk_to_dict_with_valid_postcode():
    """to_dict() of a valid postcode carries attributes, sides, no errors."""
    raw_postcode = 'AA9A 9AA'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.is_valid is True
    assert postcode.to_dict() == {'postcode': 'AA9A 9AA',
                                  'is_valid': True,
                                  'attributes': {'area': 'AA', 'district': '9A', 'sector': '9', 'unit': 'AA'},
                                  'sides': {'outward': 'AA9A', 'inward': '9AA'},
                                  'errors': {}}
def test_postcodes_uk_to_dict_with_invalid_area():
    """A digit-led outward code reports an empty area and an area error."""
    raw_postcode = '9A 9AA'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.is_valid is False
    assert postcode.to_dict() == {'attributes': {'area': '', 'district': '9A', 'sector': '9', 'unit': 'AA'},
                                  'errors': {'area': 'Invalid area format.'},
                                  'is_valid': False,
                                  'postcode': '9A 9AA',
                                  'sides': {'inward': '9AA', 'outward': '9A'}}
def test_postcodes_uk_to_dict_with_invalid_district():
    """A digit-free outward code reports district=None and a district error."""
    raw_postcode = 'AA 9AA'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.is_valid is False
    assert postcode.to_dict() == {'attributes': {'area': 'AA', 'district': None, 'sector': '9', 'unit': 'AA'},
                                  'errors': {'district': 'Invalid district format.'},
                                  'is_valid': False,
                                  'postcode': 'AA 9AA',
                                  'sides': {'inward': '9AA', 'outward': 'AA'}}
def test_postcodes_uk_to_dict_with_invalid_sector():
    """An inward code missing its digit reports an empty sector and an error."""
    raw_postcode = 'AA9A AA'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.is_valid is False
    assert postcode.to_dict() == {'attributes': {'area': 'AA', 'district': '9A', 'sector': '', 'unit': 'AA'},
                                  'errors': {'sector': 'Invalid sector format.'},
                                  'is_valid': False,
                                  'postcode': 'AA9A AA',
                                  'sides': {'inward': 'AA', 'outward': 'AA9A'}}
def test_postcodes_uk_to_dict_with_invalid_unit():
    """An inward code missing its letters reports an empty unit and an error."""
    raw_postcode = 'AA9A 9'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.is_valid is False
    assert postcode.to_dict() == {'attributes': {'area': 'AA', 'district': '9A', 'sector': '9', 'unit': ''},
                                  'errors': {'unit': 'Invalid unit format.'},
                                  'is_valid': False,
                                  'postcode': 'AA9A 9',
                                  'sides': {'inward': '9', 'outward': 'AA9A'}}
def test_postcodes_uk_to_dict_with_invalid_combined_area_and_unit():
    """Area and unit errors can be reported together in one dict."""
    raw_postcode = '9A 9'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.is_valid is False
    assert postcode.to_dict() == {'attributes': {'area': '', 'district': '9A', 'sector': '9', 'unit': ''},
                                  'errors': {'area': 'Invalid area format.', 'unit': 'Invalid unit format.'},
                                  'is_valid': False,
                                  'postcode': '9A 9',
                                  'sides': {'inward': '9', 'outward': '9A'}}
def test_postcodes_uk_to_dict_with_invalid_combined_district_and_sector():
    """District and sector errors can be reported together in one dict."""
    raw_postcode = 'AA AA'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.is_valid is False
    assert postcode.to_dict() == {'attributes': {'area': 'AA', 'district': None, 'sector': '', 'unit': 'AA'},
                                  'errors': {'district': 'Invalid district format.',
                                             'sector': 'Invalid sector format.'},
                                  'is_valid': False,
                                  'postcode': 'AA AA',
                                  'sides': {'inward': 'AA', 'outward': 'AA'}}
def test_postcodes_uk_to_dict_with_missing_space_letter():
    """A space-free all-letter string reports missing_space plus field errors."""
    raw_postcode = 'AAAA'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.is_valid is False
    assert postcode.to_dict() == {'attributes': {'area': 'AAAA', 'district': None, 'sector': None, 'unit': None},
                                  'errors': {'area': 'Invalid area format.',
                                             'district': 'Invalid district format.',
                                             'missing_space': 'Missing space in the postcode',
                                             'sector': 'Invalid sector format.',
                                             'unit': 'Invalid unit format.'},
                                  'is_valid': False,
                                  'postcode': 'AAAA',
                                  'sides': {'inward': None, 'outward': 'AAAA'}}
def test_postcodes_uk_to_dict_with_missing_space_digit():
    """A single digit reports missing_space plus area/sector/unit errors."""
    raw_postcode = '9'
    postcode = PostCodeUK(raw_postcode)
    assert postcode.is_valid is False
    assert postcode.to_dict() == {'attributes': {'area': '', 'district': '9', 'sector': None, 'unit': None},
                                  'errors': {'area': 'Invalid area format.',
                                             'missing_space': 'Missing space in the postcode',
                                             'sector': 'Invalid sector format.',
                                             'unit': 'Invalid unit format.'},
                                  'is_valid': False,
                                  'postcode': '9',
                                  'sides': {'inward': None, 'outward': '9'}}
|
<filename>main.py
import cv2
import time
import argparse
from detectors.cnn_dlib import CNNFaceDetector
from detectors.hog_dlib import HoGFaceDetector
from detectors.haarcascade import FaceDetectorHaar
from detectors.ssd_face_detector import FaceDetector
import line_profiler
import atexit
# Module-level line profiler; functions decorated with @profile are timed
# and the collected stats are printed automatically at interpreter exit.
profile = line_profiler.LineProfiler()
atexit.register(profile.print_stats)
def cmd_line_parser():
    """Parse the command-line options for the face-detection demo.

    Returns an argparse.Namespace with:
      method -- detector to use ('haar', 'hog', 'cnn' or 'ssd'; default None)
      input  -- source path, or the literal 'webcam' (default)
      type   -- 'image' or 'video' (default 'video')
    """
    parser = argparse.ArgumentParser(description='Face Detection')
    parser.add_argument('-m', '--method', help='Algorithms', type=str,
                        choices=['haar', 'hog', 'cnn', 'ssd'], default=None)
    parser.add_argument('-i', '--input', help='Input', type=str,
                        default='webcam')
    parser.add_argument('-t', '--type', help='Input Type', type=str,
                        choices=['image', 'video'], default='video')
    return parser.parse_args()
@profile
def main():
    """Run face detection on a webcam stream, a video file, or a still image.

    The input source and detector are chosen via command-line flags; the
    annotated output is written to an AVI file (video) or a JPEG (image).
    """
    args = cmd_line_parser()

    # Face detection in video
    if args.type == 'video':
        # Fixed: was `args.input is 'webcam'` -- identity comparison against
        # a string literal only works by CPython interning accident; use ==.
        if args.input == 'webcam':
            video = cv2.VideoCapture(0)
            out_name, fps = 'output.avi', 20.0
        else:
            video = cv2.VideoCapture(args.input)
            out_name, fps = 'output_cnn.avi', 25.0
        width = int(video.get(3))   # CAP_PROP_FRAME_WIDTH
        height = int(video.get(4))  # CAP_PROP_FRAME_HEIGHT
        codec = cv2.VideoWriter_fourcc(*'XVID')
        writer = cv2.VideoWriter(out_name, codec, fps, (width, height))

        # Fixed: the original passed `frame=frame` here, but no frame has
        # been read yet in the video path, so that was a guaranteed
        # NameError. Build the detectors without a frame and hand each frame
        # to detection() instead (as the 'ssd' branch already did).
        # TODO confirm the detector constructors accept a missing frame.
        if args.method == 'haar':
            face_detector = FaceDetectorHaar()
        elif args.method == 'hog':
            face_detector = HoGFaceDetector()
        elif args.method == 'cnn':
            face_detector = CNNFaceDetector()
        elif args.method == 'ssd':
            face_detector = FaceDetector()
        else:
            print('Face detection scheme not selected...')
            return

        count = 0
        timestep0 = time.time()
        while video.isOpened():
            timestep1 = time.time()
            ret, frame = video.read()
            if not ret:
                break
            count += 1
            detected_frame = face_detector.detection(frame)
            cv2.imshow('Face Detection - Video', detected_frame)
            writer.write(detected_frame)
            # Report the processed-frame count roughly once per second.
            if timestep1 >= timestep0 + 1:
                print('FPS: ', count)
                timestep0 = time.time()
                count = 0
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        video.release()
        writer.release()
        cv2.destroyAllWindows()

    # Face detection in image
    elif args.type == 'image':
        frame = cv2.imread(args.input)
        if args.method == 'haar':
            face_detector = FaceDetectorHaar(frame=frame)
        elif args.method == 'hog':
            face_detector = HoGFaceDetector(frame=frame)
        elif args.method == 'cnn':
            face_detector = CNNFaceDetector(frame=frame)
        elif args.method == 'ssd':
            face_detector = FaceDetector(frame=frame)
        else:
            print('Face detection scheme not selected...')
            return
        detected_frame = face_detector.detection()
        cv2.imwrite("img_115_hog.jpg", detected_frame)
        cv2.imshow('Face Detection - Image', detected_frame)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        print('Please select a proper input type. For help use -h or --help')
# Script entry point.
if __name__ == '__main__':
    main()
|
from scipy.interpolate import interp1d
import numpy as np
def series_interp(data=None):
    """Fill negative entries of *data* by quadratic spline interpolation.

    Negative values are treated as missing. If the first sample is missing,
    the series is anchored with a value of 0 at index 0 (interp1d cannot
    extrapolate). Non-negative entries are returned unchanged.

    Args:
        data: sequence of numbers; None is treated as an empty series.

    Returns:
        List where missing entries are replaced by interpolated values.
    """
    # Avoid the shared mutable-default-argument pitfall (was `data=[]`);
    # behavior for all real call sites is unchanged.
    if data is None:
        data = []
    known_y = [v for v in data if v >= 0]
    known_x = [i for i in range(len(data)) if data[i] >= 0]
    if 0 not in known_x:
        # Anchor the series start so every index lies inside the spline's
        # support; a start value of 0 is assumed -- TODO confirm intended.
        known_y = [0] + known_y
        known_x = [0] + known_x
    f = interp1d(known_x, known_y, kind='quadratic')
    return [data[i] if data[i] >= 0 else f(i) for i in range(len(data))]
def window_avg(data=None, window=7):
    """Trailing moving average of *data* with window size *window*.

    Each output element i is the mean of up to *window* samples ending at i;
    windows are truncated at the start of the series.

    Args:
        data: sequence of numbers; None is treated as an empty series.
        window: trailing window length in samples.

    Returns:
        List of means, one per input element.
    """
    # Avoid the shared mutable-default-argument pitfall (was `data=[]`).
    if data is None:
        data = []
    avg = []
    for i in range(len(data)):
        # max(0, ...) collapses the original two branches: for i < window
        # the slice naturally truncates to the start of the series.
        start = max(0, i - window + 1)
        avg.append(np.mean(data[start:i + 1]))
    return avg
def run_avg(x, wind=1):
    """Centered running average of *x* with window size *wind*.

    Interior points average a window of roughly *wind* samples centered on
    the point; windows are truncated near both ends of the series.
    NOTE(review): the boundary arithmetic mixes float division with
    floor/ceil and is asymmetric for odd vs. even *wind* -- confirm the
    intended window definition before modifying.
    """
    avg = []
    for i in range(len(x)):
        if i >= np.floor(wind/2) and i < np.ceil(len(x) - wind/2):
            # Interior: full centered window.
            avg.append(np.mean(x[int(i+np.floor(-wind/2+1)):int(i+np.floor(wind/2+1))]))
        elif i < wind/2:
            # Left edge: truncated window from the series start.
            avg.append(np.mean(x[:int(i+wind/2+1)]))
        else:
            # Right edge: truncated window up to the series end.
            avg.append(np.mean(x[int(i-wind/2+1):]))
    return avg
def make_fig_dates():
    """Create a 10x10-inch figure/axes pre-styled for date-indexed series.

    NOTE(review): relies on module-level names that are not defined in this
    file's visible imports: plt (matplotlib.pyplot), mdates
    (matplotlib.dates) and tick_dates -- confirm they are provided elsewhere
    before calling.
    """
    f = plt.figure(figsize=(10,10))
    ax = f.add_subplot(111)
    formatter = mdates.DateFormatter("%Y-%m-%d")
    locator = mdates.MonthLocator()
    locator_days = mdates.DayLocator()  # currently unused
    ax.xaxis.set_major_formatter(formatter)
    ax.xaxis.set_major_locator(locator)
    ax.set_xlabel("Date",fontsize=20)
    ax.xaxis.set_ticks(tick_dates)
    ax.xaxis.set_tick_params(rotation=45)
    ax.set_ylabel("Percent of Population (%)",fontsize=20)
    ax.tick_params(labelsize=15, direction='out', length=6, width=2)
    #ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0),useMathText=True)
    ax.yaxis.offsetText.set_fontsize(20)
    # Tableau-20 color cycle.
    ax.set_prop_cycle(color=[
        '#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a',
        '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94',
        '#e377c2', '#f7b6d2', '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d',
        '#17becf', '#9edae5'])
    return f,ax
def make_fig(f=None, ax=None):
    """Return a (figure, axes) pair styled for population-percentage plots.

    Args:
        f, ax: existing matplotlib figure and axes to restyle. When BOTH are
            None a new 10x10-inch figure with labeled axes is created.
            Passing only one of the two is unsupported (the missing axes
            would be None below).

    Returns:
        Tuple (f, ax).
    """
    # Fixed: identity comparison with `is None` instead of `== None`.
    if f is None and ax is None:
        f = plt.figure(figsize=(10, 10))
        ax = f.add_subplot(111)
        ax.set_xlabel("Frequency", fontsize=18)
        ax.set_ylabel("Percent of Population", fontsize=18)

    def func(x, pos):
        # Tick-formatter helper for the commented-out scientific-notation
        # line below; currently unused but kept for easy re-enabling.
        if str(np.around(x, 2)) == str(x):
            return x
        return np.format_float_scientific(x, precision=2)

    ax.tick_params(labelsize=16, direction='out', length=6, width=2)
    # ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0),useMathText=True)
    ax.yaxis.offsetText.set_fontsize(14)
    # Tableau-20 color cycle.
    ax.set_prop_cycle(color=[
        '#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a',
        '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94',
        '#e377c2', '#f7b6d2', '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d',
        '#17becf', '#9edae5'])
    return f, ax
def human_format(num):
    """Format a number with a K/M/B/T suffix, e.g. 1234 -> '1.23K'.

    The value is first rounded to three significant digits, then scaled
    down by factors of 1000 until it fits the largest known suffix.

    Args:
        num: the number to format (int or float, may be negative).

    Returns:
        Compact human-readable string.
    """
    suffixes = ['', 'K', 'M', 'B', 'T']
    num = float('{:.3g}'.format(num))
    magnitude = 0
    # Fixed: stop at the largest known suffix instead of raising IndexError
    # for |num| >= 1e15 (values beyond 'T' stay scaled in trillions).
    while abs(num) >= 1000 and magnitude < len(suffixes) - 1:
        magnitude += 1
        num /= 1000.0
    # '{:f}' then strip trailing zeros / dot: 1.230000 -> '1.23'.
    return '{}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'),
                         suffixes[magnitude])
|
<reponame>soleneulmer/FADE
import warnings
import time
import datetime
import pandas as pd
from . import format_dataset as fd
from pprint import pprint
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from scipy.stats import spearmanr, pearsonr
from sklearn.feature_selection import f_regression, mutual_info_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import learning_curve, validation_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from scipy.stats import binned_statistic
from scipy.stats import multivariate_normal as mvn
import lime
import lime.lime_tabular
from astropy.units import earthMass, earthRad, jupiterMass, jupiterRad
import os
import sys
# Public API of this module.
__all__ = [
    'load_dataset', 'load_dataset_errors', 'load_dataset_RV',
    'random_forest_regression', 'computing_errorbars',
    'predict_radius', 'plot_dataset', 'plot_true_predicted',
    'plot_learning_curve', 'plot_validation_curves',
    'plot_LIME_predictions'
]
here = os.path.abspath(os.path.dirname(__file__))
# Locate the published_output data directory: prefer the in-repo copy next
# to the package; if the probe file (a pickled trained model) is absent,
# fall back to the installed location under sys.prefix.
published_dir = os.path.join(here, '..', 'published_output')
if os.path.exists(os.path.join(published_dir, 'r2_0.84_2019-07-23_17:05.pkl')):
    pass
else:
    published_dir = os.path.join(sys.prefix, 'published_output')
# Default pre-trained random-forest model shipped with the package.
saved_pickle_model = os.path.join(published_dir, 'r2_0.84_2019-07-23_17:05.pkl')
def load_dataset(cat_exoplanet='exoplanet.eu_catalog_15April.csv',
                 cat_solar='solar_system_planets_catalog.csv',
                 feature_names=['mass', 'semi_major_axis',
                                'eccentricity', 'star_metallicity',
                                'star_radius', 'star_teff',
                                'star_mass', 'radius'],
                 solar=True):
    """Select exoplanets in the catalog which have mass and radius
    measurements as well as stellar parameters.
    This dataset is used to train and test the Random Forest.
    INPUTS: cat_exoplanet = CSV file from exoplanet.eu
            cat_solar = CSV file with the Solar system planets
            feature_names = list of features to select in the dataset
            solar = bool, append the Solar system planets if True
    OUTPUTS: dataset = pandas DataFrame of planets with mass & radius
             measurements; masses/radii are in Earth masses/radii"""
    print('\nLoading exoplanet dataset and solar system planets:')
    # Importing exoplanet dataset
    cat_exoplanet = os.path.join(published_dir, cat_exoplanet)
    dataset_exo = pd.read_csv(cat_exoplanet, index_col=0)
    # Importing Solar system dataset
    # Masses and Radii already in Earth metrics
    cat_solar = os.path.join(published_dir, cat_solar)
    dataset_solar_system = pd.read_csv(cat_solar, index_col=0)
    # Choosing features/data
    if not feature_names:
        print('No features selected, loading all features')
    else:
        dataset_exo = dataset_exo[feature_names]
        dataset_solar_system = dataset_solar_system[feature_names]
    # Removes the planets with NaN values
    dataset_exo = dataset_exo.dropna(axis=0, how='any')
    # Converting from Jupiter to Earth masses/radii - exoplanets
    print('Converting planet\'s mass/radius in Earth masses/radii')
    dataset_exo = fd.jupiter_to_earth_mass(dataset_exo, 'mass')
    dataset_exo = fd.jupiter_to_earth_radius(dataset_exo, 'radius')
    # Correct the catalog mass of Kepler-10 c.
    # NOTE: use a single .loc[row, col] indexer so the assignment writes
    # into the DataFrame itself -- the chained form
    # `df.loc[row].mass = ...` assigns to a temporary copy and may
    # silently leave the DataFrame unchanged.
    print('\nKepler 10 c changing mass')
    print(dataset_exo.loc['Kepler-10 c', 'mass'])
    dataset_exo.loc['Kepler-10 c', 'mass'] = 17.2
    print(dataset_exo.loc['Kepler-10 c', 'mass'], '\n')
    # Add the Solar system planets with the Exoplanets
    # (pd.concat replaces the deprecated DataFrame.append)
    if solar:
        dataset = pd.concat([dataset_exo, dataset_solar_system])
    else:
        dataset = dataset_exo
    # Removes the planets with NaN values
    dataset = dataset.dropna(axis=0, how='any')
    # Add observables derived from the catalog columns
    print('Computing planet\'s equilibrium temperature')
    dataset = fd.add_temp_eq_dataset(dataset)
    print('Computing stellar luminosity')
    dataset = fd.add_star_luminosity_dataset(dataset)
    # Number of planets in dataset
    print('\nNumber of planets: ', len(dataset))
    print('\n', dataset.head())
    # Returning the dataset with selected features
    select_features = ['mass',
                       'semi_major_axis',
                       'temp_eq',
                       'star_luminosity',
                       'star_radius', 'star_teff',
                       'star_mass',
                       'radius']
    print('Selecting features:')
    pprint(select_features)
    dataset = dataset[select_features]
    return dataset
def load_dataset_errors(cat_exoplanet='exoplanet.eu_catalog_15April.csv',
                        cat_solar='solar_system_planets_catalog.csv',
                        solar=True):
    """Select exoplanets in the catalog which have uncertainty measurements
    as well as stellar parameters.
    If there is no uncertainty measurement, the uncertainty is set to
    the 0.9 quantile of the distribution of uncertainties.
    This dataset is used to compute error bars for the test set.
    INPUTS: cat_exoplanet = CSV file from exoplanet.eu
            cat_solar = CSV file with the Solar system planets
            solar = bool, append the Solar system planets if True
    OUTPUTS: dataset = pandas DataFrame with features interleaved with
             their uncertainties; masses/radii are in Earth masses/radii"""
    print('\nLoading exoplanet dataset and solar system planets:')
    # Importing exoplanet dataset
    cat_exoplanet = os.path.join(published_dir, cat_exoplanet)
    dataset_exo = pd.read_csv(cat_exoplanet, index_col=0)
    dataset_exo = dataset_exo[['mass', 'mass_error_min', 'mass_error_max',
                               'radius',
                               'radius_error_min', 'radius_error_max',
                               'semi_major_axis', 'semi_major_axis_error_min',
                               'semi_major_axis_error_max',
                               'eccentricity', 'eccentricity_error_min',
                               'eccentricity_error_max',
                               'star_mass',
                               'star_mass_error_min', 'star_mass_error_max',
                               'star_radius', 'star_radius_error_min',
                               'star_radius_error_max',
                               'star_teff',
                               'star_teff_error_min', 'star_teff_error_max']]
    # Importing Solar system dataset (already carries symmetric errors)
    cat_solar = os.path.join(published_dir, cat_solar)
    dataset_solar_system = pd.read_csv(cat_solar, index_col=0)
    dataset_solar_system = dataset_solar_system[['mass', 'mass_error',
                                                 'semi_major_axis',
                                                 'semi_major_axis_error',
                                                 'eccentricity',
                                                 'eccentricity_error',
                                                 'star_mass',
                                                 'star_mass_error',
                                                 'star_radius',
                                                 'star_radius_error',
                                                 'star_teff',
                                                 'star_teff_error',
                                                 'radius', 'radius_error']]
    # Remove NaNs in the feature columns only (error columns are filled below)
    dataset_exo = dataset_exo.dropna(subset=['mass', 'semi_major_axis',
                                             'star_radius', 'star_mass',
                                             'star_teff', 'radius'])
    dataset_solar_system = dataset_solar_system.dropna(subset=['mass',
                                                               'semi_major_axis',
                                                               'star_radius',
                                                               'star_mass',
                                                               'star_teff',
                                                               'radius'])
    # Replace inf by NaN so they are treated as missing errors too
    dataset_exo = dataset_exo.replace([np.inf, -np.inf], np.nan)
    # Replace NaN values in the error features by the 0.9 quantile value
    error_columns = ['mass_error_min', 'mass_error_max',
                     'radius_error_min', 'radius_error_max',
                     'semi_major_axis_error_min', 'semi_major_axis_error_max',
                     'eccentricity_error_min', 'eccentricity_error_max',
                     'star_mass_error_min', 'star_mass_error_max',
                     'star_radius_error_min', 'star_radius_error_max',
                     'star_teff_error_min', 'star_teff_error_max']
    for error_col in error_columns:
        # find the 0.9 quantile value of the error columns
        max_error = dataset_exo[error_col].quantile(0.9)
        print(error_col, max_error)
        # replace NaN by the 0.9 quantile error value (a conservative bound)
        dataset_exo[error_col] = dataset_exo[error_col].replace(np.nan,
                                                                max_error)
    # Converting from Jupiter to Earth masses/radii - exoplanets
    print('Converting planet\'s mass/radius in Earth masses/radii')
    dataset_exo = fd.jupiter_to_earth_mass(dataset_exo, 'mass')
    dataset_exo = fd.jupiter_to_earth_mass(dataset_exo, 'mass_error_max')
    dataset_exo = fd.jupiter_to_earth_mass(dataset_exo, 'mass_error_min')
    dataset_exo = fd.jupiter_to_earth_radius(dataset_exo, 'radius')
    dataset_exo = fd.jupiter_to_earth_radius(dataset_exo, 'radius_error_max')
    dataset_exo = fd.jupiter_to_earth_radius(dataset_exo, 'radius_error_min')
    # Compute one symmetric '<col>_error' column per feature as the
    # absolute mean of the min/max catalog errors
    for col in ['mass', 'radius', 'semi_major_axis', 'eccentricity',
                'star_mass', 'star_radius', 'star_teff']:
        dataset_exo[col + '_error'] = dataset_exo[
            [col + '_error_min', col + '_error_max']].mean(axis=1).abs()
    dataset_exo = dataset_exo[['mass', 'mass_error',
                               'semi_major_axis',
                               'semi_major_axis_error',
                               'eccentricity',
                               'eccentricity_error',
                               'star_mass',
                               'star_mass_error',
                               'star_radius',
                               'star_radius_error',
                               'star_teff',
                               'star_teff_error',
                               'radius', 'radius_error']]
    # Correct the catalog mass of Kepler-10 c.
    # NOTE: use a single .loc[row, col] indexer so the assignment writes
    # into the DataFrame itself -- chained `df.loc[row].mass = ...`
    # assigns to a temporary copy and may silently leave it unchanged.
    print('\nKepler 10 c changing mass and mass error')
    print(dataset_exo.loc['Kepler-10 c', 'mass'])
    print(dataset_exo.loc['Kepler-10 c', 'mass_error'])
    dataset_exo.loc['Kepler-10 c', 'mass'] = 17.2
    dataset_exo.loc['Kepler-10 c', 'mass_error'] = 1.9
    print(dataset_exo.loc['Kepler-10 c', 'mass'])
    print(dataset_exo.loc['Kepler-10 c', 'mass_error'], '\n')
    # Add the Solar system planets with the Exoplanets
    # (pd.concat replaces the deprecated DataFrame.append)
    if solar:
        dataset = pd.concat([dataset_exo, dataset_solar_system])
    else:
        dataset = dataset_exo
    # Add observables with propagated uncertainties
    print('Computing planet\'s equilibrium temperature')
    dataset = fd.add_temp_eq_error_dataset(dataset)
    print('Computing stellar luminosity')
    dataset = fd.add_star_luminosity_error_dataset(dataset)
    # Number of planets in dataset
    print('\nNumber of planets: ', len(dataset))
    # Select the same features as the original dataset
    dataset = dataset[['mass', 'mass_error',
                       'star_luminosity',
                       'star_luminosity_error',
                       'temp_eq', 'temp_eq_error',
                       'semi_major_axis',
                       'semi_major_axis_error',
                       'star_mass', 'star_mass_error',
                       'star_radius', 'star_radius_error',
                       'star_teff', 'star_teff_error',
                       'radius', 'radius_error']]
    print('The selected features can be change in [load_dataset_errors]')
    print('\n', dataset.head())
    return dataset
def load_dataset_RV(catalog_exoplanet='exoplanet.eu_catalog_15April.csv',
                    feature_names=['mass', 'mass_error_min', 'mass_error_max',
                                   'semi_major_axis',
                                   'eccentricity',
                                   'star_metallicity',
                                   'star_radius',
                                   'star_teff', 'star_mass']):
    """Select exoplanet in the catalog which are detected by RV
    and do not have mass measurement.
    This dataset will be used to later predict their masses
    INPUTS: catalog_exoplanet = CSV file from exoplanet.eu
            features = list of features to select in the dataset
    OUPUTS: dataset_radial = pandas struct with exoplanets
            detected by RV without radius measurements
            the mass is in Earth massses"""
    print('\nLoading exoplanet dataset found with RVs:')
    # Resolve the catalog path relative to the published data directory
    catalog_exoplanet = os.path.join(published_dir, catalog_exoplanet)
    dataset = pd.read_csv(catalog_exoplanet, index_col=0)
    # Select detected by RV
    dataset_radial = dataset[dataset.detection_type == 'Radial Velocity']
    # dataset_radial = dataset
    # Keep only planets whose radius column is Null (=NaN),
    # i.e. planets without a radius measurement
    dataset_radial = dataset_radial[pd.isnull(dataset_radial['radius'])]
    # Choosing features/data
    if not feature_names:
        print('No features selected, loading all features')
        pass
    else:
        print('Selecting features:')
        pprint(feature_names)
        dataset_radial = dataset_radial[feature_names]
    # Excluding exoplanets with missing data in any required feature
    dataset_radial = dataset_radial.dropna(subset=['mass', 'semi_major_axis',
                                                   'eccentricity',
                                                   'star_metallicity',
                                                   'star_radius', 'star_teff',
                                                   'star_mass'])
    # Replace inf by NaN so they are treated as missing errors below
    dataset_radial = dataset_radial.replace([np.inf, -np.inf], np.nan)
    # Replace NaN values in the error features
    # NOTE(review): unlike load_dataset_errors(), the missing mass errors
    # are set to 0.0 here (the 0.9-quantile line is commented out)
    error_columns = ['mass_error_min', 'mass_error_max']
    for error_col in error_columns:
        # find the 0.9 quantile value of the error columns
        # max_error = dataset_radial[error_col].quantile(0.9)
        max_error = 0.0
        print(error_col, max_error)
        # replace NaN by the chosen fill value
        dataset_radial[error_col] = dataset_radial[error_col].replace(np.nan,
                                                                      max_error)
    # Converting from Jupiter to Earth masses/radii - exoplanets
    print('Converting planet\'s mass/radius in Earth masses/radii')
    dataset_radial = fd.jupiter_to_earth_mass(dataset_radial, 'mass')
    dataset_radial = fd.jupiter_to_earth_mass(dataset_radial, 'mass_error_max')
    dataset_radial = fd.jupiter_to_earth_mass(dataset_radial, 'mass_error_min')
    # Computes the average (symmetric) mass error column
    dataset_radial['mass_error'] = dataset_radial[['mass_error_min',
                                                   'mass_error_max']].mean(axis=1).abs()
    # Adding observables derived from the catalog columns
    print('Computing planet\'s equilibrium temperature')
    dataset_radial = fd.add_temp_eq_dataset(dataset_radial)
    print('Computing stellar luminosity')
    dataset_radial = fd.add_star_luminosity_dataset(dataset_radial)
    print('\nNumber of planets: ', len(dataset_radial))
    # Remove the mass error column: keep only the features the
    # random forest model was trained on
    dataset_radial = dataset_radial[['mass',
                                     'semi_major_axis',
                                     'temp_eq',
                                     'star_luminosity',
                                     'star_radius', 'star_teff',
                                     'star_mass']]
    return dataset_radial
def random_forest_regression(dataset,
                             model=saved_pickle_model,
                             fit=False):
    """Split the dataset into a training (75%) and testing set (25%)
    Removing 3 outliers planets from both sets
    If fit is True:
    Fitting the hyperparameters of the random forest regression
    otherwise loading a already fitted model
    INPUTS: dataset = pandas dataframe with all the exoplanets
            and their planetary and stellar parameters as features
            model = random forest model with best fit hyperparameters
            fit = boolean, to do the fitting (True) or not (False)
    OUPUTS: regr = the random forest regression model
            y_test_predict = radius predictions of the test set
            train_test_values = arrays with the values of the train and test sets
            train_test_sets = pandas dataframes with exoplanets and features names
            as well as the values"""
    # Preparing the training and test sets
    # ------------------------------------
    # Exoplanet and Solar system dataset
    # NOTE(review): assumes the first 501 rows are exoplanets and the
    # remainder the Solar system planets -- tied to the catalog used;
    # verify if the input catalog changes
    dataset_exo = dataset[:501]
    dataset_sol = dataset[501:]
    # Separating the data into dependent and independent variables
    features = dataset_exo.iloc[:, :-1]  # mass, teq, etc
    labels = dataset_exo.iloc[:, -1]     # radius
    # Splitting the dataset into the Training set and Test set
    # (fixed random_state for reproducibility)
    X_train, X_test, y_train, y_test = train_test_split(features,
                                                        labels,
                                                        test_size=0.25,
                                                        random_state=0)
    # Same split for the Solar system planets, then merged into the
    # exoplanet train/test sets
    features_sol = dataset_sol.iloc[:, :-1]
    labels_sol = dataset_sol.iloc[:, -1]
    X_train_sol, X_test_sol, y_train_sol, y_test_sol = train_test_split(features_sol,
                                                                        labels_sol,
                                                                        test_size=0.25,
                                                                        random_state=0)
    X_train = X_train.append(X_train_sol)
    y_train = y_train.append(y_train_sol)
    X_test = X_test.append(X_test_sol)
    y_test = y_test.append(y_test_sol)
    # Outliers in the sample
    # Remove HATS-12 b from the test set
    X_test = X_test.drop(['HATS-12 b'])
    y_test = y_test.drop(labels=['HATS-12 b'])
    print('\nHATS-12 b removes from test set\n')
    # Remove K2-95 b from the training set
    X_train = X_train.drop(['K2-95 b'])
    y_train = y_train.drop(labels=['K2-95 b'])
    print('\nK2-95 b removes from training set\n')
    # Remove Kepler-11 g from the training set
    X_train = X_train.drop(['Kepler-11 g'])
    y_train = y_train.drop(labels=['Kepler-11 g'])
    print('\nKepler-11 g removes from training set\n')
    # Keep both raw numpy values and the labeled DataFrames
    train_test_values = [X_train.values, X_test.values,
                         y_train.values, y_test.values]
    train_test_sets = [X_train, X_test, y_train, y_test]
    # Fitting the hyperparameters of the random forest model
    # with the grid search method
    # ------------------------------------------------------
    if fit:
        # Setting up the grid of hyperparameters
        rf = GridSearchCV(RandomForestRegressor(),
                          param_grid={'n_estimators': np.arange(80, 200),
                                      'max_depth': np.arange(4, 10),
                                      'max_features': np.arange(3, 6),
                                      'min_samples_split': np.arange(4, 5)},
                          cv=3, verbose=1, n_jobs=-1)
        # Fitting training set - finding best hyperparameters
        rf.fit(X_train, y_train)
        # Best hyperparameters found by the grid search
        print(rf.best_params_)
        # Random forest model with the best hyperparameters
        regr = RandomForestRegressor(n_estimators=rf.best_params_['n_estimators'],
                                     max_depth=rf.best_params_['max_depth'],
                                     max_features=rf.best_params_['max_features'],
                                     min_samples_split=rf.best_params_['min_samples_split'],
                                     random_state=0, oob_score=True)
        # Saving the (not yet re-fitted) random forest model in a file,
        # named after the grid-search score and the current timestamp
        outdir = 'bem_output'
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        name_Rf = 'r2_' + str(round(rf.best_score_, 2)) + '_' + str(datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")) + '.pkl'
        name_Rf = os.path.join(outdir, name_Rf)
        joblib.dump(regr, name_Rf)
        print('RF model save in : ', name_Rf)
    else:
        # Loading the random forest model saved
        print('Loading random forest model: ', model)
        regr = joblib.load(model)
    # Fit the best random forest model to the training set
    # ----------------------------------------------------
    regr.fit(X_train, y_train)
    # Predict the radius for the training and testing sets
    y_train_predict = regr.predict(X_train)
    y_test_predict = regr.predict(X_test)
    # Scores of the random forest
    test_score = r2_score(y_test, y_test_predict)
    pearson = pearsonr(y_test, y_test_predict)
    print(f'Test set, R-2 score: {test_score:>5.3}')
    print(f'\nTest set, Pearson correlation: {pearson[0]:.3}')
    # Mean squared errors of the train and test set
    print('Root mean squared errors')
    print('Train set: ', np.sqrt(np.mean((y_train-y_train_predict)**2)),
          '\nTest set: ', np.sqrt(np.mean((y_test-y_test_predict)**2)))
    # Feature importance
    name_features = dataset.columns.tolist()
    print('\nFeature importance')
    _ = [print(name, ': \t', value)
         for name, value
         in zip(name_features, regr.feature_importances_)]
    return regr, y_test_predict, train_test_values, train_test_sets
def computing_errorbars(regr, dataset_errors, train_test_sets):
    """Propagate input uncertainties through the random forest by
    Monte-Carlo sampling each test-set planet's features.
    INPUTS: regr = random forest regression model
            dataset_errors = pandas dataframe with each feature
            and their associated uncertainties
            train_test_sets = pandas dataframes with exoplanets
            and features names
            as well as the values
    OUTPUTS: radii_test_output_error = error on the predicted
             radius for the Test set
             radii_test_input_error = original uncertainty
             on the radius measurements"""
    # Original train and test sets
    X_train, X_test, y_train, y_test = train_test_sets
    # Cross matching the Test set with the dataset with errors
    # to compute error bars for the exoplanets which have input errors
    dataset_errors = dataset_errors.loc[X_test.index.values.tolist()]
    # Remove an exoplanet in case there is still a NaN
    # in one of the feature
    dataset_errors = dataset_errors.dropna(axis=0, how='any')
    # Matrix with all the errors on the different features
    # (columns alternate value/error; last two are radius and its error)
    features_errors = dataset_errors.iloc[:, :-2].values
    # Radius vector
    radii_test = dataset_errors.iloc[:, -2].values
    # Error on the radius vector
    radii_test_input_error = dataset_errors.iloc[:, -1].values
    # Empty vector to store the error bars
    radii_test_output_error = np.zeros_like(radii_test_input_error)
    for i in range(radii_test.size):
        # print(i)
        # from each line in X_train generate new values for all parameters
        # with a multivariate gaussian which has
        # a vector of mean with the value columns and std with the error column
        # mean_values_0 = features_errors[i,0:-1:2]
        # >> takes the features : [mass0, temp_eq0, ...]
        # std_errors_0 = features_errors[0,1:-1:2]
        # >> takes the errors : [mass_err0, temp_eq_err0, ...]
        # Draw 1000 samples around the measured features and take the
        # std of the RF predictions as the output error bar.
        # NOTE(review): the errors are squared? No -- np.diag(errors) is
        # used directly as the covariance, i.e. errors are treated as
        # variances, not standard deviations; confirm intended behavior.
        rerr = regr.predict(mvn(features_errors[i, ::2],
                                np.diag(features_errors[i, 1::2]),
                                allow_singular=True).rvs(1000)).std()
        radii_test_output_error[i] = rerr
        # print(radii_test_output_error[i], radii_test_input_error[i])
    # Save the errorbars in a txt file
    outdir = 'bem_output'
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    filename = 'bem_output/test_radius_RF_errorbars.dat'
    print('Error bars of the test set are saved in: ', filename)
    np.savetxt(filename, radii_test_output_error)
    return radii_test_output_error, radii_test_input_error
def predict_radius(my_planet=np.array([[1, 1, 0, 1, 5777, 1]]),
                   my_name=np.array(['My planet b']),
                   regr=None,
                   jupiter_mass=False,
                   error_bar=False):
    """Predict radius of a planet
    given the planetary mass, semi major axis, eccentricity,
    stellar radius, star effective temperature, and stellar mass
    INPUTS: my_planet = array with a shape (1,6)
            np.array([[planetary mass,
                       semi major axis,
                       eccentricity,
                       star_radius,
                       star_teff,
                       star_mass]])
            my_name = array with a shape (1,)
            np.array(['my planet b'])
            regr = random forest regression model
            jupiter_mass = bool, True is the planet's mass is given in Jupiter mass
            error_bar = bool, True if an error is provided for each parameter
            such as
            my_planet = np.array([[planetary mass, planetary mass error,
                                   semi major axis, semi major axis error,
                                   eccentricity, eccentricity error,
                                   star_radius, star_radius error,
                                   star_teff, star_teff error,
                                   star_mass, star_mass error]])
    OUTPUTS: radius = planet's radius predicting with the RF model
             (returned as [radius, radius_error] when error_bar is True)
             my_pred_planet = pandas dataframe with the input features
             used by the random forest model
             Can be used as input in plot_LIME_predictions()
             The features are now:
             'mass', 'semi_major_axis',
             'temp_eq', 'star_luminosity',
             'star_radius', 'star_teff',
             'star_mass'"""
    if regr is None:
        # Loading the published random forest model saved on disk
        print('Loading random forest model: ', saved_pickle_model)
        regr = joblib.load(saved_pickle_model)
    else:
        pass
    if error_bar:
        # Branch 1: inputs carry (value, error) pairs -> also compute
        # an error bar on the predicted radius
        print('\nPredicting radius for planet:\n')
        my_planet = pd.DataFrame(data=my_planet,
                                 index=my_name,
                                 columns=np.array(['mass', 'mass_error',
                                                   'semi_major_axis', 'semi_major_axis_error',
                                                   'eccentricity', 'eccentricity_error',
                                                   'star_radius', 'star_radius_error',
                                                   'star_teff', 'star_teff_error',
                                                   'star_mass', 'star_mass_error']))
        # Changing mass units to Earth mass
        if jupiter_mass:
            my_planet = fd.jupiter_to_earth_mass(my_planet, 'mass')
            my_planet = fd.jupiter_to_earth_mass(my_planet, 'mass_error')
        else:
            print('Planetary mass is given in Earth mass')
        # Computing equilibrium temperature (with propagated error)
        my_planet = fd.add_temp_eq_error_dataset(my_planet)
        # Computing stellar luminosity (with propagated error)
        my_planet = fd.add_star_luminosity_error_dataset(my_planet)
        # Planet with error bars
        print('Planet with error bars\n', my_planet.iloc[0])
        # Radius error prediction: sample the features with a
        # multivariate Gaussian and take the std of the RF predictions
        my_pred_planet = my_planet[['mass', 'mass_error',
                                    'semi_major_axis', 'semi_major_axis_error',
                                    'temp_eq', 'temp_eq_error',
                                    'star_luminosity', 'star_luminosity_error',
                                    'star_radius', 'star_radius_error',
                                    'star_teff', 'star_teff_error',
                                    'star_mass', 'star_mass_error']]
        # Feature / feature error (interleaved columns: even = values,
        # odd = errors)
        features_with_errors = my_pred_planet.iloc[0].values.reshape(1, -1)
        radius_error = regr.predict(mvn(features_with_errors[0, ::2],
                                        np.diag(features_with_errors[0, 1::2]),
                                        allow_singular=True).rvs(1000)).std()
        # Radius prediction on the central values only
        my_pred_planet = my_planet[['mass', 'semi_major_axis',
                                    'temp_eq', 'star_luminosity',
                                    'star_radius', 'star_teff',
                                    'star_mass']]
        radius = regr.predict(my_pred_planet.iloc[0].values.reshape(1, -1))
        # Print
        print('Predicted radius (Rearth): ', radius, '+-', radius_error)
        return [radius, radius_error], my_pred_planet
    else:
        # Branch 2: plain feature values, no uncertainty propagation
        print('\nPredicting radius for planet:\n')
        my_planet = pd.DataFrame(data=my_planet,
                                 index=my_name,
                                 columns=np.array(['mass', 'semi_major_axis',
                                                   'eccentricity',
                                                   'star_radius',
                                                   'star_teff', 'star_mass']))
        # Changing mass units to Earth mass
        if jupiter_mass:
            my_planet = fd.jupiter_to_earth_mass(my_planet, 'mass')
        else:
            print('Planetary mass is given in Earth mass')
        # Computing equilibrium temperature
        my_planet = fd.add_temp_eq_dataset(my_planet)
        # Computing stellar luminosity
        my_planet = fd.add_star_luminosity_dataset(my_planet)
        # Select the features the model was trained on
        my_pred_planet = my_planet[['mass', 'semi_major_axis',
                                    'temp_eq', 'star_luminosity',
                                    'star_radius', 'star_teff',
                                    'star_mass']]
        # Radius prediction
        print(my_pred_planet.iloc[0])
        radius = regr.predict(my_pred_planet.iloc[0].values.reshape(1, -1))
        print('Predicted radius (Rearth): ', radius)
        return radius, my_pred_planet
def plot_dataset(dataset, predicted_radii=[], rv=False):
    """Scatter the mass-radius diagram, colour-coded by equilibrium
    temperature.

    INPUTS: dataset = pandas DataFrame with mass/radius/temp_eq columns
            predicted_radii = radii to plot on the y axis when rv=True
            rv = bool; False plots the verification sample (with the
            three outlier planets removed), True plots the RV sample
            against the predicted radii
    OUTPUTS: None (draws a matplotlib figure)"""
    def _mass_radius_scatter(masses, radii, temps, label):
        # Common log-log scatter with a temperature colourbar
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xscale('log')
        ax.set_yscale('log')
        plt.scatter(masses, radii, c=temps,
                    cmap=cm.magma_r, s=4, label=label)
        plt.colorbar(label=r'Equilibrium temperature (K)')
        plt.xlabel(r'Mass ($M_\oplus$)')
        plt.ylabel(r'Radius ($R_\oplus$)')
        plt.legend(loc='lower right', markerscale=0,
                   handletextpad=0.0, handlelength=0)

    if not rv:
        # Drop the outlier planets excluded from train/test
        cleaned = dataset.drop(['Kepler-11 g'])
        cleaned = cleaned.drop(['K2-95 b'])
        cleaned = cleaned.drop(['HATS-12 b'])
        _mass_radius_scatter(cleaned.mass, cleaned.radius,
                             cleaned.temp_eq, 'Verification sample')
    if rv:
        _mass_radius_scatter(dataset.mass, predicted_radii,
                             dataset.temp_eq, 'RV sample')
    return None
def plot_true_predicted(train_test_sets, radii_test_RF,
                        radii_test_output_error):
    """Plot the test-set comparison between the true radii and the
    random forest predictions, with the propagated error bars.
    INPUTS: train_test_sets = [X_train, X_test, y_train, y_test]
            radii_test_RF = RF radius predictions for the test set (x axis)
            radii_test_output_error = error bars on the predictions
    OUTPUTS: None (draws a matplotlib figure)"""
    X_train, X_test, y_train, y_test = train_test_sets
    plt.figure()
    plt.errorbar(radii_test_RF, y_test.values,
                 xerr=radii_test_output_error,
                 fmt='.', c='C1', elinewidth=0.5,
                 label='Random forest')
    # 1:1 line and labels
    plt.plot(np.sort(y_test.values), np.sort(y_test.values), 'k-', lw=0.25)
    # Bug fix: the original called plt.ylabel twice, leaving the x axis
    # unlabeled.  x holds the predictions, y the true radii.
    plt.xlabel(r'Predicted radius ($R_\oplus$)')
    plt.ylabel(r'True radius ($R_\oplus$)')
    plt.legend(loc='lower right')
    return None
def plot_learning_curve(regr, dataset, save=False, fit=False):
    """INPUTS: regr = random forest regression model
               dataset = pandas dataframe with features and labels
               save = bool, writes (True) or not (False) the scores
               fit = bool, computes the score if True,
               otherwise loads pre-computed scores from published_dir
       OUTPUTS: the matplotlib.pyplot module (figure drawn as side effect)
       Cross validation with 100 iterations
       to get smoother mean test and train score curves,
       each time with 20% data randomly selected as a validation set."""
    # Last column is the label (radius), everything before are features
    features = dataset.iloc[:, :-1].values  # mass, teq, etc
    labels = dataset.iloc[:, -1].values     # radius
    outdir = 'bem_output'
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    if fit:
        # NOTE(review): the docstring says 20% validation data but
        # test_size=0.1 is used here -- confirm which is intended
        cv = ShuffleSplit(n_splits=100, test_size=0.1, random_state=11)
        train_sizes, train_scores, test_scores = learning_curve(regr,
                                                                features,
                                                                labels,
                                                                cv=cv,
                                                                n_jobs=-1,
                                                                train_sizes=np.linspace(.1,
                                                                                        1.0,
                                                                                        10),
                                                                verbose=1)
    else:
        # Load the published pre-computed learning-curve scores
        train_sizes = np.loadtxt(os.path.join(published_dir, 'lc_train_sizes.dat'))
        train_scores = np.loadtxt(os.path.join(published_dir, 'lc_train_scores.dat'))
        test_scores = np.loadtxt(os.path.join(published_dir, 'lc_test_scores.dat'))
    plt.figure()
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # Mean +/- std bands across the CV splits
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="lower right")
    if save:
        # Persist the scores so later calls can use fit=False
        np.savetxt(os.path.join(outdir, 'lc_train_sizes.dat'), train_sizes)
        np.savetxt(os.path.join(outdir, 'lc_train_scores.dat'), train_scores)
        np.savetxt(os.path.join(outdir, 'lc_test_scores.dat'), test_scores)
    return plt
def plot_validation_curves(regr, dataset, name='features',
                           save=False, fit=False):
    """INPUTS: regr = random forest regression model
               dataset = pandas dataframe with features and labels
               name = str, can be 'features', 'tree', 'depth'
               (which hyperparameter to scan)
               save = bool, writes (True) or not (False) the scores
               fit = bool, computes the score if True,
               otherwise loads pre-computed scores from published_dir
       OUTPUTS: None (draws a matplotlib figure; optionally writes files)"""
    # Last column is the label (radius), everything before are features
    features = dataset.iloc[:, :-1].values  # mass, teq, etc
    labels = dataset.iloc[:, -1].values     # radius
    outdir = 'bem_output'
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    # Map the requested curve name to a hyperparameter and its scan range
    if name == 'features':
        param_range = np.arange(features.shape[1])+1
        param_name = 'max_features'
    elif name == 'tree':
        param_range = np.array([10, 20, 35, 50, 100, 1000, 5000, 10000])
        param_name = 'n_estimators'
    elif name == 'depth':
        param_range = np.array([1, 2, 3, 4, 5, 6, 7,
                                8, 9, 10, 50, 100, 1000])
        param_name = 'max_depth'
    else:
        print('Error the parameter of the validation curve is incorrect')
        print('Names can be features, tree, depth')
        return None
    # Need to catch the user warning:
    # Some inputs do not have OOB scores.
    # This probably means too few trees were used
    # to compute any reliable oob estimates.
    # with warnings.catch_warnings():
    #     warnings.simplefilter("ignore")
    if fit:
        train_scores, test_scores = validation_curve(regr, features, labels,
                                                     param_name=param_name,
                                                     param_range=param_range,
                                                     cv=3, scoring="r2",
                                                     n_jobs=-1, verbose=1)
    else:
        # Load the published pre-computed scores for the requested curve
        if name == 'features':
            train_scores = np.loadtxt(os.path.join(published_dir, 'vc_features_train_scores.dat'))
            test_scores = np.loadtxt(os.path.join(published_dir, 'vc_features_test_scores.dat'))
        elif name == 'tree':
            train_scores = np.loadtxt(os.path.join(published_dir, 'vc_tree_train_scores.dat'))
            test_scores = np.loadtxt(os.path.join(published_dir, 'vc_tree_test_scores.dat'))
        elif name == 'depth':
            train_scores = np.loadtxt(os.path.join(published_dir, 'vc_depth_train_scores.dat'))
            test_scores = np.loadtxt(os.path.join(published_dir, 'vc_depth_test_scores.dat'))
        else:
            pass
    # Averaging across the CV folds
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.figure()
    plt.title("Validation Curve with Random Forest regressor")
    plt.xlabel(param_name)
    plt.ylabel("Score")
    lw = 2
    plt.semilogx(param_range, train_scores_mean, label="Training score",
                 color="darkorange", lw=lw)
    plt.fill_between(param_range, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.2,
                     color="darkorange", lw=lw)
    plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
                 color="navy", lw=lw)
    plt.fill_between(param_range, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.2,
                     color="navy", lw=lw)
    plt.legend(loc="best")
    if save:
        # Persist the scores so later calls can use fit=False
        if name == 'features':
            np.savetxt(os.path.join(outdir, 'vc_features_train_scores.dat'), train_scores)
            np.savetxt(os.path.join(outdir, 'vc_features_test_scores.dat'), test_scores)
        elif name == 'tree':
            np.savetxt(os.path.join(outdir, 'vc_tree_train_scores.dat'), train_scores)
            np.savetxt(os.path.join(outdir, 'vc_tree_test_scores.dat'), test_scores)
        elif name == 'depth':
            np.savetxt(os.path.join(outdir, 'vc_depth_train_scores.dat'), train_scores)
            np.savetxt(os.path.join(outdir, 'vc_depth_test_scores.dat'), test_scores)
        else:
            pass
    return None
def plot_LIME_predictions(regr, dataset, train_test_sets,
                          planets=[],
                          my_pred_planet=pd.DataFrame(),
                          my_true_radius=None,
                          feature_name=['mass',
                                        'semi_major_axis',
                                        'temp_eq',
                                        'star_luminosity',
                                        'star_radius', 'star_teff',
                                        'star_mass',
                                        'radius']):
    """
    Compute and plot the LIME explanation for one or several radius predictions
    made by the random forest model
    INPUTS: REGR = the random forest model
            DATASET = the input dataset from which the RF is built
            TRAIN_TEST_SET = the training and test sets
            PLANETS = list of indexes of the planets in the Test set,
            for which we want an LIME explanation
            Contains maximum 6 numbers
            or
            MY_PRED_PLANET = pandas dataset with the input features
            used by the random forest model
            > mass, semi_major_axis, temp_eq, star_luminosity,
              star_radius, star_teff, star_mass
            The my_pred_planet output of predict_radius() can be used as
            my_pred_planet input for this function
            MY_TRUE_RADIUS = optional known radius for my_pred_planet
            FEATURE_NAME = list of input features used by the random forest
    OUTPUTS: EXP = LIME explainer, contains the LIME radius prediction
             (as a list of (feature, weight) pairs when my_pred_planet
             is provided)
    """
    # Data
    X_train, X_test, y_train, y_test = train_test_sets
    features = dataset.iloc[:, :-1].values  # mass, teq, etc
    labels = dataset.iloc[:, -1].values     # radius
    # Check if some features are non continuous: a feature with few
    # distinct values is treated as categorical by LIME
    nb_unique_obj_in_features = np.array([len(set(features[:, x]))
                                          for x in range(features.shape[1])])
    # In our case the list of categorical features is empty
    cat_features = np.argwhere(nb_unique_obj_in_features <= 10).flatten()
    # LIME explainer built on the training set
    explainer = lime.lime_tabular.LimeTabularExplainer(X_train.values,
                                                       feature_names=feature_name,
                                                       class_names=['radius'],
                                                       categorical_features=cat_features,
                                                       verbose=True,
                                                       mode='regression')
    # Select planets to explain with LIME
    if not my_pred_planet.empty:
        # Case 1: explain a single user-provided planet
        exp = explainer.explain_instance(my_pred_planet.values[0],
                                         regr.predict, num_features=7)
        if my_true_radius:
            print('True radius: ', my_true_radius)
        else:
            print('True radius was not provided')
        lime_radius = exp.local_pred
        rf_radius = exp.predicted_value
        # My plot of exp_as_pyplot()
        exp = exp.as_list()
        vals = [x[1] for x in exp]
        # Convert the LIME rule strings to LaTeX-friendly labels
        names = [x[0].replace("<=", r'$\leq$').replace('_',' ').replace('.00','').replace("<", "$<$").replace(">", "$>$") for x in exp]
        # print(names)
        vals.reverse()
        names.reverse()
        # Green bars for positive weights, red for negative
        colors = ['C2' if x > 0 else 'C3' for x in vals]
        pos = np.arange(len(exp)) + .5
        # Plotting
        plt.figure()
        plt.xlabel('Weight')
        plt.ylabel('Feature')
        plt.title(my_pred_planet.index[0], loc='right')
        rects = plt.barh(pos, vals, align='center', color=colors, alpha=0.5)
        for i, rect in enumerate(rects):
            # if rf_radius > 12.0:
            plt.text(plt.xlim()[0]+0.03, rect.get_y()+0.2, str(names[i]))
        # Text box summarising the true / RF / LIME radii
        if my_true_radius:
            textstr = '\n'.join((
                r'True radius=%.2f$R_\oplus$' % (my_true_radius, ),
                r'RF radius=%.2f$R_\oplus$' % (rf_radius, ),
                r'LIME radius=%.2f$R_\oplus$' % (lime_radius, )))
        else:
            textstr = '\n'.join((
                r'RF radius=%.2f$R_\oplus$' % (rf_radius, ),
                r'LIME radius=%.2f$R_\oplus$' % (lime_radius, )))
        # place a text box in upper left in axes coords
        plt.text(-4, 0.1, textstr,
                 bbox={'boxstyle': 'round', 'facecolor': 'white'})
        return exp
    elif not planets:
        # Case 2: no planets given -- use a default showcase of six
        # test-set planets
        planets.append(np.where(X_test.index == 'TRAPPIST-1 g')[0][0])
        planets.append(np.where(X_test.index == 'HATS-35 b')[0][0])
        planets.append(np.where(X_test.index == 'CoRoT-13 b')[0][0])
        planets.append(np.where(X_test.index == 'Kepler-75 b')[0][0])
        planets.append(np.where(X_test.index == 'WASP-17 b')[0][0])
        planets.append(np.where(X_test.index == 'Kepler-20 c')[0][0])
    else:
        pass
    # Plotting: one LIME bar chart per planet on a 3x2 grid
    fig, axs = plt.subplots(3, 2, constrained_layout=True, figsize=(15, 7.2712643025))
    axs = axs.flatten()
    for j, planet in enumerate(planets):
        print('\n', X_test.iloc[planet])
        print('True radius: ', y_test[planet])
        exp = explainer.explain_instance(X_test.values[planet],
                                         regr.predict, num_features=7)
        lime_radius = exp.local_pred
        rf_radius = exp.predicted_value
        # pprint(exp.as_list())
        # My plot of exp_as_pyplot()
        exp = exp.as_list()
        vals = [x[1] for x in exp]
        # Convert the LIME rule strings to LaTeX-friendly labels
        names = [x[0].replace("<=", r'$\leq$').replace('_',' ').replace('.00','').replace("<", "$<$").replace(">", "$>$") for x in exp]
        # print(names)
        vals.reverse()
        names.reverse()
        # Green bars for positive weights, red for negative
        colors = ['C2' if x > 0 else 'C3' for x in vals]
        pos = np.arange(len(exp)) + .5
        # Plotting
        axs[j].get_yaxis().set_visible(False)
        axs[j].set_xlabel('Weight')
        axs[j].set_ylabel('Feature')
        axs[j].set_title(X_test.iloc[planet].name, loc='right')
        rects = axs[j].barh(pos, vals, align='center', color=colors, alpha=0.5)
        for i, rect in enumerate(rects):
            # if rf_radius > 12.0:
            axs[j].text(axs[j].get_xlim()[0]+0.03, rect.get_y()+0.2, str(names[i]))
        # Text box summarising the true / RF / LIME radii
        textstr = '\n'.join((
            r'True radius=%.2f$R_\oplus$' % (y_test[planet], ),
            r'RF radius=%.2f$R_\oplus$' % (rf_radius, ),
            r'LIME radius=%.2f$R_\oplus$' % (lime_radius, )))
        # place a text box in upper left in axes coords
        axs[j].text(0.68, 0.1, textstr,
                    bbox={'boxstyle': 'round', 'facecolor': 'white'},
                    transform=axs[j].transAxes)
    # Plot the Mass Radius Temp eq relation
    # with LIME predicted planets in circles
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xscale('log')
    ax.set_yscale('log')
    size = X_test.temp_eq.values
    plt.scatter(X_test.mass.values, y_test.values,
                c=size, cmap=cm.magma_r)
    plt.colorbar(label=r'Equilibrium temperature (K)')
    plt.xlabel(r'Mass ($M_\oplus$)')
    plt.ylabel(r'Radius ($R_\oplus$)')
    plt.legend()
    # Circle the explained planets on the mass-radius diagram
    for planet in planets:
        plt.plot(X_test.iloc[planet].mass,
                 y_test.values[planet], 'o',
                 mfc='none', ms=12,
                 label=X_test.iloc[planet].name)
    plt.legend()
    return exp
|
<gh_stars>1-10
import sys;
import re;
import os;
import ldap;
import time;
from common import *
#test configuration
# Hosts and ports of the source and target LDAP (vmdir) instances under test.
source_host = 'gary-sles'
target_host = 'gary-testvm'
source_port = '11711'
target_port = '11711'
# Directory domains and bind credentials (passwords anonymized in this dump).
source_domain = 'bellevue'
target_domain = 'csp'
source_username = 'administrator'
target_username = 'administrator'
source_password = '<PASSWORD>'
target_password = '<PASSWORD>'
# Base DNs under which the generated test subtrees live.
source_base = 'cn=ComponentManager,dc=bellevue'
target_base = 'cn=ComponentManager,dc=csp'
server_id = '2'
# Shape of the generated test tree: CHILDREN_NUM children per container,
# LAYER_NUM levels deep (leaves are user entries).
CHILDREN_NUM = 10
LAYER_NUM = 3
#test data
# Attribute templates appended to every container / user entry.
container_list = [('objectClass', 'vmIdentity-Container')]
user_list = [('objectClass', 'vmIdentity-User')]
def find_rdn(dn):
    """Return the value of the leading 'cn=' RDN of dn (text up to the first comma)."""
    return re.match(r'cn=(.+?),', dn).group(1)
def delete_dn(l, dn):
    # Delete dn from the directory, treating "already gone" as success
    # (idempotent delete used during cleanup).
    try:
        l.delete_s(dn)
    except ldap.NO_SUCH_OBJECT:
        pass
def dfs_clean(l, dn):
    # Recursively delete the subtree rooted at dn, children first.
    # The SCOPE_BASE search is only an existence probe: if dn is absent the
    # search raises and there is nothing to clean.
    # Fixes: the bare `except:` (which also swallowed KeyboardInterrupt etc.)
    # is narrowed to ldap.LDAPError, and the unused result of the probe is
    # no longer assigned.
    try:
        l.search_s(dn, ldap.SCOPE_BASE, 'cn=*')
    except ldap.LDAPError:
        return
    children = l.search_s(dn, ldap.SCOPE_ONELEVEL, 'cn=*')
    for child in children:
        dfs_clean(l, child[0])
    delete_dn(l, dn)
def add_entry(l, entry):
    """Add one LDAP entry; entry is (dn, kind), kind 'user' or anything else for a container."""
    dn = entry[0]
    kind = entry[1]
    rdn = find_rdn(dn)
    attrs = [('cn', rdn)]
    if kind == 'user':
        attrs.append(('vmIdentity-Account', rdn))
        attrs.extend(user_list)
    else:
        attrs.extend(container_list)
    l.add_s(dn, attrs)
def dfs_add(l, dn, layer):
    """Recursively create a test subtree of depth `layer` under dn.

    layer == 0 creates a single user leaf; otherwise a container with
    CHILDREN_NUM children (users at layer 1, containers above that).
    """
    if layer == 0:
        add_entry(l, (dn, 'user'))
        return
    add_entry(l, (dn, 'container'))
    prefix = 'user' if layer == 1 else 'team'
    for idx in xrange(CHILDREN_NUM):
        child_dn = 'cn=%s%d,%s' % (prefix, idx, dn)
        dfs_add(l, child_dn, layer - 1)
def dfs_check(l, dn, layer):
    # Recursively verify the subtree rooted at dn: every non-leaf level must
    # have exactly CHILDREN_NUM children, leaves must have none.
    # BUGFIX: the original discarded the boolean returned by the recursive
    # calls, so a malformed deeper level still reported success. Child
    # failures are now propagated. The bare `except:` is narrowed to
    # ldap.LDAPError (a failed search means the tree is wrong).
    try:
        r = l.search_s(dn, ldap.SCOPE_ONELEVEL, 'cn=*')
    except ldap.LDAPError:
        return False
    expectednum = 0 if layer == 0 else CHILDREN_NUM
    if len(r) != expectednum:
        return False
    for entry in r:
        if not dfs_check(l, entry[0], layer - 1):
            return False
    return True
def init_source():
    # Reset the local vmdir service and run the domain promo for the source
    # domain (both helpers come from `common` via the star import).
    reset_service()
    vdc_promo(source_domain)
def init_target():
    # Re-initialize the remote target VM over ssh.
    # NOTE(review): host and script path are hard-coded to a dev machine.
    os.system('ssh root@gary-testvm python /mnt/hgfs/workspaces/lotus/main/vmdir/tools/vdcmerge/test/test_config.py')
#main function
init_source()
#init_target()
# Build LDAP URIs and administrator bind DNs for both directories.
source_uri = 'ldap://%s:%s' % (source_host, source_port)
target_uri = 'ldap://%s:%s' % (target_host, target_port)
source_dn = 'cn=%s,cn=users,dc=%s' % (source_username, source_domain)
target_dn = 'cn=%s,cn=users,dc=%s' % (target_username, target_domain)
#initialize and log in ldap servers
s = ldap.initialize(source_uri)
s.simple_bind_s(source_dn, source_password)
t = ldap.initialize(target_uri)
t.simple_bind_s(target_dn, target_password)
# Remove any leftover subtree on the target from a previous run.
dfs_clean(t, target_base)
#add test data
dfs_add(s, source_base, LAYER_NUM)
dfs_add(t, target_base, 0)
#call vdcmerge
# Lowercase flags describe the source instance, uppercase flags the target.
# `vdcmerge` (the binary path) presumably comes from `common` -- verify.
cmdline = '%s -p %s -d %s -u %s -w %s -b %s -i %s -H %s -P %s -D %s -U %s -W %s -B %s' % (vdcmerge,
    source_port, source_domain, source_username, source_password, source_base, server_id,
    target_host, target_port, target_domain, target_username, target_password, target_base);
cmdline += ' -v' #verbose mode
print '%s\n' % (cmdline)
os.system(cmdline)
#validate results
# 1) The merged subtree must now exist on the target.
if dfs_check(t, target_base, LAYER_NUM):
    print "Test passed."
else:
    print "Test failed."
# Give replication a moment, then check that the source (re-bound with the
# target's credentials) also sees the merged subtree.
time.sleep(5)
s = ldap.initialize(source_uri)
s.simple_bind_s(target_dn, target_password)
if dfs_check(s, target_base, LAYER_NUM):
    print "Test passed."
else:
    print "Test failed."
#unbind
s.unbind_s()
t.unbind_s()
|
from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the desk-module sidebar for the Wharf app.

	Each element is a section dict with a translated "label" and a list of
	"items" (doctype / page / report links rendered by frappe's desk UI).
	"name" values must match the DocType/Page/Report names exactly as they
	are spelled in the database, so apparent typos below are intentional.
	"""
	return [
		{
			"label": _("Payment"),
			"items": [
				{
					"type": "doctype",
					"name": "Wharf Payment Entry",
					"onboard": 1,
					"description": _("Wharf Payment Entry"),
				},
				{
					"type": "doctype",
					"name": "Bulk Payment",
					"description": _("Bulk Payment"),
				},
				{
					"type": "doctype",
					"name": "Wharf Payment Fee",
					"description": _("Wharf Payment Fee"),
				},
			]
		},
		{
			"label": _("Cargo Operation"),
			"items": [
				{
					"type": "doctype",
					"name": "Pre Advice",
					"description": _("Pre Advice."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Cargo",
					"description": _("Cargo."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Cargo Manifest",
					"description": _("Cargo Manifest."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Cargo Operation Planning",
					"description": _("Cargo Operation Planning."),
					"onboard":1
				},
			]
		},
		{
			"label": _("Warehouse"),
			"items": [
				{
					"type": "doctype",
					"name": "Devaning Request",
					"description": _("Devaning Request."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Cargo Warehouse",
					"description": _("Warehouse."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Warehouse Inspection",
					"description": _("Warehouse Inspection."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Warehouse Custom Check",
					"description": _("Warehouse Custom Check."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Warehouse Fee Payment",
					"description": _("Warehouse Fee Payment."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Cargo Warehouse Security",
					"description": _("Cargo Warehouse Security."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Warehouse Storage Fee",
					"description": _("Warehouse Storage Fee."),
					"onboard":1
				},
			]
		},
		{
			"label": _("Cargo Movement"),
			"items": [
				{
					"type": "doctype",
					"name": "Transhipment Cargo",
					"description": _("Transhipment Cargo."),
				},
				{
					"type": "doctype",
					"name": "Export",
					"description": _("Export Cargo."),
				},
				{
					"type": "doctype",
					"name": "Empty Deliver Payment",
					"description": _("Empty Deliver Payment."),
				},
				{
					"type": "doctype",
					"name": "Empty Containers",
					"description": _("Empty Containers."),
				},
			]
		},
		{
			"label": _("Yard Operation"),
			"items": [
				{
					"type": "doctype",
					"name": "Inspection",
					"description": _("Cargo Inspection."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Yard",
					"description": _("Yard Operation."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Cargo Stock Refrence",
					"description": _("Cargo Stock Refrence."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Cargo Stock",
					"description": _("Cargo Stock."),
					"onboard":1
				},
				{
					"type": "page",
					"name": "yard-planner",
					"label": _("Wharf Yard Planner"),
					"description": _("QSC Yard Planner."),
					"onboard":1
				},
			]
		},
		{
			"label": _("Security"),
			"items": [
				{
					"type": "doctype",
					"name": "Custom Inspection",
					"description": _("Custom Inspection."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Gate1",
					"description": _("Security Gate 1."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Bulk Item Count",
					"description": _("Bulk Item Count."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Gate2",
					"description": _("Security Main Gate."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Gate1 Export",
					"description": _("Gate1 Export."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Main Gate Export",
					"description": _("Main Gate Export."),
					"onboard":1
				},
			]
		},
		{
			"label": _("Pilot"),
			"items": [
				{
					"type": "doctype",
					"name": "Pilot Log",
					"description": _("Pilot Log Details."),
				},
			]
		},
		{
			"label": _("Fleet Management"),
			"items": [
				{
					"type": "doctype",
					"name": "Vehicle"
				},
				{
					"type": "doctype",
					"name": "Vehicle Log"
				},
			]
		},
		{
			"label": _("Setup"),
			"items": [
				{
					"type": "doctype",
					"name": "Item",
					"description": _("Item."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Agents",
					"description": _("Agents."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Vessel Type",
					"description": _("Vessel Type"),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Vessels",
					"description": _("Vessels."),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Cargo Type",
					"description": _("Cargo Type"),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Container Type",
					"description": _("Container Type"),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Ports",
					"description": _("Ports"),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Shift",
					"description": _("Working Shift"),
				},
				{
					"type": "doctype",
					"name": "Work Type",
					"description": _("Work Type"),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Container Truck",
					"description": _("Container Truck"),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Truck Drivers",
					"description": _("Truck Drivers"),
					"onboard":1
				},
			]
		},
		{
			"label": _("Fees"),
			"items": [
				{
					"type": "doctype",
					"name": "Wharf Handling Fee",
					"description": _("Wharf Handling Fee"),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Berthage Fee",
					"description": _("Berthage Fee"),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Storage Fee",
					"description": _("Storage Fee"),
					"onboard":1
				},
				{
					"type": "doctype",
					"name": "Wharfage Fee",
					"description": _("Wharfage Fee"),
					"onboard":1
				},
				{
					# NOTE(review): "<NAME>" is an anonymization placeholder
					# from the scrape -- restore the real doctype name
					# (description suggests "Devanning Fee").
					"type": "doctype",
					"name": "<NAME>",
					"description": _("Devanning Fee"),
					"onboard":1
				},
			]
		},
		{
			"label": _("Reports"),
			"items": [
				{
					"type": "report",
					"is_query_report": True,
					"name": "Cargo Movement"
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Cargo Warehouse Movement"
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Cargo Yard Efficiency Report"
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Cargo Statitics"
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Cargo Report"
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Vessels Report"
				},
			]
		},
	]
|
import os
import re
import sys
import uuid
import tempfile
import subprocess
import datetime as dt
import traceback
from functools import total_ordering, lru_cache
from pathlib import Path
try:
import numpy as np
except ImportError:
np = None
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.figure
except ImportError:
mpl = None
try:
import plotly.graph_objects as plotly_go
except ImportError:
plotly_go = None
import xlwings
# Module-level sentinel object; presumably marks "argument not supplied"
# where None is a meaningful value -- usage is elsewhere in the package.
missing = object()
def int_to_rgb(number):
    """Decode an integer color value into an (r, g, b) tuple.

    The red channel lives in the lowest byte, green in the next, blue above
    that (the layout produced by rgb_to_int).
    """
    value = int(number)
    red = value & 0xFF
    green = (value >> 8) & 0xFF
    blue = (value >> 16) & 0xFF
    return red, green, blue
def rgb_to_int(rgb):
    """Encode an (r, g, b) sequence as a single integer (r in the low byte)."""
    red, green, blue = rgb[0], rgb[1], rgb[2]
    return red + 256 * (green + 256 * blue)
def hex_to_rgb(color):
    """Convert a '#rrggbb' hex string (leading '#' optional) to an (r, g, b) tuple."""
    digits = color.removeprefix("#")
    red = int(digits[0:2], 16)
    green = int(digits[2:4], 16)
    blue = int(digits[4:6], 16)
    return (red, green, blue)
def rgb_to_hex(r, g, b):
    """Format r, g, b components as a lowercase '#rrggbb' hex string."""
    return "#%02x%02x%02x" % (r, g, b)
def get_duplicates(seq):
    """Return the set of hashable items that occur more than once in seq.

    Rewritten as an explicit loop: the original relied on a set comprehension
    whose condition called ``seen.add`` for its side effect, a well-known
    readability anti-pattern.
    """
    seen = set()
    duplicates = set()
    for item in seq:
        if item in seen:
            duplicates.add(item)
        else:
            seen.add(item)
    return duplicates
def np_datetime_to_datetime(np_datetime):
    """Convert a numpy.datetime64 into a naive datetime.datetime (UTC-based)."""
    epoch = np.datetime64("1970-01-01T00:00:00Z")
    seconds_since_epoch = (np_datetime - epoch) / np.timedelta64(1, "s")
    return dt.datetime.utcfromtimestamp(seconds_since_epoch)
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def col_name(i):
    """Return the Excel column name for a 1-based index (1 -> 'A', 27 -> 'AA').

    Raises IndexError for i < 1 or i > 16384 ('XFD', Excel's last column);
    the IndexError carries the zero-based index, matching the original.
    """
    i -= 1
    if i < 0 or i > 16383:
        raise IndexError(i)
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    name = ""
    while i >= 0:
        name = letters[i % 26] + name
        i = i // 26 - 1
    return name
def address_to_index_tuple(address):
    """
    Based on a function from XlsxWriter, which is distributed under the following
    BSD 2-Clause License:
    Copyright (c) 2013-2021, <NAME> <<EMAIL>>
    All rights reserved.
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
    1. Redistributions of source code must retain the above copyright notice, this
    list of conditions and the following disclaimer.
    2. Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    """
    # Parse an A1-style address like "$AB$12" into its column letters and
    # row digits ('$' anchors are accepted and ignored).
    match = re.match(r"(\$?)([A-Z]{1,3})(\$?)(\d+)", address)
    # Fold the base-26 column letters left-to-right ('A' == 1).
    col = 0
    for char in match.group(2):
        col = col * 26 + (ord(char) - ord("A") + 1)
    return int(match.group(4)), col
class VBAWriter:
    """Indentation-aware writer for generated VBA source code.

    VBA limits the physical length of a source line, so over-long lines are
    split with the ' _' continuation marker (see write_vba_line).
    """
    MAX_VBA_LINE_LENGTH = 1024
    VBA_LINE_SPLIT = " _\n"
    MAX_VBA_SPLITTED_LINE_LENGTH = MAX_VBA_LINE_LENGTH - len(VBA_LINE_SPLIT)
    class Block:
        """Context manager: writes `start` on entry and raises the writer's
        indent level for the duration of the with-block."""
        def __init__(self, writer, start):
            self.writer = writer
            self.start = start
        def __enter__(self):
            self.writer.writeln(self.start)
            self.writer._indent += 1
        def __exit__(self, exc_type, exc_val, exc_tb):
            self.writer._indent -= 1
    def __init__(self, f):
        # f: file-like object that receives the VBA text.
        self.f = f
        self._indent = 0
        self._freshline = True
    def block(self, template, **kwargs):
        """Return a Block context manager opened with the formatted template."""
        return VBAWriter.Block(self, template.format(**kwargs))
    def start_block(self, template, **kwargs):
        """Write a block header line and indent everything that follows."""
        self.writeln(template, **kwargs)
        self._indent += 1
    def end_block(self, template, **kwargs):
        # NOTE(review): the closing line is written *before* the dedent, so it
        # appears at the inner indent level -- confirm this asymmetry with
        # start_block is intentional.
        self.writeln(template, **kwargs)
        self._indent -= 1
    def write(self, template, **kwargs):
        """Write template (str.format-ted with kwargs if any), prefixing the
        current indent when starting a fresh line. Assumes template is
        non-empty (template[-1] is indexed)."""
        if kwargs:
            template = template.format(**kwargs)
        if self._freshline:
            # One space per indent level.
            template = (" " * self._indent) + template
            self._freshline = False
        self.write_vba_line(template)
        if template[-1] == "\n":
            self._freshline = True
    def write_label(self, label):
        # Labels are out-dented one level relative to the surrounding code.
        self._indent -= 1
        self.write(label + ":\n")
        self._indent += 1
    def writeln(self, template, **kwargs):
        """Write template followed by a newline."""
        self.write(template + "\n", **kwargs)
    def write_vba_line(self, vba_line):
        # Recursively split any line exceeding VBA's physical line limit,
        # inserting the ' _' continuation sequence at a space boundary.
        if len(vba_line) > VBAWriter.MAX_VBA_LINE_LENGTH:
            separator_index = VBAWriter.get_separator_index(vba_line)
            self.f.write(vba_line[:separator_index] + VBAWriter.VBA_LINE_SPLIT)
            self.write_vba_line(vba_line[separator_index:])
        else:
            self.f.write(vba_line)
    @classmethod
    def get_separator_index(cls, vba_line):
        """Return the index of the last space at which vba_line can be split
        without the first part exceeding the continuation limit."""
        for index in range(cls.MAX_VBA_SPLITTED_LINE_LENGTH, 0, -1):
            if " " == vba_line[index]:
                return index
        return (
            cls.MAX_VBA_SPLITTED_LINE_LENGTH
        ) # Best effort: split string at the maximum possible length
def try_parse_int(x):
    """Return int(x) when x parses as an integer; otherwise return x unchanged."""
    try:
        value = int(x)
    except ValueError:
        return x
    return value
@total_ordering
class VersionNumber:
    """A dotted version string ('1.10.2') with numeric-aware ordering.

    Components that parse as integers are compared numerically, so '1.10'
    sorts after '1.2'. Comparisons also accept plain strings, tuples
    (prefix comparison) and ints (major version only).
    """

    def __init__(self, s):
        def coerce(part):
            # Numeric components become ints; anything else stays a string.
            try:
                return int(part)
            except ValueError:
                return part

        self.value = tuple(coerce(part) for part in s.split("."))

    @property
    def major(self):
        return self.value[0]

    @property
    def minor(self):
        if len(self.value) > 1:
            return self.value[1]
        return None

    def __str__(self):
        return ".".join(str(part) for part in self.value)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(str(self)))

    def __eq__(self, other):
        if isinstance(other, VersionNumber):
            return self.value == other.value
        if isinstance(other, str):
            return self.value == VersionNumber(other).value
        if isinstance(other, tuple):
            return self.value[: len(other)] == other
        if isinstance(other, int):
            return self.major == other
        return False

    def __lt__(self, other):
        if isinstance(other, VersionNumber):
            return self.value < other.value
        if isinstance(other, str):
            return self.value < VersionNumber(other).value
        if isinstance(other, tuple):
            return self.value[: len(other)] < other
        if isinstance(other, int):
            return self.major < other
        raise TypeError("Cannot compare other object with version number")
def process_image(image, format):
    """Returns filename and is_temp_file"""
    # Accepts either a path (str/PathLike) or a matplotlib/plotly figure.
    # Figures are rendered into a uniquely named temp file; paths pass through.
    image = fspath(image)
    if isinstance(image, str):
        # Already a file on disk -- the caller owns it (not a temp file).
        return image, False
    elif mpl and isinstance(image, mpl.figure.Figure):
        image_type = "mpl"
    elif plotly_go and isinstance(image, plotly_go.Figure):
        image_type = "plotly"
    else:
        raise TypeError("Don't know what to do with that image object")
    if format == "vector":
        # Platform-dependent vector format: PDF on macOS, SVG elsewhere.
        if sys.platform.startswith("darwin"):
            format = "pdf"
        else:
            format = "svg"
    temp_dir = os.path.realpath(tempfile.gettempdir())
    filename = os.path.join(temp_dir, str(uuid.uuid4()) + "." + format)
    if image_type == "mpl":
        # NOTE(review): relies on mpl.backends.backend_agg being reachable via
        # the top-level matplotlib/pyplot imports -- confirm.
        canvas = mpl.backends.backend_agg.FigureCanvas(image)
        canvas.draw()
        image.savefig(filename, bbox_inches="tight", dpi=300)
        # Close the figure so it doesn't accumulate in pyplot's registry.
        plt.close(image)
    elif image_type == "plotly":
        image.write_image(filename)
    return filename, True
def fspath(path):
    """Convert a path-like object to str; return any other object unchanged.

    Unlike os.fspath, non-path-like inputs (e.g. figure objects handed to
    process_image) pass through untouched instead of raising TypeError.

    The former ``hasattr(os, "PathLike")`` guard was a Python <= 3.5 shim and
    has been removed, as its own TODO suggested (os.PathLike exists since 3.6).
    """
    if isinstance(path, os.PathLike):
        return os.fspath(path)
    return path
def read_config_sheet(book):
    """Return the key/value config stored on the book's 'xlwings.conf' sheet.

    Returns an empty dict when the sheet is missing or unreadable.
    """
    try:
        return book.sheets["xlwings.conf"]["A1:B1"].options(dict, expand="down").value
    except Exception:
        # A missing sheet currently produces different errors on mac and win,
        # so catch broadly -- but not BaseException (KeyboardInterrupt,
        # SystemExit), which the previous bare `except:` also swallowed.
        return {}
def read_user_config():
    """Returns keys in lowercase of xlwings.conf in the user's home directory"""
    config = {}
    if Path(xlwings.USER_CONFIG_FILE).is_file():
        with open(xlwings.USER_CONFIG_FILE, "r") as f:
            for line in f:
                values = re.findall(r'"[^"]*"', line)
                # A valid config line carries a quoted key AND a quoted value.
                # Skip malformed lines with a single quoted token instead of
                # raising IndexError on values[1] (the previous behavior).
                if len(values) > 1:
                    # Environment variables in the value are expanded.
                    config[values[0].strip('"').lower()] = os.path.expandvars(
                        values[1].strip('"')
                    )
    return config
@lru_cache(None)
def get_cached_user_config(key):
    # Cached (unbounded) lookup of one user-config value; returns None when
    # the key is absent. Case-insensitive because read_user_config lowercases
    # all keys. Note: the cache never sees later edits to the config file.
    return read_user_config().get(key.lower())
def exception(logger, msg, *args):
    """Log msg (%-style args) with the current traceback via logger, or fall
    back to stdout when the logger has no handlers configured."""
    if not logger.hasHandlers():
        print(msg % args)
        traceback.print_exc()
    else:
        logger.exception(msg, *args)
def chunk(sequence, chunksize):
    """Yield consecutive slices of sequence, each at most chunksize long."""
    for start in range(0, len(sequence), chunksize):
        end = start + chunksize
        yield sequence[start:end]
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return their answer.
    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).
    The "answer" return value is True for "yes" or False for "no".
    Licensed under the MIT License
    Copyright by <NAME>
    https://code.activestate.com/recipes/577058/
    """
    # Accepted spellings of each answer, mapped to the boolean result.
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    # The prompt capitalizes the default choice, if any.
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # Re-prompt until a recognized answer (or bare Enter with a default set).
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == "":
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
def prepare_sys_path(args_string):
    """Called from Excel to prepend the default paths and those from the PYTHONPATH
    setting to sys.path. While RunPython could use Book.caller(), the UDF server can't,
    as this runs before VBA can push the ActiveWorkbook over. UDFs also can't interact
    with the book object in general as Excel is busy during the function call and so
    won't allow you to read out the config sheet, for example. Before 0.24.9,
    these manipulations were handled in VBA, but couldn't handle SharePoint.
    """
    # args layout (';'-separated): [0] active workbook fullname, [1] this
    # workbook's fullname, [2]-[4] OneDrive consumer/commercial and SharePoint
    # overrides from the config sheet, [5:] extra PYTHONPATH entries.
    args = os.path.normcase(os.path.expandvars(args_string)).split(";")
    # Not sure, if we really need normcase,
    # but on Windows it replaces "/" with "\", so let's revert that
    active_fullname = args[0].replace("\\", "/")
    this_fullname = args[1].replace("\\", "/")
    paths = []
    for fullname in [active_fullname, this_fullname]:
        if not fullname:
            continue
        elif "://" in fullname:
            # Cloud URL (OneDrive/SharePoint): resolve to the locally synced copy.
            fullname = Path(
                fullname_url_to_local_path(
                    url=fullname,
                    sheet_onedrive_consumer_config=args[2],
                    sheet_onedrive_commercial_config=args[3],
                    sheet_sharepoint_config=args[4],
                )
            )
        else:
            fullname = Path(fullname)
        # Make the workbook's folder and a sibling .zip archive importable.
        paths += [str(fullname.parent), str(fullname.with_suffix(".zip"))]
    if args[5:]:
        paths += args[5:]
    # NOTE(review): set() deduplicates but loses the relative order of the
    # prepended paths -- confirm ordering is not relied upon.
    sys.path[0:0] = list(set(paths))
@lru_cache(None)
def fullname_url_to_local_path(
    url,
    sheet_onedrive_consumer_config=None,
    sheet_onedrive_commercial_config=None,
    sheet_sharepoint_config=None,
):
    """
    When AutoSave is enabled in Excel with either OneDrive or SharePoint, VBA/COM's
    Workbook.FullName turns into a URL without any possibilities to get the local file
    path. While OneDrive and OneDrive for Business make it easy enough to derive the
    local path from the URL, SharePoint allows to define the "Site name" and "Site
    address" independently from each other with the former ending up in the local folder
    path and the latter in the FullName URL. Adding to the complexity: (1) When the site
    name contains spaces, they will be stripped out from the URL and (2) you can sync a
    subfolder directly (this, at least, works when you have a single folder at the
    SharePoint's Document root), which results in skipping a folder level locally when
    compared to the online/URL version. And (3) the OneDriveCommercial env var sometimes
    seems to actually point to the local SharePoint folder.
    Parameters
    ----------
    url : str
        URL as returned by VBA's FullName
    sheet_onedrive_consumer_config : str
        Optional Path to the local OneDrive (Personal) as defined in the Workbook's
        config sheet
    sheet_onedrive_commercial_config : str
        Optional Path to the local OneDrive for Business as defined in the Workbook's
        config sheet
    sheet_sharepoint_config : str
        Optional Path to the local SharePoint drive as defined in the Workbook's config
        sheet
    """
    # Directory config files can't be used
    # since the whole purpose of this exercise is to find out about a book's dir
    # Config key names are platform-specific (*_WIN / *_MAC).
    onedrive_consumer_config_name = (
        "ONEDRIVE_CONSUMER_WIN"
        if sys.platform.startswith("win")
        else "ONEDRIVE_CONSUMER_MAC"
    )
    onedrive_commercial_config_name = (
        "ONEDRIVE_COMMERCIAL_WIN"
        if sys.platform.startswith("win")
        else "ONEDRIVE_COMMERCIAL_MAC"
    )
    sharepoint_config_name = (
        "SHAREPOINT_WIN" if sys.platform.startswith("win") else "SHAREPOINT_MAC"
    )
    # Expand environment variables in any sheet-level overrides.
    if sheet_onedrive_consumer_config is not None:
        sheet_onedrive_consumer_config = os.path.expandvars(
            sheet_onedrive_consumer_config
        )
    if sheet_onedrive_commercial_config is not None:
        sheet_onedrive_commercial_config = os.path.expandvars(
            sheet_onedrive_commercial_config
        )
    if sheet_sharepoint_config is not None:
        sheet_sharepoint_config = os.path.expandvars(sheet_sharepoint_config)
    # Sheet-level settings take precedence over the user config file.
    onedrive_consumer_config = sheet_onedrive_consumer_config or read_user_config().get(
        onedrive_consumer_config_name.lower()
    )
    onedrive_commercial_config = (
        sheet_onedrive_commercial_config
        or read_user_config().get(onedrive_commercial_config_name.lower())
    )
    sharepoint_config = sheet_sharepoint_config or read_user_config().get(
        sharepoint_config_name.lower()
    )
    # OneDrive
    pattern = re.compile(r"https://d.docs.live.net/[^/]*/(.*)")
    match = pattern.match(url)
    if match:
        # Resolution order: explicit config, env vars, then ~/OneDrive.
        root = (
            onedrive_consumer_config
            or os.getenv("OneDriveConsumer")
            or os.getenv("OneDrive")
            or str(Path.home() / "OneDrive")
        )
        if not root:
            raise xlwings.XlwingsError(
                f"Couldn't find the local OneDrive folder. Please configure the "
                f"{onedrive_consumer_config_name} setting, see: xlwings.org/error."
            )
        local_path = Path(root) / match.group(1)
        if local_path.is_file():
            return str(local_path)
        else:
            raise xlwings.XlwingsError(
                "Couldn't find your local OneDrive file, see: xlwings.org/error"
            )
    # OneDrive for Business
    pattern = re.compile(r"https://[^-]*-my.sharepoint.[^/]*/[^/]*/[^/]*/[^/]*/(.*)")
    match = pattern.match(url)
    if match:
        root = (
            onedrive_commercial_config
            or os.getenv("OneDriveCommercial")
            or os.getenv("OneDrive")
        )
        if not root:
            raise xlwings.XlwingsError(
                f"Couldn't find the local OneDrive for Business folder. "
                f"Please configure the {onedrive_commercial_config_name} setting, "
                f"see: xlwings.org/error."
            )
        local_path = Path(root) / match.group(1)
        if local_path.is_file():
            return str(local_path)
        else:
            raise xlwings.XlwingsError(
                "Couldn't find your local OneDrive for Business file, "
                "see: xlwings.org/error"
            )
    # SharePoint Online & On-Premises (default top level mapping)
    pattern = re.compile(r"https?://[^/]*/sites/([^/]*)/([^/]*)/(.*)")
    match = pattern.match(url)
    # We're trying to derive the SharePoint root path
    # from the OneDriveCommercial path, if it exists
    root = sharepoint_config or (
        os.getenv("OneDriveCommercial").replace("OneDrive - ", "")
        if os.getenv("OneDriveCommercial")
        else None
    )
    if not root:
        raise xlwings.XlwingsError(
            f"Couldn't find the local SharePoint folder. Please configure the "
            f"{sharepoint_config_name} setting, see: xlwings.org/error."
        )
    if match:
        local_path = Path(root) / f"{match.group(1)} - Documents" / match.group(3)
        if local_path.is_file():
            return str(local_path)
    # SharePoint Online & On-Premises (non-default mapping)
    # Fall back to searching every synced workbook under root for a file-name
    # match; the glob skips Excel lock/temp files (~$...).
    book_name = url.split("/")[-1]
    local_book_paths = []
    for path in Path(root).rglob("[!~$]*.xls*"):
        if path.name.lower() == book_name.lower():
            local_book_paths.append(path)
    if len(local_book_paths) == 1:
        return str(local_book_paths[0])
    elif len(local_book_paths) == 0:
        raise xlwings.XlwingsError(
            f"Couldn't find your SharePoint file locally, see: xlwings.org/error"
        )
    else:
        # Ambiguous: the same workbook name exists in several synced folders.
        raise xlwings.XlwingsError(
            f"Your SharePoint configuration either requires your workbook name to be "
            f"unique across all synced SharePoint folders or you need to "
            f"{'edit' if sharepoint_config else 'add'} the {sharepoint_config_name} "
            f"setting including one or more folder levels, see: xlwings.org/error."
        )
def to_pdf(
    obj,
    path=None,
    include=None,
    exclude=None,
    layout=None,
    exclude_start_string=None,
    show=None,
    quality=None,
):
    """Export obj (Book/Sheet, or Chart/Range) as a PDF and return the path.

    For Book/Sheet objects, sheets can be selected via ``include`` or
    ``exclude`` (mutually exclusive; sheet names or indices), and sheets whose
    name starts with ``exclude_start_string`` are additionally skipped.
    Selection is implemented by temporarily toggling sheet visibility, which
    is always restored afterwards. ``layout`` overlays the result on a layout
    PDF; ``show`` opens the file with the OS default viewer.

    Fixes: ``exclude_start_string=None`` (the default) previously crashed with
    ``TypeError: startswith first arg must be str`` for Book/Sheet objects;
    the no-op ``except Exception: raise`` around the visibility block was
    dropped in favor of a plain try/finally.
    """
    report_path = fspath(path)
    layout_path = fspath(layout)
    if isinstance(obj, (xlwings.Book, xlwings.Sheet)):
        if report_path is None:
            # Default: same folder and base name as the workbook, .pdf suffix.
            # (splitext's first part already contains the directory; joining
            # an absolute path discards the first argument, so this works.)
            filename, extension = os.path.splitext(obj.fullname)
            directory, _ = os.path.split(obj.fullname)
            if directory:
                report_path = os.path.join(directory, filename + ".pdf")
            else:
                report_path = filename + ".pdf"
        if (include is not None) and (exclude is not None):
            raise ValueError("You can only use either 'include' or 'exclude'")
        # Hide sheets to exclude them from printing
        if isinstance(include, (str, int)):
            include = [include]
        if isinstance(exclude, (str, int)):
            exclude = [exclude]
        # BUGFIX: only apply the name-prefix exclusion when a prefix was
        # actually provided -- str.startswith(None) raises TypeError.
        if exclude_start_string is not None:
            exclude_by_name = [
                sheet.index
                for sheet in obj.sheets
                if sheet.name.startswith(exclude_start_string)
            ]
        else:
            exclude_by_name = []
        # Remember current visibility so it can be restored afterwards.
        visibility = {}
        if include or exclude or exclude_by_name:
            for sheet in obj.sheets:
                visibility[sheet] = sheet.visible
        try:
            if include:
                for sheet in obj.sheets:
                    if (sheet.name in include) or (sheet.index in include):
                        sheet.visible = True
                    else:
                        sheet.visible = False
            if exclude or exclude_by_name:
                exclude = [] if exclude is None else exclude
                for sheet in obj.sheets:
                    if (
                        (sheet.name in exclude)
                        or (sheet.index in exclude)
                        or (sheet.index in exclude_by_name)
                    ):
                        sheet.visible = False
            obj.impl.to_pdf(os.path.realpath(report_path), quality=quality)
        finally:
            # Reset visibility even if the export raised.
            if include or exclude or exclude_by_name:
                for sheet, tf in visibility.items():
                    sheet.visible = tf
    else:
        if report_path is None:
            # Charts are named after the chart; Ranges get a sanitized
            # version of their address string.
            if isinstance(obj, xlwings.Chart):
                directory, _ = os.path.split(obj.parent.book.fullname)
                filename = obj.name
            elif isinstance(obj, xlwings.Range):
                directory, _ = os.path.split(obj.sheet.book.fullname)
                filename = (
                    str(obj)
                    .replace("<", "")
                    .replace(">", "")
                    .replace(":", "_")
                    .replace(" ", "")
                )
            else:
                raise ValueError(f"Object of type {type(obj)} are not supported.")
            if directory:
                report_path = os.path.join(directory, filename + ".pdf")
            else:
                report_path = filename + ".pdf"
        obj.impl.to_pdf(os.path.realpath(report_path), quality=quality)
    if layout:
        from .pro.reports.pdf import print_on_layout

        print_on_layout(report_path=report_path, layout_path=layout_path)
    if show:
        # Open with the OS default PDF viewer.
        if sys.platform.startswith("win"):
            os.startfile(report_path)
        else:
            subprocess.run(["open", report_path])
    return report_path
|
#!/usr/bin/env python
# author: combofish
# Filename: CoordinateConversion.py
import openpyxl
import os
import datetime
from datetime import date
import time
import configparser
import wx
# Window title and icon resource for the converter application.
# (Identifier typo "TITTLE" kept -- it is referenced elsewhere in the file.)
APP_TITTLE = '坐标转换工具 V1.3'
APP_ICON = 'well.ico' # replace with your own icon
# User-facing help text (Chinese) shown in the main text area on startup.
Usage = """>>> 软件使用
* 点击 ‘开始转换’ 按钮开始处理文件。
* 默认的数据读取目录在 Data\ 文件夹下,导出文件存放在当前文件夹下的 Res\ 文件夹下。
>>> 软件设置
* 可在当前目录下的 config.ini 文件来修改默认设置。
* DataDir 数据文件(Excel表格)存放的位置。
* ResultDir 导出坐标文件保存的位置。
* ImageStartTag 生成的坐标文件对应的起始图标标号,可选数值1-200。
* IngoreRowsNumber 软件处理时忽略Excel首部的行数,默认为1。
>>> 软件信息
* @combofish @Version-1.3
"""
class ConfContent():
    """Loads settings from config.ini next to this script, creates the data
    and result directories, and computes the days elapsed since the release
    date (used as a trial-period check by ResultTextFrame)."""
    def __init__(self):
        self.currentDir = os.path.split(os.path.realpath(__file__))[0]
        configFilePath = os.path.join(self.currentDir, "config.ini")
        self.confTool = configparser.ConfigParser()
        self.confTool.read(configFilePath,encoding='utf-8')
        # All settings live in the first (and only) section of config.ini.
        self.sectionName = self.confTool.sections()[0]
        self.author = self.getPara("author")
        self.email = self.getPara("email")
        self.version = self.getPara("version")
        self.dataDir = self.getPara("datadir")
        self.resDir = self.getPara("resultDir")
        # Icon/tag number that the first generated file starts from.
        self.imageStartTagText = int(self.getPara("ImageStartTag"))
        # Header rows skipped in each Excel sheet (key spelling "ingore..."
        # matches the key used in config.ini).
        self.ingoreRowsNumber = int(self.getPara("ingoreRowsNumber"))
        self.sheetName = self.getPara("SheetName")
        self.outputFileSuffix = self.getPara("OutputFileSuffix")
        self.dataDirPath = os.path.join(self.currentDir,self.dataDir)
        self.resDirPath = os.path.join(self.currentDir,self.resDir)
        # Create input/output folders on first run.
        if not os.path.exists(self.dataDirPath):
            os.mkdir(self.dataDirPath)
        if not os.path.exists(self.resDirPath):
            os.mkdir(self.resDirPath)
        # Days (float) elapsed since the configured release date.
        fmt = "%Y-%m-%d"
        versionDate = self.getPara("date")
        versionDate = time.strptime(versionDate,fmt)[:3] # time.struct_time
        self.softwareDate = date(versionDate[0],versionDate[1],versionDate[2])
        self.now = datetime.date.today() # datetime.date
        self.dateDeltDays = (self.now - self.softwareDate) / datetime.timedelta(days = 1)
    def getPara(self,para):
        """Return the raw string value of `para` from the config section."""
        return self.confTool.get(self.sectionName,para)
    def getIntPara(self,para):
        """Return the value of `para` converted to int."""
        return int(self.confTool.get(self.sectionName,para))
class ResultTextFrame(wx.Frame):
    """Main application window: shows the usage text plus a button that runs
    the Excel-to-coordinate-file conversion over every file in the data dir."""
    def __init__(self,confCont):
        self.confCont = confCont
        dataDir = confCont.dataDir
        resDir = confCont.resDir
        imageStartTagText = confCont.imageStartTagText
        wx.Frame.__init__(self,None,title= APP_TITTLE,pos= (500,200),size= (630,500))
        # icon = wx.Icon(APP_ICON, wx.BITMAP_TYPE_ICO)
        # self.SetIcon(icon)
        panel=wx.Panel(self,-1)
        # Header row: source dir, output dir, start tag, and the convert button.
        dataText = wx.StaticText(panel, -1, "源数据文件目录: " + dataDir + "\\",pos=(6,7), size=(160,24), style=wx.ALIGN_LEFT)
        resultText = wx.StaticText(panel,-1, "坐标文件输出目录: " + resDir + "\\",pos= (190,7),size= (200,24),style=wx.ALIGN_LEFT)
        imageStartTagText = wx.StaticText(panel,-1, "图标样式起始标号: " + str(imageStartTagText),pos= (380,7),size= (120,24),style=wx.ALIGN_LEFT)
        turnButton = wx.Button(panel,label= "开始转换",pos= (530,7),size= (80,24))
        self.contentText = wx.TextCtrl(panel,pos= (5,35),size= (620,460),style= wx.TE_MULTILINE)
        # wx.TE_MULTILINE renders the text across multiple scrollable lines;
        # without it the control would show everything on a single line.
        self.contentText.SetValue(Usage)
        self.contentTextString = []
        turnButton.Bind(wx.EVT_BUTTON,self.__tureFilesJudge) # bind the convert button's click event to the handler
    def setContentTextString(self,s):
        """Append the lines in `s` to the log buffer and refresh the display."""
        self.contentTextString.extend(s)
        # NOTE(review): local name shadows the builtin `str`.
        str = '\n '.join(self.contentTextString)
        self.contentText.SetValue(str)
    def __tureFilesJudge(self,evt):
        """Button handler: enforce the 30-day trial window, then convert."""
        if self.confCont.dateDeltDays > 30:
            self.contentText.SetValue(">>> 软件已达到使用期限,如需继续使用,请联系管理员!")
        else:
            processStrList = [">>> Processing..."]
            self.setContentTextString(processStrList)
            # frame.contentText.SetValue(">>> 请点击开始转换")
            self.__turnFilesProcess()
    def __turnFilesProcess(self):
        """Convert every Excel file in the data directory, logging one line
        per file and a summary at the end."""
        files = os.listdir(self.confCont.dataDirPath)
        # self.setContentTextString(files)
        fileCount = 0
        sumProcessLine = 0
        for f in files:
            fileName = os.path.join(self.confCont.dataDirPath,f)
            # fileCount doubles as the per-file icon-tag offset.
            xlProcesser = Xl2location(fileName,fileCount,self.confCont)
            fileCount +=1
            fileProcessLineNumber,imageStartTagText = xlProcesser.processXlFile()
            sumProcessLine += fileProcessLineNumber
            fmt = " Done (%4d) >>> Tag:%3d FileName:%s"
            formatStr = fmt % (fileProcessLineNumber,imageStartTagText,os.path.basename(fileName))
            self.setContentTextString([formatStr])
            xlProcesser.xlClose()
        self.setContentTextString([">>> Done."])
        self.setContentTextString([""])
        self.setContentTextString(["共处理个%3d 文件, 共处理 %3d 行" % (fileCount,sumProcessLine)])
class Xl2location:
    """Converts one Excel workbook of well records into a location text file.

    Every qualifying worksheet row becomes one comma-separated line
    (town, [name,] longitude, latitude, icon tag, detail string) written to
    the configured result directory.
    """

    # Department name (Chinese) -> single-letter code used in composed well names.
    departments = {"水利部门":'S',"发改委":'F',"农业农村部门":'N',"自然资源部门":'G',"财政部门":'C',"其他":"Y"}

    def __init__(self, fileName, imageTag, confCont):
        """:param fileName: path of the source .xlsx workbook
        :param imageTag: per-file offset added to the configured icon start tag
        :param confCont: configuration object (paths, column indices, flags)
        """
        self.confCont = confCont
        self.imgTag = imageTag + confCont.imageStartTagText
        self.f = fileName
        self.filePath = os.path.dirname(fileName)
        self.fileName = os.path.basename(fileName)
        self.sFilePath = self.confCont.resDirPath
        # Output keeps the base name but swaps in the configured suffix.
        self.sFileName = self.fileName.split('.')[0] + '.' + self.confCont.outputFileSuffix

    def xlClose(self):
        """Close the workbook opened by processXlFile."""
        self.xlsx.close()

    def processXlFile(self):
        """Read the configured sheet, collect qualifying rows and write them out.

        Rows up to confCont.ingoreRowsNumber are skipped; a row is kept only
        when its second cell is non-empty.  Returns (rows_processed, icon_tag).
        """
        self.xlsx = openpyxl.load_workbook(self.f, read_only=True)
        sheet = self.xlsx[self.confCont.sheetName]
        rows = sheet.rows
        i = 0
        count = 0
        mainContent = {}
        for row in rows:
            i = i + 1
            if i > self.confCont.ingoreRowsNumber:
                eachLine = []
                for cell in row:
                    eachLine.append(cell.value)
                # Keep the row only when the second column has a value.
                if eachLine[1]:
                    mainContent[str(i - self.confCont.ingoreRowsNumber)] = eachLine
                    count = count + 1
        self.count = count
        self.__processLine(mainContent)
        return self.count, self.imgTag

    def __getInfoRowNumber(self, rowName):
        """Return the configured integer column index for rowName."""
        return self.confCont.getIntPara(rowName)

    def __processLine(self, mainContent):
        """Format each collected row and write one CSV-style line per well."""
        with open(os.path.join(self.sFilePath, self.sFileName), 'w') as saveFile:
            for key, value in mainContent.items():
                well_locate = self.__nullJudge(value[self.__getInfoRowNumber("WellLocate")])
                well_number = value[self.__getInfoRowNumber("WellNumber")]
                well_name = self.__nullJudge(value[self.__getInfoRowNumber("WellName")])
                well_years = self.__nullJudge(value[self.__getInfoRowNumber("WellYear")])
                well_department = self.__nullJudge(value[self.__getInfoRowNumber("WellDepartment")])
                # The longitude/latitude cell holds "lon,lat" in one string.
                well_longitude, well_dimension = value[self.__getInfoRowNumber("WellLongDime")].split(',')
                well_description = self.__nullJudge(value[self.__getInfoRowNumber("WellDesc")])
                WellType = self.__nullJudge(value[self.__getInfoRowNumber("WellType")])
                lineInfoLocate = "/" + well_locate
                if self.confCont.getIntPara("AddWellNumberName"):
                    # Name: 2-digit year + department code + last 5 digits of the
                    # well number + "-#" + well name.
                    lineInfoName = str(well_years)[-2:] + str(self.departments[well_department]) + "-" + \
                        str(well_number)[-5:] + "-#" + str(well_name)
                else:
                    lineInfoName = str(well_years)[-2:] + str(self.departments[well_department]) + "-#" + str(well_name)
                lineInfoDetail = str(well_years) + str(well_department[:2]) + "-共" + str(self.count) + "个-"
                if self.__getInfoRowNumber("WellIncludeDetail"):
                    # Append per-category problem details: whether the well is to be
                    # scrapped, then well / pump / high-voltage / low-voltage issues.
                    lineInfoType = WellType + \
                        "-是否为拟报废机井" + self.__defaultJudge(value[self.__getInfoRowNumber("WellDetail")]) + \
                        "-机井" + self.__defaultJudge(value[self.__getInfoRowNumber("WellDetail")+1]) +\
                        "-泵" + self.__defaultJudge(value[self.__getInfoRowNumber("WellDetail")+2]) +\
                        "-高压" + self.__defaultJudge(value[self.__getInfoRowNumber("WellDetail")+3]) +\
                        "-低压" + self.__defaultJudge(value[self.__getInfoRowNumber("WellDetail")+4])
                else:
                    lineInfoType = WellType
                wellDetail = lineInfoDetail + str(well_number) + '-' + lineInfoType + well_description
                if self.confCont.getIntPara("IncludeName"):
                    # Layout: town, name(year+dept+original id), lon, lat, icon style, details.
                    lineInfo = ','.join([lineInfoLocate, lineInfoName, str(well_longitude), str(well_dimension),\
                        str(self.imgTag), wellDetail])
                else:
                    # Same layout without the name column.
                    lineInfo = ','.join([lineInfoLocate, str(well_longitude), str(well_dimension),\
                        str(self.imgTag), wellDetail])
                saveFile.write(lineInfo + '\n')

    def __nullJudge(self, value):
        """Return value with spaces stripped, or the string "null" when falsy."""
        if value:
            return str(value).replace(" ", '')
        if not value:
            return "null"

    def __defaultJudge(self, value):
        """Return value with spaces stripped, or "" when falsy."""
        if value:
            return str(value).replace(" ", '')
        if not value:
            return ""
class CoordinateConversionApp(wx.App):
    """wx application wrapper for the converter GUI."""

    def __init__(self):
        # Override __init__ so error output *can* be redirected to a file.
        # With redirect=True wx sends stdout/stderr to a window/file;
        # redirect=False keeps them on the console.  To catch errors that
        # would otherwise flash by, run the program as `python -i example.py`.
        wx.App.__init__(self, redirect=False, filename=r"Runlog.txt")

    def OnInit(self):
        # Build the configuration and show the main result frame on startup.
        confCont = ConfContent()
        frame = ResultTextFrame(confCont)
        frame.Show(True)
        return True
def process():
    """Instantiate the wx application and enter its main event loop."""
    application = CoordinateConversionApp()
    application.MainLoop()


process()
|
<reponame>yuanqing-wang/sake<gh_stars>1-10
import jax
import jax.numpy as jnp
from jax.experimental.ode import odeint
from flax import linen as nn
from .models import DenseSAKEModel
from functools import partial
import math
from typing import Callable
T = jnp.array((0.0, 1.0))
class CenteredGaussian(object):
    """Standard Gaussian restricted to the zero-center-of-mass subspace.

    For a configuration of N particles in D dimensions, removing the mean
    leaves (N - 1) * D degrees of freedom, which sets the normalizing
    constant used in ``log_prob``.
    """

    @staticmethod
    def log_prob(value):
        """Log-density of `value` (shape (..., N, D)) under the centered Gaussian."""
        n_particles = value.shape[-2]
        n_dims = value.shape[-1]
        dof = (n_particles - 1) * n_dims
        squared = jnp.reshape(value ** 2, (*value.shape[:-2], -1))
        r2 = squared.sum(-1)
        log_z = -0.5 * dof * math.log(2 * math.pi)
        return -0.5 * r2 + log_z

    @staticmethod
    def sample(key, shape):
        """Draw a sample of `shape` and project out the mean along the node axis."""
        draw = jax.random.normal(key=key, shape=shape)
        return draw - draw.mean(axis=-2, keepdims=True)
class ODEFlow(object):
    """Continuous normalizing flow driven by a SAKE model over t in [0, 1].

    All methods are static: the flax model and its params are passed in
    explicitly on every call, so the class is effectively a namespace.
    """

    @staticmethod
    def dynamics(model, params, x, t):
        """Velocity field dx/dt at time t.

        The scalar t is broadcast to one feature channel per node and fed to
        the model as node features; the model's output coordinates minus the
        input x give the velocity.
        """
        t = jnp.ones((*x.shape[:-1], 1)) * t
        _, y, __ = model.apply(params, t, x)
        y = y - x
        return y

    @staticmethod
    def _jacobian(fn, t, x):
        # Dense Jacobian of fn w.r.t. x via reverse-mode autodiff.
        jacobian = jax.jacrev(fn)(x, t)
        return jacobian

    @staticmethod
    def jacobian(fn, x, t):
        """Batched Jacobian of fn at fixed t, vmapped over the leading axis."""
        _jacobian = jax.vmap(partial(ODEFlow._jacobian, fn, t))
        return _jacobian(x)

    # Exact-trace variant kept for reference; superseded by the stochastic
    # Hutchinson estimator below.
    # @staticmethod
    # def trace(fn, x, t):
    #     res = fn(x, t)
    #     degrees_of_freedom = res.shape[-1] * res.shape[-2]
    #     res_shape = (*res.shape[:-4], degrees_of_freedom, degrees_of_freedom)
    #     res = jnp.reshape(res, res_shape)
    #     trace = jnp.trace(res, axis1=-2, axis2=-1)
    #     return trace

    @staticmethod
    def trace(fn, x, t, key):
        """Hutchinson estimate of tr(J) of fn at (x, t).

        Uses a single Gaussian probe u via one vector-Jacobian product:
        E[u^T J u] = tr(J).
        """
        _fn = lambda x: fn(x, t)
        y, vjp_fun = jax.vjp(_fn, x)
        key, subkey = jax.random.split(key)
        u = jax.random.normal(subkey, y.shape)
        trace = vjp_fun(u)[0] * u
        trace = trace.sum(axis=(-1, -2))
        return trace

    @staticmethod
    def logdet(fn, x):
        """Exact log|det| of fn's dense Jacobian at x (reshaped to 2-D)."""
        res = fn(x)
        degrees_of_freedom = res.shape[-1] * res.shape[-2]
        res_shape = (*res.shape[:-4], degrees_of_freedom, degrees_of_freedom)
        res = jnp.reshape(res, res_shape)
        _, logdet = jnp.linalg.slogdet(res)
        return logdet

    @staticmethod
    def dynamics_and_trace(model, params, key):
        """Build the augmented ODE right-hand side: d/dt of (x, trace integral).

        NOTE(review): the same probe key is reused at every integration step —
        confirm the fixed-noise estimator is intended.
        """
        dynamics = partial(ODEFlow.dynamics, model, params)
        trace = partial(ODEFlow.trace, dynamics)
        def fn(state, t):
            x, _trace = state
            return dynamics(x, t), trace(x, t, key)
        return fn

    @staticmethod
    def call(model, params, x, key):
        """Integrate the flow over T = [0, 1].

        Returns the final state and the accumulated trace integral (the
        flow's log-determinant estimate).
        """
        trace0 = jnp.zeros(shape=x.shape[:-2])
        fn = ODEFlow.dynamics_and_trace(model, params, key)
        y, logdet = odeint(fn, (x, trace0), T)
        y, logdet = y[-1], logdet[-1]
        return y, logdet

    # NOTE(review): __call__ declared as a staticmethod taking `model` only
    # works when invoked through an instance; kept as-is.
    @staticmethod
    def __call__(model, params, x, key): return ODEFlow.call(model, params, x, key)
class AugmentedFlowLayer(nn.Module):
    """One coupling layer of the augmented flow.

    Conditioned on node features h and positions x, applies an affine update
    v -> exp(scale) * v + translation to the augmented coordinates v.
    """

    hidden_features: int=64
    depth: int=3
    activation: Callable=nn.silu

    def setup(self):
        # Imported here (not at module top) to avoid a circular import with
        # the package root.
        import sake
        self.sake_model = sake.models.DenseSAKEModel(
            hidden_features=self.hidden_features,
            depth=self.depth,
            out_features=1,
            activation=self.activation,
        )
        # Maps per-node features to a single bounded (tanh) log-scale.
        self.scale_mlp = nn.Sequential(
            [
                nn.Dense(self.hidden_features),
                self.activation,
                nn.Dense(1, use_bias=False),
                jnp.tanh,
            ]
        )

    def mp(self, h, x):
        """Message passing: return (scalar log-scale, mean-free translation).

        NOTE(review): a zero-valued extra node is appended along the node axis
        before the SAKE pass and stripped afterwards — presumably a
        virtual/global node; confirm against the model's expectations.
        """
        x0 = x
        # Augment node features with the squared radius of each position.
        h = jnp.concatenate([h, (x ** 2).sum(-1, keepdims=True)], axis=-1)
        h = jnp.concatenate([h, jnp.expand_dims(jnp.zeros_like(h[..., -1, :]), -2)], axis=-2)
        x = jnp.concatenate([x, jnp.expand_dims(jnp.zeros_like(x[..., -1, :]), -2)], axis=-2)
        h, x, _ = self.sake_model(h, x)
        x = x[..., :-1, :]
        h = h[..., :-1, :]
        # Displacement produced by the model, re-centered to zero mean so the
        # update stays in the zero-center-of-mass subspace.
        translation = x - x0
        translation = translation - translation.mean(axis=-2, keepdims=True)
        # One scalar log-scale per sample (mean over nodes, keepdims).
        scale = self.scale_mlp(h).mean(axis=-2, keepdims=True)
        return scale, translation

    def f_forward(self, h, x, v):
        """Forward coupling: v' = exp(scale) * v + translation; x unchanged.

        log_det = scale * N * D, the Jacobian of the broadcast scaling
        (scale.sum((-1, -2)) collapses the two keepdims axes).
        """
        scale, translation = self.mp(h, x)
        v = jnp.exp(scale) * v + translation
        log_det = scale.sum((-1, -2)) * v.shape[-1] * v.shape[-2]
        return x, v, log_det

    def f_backward(self, h, x, v):
        """Inverse coupling: v = exp(-scale) * (v' - translation).

        NOTE(review): returns the same positive log_det as f_forward — callers
        presumably account for the sign; confirm the convention.
        """
        scale, translation = self.mp(h, x)
        v = v - translation
        v = jnp.exp(-scale) * v
        log_det = scale.sum((-1, -2)) * v.shape[-1] * v.shape[-2]
        return x, v, log_det

    def __call__(self, h, x, v): return self.f_forward(h, x, v)
class AugmentedFlowModel(nn.Module):
    """Stack of paired coupling layers alternating the roles of x and v."""

    depth: int=3
    mp_depth: int=3
    hidden_features: int=64
    activation: Callable=nn.silu

    def setup(self):
        # Register layers as attributes (xv_i / vx_i) so flax tracks their
        # parameters, then collect them into plain lists for iteration.
        for idx in range(self.depth):
            setattr(
                self,
                "xv_%s" % idx,
                AugmentedFlowLayer(self.hidden_features, self.mp_depth),
            )
            setattr(
                self,
                "vx_%s" % idx,
                AugmentedFlowLayer(self.hidden_features, self.mp_depth),
            )
        self.xv_layers = [getattr(self, "xv_%s" % idx) for idx in range(self.depth)]
        self.vx_layers = [getattr(self, "vx_%s" % idx) for idx in range(self.depth)]

    def f_forward(self, h, x, v):
        """Apply all layer pairs (last pair first) and accumulate log-dets.

        Within a pair, xv updates v conditioned on x, then vx updates x
        conditioned on v (note the swapped argument order).
        """
        sum_log_det = 0.0
        for xv, vx in zip(self.xv_layers[::-1], self.vx_layers[::-1]):
            x, v, log_det = xv.f_forward(h, x, v)
            sum_log_det = sum_log_det + log_det
            v, x, log_det = vx.f_forward(h, v, x)
            sum_log_det = sum_log_det + log_det
        return x, v, sum_log_det

    def f_backward(self, h, x, v):
        """Inverse of f_forward: pairs in first-to-last order, and within each
        pair vx is undone before xv, mirroring the forward composition."""
        sum_log_det = 0.0
        for xv, vx in zip(self.xv_layers, self.vx_layers):
            v, x, log_det = vx.f_backward(h, v, x)
            sum_log_det = sum_log_det + log_det
            x, v, log_det = xv.f_backward(h, x, v)
            sum_log_det = sum_log_det + log_det
        return x, v, sum_log_det

    def __call__(self, h, x, v): return self.f_forward(h, x, v)
|
import logging
import pathlib
import webbrowser
from typing import List
from sqlalchemy import Column, ForeignKey, Integer, Table, Text, create_engine, desc
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
logger = logging.getLogger(__name__)
Base = declarative_base()
class Database:
    """Manages connections to the database."""

    def __init__(self, filepath, create=False, verbose=False):
        """Open (and optionally create) the SQLite database at `filepath`.

        :param filepath: The path to the database.
        :param create: Optional. If :code:`True` the database will be created
            if it doesn't already exist.
        :param verbose: Optional. If :code:`True` enable sqlalchemy's logging
            of SQL commands.
        """
        logger.debug("Creating db instance for: %s", filepath)
        self.filepath = pathlib.Path(filepath)
        parent = self.filepath.parent
        if create and not parent.exists():
            parent.mkdir(parents=True)
        self.engine = create_engine("sqlite:///" + filepath, echo=verbose)
        self.new_session = sessionmaker(bind=self.engine)
        self._session = None
        if create:
            # Create each table only if it is not already present.
            for table in (Source.__table__, Link.__table__, Tag.__table__,
                          tag_association_table):
                table.create(bind=self.engine, checkfirst=True)

    def commit(self):
        """Commit the active session, or raise if none has been opened."""
        if not self._session:
            raise RuntimeError("There is no session to commit!")
        self._session.commit()

    def close(self):
        """Release the engine's connection pool."""
        self.engine.dispose()

    @property
    def exists(self):
        """Determine if the database exists on disk."""
        return self.filepath.exists()

    @property
    def session(self):
        """Return the current session, creating one lazily on first use."""
        if self._session is None:
            self._session = self.new_session()
        return self._session
class Source(Base):
    """Represents a source that a link was imported from."""

    __tablename__ = "sources"

    id = Column(Integer, primary_key=True)
    """The id of the source."""

    name = Column(Text, nullable=False)
    """The name of the source."""

    prefix = Column(Text, nullable=True)
    """The prefix that should be added to each link, if given."""

    uri = Column(Text, nullable=False)
    """The uri that was used when importing the source."""

    links = relationship("Link", backref="source")
    """Any links that were imported with this source."""

    def __hash__(self):
        # Fix: defining __eq__ without __hash__ implicitly sets
        # __hash__ = None, making Source unhashable (unusable in sets or as
        # dict keys).  Hash the same fields __eq__ compares, mirroring
        # Tag.__hash__.
        return hash((self.id, self.name, self.prefix, self.uri))

    def __eq__(self, other):
        if not isinstance(other, Source):
            return False
        return all(
            [
                self.id == other.id,
                self.name == other.name,
                self.prefix == other.prefix,
                self.uri == other.uri,
            ]
        )

    def __repr__(self):
        return f"Source<{self.name}, {self.uri}>"

    @classmethod
    def add(cls, db, items=None, commit=True, **kwargs):
        """Add a source or collection of sources to the given database.

        :param db: The database object to add to.
        :param items: Optional list of Source instances or kwargs dicts.
        :param commit: Commit the session after adding. (Default True)
        :param kwargs: Column values when creating a single source.
        """
        session = db.session
        if items is None:
            dbitem = cls(**kwargs)
            session.add(dbitem)
        elif isinstance(items[0], dict):
            dbitems = [cls(**args) for args in items]
            session.add_all(dbitems)
        else:
            session.add_all(items)
        if commit:
            db.commit()

    @classmethod
    def get(cls, db, id):
        """Get a source with the given id from the database."""
        session = db.session
        item = session.query(cls).filter(cls.id == id).first()
        return item

    @classmethod
    def search(cls, db, name=None, top=10):
        """Search the given database for sources.

        :param db: The database object to search.
        :param name: Only return sources whose name contains the given string.
        :param top: Only return the top :code:`N` results. (Default :code:`10`)
        """
        session = db.session
        filters = []
        if name is not None:
            filters.append(cls.name.ilike(f"%{name}%"))
        if len(filters) > 0:
            items = session.query(cls).filter(*filters)
        else:
            items = session.query(cls)
        # NOTE(review): Tag.search and Link.search do not commit; kept here
        # for backwards compatibility with existing callers.
        db.commit()
        return items[:top]
# Many-to-many join table between links and tags (no mapped class needed).
tag_association_table = Table(
    "tag_associations",
    Base.metadata,
    Column("link_id", Integer, ForeignKey("links.id")),
    Column("tag_id", Integer, ForeignKey("tags.id")),
)
class Tag(Base):
    """Represents a tag."""

    __tablename__ = "tags"

    id = Column(Integer, primary_key=True)
    """The id of the tag."""

    name = Column(Text, nullable=False, unique=True)
    """The name of the tag."""

    links = relationship("Link", secondary=tag_association_table, back_populates="tags")
    """The links that have this tag."""

    def __hash__(self):
        return hash((self.name, self.id))

    def __eq__(self, other):
        if not isinstance(other, Tag):
            return False
        return self.id == other.id and self.name == other.name

    def __repr__(self):
        return f"Tag<{self.name}, {len(self.links)} links>"

    @classmethod
    def add(cls, db, items=None, commit=True, **kwargs):
        """Add a tag or collection of tags to the given database."""
        session = db.session
        if items is None:
            session.add(cls(**kwargs))
        elif isinstance(items[0], dict):
            session.add_all([cls(**spec) for spec in items])
        else:
            session.add_all(items)
        if commit:
            db.commit()

    @classmethod
    def search(cls, db: Database, name: str = None, top: int = 10):
        """Search the given database for tags.

        :param db: The database object to search.
        :param name: Only return tags whose name contains the given string.
        :param top: Only return the top :code:`N` results. (Default :code:`10`)
        """
        query = db.session.query(cls)
        if name is not None:
            query = query.filter(cls.name.ilike(f"%{name}%"))
        return query[:top]

    @classmethod
    def get(cls, db, id=None, name=None):
        """Get a tag by name or id (id takes precedence when both are given)."""
        if id is None and name is None:
            raise ValueError("You must give a name or an id.")
        session = db.session
        if id is not None:
            return session.query(cls).filter(cls.id == id).first()
        return session.query(cls).filter(cls.name == name).first()
class Link(Base):
    """Represents an individual link."""

    __tablename__ = "links"

    id = Column(Integer, primary_key=True)
    """The id of the link."""

    name = Column(Text, nullable=False)
    """The name of the link."""

    url = Column(Text, nullable=False)
    """The url of the link."""

    visits = Column(Integer, default=0)
    """The number of times a link has been visited."""

    source_id = Column(Integer, ForeignKey("sources.id"), nullable=True)
    """The id of the source the link was added with, if applicable"""

    tags = relationship("Tag", secondary=tag_association_table, back_populates="links")
    """The tags applied to this link."""

    def __hash__(self):
        # Fix: defining __eq__ without __hash__ implicitly sets
        # __hash__ = None, making Link unhashable (unusable in sets or as
        # dict keys).  Hash the same fields __eq__ compares, mirroring
        # Tag.__hash__.
        return hash((self.id, self.url, self.source_id))

    def __eq__(self, other):
        if not isinstance(other, Link):
            return False
        return all(
            [
                self.id == other.id,
                self.url == other.url,
                self.source_id == other.source_id,
            ]
        )

    def __repr__(self):
        return f"{self.name} <{self.url_expanded}, {len(self.tags)} tags>"

    @property
    def url_expanded(self):
        """The full url, including the link's prefix if set."""
        if self.source and self.source.prefix:
            return f"{self.source.prefix}{self.url}"
        return self.url

    @classmethod
    def open(cls, db, link_id):
        """Open the link with the given id in the default web browser."""
        link = cls.get(db, link_id)
        url = link.url_expanded
        webbrowser.open(url)
        # Update the stats
        link.visits += 1
        db.commit()

    @classmethod
    def add(cls, db, items=None, commit=True, **kwargs):
        """Add a link or collection of links to the given database.

        :param db: The database object to add to.
        :param items: Optional list of Link instances or kwargs dicts.
        :param commit: Commit the session after adding. (Default True)
        :param kwargs: Column values when creating a single link.
        """
        session = db.session
        if items is None:
            dbitem = cls(**kwargs)
            session.add(dbitem)
        elif isinstance(items[0], dict):
            dbitems = [cls(**args) for args in items]
            session.add_all(dbitems)
        else:
            session.add_all(items)
        if commit:
            db.commit()

    @classmethod
    def get(cls, db, id):
        """Get a link with the given id from the database."""
        session = db.session
        item = session.query(cls).filter(cls.id == id).first()
        return item

    @classmethod
    def search(
        cls,
        db: Database,
        name: str = None,
        source: Source = None,
        tags: List[str] = None,
        top: int = 10,
        sort: str = None,
    ):
        """Search the given database for links.

        The :code:`sort` parameter can be used to control the order in which the
        results are sorted by. The following options are valid:

        - :code:`None` (default), results are returned in the default sort order from
          the database
        - :code:`"visits"`, results are returned with the most visited links first.

        Invalid options will be ignored

        :param db: The database object to search
        :param name: Only return links whose name contains the given string.
        :param source: Only return links from the given source.
        :param tags: Only return links with the given tags
        :param top: Only return the top :code:`N` results. (Default :code:`10`)
        :param sort: The criteria to sort the results by. (Default :code:`None`)
        """
        session = db.session
        filters = []
        if name is not None:
            filters.append(cls.name.ilike(f"%{name}%"))
        if source is not None:
            filters.append(cls.source_id == source.id)
        if tags is not None:
            # Each requested tag must be present on the link.
            for tag in tags:
                filters.append(Link.tags.any(name=tag))
        query = session.query(cls)
        if len(filters) > 0:
            query = query.filter(*filters)
        if sort == "visits":
            query = query.order_by(desc(cls.visits))
        return query[:top]
|
import random
from math import atan2, pi
import cv2
import numpy as np
from albumentations import VerticalFlip, RandomResizedCrop, Compose, normalize_bbox, denormalize_bbox
from albumentations.augmentations import functional as F
from albumentations.augmentations.crops import functional as crop_f
from albumentations.core.transforms_interface import DualTransform
from .utils import convert_to_square, rotate_and_crop_rectangle_safe, \
rotate_and_crop_keypoints_on_rectangle_safe, keypoints_flip
__all__ = ["RandomCropNearInterestArea", "AlignCropNearInterestArea", "CombineImagesAndCrop",
"RelativeRandomCrop", "ConditionalTranspose", "RelativePadIfNeeded", "ProportionalRandomResizedCrop",
"FlipAndConcatTransform"]
class RandomCropNearInterestArea(DualTransform):
    """Crop area with mask if mask is non-empty with random shift by x,y coordinates.

    Args:
        max_part_shift (float): float value in (0.0, 1.0) range. Max relative size of shift in crop. Default 0.3
        min_part_shift (float): float value in (0.0, 1.0) range. Min relative size of shift in crop. Default 0
        min_crop_size (tuple): tuple of two values: minimum height and width of crop. Default (0, 0)
        ignore_labels (list of int): values to ignore in mask, `0` values are always ignored
            (e.g. if background value is 5 set `ignore_values=[5]` to ignore)
        p (float): probability of applying the transform. Default: 0.5.

    Params:
        mask: binary mask where 1's label object of interest

    Targets:
        image, mask

    Image types:
        uint8, float32
    """
    def __init__(self, max_part_shift=0.3, min_part_shift=0, min_crop_size=(0, 0),
                 ignore_labels=None, always_apply=False, p=0.5):
        super(RandomCropNearInterestArea, self).__init__(always_apply, p)
        self.max_part_shift = max_part_shift
        self.min_part_shift = min_part_shift
        # Normalize ignore_labels to a set; background (0) is always ignored.
        if isinstance(ignore_labels, int):
            ignore_labels = {ignore_labels}
        elif isinstance(ignore_labels, (list, tuple)):
            ignore_labels = set(ignore_labels)
        self.ignore_labels = {0} if ignore_labels is None else ignore_labels.union({0})
        self.min_crop_size = min_crop_size

    def apply(self, img, x_min=0, x_max=0, y_min=0, y_max=0, **params):
        return crop_f.clamping_crop(img, x_min, y_min, x_max, y_max)

    def get_params_dependent_on_targets(self, params):
        """Pick a random object contour and build a randomly shifted crop window."""
        mask = params['mask']
        mask = np.where(np.isin(mask, self.ignore_labels), 0, mask)
        img_h, img_w = mask.shape
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        if len(contours) != 0:
            c = random.choice(contours)
            x, y, w, h = cv2.boundingRect(c)
            bbox = x, y, x + w, y + h
            # Half of the time square the bbox before shifting.
            if random.random() > 0.5:
                bbox = convert_to_square(np.array([bbox]))[0]
            h_max_shift = int((bbox[3] - bbox[1]) * self.max_part_shift)
            w_max_shift = int((bbox[2] - bbox[0]) * self.max_part_shift)
            h_min_shift = int((bbox[3] - bbox[1]) * self.min_part_shift)
            w_min_shift = int((bbox[2] - bbox[0]) * self.min_part_shift)
            x_min = bbox[0] - random.randint(w_min_shift, w_max_shift)
            x_max = bbox[2] + random.randint(w_min_shift, w_max_shift)
            y_min = bbox[1] - random.randint(h_min_shift, h_max_shift)
            y_max = bbox[3] + random.randint(h_min_shift, h_max_shift)
            # Grow the window (clamped to the image) up to min_crop_size.
            if y_max - y_min < self.min_crop_size[0]:
                y_add = (self.min_crop_size[0] - (y_max - y_min)) // 2
                y_max = min(y_max + y_add, img_h)
                y_min = max(y_min - y_add, 0)
            if x_max - x_min < self.min_crop_size[1]:
                x_add = (self.min_crop_size[1] - (x_max - x_min)) // 2
                x_max = min(x_max + x_add, img_w)
                x_min = max(x_min - x_add, 0)
        else:
            # Empty mask: keep the whole image.
            x_min, y_min, x_max, y_max = 0, 0, img_w, img_h
        # Fix: rows/cols must always be the *image* dimensions.  Previously the
        # contour branch returned the bounding rect's h/w here (reassigned by
        # cv2.boundingRect), which broke bbox denormalization in apply_to_bbox.
        return {'x_min': x_min,
                'x_max': x_max,
                'y_min': y_min,
                'y_max': y_max,
                'rows': img_h,
                'cols': img_w
                }

    def apply_to_bbox(self, bbox, x_min=0, x_max=0, y_min=0, y_max=0, **params):
        return crop_f.bbox_crop(
            bbox, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max, rows=params["rows"], cols=params["cols"]
        )

    def apply_to_keypoint(self, keypoint, x_min=0, x_max=0, y_min=0, y_max=0, **params):
        return crop_f.crop_keypoint_by_coords(keypoint, crop_coords=(x_min, y_min, x_max, y_max))

    @property
    def targets_as_params(self):
        return ['mask']

    def get_transform_init_args_names(self):
        return 'max_part_shift', 'min_part_shift', 'ignore_labels', 'min_crop_size'
class AlignCropNearInterestArea(DualTransform):
    """
    Rotate the image along the mask's object and crop its rectangle with a random pad.

    Args:
        classes_of_interest: mask labels treated as the object of interest.
        max_pad (float): float value in (0.0, 1.0) range. Default 0.3
        min_pad (float): float value in (0.0, 1.0) range. Value must less or equal to the max_pad.
            Default 0.0.
        rotate_limit (float): extra random rotation in degrees added to the fitted angle. Default 0.
        interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:
            cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.
            Default: cv2.INTER_LINEAR.
        p (float): probability of applying the transform. Default: 1.

    Params:
        interest_mask: binary mask where 1's label object of interest

    Targets:
        image

    Image types:
        uint8, float32
    """
    def __init__(self, classes_of_interest, max_pad=0.3, min_pad=0, rotate_limit=0,
                 interpolation=cv2.INTER_LINEAR, always_apply=False, p=1.0):
        super().__init__(always_apply, p)
        assert max_pad >= min_pad, '`max_pad` must greater or equal to the `min_pad`'
        self.classes_of_interest = classes_of_interest
        self.max_pad = max_pad
        self.min_pad = min_pad
        self.rotate_limit = rotate_limit
        self.interpolation = interpolation

    def apply(self, img, angle=0, box=None, pad=None, **params):
        result = rotate_and_crop_rectangle_safe(img, angle, box, pad, **params)
        return result

    def apply_to_keypoints(self, keypoints, angle=0, box=None, pad=None, **params):
        # Keypoints carry extra meta columns after (x, y); only the coordinates
        # are transformed, the meta columns are re-attached untouched.
        target_keypoints = keypoints[:, :2]
        meta_inf = keypoints[:, 2:]
        new_keypoints = rotate_and_crop_keypoints_on_rectangle_safe(target_keypoints, angle, box, pad, **params)
        return np.hstack([new_keypoints, meta_inf])

    def get_params_dependent_on_targets(self, params):
        mask = params['mask']
        mask = np.isin(mask, self.classes_of_interest).astype('uint8')
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        h, w = mask.shape
        if len(contours) != 0:
            # Pick one object, fit a line through it to estimate orientation,
            # then jitter the angle within +/- rotate_limit degrees.
            c = random.choice(contours)
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect).astype('int')
            vx, vy, x, y = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01)
            angle = atan2(vy, vx) / pi * 180
            angle = angle + (random.random() * 2 - 1) * self.rotate_limit
            shift_difference = self.max_pad - self.min_pad
            h_max_pad_ratio = random.random() * shift_difference + self.min_pad
            w_max_pad_ratio = random.random() * shift_difference + self.min_pad
            h_min_pad_ratio = random.random() * shift_difference + self.min_pad
            w_min_pad_ratio = random.random() * shift_difference + self.min_pad
        else:
            # No object found: keep the whole image, no rotation, no padding.
            h_min_pad_ratio, w_max_pad_ratio, w_min_pad_ratio, h_max_pad_ratio = 0, 0, 0, 0
            angle = 0
            box = [[0, 0], [w, 0], [w, h], [0, h]]
        pad = [h_min_pad_ratio, w_min_pad_ratio, h_max_pad_ratio, w_max_pad_ratio]
        return {'angle': angle,
                'box': box,
                'pad': pad
                }

    @property
    def targets_as_params(self):
        return ['mask']

    def get_transform_init_args_names(self):
        # Fix: previously omitted the required `classes_of_interest` and
        # `rotate_limit`, so a serialized transform could not be rebuilt.
        return 'classes_of_interest', 'max_pad', 'min_pad', 'rotate_limit', 'interpolation'
class CombineImagesAndCrop(DualTransform):
    """
    Crops horizontal strip, shifts and merges with prev/next digit image

    :param max_part_shift: maximum shift (rows) of the strip boundary
    :param min_part_shift: minimum shift (rows) of the strip boundary
    :param up: shifts up (merge with next image) or down (merge with previous)
    """
    def __init__(self, max_part_shift=40, min_part_shift=1, up=True, **kwargs):
        super().__init__(**kwargs)
        self.max_part_shift = max_part_shift
        self.min_part_shift = min_part_shift
        self.up = up

    def apply(self, img, prev_image, next_image, **params):
        # NOTE(review): the random shift is drawn here instead of in
        # get_params, so replay/serialization will not reproduce it.
        length = random.randint(self.min_part_shift, self.max_part_shift)
        if self.up:
            # Drop `length` rows from the top, append the top of the next image.
            return np.vstack((img[length:, :, :], next_image[:length, :, :]))
        else:
            # Prepend the bottom of the previous image, drop rows from the bottom.
            return np.vstack((prev_image[img.shape[0] - length:, :, :], img[:img.shape[0] - length, :, :]))

    def get_params_dependent_on_targets(self, params):
        # Neighbouring digit images are supplied by the caller as extra targets.
        return {
            'prev_image': params['prev_image'],
            'next_image': params['next_image'],
        }

    @property
    def targets_as_params(self):
        return ['prev_image', 'next_image']
class RelativeRandomCrop(DualTransform):
    """Crop a random part of the random size of the input.

    Args:
        h_min_max_ratio ((float, float)): height crop size limits. These ratios must be in range [0, 1].
        w_min_max_ratio ((float, float)): width crop size limits. These ratios must be in range [0, 1].
        interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:
            cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.
            Default: cv2.INTER_LINEAR.
        p (float): probability of applying the transform. Default: 1.

    Targets:
        image, mask, bboxes, keypoints

    Image types:
        uint8, float32
    """
    def __init__(self, h_min_max_ratio=(0, 1), w_min_max_ratio=(0, 1), interpolation=cv2.INTER_LINEAR,
                 always_apply=False, p=1.0):
        super(RelativeRandomCrop, self).__init__(always_apply, p)
        self.interpolation = interpolation
        self.h_min_max_ratio = h_min_max_ratio
        self.w_min_max_ratio = w_min_max_ratio

    def apply(self, img, x_min=0, y_min=0, x_max=0, y_max=0, interpolation=cv2.INTER_LINEAR, **params):
        # NOTE(review): other transforms in this module call crop_f.clamping_crop;
        # F.clamping_crop may not exist in newer albumentations releases —
        # verify against the pinned version.
        return F.clamping_crop(img, x_min, y_min, x_max, y_max)

    def apply_to_bbox(self, bbox, x_min=0, y_min=0, x_max=0, y_max=0, **params):
        # NOTE(review): positional order here is (x_min, y_min, x_max, y_max),
        # while crop_f.bbox_crop used elsewhere takes (x_min, x_max, y_min,
        # y_max) — confirm which signature this F.bbox_crop has.
        return F.bbox_crop(bbox, x_min, y_min, x_max, y_max, **params)

    def get_params_dependent_on_targets(self, params):
        image = params['image']
        h, w = image.shape[:2]
        h_min_ratio, h_max_ratio = self.h_min_max_ratio
        w_min_ratio, w_max_ratio = self.w_min_max_ratio
        # Sample the crop size as a fraction of the image, then its position.
        crop_height = int(random.uniform(h_min_ratio, h_max_ratio) * h)
        crop_width = int(random.uniform(w_min_ratio, w_max_ratio) * w)
        y = random.randint(0, h - crop_height)
        x = random.randint(0, w - crop_width)
        return {'x_min': x,
                'y_min': y,
                'x_max': x + crop_width,
                'y_max': y + crop_height
                }

    @property
    def targets_as_params(self):
        return ['image']

    def get_transform_init_args_names(self):
        return 'h_min_max_ratio', 'w_min_max_ratio', 'interpolation'
class ProportionalCenterCrop(DualTransform):
    """Center-crop to a size drawn as a random proportion of the input size.

    NOTE(review): not listed in __all__, so it is not exported by
    ``from ... import *``.
    """
    def __init__(self, p_height_range, p_width_range, always_apply=False, p=1.0):
        super(ProportionalCenterCrop, self).__init__(always_apply, p)
        # (min, max) fractions of the image height/width to keep.
        self.p_height_range = p_height_range
        self.p_width_range = p_width_range

    def apply(self, img, height=0, width=0, **params):
        return F.center_crop(img, height, width)

    def apply_to_bbox(self, bbox, height=0, width=0, **params):
        # NOTE(review): uses the *random*-crop bbox helper, which expects
        # h_start/w_start in params that this transform never generates —
        # likely should be a center-crop helper; confirm before relying on
        # bbox targets.
        return F.bbox_random_crop(bbox, height, width, **params)

    def apply_to_keypoint(self, keypoint, height=0, width=0, **params):
        # NOTE(review): same concern as apply_to_bbox.
        return F.keypoint_random_crop(keypoint, height, width, **params)

    def get_params_dependent_on_targets(self, params):
        image = params['image']
        h, w = image.shape[:2]
        crop_h = int(h * random.uniform(*self.p_height_range))
        crop_w = int(w * random.uniform(*self.p_width_range))
        return {'height': crop_h,
                'width': crop_w}

    def get_transform_init_args_names(self):
        return 'p_height_range', 'p_width_range'

    @property
    def targets_as_params(self):
        return ['image']
class ConditionalTranspose(DualTransform):
    """Transpose the input (swap rows and columns) only when a condition holds.

    Args:
        to_portrait (bool): transpose image to portrait if True else transpose to landscape
        p (float): probability of applying the transform. Default: 1.0.

    Targets:
        image, mask, bboxes

    Image types:
        uint8, float32
    """
    def __init__(self, to_portrait=True, always_apply=False, p=1.0):
        super(ConditionalTranspose, self).__init__(always_apply, p)
        self.to_portrait = to_portrait

    def apply(self, img, apply_transpose=True, **params):
        return F.transpose(img) if apply_transpose else img

    def apply_to_bbox(self, bbox, apply_transpose=True, **params):
        return F.bbox_transpose(bbox, 0, **params) if apply_transpose else bbox

    def get_params_dependent_on_targets(self, params):
        # Transpose only when the image's orientation differs from the target
        # orientation (portrait = taller than wide).
        h, w = params['image'].shape[:2]
        if self.to_portrait:
            needs_transpose = h < w
        else:
            needs_transpose = h > w
        return {'apply_transpose': needs_transpose}

    def get_transform_init_args_names(self):
        return 'to_portrait'

    @property
    def targets_as_params(self):
        return ['image']
class RelativePadIfNeeded(DualTransform):
    """Pad sides of the image up to a desired width / height ratio.

    Args:
        w2h_ratio (float): target width / height ratio. Default: 1.
        border_mode (OpenCV flag): cv2 border mode used for padding.
        value (list of ints [r, g, b]): padding value if border_mode is cv2.BORDER_CONSTANT.
        mask_value (int): padding value for mask if border_mode is cv2.BORDER_CONSTANT.
        p (float): probability of applying the transform. Default: 1.0.

    Targets:
        image, mask, bbox, keypoints

    Image types:
        uint8, float32
    """
    def __init__(self, w2h_ratio=1, border_mode=cv2.BORDER_REFLECT_101,
                 value=None, mask_value=None, always_apply=False, p=1.0):
        super(RelativePadIfNeeded, self).__init__(always_apply, p)
        self.w2h_ratio = w2h_ratio
        self.border_mode = border_mode
        self.value = value
        self.mask_value = mask_value

    def get_params_dependent_on_targets(self, params):
        image = params['image']
        height, width = image.shape[:2]
        # Keep the limiting dimension and grow the other until
        # width / height == w2h_ratio.
        if width / height > self.w2h_ratio:
            min_width = width
            min_height = int(width / self.w2h_ratio)
        else:
            min_width = int(height * self.w2h_ratio)
            min_height = height
        # Split the required padding evenly between the two sides.
        if height < min_height:
            h_pad_top = int((min_height - height) / 2.0)
            h_pad_bottom = min_height - height - h_pad_top
        else:
            h_pad_top = 0
            h_pad_bottom = 0
        if width < min_width:
            w_pad_left = int((min_width - width) / 2.0)
            w_pad_right = min_width - width - w_pad_left
        else:
            w_pad_left = 0
            w_pad_right = 0
        params.update({'pad_top': h_pad_top,
                       'pad_bottom': h_pad_bottom,
                       'pad_left': w_pad_left,
                       'pad_right': w_pad_right})
        return params

    def apply(self, img, pad_top=0, pad_bottom=0, pad_left=0, pad_right=0, **params):
        return F.pad_with_params(img, pad_top, pad_bottom, pad_left, pad_right,
                                 border_mode=self.border_mode, value=self.value)

    def apply_to_mask(self, img, pad_top=0, pad_bottom=0, pad_left=0, pad_right=0, **params):
        return F.pad_with_params(img, pad_top, pad_bottom, pad_left, pad_right,
                                 border_mode=self.border_mode, value=self.mask_value)

    def apply_to_bbox(self, bbox, pad_top=0, pad_bottom=0, pad_left=0, pad_right=0, rows=0, cols=0, **params):
        x_min, y_min, x_max, y_max = denormalize_bbox(bbox, rows, cols)
        bbox = [x_min + pad_left, y_min + pad_top, x_max + pad_left, y_max + pad_top]
        return normalize_bbox(bbox, rows + pad_top + pad_bottom, cols + pad_left + pad_right)

    def apply_to_keypoint(self, keypoint, pad_top=0, pad_bottom=0, pad_left=0, pad_right=0, **params):
        x, y, a, s = keypoint
        return [x + pad_left, y + pad_top, a, s]

    def get_transform_init_args_names(self):
        # Fix: previously returned 'min_height'/'min_width' — attributes this
        # transform never defines — so serialization raised AttributeError.
        return 'w2h_ratio', 'border_mode', 'value', 'mask_value'

    @property
    def targets_as_params(self):
        return ['image']
class ProportionalRandomResizedCrop(DualTransform):
    """Crop a random part of the random size of the input with given proportions.

    Internally pads to the target aspect ratio, then applies RandomResizedCrop.

    Args:
        height (int): height after crop and resize.
        width (int): width after crop and resize.
        scale ((float, float)): range of size of the origin size cropped
        ratio ((float, float)): range of image distortion with respect to the initial aspect ratio (which is set to 1).
        value (list of ints [r, g, b]): padding value if border_mode is cv2.BORDER_CONSTANT.
        mask_value (int): padding value for mask if border_mode is cv2.BORDER_CONSTANT.
        interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:
            cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.
            Default: cv2.INTER_LINEAR.
        p (float): probability of applying the transform. Default: 1.

    Targets:
        image, mask, bboxes, keypoints

    Image types:
        uint8, float32"""
    def __init__(self, height, width, scale=(0.08, 1.0), ratio=(1, 1), interpolation=cv2.INTER_LINEAR,
                 border_mode=cv2.BORDER_REFLECT_101, value=None, mask_value=None, p=1.0, always_apply=False):
        super(ProportionalRandomResizedCrop, self).__init__(always_apply, p)
        # Fix: store the init arguments.  get_transform_init_args_names reads
        # these attributes during serialization; previously none were set, so
        # serializing this transform raised AttributeError.
        self.height = height
        self.width = width
        self.scale = scale
        self.interpolation = interpolation
        self.border_mode = border_mode
        self.value = value
        self.mask_value = mask_value
        aspect_ratio = width / height
        # Express the distortion range relative to the target aspect ratio.
        ratio = (aspect_ratio * ratio[0], aspect_ratio * ratio[1])
        self.aug = Compose([RelativePadIfNeeded(aspect_ratio, border_mode, value, mask_value),
                            RandomResizedCrop(height, width, scale=scale, ratio=ratio, interpolation=interpolation)])

    def __call__(self, *args, **kwargs):
        # Delegate entirely to the composed pipeline.
        return self.aug(*args, **kwargs)

    def get_transform_init_args_names(self):
        return 'height', 'width', 'scale', 'interpolation', 'border_mode', 'value', 'mask_value'
class FlipAndConcatTransform(DualTransform):
    """Vertically flip initial image and concatenate flipped with original one.
    Works only for image and masks.
    Args:
        p (float): probability of applying the transform. Default: 1.
    Targets:
        image, mask, masks
    Image types:
        uint8, float32
    """
    def __init__(self, p=1., always_apply=True):
        super().__init__(always_apply, p)
        # NOTE(review): attribute is named `hflip` but holds a VerticalFlip —
        # confirm which axis is intended.
        self.hflip = VerticalFlip(always_apply=True)
    def __call__(self, *args, **kwargs):
        """Flip every target, then concatenate original + flipped along axis 1."""
        new_kwargs = self.hflip(*args, **kwargs)
        res = {}
        for key, arg in new_kwargs.items():
            if arg is None:
                res[key] = None
                continue
            # Map additional targets back to their base type (image/mask/masks);
            # anything else (e.g. bboxes) is silently dropped from the result.
            key_type = self._additional_targets[key] if key in self._additional_targets else key
            if key_type in ['image', 'mask']:
                res[key] = np.concatenate([kwargs[key], arg], axis=1)
            elif key_type in ['masks']:
                # Concatenate each mask with its flipped counterpart pairwise.
                res[key] = [np.concatenate([mask, new_mask], axis=1)
                            for mask, new_mask in zip(kwargs[key], arg)]
        return res
    def add_targets(self, additional_targets):
        # Keep the inner flip transform aware of the same additional targets.
        super().add_targets(additional_targets)
        self.hflip._additional_targets = additional_targets
    @property
    def targets(self):
        # Restrict supported targets: no bboxes/keypoints for this transform.
        return {
            "image": self.apply,
            "mask": self.apply_to_mask,
            "masks": self.apply_to_masks
        }
class DeterminedFlip(DualTransform):
    """Flip the input either horizontally, vertically or both horizontally and vertically.
    Args:
        direction (int): code that specifies how to flip the input. 0 for vertical flipping, 1 for horizontal flipping,
            -1 for both vertical and horizontal flipping (which is also could be seen as rotating the input by
            180 degrees).
    Targets:
        image, mask, bboxes, keypoints
    Image types:
        uint8, float32
    """
    def __init__(self, direction, always_apply=False):
        super().__init__(always_apply, p=1)
        # Flip code, passed straight through to the functional helpers.
        self.d = direction
    def apply(self, img, d=0, **params):
        return F.random_flip(img, d)
    def get_params(self):
        # Deterministic: always the flip code configured at construction.
        return {"d": self.d}
    def apply_to_bbox(self, bbox, **params):
        return F.bbox_flip(bbox, **params)
    def apply_to_keypoint(self, keypoint, **params):
        # BUG FIX: was `apply_to_keypoints` (plural) calling the undefined name
        # `keypoints_flip`, which raised NameError whenever keypoints were
        # transformed. The DualTransform per-keypoint hook is `apply_to_keypoint`
        # and the functional helper is `F.keypoint_flip`.
        return F.keypoint_flip(keypoint, **params)
    def get_transform_init_args_names(self):
        return ()
class DeterminedRotate90(DualTransform):
    """Rotate the input by 90 degrees a fixed number of times.

    Deterministic counterpart of RandomRotate90: `factor` is supplied at
    construction instead of being sampled.
    Args:
        factor (int): number of 90-degree rotations to apply.
    Targets:
        image, mask, bboxes, keypoints
    Image types:
        uint8, float32
    """
    def __init__(self, factor=0, always_apply=False):
        super().__init__(always_apply, p=1)
        self.factor = factor
    def apply(self, img, factor=0, **params):
        """
        Args:
            factor (int): number of times the input will be rotated by 90 degrees.
        """
        # ascontiguousarray: np.rot90 returns a strided view, not a C-contiguous array.
        return np.ascontiguousarray(np.rot90(img, factor))
    def get_params(self):
        # Deterministic: always the factor configured at construction.
        return {"factor": self.factor}
    def apply_to_bbox(self, bbox, factor=0, **params):
        return F.bbox_rot90(bbox, factor, **params)
    def apply_to_keypoint(self, keypoint, factor=0, **params):
        return F.keypoint_rot90(keypoint, factor, **params)
    def get_transform_init_args_names(self):
        # NOTE(review): `factor` is not serialized; confirm whether replay needs it.
        return ()
|
import json
import pytest
import time
from delphi.apps.rest_api import create_app, db
from delphi.apps.rest_api.models import DelphiModel, CauseMosAsyncExperimentResult, ExperimentResult
@pytest.fixture(scope="module")
def app():
    """Module-scoped Flask app configured for testing.

    Yields inside an app context so the tests can touch the database layer.
    Schema creation/teardown is deliberately commented out — the tests run
    against a pre-existing database and clean up their own rows.
    """
    app = create_app(debug=True)
    app.testing = True
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
    with app.app_context():
        #db.create_all()
        yield app
        #db.drop_all()
@pytest.fixture(scope="module")
def client(app):
    """Flask test client bound to the module-scoped app fixture."""
    test_client = app.test_client()
    return test_client
def create_model(client, requrst_json_fle, model_no):
    """Create a Delphi model via the REST API and poll until training finishes.

    :param client: Flask test client.
    :param requrst_json_fle: path to the create-model request JSON file.
        (NOTE(review): parameter name typo kept for interface compatibility.)
    :param model_no: label used only in log output.
    :return: the id of the created model.
    """
    print(f'\n\nCreating model {model_no}\n--------------')
    with open(requrst_json_fle, encoding="utf-8") as f:
        data = json.load(f)
    model_id = data['id']
    print(f'\nmodel {model_no} id: ', model_id)
    rv = client.post("/delphi/create-model", json=data)
    print(f'\ncreate-model {model_no} response:\n', rv.get_json())
    rv = client.get(f"/delphi/models/{model_id}")
    status = rv.get_json()['status']
    print(f'\nmodel {model_no} status initial response:\n', rv.get_json())
    # Fail fast on known error statuses. (The `return` statements that
    # followed `assert False` in the original were unreachable dead code.)
    if status == 'invalid model id':
        print(f'\nGet Model {model_no} Status Error: Invalid model id!!')
        assert False
    elif status == 'server error: training':
        print(f'\nModel {model_no} - Server error: training process cannot be forked!!')
        assert False
    # Poll every 5 seconds until training completes.
    count = 1
    while status == 'training':
        rv = client.get(f"/delphi/models/{model_id}")
        status = rv.get_json()['status']
        print('\n\t', '--'*count, status)
        count += 1
        time.sleep(5)
    print(f'\nmodel {model_no} status final response:\n', rv.get_json())
    return model_id
def create_experiment(client, request_json_fle, model_id, experiment_no):
    """POST a projection experiment to a trained model and return the response.

    Fails the calling test on known error payloads instead of returning.

    :param client: Flask test client.
    :param request_json_fle: path to the experiment request JSON file.
    :param model_id: id of the model to attach the experiment to.
    :param experiment_no: label used only in log output.
    :return: the raw response from the experiments endpoint.
    """
    print(f'\n\nCreating experiment {experiment_no}\n----------------------')
    with open(request_json_fle, encoding="utf-8") as f:
        data = json.load(f)
    rv = client.post(f"/delphi/models/{model_id}/experiments", json=data)
    experiment_id = rv.get_json()["experimentId"]
    print(f'\nexperiment {experiment_no} id: ', experiment_id)
    # The API signals errors through the experimentId field. (The `return`
    # statements that followed `assert False` in the original were dead code.)
    if experiment_id == 'invalid model id':
        print(f'\nCreate Experiment {experiment_no} Error: Invalid model id!!')
        assert False
    elif experiment_id == 'model not trained':
        print(f'\nCreate Experiment {experiment_no} Error: Model not trained. Cannot run experiment!!')
        assert False
    return rv
def get_experiment_results(client, model_id, experiment_id):
    """Poll the experiment-results endpoint until the experiment leaves the
    "in progress" state, and return the final response.

    Fails the calling test (assert False) when the experiment id is unknown.
    """
    rv = client.get(f"/delphi/models/{model_id}/experiments/{experiment_id}")
    status = rv.get_json()["status"]
    if status == 'invalid experiment id':
        print(f'\n\nGet Experiment {experiment_id} Results Error: Invalid experiment id!!')
        assert False
        return
    # NOTE(review): the status just read is discarded — polling restarts from
    # "in progress", so the endpoint is always queried at least one more time.
    status = "in progress"
    count = 1
    while status == "in progress":
        rv = client.get(f"/delphi/models/{model_id}/experiments/{experiment_id}")
        status = rv.get_json()["status"]
        print('\n\t', '--'*count, status)
        count += 1
        time.sleep(1)
    status = rv.get_json()["status"]
    print(f'\nexperiment {experiment_id} final status: ', status)
    return rv
def test_createModel_and_createExperiment(client):
    """End-to-end REST test: create models, run projection experiments,
    verify that re-fetching an experiment reproduces its results, then
    delete the rows the test added to the database."""
    # Test create Model
    model_id1 = create_model(client, "tests/data/delphi/create_model_input_2.json", 1)
    # Test create Experiment
    rv = create_experiment(client, "tests/data/delphi/experiments_projection_input_2.json",
                           model_id1, 1)
    experiment_id1 = rv.get_json()["experimentId"]
    # Test get Experiment Results
    rv11 = get_experiment_results(client, model_id1, experiment_id1)
    time.sleep(1)
    # Test create Experiment for a second time
    rv = create_experiment(client, "tests/data/delphi/experiments_projection_input_2.json", model_id1, 2)
    experiment_id2 = rv.get_json()["experimentId"]
    get_experiment_results(client, model_id1, experiment_id2)
    # Request results for a previous experiment. The results we got earlier and
    # the results we are getting now must be identical.
    print('\n\nRetrieving experiment 1 results again\n-------------------------------------')
    rv12 = get_experiment_results(client, model_id1, experiment_id1)
    assert rv11.get_json() == rv12.get_json()
    status = rv12.get_json()["status"]
    print('\nexperiment 1 final status 2nd retrieval: ', status)
    # Test create another Model
    model_id2 = create_model(client, "tests/data/delphi/create_model_input_1.json", 2)
    # Delete the rows added to the database by testing code
    CauseMosAsyncExperimentResult.query.filter_by(id=experiment_id1).delete()
    CauseMosAsyncExperimentResult.query.filter_by(id=experiment_id2).delete()
    ExperimentResult.query.filter_by(id=experiment_id1).delete()
    ExperimentResult.query.filter_by(id=experiment_id2).delete()
    DelphiModel.query.filter_by(id=model_id1).delete()
    DelphiModel.query.filter_by(id=model_id2).delete()
    db.session.commit()
    assert True
|
<gh_stars>1-10
# multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
import random
import util
from game import Agent, AgentState, Directions, GameStateData
from pacman import GameState
from util import manhattanDistance
class ReflexAgent(Agent):
    """
    A reflex agent chooses an action at each choice point by examining
    its alternatives via a state evaluation function.
    The code below is provided as a guide. You are welcome to change
    it in any way you see fit, so long as you don't touch our method
    headers.
    """
    def getAction(self, gameState):
        """
        You do not need to change this method, but you're welcome to.
        getAction chooses among the best options according to the evaluation function.
        Just like in the previous project, getAction takes a GameState and returns
        some Directions.X for some X in the set {North, South, West, East}
        """
        # Collect legal moves and successor states
        legalMoves = gameState.getLegalActions()
        # Never stand still: remove the Stop action when alternatives exist.
        if 'Stop' in legalMoves:
            legalMoves.remove('Stop')
        # Choose one of the best actions
        scores = [self.evaluationFunction(
            gameState, action) for action in legalMoves]
        bestScore = max(scores)
        bestIndices = [index for index in range(
            len(scores)) if scores[index] == bestScore]
        # Pick randomly among the best
        chosenIndex = random.choice(bestIndices)
        "Add more of your code here if you want to"
        return legalMoves[chosenIndex]
    def evaluationFunction(self, currentGameState: GameState, action):
        """
        Design a better evaluation function here.
        The evaluation function takes in the current and proposed successor
        GameStates (pacman.py) and returns a number, where higher numbers are better.

        This implementation combines a ghost-danger score for the successor
        state with a +/-10 bonus for moving toward/away from the "best" food
        pellet (see get_best_food).
        """
        # Useful information you can extract from a GameState (pacman.py)
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        future_ghost_score = self.get_ghost_score(successorGameState)
        best_food = self.get_best_food(currentGameState)
        my_pos = currentGameState.getPacmanPosition()
        my_future_pos = successorGameState.getPacmanPosition()
        score = 0
        score += future_ghost_score
        # Penalize moving away from the chosen pellet, reward moving toward it.
        if util.manhattanDistance(my_pos, best_food) < util.manhattanDistance(my_future_pos, best_food):
            score -= 10
        else:
            score += 10
        return score
    def get_ghost_score(self, gameState: GameState):
        """Score the ghost threat at the current position.

        Returns -100 when a non-scared ghost is effectively within 3 steps and
        no capsule is adjacent; +10 when a capsule is within reach.
        """
        score = 0
        closest_capsule = self.get_closest_capsule(gameState)
        closest_ghost = self.get_closest_ghost(gameState)
        my_pos = gameState.getPacmanPosition()
        dist_ghost = util.manhattanDistance(
            my_pos, closest_ghost.getPosition())
        scared_timer = closest_ghost.scaredTimer
        # Ghost is dangerously close (its scared time extends the safe margin).
        if dist_ghost + scared_timer < 3:
            if closest_capsule:
                dist_capsule = util.manhattanDistance(my_pos, closest_capsule)
                if dist_capsule < 2:
                    # A capsule is within reach — grabbing it neutralizes the threat.
                    score += 10
                else:
                    score = -100
            else:
                score = -100
        elif closest_capsule:
            dist_capsule = util.manhattanDistance(my_pos, closest_capsule)
            if dist_capsule < 2:
                score += 10
        return score
    def get_best_foods_from_ghost(self, gameState: GameState, ghost: AgentState):
        """Pick the food pellet Pacman can likely reach before `ghost`.

        Prefers the nearest pellet (Manhattan distance) among those Pacman is
        strictly closer to than the ghost (ghost distance extended by its
        scared time). Falls back to the first pellet examined; returns None
        when no food remains.
        """
        food_positions = gameState.getFood().asList()
        ghost_distances = [manhattanDistance(ghost.getPosition(), food)
                           for food in food_positions]
        my_pos = gameState.getPacmanPosition()
        my_distances = [manhattanDistance(my_pos, food)
                        for food in food_positions]
        best_pos = None
        best_dist = 1000  # sentinel: "no candidate chosen yet"
        for i, food_pos in enumerate(food_positions):
            if best_dist == 1000:
                # First iteration: take any pellet as a fallback choice.
                best_pos = food_pos
                best_dist = my_distances[i]
            elif my_distances[i] < ghost_distances[i] + ghost.scaredTimer:
                if best_dist > my_distances[i]:
                    best_pos = food_pos
                    best_dist = my_distances[i]
        return best_pos
    def get_best_food(self, gameState: GameState) -> tuple:
        """Best reachable pellet considering every ghost: the nearest of the
        per-ghost candidates.

        NOTE(review): when no food remains the candidates are None and the
        distance key will fail — presumably unreachable before a win state.
        """
        ghosts = gameState.getGhostStates()
        my_pos = gameState.getPacmanPosition()
        best_foods = [self.get_best_foods_from_ghost(gameState, ghost)
                      for ghost in ghosts]
        closest_bests = min(best_foods,
                            key=lambda x: util.manhattanDistance(my_pos, x))
        return closest_bests
    def get_closest_capsule(self, gameState: GameState):
        """Position of the capsule nearest to Pacman, or None if none remain."""
        capsule_positions = gameState.getCapsules()
        my_pos = gameState.getPacmanPosition()
        distances = [manhattanDistance(my_pos, capsule)
                     for capsule in capsule_positions]
        if capsule_positions:
            return capsule_positions[distances.index(min(distances))]
        return None
    def get_closest_ghost(self, gameState: GameState) -> AgentState:
        """State of the ghost nearest to Pacman (Manhattan distance)."""
        ghost_states = gameState.getGhostStates()
        my_pos = gameState.getPacmanPosition()
        distances = [manhattanDistance(my_pos,
                                       ghost.getPosition()) for ghost in ghost_states]
        return ghost_states[distances.index(min(distances))]
def scoreEvaluationFunction(currentGameState):
    """
    Default evaluation function: simply the score of the state — the same
    number displayed in the Pacman GUI.
    Meant for use with the adversarial search agents (not reflex agents).
    """
    score = currentGameState.getScore()
    return score
class MultiAgentSearchAgent(Agent):
    """
    This class provides some common elements to all of your
    multi-agent searchers. Any methods defined here will be available
    to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
    You *do not* need to make any changes here, but you can if you want to
    add functionality to all your adversarial search agents. Please do not
    remove anything, however.
    Note: this is an abstract class: one that should not be instantiated. It's
    only partially specified, and designed to be extended. Agent (game.py)
    is another abstract class.
    """
    def __init__(self, evalFn='scoreEvaluationFunction', depth='2'):
        self.index = 0  # Pacman is always agent index 0
        # Resolve the evaluation function by name from this module's globals.
        self.evaluationFunction = util.lookup(evalFn, globals())
        # Search depth in plies; arrives as a string from the command line.
        self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
    """
    Your minimax agent (question 2)
    """
    def get_minimax_score(self, gameState: GameState, agentIndex, depth):
        # Terminal test: depth exhausted or game already decided.
        if depth == 0 or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        if agentIndex == 0:
            # Pacman (maximizer); max_value returns (score, action).
            return self.max_value(gameState, depth)[0]
        else:
            # Ghosts (minimizers).
            return self.min_value(gameState, agentIndex, depth)
    def max_value(self, gameState, depth):
        """Best achievable score for Pacman and the action achieving it."""
        v = -float("inf")
        max_action = None
        # "Stop" is excluded to keep Pacman moving.
        for action in [x for x in gameState.getLegalActions(0) if x != "Stop"]:
            successor = gameState.generateSuccessor(0, action)
            res = self.get_minimax_score(successor, 1, depth)
            if v < res:
                v = res
                max_action = action
        return v, max_action
    def min_value(self, gameState, agentIndex, depth):
        """Worst-case score over the given ghost's moves (then remaining ghosts)."""
        v = float("inf")
        for action in [x for x in gameState.getLegalActions(agentIndex) if x != "Stop"]:
            successor = gameState.generateSuccessor(agentIndex, action)
            if agentIndex == gameState.getNumAgents() - 1:
                # Last ghost: next turn is Pacman's, one ply consumed.
                v = min(v, self.get_minimax_score(successor, 0, depth - 1))
            else:
                v = min(v, self.min_value(successor, agentIndex + 1, depth))
        return v
    def getAction(self, gameState):
        """
        Returns the minimax action from the current gameState using self.depth
        and self.evaluationFunction.
        Here are some method calls that might be useful when implementing minimax.
        gameState.getLegalActions(agentIndex):
        Returns a list of legal actions for an agent
        agentIndex=0 means Pacman, ghosts are >= 1
        gameState.generateSuccessor(agentIndex, action):
        Returns the successor game state after an agent takes an action
        gameState.getNumAgents():
        Returns the total number of agents in the game
        """
        "*** YOUR CODE HERE ***"
        # Root of the search is a max node for Pacman; take the action component.
        action = self.max_value(gameState, self.depth)[1]
        return action
class AlphaBetaAgent(MultiAgentSearchAgent):
    """
    Your minimax agent with alpha-beta pruning (question 3)
    """
    def get_minimax_score(self, gameState: GameState, agentIndex, depth, alpha, beta):
        # Terminal test: depth exhausted or game already decided.
        if depth == 0 or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        if agentIndex == 0:
            # Pacman (maximizer); max_value returns (score, action).
            return self.max_value(gameState, depth, alpha, beta)[0]
        else:
            return self.min_value(gameState, agentIndex, depth, alpha, beta)
    def max_value(self, gameState, depth, alpha, beta):
        """Maximizer node with pruning; returns (score, action)."""
        v = -float("inf")
        max_action = None
        actions = gameState.getLegalActions(0)
        if not actions or gameState.isWin():
            # No moves (or already won): evaluate and stand still.
            return self.evaluationFunction(gameState), Directions.STOP
        for action in actions:
            successor = gameState.generateSuccessor(0, action)
            res = self.get_minimax_score(successor, 1, depth, alpha, beta)
            if v < res:
                v = res
                max_action = action
            # Prune: the minimizer above will never allow a value > beta.
            # (Strict inequality — equal values are not pruned.)
            if v > beta:
                return v, max_action
            alpha = max(alpha, v)
        return v, max_action
    def min_value(self, gameState, agentIndex, depth, alpha, beta):
        """Minimizer node (ghost) with pruning; returns the score only."""
        v = float("inf")
        actions = gameState.getLegalActions(agentIndex)
        if not actions or gameState.isWin():
            return self.evaluationFunction(gameState)
        for action in actions:
            successor = gameState.generateSuccessor(agentIndex, action)
            if agentIndex == gameState.getNumAgents() - 1:
                # Last ghost: back to Pacman with one less ply.
                v = min(v, self.get_minimax_score(successor, 0,
                                                  depth - 1, alpha, beta))
            else:
                v = min(v, self.min_value(successor, agentIndex + 1,
                                          depth, alpha, beta))
            # Prune: the maximizer above will never allow a value < alpha.
            if v < alpha:
                return v
            beta = min(beta, v)
        return v
    def getAction(self, gameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        "*** YOUR CODE HERE ***"
        # Root is a max node with the full (-inf, +inf) window.
        return self.max_value(gameState, self.depth,
                              -float("inf"), float("inf"))[1]
class ExpectimaxAgent(MultiAgentSearchAgent):
    """
    Your expectimax agent (question 4)
    """
    def get_expectimax_score(self, gameState: GameState, agentIndex, depth):
        # Terminal test: depth exhausted or game already decided.
        if depth == 0 or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        if agentIndex == 0:
            return self.max_value(gameState, depth)[0]
        else:
            # Ghosts are chance nodes: expected value over their moves.
            return self.exp_value(gameState, agentIndex, depth)
    def max_value(self, gameState, depth):
        """Maximizer node for Pacman; returns (score, action)."""
        v = -float("inf")
        max_action = None
        # "Stop" is excluded to keep Pacman moving.
        for action in [x for x in gameState.getLegalActions(0) if x != "Stop"]:
            successor = gameState.generateSuccessor(0, action)
            res = self.get_expectimax_score(successor, 1, depth)
            if v < res:
                v = res
                max_action = action
        return v, max_action
    def exp_value(self, gameState, agentIndex, depth):
        """Chance node: average score over the ghost's legal (non-Stop) moves."""
        v = 0
        actions = [x for x in gameState.getLegalActions(
            agentIndex) if x != "Stop"]
        if not actions or gameState.isWin():
            return self.evaluationFunction(gameState)
        for action in actions:
            successor = gameState.generateSuccessor(agentIndex, action)
            if agentIndex == gameState.getNumAgents() - 1:
                # Last ghost: back to Pacman with one less ply.
                v += self.get_expectimax_score(successor, 0, depth - 1)
            else:
                v += self.exp_value(successor, agentIndex + 1, depth)
        # Uniform distribution over the legal moves.
        return v / len(actions)
    def getAction(self, gameState):
        """
        Returns the expectimax action using self.depth and self.evaluationFunction
        All ghosts should be modeled as choosing uniformly at random from their
        legal moves.
        """
        "*** YOUR CODE HERE ***"
        return self.max_value(gameState, self.depth)[1]
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).
    DESCRIPTION: not implemented yet — calling this aborts via
    util.raiseNotDefined().
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
# Abbreviation
better = betterEvaluationFunction
|
<filename>operators_gen2/utils/dtype_converter/script.py
# For more information about the Python3Operator, drag it to the graph canvas,
# right click on it, and click on "Open Documentation".
# To uncomment the snippets below you can highlight the relevant lines and
# press Ctrl+/ on Windows and Linux or Cmd+/ on Mac.
# # Basic Example 1: Count inputs so far and send on output port (port type
# com.sap.core.string)
# # When using the snippet below make sure you create an output port of type
# #string
# counter = 0
#
# def on_input(msg_id, header, body):
# global counter
# counter += 1
# api.outputs.output.publish(str(counter))
#
# api.set_port_callback("input", on_input)
# # Basic Example 2: Read incoming table as stream and send it
# as stream as well to the output port (any table port type),
# # When using the snippet below make sure you create an input and output
# # port of table type
#
# chunk_size = 10
#
# # Since this is run in a different thread, exceptions on it will not
# # trigger any action. Alternatives are using `api.propagate_exception
# # or sending through a queue to the operator main thread (the callback one).
# def process_batch(body):
# try:
# reader = body.get_reader()
# # This allows creating one output stream per thread,
# # each being able to send data in parallel.
# msg_id, writer = api.outputs.output.with_writer()
# while True:
# table = reader.read(chunk_size)
# # When the stream is closed, len(table) < expected.
# # If -1 was passed, read would wait for stream to close
# if len(table) <= 0:
# api.logger.info('End of table')
# break
#
# writer.write(table)
# writer.close()
# except Exception as ex:
# api.propagate_exception(ex)
#
# def on_input(msg_id, header, body):
# # Since each input thriggers a thread, it is possible to have
# # multiple actions happening in parallel.
# threading.Thread(target=process_batch, args=[body]).start()
#
# api.set_port_callback("input", on_input)
# # Basic Example 3: State Management support, more details at the operator
# # documentation.
# # When using the snippet below make sure you create input and output ports
# # of the com.sap.core.string type.
# import pickle
#
# # Internal operator state
# acc = 0
#
# def on_input(msg_id, header, body):
# global acc
# v = int(body.get())
# acc += v
# api.outputs.output.publish("%d: %d" % (v, acc))
#
# api.set_port_callback("input", on_input)
#
# # It is required to have `is_stateful` set, but since this operator
# # script does not define a generator no information about output port is passed.
# # More details in the operator documentation.
#
# api.set_initial_snapshot_info(api.InitialProcessInfo(is_stateful=True))
#
# def serialize(epoch):
# return pickle.dumps(acc)
#
# api.set_serialize_callback(serialize)
#
# def restore(epoch, state_bytes):
# global acc
# acc = pickle.loads(state_bytes)
#
# api.set_restore_callback(restore)
#
# def complete_callback(epoch):
# api.logger.info(f"epoch {epoch} is completed!!!")
#
# api.set_epoch_complete_callback(complete_callback)
#
# # Prestart
# # When using the snippet below make sure you create an output port of type
# # int64
# counter = 0
#
# def gen():
# global counter
# for i in range(0, 3):
# api.outputs.output.publish(counter)
# counter += 1
#
# api.set_prestart(gen)
# # Timer
# # When using the snippet below make sure you create an output port of type
# # binary
# import os
#
# # Function called when operator handling mode is set to `retry`
# # (more details at the operator documentation)
# def custom_response_callback(msg_id, ex):
# if ex:
# api.logger.error("Error when publishing %s: %s" % (str(msg_id), str(ex)))
#
#
# def time_callback():
# dummy_binary = io.BytesIO(os.urandom(20))
# dummy_header = {"foo": ["bar"]}
# # Send all binary data at once to the output, if only the first
# # 10 bytes were to be sent, `n` = 10
# msg_id = api.outputs.output.publish(dummy_binary, -1,
# header=dummy_header,
# response_callback=custom_response_callback)
# # Controls the time until the next call to time_callback
# return 1
#
# api.add_timer(time_callback)
# # Shutdown
# counter = 0
#
# def on_input(msg_id, header, body):
# global counter
# counter += 1
#
# api.set_port_callback("input", on_input)
#
# def shutdown1():
# print("shutdown1: %d" % counter)
#
# api.set_shutdown(shutdown1)
|
import pandas as pd
from .utils import (
is_series, is_dataframe, is_categorical, is_numeric,
contains_categorical, contains_only_categorical, is_temporal)
def get_applicable_methods(data=None):
    """Informs about the imputation methods that are applicable to a given
    data frame or series, based on the number of variables (one or
    multiple), type of data (categorical, numerical, or both), and whether
    the data is of temporal nature.

    :param data: The data for which the applicable imputation methods
        should be returned.
    :type data: pandas.Series or pandas.DataFrame
    :return: The imputation methods that are applicable to the data
    :rtype: set of strings
    :raises: TypeError
    """
    # Check that data is a Series or Dataframe:
    if not isinstance(data, (pd.Series, pd.DataFrame)):
        raise TypeError(
            'The data has to be a Series or DataFrame but is a {}.'.format(
                type(data).__name__))
    # Definition of sets:
    applicable_to_cat_only = {
        'logistic regression imputation'
    }
    applicable_to_num_only = {
        'mean substitution',
        'median substitution',
        'random value imputation',
        'linear regression',
        'stochastic regression',
        'imputation using k-NN',
        'interpolation',
        'interpolation with seasonal adjustment'
    }
    applicable_to_cat_and_num = {
        'listwise deletion',
        'pairwise deletion',
        'variable deletion',
        'random sample imputation',
        'random hot-deck imputation',
        'most-frequent substitution',
        'constant value substitution',
        'srmi',
        'mice',
        'LOCF',
        'NOCB'
    }
    applicable_to_cat = applicable_to_cat_only.union(applicable_to_cat_and_num)
    applicable_to_num = applicable_to_num_only.union(applicable_to_cat_and_num)
    all_methods = applicable_to_cat.union(applicable_to_num)
    requires_temp = {
        'LOCF',
        'NOCB',
        'interpolation',
        'interpolation with seasonal adjustment'
    }
    does_not_require_temp = all_methods - requires_temp
    applicable_to_series = {
        'listwise deletion',
        'random sample imputation',
        'most-frequent substitution',
        'constant value substitution',
        'LOCF',
        'NOCB',
        'mean substitution',
        'median substitution',
        'random value imputation',
        'interpolation',
        'interpolation with seasonal adjustment'
    }
    # Applicability. Pick the base set from the data type, then restrict it:
    #   * a Series additionally intersects with the series-capable methods;
    #   * non-temporal data drops the methods that require temporal ordering.
    res = set()
    if is_series(data):
        base = applicable_to_cat if is_categorical(data) else applicable_to_num
        res = applicable_to_series.intersection(base)
    elif is_dataframe(data):
        if contains_only_categorical(data):
            res = applicable_to_cat
        elif contains_categorical(data):
            # Mixed categorical/numerical columns: only type-agnostic methods.
            res = applicable_to_cat_and_num
        else:
            res = applicable_to_num
    if not is_temporal(data):
        res = res.intersection(does_not_require_temp)
    # Return set with applicable methods:
    return res
|
import numpy as np
import os, sys, glob
#from sklearn.externals import joblib
import joblib
"""
Contains The LSQ generation algorithm that returns a list of permutations to build a Latin Square.
Also contains methods to transform these lsqs to a representation that can be used for the
belief propagation algorithm of lattice codes.
As well as a save/load methods to allow the pre-generation of the lsq matrices, as the algorithm
to generate a lsq can be somewhat expensive to compute.
Save path follow the pattern:
default_path/d{weight}/{size}/seed{seedvalue}.joblib
"""
# Root directory where pre-generated LSQ matrices are cached (see module docstring).
default_path = 'C:/Coding/PythonFuzzyOutputs/LsqLattices'
# Generating value sequences [1, 1/sqrt(d), ..., 1/sqrt(d)] for d = 3 and d = 5.
valSeq3 = [1, 0.57735, 0.57735]
# BUG FIX: later code (test_determinant_var) references `valSeq5`, but only the
# lowercase `valseq5` existed, raising NameError. Define the properly-cased name
# and keep the original lowercase spelling as a backward-compatible alias.
valSeq5 = [1, 0.4472135955, 0.4472135955, 0.4472135955, 0.4472135955]
valseq5 = valSeq5
class LatticeLSQ:
    """A Class to contain all the methods related to generating Latin Square Lattices for
    use as Low Density Lattice Codes parity-check matrices"""
    def __init__(self, n, d, seed=None, default_save_path=default_path):
        # n: matrix size; d: number of non-zero entries per row/column.
        self.n = n
        self.d = d
        if seed==None:
            # NOTE(review): np.random.seed() returns None, so self.seed becomes
            # None here and the cache file is named 'seed_None' — confirm intent.
            self.seed = np.random.seed()
        else:
            self.seed = seed
        # Cache layout: default_path/d{d}/{n}/seed_{seed}.joblib
        # NOTE(review): the `default_save_path` parameter is ignored; the
        # module-level `default_path` is used instead.
        file_dir = default_path+'/'+f'd{self.d}' + '/' + str(self.n)
        if not os.path.exists(file_dir):
            os.makedirs(file_dir)
        file_name = f'd{self.d}' + '/' + str(self.n)+'/'+f'seed_{self.seed}'
        file = default_path+'/'+file_name+'.joblib'
        if os.path.exists(file):
            # Re-use a previously generated LSQ with the same (n, d, seed).
            self.load_lsq(file_name)
        else:
            # Generate fresh permutation/sign matrices and cache them to disk.
            tuple = self.generate_latin_square_encoding(self.n, self.d, self.seed)
            self.permutations = tuple[0]
            self.signs = tuple[1]
            self.save_lsq(file_name)
        pass
    def gen_lsq_value_sequence_1(self,d):
        # Default value sequence [1, 1/sqrt(d), ..., 1/sqrt(d)] of length d
        # (matches the module-level valSeq3 constant for d = 3).
        seq = []
        seq.append(1)
        for i in range(d-1):
            seq.append( (d**(-0.5)) )
        return seq
    def save_lsq(self, name):
        # Serialize the whole object under the cache root.
        file = default_path+'/'+name+'.joblib'
        joblib.dump(self, file)
    def load_lsq(self, name):
        """Load a cached LatticeLSQ and copy its fields into self.

        Returns True on success, False when the cache file does not exist.
        """
        file = default_path+'/'+name+'.joblib'
        if os.path.exists(file):
            # Load it with joblib
            #if VERBOSE: print('Loading', file)
            gcbObj = joblib.load(file)
            self.n = gcbObj.n
            self.d = gcbObj.d
            self.seed = gcbObj.seed
            self.permutations = gcbObj.permutations
            self.signs = gcbObj.signs
            return True
        else:
            return False
    """ n is the size of the matrix, d in the number of non-zero elements in each row/column"""
    def generate_latin_square_encoding (self, n, d, seed=None):
        """Generate the (d x n) permutation and sign matrices of a Latin square,
        removing 2-loops and 4-loops column by column.

        Returns the pair (permutationMat, signMat).
        """
        int_seed = None
        if (seed == None):
            # NOTE(review): np.random.seed() returns None; RandomState(None)
            # then seeds from the OS — confirm this fallback is intended.
            int_seed = np.random.seed()
        else:
            int_seed = seed
        #output = []
        generator = np.random.RandomState(int_seed) #Set a random generator, with seed
        permutationMat = np.empty(shape=(d, n)) #Generate a placeholder matrix
        for i in range(d): #Filling the matrix up with permutations
            permutationMat[i] = generator.permutation(n)
        #Removing Loops from matrix...
        # Sweep the columns cyclically; a column counts as "loopless" only when
        # no swap was needed, and any swap resets the counter.
        c = 0
        loopless_columns = 0
        while (loopless_columns < n):
            changed_perm = -1
            twoLoops = False
            fourLoops = False
            #searching for twoLoops
            for i in range (d):
                for j in range(i+1, d):
                    if ( permutationMat[i,c] == permutationMat[j,c] ):
                        #2-loop was found
                        changed_perm = i
                        twoLoops = True
                        break
                # NOTE(review): this unconditional break exits after i == 0,
                # so only row 0 is compared against the other rows — confirm.
                break
            #If no 2-loops are found, search for 4-loops
            if (twoLoops == False):
                col = permutationMat[:,c]
                for c0 in range(n):
                    if (c0 != c):
                        #col = permutationMat[:,c0]
                        # Rows of column c whose values also occur in column c0.
                        comparedCols = np.in1d(col, permutationMat[:,c0])#.reshape(col.shape)
                        #print(comparedCols)
                        if (np.count_nonzero(comparedCols) > 1):
                            # More than one shared value => 4-loop; fix the first row involved.
                            changed_perm = np.where(comparedCols)[0][0]
                            break
            if (changed_perm != -1) :
                # Swap the offending entry with a random other column of the same row.
                index = c
                while (index == c):
                    index = generator.randint(0,n)
                temp = permutationMat[changed_perm, c]
                permutationMat[changed_perm, c] = permutationMat[changed_perm, index]
                permutationMat[changed_perm, index] = temp
                loopless_columns = 0
            else :
                #No loop found at column c
                loopless_columns = loopless_columns + 1
            c = (c + 1) % n
        #print(permutationMat)
        #output.append(permutationMat)
        #Generating Sign Matrix
        # Random signs in {-1, +1} for every non-zero entry.
        signMat = np.empty(shape=(d, n))
        for i in range(d):
            signMat[i] = generator.randint(2, size=n)*2 - 1
        #output.append(signMat)
        return (permutationMat, signMat)
    def generate_matrix(self, value_sequence=None):
        """Build the dense n x n Latin-square matrix.

        Entry (permutations[j, i], i) receives seq[j] * signs[j, i]. Returns
        False when a custom value_sequence of the wrong length is supplied.
        """
        seq = None
        if value_sequence==None:
            seq = self.gen_lsq_value_sequence_1(self.d)
        elif len(value_sequence) != self.d:
            return False
        else:
            seq = value_sequence
        latSquare = np.zeros((self.n, self.n))
        for i in range(self.n):
            for j in range(self.d):
                rowVal = int(self.permutations[j,i])
                latSquare[rowVal ,i] = seq[j] * self.signs[j,i]
        return latSquare
    """
    This method return a triple (shape, A_row, A_col) that can be used in the belief_propagation algorithm.
    For Example:
    A_row = [ row1, row2, ..., row_n ]
    row[i] = [ index_1, index_2, ..., index_d ],
    with index_i = (j, k) such that M[i,j] = k
    A_col will be defined similarly.
    """
    def get_sparse_rep(self, value_sequence=None):
        seq = None
        if value_sequence==None:
            seq = self.gen_lsq_value_sequence_1(self.d)
        elif len(value_sequence) != self.d:
            return False
        else:
            seq = value_sequence
        shape = (self.n, self.n)
        # Column-wise sparse representation: (row index, signed value) pairs.
        A_col = []
        for i in range(self.n):
            col_i = []
            for j in range(self.d):
                col_i.append( (int(self.permutations[j,i]), self.signs[j,i]*seq[j]) )
            col_i.sort()
            A_col.append(col_i)
        # Row-wise representation derived by transposing the column lists.
        A_row = []
        for i in range(self.n):
            A_row.append([])
        for i in range(self.n):
            for p in A_col[i]:
                A_row[p[0]].append( (i, p[1]))
        return (shape, A_row, A_col)
def generate_many_encoding_for_tests( n_list, d_list, seed_low, seed_high):
    """Pre-generate (and cache to disk) LSQ encodings for every combination of
    d in d_list, n in n_list and seed in [seed_low, seed_high]."""
    for d in d_list:
        for n in n_list:
            print(f'd = {d}, n = {n}')
            for seed in range(seed_low, seed_high+1):
                print(seed, end=' ')
                # Constructing the object generates and saves the encoding.
                new_lsq = LatticeLSQ(n, d, seed)
            print('')
    print('Done!')
def test_determinant( n, d, beta=1):
    """Build an LSQ parity matrix scaled by 1/beta and print det(H), det(G)
    and the resulting variance bound det(G)^(2/n) / (2*e*pi)."""
    lsq = LatticeLSQ(n, d, 0)
    # NOTE(review): valSeq3 (length 3) is used regardless of d — this raises
    # IndexError for d > 3; confirm the intended sequence for larger d.
    scaled_seq = []
    for i in range(d):
        scaled_seq.append( valSeq3[i]/beta )
    h = lsq.generate_matrix( scaled_seq )
    g = np.linalg.inv(h)
    det_g = np.linalg.det(g)
    print( 'det(H) = ', np.linalg.det(h) )
    print( 'det(G) = ', det_g )
    # BUG FIX: `math` was never imported in this module (only numpy/os/sys/glob/
    # joblib), so `math.e`/`math.pi` raised NameError. Use numpy's constants.
    sigma = ( det_g**(2.0/n) )/(2*np.e*np.pi)
    print( 'Var Bound = ', sigma )
def test_determinant_var( n, d, sigma2):
    """Scale the d=3 / d=5 value sequence so the lattice matches a target
    noise variance sigma2, then print det(H), det(G) and the variance bound.

    BUG FIXES vs. the original:
      * `valSeq5` and `valSeq` were undefined names (the module defines the
        lowercase `valseq5` and the local variable is `valseq`), so this
        function always raised NameError.
      * `math` was never imported; numpy's e/pi constants are used instead.
    """
    lsq = LatticeLSQ(n, d, 0)
    #det_g_orig = np.linalg.det( np.linalg.inv( lsq.generate_matrix( valSeq3 ) ) )
    det_g_orig = np.linalg.det( np.linalg.inv( lsq.generate_matrix( valseq5 ) ) )
    #scaling_factor = np.sqrt( 2*np.e*np.pi * sigma2 )
    scaling_factor = np.sqrt( sigma2 / 0.0585 ) / (det_g_orig**(1.0/n) )
    scaled_seq = []
    if (d == 3):
        valseq = valSeq3
    elif(d == 5):
        valseq = valseq5
    for i in range(d):
        scaled_seq.append( valseq[i] / scaling_factor )
    h = lsq.generate_matrix( scaled_seq )
    g = np.linalg.inv(h)
    det_g = np.linalg.det(g)
    print( 'det(H) = ', np.linalg.det(h) )
    print( 'det(G) = ', det_g )
    sigma = ( det_g**(2.0/n) )/(2*np.e*np.pi)
    print( 'Var Bound = ', sigma )
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.db import models
from django.utils.html import escape
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from rapidsms.models import ExtensibleModelBase
class Point(models.Model):
    """
    This model represents an anonymous point on the globe. It should be
    replaced with something from GeoDjango soon, but I can't seem to get
    Spatialite to build right now...
    """

    # 13 digits / 10 decimal places covers the full +/-90 and +/-180 ranges
    # at sub-millimeter precision.
    latitude = models.DecimalField(max_digits=13, decimal_places=10)
    longitude = models.DecimalField(max_digits=13, decimal_places=10)

    def __unicode__(self):
        # "lat, lng" display form (Python 2-style; used by Django 1.x admin).
        return "%s, %s" % (self.latitude, self.longitude)

    def __repr__(self):
        return '<%s: %s>' %\
            (type(self).__name__, self)
class LocationType(models.Model):
    """
    This model represents the 'type' of Location, as an option for a
    simpler way of having a location heirarchy without having different
    classes for each location type (as is supported by the generic
    relation to parent).
    """

    name = models.CharField(max_length=100)
    # The slug doubles as the primary key, so it must be globally unique.
    slug = models.SlugField(unique=True, primary_key=True)

    def __unicode__(self):
        return self.name
class Location(models.Model):
    """
    This model represents a named point on the globe. It is deliberately
    spartan, so more specific apps can extend it with their own fields
    and relationships without clashing with built-in functionality.
    """

    # NOTE(review): Python 2-style metaclass declaration (has no effect on
    # Python 3); presumably enables rapidsms model extensibility — confirm.
    __metaclass__ = ExtensibleModelBase

    # Optional geographic coordinates of this location.
    point = models.ForeignKey(Point, null=True, blank=True)
    # Optional flat classification, as an alternative to subclassing.
    type = models.ForeignKey(LocationType, related_name="locations",
        blank=True, null=True)

    # Generic foreign key to an arbitrary parent location (any model),
    # forming the location hierarchy.
    parent_type = models.ForeignKey(ContentType, null=True, blank=True)
    parent_id = models.PositiveIntegerField(null=True, blank=True)
    parent = generic.GenericForeignKey("parent_type", "parent_id")

    # choices for the Location.direction method.
    # (values stolen from label-overlay.js)
    class Direction:
        CENTER = "center"
        ABOVE = "above"
        RIGHT = "right"
        BELOW = "below"
        LEFT = "left"

    def __unicode__(self):
        """
        Return the instance's ``name`` attribute if a subclass defines one,
        otherwise a "#<pk>" placeholder.
        """
        return getattr(self, "name", "#%d" % self.pk)

    @property
    def uid(self):
        """
        Return a unique ID for this location, suitable for embedding in
        URLs. The primary key is insufficient, because the name of the
        model must also be included.
        This method (and ``get_for_uid`` will go away, once the ``slug``
        field is validated as unique across all Location subclasses.
        """
        return "%s:%d" % (self.content_type, self.pk)

    @property
    def content_type(self):
        # Lower-case model name of the concrete subclass (e.g. "city").
        return ContentType.objects.get_for_model(self).model

    @staticmethod
    def get_for_uid(uid):
        """
        Return the object (an instance of a subclass of Location) named
        by ``uid``. The UID should be in the form ``model:id``, as
        returned by the Location.uid property.
        """
        model, pk = uid.split(":")
        type = ContentType.objects.get(model=model)
        return type.get_object_for_this_type(pk=pk)

    @staticmethod
    def subclasses():
        """
        Return a list of all known subclasses of Location.
        """
        return [
            cls
            for cls in models.loading.get_models()
            if issubclass(cls, Location) and\
                (cls is not Location)]

    @property
    def path(self):
        # Chain of locations from the root ancestor down to self (inclusive).
        next = self
        locations = []
        while next is not None:
            locations.insert(0, next)
            next = next.parent
        return locations

    def as_html(self):
        """
        Return the HTML fragment to be embedded in the map. This method
        should be overridden by subclasses wishing to fully customize
        the the rendering of their instance in the map.
        The output of this method is not escaped before being included
        in the template, so be careful to escape it yourself.
        """
        return escape(self.label)

    @property
    def label(self):
        """
        Return the caption for this Location, to be embedded in the
        map. This method should be overridden by subclasses wishing to
        provide better contextual information.
        The output of this method is included in the template as-is, so
        is HTML-escaped by default. If you wish to customize the HTML,
        override the ``as_html`` method, instead.
        """
        return unicode(self)

    @property
    def css_class(self):
        """
        Return the CSS class name of the label overlay. This method
        should be overriden by subclasses wishing to customize the
        appearance of the embedded HTML fragment.
        """
        return "bubble"

    @property
    def direction(self):
        """
        Return the direction which the embedded HTML fragment should be
        offset from the anchor point. Return one of Location.Direction.
        """
        return self.Direction.ABOVE
#class Country(Location):
# name = models.CharField(max_length=100)
# iso_code = models.CharField("ISO Code", max_length=2)
# class Meta:
# verbose_name_plural = "countries"
# @property
# def label(self):
# return self.iso_code.upper()
#class State(Location):
# name = models.CharField(max_length=100)
# usps_code = models.CharField("USPS Code", max_length=2,
# help_text="The two-letter state abbreviation")
# @property
# def label(self):
# return self.usps_code.upper()
#class City(Location):
# name = models.CharField(max_length=100)
# class Meta:
# verbose_name_plural = "cities"
|
<gh_stars>1-10
__author__ = 'max'
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..nn import ChainCRF, VarMaskedGRU, VarMaskedRNN, VarMaskedLSTM
from ..nn import Embedding
from ..nn import utils
class BiRecurrentConv(nn.Module):
    """
    Sequence labeler: word embeddings concatenated with character-level CNN
    features, fed into a bidirectional RNN, then an optional dense ("tag
    space") projection and a per-token softmax classifier.
    """

    def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,
                 tag_space=0, embedd_word=None, embedd_char=None, p_in=0.33, p_out=0.5, p_rnn=(0.5, 0.5), initializer=None):
        super(BiRecurrentConv, self).__init__()

        self.word_embedd = Embedding(num_words, word_dim, init_embedding=embedd_word)
        self.char_embedd = Embedding(num_chars, char_dim, init_embedding=embedd_char)
        # Character CNN; padding = kernel_size - 1 guarantees at least one
        # valid window even for very short words.
        self.conv1d = nn.Conv1d(char_dim, num_filters, kernel_size, padding=kernel_size - 1)
        # dropout word
        self.dropout_in = nn.Dropout2d(p=p_in)
        # standard dropout
        self.dropout_rnn_in = nn.Dropout(p=p_rnn[0])
        self.dropout_out = nn.Dropout(p_out)

        if rnn_mode == 'RNN':
            RNN = nn.RNN
        elif rnn_mode == 'LSTM':
            RNN = nn.LSTM
        elif rnn_mode == 'GRU':
            RNN = nn.GRU
        else:
            raise ValueError('Unknown RNN mode: %s' % rnn_mode)

        self.rnn = RNN(word_dim + num_filters, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=True, dropout=p_rnn[1])

        # Optional projection from the RNN output into a smaller tag space.
        self.dense = None
        out_dim = hidden_size * 2
        if tag_space:
            self.dense = nn.Linear(out_dim, tag_space)
            out_dim = tag_space
        self.dense_softmax = nn.Linear(out_dim, num_labels)

        self.logsoftmax = nn.LogSoftmax(dim=1)
        # NOTE(review): size_average/reduce are deprecated in newer torch
        # (this combination is equivalent to reduction='none') — confirm
        # the torch version this project targets.
        self.nll_loss = nn.NLLLoss(size_average=False, reduce=False)
        self.initializer = initializer
        self.reset_parameters()

    def reset_parameters(self):
        """Apply ``initializer`` to non-embedding weights; zero 1-D params (biases)."""
        if self.initializer is None:
            return

        for name, parameter in self.named_parameters():
            # Skip embedding tables; they are initialized by Embedding itself.
            if name.find('embedd') == -1:
                if parameter.dim() == 1:
                    parameter.data.zero_()
                else:
                    self.initializer(parameter.data)

    def _get_rnn_output(self, input_word, input_char, mask=None, length=None, hx=None):
        """Run embeddings + char-CNN + RNN; return (output, hn, mask, length)."""
        # hack length from mask
        # we do not hack mask from length for special reasons.
        # Thus, always provide mask if it is necessary.
        if length is None and mask is not None:
            length = mask.data.sum(dim=1).long()

        # [batch, length, word_dim]
        word = self.word_embedd(input_word)
        # [batch, length, char_length, char_dim]
        char = self.char_embedd(input_char)
        char_size = char.size()
        # first transform to [batch *length, char_length, char_dim]
        # then transpose to [batch * length, char_dim, char_length]
        char = char.view(char_size[0] * char_size[1], char_size[2], char_size[3]).transpose(1, 2)
        # put into cnn [batch*length, char_filters, char_length]
        # then put into maxpooling [batch * length, char_filters]
        char, _ = self.conv1d(char).max(dim=2)
        # reshape to [batch, length, char_filters]
        char = torch.tanh(char).view(char_size[0], char_size[1], -1)

        # apply dropout word on input
        word = self.dropout_in(word)
        char = self.dropout_in(char)

        # concatenate word and char [batch, length, word_dim+char_filter]
        input = torch.cat([word, char], dim=2)
        # apply dropout rnn input
        input = self.dropout_rnn_in(input)
        # prepare packed_sequence
        if length is not None:
            seq_input, hx, rev_order, mask = utils.prepare_rnn_seq(input, length, hx=hx, masks=mask, batch_first=True)
            seq_output, hn = self.rnn(seq_input, hx=hx)
            output, hn = utils.recover_rnn_seq(seq_output, rev_order, hx=hn, batch_first=True)
        else:
            # output from rnn [batch, length, hidden_size]
            output, hn = self.rnn(input, hx=hx)
        # apply dropout for the output of rnn
        output = self.dropout_out(output)

        if self.dense is not None:
            # [batch, length, tag_space]
            output = self.dropout_out(F.elu(self.dense(output)))

        return output, hn, mask, length

    def forward(self, input_word, input_char, mask=None, length=None, hx=None):
        """Return (features, mask, length); features feed the classifier."""
        # output from rnn [batch, length, tag_space]
        output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)
        return output, mask, length

    def loss(self, input_word, input_char, target, mask=None, length=None, hx=None, leading_symbolic=0):
        """
        Return (mean NLL loss, number of correct predictions, predictions).
        ``leading_symbolic`` labels (e.g. PAD/UNK) are excluded from argmax.
        """
        # [batch, length, tag_space]
        output, mask, length = self.forward(input_word, input_char, mask=mask, length=length, hx=hx)
        # [batch, length, num_labels]
        output = self.dense_softmax(output)
        # preds = [batch, length]
        _, preds = torch.max(output[:, :, leading_symbolic:], dim=2)
        preds += leading_symbolic

        output_size = output.size()
        # [batch * length, num_labels]
        output_size = (output_size[0] * output_size[1], output_size[2])
        output = output.view(output_size)

        # Trim target to the padded maximum length when sequences were packed.
        if length is not None and target.size(1) != mask.size(1):
            max_len = length.max()
            target = target[:, :max_len].contiguous()

        if mask is not None:
            return (self.nll_loss(self.logsoftmax(output), target.view(-1)) * mask.contiguous().view(-1)).sum() / mask.sum(), \
                (torch.eq(preds, target).type_as(mask) * mask).sum(), preds
        else:
            num = output_size[0] * output_size[1]
            return self.nll_loss(self.logsoftmax(output), target.view(-1)).sum() / num, \
                (torch.eq(preds, target).type_as(output)).sum(), preds
class BiVarRecurrentConv(BiRecurrentConv):
    """
    Variant of BiRecurrentConv that replaces the standard RNN with the
    variational-dropout VarMasked* RNNs and uses 2-D (feature-wise)
    dropout on the RNN output.
    """

    def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,
                 tag_space=0, embedd_word=None, embedd_char=None, p_in=0.33, p_out=0.33, p_rnn=(0.33, 0.33), initializer=None):
        super(BiVarRecurrentConv, self).__init__(word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,
                                                 tag_space=tag_space, embedd_word=embedd_word, embedd_char=embedd_char,
                                                 p_in=p_in, p_out=p_out, p_rnn=p_rnn, initializer=initializer)

        # Variational RNNs apply their own (variational) input dropout.
        self.dropout_rnn_in = None
        self.dropout_out = nn.Dropout2d(p_out)

        if rnn_mode == 'RNN':
            RNN = VarMaskedRNN
        elif rnn_mode == 'LSTM':
            RNN = VarMaskedLSTM
        elif rnn_mode == 'GRU':
            RNN = VarMaskedGRU
        else:
            raise ValueError('Unknown RNN mode: %s' % rnn_mode)

        # Replaces the parent's nn.RNN; takes the full p_rnn tuple.
        self.rnn = RNN(word_dim + num_filters, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=True, dropout=p_rnn, initializer=self.initializer)

    def _get_rnn_output(self, input_word, input_char, mask=None, length=None, hx=None):
        """Like the parent's, but passes the mask straight to the VarMasked RNN."""
        # [batch, length, word_dim]
        word = self.word_embedd(input_word)
        # [batch, length, char_length, char_dim]
        char = self.char_embedd(input_char)
        char_size = char.size()
        # first transform to [batch *length, char_length, char_dim]
        # then transpose to [batch * length, char_dim, char_length]
        char = char.view(char_size[0] * char_size[1], char_size[2], char_size[3]).transpose(1, 2)
        # put into cnn [batch*length, char_filters, char_length]
        # then put into maxpooling [batch * length, char_filters]
        char, _ = self.conv1d(char).max(dim=2)
        # reshape to [batch, length, char_filters]
        char = torch.tanh(char).view(char_size[0], char_size[1], -1)

        # apply dropout word on input
        word = self.dropout_in(word)
        char = self.dropout_in(char)

        # concatenate word and char [batch, length, word_dim+char_filter]
        input = torch.cat([word, char], dim=2)
        # output from rnn [batch, length, hidden_size]
        output, hn = self.rnn(input, mask, hx=hx)

        # apply dropout for the output of rnn
        # [batch, length, hidden_size] --> [batch, hidden_size, length] --> [batch, length, hidden_size]
        output = self.dropout_out(output.transpose(1, 2)).transpose(1, 2)
        if self.dense is not None:
            # [batch, length, tag_space] --> [batch, tag_space, length] --> [batch, length, tag_space]
            output = self.dropout_out(F.elu(self.dense(output)).transpose(1, 2)).transpose(1, 2)

        return output, hn, mask, length
class BiRecurrentConvCRF(BiRecurrentConv):
    """
    BiRecurrentConv with a chain-CRF output layer instead of per-token
    softmax; the softmax/NLL components of the parent are disabled.
    """

    def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,
                 tag_space=0, embedd_word=None, embedd_char=None, p_in=0.33, p_out=0.5, p_rnn=(0.5, 0.5), bigram=False, initializer=None):
        super(BiRecurrentConvCRF, self).__init__(word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,
                                                 tag_space=tag_space, embedd_word=embedd_word, embedd_char=embedd_char,
                                                 p_in=p_in, p_out=p_out, p_rnn=p_rnn, initializer=initializer)

        out_dim = tag_space if tag_space else hidden_size * 2
        self.crf = ChainCRF(out_dim, num_labels, bigram=bigram)
        # The CRF replaces the parent's softmax classifier entirely.
        self.dense_softmax = None
        self.logsoftmax = None
        self.nll_loss = None

    def forward(self, input_word, input_char, mask=None, length=None, hx=None):
        """Return (CRF potentials, mask)."""
        # output from rnn [batch, length, tag_space]
        output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)
        # [batch, length, num_label, num_label]
        return self.crf(output, mask=mask), mask

    def loss(self, input_word, input_char, target, mask=None, length=None, hx=None, leading_symbolic=0):
        """Return the mean CRF negative log-likelihood over the batch."""
        # output from rnn [batch, length, tag_space]
        output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)

        if length is not None:
            max_len = length.max()
            target = target[:, :max_len]

        # [batch, length, num_label, num_label]
        return self.crf.loss(output, target, mask=mask).mean()

    def decode(self, input_word, input_char, target=None, mask=None, length=None, hx=None, leading_symbolic=0):
        """
        Viterbi-decode label sequences; when ``target`` is given, also
        return the (mask-weighted) count of correct predictions.
        """
        # output from rnn [batch, length, tag_space]
        output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)

        if target is None:
            return self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic), None

        if length is not None:
            max_len = length.max()
            target = target[:, :max_len]

        preds = self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic)
        if mask is None:
            return preds, torch.eq(preds, target.data).float().sum()
        else:
            return preds, (torch.eq(preds, target.data).float() * mask.data).sum()
class BiVarRecurrentConvCRF(BiVarRecurrentConv):
    """
    Variational-dropout RNN encoder with a chain-CRF output layer.

    NOTE(review): the method bodies duplicate BiRecurrentConvCRF verbatim;
    only the encoder base class differs. A shared CRF mixin would remove
    the duplication.
    """

    def __init__(self, word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,
                 tag_space=0, embedd_word=None, embedd_char=None, p_in=0.33, p_out=0.33, p_rnn=(0.33, 0.33), bigram=False, initializer=None):
        super(BiVarRecurrentConvCRF, self).__init__(word_dim, num_words, char_dim, num_chars, num_filters, kernel_size, rnn_mode, hidden_size, num_layers, num_labels,
                                                    tag_space=tag_space, embedd_word=embedd_word, embedd_char=embedd_char,
                                                    p_in=p_in, p_out=p_out, p_rnn=p_rnn, initializer=initializer)

        out_dim = tag_space if tag_space else hidden_size * 2
        self.crf = ChainCRF(out_dim, num_labels, bigram=bigram)
        # The CRF replaces the parent's softmax classifier entirely.
        self.dense_softmax = None
        self.logsoftmax = None
        self.nll_loss = None

    def forward(self, input_word, input_char, mask=None, length=None, hx=None):
        """Return (CRF potentials, mask)."""
        # output from rnn [batch, length, tag_space]
        output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)
        # [batch, length, num_label, num_label]
        return self.crf(output, mask=mask), mask

    def loss(self, input_word, input_char, target, mask=None, length=None, hx=None, leading_symbolic=0):
        """Return the mean CRF negative log-likelihood over the batch."""
        # output from rnn [batch, length, tag_space]
        output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)

        if length is not None:
            max_len = length.max()
            target = target[:, :max_len]

        # [batch, length, num_label, num_label]
        return self.crf.loss(output, target, mask=mask).mean()

    def decode(self, input_word, input_char, target=None, mask=None, length=None, hx=None, leading_symbolic=0):
        """
        Viterbi-decode label sequences; when ``target`` is given, also
        return the (mask-weighted) count of correct predictions.
        """
        # output from rnn [batch, length, tag_space]
        output, _, mask, length = self._get_rnn_output(input_word, input_char, mask=mask, length=length, hx=hx)

        if target is None:
            return self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic), None

        if length is not None:
            max_len = length.max()
            target = target[:, :max_len]

        preds = self.crf.decode(output, mask=mask, leading_symbolic=leading_symbolic)
        if mask is None:
            return preds, torch.eq(preds, target.data).float().sum()
        else:
            return preds, (torch.eq(preds, target.data).float() * mask.data).sum()
|
""" Protocol Test Helpers """
from contextlib import contextmanager
from typing import Dict, Iterable, Union
import copy
import json
import uuid
from aries_staticagent import StaticConnection, Message, Module, crypto
from voluptuous import Any
from .backchannel import Backchannel
from .provider import Provider
from .schema import MessageSchema
def _recipients_from_packed_message(packed_message: bytes) -> Iterable[str]:
    """
    Inspect the header of the packed message and extract the recipient key.
    """
    try:
        envelope = json.loads(packed_message)
    except Exception as err:
        raise ValueError("Invalid packed message") from err

    # The "protected" header is base64url-encoded JSON.
    protected_json = crypto.b64_to_bytes(
        envelope["protected"], urlsafe=True
    ).decode("ascii")
    try:
        protected = json.loads(protected_json)
    except Exception as err:
        raise ValueError("Invalid packed message recipients") from err

    return map(
        lambda entry: entry['header']['kid'], protected['recipients']
    )
class Suite:
    """
    Manage connections to agent under test.
    The Channel Manager itself is a static connection to the test subject
    allowing it to be used as the backchannel.
    """

    TYPE_PREFIX = 'https://didcomm.org/'
    ALT_TYPE_PREFIX = 'did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/'

    def __init__(self):
        # Map of base58-encoded verkey -> front-channel StaticConnection.
        self.frontchannels: Dict[str, StaticConnection] = {}
        self._backchannel = None
        self._provider = None
        # Reply handler installed temporarily via the `reply` context manager.
        self._reply = None

    @property
    def backchannel(self):
        """Return a reference to the backchannel (self)."""
        return self._backchannel

    def set_backchannel(self, backchannel: Backchannel):
        """Set backchannel."""
        self._backchannel = backchannel

    @property
    def provider(self):
        """Return a reference to the provider (self)."""
        return self._provider

    def set_provider(self, provider: Provider):
        """Set provider."""
        self._provider = provider

    @contextmanager
    def reply(self, handler):
        """Handle potential to reply."""
        self._reply = handler
        try:
            yield
        finally:
            # Fix: clear the handler even if the with-body raises, so a
            # failed test cannot leak its reply handler into later messages.
            self._reply = None

    async def handle(self, packed_message: bytes):
        """
        Route an incoming message the appropriate frontchannels.

        Raises RuntimeError when no registered frontchannel matches any
        recipient key in the message.
        """
        # TODO messages in plaintext cannot be routed
        handled = False
        for recipient in _recipients_from_packed_message(packed_message):
            if recipient in self.frontchannels:
                conn = self.frontchannels[recipient]
                with conn.reply_handler(self._reply):
                    await conn.handle(packed_message)
                handled = True

        if not handled:
            raise RuntimeError('Inbound message was not handled')

    def new_frontchannel(
            self,
            *,
            their_vk: Union[bytes, str] = None,
            recipients: [Union[bytes, str]] = None,
            routing_keys: [Union[bytes, str]] = None,
            endpoint: str = None) -> StaticConnection:
        """
        Create a new connection and add it as a frontchannel.
        Args:
            fc_vk: The new frontchannel's verification key
            fc_sk: The new frontchannel's signing key
            their_vk: The test subject's verification key for this channel
            endpoint: The HTTP URL to the endpoint of the test subject.
        Returns:
            Returns the new front channel (static connection).
        """
        fc_keys = crypto.create_keypair()
        new_fc = StaticConnection(
            fc_keys,
            their_vk=their_vk,
            endpoint=endpoint,
            recipients=recipients,
            routing_keys=routing_keys
        )
        frontchannel_index = crypto.bytes_to_b58(new_fc.verkey)
        self.frontchannels[frontchannel_index] = new_fc
        return new_fc

    def add_frontchannel(self, connection: StaticConnection):
        """Add an already created connection as a frontchannel."""
        frontchannel_index = crypto.bytes_to_b58(connection.verkey)
        self.frontchannels[frontchannel_index] = connection

    def remove_frontchannel(self, connection: StaticConnection):
        """
        Remove a frontchannel.
        Args:
            fc_vk: The frontchannel's verification key
        """
        frontchannel_index = crypto.bytes_to_b58(connection.verkey)
        if frontchannel_index in self.frontchannels:
            del self.frontchannels[frontchannel_index]

    @contextmanager
    def temporary_channel(
            self,
            *,
            their_vk: Union[bytes, str] = None,
            recipients: [Union[bytes, str]] = None,
            routing_keys: [Union[bytes, str]] = None,
            endpoint: str = None) -> StaticConnection:
        """Use 'with' statement to use a temporary channel."""
        channel = self.new_frontchannel(
            their_vk=their_vk, endpoint=endpoint, recipients=recipients,
            routing_keys=routing_keys
        )
        try:
            yield channel
        finally:
            # Fix: guarantee removal even when the with-body raises.
            self.remove_frontchannel(channel)
async def interrupt(generator, on: str = None):  # pylint: disable=invalid-name
    """Yield from protocol generator until yielded event matches on."""
    async for item in generator:
        head, *tail = item
        yield [head, *tail]
        # Truthiness check kept on purpose: an empty/None `on` never stops.
        if on and head == on:
            break
async def yield_messages(generator):
    """Yield only the event and messages from generator."""
    async for event, *data in generator:
        # Keep only the Message instances from the yielded payload.
        messages = [item for item in data if isinstance(item, Message)]
        yield [event, *messages]
async def collect_messages(generator):
    """Executor for protocol generators, returning all yielded messages."""
    collected = []
    async for _event, yielded in yield_messages(generator):
        # Must deep copy to get an accurate snapshot of the data
        # at the time it was yielded.
        collected += [copy.deepcopy(item) for item in yielded]
    return collected
async def event_message_map(generator):
    """
    Executor for protocol generators, returning map of event to the yielded
    messages for that event.
    """
    result = {}
    async for event, *messages in yield_messages(generator):
        # Must deep copy to get an accurate snapshot of the data
        # at the time it was yielded.
        result[event] = [copy.deepcopy(message) for message in messages]
    return result
async def event_data_map(generator):
    """
    Executor for protocol generators, returning map of event to the yielded
    data for that event.
    """
    return {event: data async for event, *data in generator}
async def last(generator):
    """
    Executor for protocol generators, returning the last yielded value.

    Returns the lone datum when the final yield carried exactly one value,
    the list of data otherwise, and None for an empty generator (the
    original raised TypeError on ``len(None)`` in that case).
    """
    last_data = None
    async for _event, *data in generator:
        last_data = data
    if last_data is None:
        # Fix: nothing was yielded; avoid len(None) TypeError.
        return None
    if len(last_data) == 1:
        return last_data[0]
    return last_data
async def run(generator):
    """
    Executor for protocol generators that simply runs the generator to
    completion.
    """
    # Exhaust the generator; the yielded values are intentionally ignored.
    async for _ in generator:
        pass
class BaseHandler(Module):
    """
    Base protocol handler to handle common tasks across all protocols such as thread decorators.
    """

    # Placeholder identifiers; concrete protocol handlers override these.
    DOC_URI = "null_DOC_URI"
    PROTOCOL = "null_PROTOCOL"
    VERSION = "null_VERSION"

    def __init__(self):
        super().__init__()
        self.reset()

    def reset(self):
        """Clear both threading state and recorded events."""
        self.reset_thread_state()
        self.reset_events()

    def reset_thread_state(self):
        # Active thread id; None until a thread is started or observed.
        self.thid = None
        # sender_order of the last message sent; -1 means nothing sent yet.
        self.sender_order = -1
        self.received_orders = {}

    def reset_events(self):
        self.events = []
        self.attrs = None

    def add_event(self, name):
        self.events.append(name)

    def assert_event(self, name):
        assert name in self.events

    def verify_msg(self, typ, msg, conn, pid, schema, alt_pid=None):
        """
        Verify transport security and schema of a received message, then
        fold it into the handler's thread-tracking state.
        """
        assert msg.mtc.is_authcrypted()
        assert msg.mtc.sender == crypto.bytes_to_b58(conn.recipients[0])
        assert msg.mtc.recipient == conn.verkey_b58
        # Accept the message type under either the primary or alternate pid.
        schema['@type'] = Any("{}/{}".format(pid, typ), "{}/{}".format(pid if not alt_pid else alt_pid, typ))
        schema['@id'] = str
        msg_schema = MessageSchema(schema)
        msg_schema(msg)
        self._received_msg(msg, conn)

    async def send_async(self, msg, conn):
        """Send ``msg`` over ``conn``; return the assigned "@id"."""
        id = self._prepare_to_send_msg(msg)
        await conn.send_async(msg)
        return id

    async def send_and_await_reply_async(self, msg, conn):
        """Send ``msg`` and return the test subject's reply."""
        self._prepare_to_send_msg(msg)
        return await conn.send_and_await_reply_async(msg)

    def _received_msg(self, msg, conn):
        """
        Validate the ~thread decorator of a received message against the
        handler's current thread and record the thread id.
        Raises RuntimeError on thread-continuity violations.
        """
        msgId = msg["@id"]
        # Per the threading RFC, a message without thid starts its own thread.
        thid = msgId
        # NOTE(review): senderId/senderOrder/receivedOrders are extracted but
        # currently unused — presumably reserved for stricter order checking.
        senderId = conn.verkey_b58
        senderOrder = 0
        receivedOrders = {}
        foundThid = False
        if "~thread" in msg:
            thread = msg["~thread"]
            if "thid" in thread:
                thid = thread["thid"]
                foundThid = True
            if "sender_order" in thread:
                senderOrder = thread["sender_order"]
            if "received_orders" in thread:
                receivedOrders = thread["received_orders"]
        if self.thid:
            if not foundThid:
                raise RuntimeError(
                    'Received message without a ~thread.thid field but is a continuation of thread "{}"; message: {}'.format(self.thid, msg))
            if not self.thid == thid:
                raise RuntimeError(
                    'Received message and was expecting ~thread.thid to be "{}" but found "{}"; message: {}'.format(self.thid, thid, msg))
        elif not msgId == thid:
            raise RuntimeError(
                'There is no existing thread but received a message in which "@id" and "~thread.thid" fields differ; message: {}'.format(msg))
        self.thid = thid

    def _prepare_to_send_msg(self, msg):
        """Assign "@id" if missing, bump sender_order, attach ~thread."""
        if not "@id" in msg:
            msg["@id"] = self.make_uuid()
        id = msg["@id"]
        self.sender_order += 1
        if self.thid:
            # Continuing an existing thread: decorate with threading info.
            msg["~thread"] = {
                "thid": self.thid,
                "sender_order": self.sender_order,
                "received_orders": self.received_orders
            }
        else:
            # First message of a new thread: its @id becomes the thread id.
            self.thid = id
        return id

    def make_uuid(self) -> str:
        # uuid4().urn is "urn:uuid:<hex>"; strip the 9-char "urn:uuid:" prefix.
        return uuid.uuid4().urn[9:]
|
import argparse, pulp
import networkx as nx
import numpy as np
import random
def gen_graph(max_n, min_n, g_type='erdos_renyi', edge=4):
    """
    Generate a random graph with uniform-random edge weights in [0, 1).

    Args:
        max_n, min_n: inclusive bounds for the (random) vertex count.
        g_type: one of 'erdos_renyi', 'powerlaw', 'barabasi_albert',
            'watts_strogatz'.
        edge: edges-per-node parameter for barabasi_albert.

    Returns:
        A networkx graph whose edges carry a 'weight' attribute.

    Raises:
        ValueError: for an unknown ``g_type`` (previously fell through with
        g=None and crashed obscurely in nx.edges).
    """
    # choose a random number of vertices in the graph between max_n and min_n
    cur_n = np.random.randint(max_n - min_n + 1) + min_n
    # create the right corresponding type of graph
    if g_type == 'erdos_renyi':
        g = nx.erdos_renyi_graph(n=cur_n, p=0.15)
    elif g_type == 'powerlaw':
        g = nx.powerlaw_cluster_graph(n=cur_n, m=4, p=0.05)
    elif g_type == 'barabasi_albert':
        g = nx.barabasi_albert_graph(n=cur_n, m=edge)
    elif g_type == 'watts_strogatz':
        g = nx.watts_strogatz_graph(n=cur_n, k=cur_n // 10, p=0.1)
    else:
        raise ValueError('Unknown graph type: %r' % (g_type,))
    # randomly give weights to the graph
    # (loop variable renamed: it previously shadowed the `edge` parameter)
    for u, v in nx.edges(g):
        g[u][v]['weight'] = random.uniform(0, 1)
    return g
def getEdgeVar(v1, v2, vert):
    """Return (creating on first use) the binary LP variable for edge (v1, v2).

    The endpoint pair is normalized to (min, max) so each undirected edge
    maps to a single variable in the cache ``vert``.
    """
    key = (min(v1, v2), max(v1, v2))
    if key not in vert:
        vert[key] = pulp.LpVariable('u%d_%d' % key, 0, 1, pulp.LpInteger)
    return vert[key]
def getNodeVar(v, node):
    """Return (creating on first use) the binary LP variable for node ``v``."""
    if v not in node:
        node[v] = pulp.LpVariable('v%d' % v, 0, 1, pulp.LpInteger)
    return node[v]
def createOpt(G):
    """Build the max-cut MILP for weighted graph ``G``.

    An edge counts toward the objective only when its endpoints land on
    opposite sides of the cut, enforced by the pair of linear constraints.
    """
    prob = pulp.LpProblem('MILP Maximum Cut', pulp.LpMinimize)
    edgeVar = {}
    nodeVar = {}
    obj = 0
    for v1, v2 in G.edges():
        cut = getEdgeVar(v1, v2, edgeVar)
        side1 = getNodeVar(v1, nodeVar)
        side2 = getNodeVar(v2, nodeVar)
        # cut can be 1 only when exactly one endpoint is 1.
        prob.addConstraint(cut <= side1 + side2)
        prob.addConstraint(cut <= 2 - side1 - side2)
        obj += G[v1][v2]['weight'] * cut
    prob.setObjective(-1 * obj)  # Note that this is LpMinimum
    return prob
def cmdLineParser():
    '''
    Command Line Parser.
    '''
    arg_parser = argparse.ArgumentParser(description='Minimum Vertex Cover')
    arg_parser.add_argument('-o', dest='outPrefix', type=str, action='store',
                            default=None, help='Output Prefix')
    arg_parser.add_argument('-g', dest='g_type', type=str, action='store',
                            default='erdos_renyi', help='Graph type')
    arg_parser.add_argument('-max_n', dest='max_n', type=int, action='store',
                            default=700, help='max number of nodes')
    arg_parser.add_argument('-min_n', dest='min_n', type=int, action='store',
                            default=500, help='min number of nodes')
    return arg_parser.parse_args()
def generateInstance(max_n, min_n, g_type, edge, outPrefix=None):
    """Generate one random graph + MILP, optionally writing both to disk.

    When ``outPrefix`` is given, the graph goes to a .gpickle file (with
    'lpfiles' in the path swapped for 'gpickle') and the model to a .lp file.
    """
    G = gen_graph(max_n, min_n, g_type, edge)
    P = createOpt(G)
    # Fix: identity comparison with None (`!= None` -> `is not None`).
    if outPrefix is not None:
        # Write out
        nx.write_gpickle(G, outPrefix.replace('lpfiles', 'gpickle') + '.gpickle')
        P.writeLP(outPrefix + '.lp')
def main():
    """Entry point: parse args, build one instance, optionally write it out."""
    args = cmdLineParser()
    G = gen_graph(args.max_n, args.min_n, args.g_type)
    P = createOpt(G)
    # Fix: identity comparison with None (`!= None` -> `is not None`).
    if args.outPrefix is not None:
        # Write out
        nx.write_gpickle(G, args.outPrefix + '.gpickle')
        P.writeLP(args.outPrefix + '.lp')

if __name__ == '__main__':
    main()
|
<reponame>jianzhangcs/DenoiSeg
import numpy as np
from numba import jit
from scipy import ndimage
from tqdm import tqdm, tqdm_notebook
@jit
def pixel_sharing_bipartite(lab1, lab2):
    """Return the label-overlap matrix: psg[i, j] = #pixels with label i in
    lab1 and label j in lab2 (label 0 is background)."""
    assert lab1.shape == lab2.shape
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the concrete np.int64 dtype instead.
    psg = np.zeros((lab1.max() + 1, lab2.max() + 1), dtype=np.int64)
    for i in range(lab1.size):
        psg[lab1.flat[i], lab2.flat[i]] += 1
    return psg
def intersection_over_union(psg):
    """
    Computes IOU.
    Entry (i, j) is |A_i ∩ B_j| / |A_i ∪ B_j| derived from the
    pixel-sharing bipartite matrix ``psg``.
    :Authors:
        <NAME>
    """
    sizes_b = np.sum(psg, 0, keepdims=True)
    sizes_a = np.sum(psg, 1, keepdims=True)
    union = sizes_a + sizes_b - psg
    return psg / union
def matching_iou(psg, fraction=0.5):
    """
    Computes the IoU-based matching between the two labelings of ``psg``.

    Bug fix: the ``fraction`` parameter was previously ignored and 0.5
    hard-coded, so callers passing a custom IoU threshold (e.g. via
    measure_precision(iou=...)) silently got 0.5.

    Background (row/column 0) is excluded from the matching.
    :Authors:
        <NAME>
    """
    iou = intersection_over_union(psg)
    matching = iou > fraction
    matching[:, 0] = False
    matching[0, :] = False
    return matching
def measure_precision(iou=0.5, partial_dataset=False):
    """Return a precision(lab_gt, lab) metric closure with IoU threshold ``iou``."""
    def precision(lab_gt, lab, iou=iou, partial_dataset=partial_dataset):
        """
        precision = TP / (TP + FP + FN) i.e. "intersection over union" for a graph matching
        :Authors:
            <NAME>
        """
        psg = pixel_sharing_bipartite(lab_gt, lab)
        matching = matching_iou(psg, fraction=iou)
        # Sanity: a true matching — no GT or predicted label matched twice.
        assert matching.sum(0).max() < 2
        assert matching.sum(1).max() < 2
        # Background label 0 is excluded from the object counts.
        n_gt = len(set(np.unique(lab_gt)) - {0})
        n_hyp = len(set(np.unique(lab)) - {0})
        n_matched = matching.sum()
        if partial_dataset:
            # Return numerator and denominator separately so scores can be
            # aggregated across dataset chunks before dividing.
            return n_matched, (n_gt + n_hyp - n_matched)
        else:
            return n_matched / (n_gt + n_hyp - n_matched)

    return precision
def matching_overlap(psg, fractions=(0.5, 0.5)):
    """
    create a matching given pixel_sharing_bipartite of two label images based on mutually overlapping regions of sufficient size.
    NOTE: a true matching is only gauranteed for fractions > 0.5. Otherwise some cells might have deg=2 or more.
    NOTE: doesnt break when the fraction of pixels matching is a ratio only slightly great than 0.5? (but rounds to 0.5 with float64?)
    """
    afrac, bfrac = fractions
    row_totals = np.sum(psg, axis=1, keepdims=True)
    col_totals = np.sum(psg, axis=0, keepdims=True)
    # Fraction of each region's pixels shared with the other labeling;
    # empty rows/columns are mapped to 0 to avoid division artifacts.
    row_frac = np.where(row_totals == 0, 0, psg / row_totals)
    col_frac = np.where(col_totals == 0, 0, psg / col_totals)
    matched = (row_frac > afrac) * (col_frac > bfrac)
    return matched.astype('bool')
def measure_seg(partial_dataset=False):
    """Return a seg(lab_gt, lab) metric closure (SEG score)."""
    def seg(lab_gt, lab, partial_dataset=partial_dataset):
        """
        calculate seg from pixel_sharing_bipartite
        seg is the average conditional-iou across ground truth cells
        conditional-iou gives zero if not in matching
        ----
        calculate conditional intersection over union (CIoU) from matching & pixel_sharing_bipartite
        for a fraction > 0.5 matching. Any CIoU between matching pairs will be > 1/3. But there may be some
        IoU as low as 1/2 that don't match, and thus have CIoU = 0.
        """
        psg = pixel_sharing_bipartite(lab_gt, lab)
        iou = intersection_over_union(psg)
        # One-sided overlap criterion: GT region must be covered > 50%.
        matching = matching_overlap(psg, fractions=(0.5, 0))
        # Exclude background (label 0) from the matching.
        matching[0, :] = False
        matching[:, 0] = False
        n_gt = len(set(np.unique(lab_gt)) - {0})
        # Sum of IoUs over matched pairs; unmatched GT cells contribute 0.
        n_matched = iou[matching].sum()
        if partial_dataset:
            # Return numerator and denominator separately for aggregation.
            return n_matched, n_gt
        else:
            return n_matched / n_gt

    return seg
def isnotebook():
    """
    Checks if code is run in a notebook, which can be useful to determine what sort of progressbar to use.
    https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook/24937408#24937408
    Returns
    -------
    bool
        True if running in notebook else False.
    """
    try:
        # get_ipython is injected by IPython; absent in plain Python.
        shell_name = get_ipython().__class__.__name__
    except NameError:
        return False
    # Only the ZMQ shell (Jupyter notebook / qtconsole) counts as a notebook;
    # TerminalInteractiveShell and anything else do not.
    return shell_name == 'ZMQInteractiveShell'
def compute_labels(prediction, threshold):
    """Threshold the foreground softmax of a (H, W, C) prediction and return
    connected-component labels (background channel 0 is discarded)."""
    class_scores = prediction[..., 1:]
    exps = np.exp(class_scores)
    softmax = exps / exps.sum(axis=2, keepdims=True)
    foreground = softmax[..., 1]
    labels, _ = ndimage.label(foreground > threshold)
    return labels
|
<filename>compiler/characterizer/measurements.py
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
from tech import drc, parameter, spice
from abc import ABC, abstractmethod
from .stimuli import *
from .charutils import *
class spice_measurement(ABC):
    """Base class for spice stimulus measurements.

    Subclasses supply the stimuli function that writes the measurement
    (get_measure_function) and the argument tuple for it
    (get_measure_values). Fix vs original: comparisons to None now use
    ``is``/``is not`` per PEP 8 instead of ``==``/``!=``.
    """

    def __init__(self, measure_name, measure_scale=None, has_port=True):
        # Names must be unique for correct spice simulation, but not
        # enforced here.
        self.name = measure_name
        # Multiplier applied to the parsed result (e.g. unit conversion);
        # None means return the raw value.
        self.measure_scale = measure_scale
        self.has_port = has_port  # Needed for error checking
        # Some meta values used externally. Variables are added here for
        # consistency across the objects.
        self.meta_str = None
        self.meta_add_delay = False

    @abstractmethod
    def get_measure_function(self):
        """Return the stimuli function that writes this measurement."""
        return None

    @abstractmethod
    def get_measure_values(self):
        """Return the argument tuple passed to the measure function."""
        return None

    def write_measure(self, stim_obj, input_tuple):
        """Write this measurement to the stimulus object using the
        subclass-provided function and variant inputs."""
        measure_func = self.get_measure_function()
        if measure_func is None:
            debug.error("Did not set measure function", 1)
        measure_vals = self.get_measure_values(*input_tuple)
        measure_func(stim_obj, *measure_vals)

    def retrieve_measure(self, port=None):
        """Parse this measurement's value from the simulator output,
        scaling it by measure_scale when a numeric value was found."""
        self.port_error_check(port)
        if port is not None:
            value = parse_spice_list("timing", "{0}{1}".format(self.name.lower(), port))
        else:
            value = parse_spice_list("timing", "{0}".format(self.name.lower()))
        # Non-float results (e.g. parse failures) are returned unscaled.
        # NOTE(review): exact type check kept from original in case
        # parse_spice_list can return float subclasses on failure paths.
        if type(value) != float or self.measure_scale is None:
            return value
        else:
            return value * self.measure_scale

    def port_error_check(self, port):
        """Validate that a port was (or was not) supplied, matching
        this measurement's has_port flag."""
        if self.has_port and port is None:
            debug.error("Cannot retrieve measurement, port input was expected.", 1)
        elif not self.has_port and port is not None:
            debug.error("Unexpected port input received during measure retrieval.", 1)
class delay_measure(spice_measurement):
    """Generates a spice measurement for the delay of 50%-to-50% points of two signals."""

    def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, targ_dir_str,
                 trig_vdd=0.5, targ_vdd=0.5, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd)

    def get_measure_function(self):
        return stimuli.gen_meas_delay

    def set_meas_constants(self, trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd):
        """Set the constants for this measurement: signal names, directions, and trigger scales"""
        self.trig_dir_str = trig_dir_str
        self.targ_dir_str = targ_dir_str
        # Crossing points expressed as fractions of vdd.
        self.trig_val_of_vdd = trig_vdd
        self.targ_val_of_vdd = targ_vdd
        # Name templates; ports are substituted in later via str.format.
        self.trig_name_no_port = trig_name
        self.targ_name_no_port = targ_name

    def get_measure_values(self, trig_td, targ_td, vdd_voltage, port=None):
        """Constructs inputs to stimulus measurement function. Variant values
        (time delays, vdd, port) are inputs here."""
        self.port_error_check(port)
        # Convert fractional crossing points into absolute voltages.
        trig_voltage = self.trig_val_of_vdd * vdd_voltage
        targ_voltage = self.targ_val_of_vdd * vdd_voltage
        meas_name = self.name
        trig_signal = self.trig_name_no_port
        targ_signal = self.targ_name_no_port
        if port is not None:
            # For dictionary indexing reasons, the name is formatted
            # differently than the signals.
            meas_name = "{}{}".format(meas_name, port)
            trig_signal = trig_signal.format(port)
            targ_signal = targ_signal.format(port)
        return (meas_name, trig_signal, targ_signal, trig_voltage, targ_voltage,
                self.trig_dir_str, self.targ_dir_str, trig_td, targ_td)
class slew_measure(delay_measure):
    """Measures the 10%-to-90% (RISE) or 90%-to-10% (FALL) slew of one signal."""

    def __init__(self, measure_name, signal_name, slew_dir_str, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(signal_name, slew_dir_str)

    def set_meas_constants(self, signal_name, slew_dir_str):
        """Set the values needed to generate a Spice measurement statement based on the name of the measurement."""
        self.trig_dir_str = slew_dir_str
        self.targ_dir_str = slew_dir_str
        # Trigger/target crossing fractions of vdd per slew direction.
        try:
            self.trig_val_of_vdd, self.targ_val_of_vdd = {
                "RISE": (0.1, 0.9),
                "FALL": (0.9, 0.1),
            }[slew_dir_str]
        except KeyError:
            debug.error("Unrecognised slew measurement direction={}".format(slew_dir_str), 1)
        # Trigger and target are the same signal for a slew measurement.
        self.trig_name_no_port = signal_name
        self.targ_name_no_port = signal_name
#Time delays and ports are variant and needed as inputs when writing the measurement
class power_measure(spice_measurement):
    """Generates a spice measurement for the average power between two time points."""

    def __init__(self, measure_name, power_type="", measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(power_type)

    def get_measure_function(self):
        return stimuli.gen_meas_power

    def set_meas_constants(self, power_type):
        """Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)"""
        # Not needed for power simulation itself; expected to be "RISE"/"FALL".
        self.power_type = power_type

    def get_measure_values(self, t_initial, t_final, port=None):
        """Constructs inputs to stimulus measurement function. Variant values are inputs here."""
        self.port_error_check(port)
        # Port measurements append the port number to the measure name.
        meas_name = self.name if port is None else "{}{}".format(self.name, port)
        return (meas_name, t_initial, t_final)
class voltage_when_measure(spice_measurement):
    """Generates a spice measurement to measure the voltage of a signal based on the voltage of another."""

    def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, trig_vdd, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(trig_name, targ_name, trig_dir_str, trig_vdd)

    def get_measure_function(self):
        return stimuli.gen_meas_find_voltage

    def set_meas_constants(self, trig_name, targ_name, trig_dir_str, trig_vdd):
        """Store the trigger direction, fractional trigger level, and signal
        name templates for this measurement."""
        self.trig_dir_str = trig_dir_str
        # Trigger crossing point expressed as a fraction of vdd.
        self.trig_val_of_vdd = trig_vdd
        self.trig_name_no_port = trig_name
        self.targ_name_no_port = targ_name

    def get_measure_values(self, trig_td, vdd_voltage, port=None):
        """Constructs inputs to stimulus measurement function. Variant values are inputs here."""
        self.port_error_check(port)
        meas_name = self.name
        trig_signal = self.trig_name_no_port
        targ_signal = self.targ_name_no_port
        if port is not None:
            # For dictionary indexing reasons, the name is formatted
            # differently than the signals.
            meas_name = "{}{}".format(meas_name, port)
            trig_signal = trig_signal.format(port)
            targ_signal = targ_signal.format(port)
        # Convert the fractional trigger level into an absolute voltage.
        trig_voltage = self.trig_val_of_vdd * vdd_voltage
        return (meas_name, trig_signal, targ_signal, trig_voltage, self.trig_dir_str, trig_td)
class voltage_at_measure(spice_measurement):
    """Generates a spice measurement to measure the voltage at a specific time.
    The time is considered variant with different periods."""

    def __init__(self, measure_name, targ_name, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(targ_name)

    def get_measure_function(self):
        return stimuli.gen_meas_find_voltage_at_time

    def set_meas_constants(self, targ_name):
        """Store the target signal name template for this measurement."""
        self.targ_name_no_port = targ_name

    def get_measure_values(self, time_at, port=None):
        """Constructs inputs to stimulus measurement function. Variant values are inputs here."""
        self.port_error_check(port)
        meas_name = self.name
        targ_signal = self.targ_name_no_port
        if port is not None:
            # For dictionary indexing reasons, the name is formatted
            # differently than the signals.
            meas_name = "{}{}".format(meas_name, port)
            targ_signal = targ_signal.format(port)
        return (meas_name, targ_signal, time_at)
|
import argparse
import os
import pathlib
import platform
import re
import shutil
import sys
import tarfile
from functools import cached_property
from typing import List, Optional
from colorama import Fore, Style # type:ignore
from sphinx.cmd.build import main as sphinx_build # type:ignore
from aio.run import runner
from envoy.base import utils
from .exceptions import SphinxBuildError, SphinxEnvError
class SphinxRunner(runner.Runner):
    """Async runner that builds Sphinx html docs from a tarball of rst files.

    Overall flow (see ``run``): validate args, dump the build config to a
    yaml file, sanity-check the environment, invoke ``sphinx-build``, and
    save the generated html as a directory tree or a tarball.
    """

    # Class-level defaults. NOTE(review): _build_dir appears unused here —
    # build_dir derives from self.tempdir instead; confirm no subclass uses it.
    _build_dir = "."
    # Fallback when no --build_sha argument is supplied.
    _build_sha = "UNKNOWN"

    @property
    def blob_sha(self) -> str:
        """Returns either the version tag or the current build sha."""
        return self.docs_tag or self.build_sha

    @property
    def build_dir(self) -> pathlib.Path:
        """Returns current build_dir - most likely a temp directory"""
        return pathlib.Path(self.tempdir.name)

    @property
    def build_sha(self) -> str:
        """Returns either a provided build_sha or a default."""
        return self.args.build_sha or self._build_sha

    @cached_property
    def colors(self) -> dict:
        """Color scheme for build summary."""
        return dict(
            chrome=Fore.LIGHTYELLOW_EX,
            key=Fore.LIGHTCYAN_EX,
            value=Fore.LIGHTMAGENTA_EX)

    @cached_property
    def config_file(self) -> pathlib.Path:
        """Populates a config file with self.configs and returns the file
        path.

        Cached: the yaml file is written at most once per run.
        """
        return utils.to_yaml(self.configs, self.config_file_path)

    @property
    def config_file_path(self) -> pathlib.Path:
        """Path to a (temporary) build config."""
        return self.build_dir.joinpath("build.yaml")

    @cached_property
    def configs(self) -> dict:
        """Build configs derived from provided args."""
        _configs = dict(
            version_string=self.version_string,
            release_level=self.release_level,
            blob_sha=self.blob_sha,
            version_number=self.version_number,
            docker_image_tag_name=self.docker_image_tag_name)
        # Validation settings are only included when fragment validation is
        # enabled; otherwise the build is told to skip validation entirely.
        if self.validate_fragments:
            if self.validator_path:
                _configs["validator_path"] = str(self.validator_path)
            if self.descriptor_path:
                _configs["descriptor_path"] = str(self.descriptor_path)
        else:
            _configs["skip_validation"] = "true"
        return _configs

    @property
    def descriptor_path(self) -> Optional[pathlib.Path]:
        """Path to a descriptor file for config validation."""
        return (
            pathlib.Path(self.args.descriptor_path)
            if self.args.descriptor_path
            else None)

    @property
    def docker_image_tag_name(self) -> str:
        """Tag name of current docker image."""
        # e.g. "1.19.3-dev" -> "v1.19-latest"
        return re.sub(
            r"([0-9]+\.[0-9]+)\.[0-9]+.*",
            r"v\1-latest",
            self.version_number)

    @property
    def docs_tag(self) -> str:
        """Tag name - ie named version for this docs build

        NOTE(review): may be falsy/None when --docs_tag is not passed, despite
        the ``str`` annotation — confirm against the arg parser defaults.
        """
        return self.args.docs_tag

    @cached_property
    def html_dir(self) -> pathlib.Path:
        """Path to (temporary) directory for outputting html."""
        return self.build_dir.joinpath("generated", "html")

    @property
    def output_path(self) -> pathlib.Path:
        """Path to tar file or directory for saving generated html docs."""
        return pathlib.Path(self.args.output_path)

    @property
    def overwrite(self) -> bool:
        """Overwrite output path if exists."""
        return self.args.overwrite

    @property
    def py_compatible(self) -> bool:
        """Current python version is compatible (>= 3.8)."""
        return bool(
            sys.version_info.major == 3
            and sys.version_info.minor >= 8)

    @property
    def release_level(self) -> str:
        """Release level: "tagged" when a docs tag was given, else "pre-release"."""
        return "tagged" if self.docs_tag else "pre-release"

    @cached_property
    def rst_dir(self) -> pathlib.Path:
        """Populates an rst directory with contents of given rst tar, and
        returns the path to the directory.

        Cached: the tarball is extracted at most once per run.
        """
        rst_dir = self.build_dir.joinpath("generated", "rst")
        if self.rst_tar:
            utils.extract(rst_dir, self.rst_tar)
        return rst_dir

    @property
    def rst_tar(self) -> pathlib.Path:
        """Path to the rst tarball."""
        return pathlib.Path(self.args.rst_tar)

    @property
    def sphinx_args(self) -> List[str]:
        """Command args for sphinx.

        -W --keep-going: treat warnings as errors but report all of them.
        """
        return [
            "-W", "--keep-going", "--color", "-b", "html",
            str(self.rst_dir), str(self.html_dir)]

    @property
    def validate_fragments(self) -> bool:
        """Validate configuration fragments."""
        # Providing a validator path implicitly enables validation.
        return bool(
            self.validator_path
            or self.args.validate_fragments)

    @property
    def validator_path(self) -> Optional[pathlib.Path]:
        """Path to validator utility for validating snippets."""
        return (
            pathlib.Path(self.args.validator_path)
            if self.args.validator_path
            else None)

    @property
    def version_file(self) -> pathlib.Path:
        """Path to version files for deriving docs version."""
        return pathlib.Path(self.args.version_file)

    @cached_property
    def version_number(self) -> str:
        """Semantic version, from --version or the version file's contents."""
        return (
            self.args.version
            if self.args.version
            else self.version_file.read_text().strip())

    @property
    def version_string(self) -> str:
        """Version string derived from either docs_tag or build_sha."""
        return (
            f"tag-{self.docs_tag}"
            if self.docs_tag
            else f"{self.version_number}-{self.build_sha[:6]}")

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """Register this runner's CLI arguments on top of the base runner's."""
        super().add_arguments(parser)
        parser.add_argument("--build_sha")
        parser.add_argument("--docs_tag")
        parser.add_argument("--version_file")
        parser.add_argument("--validator_path")
        parser.add_argument("--descriptor_path")
        parser.add_argument("--version")
        parser.add_argument(
            "--validate_fragments", default=False, action="store_true")
        parser.add_argument(
            "--overwrite", default=False, action="store_true")
        parser.add_argument("rst_tar")
        parser.add_argument("output_path")

    def build_html(self) -> None:
        """Run sphinx-build; raise SphinxBuildError on a non-zero result."""
        if sphinx_build(self.sphinx_args):
            raise SphinxBuildError("BUILD FAILED")

    def build_summary(self) -> None:
        """Pretty-print the build configs to stdout before building."""
        print()
        print(self._color("#### Sphinx build configs #####################"))
        print(self._color("###"))
        for k, v in self.configs.items():
            print(
                f"{self._color('###')} {self._color(k, 'key')}: "
                f"{self._color(v, 'value')}")
        print(self._color("###"))
        print(self._color("###############################################"))
        print()

    def check_env(self) -> None:
        """Sanity-check python version and, for tagged builds, that the tag
        matches the VERSION file and the current version history entry.

        Raises SphinxEnvError on any mismatch.
        """
        if not self.py_compatible:
            raise SphinxEnvError(
                "ERROR: python version must be >= 3.8, "
                f"you have {platform.python_version()}")
        # Tag/version cross-checks only apply to tagged (release) builds.
        if not self.configs["release_level"] == "tagged":
            return
        if f"v{self.version_number}" != self.docs_tag:
            raise SphinxEnvError(
                "Given git tag does not match the VERSION file content:"
                f"{self.docs_tag} vs v{self.version_number}")
        # this should probs only check the first line
        version_current = self.rst_dir.joinpath(
            "version_history", "current.rst").read_text()
        if self.version_number not in version_current:
            raise SphinxEnvError(
                f"Git tag ({self.version_number}) not found in "
                "version_history/current.rst")

    def save_html(self) -> None:
        """Save the generated html to output_path — as a plain directory
        tree, or as a tarball when the path looks tar-like.

        Removes any pre-existing output first (validate_args has already
        ensured --overwrite was given in that case).
        """
        if self.output_path.exists():
            self.log.warning(
                f"Output path ({self.output_path}) exists, removing")
            if self.output_path.is_file():
                self.output_path.unlink()
            else:
                shutil.rmtree(self.output_path)
        if not utils.is_tarlike(self.output_path):
            shutil.copytree(self.html_dir, self.output_path)
            return
        with tarfile.open(self.output_path, "w") as tar:
            tar.add(self.html_dir, arcname=".")

    @runner.cleansup
    @runner.catches((SphinxBuildError, SphinxEnvError))
    async def run(self):
        """Entry point: validate, check env, build, and save the docs.

        Returns 1 on env/build failure; returns None (success) otherwise.
        """
        self.validate_args()
        # Expose the generated build-config path to the sphinx build via the
        # environment. NOTE(review): presumably consumed by the docs conf.py —
        # confirm.
        os.environ["ENVOY_DOCS_BUILD_CONFIG"] = str(self.config_file)
        try:
            self.check_env()
        except SphinxEnvError as e:
            print(e)
            return 1
        self.build_summary()
        try:
            self.build_html()
        except SphinxBuildError as e:
            print(e)
            return 1
        self.save_html()

    def validate_args(self):
        """Fail early if the output path exists and --overwrite is not set."""
        if self.output_path.exists():
            if not self.overwrite:
                raise SphinxBuildError(
                    f"Output path ({self.output_path}) exists and "
                    "`--overwrite` is not set`")

    def _color(self, msg, name=None):
        """Wrap msg in the named summary color (default 'chrome')."""
        return f"{self.colors[name or 'chrome']}{msg}{Style.RESET_ALL}"
|
<gh_stars>0
#!/usr/bin/python
#encoding: utf-8
import sys
import os
from base_generator import *
# Root namespace for all generated C# sources.
baseNS = "AtomScript"
def gen_all_expr():
    """Generate the C# expression AST node classes (Expr base + subclasses).

    Each concrete node gets its properties plus an ``Accept`` override that
    dispatches to ``AstVisitor.Visit(this)``.
    """
    out_dir = "../AtomScript/AST/Expression/"
    ns = baseNS + ".AST.Expression"
    usingGeneric = "System.Collections.Generic"

    # Abstract visitable base class.
    base = CommonClass(ns, "Expr", "IVisitable")
    base.setAbs()
    base.addFunc("public virtual", "void", "Accept", [("AstVisitor", "visitor")], [])
    base.writeTo(out_dir + base.className + ".cs")

    # (class name, extra using directives, [(prop type, prop name), ...]);
    # order matters only in that it mirrors the original generation order.
    specs = [
        ("AssignExpr", [], [("Token", "name"), ("Expr", "value")]),
        ("BinaryExpr", [], [("Expr", "left"), ("Token", "op"), ("Expr", "right")]),
        ("CallExpr", [usingGeneric],
         [("Expr", "callee"), ("Token", "paren"), ("List<Expr>", "arguments")]),
        ("GroupingExpr", [], [("Expr", "expression")]),
        ("GetExpr", [], [("Expr", "obj"), ("Token", "name")]),
        ("LiteralExpr", [], [("Token", "literal")]),
        ("LogicalExpr", [], [("Expr", "left"), ("Token", "op"), ("Expr", "right")]),
        ("SetExpr", [], [("Expr", "obj"), ("Token", "name"), ("Expr", "value")]),
        ("SuperExpr", [], [("Token", "keyword"), ("Token", "method")]),
        ("ThisExpr", [], [("Token", "keyword")]),
        ("UnaryExpr", [], [("Token", "op"), ("Expr", "right")]),
        ("VariableExpr", [], [("Token", "name")]),
    ]
    for cls_name, usings, props in specs:
        c = CommonClass(ns, cls_name, "Expr")
        for using in usings:
            c.addUsing(using)
        for prop_type, prop_name in props:
            c.addProp(prop_type, prop_name)
        c.addFunc("public override", "void", "Accept", [("AstVisitor", "visitor")], ["visitor.Visit(this);"])
        c.writeTo(out_dir + c.className + ".cs")
def gen_all_stmt():
    """Generate the C# statement AST node classes (Stmt base + subclasses).

    Each concrete node gets its properties plus an ``Accept`` override that
    dispatches to ``AstVisitor.Visit(this)``.

    Fix vs original: removed the unused locals ``usingSystem`` and
    ``usingScanner`` (they were never passed to addUsing).
    """
    out_dir = "../AtomScript/AST/Statement/"
    ns = baseNS + ".AST.Statement"
    usingExpr = baseNS + ".AST.Expression"
    usingGeneric = "System.Collections.Generic"

    # Abstract visitable base class.
    c = CommonClass(ns, "Stmt", "IVisitable")
    c.setAbs()
    c.addFunc("public virtual", "void", "Accept", [("AstVisitor", "visitor")], [])
    c.writeTo(out_dir + c.className + ".cs")

    # (class name, using directives, [(prop type, prop name), ...]);
    # ordering mirrors the original generation order.
    specs = [
        ("BlockStmt", [usingGeneric], [("List<Stmt>", "stmts")]),
        ("ClassDeclarationStmt", [usingGeneric, usingExpr],
         [("Token", "name"), ("VariableExpr", "superclass"),
          ("List<FuncDeclarationStmt>", "methods")]),
        ("ExpressionStmt", [usingExpr], [("Expr", "expr")]),
        ("FuncDeclarationStmt", [usingGeneric],
         [("Token", "name"), ("List<Token>", "parameters"), ("BlockStmt", "body")]),
        ("ForStmt", [usingExpr],
         [("Stmt", "initializer"), ("Expr", "condition"),
          ("Expr", "increment"), ("Stmt", "body")]),
        ("IfStmt", [usingExpr],
         [("Expr", "condition"), ("Stmt", "thenBranch"), ("Stmt", "elseBranch")]),
        ("PrintStmt", [usingExpr], [("Expr", "expr")]),
        ("ReturnStmt", [usingExpr], [("Token", "keyword"), ("Expr", "value")]),
        ("VarDeclarationStmt", [usingExpr], [("Token", "name"), ("Expr", "initializer")]),
        ("WhileStmt", [usingExpr], [("Expr", "condition"), ("Stmt", "body")]),
    ]
    for cls_name, usings, props in specs:
        c = CommonClass(ns, cls_name, "Stmt")
        for using in usings:
            c.addUsing(using)
        for prop_type, prop_name in props:
            c.addProp(prop_type, prop_name)
        c.addFunc("public override", "void", "Accept", [("AstVisitor", "visitor")], ["visitor.Visit(this);"])
        c.writeTo(out_dir + c.className + ".cs")
def gen_ast():
    """Generate the Ast root class, which visits each contained statement."""
    out_dir = "../AtomScript/AST/"
    ns = baseNS + ".AST"

    ast_cls = CommonClass(ns, "Ast", "IVisitable")
    ast_cls.addUsing("System.Collections.Generic")
    ast_cls.addUsing(baseNS + ".AST.Statement")
    ast_cls.addProp("List<Stmt>", "stmts")
    # Accept forwards the visitor to every top-level statement in order.
    accept_body = ["for (int i = 0; i < stmts.Count; i++) {", "    stmts[i].Accept(visitor);", "}"]
    ast_cls.addFunc("public", "void", "Accept", [("AstVisitor", "visitor")], accept_body)
    ast_cls.writeTo(out_dir + ast_cls.className + ".cs")
def gen_ast_visitor():
    """Generate the abstract AstVisitor class with one no-op Visit overload
    per expression and statement node type."""
    out_dir = "../AtomScript/AST/"
    ns = baseNS + ".AST"

    visitor = CommonClass(ns, "AstVisitor")
    visitor.setAbs()
    visitor.addUsing(baseNS + ".AST.Expression")
    visitor.addUsing(baseNS + ".AST.Statement")

    expr_types = [
        "AssignExpr", "BinaryExpr", "CallExpr", "GetExpr", "GroupingExpr",
        "LiteralExpr", "LogicalExpr", "SetExpr", "SuperExpr", "ThisExpr",
        "UnaryExpr", "VariableExpr",
    ]
    stmt_types = [
        "BlockStmt", "ClassDeclarationStmt", "ExpressionStmt", "ForStmt",
        "FuncDeclarationStmt", "IfStmt", "PrintStmt", "ReturnStmt",
        "VarDeclarationStmt", "WhileStmt",
    ]
    for node_type in expr_types:
        visitor.addFunc("public virtual", "void", "Visit", [(node_type, "expr")], [])
    for node_type in stmt_types:
        visitor.addFunc("public virtual", "void", "Visit", [(node_type, "stmt")], [])
    visitor.writeTo(out_dir + visitor.className + ".cs")
def gen_token_type():
    """Generate the TokenType C# enum, grouped into commented sections."""
    out_dir = "../AtomScript/AST/"
    ns = baseNS + ".AST"
    c = CommonEnum(ns, "TokenType")

    # (section comment or None, [addEnum argument tuples]); the Chinese
    # strings are emitted verbatim into the generated source.
    sections = [
        (None, [("UNDEFINED", "未定义")]),
        ("单字符Token", [
            ("LEFT_PAREN", "左括号"), ("RIGHT_PAREN", "右括号"),
            ("LEFT_BRACE", "左大括号"), ("RIGHT_BRACE", "右大括号"),
            ("COMMA", "逗号"), ("DOT", "点"), ("MINUS", "减"), ("PLUS", "加"),
            ("SEMICOLON", "分号"), ("SLASH", "斜线"), ("STAR", "星号"),
            ("COLON", "冒号"),
        ]),
        ("一个或两个字符Token", [
            ("BANG", "相反的"), ("BANG_EQUAL", "不等于"), ("EQUAL", "赋值"),
            ("EQUAL_EQUAL", "等于"), ("GREATER", "大于"),
            ("GREATER_EQUAL", "大于等于"), ("LESS", "小于"),
            ("LESS_EQUAL", "小于等于"),
        ]),
        ("值类型", [("IDENTIFIER", "标识符"), ("STRING",), ("NUMBER",)]),
        ("关键字", [
            ("AND",), ("CLASS",), ("ELSE",), ("FALSE",), ("FUNC",), ("FOR",),
            ("IF",), ("NIL",), ("OR",), ("PRINT",), ("RETURN",), ("SUPER",),
            ("THIS",), ("TRUE",), ("VAR",), ("WHILE",), ("EOF",),
        ]),
    ]
    for header, members in sections:
        if header is not None:
            c.addComment(header)
        for member in members:
            c.addEnum(*member)
    c.writeTo(out_dir + c.enumName + ".cs")
def gen_token():
    """Generate the Token data class (type, lexeme, literal, position)."""
    out_dir = "../AtomScript/AST/"
    ns = baseNS + ".AST"
    token_cls = CommonClass(ns, "Token", None)
    for prop_type, prop_name in [("TokenType", "type"), ("string", "lexeme"),
                                 ("object", "literal"), ("int", "line"),
                                 ("int", "column")]:
        token_cls.addProp(prop_type, prop_name)
    token_cls.writeTo(out_dir + token_cls.className + ".cs")
if __name__ == "__main__":
    # Regenerate every AST source file; the order matches the original script.
    for generate in (gen_all_expr, gen_all_stmt, gen_ast_visitor, gen_ast,
                     gen_token_type, gen_token):
        generate()
|
<filename>tests/rnn_gen.py
# coding: utf-8
# In[18]:
from fuel.datasets import Dataset
from librnn.pylearn2.datasets.music import MusicSequence
from blocks.bricks import Sigmoid, Tanh, MLP, Linear, Rectifier
from blocks.bricks.recurrent import SimpleRecurrent, GatedRecurrent, LSTM
from blocks.bricks import recurrent
from blocks.bricks.parallel import Fork
from blocks.initialization import Orthogonal, IsotropicGaussian, Constant
from blocks.algorithms import GradientDescent, Scale, Adam
from fuel.streams import DataStream
from fuel.schemes import ConstantScheme
from theano import tensor
from blocks.bricks import WEIGHT, BIAS
from blocks.graph import ComputationGraph
from blocks.filter import VariableFilter
from blocks.main_loop import MainLoop
from blocks.model import Model
from blocks.extensions import FinishAfter, Printing
#from blocks.extensions.saveload import SerializeMainLoop
from blocks.extensions.monitoring import TrainingDataMonitoring, DataStreamMonitoring
from blocks.bricks.cost import BinaryCrossEntropy
from blocks_contrib.bricks.recurrent import DelayLine
# In[19]:
class MusicFuel(Dataset):
    """Fuel Dataset adapter around the pylearn2 MusicSequence dataset."""

    def __init__(self, which_set, which_dataset):
        self.pylearn2_dataset = MusicSequence(which_set=which_set, which_dataset=which_dataset)
        # Fuel source names are taken from the pylearn2 data specs.
        self.sources = self.pylearn2_dataset.get_data_specs()[-1]

    def open(self):
        """Return a sequential iterator over the full dataset (the state)."""
        dataset = self.pylearn2_dataset
        return dataset.iterator(1, dataset.get_num_examples(), mode='sequential',
                                data_specs=dataset.get_data_specs(), return_tuple=True)

    def get_data(self, state=None, request=None):
        """Pull the next example tuple from the iterator state."""
        return next(state)
# In[17]:
import theano
# Symbolic 3-D inputs named 'features' and 'targets'.
# NOTE(review): presumably (time, batch, feature) ordering — confirm against
# the MusicSequence data specs.
x, y = tensor.tensor3s('features', 'targets')
def rnn_output(x, name):
    """Build y_hat = sigmoid(Linear(GRU(Fork(x)))) over a 96-dim input sequence.

    The input is forked into the three 48-dim GRU input streams (inputs,
    reset_inputs, update_inputs), run through a gated recurrent layer, and
    projected back to 96 dims with a sigmoid output nonlinearity.

    Fixes vs original:
    - The sigmoid was applied to an undefined name ``pre_out`` (NameError at
      graph-construction time); the intended pre-activation is the output
      projection ``z``.
    - ``print fork`` (Python 2 statement) is now the parenthesized form,
      valid in both Python 2 and 3.
    - Removed dead code: the unused ``proto``/``proto_fork`` bricks and the
      three input Linear bricks whose outputs were never consumed (their
      role was superseded by the Fork).

    Parameters: x — symbolic input sequence; name — prefix for brick names.
    Returns the symbolic output sequence (named 'output_sequence').
    """
    rnn = GatedRecurrent(activation=Tanh(), dim=48,
                         name=name + '_RNN', use_update_gate=True, use_reset_gate=True)
    fork = Fork(weights_init=IsotropicGaussian(.01),
                biases_init=Constant(0.),
                input_dim=96, output_dims=[48] * 3,
                output_names=['inputs', 'reset_inputs',
                              'update_inputs'])
    state_to_output = Linear(name=name + '_state_to_output', input_dim=48,
                             output_dim=96)

    forked = fork.apply(x)
    print(fork)
    states = rnn.apply(inputs=forked[0], reset_inputs=forked[1], update_inputs=forked[2])
    z = state_to_output.apply(states)
    y_hat = Sigmoid(name=name + '_last_layer').apply(z)
    y_hat.name = 'output_sequence'

    # Initialize parameters after the graph is wired up.
    state_to_output.weights_init = IsotropicGaussian(0.01)
    state_to_output.biases_init = Constant(0.)
    rnn.weights_init = Orthogonal()
    rnn.biases_init = Constant(0.)
    fork.initialize()
    rnn.initialize()
    state_to_output.initialize()
    return y_hat
# Build the network and a standalone prediction function.
y_hat = rnn_output(x, 'a')
# NOTE(review): y_last is never used below — dead code?
y_last = y_hat[-1]
predict = theano.function(inputs = [x, ], outputs = y_hat)
#cost = BinaryCrossEntropy().apply(y, y_hat)
# Per-timestep binary cross-entropy: summed over the feature axis, averaged
# over time and batch.
cost = tensor.nnet.binary_crossentropy(y_hat, y).sum(axis=2).mean()
cost.name = 'BCE'
cg = ComputationGraph(cost)
# Train every weight and bias variable found in the computation graph.
params = VariableFilter(roles=[WEIGHT, BIAS])(cg.variables)
# In[7]:
# Train/valid/test streams over the 'jsb' dataset.
trainset = DataStream(MusicFuel(which_set='train', which_dataset='jsb'))
testset = DataStream(MusicFuel(which_set='test', which_dataset='jsb'))
validset = DataStream(MusicFuel(which_set='valid', which_dataset='jsb'))
# NOTE(review): batch_size is unused; batching is fixed inside MusicFuel.open().
batch_size = 1
num_epochs = 100
# NOTE(review): this overwrites the 'BCE' name assigned above.
cost.name = "sequence_log_likelihood"
algorithm = GradientDescent(
    cost=cost, params=params,
    step_rule=Adam(0.001))
# Main training loop: monitor the cost on train/valid/test each epoch.
main_loop = MainLoop(
    algorithm=algorithm,
    data_stream=trainset,
    model=None,
    extensions=[FinishAfter(after_n_epochs=num_epochs),
                TrainingDataMonitoring([cost], prefix="train",
                                       after_every_epoch=True),
                DataStreamMonitoring([cost], validset, prefix="valid"),
                DataStreamMonitoring([cost], testset, prefix="test"),
                Printing()])
main_loop.run()
# In[20]:
|
from src.gilded_rose import GildedRose
from src.item import Item
from src.item_types import ItemTypes
import pytest
def test_negative_quality():
    """An ordinary item's quality never drops below zero."""
    rose = GildedRose([Item(name="Other", sell_in=0, quality=0)])
    for _ in range(2):
        rose.update_quality()
        assert rose.items[0].quality == 0
def test_update_quality_decrease():
    """Ordinary items lose 1 quality/day before the sell date, 2 after."""
    rose = GildedRose([Item(name="Other", sell_in=1, quality=10)])
    for expected in (9, 7):
        rose.update_quality()
        assert rose.items[0].quality == expected
@pytest.mark.parametrize(
    "items, expected", [([], 0), ([Item(name="FAKE", sell_in=0, quality=1)], 1)]
)
def test_create_giled_rose(items, expected):
    """GildedRose stores exactly the items it was constructed with.

    Fix vs original: the parameter was named ``input``, shadowing the
    builtin; renamed to ``items`` (parametrize string updated to match).
    """
    gildedrose = GildedRose(items)
    assert len(gildedrose.items) == expected
@pytest.mark.parametrize(
    "item_name, quality, expected_quality",
    [
        (ItemTypes.AGED_BRIE.value, 50, 50),
        (ItemTypes.SULFURAS.value, 80, 80),
        (ItemTypes.BACKSTAGE_PASSES.value, 50, 0),
    ],
)
def test_update_quality_maximum_value(item_name, quality, expected_quality):
    """Quality caps hold: Brie stays at 50, Sulfuras at 80, and expired
    backstage passes drop straight to 0."""
    rose = GildedRose([Item(name=item_name, sell_in=0, quality=quality)])
    rose.update_quality()
    assert rose.items[0].quality == expected_quality
def test_update_quality_aged_brie():
    """Aged Brie gains 1 quality/day before its sell date."""
    start = 10
    rose = GildedRose(
        [Item(name=ItemTypes.AGED_BRIE.value, sell_in=2, quality=start)])
    for day in (1, 2):
        rose.update_quality()
        assert rose.items[0].quality == start + day
def test_update_quality_expired_aged_brie():
    """Aged Brie gains 2 quality/day once past its sell date."""
    start = 10
    rose = GildedRose(
        [Item(name=ItemTypes.AGED_BRIE.value, sell_in=0, quality=start)])
    for gain in (2, 4):
        rose.update_quality()
        assert rose.items[0].quality == start + gain
def test_update_quality_sulfuras():
    """Sulfuras never changes quality, no matter how often it is updated."""
    initial_quality = 10
    shop = GildedRose(
        [Item(name=ItemTypes.SULFURAS.value, sell_in=1, quality=initial_quality)]
    )
    for _ in range(2):
        shop.update_quality()
        assert shop.items[0].quality == initial_quality
def test_update_quality_backstage_passes_11_days_or_more():
    """With more than 10 days left, backstage passes gain 1 quality per day."""
    shop = GildedRose(
        [Item(name=ItemTypes.BACKSTAGE_PASSES.value, sell_in=13, quality=10)]
    )
    for expected_quality in (11, 12):
        shop.update_quality()
        assert shop.items[0].quality == expected_quality
def test_update_quality_backstage_passes_10_days_left():
    """With 10 days or fewer left, backstage passes gain 2 quality per day."""
    shop = GildedRose(
        [Item(name=ItemTypes.BACKSTAGE_PASSES.value, sell_in=10, quality=10)]
    )
    for expected_quality in (12, 14):
        shop.update_quality()
        assert shop.items[0].quality == expected_quality
def test_update_quality_backstage_passes_5_days_left():
    """With 5 days or fewer left, backstage passes gain 3 quality per day."""
    shop = GildedRose(
        [Item(name=ItemTypes.BACKSTAGE_PASSES.value, sell_in=5, quality=10)]
    )
    for expected_quality in (13, 16):
        shop.update_quality()
        assert shop.items[0].quality == expected_quality
def test_update_quality_backstage_passes_0_days_left():
    """Backstage passes gain value until the concert, then drop to 0."""
    shop = GildedRose(
        [Item(name=ItemTypes.BACKSTAGE_PASSES.value, sell_in=1, quality=10)]
    )
    for expected_quality in (13, 0):
        shop.update_quality()
        assert shop.items[0].quality == expected_quality
def test_update_quality_magically_conjured():
    """Conjured items degrade by 2 per day before the sell date."""
    shop = GildedRose(
        [Item(name=ItemTypes.MAGICALLY_CONJURED.value, sell_in=5, quality=10)]
    )
    for expected_quality in (8, 6):
        shop.update_quality()
        assert shop.items[0].quality == expected_quality
def test_update_quality_expired_magically_conjured():
    """Past the sell date, conjured items degrade by 4 per day."""
    shop = GildedRose(
        [Item(name=ItemTypes.MAGICALLY_CONJURED.value, sell_in=0, quality=10)]
    )
    for expected_quality in (6, 2):
        shop.update_quality()
        assert shop.items[0].quality == expected_quality
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Documentation Test
"""
import logging
import json
import os
import csv
import requests
from typing import Union
from mapswipe_workers.utils import error_handling
from mapswipe_workers.utils import slack
from mapswipe_workers.definitions import DATA_PATH
from mapswipe_workers.definitions import CustomError
from mapswipe_workers.basic import auth
# Make sure to import all project types here
from mapswipe_workers.ProjectTypes.BuildArea.BuildAreaImport import BuildAreaImport
from mapswipe_workers.ProjectTypes.BuildArea.BuildAreaProject import BuildAreaProject
from mapswipe_workers.ProjectTypes.Footprint.FootprintImport import FootprintImport
from mapswipe_workers.ProjectTypes.Footprint.FootprintProject import FootprintProject
########################################################################################################################
# INIT #
########################################################################################################################
def get_environment(modus='development'):
    """
    The function to get the firebase and postgres configuration

    Parameters
    ----------
    modus : str
        either `development` or `production` to decide which firebase configuration to use

    Returns
    -------
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication
    postgres : database connection class
        The database connection to postgres database

    Notes
    -----
    For any unrecognized `modus` the function returns (None, None); callers
    must be prepared to handle that.
    """
    if modus == 'development':
        # we use the dev instance for testing
        firebase = auth.dev_firebase_admin_auth()
        postgres = auth.dev_psqlDB
        logging.warning('ALL - get_environment - use development instance')
    elif modus == 'production':
        # production credentials for the live instance
        firebase = auth.firebase_admin_auth()
        postgres = auth.psqlDB
        logging.warning('ALL - get_environment - use production instance')
    else:
        # unknown modus: signal failure by returning no connections at all
        firebase = None
        postgres = None
    return firebase, postgres
def init_import(project_type, import_key, import_dict):
    """
    Instantiate the import class that matches the given project type.

    Parameters
    ----------
    project_type : int
        the type of the project
    import_key : str
        the key for the import as depicted in firebase
    import_dict : dict
        a dictionary with the attributes for the import

    Returns
    -------
    imp :
        the import instance
    """
    # Register every supported project type here.
    import_classes = {
        1: BuildAreaImport,
        2: FootprintImport,
    }
    import_class = import_classes[int(project_type)]
    return import_class(import_key, import_dict)
def init_project(project_type, project_id, firebase, postgres):
    """
    The function to init a project in regard to its type

    Parameters
    ----------
    project_type : int
        the type of the project
    project_id : int
        the id of the project
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication
    postgres : database connection class
        The database connection to postgres database

    Returns
    -------
    proj :
        the project instance
    """
    class_to_type = {
        # Make sure to import all project types here
        1: BuildAreaProject,
        2: FootprintProject
    }
    # Coerce to int for consistency with init_import(): the type attribute
    # read from firebase may arrive as a string.
    proj = class_to_type[int(project_type)](project_id, firebase, postgres)
    return proj
def get_projects(firebase, postgres, filter='all'):
    """
    The function to download project information from firebase and init the projects

    Parameters
    ----------
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication
    postgres : database connection class
        The database connection to postgres database
    filter : str or list
        The filter for the projects.
        Can be either 'all', 'active', 'not_finished' or a list of project ids as integer
        (name kept for API compatibility although it shadows the builtin)

    Returns
    -------
    projects_list : list
        The list containing the projects
    """
    # create a list for projects according to filter
    projects_list = []
    fb_db = firebase.database()
    all_projects = fb_db.child("projects").get().val()
    # return empty list if there are no projects in firebase
    # (identity check: was `== None`)
    if all_projects is None:
        logging.warning('All - get_projects - no projects in firebase')
        return projects_list
    # we need to check if the user provided a list of project ids to filter
    if isinstance(filter, list):
        project_ids = filter.copy()
        filter = 'user'
    else:
        project_ids = []
    for project_id in all_projects:
        # a valid project in firebase has at least 12 attributes
        if len(all_projects[project_id]) < 12:
            logging.warning('%s - get_projects - project is in firebase, but misses critical information' % project_id)
            continue
        # we check all conditions for each group of projects
        conditions = {
            'all': True,
            'active': all_projects[project_id]['state'] == 0,
            'not_finished': all_projects[project_id]['progress'] < 100,
            'user': int(project_id) in project_ids
        }
        if conditions[filter]:
            try:
                project_type = all_projects[project_id]['projectType']
            except KeyError:
                # legacy projects without an explicit type default to type 1
                project_type = 1
            proj = init_project(project_type, project_id, firebase, postgres)
            projects_list.append(proj)
    return projects_list
def project_exists(project_id, firebase, postgres):
    """
    The function to check if all project information exists in firebase and postgres database.

    Parameters
    ----------
    project_id : int
        the id of the project
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication
    postgres : database connection class
        The database connection to postgres database

    Returns
    -------
    bool
        True if project exists, False if not
    """
    in_firebase = project_exists_firebase(project_id, firebase)
    in_postgres = project_exists_postgres(project_id, postgres)
    # both helpers return booleans, so the conjunction is already a bool
    return in_firebase and in_postgres
def project_exists_firebase(project_id, firebase):
    """
    The function to check whether a project exists in firebase.

    Parameters
    ----------
    project_id : int
        the id of the project
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication

    Returns
    -------
    bool
        True if project info and group info exist in firebase, false otherwise
    """
    fb_db = firebase.database()
    project_data = fb_db.child("projects").child(project_id).get().val()
    if not project_data:
        logging.warning('%s - project_exists_firebase - project info NOT in firebase' % project_id)
    else:
        logging.warning('%s - project_exists_firebase - project info in firebase' % project_id)
    groups_data = fb_db.child("groups").child(project_id).shallow().get().val()
    if not groups_data:
        logging.warning('%s - project_exists_firebase - groups info NOT in firebase' % project_id)
    else:
        logging.warning('%s - project_exists_firebase - groups info in firebase' % project_id)
    # both pieces must be present; bool() keeps the documented return type
    return bool(project_data and groups_data)
def project_exists_postgres(project_id, postgres):
    """
    The function to check whether a project exists in postgres.

    Parameters
    ----------
    project_id : int
        the id of the project
    postgres : database connection class
        The database connection to postgres database

    Returns
    -------
    bool
        True if project info, group info and task info exist in postgres database. False otherwise.
    """
    p_con = postgres()
    data = [project_id]
    sql_query = 'SELECT * FROM projects WHERE project_id = %s'
    project_data = p_con.retr_query(sql_query, data)
    if not project_data:
        logging.warning('%s - project_exists_postgres - project info NOT in postgres' % project_id)
    else:
        logging.warning('%s - project_exists_postgres - project info in postgres' % project_id)
    sql_query = 'SELECT * FROM groups WHERE project_id = %s LIMIT 1'
    groups_data = p_con.retr_query(sql_query, data)
    if not groups_data:
        logging.warning('%s - project_exists_postgres - groups info NOT in postgres' % project_id)
    else:
        logging.warning('%s - project_exists_postgres - groups info in postgres' % project_id)
    sql_query = 'SELECT * FROM tasks WHERE project_id = %s LIMIT 1'
    tasks_data = p_con.retr_query(sql_query, data)
    if not tasks_data:
        logging.warning('%s - project_exists_postgres - tasks info NOT in postgres' % project_id)
    else:
        logging.warning('%s - project_exists_postgres - tasks info in postgres' % project_id)
    del p_con
    # all three tables must contain rows for the project
    return bool(project_data and groups_data and tasks_data)
########################################################################################################################
# IMPORT #
########################################################################################################################
def get_new_imports(firebase):
    """
    The function to get new project imports from firebase which have not been imported

    Parameters
    ----------
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication

    Returns
    -------
    new_imports : dict
        a dictionary of imports which have not been imported already
    """
    fb_db = firebase.database()
    all_imports = fb_db.child("imports").get().val()
    new_imports = {}
    if all_imports:
        for import_key, new_import in all_imports.items():
            # already-imported projects carry a "complete" attribute;
            # a membership test replaces the old bare try/except
            if 'complete' not in new_import:
                new_imports[import_key] = new_import
    logging.warning('ALL - get_new_imports - got %s projects which have not been imported' % len(new_imports))
    return new_imports
def run_import(modus):
    """
    A function to create all newly imported projects in firebase

    Parameters
    ----------
    modus : str
        either `development` or `production` to decide which firebase configuration to use

    Returns
    -------
    imported_projects : list
        list of tuple with import_key, project_id and project_type of imported projects
    """
    # get dev or production environment for firebase and postgres
    firebase, postgres = get_environment(modus)
    # this will return a list of imports
    imported_projects = []
    # check for new imports in firebase
    imports = get_new_imports(firebase)
    for import_key, import_dict in imports.items():
        # let's have a look at the project type
        try:
            project_type = import_dict['projectType']
        except KeyError:
            # imports without an explicit type default to type 1
            project_type = 1
        # now let's init the import
        try:
            imp = init_import(project_type, import_key, import_dict)
            # and now let's finally create a project
            project_id, project_type = imp.create_project(firebase, postgres)
            imported_projects.append((imp.import_key, project_id, project_type))
            try:
                msg = "### IMPORT SUCCESSFUL ### \nproject-name: %s \nimport-key: %s \nproject-id: %s \nproject-type: %s \nMake sure to activate the project in firebase. \nHappy Swiping. :)" % (imp.name, import_key, project_id, project_type)
                slack.send_slack_message(msg)
            except Exception:
                # notification is best effort; the import itself succeeded
                logging.exception('could not send slack message.')
        except CustomError as error:
            error_handling.send_error(error, import_key)
            logging.exception('%s - get_new_imports - import failed' % import_key)
            continue
    return imported_projects
########################################################################################################################
# UPDATE #
########################################################################################################################
def run_update(modus, filter):
    """
    Update project progress and contributors in firebase.

    Parameters
    ----------
    modus : str
        The environment to use for firebase and postgres
        Can be either 'development' or 'production'
    filter : str or list
        The filter for the projects.
        Can be either 'all', 'active', 'not_finished' or a list of project ids as integer

    Returns
    -------
    updated_projects : list
        The list of all projects ids for projects which have been updated
    """
    firebase, postgres = get_environment(modus)
    updated_projects = []
    for proj in get_projects(firebase, postgres, filter):
        proj.update_project(firebase, postgres)
        updated_projects.append(proj.id)
    # refresh the users table as part of every update run
    update_users_postgres(firebase, postgres)
    return updated_projects
def update_users_postgres(firebase, postgres, users_txt_filename='raw_users.txt')-> bool:
    """
    The function to replace the users table in postgres with the current information from firebase

    Parameters
    ----------
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication
    postgres : database connection class
        the database connection to postgres database
    users_txt_filename : string
        the path for the textfile which temporally stores the raw information

    Returns
    -------
    bool
        True if successful, False otherwise
    """
    # open new txt file; `with` guarantees the handle is closed even if a
    # row raises (the old code leaked the handle in that case)
    with open(users_txt_filename, 'w', newline='') as users_txt_file:
        fieldnames = ('user_id', 'contributions', 'distance', 'username')
        w = csv.DictWriter(users_txt_file, fieldnames=fieldnames, delimiter=';', quotechar="'")
        # query users from fdb
        users = firebase.database().child("users").get().val()
        # check that there are users in firebase
        if not users:
            logging.warning('ALL - update_users - there are no users in firebase')
        else:
            for user in users:
                try:
                    # check for missing info, add dummy values
                    if 'username' not in users[user]:
                        users[user]['username'] = 'unknown'
                    if 'contributions' not in users[user]:
                        users[user]['contributions'] = 0
                    if 'distance' not in users[user]:
                        users[user]['distance'] = 0
                    output_dict = {
                        "user_id": user,
                        "contributions": users[user]['contributions'],
                        "distance": users[user]['distance'],
                        "username": users[user]['username']
                    }
                    w.writerow(output_dict)
                except Exception as e:
                    logging.warning('ALL - update_users - users missed critical information: %s' % e)
    # create new table in postgres for raw_users
    p_con = postgres()
    sql_insert = '''
        DROP TABLE IF EXISTS raw_users CASCADE;
        CREATE TABLE raw_users (
            user_id varchar
            ,contributions int
            ,distance double precision
            ,username varchar
        );
        '''
    p_con.query(sql_insert, None)
    # insert user data from text file into new table in postgres
    with open(users_txt_filename, 'r') as f:
        columns = ['user_id', 'contributions', 'distance', 'username']
        p_con.copy_from(f, 'raw_users', sep=';', columns=columns)
    logging.warning('ALL - update_users - inserted raw users into table raw_users')
    os.remove(users_txt_filename)
    logging.warning('ALL - update_users - deleted file: %s' % users_txt_filename)
    # update users in postgres, update contributions and distance and handle conflicts
    sql_insert = '''
        INSERT INTO
          users
        SELECT
          *
          -- duplicates is set to zero by default, this will be updated on conflict only
          --0
        FROM
          raw_users a
        ON CONFLICT ON CONSTRAINT "users_pkey"
        DO UPDATE SET contributions = excluded.contributions
        ,distance = excluded.distance;
        DROP TABLE IF EXISTS raw_users CASCADE;
        ''' # conflict action https://www.postgresql.org/docs/current/sql-insert.html
    p_con.query(sql_insert, None)
    logging.warning('ALL - update_users - inserted results into users table and updated contributions and/or distance')
    del p_con
    return True
########################################################################################################################
# TRANSFER RESULTS #
########################################################################################################################
def get_results_from_firebase(firebase):
    """
    Download all results from firebase.

    Parameters
    ----------
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication

    Returns
    -------
    results : dict
        The results in a dictionary with the following format:
        {
          "task_id" {
            "user1_id": {"data": {...}},
            "user2_id": {"data": {...}},
          }
        }
    """
    database = firebase.database()
    return database.child("results").get().val()
def delete_firebase_results(firebase, all_results):
    """
    Delete all given results in firebase.

    Parameters
    ----------
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication
    all_results : dict
        The results in a dictionary with the following format:
        {
          "task_id": {
            "user1_id": {},
            "user2_id": {},
          }
        }

    Returns
    -------
    bool
        True if successful, False otherwise

    Notes
    -----
    We use the update method of firebase instead of delete.
    Update allows to delete items at multiple locations at the same time.
    """
    fb_db = firebase.database()
    # build one mapping of every result path to None; passing None to
    # update() removes the entry at that path
    data = {
        'results/{}/{}'.format(task_id, child_id): None
        for task_id, results in all_results.items()
        for child_id in results
    }
    fb_db.update(data)
    del fb_db
    logging.warning('ALL - delete_firebase_results - deleted all results in firebase')
    return True
def results_to_txt(all_results):
    """
    The function to save results from firebase in csv format

    Parameters
    ----------
    all_results : dict
        The results in a dictionary with the following format:
        {
          "task_id" {
            "user1_id": {"data": {...}},
            "user2_id": {"data": {...}},
          }
        }

    Returns
    -------
    results_txt_filename : str
        The name of the file with the results
    """
    results_txt_filename = 'raw_results.txt'
    fieldnames = ('task_id', 'project_id', 'user_id', 'timestamp', 'info')
    number_of_results = 0
    # If csv file is a file object, it should be opened with newline=''.
    # `with` guarantees the handle is closed even if a row raises
    # (the old code leaked the handle in that case).
    with open(results_txt_filename, 'w', newline='') as results_txt_file:
        w = csv.DictWriter(results_txt_file, fieldnames=fieldnames, delimiter='\t', quotechar="'")
        for task_id, results in all_results.items():
            for child_id, result in results.items():
                number_of_results += 1
                try:
                    output_dict = {
                        "task_id": result['data']['id'],
                        "project_id": int(result['data']['projectId']),
                        "user_id": result['data']['user'],
                        "timestamp": int(result['data']['timestamp']),
                        "info": {}
                    }
                    for key in result['data'].keys():
                        # those keys have already been set
                        if key not in ['user', 'projectId', 'timestamp', 'id']:
                            output_dict['info'][key] = result['data'][key]
                    # the info column should have json format for uploading to postgres
                    output_dict['info'] = json.dumps(output_dict['info'])
                    w.writerow(output_dict)
                except Exception as e:
                    logging.warning('ALL - results_to_txt - result missed critical information: %s' % e)
    logging.warning('ALL - results_to_txt - there are %s results to import' % number_of_results)
    logging.warning('ALL - results_to_txt - created file: %s' % results_txt_filename)
    return results_txt_filename
def save_results_postgres(postgres, results_filename):
    """
    The function to save results in the postgres database

    Parameters
    ----------
    postgres : database connection class
        The database connection to postgres database
    results_filename : str
        The name of the file with the results

    Returns
    -------
    bool
        True if successful, False otherwise
    """
    ### this function saves the results from firebase to the postgres database
    # pre step delete table if exist
    p_con = postgres()
    # first importer to a table where we store the geom as text
    sql_insert = '''
        DROP TABLE IF EXISTS raw_results CASCADE;
        CREATE TABLE raw_results (
            task_id varchar,
            project_id int,
            user_id varchar,
            timestamp bigint,
            info json
        );
        '''
    p_con.query(sql_insert, None)
    # copy data to the new table; `with` guarantees the file handle is
    # closed even if copy_from raises (the old code leaked it in that case)
    with open(results_filename, 'r') as f:
        columns = ['task_id', 'project_id', 'user_id', 'timestamp', 'info']
        p_con.copy_from(f, 'raw_results', sep='\t', columns=columns)
    logging.warning('ALL - save_results_postgres - inserted raw results into table raw_results')
    os.remove(results_filename)
    logging.warning('ALL - save_results_postgres - deleted file: %s' % results_filename)
    # second import all entries into the task table and convert into psql geometry
    sql_insert = '''
        INSERT INTO
          results
        SELECT
          *,
          -- duplicates is set to zero by default, this will be updated on conflict only
          0
        FROM
          raw_results
        ON CONFLICT ON CONSTRAINT "results_pkey"
        DO UPDATE SET duplicates = results.duplicates + 1;
        DROP TABLE IF EXISTS raw_results CASCADE;
        '''
    p_con.query(sql_insert, None)
    logging.warning('ALL - save_results_postgres - inserted results into results table and updated duplicates count')
    del p_con
    return True
def run_transfer_results(modus):
    """
    The function to download results from firebase, upload them to postgres and then delete the transfered results in firebase.

    Parameters
    ----------
    modus : str
        The environment to use for firebase and postgres
        Can be either 'development' or 'production'

    Returns
    -------
    bool
        True if successful, False otherwise

    Notes
    -----
    The ordering is deliberate for crash safety: results are written to a
    local json file first, firebase entries are deleted only after the
    postgres insert has run, and the local file is removed last.
    """
    results_filename = '{}/tmp/results.json'.format(DATA_PATH)
    if not os.path.isdir(DATA_PATH+'/tmp'):
        os.mkdir(DATA_PATH+'/tmp')
    # get dev or production environment for firebase and postgres
    firebase, postgres = get_environment(modus)
    # first check if we have results stored locally, that have not been inserted in postgres
    # (leftover file means a previous run crashed between download and cleanup)
    if os.path.isfile(results_filename):
        # start to import the old results first
        with open(results_filename) as results_file:
            results = json.load(results_file)
        results_txt_filename = results_to_txt(results)
        logging.warning("ALL - run_transfer_results - there are results in %s that we didnt't insert. do it now!" % results_filename)
        save_results_postgres(postgres, results_txt_filename)
        delete_firebase_results(firebase, results)
        os.remove(results_filename)
        logging.warning('ALL - run_transfer_results - removed "results.json" file')
    fb_db = firebase.database()
    # this tries to set the max pool connections to 100
    # NOTE(review): mounting on fb_db.requests relies on pyrebase exposing its
    # requests session — confirm against the pyrebase version in use
    adapter = requests.adapters.HTTPAdapter(max_retries=5, pool_connections=100, pool_maxsize=100)
    for scheme in ('http://', 'https://'):
        fb_db.requests.mount(scheme, adapter)
    # download all results and save as in json file to avoid data loss when script fails
    all_results = fb_db.child("results").get().val()
    del fb_db
    logging.warning('ALL - run_transfer_results - downloaded all results from firebase')
    # test if there are any results to transfer
    if all_results:
        with open(results_filename, 'w') as fp:
            json.dump(all_results, fp)
        logging.warning('ALL - run_transfer_results - wrote results data to %s' % results_filename)
        results_txt_filename = results_to_txt(all_results)
        save_results_postgres(postgres, results_txt_filename)
        delete_firebase_results(firebase, all_results)
        os.remove(results_filename)
        logging.warning('ALL - run_transfer_results - removed %s' % results_filename)
    else:
        logging.warning('ALL - run_transfer_results - there are no results to transfer in firebase')
    return True
########################################################################################################################
# EXPORT #
########################################################################################################################
def export_all_projects(firebase):
    """
    The function to export all projects in a json file

    Parameters
    ----------
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication

    Returns
    -------
    bool
        True if successful, False otherwise
    """
    # check if output path for projects is correct and existing
    if not os.path.isdir(DATA_PATH):
        os.mkdir(DATA_PATH)
    fb_db = firebase.database()
    all_projects = fb_db.child("projects").get().val()
    if not all_projects:
        logging.warning("ALL - export_all_projects - no projects in firebase. Can't export")
        return False
    # save projects as json
    output_json_file = '{}/projects.json'.format(DATA_PATH)
    # don't export api key
    for project_id in all_projects.keys():
        try:
            del all_projects[project_id]['info']['apiKey']
        except (KeyError, TypeError):
            # project has no info block or no api key stored
            pass
    with open(output_json_file, 'w') as outfile:
        json.dump(all_projects, outfile, indent=4)
    logging.warning('ALL - export_all_projects - exported projects file: %s' % output_json_file)
    return True
def export_users_and_stats(firebase):
    """
    The function to save users and stats as a json file

    Parameters
    ----------
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication

    Returns
    -------
    bool
        True if successful, False otherwise
    """
    # check if output path for projects is correct and existing
    if not os.path.isdir(DATA_PATH):
        os.mkdir(DATA_PATH)
    fb_db = firebase.database()
    all_users = fb_db.child("users").get().val()
    if not all_users:
        logging.warning("ALL - export_users_and_stats - no users in firebase. Can't export")
        return False
    # compute stats from user data and save in dict
    stats = {
        'users': len(all_users),
        'totalDistanceMappedInSqKm': 0,
        'totalContributionsByUsers': 0
    }
    for user in all_users:
        try:
            # for some user there might be no distance attribute, if they didn't map anything etc.
            stats['totalDistanceMappedInSqKm'] += all_users[user]['distance']
            stats['totalContributionsByUsers'] += all_users[user]['contributions']
        except (KeyError, TypeError):
            # user without distance/contributions contributes nothing to the totals
            pass
    # export users as json file
    output_json_file = '{}/users.json'.format(DATA_PATH)
    with open(output_json_file, 'w') as outfile:
        json.dump(all_users, outfile, indent=4)
    logging.warning('ALL - export_users_and_stats - exported users file: %s' % output_json_file)
    # export stats as json file
    output_json_file = '{}/stats.json'.format(DATA_PATH)
    with open(output_json_file, 'w') as outfile:
        json.dump(stats, outfile, indent=4)
    logging.warning('ALL - export_users_and_stats - exported stats file: %s' % output_json_file)
    return True
def run_export(modus: str, filter: Union[str, list])-> list:
    """
    Export per-project progress and results plus global users/stats as json
    files for the mapswipe api.

    Files produced per run: ``progress_<project_id>.json`` (timestamps,
    progress and contributor counts), ``results_<project_id>.json``
    (per-task aggregated decisions), ``users.json`` (contributions,
    distance and username per user) and ``stats.json`` (user count and
    global totals).

    Parameters
    ----------
    modus : str
        The environment to use for firebase and postgres
        Can be either 'development' or 'production'
    filter : str or list
        The filter for the projects.
        Can be either 'all', 'active', 'not_finished' or a list of project ids as integer

    Returns
    -------
    exported_projects : list
        The list of all projects ids for projects which have been updated
    """
    firebase, postgres = get_environment(modus)
    project_list = get_projects(firebase, postgres, filter)
    logging.warning('ALL - run_export - got %s projects to export. Filter is set for %s projects' % (len(project_list), filter))
    exported_projects = []
    for project in project_list:
        # per-project exports first ...
        project.export_progress()
        logging.warning('%s - run_export - progress successfully exported' % project.id)
        project.export_results(postgres)
        logging.warning('%s - run_export - results successfully exported' % project.id)
        exported_projects.append(project.id)
    # ... then the global files
    export_all_projects(firebase)
    export_users_and_stats(firebase)
    return exported_projects
########################################################################################################################
# DELETE PROJECTS #
########################################################################################################################
def delete_project_firebase(project_id, import_key, firebase):
    """
    Delete the project, its groups and its import entry in firebase.

    Parameters
    ----------
    project_id : int
        the id of the project
    import_key : str
        the key for the import as depicted in firebase
    firebase : pyrebase firebase object
        initialized firebase app with admin authentication

    Returns
    -------
    bool
        True if successful. False otherwise
    """
    fb_db = firebase.database()
    # setting a path to None in a multi-location update removes it; this way
    # all three entries are handled by a single update call
    removal_paths = [
        "projects/{}/".format(project_id),
        "groups/{}/".format(project_id),
        "imports/{}/".format(import_key),
    ]
    fb_db.update({path: None for path in removal_paths})
    logging.warning('%s - delete_project_firebase - deleted project and groups and import in firebase' % project_id)
    return True
def delete_project_postgres(project_id, import_key, postgres):
    """
    The function to delete results, tasks, groups, import of project in postgres.

    Parameters
    ----------
    project_id : int
        the id of the project
    import_key : str
        the key for the import as depicted in firebase
    postgres : database connection class
        The database connection to postgres database

    Returns
    -------
    bool
        True if successful. False otherwise.

    TODO:
    -----
    Handle exception:
        pypostgres.err.InternalError: (1205, 'Lock wait timeout exceeded; try restarting transaction')
    """
    p_con = postgres()
    # fix: the imports DELETE was missing its terminating semicolon, so the
    # statement ran into END TRANSACTION and failed with a syntax error
    sql_insert = """
        BEGIN TRANSACTION;
        DELETE FROM projects WHERE project_id = %s;
        DELETE FROM results WHERE project_id = %s;
        DELETE FROM tasks WHERE project_id = %s;
        DELETE FROM groups WHERE project_id = %s;
        DELETE FROM imports WHERE import_id = %s;
        END TRANSACTION;
    """
    data = [int(project_id), int(project_id), int(project_id), int(project_id), str(import_key)]
    p_con.query(sql_insert, data)
    del p_con
    # log under the correct function name (was 'delete_results_postgres')
    logging.warning('%s - delete_project_postgres - deleted all results, tasks, groups and import and project in postgres' % project_id)
    return True
def delete_local_files(project_id, import_key):
    """
    The function to delete all local files of this project at the server.

    Parameters
    ----------
    project_id : int
        the id of the project
    import_key : str
        the key for the import as depicted in firebase

    Returns
    -------
    deleted_files : list
        a list with the names of the deleted files
    """
    deleted_files = []
    # every file this project/import may have left behind on disk
    file_list = [
        '{}/results/results_{}.json'.format(DATA_PATH, project_id),
        '{}/input_geometries/raw_input_{}.geojson'.format(DATA_PATH, import_key),
        '{}/input_geometries/raw_input_{}.kml'.format(DATA_PATH, import_key),
        '{}/input_geometries/valid_input_{}.geojson'.format(DATA_PATH, import_key),
        '{}/input_geometries/valid_input_{}.kml'.format(DATA_PATH, import_key),
        '{}/progress/progress_{}.json'.format(DATA_PATH, project_id),
    ]
    for filepath in file_list:
        if os.path.isfile(filepath):
            os.remove(filepath)
            deleted_files.append(filepath)
    # log under the correct function name (was 'delete_project_firebase')
    logging.warning('%s - delete_local_files - deleted local files: %s' % (project_id, deleted_files))
    return deleted_files
def run_delete(modus, list):
    """
    Delete a list of projects and all corresponding information
    (results, tasks, groups) in firebase and postgres.

    Parameters
    ----------
    modus : str
        The environment to use for firebase and postgres.
        Can be either 'development' or 'production'
    list : list
        The ids of the projects to delete

    Returns
    -------
    deleted_projects : list
        The ids of all projects which have been deleted
    """
    # Resolve dev or production environment for firebase and postgres.
    firebase, postgres = get_environment(modus)

    deleted_projects = []
    if not list:
        # Nothing to do without an input list; report and return empty result.
        logging.warning('ALL - run_delete - no input list provided.')
        return deleted_projects

    for proj in get_projects(firebase, postgres, list):
        proj.delete_project(firebase, postgres)
        deleted_projects.append(proj.id)
    return deleted_projects
########################################################################################################################
# ARCHIVE PROJECTS #
########################################################################################################################
def run_archive(modus, list):
    """
    Archive a list of projects and their corresponding information (groups)
    to reduce storage in firebase.

    Parameters
    ----------
    modus : str
        The environment to use for firebase and postgres.
        Can be either 'development' or 'production'
    list : list
        The ids of the projects to archive

    Returns
    -------
    archived_projects : list
        The ids of all projects which have been archived
    """
    # get dev or production environment for firebase and postgres
    firebase, postgres = get_environment(modus)
    archived_projects = []
    if not list:
        logging.warning('ALL - run_archive - no input list provided.')
        # Consistency fix: return an empty list (as documented and as
        # run_delete does) instead of the previous bool ``False``.
        # An empty list is still falsy, so truthiness checks keep working.
        return archived_projects
    project_list = get_projects(firebase, postgres, list)
    # Fix: warn once instead of once per project.
    logging.warning('ALL - run_archive - currently not implemented.')
    for proj in project_list:
        # TODO implement archive function on project level
        # proj.archive_project(firebase, postgres)
        # archived_projects.append(proj.id)
        pass
    return archived_projects
if __name__ == '__main__':
    # This module is meant to be imported; it has no standalone behaviour yet.
    pass
|
<filename>tests/python_to_cpp/other/1.n5110_BMP_converter.py
# [https://github.com/weewStack/Python-projects/blob/master/000-BMP-Converter/n5110_BMP_converter.py <- https://github.com/weewStack/Python-projects <- https://youtu.be/0Kwqdkhgbfw <- https://stackoverflow.com/questions/10439104/reading-bmp-files-in-python <- google:‘python bmp’]
import sys # To get the data from the argument
import struct # To unpack the data from the header
from typing import List
if __name__ == '__main__':
    # 1- Get the base file name from the command line (extension stripped).
    file_name = sys.argv[1]
    file_name = file_name.split(".")[0]

    # 2- Read the BMP header fields we need.
    # Fix: a context manager guarantees the input file is closed
    # (the original handle was never closed).
    with open(file_name + '.bmp', 'rb') as bmp:
        # Pixel-data offset: 4-byte little-endian unsigned int at byte 10.
        bmp.seek(10, 0)
        offset = struct.unpack('I', bmp.read(4))[0]
        # Width and height: two 4-byte unsigned ints starting at byte 18.
        bmp.seek(18, 0)
        bmp_w = struct.unpack('I', bmp.read(4))[0]
        bmp_h = struct.unpack('I', bmp.read(4))[0]
        print(bmp_h, bmp_w)
        # Raw image size in bytes at byte 34.
        bmp.seek(34, 0)
        bmp_s = struct.unpack('I', bmp.read(4))[0]
        # Bytes per stored row (assumes 1 bit/pixel monochrome data).
        bmp_b = int(bmp_s / bmp_h)
        print(bmp_h, bmp_w, bmp_s, bmp_b)

        # 3- Read the pixel data, one row at a time.
        bmp.seek(offset, 0)
        bmp_list: List[str] = []    # bit rows, bottom-up as stored in the BMP
        bmp_list_v: List[str] = []  # same rows with '0' blanked, for preview
        for line in range(bmp_h):
            bmp_line = ''
            for byte in range(bmp_b):
                bmp_byte = bmp.read(1)
                # Invert (255 - value) so ink pixels become 1-bits.
                bmp_line += bin(255 - struct.unpack('B', bmp_byte)[0])[2:].zfill(8)
            bmp_list.append(bmp_line[:bmp_w])
            bmp_list_v.append(bmp_line[:bmp_w].replace("0", " "))

    # BMP stores rows bottom-up; reverse for a top-down console preview.
    bmp_list_v.reverse()
    for line in bmp_list_v:
        print(line)

    # 4- Repack for the n5110: each output byte holds 8 vertical pixels.
    n5110_array: List[List[str]] = []
    for line in range(0, bmp_h, 8):
        n5110_line: List[str] = []
        for bit_num in range(bmp_w):
            byte_word = ""
            for bit in range(line, line + 8):
                # Pad with zeros past the bottom edge of the image.
                if bit > bmp_h - 1:
                    byte_word += "0"
                else:
                    byte_word += bmp_list[bit][bit_num]
            # Note: the original appended '.upper().lower()', which is a
            # no-op on hex digits -- hex() already yields lowercase.
            n5110_line.append('0x' + hex(int(byte_word, 2))[2:])
        n5110_array.append(n5110_line)
    n5110_array.reverse()

    # 5- Save the array as a C source snippet.
    # Fix: context manager flushes and closes the output file.
    with open(file_name + '.txt', 'w', newline="\n") as text_file:
        text_file.write(
            'static unsigned short ' + file_name + '_rows = ' + str(len(n5110_array)) + ';\n'
        )
        text_file.write(
            'static unsigned short ' + file_name + '_cols = ' + str(len(n5110_array[0])) + ';\n'
        )
        text_file.write('static unsigned char ' + file_name + '[] =\n')
        text_file.write('{\n')
        for l_cnt, lines in enumerate(n5110_array):
            for cnt, hexa in enumerate(lines):
                text_file.write(hexa)
                if cnt < len(lines) - 1:
                    text_file.write(',')
            if l_cnt < len(n5110_array) - 1:
                text_file.write(',\n')
            else:
                text_file.write('\n')
        text_file.write('};')
|
<filename>metashare/accounts/forms.py
from django import forms
from metashare.accounts.models import UserProfile, EditorGroupApplication, \
OrganizationApplication, Organization, OrganizationManagers, EditorGroup, \
EditorGroupManagers
from django.conf import settings
from django.contrib.admin import widgets
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
class ModelForm(forms.ModelForm):
    """
    Base form for META-SHARE model forms -- disables the colon after a label,
    and formats error messages as expected by the templates.
    """
    required_css_class = 'required'
    error_css_class = 'error'

    def __init__(self, *args, **kwargs):
        super(ModelForm, self).__init__(*args, **kwargs)
        # Suppress the default ':' label suffix.
        self.label_suffix = ''

    def as_table(self):
        "Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
        return self._html_output(
            errors_on_separate_row = False,
            normal_row = u'<tr%(html_class_attr)s><th>%(label)s%(errors)s</th><td>%(field)s%(help_text)s</td></tr>',
            error_row = u'<tr><td colspan="2">%s</td></tr>',
            row_ender = u'</td></tr>',
            help_text_html = u'<br /><span class="helptext">%s</span>')
class Form(forms.Form):
    """
    Base form for META-SHARE forms -- disables the colon after a label,
    and formats error messages as expected by the templates.
    """
    required_css_class = 'required'
    error_css_class = 'error'

    def __init__(self, *args, **kwargs):
        super(Form, self).__init__(*args, **kwargs)
        # Suppress the default ':' label suffix.
        self.label_suffix = ''

    def as_table(self):
        "Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
        return self._html_output(
            errors_on_separate_row = False,
            normal_row = u'<tr%(html_class_attr)s><th>%(label)s%(errors)s</th><td>%(field)s%(help_text)s</td></tr>',
            error_row = u'<tr><td colspan="2">%s</td></tr>',
            row_ender = u'</td></tr>',
            help_text_html = u'<br /><span class="helptext">%s</span>')
class RegistrationRequestForm(Form):
    """
    Form used to create user account requests from new users.
    """
    shortname = forms.CharField(User._meta.get_field('username').max_length,
        label=_("Desired account name"))
    first_name = forms.CharField(User._meta.get_field('first_name').max_length,
        label=_("First name"))
    last_name = forms.CharField(User._meta.get_field('last_name').max_length,
        label=_("Last name"))
    email = forms.EmailField(label=_("E-mail"))
    password = forms.CharField(User._meta.get_field('password').max_length,
        label=_("Password"), widget=forms.PasswordInput())
    confirm_password = forms.CharField(
        User._meta.get_field('password').max_length,
        label=_("Password confirmation"), widget=forms.PasswordInput())
    accepted_tos = forms.BooleanField()

    def clean_shortname(self):
        """
        Make sure that the user name is still available.
        """
        _user_name = self.cleaned_data['shortname']
        # Bug fix: the previous bare ``except: pass`` around
        # ``User.objects.get`` swallowed *every* exception and treated it
        # as "name available". An existence query is safer and cheaper.
        if User.objects.filter(username=_user_name).exists():
            raise ValidationError(_('User account name already exists, ' \
                'please choose another one.'))
        return _user_name

    def clean_email(self):
        """
        Make sure that there is no account yet registered with this email.
        """
        _email = self.cleaned_data['email']
        # Bug fix: the old bare except also swallowed
        # ``MultipleObjectsReturned``, so an e-mail already used by two
        # accounts was wrongly accepted for a third registration.
        if User.objects.filter(email=_email).exists():
            raise ValidationError(_('There is already an account registered ' \
                'with this e-mail address.'))
        return _email

    def clean_confirm_password(self):
        """
        Make sure that the password confirmation is the same as password.
        """
        pswrd = self.cleaned_data.get('password', None)
        pswrd_conf = self.cleaned_data['confirm_password']
        if pswrd != pswrd_conf:
            raise ValidationError('The two password fields did not match.')
        return pswrd
# cfedermann: possible extensions for future improvements.
# - add validation for shortname for forbidden characters
class ContactForm(Form):
    """
    Form used to contact the superusers of the META-SHARE node.
    """
    # Minimum lengths discourage empty subjects and one-line messages;
    # the custom error messages explain the requirement to the user.
    subject = forms.CharField(min_length=6, max_length=80,
        error_messages={'min_length': _('Please provide a meaningful and '
        'sufficiently indicative subject.')})
    message = forms.CharField(min_length=30, max_length=2500,
        widget=forms.Textarea, error_messages={'min_length': _('Your message '
        'appears to be rather short. Please make sure to phrase your '
        'request as precise as possible. This will help us to process it '
        'as quick as possible.')})
class ResetRequestForm(Form):
    """
    Form used to reset an existing user account.
    """
    username = forms.CharField(max_length=30)
    email = forms.EmailField()

    def clean(self):
        """Verify that the username/email pair matches an existing account."""
        cleaned_data = self.cleaned_data
        username = cleaned_data.get("username")
        email = cleaned_data.get("email")
        # Only check the combination once both fields validated individually.
        if username and email:
            matching_users = User.objects.filter(username=username, email=email)
            if not matching_users:
                raise forms.ValidationError('Not a valid username-email combination.')
        return cleaned_data
class UserProfileForm(ModelForm):
    """
    Form used to update the user account profile information.
    """
    class Meta:
        """
        Meta class connecting to UserProfile object model.
        """
        model = UserProfile
        # Internal bookkeeping fields are not user-editable.
        exclude = ('user', 'modified', 'uuid', 'default_editor_groups')
class EditorGroupApplicationForm(ModelForm):
    """
    Form used to apply to new editor groups membership.
    """
    class Meta:
        """
        Meta class connecting to EditorGroupApplication object model.
        """
        model = EditorGroupApplication
        exclude = ('user', 'created')

    def __init__(self, editor_group_qs, *args, **kwargs):
        """
        Initializes the `EditorGroupApplicationForm`, restricting the
        selectable editor groups to the given query set.
        """
        super(EditorGroupApplicationForm, self).__init__(*args, **kwargs)
        # Narrow the ModelChoiceField to the caller-provided groups.
        editor_group_field = self.fields['editor_group']
        editor_group_field.queryset = editor_group_qs
class UpdateDefaultEditorGroupForm(ModelForm):
    """
    Form used to update default editor groups.
    """
    # Multi-select rendered with the admin's two-pane filtered widget;
    # the actual queryset and initial selection are injected in __init__.
    default_editor_groups = forms.ModelMultipleChoiceField([],
        widget=widgets.FilteredSelectMultiple(_("default editor groups"),
                                              is_stacked=False),
        required=False)

    class Media:
        css = {
            # required by the FilteredSelectMultiple widget
            'all': ['{}css/widgets.css'.format(settings.ADMIN_MEDIA_PREFIX)],
        }
        # required by the FilteredSelectMultiple widget
        js = ['/{}admin/jsi18n/'.format(settings.DJANGO_BASE)]

    class Meta:
        """
        Meta class connecting to UserProfile object model.
        """
        model = UserProfile
        exclude = ('user', 'modified', 'uuid', 'birthdate', 'affiliation',
                   'position', 'homepage')

    def __init__(self, available_editor_group, chosen_editor_group, *args, **kwargs):
        """
        Initializes the `UpdateDefaultEditorGroupForm` with the editor groups
        of the given query set.
        """
        super(UpdateDefaultEditorGroupForm, self).__init__(*args, **kwargs)
        # If there is a list of editor groups, then modify the ModelChoiceField
        self.fields['default_editor_groups'].queryset = available_editor_group
        self.fields['default_editor_groups'].initial = chosen_editor_group
class OrganizationApplicationForm(ModelForm):
    """
    Form used to apply to new organizations membership.
    """
    class Meta:
        """
        Meta class connecting to OrganizationApplication object model.
        """
        model = OrganizationApplication
        exclude = ('user', 'created')

    def __init__(self, organization_qs, *args, **kwargs):
        """
        Initializes the `OrganizationApplicationForm`, restricting the
        selectable organizations to the given query set.
        """
        super(OrganizationApplicationForm, self).__init__(*args, **kwargs)
        # Narrow the ModelChoiceField to the caller-provided organizations.
        organization_field = self.fields['organization']
        organization_field.queryset = organization_qs
class EditorGroupForm(ModelForm):
    """
    Form used to render the add/change admin views for `EditorGroup` model
    instances.
    """
    class Meta:
        model = EditorGroup
        widgets = {
            # Permissions are fixed for editor groups; hide them from editing.
            'permissions': forms.widgets.MultipleHiddenInput
        }
class EditorGroupManagersForm(ModelForm):
    """
    Form used to render the add/change admin views for `EditorGroupManagers`
    model instances.
    """
    class Meta:
        model = EditorGroupManagers
        widgets = {
            # Permissions are fixed for manager groups; hide them from editing.
            'permissions': forms.widgets.MultipleHiddenInput
        }
class OrganizationForm(ModelForm):
    """
    Form used to render the add/change admin views for `Organization` model
    instances.
    """
    class Meta:
        model = Organization
        widgets = {
            # Editable permissions via the admin's two-pane filtered widget.
            'permissions': widgets.FilteredSelectMultiple(
                Organization._meta.get_field('permissions').verbose_name, False)
        }
class OrganizationManagersForm(ModelForm):
    """
    Form used to render the add/change admin views for `OrganizationManagers`
    model instances.
    """
    class Meta:
        model = OrganizationManagers
        widgets = {
            # Editable permissions via the admin's two-pane filtered widget.
            'permissions': widgets.FilteredSelectMultiple(OrganizationManagers \
                ._meta.get_field('permissions').verbose_name, False)
        }
|
<gh_stars>10-100
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Basic tests for CurvesROIWidget"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "07/03/2018"
from silx.gui import qt
from silx.gui.plot.stats import stats
from silx.gui.plot import StatsWidget
from silx.gui.plot.stats import statshandler
from silx.gui.utils.testutils import TestCaseQt, SignalListener
from silx.gui.plot import Plot1D, Plot2D
from silx.gui.plot3d.SceneWidget import SceneWidget
from silx.gui.plot.items.roi import RectangleROI, PolygonROI
from silx.gui.plot.tools.roi import RegionOfInterestManager
from silx.gui.plot.stats.stats import Stats
from silx.gui.plot.CurvesROIWidget import ROI
from silx.utils.testutils import ParametricTestCase
import unittest
import logging
import numpy
# Module-level logger for this test module.
_logger = logging.getLogger(__name__)
class TestStatsBase(object):
    """Base class for stats TestCase.

    Builds one stats context per supported item kind (curve, image,
    scatter) in setUp and tears the plots down explicitly.
    """
    def setUp(self):
        self.createCurveContext()
        self.createImageContext()
        self.createScatterContext()

    def tearDown(self):
        # Close and drop the plots so the Qt widgets are actually destroyed.
        self.plot1d.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.plot1d.close()
        del self.plot1d
        self.plot2d.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.plot2d.close()
        del self.plot2d
        self.scatterPlot.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.scatterPlot.close()
        del self.scatterPlot

    def createCurveContext(self):
        # Curve y = x for x in [0, 20) on a 1D plot.
        self.plot1d = Plot1D()
        x = range(20)
        y = range(20)
        self.plot1d.addCurve(x, y, legend='curve0')
        self.curveContext = stats._CurveContext(
            item=self.plot1d.getCurve('curve0'),
            plot=self.plot1d,
            onlimits=False,
            roi=None)

    def createScatterContext(self):
        # Small scatter with explicit x, y and value arrays.
        self.scatterPlot = Plot2D()
        lgd = 'scatter plot'
        self.xScatterData = numpy.array([0, 2, 3, 20, 50, 60, 36])
        self.yScatterData = numpy.array([2, 3, 4, 26, 69, 6, 18])
        self.valuesScatterData = numpy.array([5, 6, 7, 10, 90, 20, 5])
        self.scatterPlot.addScatter(self.xScatterData, self.yScatterData,
                                    self.valuesScatterData, legend=lgd)
        self.scatterContext = stats._ScatterContext(
            item=self.scatterPlot.getScatter(lgd),
            plot=self.scatterPlot,
            onlimits=False,
            roi=None
        )

    def createImageContext(self):
        # 32 x 128 ramp image: values 0 .. 32*128-1.
        self.plot2d = Plot2D()
        self._imgLgd = 'test image'
        self.imageData = numpy.arange(32*128).reshape(32, 128)
        self.plot2d.addImage(data=self.imageData,
                             legend=self._imgLgd, replace=False)
        self.imageContext = stats._ImageContext(
            item=self.plot2d.getImage(self._imgLgd),
            plot=self.plot2d,
            onlimits=False,
            roi=None
        )

    def getBasicStats(self):
        # Fresh set of the basic statistics used by the test methods.
        return {
            'min': stats.StatMin(),
            'minCoords': stats.StatCoordMin(),
            'max': stats.StatMax(),
            'maxCoords': stats.StatCoordMax(),
            'std': stats.Stat(name='std', fct=numpy.std),
            'mean': stats.Stat(name='mean', fct=numpy.mean),
            'com': stats.StatCOM()
        }
class TestStats(TestStatsBase, TestCaseQt):
    """
    Test :class:`BaseClass` class and inheriting classes
    """
    def setUp(self):
        TestCaseQt.setUp(self)
        TestStatsBase.setUp(self)

    def tearDown(self):
        TestStatsBase.tearDown(self)
        TestCaseQt.tearDown(self)

    def testBasicStatsCurve(self):
        """Test result for simple stats on a curve"""
        _stats = self.getBasicStats()
        xData = yData = numpy.array(range(20))
        self.assertEqual(_stats['min'].calculate(self.curveContext), 0)
        self.assertEqual(_stats['max'].calculate(self.curveContext), 19)
        self.assertEqual(_stats['minCoords'].calculate(self.curveContext), (0,))
        self.assertEqual(_stats['maxCoords'].calculate(self.curveContext), (19,))
        self.assertEqual(_stats['std'].calculate(self.curveContext), numpy.std(yData))
        self.assertEqual(_stats['mean'].calculate(self.curveContext), numpy.mean(yData))
        # Centre of mass: x weighted by y.
        com = numpy.sum(xData * yData) / numpy.sum(yData)
        self.assertEqual(_stats['com'].calculate(self.curveContext), com)

    def testBasicStatsImage(self):
        """Test result for simple stats on an image"""
        _stats = self.getBasicStats()
        self.assertEqual(_stats['min'].calculate(self.imageContext), 0)
        self.assertEqual(_stats['max'].calculate(self.imageContext), 128 * 32 - 1)
        self.assertEqual(_stats['minCoords'].calculate(self.imageContext), (0, 0))
        self.assertEqual(_stats['maxCoords'].calculate(self.imageContext), (127, 31))
        self.assertEqual(_stats['std'].calculate(self.imageContext), numpy.std(self.imageData))
        self.assertEqual(_stats['mean'].calculate(self.imageContext), numpy.mean(self.imageData))
        # Centre of mass computed separately per axis from marginal sums.
        yData = numpy.sum(self.imageData.astype(numpy.float64), axis=1)
        xData = numpy.sum(self.imageData.astype(numpy.float64), axis=0)
        dataXRange = range(self.imageData.shape[1])
        dataYRange = range(self.imageData.shape[0])
        ycom = numpy.sum(yData*dataYRange) / numpy.sum(yData)
        xcom = numpy.sum(xData*dataXRange) / numpy.sum(xData)
        self.assertEqual(_stats['com'].calculate(self.imageContext), (xcom, ycom))

    def testStatsImageAdv(self):
        """Test that scale and origin are taken into account for images"""
        image2Data = numpy.arange(32 * 128).reshape(32, 128)
        self.plot2d.addImage(data=image2Data, legend=self._imgLgd,
                             replace=True, origin=(100, 10), scale=(2, 0.5))
        image2Context = stats._ImageContext(
            item=self.plot2d.getImage(self._imgLgd),
            plot=self.plot2d,
            onlimits=False,
            roi=None,
        )
        _stats = self.getBasicStats()
        self.assertEqual(_stats['min'].calculate(image2Context), 0)
        self.assertEqual(
            _stats['max'].calculate(image2Context), 128 * 32 - 1)
        # Coordinates must be expressed in plot space: origin + index * scale.
        self.assertEqual(
            _stats['minCoords'].calculate(image2Context), (100, 10))
        self.assertEqual(
            _stats['maxCoords'].calculate(image2Context), (127*2. + 100,
                                                           31 * 0.5 + 10))
        self.assertEqual(_stats['std'].calculate(image2Context),
                         numpy.std(self.imageData))
        self.assertEqual(_stats['mean'].calculate(image2Context),
                         numpy.mean(self.imageData))
        yData = numpy.sum(self.imageData, axis=1)
        xData = numpy.sum(self.imageData, axis=0)
        dataXRange = numpy.arange(self.imageData.shape[1], dtype=numpy.float64)
        dataYRange = numpy.arange(self.imageData.shape[0], dtype=numpy.float64)
        ycom = numpy.sum(yData * dataYRange) / numpy.sum(yData)
        ycom = (ycom * 0.5) + 10
        xcom = numpy.sum(xData * dataXRange) / numpy.sum(xData)
        xcom = (xcom * 2.) + 100
        self.assertTrue(numpy.allclose(
            _stats['com'].calculate(image2Context), (xcom, ycom)))

    def testBasicStatsScatter(self):
        """Test result for simple stats on a scatter"""
        _stats = self.getBasicStats()
        self.assertEqual(_stats['min'].calculate(self.scatterContext), 5)
        self.assertEqual(_stats['max'].calculate(self.scatterContext), 90)
        self.assertEqual(_stats['minCoords'].calculate(self.scatterContext), (0, 2))
        self.assertEqual(_stats['maxCoords'].calculate(self.scatterContext), (50, 69))
        self.assertEqual(_stats['std'].calculate(self.scatterContext), numpy.std(self.valuesScatterData))
        self.assertEqual(_stats['mean'].calculate(self.scatterContext), numpy.mean(self.valuesScatterData))
        # Centre of mass of the scatter values in (x, y).
        data = self.valuesScatterData.astype(numpy.float64)
        comx = numpy.sum(self.xScatterData * data) / numpy.sum(data)
        comy = numpy.sum(self.yScatterData * data) / numpy.sum(data)
        self.assertEqual(_stats['com'].calculate(self.scatterContext),
                         (comx, comy))

    def testKindNotManagedByStat(self):
        """Make sure an exception is raised if we try to execute calculate
        of the base class"""
        b = stats.StatBase(name='toto', compatibleKinds='curve')
        with self.assertRaises(NotImplementedError):
            b.calculate(self.imageContext)

    def testKindNotManagedByContext(self):
        """
        Make sure an error is raised if we try to calculate a statistic with
        a context not managed
        """
        myStat = stats.Stat(name='toto', fct=numpy.std, kinds=('curve'))
        myStat.calculate(self.curveContext)
        with self.assertRaises(ValueError):
            myStat.calculate(self.scatterContext)
        with self.assertRaises(ValueError):
            myStat.calculate(self.imageContext)

    def testOnLimits(self):
        # With onlimits=True the stat is computed only on the visible range.
        stat = stats.StatMin()
        self.plot1d.getXAxis().setLimitsConstraints(minPos=2, maxPos=5)
        curveContextOnLimits = stats._CurveContext(
            item=self.plot1d.getCurve('curve0'),
            plot=self.plot1d,
            onlimits=True,
            roi=None)
        self.assertEqual(stat.calculate(curveContextOnLimits), 2)
        self.plot2d.getXAxis().setLimitsConstraints(minPos=32)
        imageContextOnLimits = stats._ImageContext(
            item=self.plot2d.getImage('test image'),
            plot=self.plot2d,
            onlimits=True,
            roi=None)
        self.assertEqual(stat.calculate(imageContextOnLimits), 32)
        self.scatterPlot.getXAxis().setLimitsConstraints(minPos=40)
        scatterContextOnLimits = stats._ScatterContext(
            item=self.scatterPlot.getScatter('scatter plot'),
            plot=self.scatterPlot,
            onlimits=True,
            roi=None)
        self.assertEqual(stat.calculate(scatterContextOnLimits), 20)
class TestStatsFormatter(TestCaseQt):
    """Simple test to check usage of the :class:`StatsFormatter`"""
    def setUp(self):
        TestCaseQt.setUp(self)
        self.plot1d = Plot1D()
        x = range(20)
        y = range(20)
        self.plot1d.addCurve(x, y, legend='curve0')
        self.curveContext = stats._CurveContext(
            item=self.plot1d.getCurve('curve0'),
            plot=self.plot1d,
            onlimits=False,
            roi=None)
        self.stat = stats.StatMin()

    def tearDown(self):
        self.plot1d.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.plot1d.close()
        del self.plot1d
        TestCaseQt.tearDown(self)

    def testEmptyFormatter(self):
        """Make sure a formatter with no formatter definition will return a
        simple cast to str"""
        emptyFormatter = statshandler.StatFormatter()
        self.assertEqual(
            emptyFormatter.format(self.stat.calculate(self.curveContext)), '0.000')

    def testSettedFormatter(self):
        """Make sure a formatter with an explicit format string applies it"""
        formatter = statshandler.StatFormatter(formatter='{0:.3f}')
        self.assertEqual(
            formatter.format(self.stat.calculate(self.curveContext)), '0.000')
class TestStatsHandler(TestCaseQt):
    """Make sure the StatHandler is correctly making the link between
    :class:`StatBase` and :class:`StatFormatter` and checking the API is valid
    """
    def setUp(self):
        TestCaseQt.setUp(self)
        self.plot1d = Plot1D()
        x = range(20)
        y = range(20)
        self.plot1d.addCurve(x, y, legend='curve0')
        self.curveItem = self.plot1d.getCurve('curve0')
        self.stat = stats.StatMin()

    def tearDown(self):
        # Clear the context cache so plots torn down here are not kept alive.
        Stats._getContext.cache_clear()
        self.plot1d.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.plot1d.close()
        self.plot1d = None
        TestCaseQt.tearDown(self)

    def testConstructor(self):
        """Make sure the constructor can deal will all possible arguments:
        * tuple of :class:`StatBase` derivated classes
        * tuple of tuples (:class:`StatBase`, :class:`StatFormatter`)
        * tuple of tuples (str, pointer to function, kind)
        """
        # Plain StatBase instances: no formatter, values cast to str.
        handler0 = statshandler.StatsHandler(
            (stats.StatMin(), stats.StatMax())
        )
        res = handler0.calculate(item=self.curveItem, plot=self.plot1d,
                                 onlimits=False)
        self.assertTrue('min' in res)
        self.assertEqual(res['min'], '0')
        self.assertTrue('max' in res)
        self.assertEqual(res['max'], '19')
        # (stat, formatter) pairs: a None format falls back to str cast.
        handler1 = statshandler.StatsHandler(
            (
                (stats.StatMin(), statshandler.StatFormatter(formatter=None)),
                (stats.StatMax(), statshandler.StatFormatter())
            )
        )
        res = handler1.calculate(item=self.curveItem, plot=self.plot1d,
                                 onlimits=False)
        self.assertTrue('min' in res)
        self.assertEqual(res['min'], '0')
        self.assertTrue('max' in res)
        self.assertEqual(res['max'], '19.000')
        # A None formatter is equivalent to no formatter at all.
        handler2 = statshandler.StatsHandler(
            (
                (stats.StatMin(), None),
                (stats.StatMax(), statshandler.StatFormatter())
            ))
        res = handler2.calculate(item=self.curveItem, plot=self.plot1d,
                                 onlimits=False)
        self.assertTrue('min' in res)
        self.assertEqual(res['min'], '0')
        self.assertTrue('max' in res)
        self.assertEqual(res['max'], '19.000')
        # (name, function) tuples are also accepted.
        handler3 = statshandler.StatsHandler((
            (('amin', numpy.argmin), statshandler.StatFormatter()),
            ('amax', numpy.argmax)
        ))
        res = handler3.calculate(item=self.curveItem, plot=self.plot1d,
                                 onlimits=False)
        self.assertTrue('amin' in res)
        self.assertEqual(res['amin'], '0.000')
        self.assertTrue('amax' in res)
        self.assertEqual(res['amax'], '19')
        # A bare string is not a valid stat definition.
        with self.assertRaises(ValueError):
            statshandler.StatsHandler(('name'))
class TestStatsWidgetWithCurves(TestCaseQt, ParametricTestCase):
"""Basic test for StatsWidget with curves"""
def setUp(self):
TestCaseQt.setUp(self)
self.plot = Plot1D()
self.plot.show()
x = range(20)
y = range(20)
self.plot.addCurve(x, y, legend='curve0')
y = range(12, 32)
self.plot.addCurve(x, y, legend='curve1')
y = range(-2, 18)
self.plot.addCurve(x, y, legend='curve2')
self.widget = StatsWidget.StatsWidget(plot=self.plot)
self.statsTable = self.widget._statsTable
mystats = statshandler.StatsHandler((
stats.StatMin(),
(stats.StatCoordMin(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
stats.StatMax(),
(stats.StatCoordMax(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
stats.StatDelta(),
('std', numpy.std),
('mean', numpy.mean),
stats.StatCOM()
))
self.statsTable.setStats(mystats)
def tearDown(self):
Stats._getContext.cache_clear()
self.plot.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot.close()
self.statsTable = None
self.widget.setAttribute(qt.Qt.WA_DeleteOnClose)
self.widget.close()
self.widget = None
self.plot = None
TestCaseQt.tearDown(self)
def testDisplayActiveItemsSyncOptions(self):
"""
Test that the several option of the sync options are well
synchronized between the different object"""
widget = StatsWidget.StatsWidget(plot=self.plot)
table = StatsWidget.StatsTable(plot=self.plot)
def check_display_only_active_item(only_active):
# check internal value
self.assertIs(widget._statsTable._displayOnlyActItem, only_active)
# self.assertTrue(table._displayOnlyActItem is only_active)
# check gui display
self.assertEqual(widget._options.isActiveItemMode(), only_active)
for displayOnlyActiveItems in (True, False):
with self.subTest(displayOnlyActiveItems=displayOnlyActiveItems):
widget.setDisplayOnlyActiveItem(displayOnlyActiveItems)
# table.setDisplayOnlyActiveItem(displayOnlyActiveItems)
check_display_only_active_item(displayOnlyActiveItems)
check_display_only_active_item(only_active=False)
widget.setAttribute(qt.Qt.WA_DeleteOnClose)
table.setAttribute(qt.Qt.WA_DeleteOnClose)
widget.close()
table.close()
def testInit(self):
"""Make sure all the curves are registred on initialization"""
self.assertEqual(self.statsTable.rowCount(), 3)
def testRemoveCurve(self):
"""Make sure the Curves stats take into account the curve removal from
plot"""
self.plot.removeCurve('curve2')
self.assertEqual(self.statsTable.rowCount(), 2)
for iRow in range(2):
self.assertTrue(self.statsTable.item(iRow, 0).text() in ('curve0', 'curve1'))
self.plot.removeCurve('curve0')
self.assertEqual(self.statsTable.rowCount(), 1)
self.plot.removeCurve('curve1')
self.assertEqual(self.statsTable.rowCount(), 0)
def testAddCurve(self):
"""Make sure the Curves stats take into account the add curve action"""
self.plot.addCurve(legend='curve3', x=range(10), y=range(10))
self.assertEqual(self.statsTable.rowCount(), 4)
def testUpdateCurveFromAddCurve(self):
"""Make sure the stats of the cuve will be removed after updating a
curve"""
self.plot.addCurve(legend='curve0', x=range(10), y=range(10))
self.qapp.processEvents()
self.assertEqual(self.statsTable.rowCount(), 3)
curve = self.plot._getItem(kind='curve', legend='curve0')
tableItems = self.statsTable._itemToTableItems(curve)
self.assertEqual(tableItems['max'].text(), '9')
def testUpdateCurveFromCurveObj(self):
self.plot.getCurve('curve0').setData(x=range(4), y=range(4))
self.qapp.processEvents()
self.assertEqual(self.statsTable.rowCount(), 3)
curve = self.plot._getItem(kind='curve', legend='curve0')
tableItems = self.statsTable._itemToTableItems(curve)
self.assertEqual(tableItems['max'].text(), '3')
def testSetAnotherPlot(self):
plot2 = Plot1D()
plot2.addCurve(x=range(26), y=range(26), legend='new curve')
self.statsTable.setPlot(plot2)
self.assertEqual(self.statsTable.rowCount(), 1)
self.qapp.processEvents()
plot2.setAttribute(qt.Qt.WA_DeleteOnClose)
plot2.close()
plot2 = None
def testUpdateMode(self):
"""Make sure the update modes are well take into account"""
self.plot.setActiveCurve('curve0')
for display_only_active in (True, False):
with self.subTest(display_only_active=display_only_active):
self.widget.setDisplayOnlyActiveItem(display_only_active)
self.plot.getCurve('curve0').setData(x=range(4), y=range(4))
self.widget.setUpdateMode(StatsWidget.UpdateMode.AUTO)
update_stats_action = self.widget._options.getUpdateStatsAction()
# test from api
self.assertEqual(self.widget.getUpdateMode(), StatsWidget.UpdateMode.AUTO)
self.widget.show()
# check stats change in auto mode
self.plot.getCurve('curve0').setData(x=range(4), y=range(-1, 3))
self.qapp.processEvents()
tableItems = self.statsTable._itemToTableItems(self.plot.getCurve('curve0'))
curve0_min = tableItems['min'].text()
self.assertTrue(float(curve0_min) == -1.)
self.plot.getCurve('curve0').setData(x=range(4), y=range(1, 5))
self.qapp.processEvents()
tableItems = self.statsTable._itemToTableItems(self.plot.getCurve('curve0'))
curve0_min = tableItems['min'].text()
self.assertTrue(float(curve0_min) == 1.)
# check stats change in manual mode only if requested
self.widget.setUpdateMode(StatsWidget.UpdateMode.MANUAL)
self.assertEqual(self.widget.getUpdateMode(), StatsWidget.UpdateMode.MANUAL)
self.plot.getCurve('curve0').setData(x=range(4), y=range(2, 6))
self.qapp.processEvents()
tableItems = self.statsTable._itemToTableItems(self.plot.getCurve('curve0'))
curve0_min = tableItems['min'].text()
self.assertTrue(float(curve0_min) == 1.)
update_stats_action.trigger()
tableItems = self.statsTable._itemToTableItems(self.plot.getCurve('curve0'))
curve0_min = tableItems['min'].text()
self.assertTrue(float(curve0_min) == 2.)
def testItemHidden(self):
    """Test that when an item is hidden, the associated stats row is also
    hidden."""
    curve0 = self.plot.getCurve('curve0')
    curve1 = self.plot.getCurve('curve1')
    curve2 = self.plot.getCurve('curve2')
    self.plot.show()
    self.widget.show()
    self.qWaitForWindowExposed(self.widget)
    # All rows visible while all curves are visible.
    self.assertFalse(self.statsTable.isRowHidden(0))
    self.assertFalse(self.statsTable.isRowHidden(1))
    self.assertFalse(self.statsTable.isRowHidden(2))
    curve0.setVisible(False)
    self.qapp.processEvents()
    self.assertTrue(self.statsTable.isRowHidden(0))
    curve0.setVisible(True)
    self.qapp.processEvents()
    self.assertFalse(self.statsTable.isRowHidden(0))
    curve1.setVisible(False)
    self.qapp.processEvents()
    self.assertTrue(self.statsTable.isRowHidden(1))
    # Hiding one curve must not corrupt the stats of the others.
    tableItems = self.statsTable._itemToTableItems(curve2)
    curve2_min = tableItems['min'].text()
    self.assertTrue(float(curve2_min) == -2.)
    curve0.setVisible(False)
    curve1.setVisible(False)
    curve2.setVisible(False)
    self.qapp.processEvents()
    self.assertTrue(self.statsTable.isRowHidden(0))
    self.assertTrue(self.statsTable.isRowHidden(1))
    self.assertTrue(self.statsTable.isRowHidden(2))
class TestStatsWidgetWithImages(TestCaseQt):
    """Basic test for StatsWidget with images"""

    # Legend of the single image used by the tests.
    IMAGE_LEGEND = 'test image'

    def setUp(self):
        TestCaseQt.setUp(self)
        self.plot = Plot2D()
        # 128x128 ramp image: values go from 0 to 128*128 - 1.
        self.plot.addImage(data=numpy.arange(128*128).reshape(128, 128),
                           legend=self.IMAGE_LEGEND, replace=False)
        self.widget = StatsWidget.StatsTable(plot=self.plot)
        mystats = statshandler.StatsHandler((
            (stats.StatMin(), statshandler.StatFormatter()),
            (stats.StatCoordMin(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
            (stats.StatMax(), statshandler.StatFormatter()),
            (stats.StatCoordMax(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
            (stats.StatDelta(), statshandler.StatFormatter()),
            ('std', numpy.std),
            ('mean', numpy.mean),
            (stats.StatCOM(), statshandler.StatFormatter(None))
        ))
        self.widget.setStats(mystats)

    def tearDown(self):
        # Clear the context cache so a test cannot leak state into the next.
        Stats._getContext.cache_clear()
        self.plot.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.plot.close()
        self.widget.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.widget.close()
        self.widget = None
        self.plot = None
        TestCaseQt.tearDown(self)

    def test(self):
        """Check the formatted stats reported for the ramp image."""
        image = self.plot._getItem(
            kind='image', legend=self.IMAGE_LEGEND)
        tableItems = self.widget._itemToTableItems(image)
        maxText = '{0:.3f}'.format((128 * 128) - 1)
        self.assertEqual(tableItems['legend'].text(), self.IMAGE_LEGEND)
        self.assertEqual(tableItems['min'].text(), '0.000')
        self.assertEqual(tableItems['max'].text(), maxText)
        self.assertEqual(tableItems['delta'].text(), maxText)
        self.assertEqual(tableItems['coords min'].text(), '0.0, 0.0')
        self.assertEqual(tableItems['coords max'].text(), '127.0, 127.0')

    def testItemHidden(self):
        """Test that when an item is hidden, the associated stats row is
        also hidden."""
        self.widget.show()
        self.plot.show()
        self.qWaitForWindowExposed(self.widget)
        self.assertFalse(self.widget.isRowHidden(0))
        self.plot.getImage(self.IMAGE_LEGEND).setVisible(False)
        self.qapp.processEvents()
        self.assertTrue(self.widget.isRowHidden(0))
class TestStatsWidgetWithScatters(TestCaseQt):
    """Basic test for StatsWidget with a scatter plot."""

    # Legend of the single scatter item used by the tests.
    SCATTER_LEGEND = 'scatter plot'

    def setUp(self):
        TestCaseQt.setUp(self)
        self.scatterPlot = Plot2D()
        # addScatter(x, y, value): stats are computed on the value array.
        self.scatterPlot.addScatter([0, 1, 2, 20, 50, 60],
                                    [2, 3, 4, 26, 69, 6],
                                    [5, 6, 7, 10, 90, 20],
                                    legend=self.SCATTER_LEGEND)
        self.widget = StatsWidget.StatsTable(plot=self.scatterPlot)
        mystats = statshandler.StatsHandler((
            stats.StatMin(),
            (stats.StatCoordMin(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
            stats.StatMax(),
            (stats.StatCoordMax(), statshandler.StatFormatter(None, qt.QTableWidgetItem)),
            stats.StatDelta(),
            ('std', numpy.std),
            ('mean', numpy.mean),
            stats.StatCOM()
        ))
        self.widget.setStats(mystats)

    def tearDown(self):
        # Clear the context cache so a test cannot leak state into the next.
        Stats._getContext.cache_clear()
        self.scatterPlot.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.scatterPlot.close()
        self.widget.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.widget.close()
        self.widget = None
        self.scatterPlot = None
        TestCaseQt.tearDown(self)

    def testStats(self):
        """Check the stats reported for the scatter values."""
        scatter = self.scatterPlot._getItem(
            kind='scatter', legend=self.SCATTER_LEGEND)
        tableItems = self.widget._itemToTableItems(scatter)
        self.assertEqual(tableItems['legend'].text(), self.SCATTER_LEGEND)
        self.assertEqual(tableItems['min'].text(), '5')
        # Coordinates are reported as 'x, y' of the extremum point.
        self.assertEqual(tableItems['coords min'].text(), '0, 2')
        self.assertEqual(tableItems['max'].text(), '90')
        self.assertEqual(tableItems['coords max'].text(), '50, 69')
        self.assertEqual(tableItems['delta'].text(), '85')
class TestEmptyStatsWidget(TestCaseQt):
    """Check that a StatsWidget can be created and shown without any plot."""

    def test(self):
        statsWidget = StatsWidget.StatsWidget()
        statsWidget.show()
        self.qWaitForWindowExposed(statsWidget)
class TestLineWidget(TestCaseQt):
    """Some test for the StatsLineWidget."""

    def setUp(self):
        TestCaseQt.setUp(self)
        # A single 'min' statistic is enough for these tests.
        mystats = statshandler.StatsHandler((
            (stats.StatMin(), statshandler.StatFormatter()),
        ))
        self.plot = Plot1D()
        self.plot.show()
        self.x = range(20)
        self.y0 = range(20)
        self.curve0 = self.plot.addCurve(self.x, self.y0, legend='curve0')
        self.y1 = range(12, 32)
        self.plot.addCurve(self.x, self.y1, legend='curve1')
        self.y2 = range(-2, 18)
        self.plot.addCurve(self.x, self.y2, legend='curve2')
        self.widget = StatsWidget.BasicGridStatsWidget(plot=self.plot,
                                                       kind='curve',
                                                       stats=mystats)

    def tearDown(self):
        # Clear the context cache so a test cannot leak state into the next.
        Stats._getContext.cache_clear()
        self.qapp.processEvents()
        self.plot.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.plot.close()
        self.widget.setPlot(None)
        self.widget._lineStatsWidget._statQlineEdit.clear()
        self.widget.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.widget.close()
        self.widget = None
        self.plot = None
        TestCaseQt.tearDown(self)

    def testProcessing(self):
        """Check the displayed 'min' value for the active item, with and
        without the 'stats on visible data' option."""
        self.widget._lineStatsWidget.setStatsOnVisibleData(False)
        self.qapp.processEvents()
        self.plot.setActiveCurve(legend='curve0')
        self.assertTrue(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '0.000')
        self.plot.setActiveCurve(legend='curve1')
        self.assertTrue(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '12.000')
        # Restrict the X range: with stats-on-visible-data the min changes.
        self.plot.getXAxis().setLimitsConstraints(minPos=2, maxPos=5)
        self.widget.setStatsOnVisibleData(True)
        self.qapp.processEvents()
        self.assertTrue(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '14.000')
        self.plot.setActiveCurve(None)
        self.assertIsNone(self.plot.getActiveCurve())
        self.widget.setStatsOnVisibleData(False)
        self.qapp.processEvents()
        self.assertFalse(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '14.000')
        # Switching the widget to 'image' kind should track the image stats.
        self.widget.setKind('image')
        self.plot.addImage(numpy.arange(100*100).reshape(100, 100) + 0.312)
        self.qapp.processEvents()
        self.assertTrue(self.widget._lineStatsWidget._statQlineEdit['min'].text() == '0.312')

    def testUpdateMode(self):
        """Make sure the update modes are correctly taken into account."""
        self.plot.setActiveCurve(self.curve0)
        _autoRB = self.widget._options._autoRB
        _manualRB = self.widget._options._manualRB
        # test from api
        self.widget.setUpdateMode(StatsWidget.UpdateMode.AUTO)
        self.assertTrue(_autoRB.isChecked())
        self.assertFalse(_manualRB.isChecked())
        # check stats change in auto mode
        curve0_min = self.widget._lineStatsWidget._statQlineEdit['min'].text()
        new_y = numpy.array(self.y0) - 2.56
        self.plot.addCurve(x=self.x, y=new_y, legend=self.curve0)
        curve0_min2 = self.widget._lineStatsWidget._statQlineEdit['min'].text()
        self.assertTrue(curve0_min != curve0_min2)
        # check stats change in manual mode only if requested
        self.widget.setUpdateMode(StatsWidget.UpdateMode.MANUAL)
        self.assertFalse(_autoRB.isChecked())
        self.assertTrue(_manualRB.isChecked())
        new_y = numpy.array(self.y0) - 1.2
        self.plot.addCurve(x=self.x, y=new_y, legend=self.curve0)
        curve0_min3 = self.widget._lineStatsWidget._statQlineEdit['min'].text()
        self.assertTrue(curve0_min3 == curve0_min2)
        self.widget._options._updateRequested()
        curve0_min3 = self.widget._lineStatsWidget._statQlineEdit['min'].text()
        self.assertTrue(curve0_min3 != curve0_min2)
        # test from gui
        self.widget.showRadioButtons(True)
        self.widget._options._autoRB.toggle()
        self.assertTrue(_autoRB.isChecked())
        self.assertFalse(_manualRB.isChecked())
        self.widget._options._manualRB.toggle()
        self.assertFalse(_autoRB.isChecked())
        self.assertTrue(_manualRB.isChecked())
class TestUpdateModeWidget(TestCaseQt):
    """Test UpdateModeWidget"""

    def setUp(self):
        TestCaseQt.setUp(self)
        self.widget = StatsWidget.UpdateModeWidget(parent=None)

    def tearDown(self):
        self.widget.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.widget.close()
        self.widget = None
        TestCaseQt.tearDown(self)

    def testSignals(self):
        """Test the signal emission of the widget"""
        self.widget.setUpdateMode(StatsWidget.UpdateMode.AUTO)
        modeChangedListener = SignalListener()
        manualUpdateListener = SignalListener()
        self.widget.sigUpdateModeChanged.connect(modeChangedListener)
        self.widget.sigUpdateRequested.connect(manualUpdateListener)
        # Setting the mode to its current value must not emit the signal.
        self.widget.setUpdateMode(StatsWidget.UpdateMode.AUTO)
        self.assertEqual(self.widget.getUpdateMode(), StatsWidget.UpdateMode.AUTO)
        self.assertEqual(modeChangedListener.callCount(), 0)
        self.qapp.processEvents()
        self.widget.setUpdateMode(StatsWidget.UpdateMode.MANUAL)
        self.assertEqual(self.widget.getUpdateMode(), StatsWidget.UpdateMode.MANUAL)
        self.qapp.processEvents()
        self.assertEqual(modeChangedListener.callCount(), 1)
        self.assertEqual(manualUpdateListener.callCount(), 0)
        # The 'update' push button requests a manual update each click.
        self.widget._updatePB.click()
        self.widget._updatePB.click()
        self.assertEqual(manualUpdateListener.callCount(), 2)
        self.widget._autoRB.setChecked(True)
        self.assertEqual(modeChangedListener.callCount(), 2)
        # In AUTO mode the button click must not request manual updates.
        self.widget._updatePB.click()
        self.assertEqual(manualUpdateListener.callCount(), 2)
class TestStatsROI(TestStatsBase, TestCaseQt):
    """
    Test stats based on ROI
    """

    def setUp(self):
        TestCaseQt.setUp(self)
        # ROIs must exist before the base class creates the contexts.
        self.createRois()
        TestStatsBase.setUp(self)
        self.createHistogramContext()
        self.roiManager = RegionOfInterestManager(self.plot2d)
        self.roiManager.addRoi(self._2Droi_rect)
        self.roiManager.addRoi(self._2Droi_poly)

    def tearDown(self):
        self.roiManager.clear()
        self.roiManager = None
        self._1Droi = None
        self._2Droi_rect = None
        self._2Droi_poly = None
        self.plotHisto.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.plotHisto.close()
        self.plotHisto = None
        TestStatsBase.tearDown(self)
        TestCaseQt.tearDown(self)

    def createRois(self):
        # One 1D ROI for curve/histogram contexts and two 2D ROIs
        # (rectangle and polygon) for image contexts.
        self._1Droi = ROI(name='my1DRoi', fromdata=2.0, todata=5.0)
        self._2Droi_rect = RectangleROI()
        self._2Droi_rect.setGeometry(size=(10, 10), origin=(10, 0))
        self._2Droi_poly = PolygonROI()
        points = numpy.array(((0, 20), (0, 0), (10, 0)))
        self._2Droi_poly.setPoints(points=points)

    def createCurveContext(self):
        TestStatsBase.createCurveContext(self)
        self.curveContext = stats._CurveContext(
            item=self.plot1d.getCurve('curve0'),
            plot=self.plot1d,
            onlimits=False,
            roi=self._1Droi)

    def createHistogramContext(self):
        self.plotHisto = Plot1D()
        x = range(20)
        y = range(20)
        self.plotHisto.addHistogram(x, y, legend='histo0')
        self.histoContext = stats._HistogramContext(
            item=self.plotHisto.getHistogram('histo0'),
            plot=self.plotHisto,
            onlimits=False,
            roi=self._1Droi)

    def createScatterContext(self):
        TestStatsBase.createScatterContext(self)
        self.scatterContext = stats._ScatterContext(
            item=self.scatterPlot.getScatter('scatter plot'),
            plot=self.scatterPlot,
            onlimits=False,
            roi=self._1Droi
        )

    def createImageContext(self):
        TestStatsBase.createImageContext(self)
        self.imageContext = stats._ImageContext(
            item=self.plot2d.getImage(self._imgLgd),
            plot=self.plot2d,
            onlimits=False,
            roi=self._2Droi_rect
        )
        self.imageContext_2 = stats._ImageContext(
            item=self.plot2d.getImage(self._imgLgd),
            plot=self.plot2d,
            onlimits=False,
            roi=self._2Droi_poly
        )

    def testErrors(self):
        """Check that invalid context construction is rejected."""
        # test if onlimits is True and give also a roi
        with self.assertRaises(ValueError):
            stats._CurveContext(item=self.plot1d.getCurve('curve0'),
                                plot=self.plot1d,
                                onlimits=True,
                                roi=self._1Droi)
        # test if is a curve context and give an invalid 2D roi
        with self.assertRaises(TypeError):
            stats._CurveContext(item=self.plot1d.getCurve('curve0'),
                                plot=self.plot1d,
                                onlimits=False,
                                roi=self._2Droi_rect)

    def testBasicStatsCurve(self):
        """Test result for simple stats on a curve"""
        _stats = self.getBasicStats()
        xData = yData = numpy.array(range(0, 10))
        # The 1D ROI restricts the curve to x in [2, 5].
        self.assertEqual(_stats['min'].calculate(self.curveContext), 2)
        self.assertEqual(_stats['max'].calculate(self.curveContext), 5)
        self.assertEqual(_stats['minCoords'].calculate(self.curveContext), (2,))
        self.assertEqual(_stats['maxCoords'].calculate(self.curveContext), (5,))
        self.assertEqual(_stats['std'].calculate(self.curveContext), numpy.std(yData[2:6]))
        self.assertEqual(_stats['mean'].calculate(self.curveContext), numpy.mean(yData[2:6]))
        com = numpy.sum(xData[2:6] * yData[2:6]) / numpy.sum(yData[2:6])
        self.assertEqual(_stats['com'].calculate(self.curveContext), com)

    def testBasicStatsImageRectRoi(self):
        """Test result for simple stats on an image"""
        # The 10x10 rectangle ROI keeps an 11x11 block of pixels.
        self.assertEqual(self.imageContext.values.compressed().size, 121)
        _stats = self.getBasicStats()
        self.assertEqual(_stats['min'].calculate(self.imageContext), 10)
        self.assertEqual(_stats['max'].calculate(self.imageContext), 1300)
        self.assertEqual(_stats['minCoords'].calculate(self.imageContext), (10, 0))
        self.assertEqual(_stats['maxCoords'].calculate(self.imageContext), (20.0, 10.0))
        self.assertAlmostEqual(_stats['std'].calculate(self.imageContext),
                               numpy.std(self.imageData[0:11, 10:21]))
        self.assertAlmostEqual(_stats['mean'].calculate(self.imageContext),
                               numpy.mean(self.imageData[0:11, 10:21]))
        compressed_values = self.imageContext.values.compressed()
        compressed_values = compressed_values.reshape(11, 11)
        yData = numpy.sum(compressed_values.astype(numpy.float64), axis=1)
        xData = numpy.sum(compressed_values.astype(numpy.float64), axis=0)
        dataYRange = range(11)
        dataXRange = range(10, 21)
        ycom = numpy.sum(yData*dataYRange) / numpy.sum(yData)
        xcom = numpy.sum(xData*dataXRange) / numpy.sum(xData)
        self.assertEqual(_stats['com'].calculate(self.imageContext), (xcom, ycom))

    def testBasicStatsImagePolyRoi(self):
        """Test a simple rectangle ROI"""
        _stats = self.getBasicStats()
        self.assertEqual(_stats['min'].calculate(self.imageContext_2), 0)
        self.assertEqual(_stats['max'].calculate(self.imageContext_2), 2432)
        self.assertEqual(_stats['minCoords'].calculate(self.imageContext_2), (0.0, 0.0))
        # NOTE: edge pixels are not fully inside the polygon. Ideally each
        # pixel would get a fractional weight in the stats; for now the
        # weight is 0 if the pixel center is outside the ROI, else 1.
        self.assertEqual(_stats['maxCoords'].calculate(self.imageContext_2), (0.0, 19.0))

    def testBasicStatsScatter(self):
        """Test result for simple stats on a scatter restricted by a ROI."""
        # Only two scatter points fall inside the 1D ROI.
        self.assertEqual(self.scatterContext.values.compressed().size, 2)
        _stats = self.getBasicStats()
        self.assertEqual(_stats['min'].calculate(self.scatterContext), 6)
        self.assertEqual(_stats['max'].calculate(self.scatterContext), 7)
        self.assertEqual(_stats['minCoords'].calculate(self.scatterContext), (2, 3))
        self.assertEqual(_stats['maxCoords'].calculate(self.scatterContext), (3, 4))
        self.assertEqual(_stats['std'].calculate(self.scatterContext), numpy.std([6, 7]))
        self.assertEqual(_stats['mean'].calculate(self.scatterContext), numpy.mean([6, 7]))

    def testBasicHistogram(self):
        """Test result for simple stats on a histogram restricted by a ROI."""
        _stats = self.getBasicStats()
        xData = yData = numpy.array(range(2, 6))
        self.assertEqual(_stats['min'].calculate(self.histoContext), 2)
        self.assertEqual(_stats['max'].calculate(self.histoContext), 5)
        self.assertEqual(_stats['minCoords'].calculate(self.histoContext), (2,))
        self.assertEqual(_stats['maxCoords'].calculate(self.histoContext), (5,))
        self.assertEqual(_stats['std'].calculate(self.histoContext), numpy.std(yData))
        self.assertEqual(_stats['mean'].calculate(self.histoContext), numpy.mean(yData))
        com = numpy.sum(xData * yData) / numpy.sum(yData)
        self.assertEqual(_stats['com'].calculate(self.histoContext), com)
class TestAdvancedROIImageContext(TestCaseQt):
    """Test stats result on an image context with different scale and
    origins"""

    def setUp(self):
        TestCaseQt.setUp(self)
        self.data_dims = (100, 100)
        self.data = numpy.random.rand(*self.data_dims)
        self.plot = Plot2D()

    def tearDown(self):
        self.plot.setAttribute(qt.Qt.WA_DeleteOnClose)
        self.plot.close()
        self.plot = None
        TestCaseQt.tearDown(self)

    def test(self):
        """Test stats result on an image context with different scale and
        origins"""
        roi_origins = [(0, 0), (2, 10), (14, 20)]
        img_origins = [(0, 0), (14, 20), (2, 10)]
        img_scales = [1.0, 0.5, 2.0]
        _stats = {'sum': stats.Stat(name='sum', fct=numpy.sum), }
        # Cross every ROI origin with every image origin/scale combination.
        for roi_origin in roi_origins:
            for img_origin in img_origins:
                for img_scale in img_scales:
                    with self.subTest(roi_origin=roi_origin,
                                      img_origin=img_origin,
                                      img_scale=img_scale):
                        self.plot.addImage(self.data, legend='img',
                                           origin=img_origin,
                                           scale=img_scale)
                        roi = RectangleROI()
                        roi.setGeometry(origin=roi_origin, size=(20, 20))
                        context = stats._ImageContext(
                            item=self.plot.getImage('img'),
                            plot=self.plot,
                            onlimits=False,
                            roi=roi)
                        # Convert the ROI window from plot coordinates to
                        # pixel indices, then clip it to the image bounds.
                        x_start = int((roi_origin[0] - img_origin[0]) / img_scale)
                        x_end = int(x_start + (20 / img_scale)) + 1
                        y_start = int((roi_origin[1] - img_origin[1])/ img_scale)
                        y_end = int(y_start + (20 / img_scale)) + 1
                        x_start = max(x_start, 0)
                        x_end = min(max(x_end, 0), self.data_dims[1])
                        y_start = max(y_start, 0)
                        y_end = min(max(y_end, 0), self.data_dims[0])
                        th_sum = numpy.sum(self.data[y_start:y_end, x_start:x_end])
                        self.assertAlmostEqual(_stats['sum'].calculate(context),
                                               th_sum)
|
<filename>Lib/site-packages/hackedit/app/index/db.py
"""
Low level api for managing the file/symbols index.
"""
import logging
import os
import sqlite3
import sys
import re
from hackedit.api import system
#: Version of the database (will be appended to the db filename)
DB_VERSION = '0.1'
#: File name of the database
DB_FILE_NAME = 'index-%s.db' % DB_VERSION
# When running the test suite, use a separate database file so tests cannot
# corrupt the user's real index.
if os.environ.get('HACKEDIT_CORE_TEST_SUITE', default=None) is not None:
    DB_FILE_NAME = 'test-index-%s.db' % DB_VERSION
#: sql statements used to create the file and symbol tables.
SQL_CREATE_TABLES = [
    # Project table
    """CREATE TABLE Project (
PROJECT_ID INTEGER PRIMARY KEY,
PROJECT_PATH VARCHAR(512),
PROJECT_NAME VARCHAR(256));
""",
    # File table
    """CREATE TABLE File (
FILE_ID INTEGER PRIMARY KEY,
FILE_PATH VARCHAR(512),
FILE_TIME_STAMP FLOAT,
FILE_NAME VARCHAR(256),
PROJECT_ID INT NOT NULL REFERENCES Project(PROJECT_ID));
""",
    # Full-text-search (FTS4) index over file names.
    "CREATE VIRTUAL TABLE File_index USING fts4(FILE_ID INT, CONTENT);",
    # Symbol table
    """CREATE TABLE Symbol (
SYMBOL_ID INTEGER PRIMARY KEY,
SYMBOL_LINE INT,
SYMBOL_COLUMN INT,
SYMBOL_ICON_THEME VARCHAR(128),
SYMBOL_ICON_PATH VARCHAR(256),
SYMBOL_NAME VARCHAR(256),
FILE_ID INT NOT NULL REFERENCES File(FILE_ID),
PROJECT_ID INT NOT NULL REFERENCES Project(PROJECT_ID),
PARENT_SYMBOL_ID INT REFERENCES Symbol(SYMBOL_ID));
""",
    # Todo table (TODO/FIXME comments found while indexing).
    """CREATE TABLE Todo (
TODO_ID INTEGER PRIMARY KEY,
TODO_LINE INT,
TODO_COLUMN INT,
TODO_CONTENT VARCHAR(256),
FILE_ID INT NOT NULL REFERENCES File(FILE_ID),
PROJECT_ID INT NOT NULL REFERENCES Project(PROJECT_ID));
""",
    # Full-text-search (FTS4) index over symbol names.
    "CREATE VIRTUAL TABLE Symbol_index USING fts4(SYMBOL_ID INT, CONTENT);",
]
#: name of the project id column
COL_PROJECT_ID = 'PROJECT_ID'
#: name of the project path column
COL_PROJECT_PATH = 'PROJECT_PATH'
#: name of the project name column
COL_PROJECT_NAME = 'PROJECT_NAME'
#: name of the file id column
COL_FILE_ID = 'FILE_ID'
#: name of the file path column
COL_FILE_PATH = 'FILE_PATH'
#: name of the file mtime column
COL_FILE_TIME_STAMP = 'FILE_TIME_STAMP'
#: name of the file name column
COL_FILE_NAME = 'FILE_NAME'
#: name of the file project id column
COL_FILE_PROJECT_ID = COL_PROJECT_ID
#: name of the symbol id column
COL_SYMBOL_ID = 'SYMBOL_ID'
#: name of the symbol line column
COL_SYMBOL_LINE = 'SYMBOL_LINE'
#: name of the symbol column column
COL_SYMBOL_COLUMN = 'SYMBOL_COLUMN'
#: name of the symbol icon theme column
COL_SYMBOL_ICON_THEME = 'SYMBOL_ICON_THEME'
#: name of the symbol icon path column
COL_SYMBOL_ICON_PATH = 'SYMBOL_ICON_PATH'
#: name of the symbol name column
COL_SYMBOL_NAME = 'SYMBOL_NAME'
#: name of the symbol file id column
COL_SYMBOL_FILE_ID = COL_FILE_ID
#: name of the symbol parent symbol id column
COL_SYMBOL_PARENT_SYMBOL_ID = 'PARENT_SYMBOL_ID'
class DbHelper:
    """
    Context manager that opens a database connection and lets you execute
    some actions on it. The connection is automatically closed when the
    context manager goes out of scope.

    .. note:: The database tables are created automatically if the database
        file did not exist.
    """
    #: Pre-compiled pattern used to detect camel/pascal case names.
    prog_camel_case = re.compile(r'(?:[A-Z][a-z]+)+')

    def __init__(self):
        self.conn = None
        # Remember whether the db file existed before __enter__ so that we
        # know whether the tables must be created.
        self.exists = os.path.exists(self.get_db_path())

    def __enter__(self):
        """
        Enters the context manager: creates the database if it does not exist
        and creates the connection object.
        """
        db_path = self.get_db_path()
        self.conn = sqlite3.connect(db_path, timeout=60)
        # sqlite3.Row lets callers access result columns by name.
        self.conn.row_factory = sqlite3.Row
        if not self.exists:
            _logger().debug('creating database %r', db_path)
            self._create_tables()
        return self

    def __exit__(self, *args, **kwargs):
        """
        Exits the context manager: closes the connection object.
        """
        self.conn.close()

    @staticmethod
    def get_db_path():
        """
        Gets the path to the index database.
        """
        return os.path.join(system.get_app_data_directory(), DB_FILE_NAME)

    # ---------------------------------------------------------------
    # Project management
    # ---------------------------------------------------------------
    def create_project(self, project_path):
        """
        Creates a project. If the project does already exists, the method
        simply returns it's project id.

        A project is just a path that will get scanned recursively to build
        the file and symbol index.

        :param project_path: path of the project to create.
        :return: PROJECT_ID
        """
        sql = "INSERT INTO Project(PROJECT_PATH, PROJECT_NAME) VALUES(?, ?);"
        if not self.has_project(project_path):
            project_name = os.path.split(project_path)[1]
            c = self.conn.cursor()
            DbHelper.exec_sql(c, sql, project_path, project_name)
            self.conn.commit()
            return self._get_last_insert_row_id(c)
        else:
            p = self.get_project(project_path)
            return int(p[COL_PROJECT_ID])

    def has_project(self, project_path):
        """
        Checks if the project exists in the database.

        :param project_path: path of the project to check.
        :returns: True if the project has been added to the db.
        """
        statement = 'SELECT COUNT(*) FROM Project WHERE PROJECT_PATH = ?;'
        c = self.conn.cursor()
        DbHelper.exec_sql(c, statement, project_path)
        results = c.fetchone()
        count = 0
        if results:
            count = results['COUNT(*)']
        return count > 0

    def get_projects(self):
        """
        Gets the complete list of indexed projects.

        :returns: Generator that yields Project rows.
        """
        c = self.conn.cursor()
        statement = 'SELECT * FROM Project;'
        DbHelper.exec_sql(c, statement)
        while True:
            row = c.fetchone()
            if row is None:
                return
            yield row

    def get_project(self, project_path):
        """
        Gets a Project item.

        :param project_path: path of the project item to retrieve.
        :returns: the Project row or None if not found.
        """
        c = self.conn.cursor()
        statement = 'SELECT * FROM Project WHERE PROJECT_PATH = ?'
        DbHelper.exec_sql(c, statement, project_path)
        return c.fetchone()

    def delete_project(self, project_path):
        """
        Delete a project from the index database (all associated files and
        symbols will be deleted).

        :param project_path: path of the project to delete.
        :returns: True on success, False if the project was not found.
        """
        proj = self.get_project(project_path)
        if proj is None:
            return False
        pid = proj[COL_PROJECT_ID]
        statement = 'DELETE FROM Project where PROJECT_ID = ?;'
        c = self.conn.cursor()
        DbHelper.exec_sql(c, statement, pid)
        statement = 'DELETE FROM File where PROJECT_ID = ?;'
        c = self.conn.cursor()
        DbHelper.exec_sql(c, statement, pid)
        # Fixed: this statement previously had a no-op ``.format(pid)`` call
        # appended; the id is bound as a parameter like the other statements.
        statement = 'DELETE FROM Symbol where PROJECT_ID = ?;'
        c = self.conn.cursor()
        DbHelper.exec_sql(c, statement, pid)
        self.conn.commit()
        return True

    # ---------------------------------------------------------------
    # File management
    # ---------------------------------------------------------------
    # FILE CRUD Operations
    def create_file(self, file_path, project_id, commit=True):
        """
        Creates a file.

        If the file does already exists, the method simply returns the file id.

        :param file_path: Path of the file to create
        :param project_id: Id of the parent project.
        :param commit: True to commit the transaction immediately; set to
            False when batching many inserts.
        :returns: FILE_ID
        """
        sql = "INSERT INTO File(FILE_PATH, FILE_NAME, PROJECT_ID) " \
              "VALUES(?, ?, ?);"
        if not self.has_file(file_path):
            file_name = os.path.split(file_path)[1]
            c = self.conn.cursor()
            DbHelper.exec_sql(c, sql, file_path, file_name, project_id)
            fid = self._get_last_insert_row_id(c)
            # add to file index
            searchable_name = self._get_searchable_name(file_name)
            sql = "INSERT INTO File_index(FILE_ID, CONTENT) VALUES(?, ?);"
            DbHelper.exec_sql(c, sql, fid, searchable_name)
            if commit:
                self.conn.commit()
            return fid
        else:
            f = self.get_file_by_path(file_path)
            return int(f[COL_FILE_ID])

    def update_file(self, file_path, mtime, new_path=None, commit=True):
        """
        Updates a file in the database.

        :param file_path: Path of the file to update.
        :param mtime: The new modification time of the file.
        :param new_path: The new file path. None to specify the path/name
            has not changed.
        :param commit: True to commit the transaction immediately.
        :raises: ValueError if the file_path is not in the db.
        """
        file_row = self.get_file_by_path(file_path)
        if file_row is None:
            raise ValueError('invalid file path')
        if new_path is None:
            new_path = file_path
        file_name = os.path.split(new_path)[1]
        fid = file_row[COL_FILE_ID]
        c = self.conn.cursor()
        sql = 'UPDATE File SET FILE_TIME_STAMP=?, FILE_PATH = ?, ' \
              'FILE_NAME = ? WHERE FILE_ID = ?;'
        DbHelper.exec_sql(c, sql, mtime, new_path, file_name, fid)
        if commit:
            self.conn.commit()

    def delete_file(self, file_path, commit=True):
        """
        Deletes a file from the index (and all the symbols found in it).

        :param file_path: Path of the file to remove.
        :param commit: True to commit the transaction immediately.
        :returns: True on success, False if the file was not found.
        """
        file_row = self.get_file_by_path(file_path)
        if file_row is None:
            return False
        fid = file_row[COL_FILE_ID]
        c = self.conn.cursor()
        statement = 'DELETE FROM File where FILE_ID = ?'
        DbHelper.exec_sql(c, statement, fid)
        statement = 'DELETE FROM Symbol where FILE_ID = ?'
        DbHelper.exec_sql(c, statement, fid)
        if commit:
            self.conn.commit()
        return True

    # Utility methods related to files management
    def has_file(self, file_path):
        """
        Checks if the file exists in the database.

        :returns: True if the file has been added to the db.
        """
        # NOTE: 'FILE' vs 'File' works because SQLite table names are
        # case-insensitive.
        statement = 'SELECT COUNT(*) FROM FILE WHERE FILE_PATH = ?;'
        c = self.conn.cursor()
        DbHelper.exec_sql(c, statement, file_path)
        results = c.fetchone()
        count = 0
        if results:
            count = results['COUNT(*)']
        return count > 0

    def get_files(self, project_ids=None, name_filter=''):
        """
        Generates the list of all files found in the index.

        Client code can specify to only look into the specified projects and
        apply a name filter.

        :param project_ids: the list of project ids to look into.
            Use None the gets the whole list of files, across projects.
        :param name_filter: optional name filter to apply.
        """
        searchable_name = '*%s*' % self._get_searchable_name(name_filter)
        c = self.conn.cursor()
        if project_ids:
            # look into specified project files
            # Render the id list as a SQL tuple; the ',)' fixup handles the
            # single-element Python tuple repr.
            project_ids = str(tuple(project_ids)).replace(',)', ')')
            if name_filter:
                self.conn.create_function('MATCH_RATIO', 2, match_ratio)
                sql = 'SELECT * FROM File WHERE PROJECT_ID IN %s AND ' \
                      'FILE_ID IN ( SELECT FILE_ID FROM File_index WHERE ' \
                      'CONTENT MATCH ?) ' \
                      'ORDER BY MATCH_RATIO(FILE_NAME, ?) ASC;' % project_ids
                DbHelper.exec_sql(c, sql, searchable_name, name_filter)
            else:
                sql = 'SELECT * FROM File WHERE PROJECT_ID IN %s ' \
                      'ORDER BY FILE_NAME ASC;' % project_ids
                DbHelper.exec_sql(c, sql)
        else:
            # look into all files, across all projects
            if name_filter:
                self.conn.create_function('MATCH_RATIO', 2, match_ratio)
                sql = 'SELECT * FROM File ' \
                      'WHERE FILE_ID IN ( SELECT FILE_ID FROM File_index WHERE '\
                      'CONTENT MATCH ?) ORDER BY MATCH_RATIO(FILE_NAME, ?) ASC;'
                DbHelper.exec_sql(c, sql, searchable_name, name_filter)
            else:
                sql = 'SELECT * FROM File ORDER BY FILE_NAME ASC;'
                DbHelper.exec_sql(c, sql)
        while True:
            row = c.fetchone()
            if row is None:
                return
            yield row

    def get_file_mtime(self, file_path):
        """
        Gets the file's modification time from the db

        :returns: mtime - float
        :raises: ValueError if the file_path does not exists in the database
        """
        file_row = self.get_file_by_path(file_path)
        if file_row is None:
            raise ValueError('invalid file path: %s' % file_path)
        return file_row[COL_FILE_TIME_STAMP]

    def get_file_by_path(self, path):
        """
        Gets a File row from path.

        :returns: the File row or None if not found.
        """
        c = self.conn.cursor()
        statement = 'SELECT * FROM File WHERE FILE_PATH = ?'
        DbHelper.exec_sql(c, statement, path)
        return c.fetchone()

    def get_file_by_id(self, fid):
        """
        Returns a file item by ID.

        :param fid: id of the file to retrieve.
        :returns: the File row or None if not found.
        """
        c = self.conn.cursor()
        statement = 'SELECT * FROM File WHERE FILE_ID = ?'
        DbHelper.exec_sql(c, statement, fid)
        return c.fetchone()

    # ---------------------------------------------------------------
    # Symbol management
    # ---------------------------------------------------------------
    # SYMBOL CRUD Operations
    def create_symbol(self, name, line, column, icon_theme, icon_path,
                      file_id, project_id, parent_symbol_id=None, commit=True):
        """
        Adds a symbol to the database.

        :param name: name of the symbol
        :param line: line where the symbol is defined
        :param column: column where the symbol is defined
        :param icon_theme: icon from theme that will be used when displaying
            the symbol
        :param icon_path: icon from path that will be used when displaying
            the symbol
        :param file_id: Id of the file where the symbol can be found
        :param project_id: Id of the project the symbol belongs to
        :param parent_symbol_id: Optional parent symbol id
        :param commit: True to commit the transaction immediately.
        :returns: symbol id that can be used to add child symbols.
        """
        if parent_symbol_id is None:
            # NOTE(review): this stores the literal string 'null' in
            # PARENT_SYMBOL_ID rather than SQL NULL; callers appear to rely
            # on that convention — confirm before changing.
            parent_symbol_id = 'null'
        statement = ('INSERT INTO Symbol(SYMBOL_LINE, SYMBOL_COLUMN, '
                     'SYMBOL_ICON_THEME, SYMBOL_ICON_PATH, SYMBOL_NAME, '
                     'FILE_ID, PARENT_SYMBOL_ID, PROJECT_ID) '
                     'values (?, ?, ?, ?, ?, ?, ?, ?);')
        c = self.conn.cursor()
        DbHelper.exec_sql(c, statement, line, column, icon_theme, icon_path,
                          name, file_id, str(parent_symbol_id), project_id)
        sid = self._get_last_insert_row_id(c)
        sql = "INSERT INTO Symbol_index(SYMBOL_ID, CONTENT) VALUES(?, ?);"
        DbHelper.exec_sql(c, sql, sid, self._get_searchable_name(name))
        if commit:
            self.conn.commit()
        return sid

    def delete_file_symbols(self, file_id):
        """
        Removes all symbols found in the specified file_id.

        :param file_id: id of the file that contains the symbols to remove.

        .. note:: there is no update of a symbol, symbols are always entirely
            removed from db before the updated symbols are inserted.
        """
        c = self.conn.cursor()
        statement = 'DELETE FROM Symbol where FILE_ID = ?'
        DbHelper.exec_sql(c, statement, file_id)
        self.conn.commit()

    def get_symbols(self, file_id=None, name_filter='', project_ids=None):
        """
        Generates a filtered list of all symbol names (using fuzzy matching)
        found in the index.

        Client code can specify to look into the symbols of the specified
        projects or the specified file.

        .. note:: Both ``file_id`` and ``project_ids`` cannot be used together,
            ``file_id`` has the biggest priority.

        :param file_id: Id of the file to look into.
        :param project_ids: Id of the projects to look into.
            Discarded if file_id is not None.
        :param name_filter: Optional filter to apply on every symbol name.
        :returns: Generator that yield Symbol items.
        """
        c = self.conn.cursor()
        searchable_name = '*%s*' % self._get_searchable_name(name_filter)
        if file_id:
            # get file symbols
            if name_filter:
                self.conn.create_function('MATCH_RATIO', 2, match_ratio)
                sql = 'SELECT * FROM Symbol WHERE FILE_ID = ? AND ' \
                      'SYMBOL_ID IN ( SELECT SYMBOL_ID FROM Symbol_index ' \
                      'WHERE CONTENT MATCH ?) ' \
                      'ORDER BY MATCH_RATIO(SYMBOL_NAME, ?) ASC;'
                DbHelper.exec_sql(c, sql, file_id, searchable_name, name_filter)
            else:
                sql = 'SELECT * FROM Symbol WHERE FILE_ID = ? ' \
                      'ORDER BY SYMBOL_NAME ASC;'
                DbHelper.exec_sql(c, sql, file_id)
        elif project_ids:
            # get project symbols
            project_ids = str(tuple(project_ids)).replace(',)', ')')
            if name_filter:
                self.conn.create_function('MATCH_RATIO', 2, match_ratio)
                sql = 'SELECT * FROM Symbol ' \
                      'WHERE Symbol.PROJECT_ID IN %s AND ' \
                      'Symbol.SYMBOL_ID IN ( ' \
                      'SELECT SYMBOL_ID FROM Symbol_index ' \
                      'WHERE CONTENT MATCH ?)' \
                      'ORDER BY MATCH_RATIO(Symbol.SYMBOL_NAME, ?) ASC;' % \
                      project_ids
                DbHelper.exec_sql(c, sql, searchable_name, name_filter)
            else:
                sql = 'SELECT * FROM Symbol ' \
                      'WHERE Symbol.PROJECT_ID IN %s ' \
                      'ORDER BY Symbol.SYMBOL_NAME ASC;' % project_ids
                DbHelper.exec_sql(c, sql)
        else:
            # get all symbols
            if name_filter:
                self.conn.create_function('MATCH_RATIO', 2, match_ratio)
                sql = 'SELECT * FROM Symbol ' \
                      'WHERE SYMBOL_ID IN ( ' \
                      'SELECT SYMBOL_ID FROM Symbol_index ' \
                      'WHERE CONTENT MATCH ?)' \
                      'ORDER BY MATCH_RATIO(SYMBOL_NAME, ?) ASC;'
                DbHelper.exec_sql(c, sql, searchable_name, name_filter)
            else:
                sql = 'SELECT * FROM Symbol ORDER BY SYMBOL_NAME ASC;'
                DbHelper.exec_sql(c, sql)
        while True:
            row = c.fetchone()
            if row is None:
                return
            yield row

    def _create_tables(self):
        """
        Creates the two index tables: one for the file index and one for the
        symbol index.
        """
        c = self.conn.cursor()
        for statement in SQL_CREATE_TABLES:
            DbHelper.exec_sql(c, statement)
        self.conn.commit()

    @staticmethod
    def _get_last_insert_row_id(c):
        """
        Gets the last insert row id.

        :param c: sqlite3.Cursor
        :return: int
        """
        DbHelper.exec_sql(c, "SELECT last_insert_rowid();")
        return int(c.fetchone()['last_insert_rowid()'])

    @staticmethod
    def _get_searchable_name(name):
        """
        Replaces capital letters by _ + small letter if name is camel/pascal
        case.

        :param name: name to convert
        :return: converted name
        """
        if DbHelper.is_camel_case(name):
            v = ''.join([('_' + l.lower()) if l.isupper() else l
                         for l in name])
        else:
            v = name
        return v

    @staticmethod
    def is_camel_case(name):
        """
        Checks if a name is camel case (or pascal case).
        """
        return bool(DbHelper.prog_camel_case.findall(name))

    @staticmethod
    def exec_sql(cursor, statement, *args):
        """
        Executes a SQL statement with the given bound parameters.

        Operational errors are logged and swallowed on purpose (best-effort
        indexing must not crash the application).
        """
        try:
            cursor.execute(statement, args)
        except sqlite3.OperationalError as e:
            # Logger.warn is a deprecated alias of Logger.warning.
            _logger().warning('failed to execute SQL statement: %r. Args=%r - '
                              'Error = %s', statement, args, str(e))
def match_ratio(item, expr):
    """
    MATCH_RATIO function that will be added to sqlite in order to sort the
    filter list of symbols/files using fuzzy matching.

    Lower is better: items containing the first search token earlier (and
    that are long relative to the query) rank first; items that do not
    contain the token at all are pushed to the end via sys.maxsize.
    """
    first_token = get_search_tokens(expr)[0]
    try:
        position = item.lower().index(first_token)
    except ValueError:
        # token not present in the item: rank it last
        return sys.maxsize
    return position - len(expr) / len(item)
def get_search_tokens(expr):
    """Lower-cases ``expr`` and splits it on '_', '-', '.' and spaces."""
    separators = str.maketrans('_-.', '   ')
    return expr.lower().translate(separators).split(' ')
def _logger():
    """Returns the logger for this module."""
    return logging.getLogger(__name__)
|
""" Production environment setup:
- setup static files
- setup template files """
import os
import sys
import importlib.util
import shutil
import pathlib
import subprocess
import boto3
import gzip
import hashlib
import time
def load_module(name, path):
    """ Imports a module from an explicit file path and registers it in
    sys.modules under ``name``.

    :param name: module name to register
    :param path: filesystem path of the module's .py file
    :return: the executed module object
    """
    spec = importlib.util.spec_from_file_location(name, path)
    mod = importlib.util.module_from_spec(spec)
    # register before executing so the module can be found during its own
    # import (mirrors the regular import machinery)
    sys.modules[spec.name] = mod
    spec.loader.exec_module(mod)
    return mod
# Absolute path of the directory containing this script; every other path
# below is resolved relative to it.
BASE_PATH = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
# Load project environment variables (AWS credentials, FLASK_ENV, ...).
env = load_module("env_loader", BASE_PATH / "../utils/env_loader/main.py").load()
# Helper module used by setup_templates() to rewrite {{ static_url(...) }}.
get_insert_replace = load_module("get_insert_replace", BASE_PATH / "../utils/get_insert_replace/__init__.py")
TEMPLATES_DIR = BASE_PATH / "../app/templates"
TEMPLATES_DIR_PRODUCTION = BASE_PATH / "../app/templates_production"
STATIC_DIR = BASE_PATH / "../app/static"
# s3 bucket that serves the static files in production.
STATIC_BUCKET = "taskstack-static"
STATIC_BUCKET_URL = f"https://{STATIC_BUCKET}.s3.eu-central-1.amazonaws.com"
# Working directories created by main() and removed again when it finishes.
OUTPUT_PATHS = {
    "~": BASE_PATH / "output",
    "/": BASE_PATH / "output/production_env_setup",
    "/temp": BASE_PATH / "output/production_env_setup/temp",
    "/temp/css": BASE_PATH / "output/production_env_setup/temp/css",
    "/temp/js": BASE_PATH / "output/production_env_setup/temp/js"
}
# Shared s3 client; credentials come from the loaded environment.
s3_cli = boto3.client("s3",
                      aws_access_key_id=env["AWS_ACCESS_KEY_ID"],
                      aws_secret_access_key=env["AWS_SECRET_ACCESS_KEY"],
                      region_name=env["AWS_DEFAULT_REGION"])
def collect_paths_from_dir_and_opt_recreate_dir_structure(dir_to_collect_from,
                                                          recreate_dir_structure_path=None,
                                                          ignore_direct_subdir_of_base_dir_func=None,
                                                          base_dir=None):
    """ Collects all file paths from the given dir and its sub-dirs.

    :param dir_to_collect_from: pathlib.Path -> path of the dir to collect from
    :param recreate_dir_structure_path: pathlib.Path -> path of the dir where you want sub-dirs of the
                                        <dir_to_collect_from> to be recreated
    :param ignore_direct_subdir_of_base_dir_func: Callable -> takes the name of a direct sub-dir of the
                                                  <dir_to_collect_from> as parameter and returns a bool:
                                                  True => ignore | False => don't ignore
    :param base_dir: !!! don't set this parameter (used internally) !!!
    :return: List[pathlib.Path] -> list of collected file paths
    """
    if base_dir is None:
        # top-level call: remember the root so recursion can compute
        # dir-structure paths relative to it
        base_dir = dir_to_collect_from
    collected = []
    base_str = str(base_dir)
    for entry_name in os.listdir(dir_to_collect_from):
        entry_path = dir_to_collect_from / entry_name
        if not os.path.isdir(entry_path):
            collected.append(entry_path)
            continue
        # the ignore filter only applies to direct children of the root dir
        if base_dir == dir_to_collect_from and callable(ignore_direct_subdir_of_base_dir_func) \
                and ignore_direct_subdir_of_base_dir_func(entry_name):
            continue
        if recreate_dir_structure_path is not None:
            entry_str = str(entry_path)
            relative = entry_str[entry_str.find(base_str) + len(base_str) + 1:]
            try:
                os.mkdir(recreate_dir_structure_path / relative)
            except FileExistsError:
                pass
        collected += collect_paths_from_dir_and_opt_recreate_dir_structure(
            entry_path,
            recreate_dir_structure_path=recreate_dir_structure_path,
            base_dir=base_dir)
    return collected
def get_static_bucket_files():
    """ Collects info about files in the STATIC_BUCKET.

    Bucket objects are keyed by content hash; the original static file path
    and the pre-compression md5 are recovered from the object's tags.

    :return: Dict[Dict] -> {<corresponding static file path>: {"Key": <obj key>,
                                                               "Hash": <md5 hash before compression>}, ...
                           }
    """
    print(f"collect info about '{STATIC_BUCKET}' s3 bucket")
    files = {}
    paginator = s3_cli.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=STATIC_BUCKET):
        if "Contents" not in page:
            # empty listing page -> nothing (more) to collect
            break
        for obj in page["Contents"]:
            tag_set = s3_cli.get_object_tagging(Bucket=STATIC_BUCKET, Key=obj["Key"])["TagSet"]
            tags = {tag["Key"]: tag["Value"] for tag in tag_set}
            static_path = tags.get("name")
            file_hash = tags.get("hash")
            # ignore objects that are missing either tag
            if static_path is not None and file_hash is not None:
                files[static_path] = {"Key": obj["Key"], "Hash": file_hash}
    return files
def get_file_md5_hash(file_path):
    """ Hashes a file with md5, reading it in 4 KiB chunks.

    :param file_path: Union[str, pathlib.Path] -> path of the file to hash
    :return: str -> hex digest of the file contents
    """
    digest = hashlib.md5()
    with open(file_path, "rb") as f:
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def get_file_mime_type(file_name):
    """ Returns mime-type of a file based on its file extension.

    :param file_name: str -> name of the file
    :return: str -> mime-type of the file
    :raises ValueError: if the extension is not in the known mapping
    """
    ext_to_mime = {
        "png": "image/png",
        "jpeg": "image/jpeg",
        "jpg": "image/jpeg",
        "jfif": "image/jpeg",
        "css": "text/css",
        "js": "text/javascript",
        "ico": "image/vnd.microsoft.icon",
        "svg": "image/svg+xml",
        "gif": "image/gif"
    }
    # extension = everything after the last '.' ('' when there is no dot)
    dot_index = file_name.rfind(".")
    file_ext = file_name[dot_index + 1:].lower() if dot_index != -1 else ""
    mime = ext_to_mime.get(file_ext)
    if mime is None:
        raise ValueError(f"Unknown file extension: '{file_ext}'. You can add this file extension to "
                         f"tools/production_env_setup.py - get_file_mime_type")
    return mime
def minify_gzip_and_get_css_and_js_files():
    """ 'Minifys' and 'gzips' css/ js files (write to output/temp) and returns info about those files.

    Non-minified files are run through yuicompressor (css) or the Google
    closure compiler (js); files whose name contains '.min.' are copied
    unchanged. Each output file is then gzipped in place.
    NOTE(review): requires `java` on the PATH and the jars in 3rd_party/ -
    confirm in the deployment environment.

    :return: Dict[Dict] -> {<path relative to static dir>: {
                                "Path": <path relative to cwd>,
                                "Hash": <md5 hash of file before compression>,
                                "Name": <corresponding file name in s3 bucket>
                            }, ...
                           }
    """
    print("minify and gzip css and js files")

    def path_relative_to_static_css_or_js_dir(path):
        # strip the leading .../static/css or .../static/js prefix
        path_str = str(path)
        css_dir_str = str(STATIC_DIR / "css")
        js_dir_str = str(STATIC_DIR / "js")
        if path_str.find(css_dir_str) != -1:
            trim = css_dir_str
        elif path_str.find(js_dir_str) != -1:
            trim = js_dir_str
        else:
            raise ValueError("Invalid path")
        return pathlib.Path(path_str[path_str.find(trim) + len(trim) + 1:])

    files = {}

    def _(in_dir, out_dir):
        # minify every file from in_dir into out_dir, then gzip it in place
        for i, path in enumerate(collect_paths_from_dir_and_opt_recreate_dir_structure(in_dir)):
            in_path = path
            name = path_relative_to_static_css_or_js_dir(path)
            out_path = out_dir / name
            if path.suffix == ".css":
                cmd = f"java -jar {BASE_PATH / '3rd_party/yuicompressor.jar'} " \
                      f"{in_path} " \
                      f"-o {out_path} " \
                      f"--charset utf-8"
                static_path = "css/" + str(name)
            elif path.suffix == ".js":
                cmd = f"java -jar {BASE_PATH / '3rd_party/google_closure_compiler.jar'} " \
                      f"--compilation_level SIMPLE_OPTIMIZATIONS " \
                      f"--js {in_path} " \
                      f"--js_output_file {out_path}"
                static_path = "js/" + str(name)
            else:
                raise ValueError("Invalid path")
            if ".min." in str(path_relative_to_static_css_or_js_dir(path)):
                # already minified: just copy it through
                shutil.copy2(path, out_dir / path_relative_to_static_css_or_js_dir(path))
            else:
                process = subprocess.Popen(cmd, shell=True,
                                           stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                           universal_newlines=True)
                stdout, stderr = process.communicate()
                if process.returncode != 0:
                    raise ChildProcessError(stderr + "\nThis error could be caused by using Windows.")
            # hash the *source* file (pre-compression); the hash also serves
            # as the s3 object name for cache busting
            file_hash = get_file_md5_hash(in_path)
            files[static_path] = {"Path": str(out_path),
                                  "Hash": file_hash,
                                  "Name": file_hash}
            # gzip the minified output over itself (read, then rewrite)
            with open(out_path, 'rb') as f_in:
                f_in_content = f_in.read()
            with gzip.open(out_path, 'wb') as f_out:
                f_out.write(f_in_content)

    _(STATIC_DIR / "css", OUTPUT_PATHS["/temp/css"])
    _(STATIC_DIR / "js", OUTPUT_PATHS["/temp/js"])
    return files
def gzip_and_get_other_static_files():
    """ 'Gzips' static non css/ js files (write to output/temp) and returns info about those files.

    :return: Dict[Dict] -> {<path relative to static dir>: {
                                "Path": <path relative to cwd>,
                                "Hash": <md5 hash of file before compression>,
                                "Name": <corresponding file name in s3 bucket>
                            }, ...
                           }
    """
    print("gzip non css/ js static files")

    def path_relative_to_static_dir(_path):
        # strip the leading .../static prefix
        path_str = str(_path)
        return pathlib.Path(path_str[path_str.find(str(STATIC_DIR)) + len(str(STATIC_DIR)) + 1:])

    files = {}
    # css/ js are minified+gzipped separately, so ignore those sub-dirs here
    paths = collect_paths_from_dir_and_opt_recreate_dir_structure(STATIC_DIR, OUTPUT_PATHS["/temp"],
                                                                  lambda static_subdir:
                                                                  static_subdir == "css" or
                                                                  static_subdir == "js")
    for path in paths:
        # POSIX separators so the relative path doubles as the s3 object key
        static_path = str(pathlib.PurePosixPath(path_relative_to_static_dir(path)))
        files[static_path] = {"Path": str(OUTPUT_PATHS["/temp"] / static_path),
                              "Hash": get_file_md5_hash(path),
                              "Name": static_path}
        # gzip into the temp output dir (hash above is pre-compression)
        with open(path, 'rb') as f_in:
            f_in_content = f_in.read()
        with gzip.open(OUTPUT_PATHS["/temp"] / static_path, 'wb') as f_out:
            f_out.write(f_in_content)
    return files
def sync_static_bucket(not_processed_static_files):
    """ Syncs the STATIC_BUCKET with the local static files.

    Uploads new and changed files (change detection compares the
    pre-compression md5 stored in the object tags) and deletes bucket objects
    that no longer have a local counterpart.

    :param not_processed_static_files: Dict[Dict] -> return of minify_gzip_and_get_css_and_js_files /
                                                     gzip_and_get_other_static_files
    """
    print(f"sync '{STATIC_BUCKET}' s3 bucket with local static files")
    not_processed_static_bucket_files = get_static_bucket_files()
    upload_count = 0
    re_upload_count = 0
    delete_count = 0
    for static_path, file in not_processed_static_files.items():
        if static_path in not_processed_static_bucket_files:
            if file["Hash"] == not_processed_static_bucket_files[static_path]["Hash"]:
                # unchanged: keep the bucket object, nothing to upload
                del not_processed_static_bucket_files[static_path]
                continue
            else:
                # changed: delete the stale object, then fall through to upload
                s3_cli.delete_object(Bucket=STATIC_BUCKET, Key=not_processed_static_bucket_files[static_path]["Key"])
                delete_count += 1
                re_upload_count += 1
                del not_processed_static_bucket_files[static_path]
        s3_cli.upload_file(file["Path"],
                           STATIC_BUCKET,
                           file["Name"],
                           ExtraArgs={"ContentType": get_file_mime_type(static_path),
                                      "ContentEncoding": "gzip",
                                      "Tagging": f"name={static_path}&hash={file['Hash']}",
                                      'CacheControl': 'max-age: 31536000',  # = 1 year
                                      'ACL': 'public-read'})
        upload_count += 1
    # anything left in the bucket listing has no local counterpart anymore
    for name, file in not_processed_static_bucket_files.items():
        s3_cli.delete_object(Bucket=STATIC_BUCKET, Key=file["Key"])
        delete_count += 1
    print(f"synced '{STATIC_BUCKET}' s3 bucket with local static files:\n"
          f"\t- uploaded [{upload_count}] files ([{re_upload_count}] for re-upload)\n"
          f"\t- deleted [{delete_count}] files ([{re_upload_count}] for re-upload)\n"
          f"\t- re-uploaded [{re_upload_count}] files")
def setup_templates(static_files):
    """ Write templates from TEMPLATES_DIR to TEMPLATES_DIR_PRODUCTION >> replace <static_url> func calls with the
    corresponding STATIC_BUCKET URL.

    TEMPLATES_DIR_PRODUCTION is deleted and recreated from scratch on every
    call.

    :param static_files: Dict[Dict] -> return of minify_gzip_and_get_css_and_js_files /
                                       gzip_and_get_other_static_files
    """
    print("setup templates")

    def path_relative_to_templates_dir(_path):
        # strip the leading .../templates prefix
        path_str = str(_path)
        return pathlib.Path(path_str[path_str.find(str(TEMPLATES_DIR)) + len(str(TEMPLATES_DIR)) + 1:])

    shutil.rmtree(TEMPLATES_DIR_PRODUCTION, ignore_errors=True)
    # presumably gives the OS a moment to release the deleted dir before
    # recreating it - TODO confirm this sleep is still needed
    time.sleep(.1)
    os.mkdir(TEMPLATES_DIR_PRODUCTION)
    templates_paths = collect_paths_from_dir_and_opt_recreate_dir_structure(TEMPLATES_DIR,
                                                                            TEMPLATES_DIR_PRODUCTION)
    # marker sequences matching {{ static_url( '<path>' ) }} with flexible
    # whitespace and either quote style
    marker_sequence_before = get_insert_replace.MarkerSequence(
        [get_insert_replace.Marker("{{"),
         get_insert_replace.OptionalRepeatableMarker(" "),
         get_insert_replace.Marker("static_url"),
         get_insert_replace.OptionalRepeatableMarker(" "),
         get_insert_replace.Marker("("),
         get_insert_replace.OptionalRepeatableMarker(" "),
         get_insert_replace.MultiplePossibleMarkers([get_insert_replace.Marker("'"),
                                                     get_insert_replace.Marker("\"")])])
    marker_sequence_after = get_insert_replace.MarkerSequence(
        [get_insert_replace.MultiplePossibleMarkers([get_insert_replace.Marker("'"),
                                                     get_insert_replace.Marker("\"")]),
         get_insert_replace.OptionalRepeatableMarker(" "),
         get_insert_replace.Marker(")"),
         get_insert_replace.OptionalRepeatableMarker(" "),
         get_insert_replace.Marker("}}")])

    def replace_static_url_func_call_with_corresponding_static_bucket_url(target_str, _, __):
        # `path` is the template currently processed in the loop below; only
        # used to produce a helpful error message
        nonlocal path
        if target_str not in static_files:
            raise ValueError(f"Error in template: '{path}'. The path '{target_str}' does not exist in '{STATIC_DIR}'.")
        return f"\"{STATIC_BUCKET_URL}/{static_files[target_str]['Name']}\"", \
               get_insert_replace.REPLACE_TARGET_AND_MARKER_SEQUENCE

    replacer = get_insert_replace.Replacer([(marker_sequence_before, marker_sequence_after,
                                             replace_static_url_func_call_with_corresponding_static_bucket_url)])
    for i, path in enumerate(templates_paths):
        with open(path, "r") as template:
            content = replacer.replace(template.read())
        with open(TEMPLATES_DIR_PRODUCTION / path_relative_to_templates_dir(path), "w") as processed_template:
            processed_template.write(content)
def main():
    """Runs the production setup: static files to s3, template rewriting.

    :raises ValueError: when FLASK_ENV is not 'production'
    """
    if env.get("FLASK_ENV") != "production":
        raise ValueError("Environment variable FLASK_ENV != 'production'")
    for path in OUTPUT_PATHS.values():
        # exist_ok so a leftover output dir from a previous (failed) run
        # does not crash the setup with FileExistsError
        os.makedirs(path, exist_ok=True)
    static_files = {**minify_gzip_and_get_css_and_js_files(),
                    **gzip_and_get_other_static_files()}
    sync_static_bucket(static_files)
    # setup_templates() deletes and recreates TEMPLATES_DIR_PRODUCTION itself,
    # so no mkdir is needed here (the old unconditional mkdir crashed when
    # the directory already existed)
    setup_templates(static_files)
    shutil.rmtree(OUTPUT_PATHS["/"], ignore_errors=True)
# Script entry point: only run when executed directly (not when imported).
if __name__ == '__main__':
    main()
|
<gh_stars>1-10
"""
The controllers module provides different controller classes,
applicable to different simulations.
A controller object's job is to control simulations-
At a high level a controller objects accepts a list of
parameters and chromosomes and (usually) returns
corresponding simulation data.
This is implemented polymorphically in subclasses.
Each controller class must therefore provide a run method, which is used by
the evaluator to run a simulation.
A controller must be able to accept simulation parameters (chromosomes)
from the evaluator.
The evaluator is therefore only concerned with assigning fitness to chromosomes.
On the whole this allows for deep modularization -
as long as the user can provide a controller which will (for instance)
return sample and time arrays for arbitrary chromosome and parameter
lists a range of evaluators would be able to utilise it.
"""
import os
import subprocess
import math
class __Controller():
    """
    Controller base class.

    The leading double underscore keeps the name out of ``import *``;
    concrete controllers in this module subclass it and override ``run``.
    """
    def run(self,
            candidates,
            parameters):
        """
        At a high level - accepts a list of parameters and chromosomes
        and (usually) returns corresponding simulation data. This is
        implemented polymorphically in subclasses.

        :param candidates: list of chromosomes (parameter value sequences)
        :param parameters: list of parameter names
        :raises NotImplementedError: always; subclasses must override
        """
        raise NotImplementedError("Valid controller requires run method!")
class CLIController(__Controller):
    """
    Control simulations via command line arguments executed through the
    Python subprocess module (``shell=True``).
    """

    def __init__(self, cli_argument):
        # base command/executable that every per-candidate call starts with
        self.cli_argument = cli_argument

    def run(self,
            candidates,
            parameters,
            fitness_filename='evaluations'):
        """
        Run one shell command per candidate chromosome.

        :param candidates: list of chromosomes (sequences of gene values)
        :param parameters: stored for interface parity; not used by the command
        :param fitness_filename: file name handed to the external tool
        """
        for chromosome in candidates:
            self.chromosome = chromosome
            self.parameters = parameters  # actually unneeded
            # flatten the chromosome into shell-friendly text; every gene is
            # followed by a single space
            chromosome_str = ''.join(f'{gene} ' for gene in chromosome)
            cla = f'{self.cli_argument} {fitness_filename} {chromosome_str}'
            print(cla)
            subprocess.call(cla, shell=True)
class NrnProject(__Controller):
    """
    Run an nrnproject simulation based on optimizer parameters.
    """

    def __init__(self,
                 nrnproject_path,
                 db_path,
                 exp_id=None):
        self.sim_main_path = os.path.join(nrnproject_path,
                                          'src/simrunner.py')
        self.nrnproject_path = nrnproject_path
        self.db_path = db_path
        self.exp_id = exp_id

    def __generate_cla(self):
        """Builds the full command line for one simulation run."""
        return 'python ' + self.sim_main_path + self.__generate_sim_var_string()

    def __generate_sim_var_string(self):
        """Renders chromosome values as quoted sim_var assignments."""
        pieces = []
        for index, name in enumerate(self.parameters):
            pieces.append(f' "sim_var[\'{name}\'] = {self.chromosome[index]}"')
        if self.exp_id is not None:
            pieces.append(f' "sim_var[\'exp_id\'] ={self.exp_id}"')
        return ''.join(pieces)

    def run(self,
            candidates,
            parameters):
        """Run one simulation per candidate and collect its db output."""
        import sqldbutils
        exp_data_array = []
        for chromosome in candidates:
            self.chromosome = chromosome
            self.parameters = parameters
            exp_id = sqldbutils.generate_exp_ids(self.db_path)
            cla = self.__generate_cla()
            # simrunner expects to be launched from the project src dir
            os.chdir(self.nrnproject_path + '/src/')
            os.system(cla)
            print(self.db_path)
            print(exp_id)
            exp_data_array.append(sqldbutils.sim_data(self.db_path, exp_id))
        return exp_data_array
class __CondorContext(object):
    """Context for a Condor-based grid: job preparation and file transfer.

    NOTE(review): relies on ssh_utils, optimizer_params, CandidateData and
    controllers being in scope at call time - only os/subprocess/math are
    imported at module level; confirm the missing imports.
    """

    def __init__(self,
                 host,
                 username,
                 password,
                 port):
        self.messagehost = ssh_utils.host(host, username,
                                          password, port)

    def __split_list(self,
                     alist,
                     wanted_parts=1):
        """Splits ``alist`` into ``wanted_parts`` roughly equal slices."""
        length = len(alist)
        return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]
                for i in range(wanted_parts)]

    def __prepare_candidates(self, candidates, candidates_per_job=1):
        """Splits the candidate list into jobs and stores them on self.

        Sets ``self.num_jobs`` and ``self.job_list`` (list of lists of
        (chromosome, exp_id) pairs).
        """
        # optimizer_params may override the requested job size
        if optimizer_params.candidates_in_job is not None:
            candidates_in_job = optimizer_params.candidates_in_job
        else:
            candidates_in_job = candidates_per_job
        num_candidates = len(candidates)
        ids = range(num_candidates)
        # materialize the zip: under Python 3, zip() is a lazy iterator and
        # __split_list needs len() and slicing
        enumerated_candidates = list(zip(candidates, ids))
        # floor division: num_jobs must be an int for range()/slicing
        # (plain '/' produced a float under Python 3 and broke __split_list)
        num_jobs = num_candidates // candidates_in_job
        self.num_jobs = num_jobs
        self.job_list = self.__split_list(enumerated_candidates, wanted_parts=self.num_jobs)

    def __make_job_file(self, job, job_number):
        """Writes run<job_number>.sh, the shell script executed on a node.

        NOTE(review): appends to ``self.CandidateData_list``, which is never
        initialized in this class - the caller must create it first.
        """
        # write the header:
        filepath = os.path.join(self.tmpdir, 'run' + str(job_number) + '.sh')
        run_shell = open(filepath, 'w')
        run_shell.write('#!/bin/bash\n')
        run_shell.write('reldir=`dirname $0`\n')
        run_shell.write('cd $reldir\n')
        run_shell.write('directory=`pwd`\n')
        run_shell.write('pndirectory=$directory\n')
        run_shell.write('#Untar the file:\n')
        run_shell.write('/bin/tar xzf ./portable-neuron.tar.gz\n')
        tarfile_name = optimizer_params.tarred_nrnproj
        run_shell.write('/bin/tar xzf ./' + tarfile_name + '\n')
        for enumerated_candidate in job:
            chromosome = enumerated_candidate[0]
            candidate_info = CandidateData(chromosome)
            exp_id = enumerated_candidate[1]
            candidate_info.set_exp_id(exp_id)
            candidate_info.set_job_num(job_number)
            self.CandidateData_list.append(candidate_info)
            nproj = controllers.NrnProjSimRun(optimizer_params.project_path, chromosome)
            run_shell.write('#issue the commands\n')
            run_shell.write('$pndirectory/pnpython.sh \
$directory/src/simrunner.py "sim_var[\'exp_id\'] \
= ' + str(exp_id) + '\" ' + '"sim_var[\'''dbname''\'] \
= \'outputdb' + str(job_number) + '.sqlite\'"' +
                            nproj.sim_var_string + '\n')
        run_shell.write('echo \'done\'\n')
        run_shell.write('cp $directory/sims/outputdb' + str(job_number) + '.sqlite $directory\n')
        run_shell.close()

    def __make_submit_file(self):
        """Writes the condor submit file (one executable/queue pair per job)."""
        filepath = os.path.join(self.tmpdir, 'submitfile.submit')
        submit_file = open(filepath, 'w')
        submit_file.write('universe = vanilla\n')
        submit_file.write('log = pneuron.log\n')
        submit_file.write('Error = err.$(Process)\n')
        submit_file.write('Output = out.$(Process)\n')
        submit_file.write('requirements = GLIBC == "2.11"\n')
        tarfile_name = optimizer_params.tarred_nrnproj
        submit_file.write('transfer_input_files = portable-neuron.tar.gz,' + tarfile_name + '\n')
        submit_file.write('should_transfer_files = yes\n')
        submit_file.write('when_to_transfer_output = on_exit_or_evict\n')
        for shellno in range(self.num_jobs):
            submit_file.write('executable = run' + str(shellno) + '.sh\n')
            submit_file.write('queue\n')
        submit_file.close()

    def __build_condor_files(self, candidates, parameters, candidates_per_job=100):
        """Builds the submit file and all per-job run scripts."""
        # pass the caller's job size through (it was previously hard-coded
        # to 100, silently ignoring the candidates_per_job argument)
        self.__prepare_candidates(candidates, candidates_per_job=candidates_per_job)
        # make the job files (shell scripts executed on the execute nodes)
        job_number = 0
        for job in self.job_list:
            self.__make_job_file(job, job_number)
            job_number += 1
        self.__make_submit_file()

    def __delete_remote_files(self, host):
        """Deletes everything in the remote working directory."""
        import ssh_utils
        command = 'rm -rf ./*'
        ssh_utils.issue_command(host, command)

    def __put_multiple_files(self, host, filelist, localdir='/', remotedir='/'):
        """Copies each file in ``filelist`` from localdir to remotedir via ssh."""
        import ssh_utils
        for file in filelist:
            localpath = os.path.join(localdir, file)
            remotepath = os.path.join(remotedir, file)
            ssh_utils.put_file(host, localpath, remotepath)
class NrnProjectCondor(NrnProject):
    """
    Run NrnProject-based simulations on a Condor-managed
    federated system.

    NOTE(review): this class appears unfinished/broken - see the inline
    NOTE(review) comments; it cannot currently be constructed or run as-is.
    """
    def __init__(self,host,username,password,port=80,
                 local_analysis=False,candidates_per_job=100):
        # NOTE(review): NrnProject.__init__ requires nrnproject_path and
        # db_path, so this no-arg super() call raises TypeError - confirm
        # intended constructor signature.
        super(NrnProjectCondor,self).__init__()
        if local_analysis:
            # NOTE(review): no method named __local_run exists (the local
            # variant is __local_evaluate) - this raises AttributeError.
            self.run=self.__local_run
        else:
            # NOTE(review): no method named __remote_run__ exists either
            # (the remote variant is __condor_run).
            self.run=self.__remote_run__
        # make a context which provides grid utilities
        # NOTE(review): inside this class body, __CondorContext is
        # name-mangled to _NrnProjectCondor__CondorContext -> NameError.
        self.context=__CondorContext(host,username,password,port)
        self.cpj=candidates_per_job
    def __condor_run(self,candidates,parameters):
        """
        Run simulations on the grid; once each generation has finished, all
        data is pulled to the local workstation as sqlite databases (one per
        job), fitness is read out sequentially and the fitness array is
        returned.

        NOTE(review): depends on attributes never set on this class
        (self.tmpdir, self.portableswdir, self.messagehost, self.num_jobs,
        self.datadir, self.generation, self.CandidateData_list) and on
        sqldbutils/optimizer_params, which are not imported here.
        """
        import time
        import ssh_utils
        # Build submit and runx.sh files; exp_id corresponds to the position
        # in the chromosome and fitness arrays.
        # NOTE(review): __build_condor_files lives on the context class, so
        # this mangled attribute lookup
        # (context._NrnProjectCondor__build_condor_files) fails.
        self.context.__build_condor_files(candidates,parameters,
                                          candidates_per_job=self.cpj)
        # file handling: clear the remote directory, then push inputs
        # NOTE(review): __delete_remote_files__ is not defined anywhere.
        self.__delete_remote_files__()
        filelist=os.listdir(self.tmpdir)
        # NOTE(review): __put_multiple_files is a context method taking a
        # host argument - calling it on self with this signature fails.
        self.__put_multiple_files(filelist,localdir=self.tmpdir)
        filelist=os.listdir(self.portableswdir)
        # copy portable software files over:
        self.__put_multiple_files(filelist,localdir=self.portableswdir)
        # NOTE(review): `context` here is an undefined global (should
        # presumably be self.context).
        ssh_utils.issue_command(context.messagehost,
                                'export PATH=/opt/Condor/release/bin:$PATH\ncondor_submit submitfile.submit')
        # make a list of the database files we expect back:
        self.jobdbnames=[]
        for job_num in range(self.num_jobs):
            jobdbname='outputdb'+str(job_num)+'.sqlite'
            self.jobdbnames.append(jobdbname)
        # poll the remote host until every job database has been produced
        dbs_created=False
        pulled_dbs=[] # list of databases which have been extracted from remote server
        while (dbs_created==False):
            print('waiting..')
            time.sleep(20)
            print('checking if dbs created:')
            command='ls'
            remote_filelist=ssh_utils.issue_command(self.messagehost, command)
            for jobdbname in self.jobdbnames:
                db_exists=jobdbname+'\n' in remote_filelist
                if (db_exists==False):
                    print(jobdbname+' has not been generated')
                    dbs_created=False
                elif db_exists==True and jobdbname not in pulled_dbs:
                    print(jobdbname+' has been generated')
                    remotefile=optimizer_params.remotedir+jobdbname
                    localpath=os.path.join(self.datadir,str(self.generation)+jobdbname)
                    ssh_utils.get_file(self.messagehost,remotefile,localpath)
                    pulled_dbs.append(jobdbname) #so that it is not extracted more than once
            if len(pulled_dbs)==len(self.jobdbnames):
                dbs_created=True
        # read each candidate's fitness from its job database
        fitness=[]
        for CandidateData in self.CandidateData_list:
            job_num = CandidateData.job_num
            dbname=str(self.generation)+'outputdb'+str(job_num)+'.sqlite'
            dbpath=os.path.join(self.datadir,dbname)
            exp_id=CandidateData.exp_id
            connection=sqldbutils.db_connect(dbpath) #establish a database connection
            query='SELECT numerical_value\
                FROM output_params WHERE experiment_id=\
                '+str(exp_id)+' AND parameter="fitness"'
            exp_fitness=sqldbutils.execute_query(connection,query)
            exp_fitness=exp_fitness.fetchall()
            exp_fitness=exp_fitness[0][0]
            print('Fitness:')
            print(exp_fitness)
            fitness.append(exp_fitness)
        self.generation+=1
        return fitness
    ###ignore this for now###
    def __local_evaluate(self,candidates,args):
        """Run jobs on a local condor install and evaluate fitness locally.

        NOTE(review): marked 'ignore this for now' by the author; contains a
        bare `analysis` expression (NameError), a __buil_condor_files typo,
        and references to undefined attributes - non-functional as written.
        """
        import time
        analysis
        self.CandidateData_list=[]
        analysis_var=self.analysis_var
        # Build submitfile.submit and runx.sh files:
        self.__buil_condor_files(candidates) #exp_id now corresponds to position in chromosome/fitness array
        fitness=[]
        # submit the jobs to the grid
        os.chdir(self.tmpdir)
        os.system('condor_submit submitfile.submit')
        # wait till every job database exists locally:
        dbs_created=False
        while (dbs_created==False):
            print('checking if dbs created:')
            for job_num in range(self.num_jobs):
                jobdbname='outputdb'+str(job_num)+'.sqlite'
                jobdbpath=os.path.join(self.datadir,jobdbname)
                print(jobdbpath)
                db_exists=os.path.exists(jobdbpath)
                if (db_exists==False):
                    time.sleep(60)
                    dbs_created=False
                    break
                dbs_created=True
        for CandidateData in self.CandidateData_list:
            job_num = CandidateData.job_num
            dbname='/outputdb'+str(job_num)+'.sqlite'
            dbpath=self.datadir+dbname
            exp_id=CandidateData.exp_id
            exp_data=sqldbutils.sim_data(dbpath,exp_id)
            # NOTE(review): rebinding `analysis` here shadows whatever module
            # the bare `analysis` statement above was meant to import.
            analysis=analysis.IClampAnalysis(exp_data.samples,exp_data.t,analysis_var,5000,10000)
            exp_fitness=analysis.evaluate_fitness(self.targets,self.weights,cost_function=analysis.normalised_cost_function)
            fitness.append(exp_fitness)
        # clean up the per-job databases
        for job_num in range(self.num_jobs):
            jobdbname='outputdb'+str(job_num)+'.sqlite'
            jobdbpath=os.path.join(self.datadir,jobdbname)
            print(jobdbpath)
            os.remove(jobdbpath)
        return fitness
class SineWaveController(__Controller):
    """
    Simple sine wave generator which takes a number of variables ('amp',
    'period', 'offset') and produces an output based on these.
    """
    def __init__(self, sim_time, dt):
        """
        :param sim_time: total simulated time (same unit as dt)
        :param dt: simulation time step
        """
        self.sim_time = sim_time
        self.dt = dt

    def run_individual(self, sim_var, gen_plot=False, show_plot=False):
        """
        Run an individual simulation.

        The candidate data has been flattened into the sim_var dict. The
        sim_var dict contains parameter:value key value pairs, which are
        applied to the model before it is simulated.

        :param sim_var: dict with 'amp', 'period' and 'offset' entries
        :param gen_plot: when True, plot the trace with matplotlib
        :param show_plot: when True (and gen_plot), display the plot window
        :return: (times, volts) numpy arrays sampled every dt up to sim_time
        """
        print(">> Running individual: %s"%(sim_var))
        import numpy as np
        t = 0
        times = []
        volts = []
        while t <= self.sim_time:
            # offset + amp * sin(2*pi*t / period)
            v = sim_var['offset'] + (sim_var['amp'] * (math.sin( 2*math.pi * t/sim_var['period'])))
            times.append(t)
            volts.append(v)
            t += self.dt
        if gen_plot:
            from matplotlib import pyplot as plt
            info = ""
            for key in sim_var.keys():
                info+="%s=%s "%(key, sim_var[key])
            plt.plot(times,volts, label=info)
            plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=1)
            if show_plot:
                plt.show()
        return np.array(times), np.array(volts)

    def run(self,candidates,parameters):
        """
        Run simulation for each candidate

        This run method will loop through each candidate and run the simulation
        corresponding to its parameter values. It will populate an array called
        traces with the resulting voltage traces for the simulation and return it.
        """
        traces = []
        for candidate in candidates:
            sim_var = dict(zip(parameters,candidate))
            t,v = self.run_individual(sim_var)
            traces.append([t,v])
        # the stray '|' that followed this statement in the source was
        # extraction junk and made the file unparseable; removed
        return traces
# <NAME>
# CPSC 386 FALL 2016
# Project 5 (Final Project)
# <EMAIL>
# Armada.py is a top-down space shooter game made using Python 3.4 and Pygame 1.9.
import sys, random, time, pygame
from pygame.locals import *
# Colors (RGB)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0 )
RED = (255, 0, 0 )
GREEN = (0, 255, 0 )
# Text color
TEXT_COLOR = WHITE
# Set up window size and FPS
GAME_WINDOW_WIDTH = 1400
GAME_WINDOW_HEIGHT = 800
FPS = 60
# Boss level: the boss shows up on a random level from 5 to 10 (inclusive)
BOSS_LVL = random.randint(5,10)
# Alien Size, includes ALIEN1 and ALIEN2
ALIEN_SIZE = 70
# Alien Spawn Rate
# Increase this to make the spawn timer longer,
# Or decrease to make the aliens spawn quickly
ALIEN1_SPAWN_RATE = 60
ALIEN2_SPAWN_RATE = ALIEN1_SPAWN_RATE
# Keep track of the player, aliens, bullets, and reload speed
# Player Speed
PLAYER_SPEED = 15
# Alien Speed (alien2 and the boss move at half of alien1's speed)
ALIEN1_SPEED = 2
ALIEN2_SPEED = ALIEN1_SPEED / 2
BOSS_SPEED = ALIEN1_SPEED / 2
# Speed of the bullet, and reload speed
BULLET_SPEED = 10
ALIEN_BULLET_SPEED = 10
RELOAD_SPEED = 15
# Keep a list of all of the aliens and ammo
ALIEN1 = []
ALIEN2 = []
BULLETS = []
ALIEN_BULLETS = []
# Set up images for the game
# NOTE(review): assets are loaded from the working directory, so the game
# must be launched from the directory containing these files
# Player image
PLAYER_IMG = pygame.image.load('player_ship.png')
PLAYER_RECT = PLAYER_IMG.get_rect()
# Alien images
ALIEN1_IMG = pygame.image.load('alien1.png')
ALIEN2_IMG = pygame.image.load('alien2.png')
BOSS_IMG = pygame.image.load('boss.png')
BOSS_RECT = BOSS_IMG.get_rect()
# Player ammo: a plain 10x2 red surface (no image file)
BULLET_IMG = pygame.Surface([10, 2])
BULLET_IMG.fill(RED)
BULLET_RECT = BULLET_IMG.get_rect()
# Alien ammo: same size as the player's bullet, but green
ALIEN_BULLET_IMG = pygame.Surface([10, 2])
ALIEN_BULLET_IMG.fill(GREEN)
# NOTE(review): this rect is taken from BULLET_IMG, not ALIEN_BULLET_IMG -
# harmless today since both surfaces are 10x2, but it looks like a
# copy-paste slip; confirm intent
ALIEN_BULLET_RECT = BULLET_IMG.get_rect()
# Explosion images
EXPLOSION_IMG = pygame.image.load('explosion.png')
BIG_EXPLOSION_IMG = pygame.image.load('big_explosion.png')
# Title screen image (intentionally not scaled; see commented-out line)
TITLE_IMG = pygame.image.load("title_screen.jpg")
#TITLE_IMG = pygame.transform.scale(TITLE_IMG, (GAME_WINDOW_WIDTH, GAME_WINDOW_HEIGHT))
# Instructions screen image, scaled to fill the window
INSTRUCTIONS_IMG = pygame.image.load('instructions_bg.jpg')
INSTRUCTIONS_IMG = pygame.transform.scale(INSTRUCTIONS_IMG, (GAME_WINDOW_WIDTH, GAME_WINDOW_HEIGHT))
# In game background image, scaled to fill the window
BACKGROUND_IMG = pygame.image.load('sky.jpg')
BACKGROUND_IMG = pygame.transform.scale(BACKGROUND_IMG, (GAME_WINDOW_WIDTH, GAME_WINDOW_HEIGHT))
# main() function
def main():
    """Initialize pygame, show title/instruction screens, then loop games.

    Reads EARTH_DEFENSE / PLAYER_WON globals, which are set inside
    runGame(), to decide which end screen to display after each round.
    """
    global FPS_CLOCK, GAME_DISPLAY, SMALL_FONT, LRG_FONT, XTRA_LRG_FONT
    pygame.init()
    FPS_CLOCK = pygame.time.Clock()
    GAME_DISPLAY = pygame.display.set_mode((GAME_WINDOW_WIDTH, GAME_WINDOW_HEIGHT))
    SMALL_FONT = pygame.font.SysFont('freesansbold.ttf', 30)
    LRG_FONT = pygame.font.SysFont('freesansbold.ttf', 60)
    XTRA_LRG_FONT = pygame.font.SysFont('freesansbold.ttf', 120)
    # There's no mouse input for this game, so don't show the pointer
    pygame.mouse.set_visible(False)
    # Play non in-game music (loops until explicitly stopped)
    pygame.mixer.music.load('death.mid')
    pygame.mixer.music.play(-1, 0.0)
    # Show the title screen
    GAME_DISPLAY.blit(TITLE_IMG, (0, 0))
    drawText('ARMADA', XTRA_LRG_FONT, GAME_DISPLAY, 0, 0 , TEXT_COLOR)
    drawText('Press Enter', LRG_FONT, GAME_DISPLAY, 600, 750, TEXT_COLOR)
    drawText('Game by <NAME>', SMALL_FONT, GAME_DISPLAY, 1190, 780, TEXT_COLOR)
    pygame.display.update()
    getLoadingScreenInput()
    # Show the instructions screen
    GAME_DISPLAY.blit(INSTRUCTIONS_IMG, (0, 0))
    drawText('INSTRUCTIONS:', LRG_FONT, GAME_DISPLAY, 10 , 10, TEXT_COLOR) # Display at top left corner
    drawText('Defeat the mothership to win the game', SMALL_FONT, GAME_DISPLAY, 10 , 50, TEXT_COLOR)
    drawText('Don\'t let the mothership reach Earth. If you do, then it\'s Game Over', SMALL_FONT, GAME_DISPLAY, 10 , 70, TEXT_COLOR)
    drawText('Avoid all aliens, if an alien gets close enough to you, then it\'s Game Over', SMALL_FONT, GAME_DISPLAY, 10 , 90, TEXT_COLOR)
    drawText('If your HP falls to zero, then it\'s Game Over', SMALL_FONT, GAME_DISPLAY, 10 , 110, TEXT_COLOR)
    drawText('Each time an alien reaches Earth, the Earth\'s defense drops 5 percent', SMALL_FONT, GAME_DISPLAY, 10 , 130, TEXT_COLOR)
    drawText('If Earth\'s defense drops to 0 you lose the game (i.e., 20 aliens reaching Earth results in Game Over)', SMALL_FONT, GAME_DISPLAY, 10 , 150, TEXT_COLOR)
    drawText('CONTROLS:', LRG_FONT, GAME_DISPLAY, 10 , 210, TEXT_COLOR)
    drawText('To move: W,A,S,D or arrow keys', SMALL_FONT, GAME_DISPLAY, 10 , 250, TEXT_COLOR)
    drawText('To shoot: Spacebar', SMALL_FONT, GAME_DISPLAY, 10 , 270, TEXT_COLOR)
    drawText('To quit: Esc', SMALL_FONT, GAME_DISPLAY, 10 , 290, TEXT_COLOR)
    drawText('Press Enter', LRG_FONT, GAME_DISPLAY, 600, 750, TEXT_COLOR)
    pygame.display.update()
    getLoadingScreenInput()
    # Stop music
    pygame.mixer.music.stop()
    # Limit to 60 frames per second
    FPS_CLOCK.tick(FPS)
    #################################
    #       main() GAME LOOP        #
    #################################
    while True:
        pygame.mixer.music.load('boss.mid')
        pygame.mixer.music.play(-1)
        runGame()  # blocks until a win/lose condition ends the round
        pygame.mixer.music.stop()
        # Defeat: Earth's defense depleted (also set when the player's HP hits 0)
        if (EARTH_DEFENSE <= 0):
            GAME_DISPLAY.blit(TITLE_IMG, (0, 0))
            drawText('DEFEAT', XTRA_LRG_FONT, GAME_DISPLAY, (GAME_WINDOW_WIDTH / 3) + 50, (GAME_WINDOW_HEIGHT / 3), RED)
            drawText('EARTH HAS BEEN DESTROYED', LRG_FONT, GAME_DISPLAY, (GAME_WINDOW_WIDTH / 3)- 130, (GAME_WINDOW_HEIGHT / 3) + 100, TEXT_COLOR)
            drawText('Press enter to play again or esc to quit', LRG_FONT, GAME_DISPLAY, 300, 750, TEXT_COLOR)
            pygame.display.update()
            getLoadingScreenInput()
            cleanUp(BULLETS,ALIEN1,ALIEN2) # Clear screen for the next game
        # Defeat: an alien (or the boss) collided with the player
        if playerCollision(PLAYER_RECT, ALIEN1) or playerCollision(PLAYER_RECT, ALIEN2) or PLAYER_RECT.colliderect(BOSS_RECT):
            GAME_DISPLAY.blit(TITLE_IMG, (0, 0))
            drawText('DEFEAT', XTRA_LRG_FONT, GAME_DISPLAY, (GAME_WINDOW_WIDTH / 3) + 50, (GAME_WINDOW_HEIGHT / 3), RED)
            drawText('YOU HAVE BEEN CAPTURED BY THE ALIENS', LRG_FONT, GAME_DISPLAY, (GAME_WINDOW_WIDTH / 3) - 230, (GAME_WINDOW_HEIGHT / 3) +100, TEXT_COLOR)
            drawText('Press enter to play again or esc to quit', LRG_FONT, GAME_DISPLAY, 300, 750, TEXT_COLOR)
            pygame.display.update()
            getLoadingScreenInput()
            cleanUp(BULLETS,ALIEN1,ALIEN2) # Clear screen for the next game
        # Victory: the boss was destroyed
        if (PLAYER_WON == True):
            GAME_DISPLAY.blit(TITLE_IMG, (0, 0))
            drawText('VICTORY', XTRA_LRG_FONT, GAME_DISPLAY, (GAME_WINDOW_WIDTH / 3) + 50, (GAME_WINDOW_HEIGHT / 3), GREEN)
            drawText('THE ALIENS HAVE BEEN DEFEATED', LRG_FONT, GAME_DISPLAY, (GAME_WINDOW_WIDTH / 3) - 130, (GAME_WINDOW_HEIGHT / 3) +100, TEXT_COLOR)
            drawText('Press enter to play again or esc to quit', LRG_FONT, GAME_DISPLAY, 300, 750, TEXT_COLOR)
            pygame.display.update()
            getLoadingScreenInput()
            cleanUp(BULLETS,ALIEN1,ALIEN2) # Clear screen for the next game
def runGame():
    """Play one round; returns when the round ends.

    Outcomes are communicated through the globals EARTH_DEFENSE (<= 0
    means defeat), PLAYER_WON (True means the boss was destroyed), and
    by the collision state of PLAYER_RECT, which main() inspects.
    """
    global EARTH_DEFENSE, PLAYER_HP, BOSS_HP, SCORE, PLAYER_WON
    PLAYER_WON = False
    EARTH_DEFENSE = 100     # If this reaches 0, game over
    PLAYER_HP = 100         # If this reaches 0, game over
    BOSS_HP = 100           # If the player defeats the boss they win
    # Set the score, lvl, and frequency at which the alien moves
    SCORE = 0
    lvl, ALIEN1_SPEED = calcLvlAndAlienSpeed(SCORE)  # Note: only change speed of Alien1
    PLAYER_RECT.topleft = (50, GAME_WINDOW_HEIGHT /2)
    # Put boss rect off the screen to start with
    BOSS_RECT.topright = (1600, GAME_WINDOW_HEIGHT /2)
    moveLeft = False
    moveRight = False
    moveUp = False
    moveDown = False
    shoot = False
    alien1_spawn_counter = 0
    alien2_spawn_counter = 0
    player_bullet_spawn_rate = 40
    alien_bullet_spawn_rate = 40
    effect = pygame.mixer.Sound('laser_fire.wav')
    #################################
    #      runGame() GAME LOOP      #
    #################################
    while True: # the game loop runs while the game part is playing
        # Calculate level and enemy speed (frozen once the boss level starts)
        if lvl != BOSS_LVL:
            lvl, ALIEN1_SPEED = calcLvlAndAlienSpeed(SCORE)  # Note: only change speed of Alien1
        for event in pygame.event.get():
            if event.type == QUIT:
                terminate()
            # Check if the key was pressed down
            elif event.type == KEYDOWN:
                if event.key == K_UP or event.key == K_w:
                    moveDown = False
                    moveUp = True
                    moveRight = False
                    moveLeft = False
                elif event.key == K_DOWN or event.key == K_s:
                    moveUp = False
                    moveDown = True
                    moveRight = False
                    moveLeft = False
                elif event.key == K_RIGHT or event.key == K_d:
                    moveUp = False
                    moveDown = False
                    moveRight = True
                    moveLeft = False
                elif event.key == K_LEFT or event.key == K_a:
                    moveUp = False
                    moveDown = False
                    moveRight = False
                    moveLeft = True
                elif event.key == K_SPACE:
                    shoot = True
            # Check if the key was released
            # If you release the key, you are no longer moving
            # Set vars to false and terminate game if player last hit esc
            elif event.type == KEYUP:
                if event.key == K_ESCAPE:
                    terminate()
                elif event.key == K_UP or event.key == K_w:
                    moveUp = False
                elif event.key == K_DOWN or event.key == K_s:
                    moveDown = False
                elif event.key == K_RIGHT or event.key == K_d:
                    moveRight = False
                elif event.key == K_LEFT or event.key == K_a:
                    moveLeft = False
                elif event.key == K_SPACE:
                    shoot = False
        # Add new ALIEN1 at the right of the screen, if needed.
        alien1_spawn_counter += 1
        if alien1_spawn_counter == ALIEN1_SPAWN_RATE:
            alien1_spawn_counter = 0
            ALIEN1_SIZE = ALIEN_SIZE
            # Re-roll the spawn height until it isn't the player's start row
            rand_y1 = random.randint(10,GAME_WINDOW_HEIGHT-ALIEN1_SIZE-10)
            while rand_y1 == GAME_WINDOW_HEIGHT /2:
                rand_y1 = random.randint(10,GAME_WINDOW_HEIGHT-ALIEN1_SIZE-10)
            newAlien1 = {'rect': pygame.Rect(GAME_WINDOW_WIDTH, rand_y1, ALIEN1_SIZE, ALIEN1_SIZE),
                        'surface':pygame.transform.scale(ALIEN1_IMG, (ALIEN1_SIZE, ALIEN1_SIZE))}
            ALIEN1.append(newAlien1)
        # Add new ALIEN2 at the right of the screen, if needed.
        alien2_spawn_counter += 1
        if alien2_spawn_counter == ALIEN2_SPAWN_RATE:
            alien2_spawn_counter = 0
            ALIEN2_SIZE = ALIEN_SIZE
            rand_y2 = random.randint(10,GAME_WINDOW_HEIGHT-ALIEN2_SIZE-10)
            while rand_y2 == GAME_WINDOW_HEIGHT /2:
                # Bug fix: this retry previously reassigned rand_y1 (using
                # ALIEN1_SIZE, which may not even be bound yet), so rand_y2
                # never changed and the loop could spin forever.
                rand_y2 = random.randint(10,GAME_WINDOW_HEIGHT-ALIEN2_SIZE-10)
            newAlien2 = {'rect': pygame.Rect(GAME_WINDOW_WIDTH, rand_y2, ALIEN2_SIZE, ALIEN2_SIZE),
                        'surface':pygame.transform.scale(ALIEN2_IMG, (ALIEN2_SIZE, ALIEN2_SIZE))}
            ALIEN2.append(newAlien2)
        # Add a new pair of player bullets when reloaded and firing
        player_bullet_spawn_rate += 1
        if player_bullet_spawn_rate >= RELOAD_SPEED * 2 and shoot == True:
            player_bullet_spawn_rate = 0
            newBullet1 = {'rect':pygame.Rect(PLAYER_RECT.centerx+10, PLAYER_RECT.centery-25, BULLET_RECT.width, BULLET_RECT.height),
                        'surface':pygame.transform.scale(BULLET_IMG, (BULLET_RECT.width, BULLET_RECT.height))}
            newBullet2 = {'rect':pygame.Rect(PLAYER_RECT.centerx+10, PLAYER_RECT.centery+25, BULLET_RECT.width, BULLET_RECT.height),
                        'surface':pygame.transform.scale(BULLET_IMG, (BULLET_RECT.width, BULLET_RECT.height))}
            BULLETS.append(newBullet1)
            BULLETS.append(newBullet2)
            effect.play(1)
        # Move the player around (clamped to the window).
        if moveLeft and PLAYER_RECT.left > 0:
            PLAYER_RECT.move_ip(-1 * PLAYER_SPEED, 0)
        if moveRight and PLAYER_RECT.right < GAME_WINDOW_WIDTH-10:
            PLAYER_RECT.move_ip(PLAYER_SPEED, 0)
        if moveUp and PLAYER_RECT.top > 30:
            PLAYER_RECT.move_ip(0, -1 * PLAYER_SPEED)
        if moveDown and PLAYER_RECT.bottom < GAME_WINDOW_HEIGHT-10:
            PLAYER_RECT.move_ip(0, PLAYER_SPEED)
        # Move the ALIEN1 left and add their bullets
        for a1 in ALIEN1:
            a1['rect'].move_ip(-1*ALIEN1_SPEED, 0)
            # add new alien1 bullets (shared counter across all aliens)
            alien_bullet_spawn_rate +=1
            if alien_bullet_spawn_rate >= RELOAD_SPEED * 30: # Include multiplier to slow reload speed
                alien_bullet_spawn_rate = 0
                alienBullet = {'rect':pygame.Rect(a1['rect'].centerx, a1['rect'].centery, ALIEN_BULLET_RECT.width, ALIEN_BULLET_RECT.height),
                            'surface':pygame.transform.scale(ALIEN_BULLET_IMG, (ALIEN_BULLET_RECT.width, ALIEN_BULLET_RECT.height))}
                ALIEN_BULLETS.append(alienBullet)
        # Move the ALIEN2 left and add their bullets
        for a2 in ALIEN2:
            a2['rect'].move_ip(-1*ALIEN2_SPEED,0)
            # add new alien2 bullets
            alien_bullet_spawn_rate +=1
            if alien_bullet_spawn_rate >= RELOAD_SPEED * 30: # Include multiplier to slow reload speed
                alien_bullet_spawn_rate = 0
                alienBullet = {'rect':pygame.Rect(a2['rect'].centerx, a2['rect'].centery, ALIEN_BULLET_RECT.width, ALIEN_BULLET_RECT.height),
                            'surface':pygame.transform.scale(ALIEN_BULLET_IMG, (ALIEN_BULLET_RECT.width, ALIEN_BULLET_RECT.height))}
                ALIEN_BULLETS.append(alienBullet)
        # Move the boss around.
        if lvl == BOSS_LVL:
            if BOSS_RECT.left > 0:
                BOSS_RECT.move_ip(-1 * BOSS_SPEED, 0)
            # add new boss bullets
            alien_bullet_spawn_rate += 1
            if alien_bullet_spawn_rate >= RELOAD_SPEED * 20: # Include multiplier to slow reload speed
                alien_bullet_spawn_rate = 0
                alienBullet = {'rect':pygame.Rect(BOSS_RECT.centerx, BOSS_RECT.centery, ALIEN_BULLET_RECT.width, ALIEN_BULLET_RECT.height),
                            'surface':pygame.transform.scale(ALIEN_BULLET_IMG, (ALIEN_BULLET_RECT.width, ALIEN_BULLET_RECT.height))}
                ALIEN_BULLETS.append(alienBullet)
        # move the player bullets
        for b in BULLETS:
            b['rect'].move_ip(1 * BULLET_SPEED, 0)
        # move the alien bullets
        for b2 in ALIEN_BULLETS:
            b2['rect'].move_ip(-1 * ALIEN_BULLET_SPEED, 0)
        # If boss reaches Earth, game over
        if BOSS_RECT.left < 20:
            EARTH_DEFENSE = 0
            break
        # Delete ALIEN1 that continued past the screen (iterate a copy).
        for a1 in ALIEN1[:]:
            if a1['rect'].left < 0:
                ALIEN1.remove(a1)
                EARTH_DEFENSE -= 5
        # Delete ALIEN2 that continued past the screen (iterate a copy).
        for a2 in ALIEN2[:]:
            if a2['rect'].left < 0:
                ALIEN2.remove(a2)
                EARTH_DEFENSE -= 5
        # Delete all player bullets that continued past the screen
        for b in BULLETS[:]:
            if b['rect'].right>GAME_WINDOW_WIDTH:
                BULLETS.remove(b)
        # Check if an alien bullet hit the player
        if hitPlayer(ALIEN_BULLETS, PLAYER_RECT):
            PLAYER_HP -= 5
            if (PLAYER_HP <= 0):
                EARTH_DEFENSE = 0 # The game ends because player died
                break
        # Check if the player bullet hit the boss
        if lvl == BOSS_LVL:
            if hitBoss(BULLETS, BOSS_RECT):
                BOSS_HP -= 5
                if (BOSS_HP <= 0):
                    PLAYER_WON = True
                    GAME_DISPLAY.blit(BIG_EXPLOSION_IMG, BOSS_RECT)
                    pygame.display.update(BOSS_RECT)
                    # Limit to 60 frames per second
                    FPS_CLOCK.tick(FPS)
                    pygame.time.delay(30)
                    break
        # Check if the player bullets hit the aliens.
        # Bug fix: iterate copies — removing from the list being iterated
        # skipped the element after each removed alien.
        for a1 in ALIEN1[:]:
            if hitAlien1(BULLETS, ALIEN1, a1):
                SCORE += 1
                GAME_DISPLAY.blit(EXPLOSION_IMG, a1['rect'])
                pygame.display.update(a1['rect'])
                # Limit to 60 frames per second
                FPS_CLOCK.tick(FPS)
                ALIEN1.remove(a1)
        for a2 in ALIEN2[:]:
            if hitAlien2(BULLETS, ALIEN2, a2):
                SCORE += 1
                GAME_DISPLAY.blit(EXPLOSION_IMG, a2['rect'])
                pygame.display.update(a2['rect'])
                # Limit to 60 frames per second
                FPS_CLOCK.tick(FPS)
                ALIEN2.remove(a2)
        # Display the background in-game image
        GAME_DISPLAY.blit(BACKGROUND_IMG, (0, 0))
        # Draw the player
        GAME_DISPLAY.blit(PLAYER_IMG, PLAYER_RECT)
        # Check if we need to draw the boss
        if lvl == BOSS_LVL:
            GAME_DISPLAY.blit(BOSS_IMG, BOSS_RECT)
        # Draw each alien
        for a1 in ALIEN1:
            GAME_DISPLAY.blit(a1['surface'], a1['rect'])
        for a2 in ALIEN2:
            GAME_DISPLAY.blit(a2['surface'], a2['rect'])
        # Draw each player bullet
        for b in BULLETS:
            GAME_DISPLAY.blit(b['surface'], b['rect'])
        # Draw each alien bullet
        for b2 in ALIEN_BULLETS:
            GAME_DISPLAY.blit(b2['surface'], b2['rect'])
        # Draw the score and how many Aliens got past your defenses
        drawText('Earth Defense: %s percent' % (EARTH_DEFENSE), SMALL_FONT, GAME_DISPLAY, 290, 20, TEXT_COLOR)
        drawText('Aliens eliminated: %s' % (SCORE), SMALL_FONT, GAME_DISPLAY, 625, 20, TEXT_COLOR)
        drawText('Level: %s' % (lvl), SMALL_FONT, GAME_DISPLAY, 895, 20, TEXT_COLOR)
        drawText('HP: %s' % (PLAYER_HP), SMALL_FONT, GAME_DISPLAY, 1050, 20, TEXT_COLOR)
        drawText('_______________________________________________________________________________________________________________________________',
                 SMALL_FONT, GAME_DISPLAY, 0, 30, TEXT_COLOR)
        # update the display
        pygame.display.update()
        # Check if any of the aliens ran into the player.
        if playerCollision(PLAYER_RECT, ALIEN1):
            break
        if playerCollision(PLAYER_RECT, ALIEN2):
            break
        if PLAYER_RECT.colliderect(BOSS_RECT):
            break
        # check if Earth's defense is depleted, resulting in game over
        if EARTH_DEFENSE <= 0:
            break
        FPS_CLOCK.tick(FPS)
# Calculate the level and the alien speed
def calcLvlAndAlienSpeed(SCORE):
    """Derive the current level and ALIEN1's speed from the score.

    One level is gained per 50 kills; aliens gain 0.5 px/frame per level
    on top of the base ALIEN1_SPEED.
    """
    level = 1 + int(SCORE / 50)
    speed = ALIEN1_SPEED + 0.5 * level
    return level, speed
# Clean up aliens and bullets
def cleanUp(BULLETS, ALIEN1, ALIEN2):
    """Empty every bullet and alien list in place so the next round starts clean.

    The previous implementation removed items from each list while
    iterating over it (which skips every other element) and compensated
    with an outer ``for i in range(GAME_WINDOW_WIDTH)`` loop. Clearing
    the lists in place is equivalent and O(n). The module-level
    ALIEN_BULLETS list is cleared as well, matching the old behavior.
    """
    del BULLETS[:]
    del ALIEN_BULLETS[:]
    del ALIEN1[:]
    del ALIEN2[:]
# Draw text on the screen
def drawText(text, font, surface, width, height, text_color):
    """Render *text* with *font* and blit it onto *surface* at (width, height)."""
    rendered = font.render(text, True, text_color)
    bounds = rendered.get_rect()
    bounds.topleft = (width, height)
    surface.blit(rendered, bounds)
# Get user input when not in game
def getLoadingScreenInput():
    """Block on a menu screen until Enter is pressed; Esc or closing the window quits."""
    while True:
        for event in pygame.event.get():
            wants_quit = event.type == QUIT or (
                event.type == KEYDOWN and event.key == K_ESCAPE)
            if wants_quit:
                terminate()
            if event.type == KEYDOWN and event.key == K_RETURN:
                return
# Check to see if the player hit the boss
def hitBoss(BULLETS, BOSS_RECT):
    """Consume the first bullet overlapping the boss; True if one hit."""
    for bullet in BULLETS:
        if not bullet['rect'].colliderect(BOSS_RECT):
            continue
        BULLETS.remove(bullet)
        return True
    return False
# Check to see if the player hit alien1
def hitAlien1(BULLETS, ALIEN1, a1):
    """Consume the first bullet overlapping alien *a1*; True if one hit."""
    hit = next((b for b in BULLETS if b['rect'].colliderect(a1['rect'])), None)
    if hit is None:
        return False
    BULLETS.remove(hit)
    return True
# Check to see if the player hit alien2
def hitAlien2(BULLETS, ALIEN2, a2):
    """Consume the first bullet overlapping alien *a2*; True if one hit."""
    target = a2['rect']
    for index, bullet in enumerate(BULLETS):
        if bullet['rect'].colliderect(target):
            BULLETS.pop(index)
            return True
    return False
# Check if the player was hit by an enemy bullet
def hitPlayer(ALIEN_BULLETS, PLAYER_RECT):
    """Consume the first alien bullet overlapping the player; True if one hit."""
    for shot in list(ALIEN_BULLETS):
        if shot['rect'].colliderect(PLAYER_RECT):
            ALIEN_BULLETS.remove(shot)
            return True
    return False
# Check to see if the player collided into an alien
def playerCollision(PLAYER_RECT, Alien):
    """Return True when the player's rect overlaps any alien in *Alien*."""
    return any(PLAYER_RECT.colliderect(entry['rect']) for entry in Alien)
def terminate():
    """Shut down pygame cleanly, then exit the interpreter."""
    pygame.quit()
    sys.exit()
# Call the main function, start up the game
if __name__ == '__main__':
    # Only launch the game when this file is executed directly.
    main()
|
# Script for generating .xml files containing information about government coalitions. Based on Wikipedia data and far from complete. Needs manual checking after running!!
import re, string,os
from glob import glob as gb
import pandas as pd
from tqdm import tqdm
from collections import Counter
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import matplotlib
import wikipedia
from bs4 import BeautifulSoup as bs
import requests
from datetime import datetime
import xml.etree.cElementTree as ET
from dateutil.parser import parse
from fuzzywuzzy import process
from functions import *
# Local checkout of the ParlaMint repository (output is written under it).
base_path = "/home/ruben/Documents/GitHub/ParlaMintCase"
# English-Wikipedia article slugs for each country's recent cabinets,
# keyed by ParlaMint language code. Processed in order per country.
wiki_links = {"bg":["Second_Borisov_Government","Gerdzhikov_Government","Third_Borisov_Government"],
              "pl":["Cabinet_of_Beata_Szydło","First_Cabinet_of_Mateusz_Morawiecki","Second_Cabinet_of_Mateusz_Morawiecki"],
              "sl":["12th_Government_of_Slovenia","13th_Government_of_Slovenia","14th_Government_of_Slovenia"],
              "cz":["Bohuslav_Sobotka%27s_Cabinet","Andrej_Babiš%27_First_Cabinet","Andrej_Babiš%27_Second_Cabinet"],
              "en":["Second_May_ministry","First_Johnson_ministry","Second_Johnson_ministry"],
              "nl":["First_Rutte_cabinet","Second_Rutte_cabinet","Third_Rutte_cabinet"],
              "is":["Cabinet_of_Bjarni_Benediktsson_(2017)","Cabinet_of_Katrín_Jakobsdóttir"],
              "lt":["Skvernelis_Cabinet","Šimonytė_Cabinet"],
              "it":["Gentiloni_Cabinet","Conte_I_Cabinet","Conte_II_Cabinet","Draghi_Cabinet"],
              "tr":["Yıldırım_Cabinet","Cabinet_Erdoğan_IV"],
              "da":["Lars_Løkke_Rasmussen_III_Cabinet","Frederiksen_Cabinet"],
              "hu":["Second_Orbán_Government","Third_Orbán_Government","Fourth_Orbán_Government"],
              "fr":["First_Philippe_government","Second_Philippe_government","Castex_government"],
              "lv":["Second_Straujuma_cabinet","Kučinskis_cabinet","Kariņš_cabinet"],
              "ro":["Dăncilă_Cabinet","First_Orban_Cabinet","Second_Orban_Cabinet","Cîțu_Cabinet"],
              "be":["Michel_I_Government","Michel_II_Government","Wilmès_I_Government","Wilmès_II_Government","De_Croo_Government"]}
def parse_url(url):
    """Fetch *url* and return its content as a BeautifulSoup tree."""
    response = requests.get(url)
    return bs(response.content)
def find_party_name(short_url, language):
    """Resolve an English-Wikipedia party link to the party's native-language name.

    Follows the article's "Wikidata item" link, then looks for the
    sitelink to the `{language}wiki` edition and returns that page's
    title. Falls back to a Wikidata alias, and finally to "na".
    Network errors from the initial fetches still propagate.
    """
    url = 'https://en.wikipedia.org/' + short_url
    s = parse_url(url)
    wikidata_link = [x for x in s.find_all('a') if x.text == "Wikidata item"][0].attrs['href']
    wikidata_soup = parse_url(wikidata_link)
    try:
        party_page = [x for x in wikidata_soup.find_all('span') if x.text == f"{language}wiki"][0].findParent().findParent().find('a').attrs['href']
        return parse_url(party_page).find("h1", {"id": "firstHeading"}).text
    except Exception:
        # No sitelink for this language; fall back to a Wikidata alias.
        try:
            return wikidata_soup.find('li', {"class": "wikibase-entitytermsview-aliases-alias"}).text
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit during long scraping runs.
            return "na"
def wikiparser(language,urls):
    """Scrape cabinet infoboxes from Wikipedia and write <relations> XML.

    For each cabinet article: extracts formation/dissolution dates and the
    member/opposition party lists, then appends one "coalition" and one
    "opposition" <relation> element per cabinet. Output is fragile HTML
    scraping and needs manual checking (see file header).
    """
    # data = data_loader.full(language)
    # data = utils.add_metadata(data,language)
    # p = list(zip(data['speaker_party'],data['speaker_party_name']))
    # up = []
    # for x in p:
    #     name = x[0].split(';')
    #     abv = x[1].split(';')
    #     if len(name) > 1:
    #         for c,n in enumerate(name):
    #             up.append([n,abv[c]])
    #     else:
    #         up.append([name[0],abv[0]])
    # up = pd.DataFrame(up,columns=["abv","name"]).groupby(["abv","name"]).sum().reset_index()
    root = ET.Element("relations")
    for url in urls:
        url = 'https://en.wikipedia.org/wiki/'+url
        s = parse_url(url)
        # "Date formed" cell; bracketed footnotes like [1] or (...) are stripped.
        date_formed = [x for x in s.find_all('th') if x.text == "Date formed"][0].findParent().find('td').text
        date_formed = re.sub("[\(\[].*?[\)\]]", "", date_formed)
        date_formed = parse(date_formed)
        date_formed = date_formed.strftime('%Y-%m-%d')
        try:
            date_dissolved = [x for x in s.find_all('th') if x.text == "Date dissolved"][0].findParent().find('td').text
            date_dissolved = re.sub("[\(\[].*?[\)\]]", "", date_dissolved)
            date_dissolved = parse(date_dissolved)
            date_dissolved = date_dissolved.strftime('%Y-%m-%d')
        except:
            # NOTE(review): bare except — any scraping/parsing failure is
            # treated as a still-sitting government.
            date_dissolved = "present"
        # Member parties (infobox uses either singular or plural header)
        try:
            member_parties = [x for x in s.find_all('th') if x.text == "Member party"]
            if len(member_parties) == 0:
                member_parties = [x for x in s.find_all('th') if x.text == "Member parties"]
            member_parties = member_parties[0].findParent().find('td').find_all('a')
            names = [find_party_name(x.attrs['href'],language) for x in member_parties]
            member_parties = names#[x.text for x in member_parties]
        except Exception as e:
            member_parties = []
        # Opposition Parties
        try:
            opp_parties = [x for x in s.find_all('th') if x.text == "Opposition party"]
            if len(opp_parties) == 0:
                opp_parties = [x for x in s.find_all('th') if x.text == "Opposition parties"]
            opp_parties = opp_parties[0].findParent().find('td').find_all('a')
            names = [find_party_name(x.attrs['href'],language) for x in opp_parties]
            opp_parties = names#[x.text for x in member_parties]
        except Exception as e:
            opp_parties = []
        w = {"start":date_formed,"end":date_dissolved,"member_parties":member_parties,"opposition_parties":opp_parties}
        # write xml
        period = ET.Element('relation')
        period.set("name","coalition")
        # NOTE(review): coalition refs use "party.X" while opposition refs
        # below use "#party.X" — presumably both should share one prefix
        # (TEI pointers are usually "#party.X"); confirm before changing.
        period.set("mutual"," ".join([f"party.{x}" for x in w['member_parties']]))
        period.set("from",w["start"])
        period.set("to",w["end"])
        period.set("gov_name",s.find('h1').text)
        root.append(period)
        period = ET.Element('relation')
        period.set("name","opposition")
        period.set("mutual"," ".join([f"#party.{x}" for x in w['opposition_parties']]))
        period.set("from",w["start"])
        period.set("to",w["end"])
        period.set("gov_name",s.find('h1').text)
        root.append(period)
    tree = ET.ElementTree(root)
    tree.write(f"/home/ruben/Documents/GitHub/ParlaMintCase/resources/coalitions/{language}.xml",encoding="UTF-8")
for k,v in wiki_links.items():
wikiparser(k,v) |
<reponame>jaimecruz21/lifeloopweb<filename>lifeloopweb/decorators.py<gh_stars>0
from functools import wraps
import flask
from lifeloopweb import logging
from lifeloopweb.db import models
LOG = logging.get_logger(__name__)
def can_search_users(current_user):
    """Decorator factory: redirect to the index unless the user may search users.

    NOTE(review): this checks ``current_user.can_add_group`` — the body is
    identical to ``can_add_group`` below. It looks like a copy-paste;
    presumably a dedicated ``can_search_users`` permission was intended.
    Confirm against the User model before changing the attribute.
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            if not current_user.can_add_group:
                flask.flash("You are unauthorized to do this task.",
                            'danger')
                return flask.redirect(flask.url_for('index'))
            return f(*args, **kwargs)
        return decorated_function
    return decorator
def can_add_group(current_user):
    """Decorator factory: redirect to the index unless the user may add groups."""
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            if current_user.can_add_group:
                return f(*args, **kwargs)
            flask.flash("You are unauthorized to do this task.", 'danger')
            return flask.redirect(flask.url_for('index'))
        return decorated_function
    return decorator
def is_group_member(current_user):
    """Decorator factory: require membership in the route's group.

    Expects a ``group_id`` keyword argument on the wrapped view.
    Missing group -> redirect to index; non-member -> redirect to the
    group page.
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            group_id = kwargs.get('group_id')
            if not models.Group.get(group_id):
                flask.flash('Group not found, please try again.', 'danger')
                return flask.redirect(flask.url_for('index'))
            if not current_user.is_group_member(group_id):
                flask.flash("You are unauthorized to do this task.", 'danger')
                return flask.redirect(
                    flask.url_for('groups.show', group_id=group_id))
            return f(*args, **kwargs)
        return decorated_function
    return decorator
def can_edit_group(current_user):
    """Decorator factory: require edit permission on the route's group.

    Missing group -> redirect to index; no permission -> redirect to the
    group page with a flash message.
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            group_id = kwargs.get('group_id')
            if not models.Group.get(group_id):
                flask.flash('Group not found, please try again.', 'danger')
                return flask.redirect(flask.url_for('index'))
            if not current_user.can_edit_group(group_id):
                flask.flash("You are unauthorized to do this task.", 'danger')
                return flask.redirect(
                    flask.url_for('groups.show', group_id=group_id))
            return f(*args, **kwargs)
        return decorated_function
    return decorator
def can_change_group_members_role(current_user):
    """Decorator factory: require permission to change member roles in a group.

    Unlike the other group decorators this passes the Group object (not
    its id) to the permission check, and bounces to the group edit page
    on failure.
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            group_id = kwargs.get('group_id')
            group = models.Group.get(group_id)
            if not group:
                flask.flash('Group not found, please try again.', 'danger')
                return flask.redirect(flask.url_for('index'))
            if not current_user.can_change_group_members_role(group):
                flask.flash("You are unauthorized to do this task.", 'danger')
                return flask.redirect(
                    flask.url_for('groups.edit', group_id=group_id))
            return f(*args, **kwargs)
        return decorated_function
    return decorator
def can_edit_group_api(current_user):
    """API variant of can_edit_group: 404 for a missing group, 403 when forbidden."""
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            group_id = kwargs.get('group_id')
            if not models.Group.get(group_id):
                return '', 404
            if not current_user.can_edit_group(group_id):
                return '', 403
            return f(*args, **kwargs)
        return decorated_function
    return decorator
def can_edit_org(current_user):
    """Decorator factory: require edit permission on the route's organization.

    Missing or inactive org -> redirect to index; no permission ->
    redirect to the org page with a flash message.
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            org_id = kwargs.get('org_id')
            org = models.Organization.get(org_id)
            if not org or not org.is_active:
                flask.flash('Organization not found, please try again.',
                            'danger')
                return flask.redirect(flask.url_for('index'))
            if not current_user.can_edit_org(org_id):
                flask.flash("You are unauthorized to do this task.", 'danger')
                return flask.redirect(
                    flask.url_for('orgs.show', org_id=org_id))
            return f(*args, **kwargs)
        return decorated_function
    return decorator
def can_edit_org_api(current_user):
    """API variant of can_edit_org: 404 for a missing/inactive org, 403 when forbidden."""
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            org_id = kwargs.get('org_id')
            org = models.Organization.get(org_id)
            if not org or not org.is_active:
                return '', 404
            if not current_user.can_edit_org(org_id):
                return '', 403
            return f(*args, **kwargs)
        return decorated_function
    return decorator
|
from qaoa.util.finite_difference import check_derivative
import numpy
def check_gradient(obj,theta,dtheta,h,order=1):
    """
    Evaluate the discrepancy of a circuit objective's first
    directional derivative using finite differences.

    Using an m-point finite difference approximation with offsets d1,..,dm
    and weights w1,...,wm, the directional derivative is approximated by

    .. math:
        \\bar f'(\\theta;\\delta\\theta) = h^{1-m} \\sum_{k=1}^{m}
        w_k f(\\theta + d_k h\\,\\delta\\theta)
        = f'(\\theta;\\delta\\theta) + r(h), \\qquad r(h) = O(h^{m-1})

    For the gradient to correctly correspond to the objective value, the
    residual norm must decrease at the appropriate power for at least some
    range of step sizes. The error will ultimately increase at small step sizes
    due to numerical round-off.

    Parameters
    ----------
    obj : qaoa.circuit.QuantumCircuit
        A quantum circuit objective function that provides value
        and gradient methods
    theta : numpy.ndarray
        A vector of control angles corresponding to each of the circuit
        stages
    dtheta : numpy.ndarray
        A vector of differential changes to control angles
    h : real scalar, array-like, or iterable
        Finite difference step size(s) to use in approximating the
        first directional derivative
    order : unsigned int
        Finite difference approximation order.

    Returns
    -------
    rnorm : same type as h
        Residual norm

    Example
    -------
    >>> obj = qaoa.circuit.load_maxcut()
    >>> theta = numpy.random.randn(obj.num_stages)
    >>> dtheta = numpy.random.randn(obj.num_stages)
    >>> nsteps = 10
    >>> h = [(10)**(-k) for k in range(nsteps)]
    >>> res = check_gradient(obj,theta,dtheta,h)
    >>> for k,r in enumerate(res):
    ...     print(h[k],r)
    1 7.404420469731904
    0.1 0.487594551752089
    0.01 0.07499866545689926
    0.001 0.008702780633479179
    0.0001 0.0008822235938517053
    1e-05 8.834160148474268e-05
    1e-06 8.833390358731208e-06
    1e-07 8.714259376318978e-07
    1e-08 6.762446780328446e-08
    1e-09 1.2888697948909567e-06
    """
    # Save the circuit's control state so the check has no side effects.
    theta_old = numpy.copy(obj.get_control())
    dtheta_old = numpy.copy(obj.get_differential_control())
    rnorm = check_derivative(obj.value,\
                             dtheta.dot(obj.gradient(theta)), \
                             theta,dtheta,h,order)
    # Restore the saved state.
    obj.set_control(theta_old)
    obj.set_differential_control(dtheta_old)
    return rnorm
def check_hess_vec(obj, theta, dtheta, h, order=1):
    """Finite-difference check of ``obj.hess_vec`` against ``obj.gradient``.

    Saves and restores the circuit's control state around the check and
    returns the residual norm(s) from ``check_derivative``.
    """
    saved_control = numpy.copy(obj.get_control())
    saved_differential = numpy.copy(obj.get_differential_control())
    residual = check_derivative(obj.gradient,
                                obj.hess_vec(theta, dtheta),
                                theta, dtheta, h, order)
    obj.set_control(saved_control)
    obj.set_differential_control(saved_differential)
    return residual
def check_dpsi(obj,theta,dtheta,h,order=1):
    # Finite-difference check of the differentiated state dpsi against the
    # state psi propagated through stage[-2]. Saves/restores the circuit's
    # control state so the check has no side effects.
    theta_old = numpy.copy(obj.get_control())
    dtheta_old = numpy.copy(obj.get_differential_control())
    obj.set_control(theta)
    obj.set_differential_control(dtheta)
    # Propagate the differential state up to the penultimate stage.
    obj.stage[-2].dpsi()
    dpsi = numpy.copy(obj.dpsi)
    def psi(x):
        # Re-evaluate the (undifferentiated) state for control vector x.
        obj.set_control(x)
        obj.stage[-2].psi()
        return numpy.copy(obj.psi)
    rnorm = check_derivative(psi,dpsi,theta,dtheta,h,order)
    obj.set_control(theta_old)
    obj.set_differential_control(dtheta_old)
    return rnorm
def check_dlam(obj,theta,dtheta,h,order=1):
    # Finite-difference check of the differentiated adjoint dlam against the
    # adjoint lam propagated through stage[1]. Mirrors check_dpsi; the
    # circuit's control state is saved and restored around the check.
    theta_old = numpy.copy(obj.get_control())
    dtheta_old = numpy.copy(obj.get_differential_control())
    obj.set_control(theta)
    obj.set_differential_control(dtheta)
    # Propagate the differential adjoint through the second stage.
    obj.stage[1].dlam()
    dlam = numpy.copy(obj.dlam)
    def lam(x):
        # Re-evaluate the (undifferentiated) adjoint for control vector x.
        obj.set_control(x)
        obj.stage[1].lam()
        return numpy.copy(obj.lam)
    rnorm = check_derivative(lam,dlam,theta,dtheta,h,order)
    obj.set_control(theta_old)
    obj.set_differential_control(dtheta_old)
    return rnorm
|
from uuid import uuid4
from flask_cors import CORS
import requests
from flask import Flask, jsonify, request
import json
import datetime
from blockchain import Blockchain
# Instantiate the Node
app = Flask(__name__)
CORS(app)  # allow cross-origin requests (e.g. from a browser UI)
# Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain
blockchain = Blockchain()
# Node role: 1 = manufacturer (may register products); 2 = agency.
# Overwritten from the --type CLI flag in the __main__ block below.
typeUser = 1
# Node display name; overwritten with the port number in __main__.
userName = "A"
@app.route('/chain', methods=['GET'])
def full_chain():
    """Return this node's entire chain together with its length."""
    payload = {
        'chain': blockchain.chain,
        'length': len(blockchain.chain),
    }
    return jsonify(payload), 200
# /register - register new product
# {
# param: upc
# param: manufacturer
# param: item_no
# param: new_owner
# }
@app.route('/register', methods=['POST'])
def register_product():
    """Mine a new block that registers a product (manufacturer nodes only).

    Expects JSON with 'upc', 'item_no' and 'new_owner'. The manufacturer
    recorded on the product is this node's identifier. Rejects duplicate
    (upc, manufacturer, item_no) registrations and non-manufacturer nodes.
    """
    # Guard clause: only manufacturer nodes (typeUser == 1) may register.
    if typeUser != 1:
        if typeUser == 2:
            return jsonify({"message": "you are not authorized to make new product",
                            "UserName": userName,
                            "UserID": node_identifier,
                            "UserType": "Agency"}), 400
        return jsonify({"message": "Unknown User type, you are not authorized to make new product",
                        "UserName": userName,
                        "UserID": node_identifier,
                        "UserType": typeUser}), 400
    # Validate the request before doing any expensive work.
    values = request.get_json()
    required = ['upc', 'item_no', 'new_owner']
    if not all(k in values for k in required):
        return 'Missing values', 400
    c_product = {'upc': values['upc'],
                 'manufacturer': node_identifier,
                 'item_no': values['item_no']}
    # Reject duplicates: each (upc, manufacturer, item_no) may be mined once.
    exist_block = blockchain.identifyProduct(blockchain.chain, c_product)
    if exist_block != False:
        # Fixed message (was the garbled "this Item can not mine twince").
        return jsonify({"message": "This item cannot be mined twice",
                        "upc": values['upc'],
                        "item_no": values['item_no']}), 400
    # Mine the block. Proof-of-work is done only after validation now,
    # so bad requests no longer pay the mining cost.
    last_block = blockchain.last_block
    proof = blockchain.proof_of_work(last_block)
    blockchain.new_product(upc=values['upc'], manufacturer=node_identifier, item_no=values['item_no'])
    blockchain.new_owner(owner_history=[], owner=values['new_owner'])
    previous_hash = blockchain.hash(last_block)
    block = blockchain.new_block(proof, previous_hash)
    response = {
        'message': "New Block Forged",
        'index': block['index'],
        'product': block['product'],
        'owner_history': block['owner_history'],
        'proof': block['proof'],
        'previous_hash': block['previous_hash'],
    }
    return jsonify(response), 201
# /transaction - create new transaction
# {
# param: upc
# param: manufacturer
# param: item_no
# param: current_owner
# param: new_owner
# }
@app.route('/transaction', methods=['POST'])
def transaction():
    """Transfer ownership of a product by mining a new block.

    Expects JSON with 'upc', 'manufacturer', 'item_no', 'current_owner'
    and 'new_owner'. The transfer is only mined when the chain confirms
    that 'current_owner' actually owns the product.
    """
    # Sync with peers first so ownership is verified against the longest chain.
    if blockchain.resolve_conflicts():
        print("BlockChain updated")
    # Validate request parameters before any mining work.
    values = request.get_json()
    required = ['upc', 'manufacturer', 'item_no', 'current_owner', 'new_owner']
    if not all(k in values for k in required):
        return 'Missing values', 400
    if values['current_owner'] == values['new_owner']:
        return 'You own this product, no need to do transfer', 400
    check_product = {'upc': values['upc'],
                     'manufacturer': values['manufacturer'],
                     'item_no': values['item_no']}
    # Verify that current_owner is the product's latest recorded owner.
    verified_block = blockchain.valid_trans(blockchain, check_product, values['current_owner'])
    if not verified_block:
        # Bug fix: this failure case previously returned HTTP 201 (and a
        # garbled message); an invalid transfer is a client error.
        return jsonify({'message': 'Transaction cannot be added to chain: verification failed'}), 400
    blockchain.new_product(upc=values['upc'], manufacturer=values['manufacturer'], item_no=values['item_no'])
    blockchain.new_owner(owner_history=list(verified_block["owner_history"]), owner=values['new_owner'])
    last_block = blockchain.last_block
    proof = blockchain.proof_of_work(last_block)
    previous_hash = blockchain.hash(last_block)
    block = blockchain.new_block(proof, previous_hash)
    response = {
        'message': "New Block Forged",
        'index': block['index'],
        'product': block['product'],
        'owner_history': block['owner_history'],
        'proof': block['proof'],
        'previous_hash': block['previous_hash'],
    }
    return jsonify(response), 201
# {
# param: upc
# param: item_no
# param: current_owner
# }
@app.route('/validate/<string:cur_upc>/<string:manufacturer>/<int:cur_item_no>/<string:cur_owner>', methods=['GET'])
def validate(cur_upc, manufacturer, cur_item_no, cur_owner):
    """Check whether *cur_owner* is the latest recorded owner of a product."""
    product = {'upc': cur_upc,
               'manufacturer': manufacturer,
               'item_no': cur_item_no}
    result = blockchain.valid_trans(blockchain, product, cur_owner)
    if not result:
        return jsonify({'message': 'invalid'}), 400
    return jsonify({'message': 'validated successfully',
                    'chain': result,
                    'length': len(blockchain.chain)}), 200
@app.route('/nodes/register', methods=['POST'])
def register_nodes():
    """Register a list of peer node addresses with this node.

    Skips any address that contains our own port (userName), since adding
    ourselves as a peer would be pointless.
    """
    values = request.get_json()
    nodes = values.get('nodes')
    if nodes is None:
        return "Error: Please supply a valid list of nodes", 400
    for node in nodes:
        if str(userName) not in node:
            blockchain.register_node(node)
        else:
            print("Adding yourself is skipped node Address: 127.0.0.1:", userName)
    return jsonify({
        'message': 'New nodes have been added',
        'total_nodes': list(blockchain.nodes),
        'ID': node_identifier
    }), 201
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
    """Run the consensus algorithm and report whether our chain was replaced."""
    if blockchain.resolve_conflicts():
        response = {'message': 'Our chain was replaced',
                    'new_chain': blockchain.chain}
    else:
        response = {'message': 'Our chain is authoritative',
                    'chain': blockchain.chain}
    return jsonify(response), 200
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Command-line options: -p/--port is the HTTP port this node listens on
    # (it doubles as the node's identity, see userName below); -t/--type is
    # the user type checked by the product-creation endpoint.
    parser = ArgumentParser()
    parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
    parser.add_argument('-t', '--type', default=1, type=int, help='type of user')
    args = parser.parse_args()
    port = args.port
    typeUser = args.type
    userName = port  # the listening port is reused as this node's "user name"
    app.run(host='0.0.0.0', port=port)
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: direct.controls.GravityWalker
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase import DirectObject
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.showbase.InputStateGlobal import inputState
from direct.task.Task import Task
from pandac.PandaModules import *
import math
class GravityWalker(DirectObject.DirectObject):
    """
    Avatar movement controller for Panda3D.

    Gravity is applied through a downward collision ray handled by a
    CollisionHandlerGravity ("lifter"); walls are handled by a pusher
    sphere; optional floor and event spheres can be enabled via config.
    (Recovered from decompiled direct.controls.GravityWalker.)
    """
    notify = directNotify.newCategory('GravityWalker')
    wantDebugIndicator = base.config.GetBool('want-avatar-physics-indicator', 0)
    wantFloorSphere = base.config.GetBool('want-floor-sphere', 0)
    earlyEventSphere = base.config.GetBool('early-event-sphere', 0)
    # Scale applied to forward/slide speed when moving diagonally (1/sqrt(2)).
    DiagonalFactor = math.sqrt(2.0) / 2.0

    def __init__(self, gravity=64.348, standableGround=0.707, hardLandingForce=16.0, legacyLifter=False):
        DirectObject.DirectObject.__init__(self)
        self.__gravity = gravity
        self.__standableGround = standableGround
        self.__hardLandingForce = hardLandingForce
        self._legacyLifter = legacyLifter
        # Jump / task state.
        self.mayJump = 1
        self.jumpDelayTask = None
        self.controlsTask = None
        self.indicatorTask = None
        self.falling = 0
        self.needToDeltaPos = 0
        self.physVelocityIndicator = None
        # Control speeds; configured later via setWalkSpeed().
        self.avatarControlForwardSpeed = 0
        self.avatarControlJumpForce = 0
        self.avatarControlReverseSpeed = 0
        self.avatarControlRotateSpeed = 0
        # Replaced by self.lifter.getAirborneHeight in setAirborneHeightFunc().
        self.getAirborneHeight = None
        # Velocity inherited from a moving platform/parent.
        self.priorParent = Vec3(0)
        self.__oldPosDelta = Vec3(0)
        self.__oldDt = 0
        # Current motion state.
        self.moving = 0
        self.speed = 0.0
        self.rotationSpeed = 0.0
        self.slideSpeed = 0.0
        self.vel = Vec3(0.0)
        self.collisionsActive = 0
        self.isAirborne = 0
        self.highMark = 0
        return

    def setWalkSpeed(self, forward, jump, reverse, rotate):
        """Configure movement speeds (units/sec) and jump force."""
        self.avatarControlForwardSpeed = forward
        self.avatarControlJumpForce = jump
        self.avatarControlReverseSpeed = reverse
        self.avatarControlRotateSpeed = rotate

    def getSpeeds(self):
        """Return the current (speed, rotationSpeed, slideSpeed) tuple."""
        return (
            self.speed, self.rotationSpeed, self.slideSpeed)

    def getIsAirborne(self):
        return self.isAirborne

    def setAvatar(self, avatar):
        self.avatar = avatar
        if avatar is not None:
            pass
        return

    def setupRay(self, bitmask, floorOffset, reach):
        """Create the downward collision ray and the gravity lifter handler."""
        # The ray starts high above the avatar (CollisionHandlerRayStart) and
        # points straight down.
        self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
        cRayNode = CollisionNode('GW.cRayNode')
        cRayNode.addSolid(self.cRay)
        self.cRayNodePath = self.avatarNodePath.attachNewNode(cRayNode)
        cRayNode.setFromCollideMask(bitmask)
        cRayNode.setIntoCollideMask(BitMask32.allOff())
        # The lifter keeps the avatar on the floor and applies gravity.
        self.lifter = CollisionHandlerGravity()
        self.lifter.setLegacyMode(self._legacyLifter)
        self.lifter.setGravity(self.__gravity)
        self.lifter.addInPattern('enter%in')
        self.lifter.addAgainPattern('again%in')
        self.lifter.addOutPattern('exit%in')
        self.lifter.setOffset(floorOffset)
        self.lifter.setReach(reach)
        self.lifter.addCollider(self.cRayNodePath, self.avatarNodePath)

    def setupWallSphere(self, bitmask, avatarRadius):
        """Create the wall-collision sphere and its pusher handler."""
        self.avatarRadius = avatarRadius
        cSphere = CollisionSphere(0.0, 0.0, avatarRadius, avatarRadius)
        cSphereNode = CollisionNode('GW.cWallSphereNode')
        cSphereNode.addSolid(cSphere)
        cSphereNodePath = self.avatarNodePath.attachNewNode(cSphereNode)
        cSphereNode.setFromCollideMask(bitmask)
        cSphereNode.setIntoCollideMask(BitMask32.allOff())
        if config.GetBool('want-fluid-pusher', 0):
            self.pusher = CollisionHandlerFluidPusher()
        else:
            self.pusher = CollisionHandlerPusher()
        self.pusher.addCollider(cSphereNodePath, self.avatarNodePath)
        self.cWallSphereNodePath = cSphereNodePath

    def setupEventSphere(self, bitmask, avatarRadius):
        """Create the intangible sphere used to fire enter/exit events."""
        self.avatarRadius = avatarRadius
        # Slightly larger than the wall sphere so events fire before contact.
        cSphere = CollisionSphere(0.0, 0.0, avatarRadius - 0.1, avatarRadius * 1.04)
        cSphere.setTangible(0)
        cSphereNode = CollisionNode('GW.cEventSphereNode')
        cSphereNode.addSolid(cSphere)
        cSphereNodePath = self.avatarNodePath.attachNewNode(cSphereNode)
        cSphereNode.setFromCollideMask(bitmask)
        cSphereNode.setIntoCollideMask(BitMask32.allOff())
        self.event = CollisionHandlerEvent()
        self.event.addInPattern('enter%in')
        self.event.addOutPattern('exit%in')
        self.cEventSphereNodePath = cSphereNodePath

    def setupFloorSphere(self, bitmask, avatarRadius):
        """Create the optional floor pusher sphere (see want-floor-sphere)."""
        self.avatarRadius = avatarRadius
        cSphere = CollisionSphere(0.0, 0.0, avatarRadius, 0.01)
        cSphereNode = CollisionNode('GW.cFloorSphereNode')
        cSphereNode.addSolid(cSphere)
        cSphereNodePath = self.avatarNodePath.attachNewNode(cSphereNode)
        cSphereNode.setFromCollideMask(bitmask)
        cSphereNode.setIntoCollideMask(BitMask32.allOff())
        # Bug fix: this was assigned to self.pusherFloorhandler while the next
        # line and setCollisionsActive() read self.pusherFloor, which raised
        # AttributeError whenever want-floor-sphere was enabled.
        self.pusherFloor = CollisionHandlerPusher()
        self.pusherFloor.addCollider(cSphereNodePath, self.avatarNodePath)
        self.cFloorSphereNodePath = cSphereNodePath

    def setWallBitMask(self, bitMask):
        self.wallBitmask = bitMask

    def setFloorBitMask(self, bitMask):
        self.floorBitmask = bitMask

    def swapFloorBitMask(self, oldMask, newMask):
        """Replace oldMask bits with newMask in the floor collide mask."""
        self.floorBitmask = self.floorBitmask & ~oldMask
        self.floorBitmask |= newMask
        if self.cRayNodePath and not self.cRayNodePath.isEmpty():
            self.cRayNodePath.node().setFromCollideMask(self.floorBitmask)

    def setGravity(self, gravity):
        self.__gravity = gravity
        self.lifter.setGravity(self.__gravity)

    def getGravity(self, gravity):
        # NOTE(review): the unused `gravity` parameter is kept for
        # backward compatibility with existing callers.
        return self.__gravity

    def initializeCollisions(self, collisionTraverser, avatarNodePath, avatarRadius=1.4, floorOffset=1.0, reach=1.0):
        """Build all collision solids/handlers and activate collisions."""
        self.avatarNodePath = avatarNodePath
        self.cTrav = collisionTraverser
        self.setupRay(self.floorBitmask, floorOffset, reach)
        self.setupWallSphere(self.wallBitmask, avatarRadius)
        self.setupEventSphere(self.wallBitmask, avatarRadius)
        if self.wantFloorSphere:
            self.setupFloorSphere(self.floorBitmask, avatarRadius)
        self.setCollisionsActive(1)

    def setTag(self, key, value):
        self.cEventSphereNodePath.setTag(key, value)

    def setAirborneHeightFunc(self, unused_parameter):
        # Rebinds the instance attribute to the lifter's accessor, shadowing
        # the getAirborneHeight() method below.
        self.getAirborneHeight = self.lifter.getAirborneHeight

    def getAirborneHeight(self):
        # Bug fix: the return statement was missing, so this always
        # returned None.
        return self.lifter.getAirborneHeight()

    def setAvatarPhysicsIndicator(self, indicator):
        self.cWallSphereNodePath.show()

    def deleteCollisions(self):
        """Tear down all collision solids and handlers."""
        del self.cTrav
        self.cWallSphereNodePath.removeNode()
        del self.cWallSphereNodePath
        if self.wantFloorSphere:
            self.cFloorSphereNodePath.removeNode()
            del self.cFloorSphereNodePath
        del self.pusher
        del self.event
        del self.lifter
        del self.getAirborneHeight

    def setCollisionsActive(self, active=1):
        """Attach (active=1) or detach (active=0) colliders from the traversers."""
        if self.collisionsActive != active:
            self.collisionsActive = active
            # Settle any interpenetration before toggling.
            self.oneTimeCollide()
            base.initShadowTrav()
            if active:
                self.avatarNodePath.setP(0.0)
                self.avatarNodePath.setR(0.0)
                self.cTrav.addCollider(self.cWallSphereNodePath, self.pusher)
                if self.wantFloorSphere:
                    self.cTrav.addCollider(self.cFloorSphereNodePath, self.pusherFloor)
                base.shadowTrav.addCollider(self.cRayNodePath, self.lifter)
                if self.earlyEventSphere:
                    self.cTrav.addCollider(self.cEventSphereNodePath, self.event)
                else:
                    base.shadowTrav.addCollider(self.cEventSphereNodePath, self.event)
            else:
                if hasattr(self, 'cTrav'):
                    self.cTrav.removeCollider(self.cWallSphereNodePath)
                    if self.wantFloorSphere:
                        self.cTrav.removeCollider(self.cFloorSphereNodePath)
                    self.cTrav.removeCollider(self.cEventSphereNodePath)
                base.shadowTrav.removeCollider(self.cEventSphereNodePath)
                base.shadowTrav.removeCollider(self.cRayNodePath)

    def getCollisionsActive(self):
        return self.collisionsActive

    def placeOnFloor(self):
        """Snap the avatar straight down onto the floor beneath it."""
        self.oneTimeCollide()
        self.avatarNodePath.setZ(self.avatarNodePath.getZ() - self.lifter.getAirborneHeight())

    def oneTimeCollide(self):
        """Run a single, immediate collision pass with a temporary traverser."""
        if not hasattr(self, 'cWallSphereNodePath'):
            return
        self.isAirborne = 0
        self.mayJump = 1
        tempCTrav = CollisionTraverser('oneTimeCollide')
        tempCTrav.addCollider(self.cWallSphereNodePath, self.pusher)
        if self.wantFloorSphere:
            tempCTrav.addCollider(self.cFloorSphereNodePath, self.event)
        tempCTrav.addCollider(self.cRayNodePath, self.lifter)
        tempCTrav.traverse(render)

    def setMayJump(self, task):
        self.mayJump = 1
        return Task.done

    def startJumpDelay(self, delay):
        """Disallow jumping for `delay` seconds (e.g. after a hard landing)."""
        if self.jumpDelayTask:
            self.jumpDelayTask.remove()
        self.mayJump = 0
        self.jumpDelayTask = taskMgr.doMethodLater(delay, self.setMayJump, 'jumpDelay-%s' % id(self))

    def addBlastForce(self, vector):
        self.lifter.addVelocity(vector.length())

    def displayDebugInfo(self):
        """Dump walker state to the on-screen debug overlay."""
        onScreenDebug.add('w controls', 'GravityWalker')
        onScreenDebug.add('w airborneHeight', self.lifter.getAirborneHeight())
        onScreenDebug.add('w falling', self.falling)
        onScreenDebug.add('w isOnGround', self.lifter.isOnGround())
        onScreenDebug.add('w contact normal', self.lifter.getContactNormal().pPrintValues())
        onScreenDebug.add('w mayJump', self.mayJump)
        onScreenDebug.add('w impact', self.lifter.getImpactVelocity())
        onScreenDebug.add('w velocity', self.lifter.getVelocity())
        onScreenDebug.add('w isAirborne', self.isAirborne)
        onScreenDebug.add('w hasContact', self.lifter.hasContact())

    def handleAvatarControls(self, task):
        """Per-frame task: read input state and move the avatar accordingly."""
        run = inputState.isSet('run')
        forward = inputState.isSet('forward')
        reverse = inputState.isSet('reverse')
        turnLeft = inputState.isSet('turnLeft')
        turnRight = inputState.isSet('turnRight')
        slideLeft = inputState.isSet('slideLeft')
        slideRight = inputState.isSet('slideRight')
        jump = inputState.isSet('jump')
        # and/or chains pick the first applicable signed speed (or 0/False).
        self.speed = forward and self.avatarControlForwardSpeed or reverse and -self.avatarControlReverseSpeed
        self.slideSpeed = reverse and slideLeft and -self.avatarControlReverseSpeed * 0.75 or reverse and slideRight and self.avatarControlReverseSpeed * 0.75 or slideLeft and -self.avatarControlForwardSpeed * 0.75 or slideRight and self.avatarControlForwardSpeed * 0.75
        self.rotationSpeed = not (slideLeft or slideRight) and (turnLeft and self.avatarControlRotateSpeed or turnRight and -self.avatarControlRotateSpeed)
        # Moving diagonally: normalize so total speed is unchanged.
        if self.speed and self.slideSpeed:
            self.speed *= GravityWalker.DiagonalFactor
            self.slideSpeed *= GravityWalker.DiagonalFactor
        debugRunning = inputState.isSet('debugRunning')
        if debugRunning:
            self.speed *= base.debugRunningMultiplier
            self.slideSpeed *= base.debugRunningMultiplier
            self.rotationSpeed *= 1.25
        if self.needToDeltaPos:
            self.setPriorParentVector()
            self.needToDeltaPos = 0
        if self.wantDebugIndicator:
            self.displayDebugInfo()
        if self.lifter.isOnGround():
            if self.isAirborne:
                # Just landed: fire landing events, optionally delay jumping.
                self.isAirborne = 0
                impact = self.lifter.getImpactVelocity()
                if impact < -30.0:
                    messenger.send('jumpHardLand')
                    self.startJumpDelay(0.3)
                else:
                    messenger.send('jumpLand')
                    if impact < -5.0:
                        self.startJumpDelay(0.2)
            self.priorParent = Vec3.zero()
            if jump and self.mayJump:
                self.lifter.addVelocity(self.avatarControlJumpForce)
                messenger.send('jumpStart')
                self.isAirborne = 1
        else:
            # Off the ground (walked off a ledge or mid-jump).
            self.isAirborne = 1
        self.__oldPosDelta = self.avatarNodePath.getPosDelta(render)
        self.__oldDt = ClockObject.getGlobalClock().getDt()
        dt = self.__oldDt
        self.moving = self.speed or self.slideSpeed or self.rotationSpeed or self.priorParent != Vec3.zero()
        if self.moving:
            distance = dt * self.speed
            slideDistance = dt * self.slideSpeed
            rotation = dt * self.rotationSpeed
            if distance or slideDistance or self.priorParent != Vec3.zero():
                rotMat = Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
                if self.isAirborne:
                    forward = Vec3.forward()
                else:
                    # On slopes, move along the surface rather than level.
                    contact = self.lifter.getContactNormal()
                    forward = contact.cross(Vec3.right())
                    forward.normalize()
                self.vel = Vec3(forward * distance)
                if slideDistance:
                    if self.isAirborne:
                        right = Vec3.right()
                    else:
                        right = forward.cross(contact)
                        right.normalize()
                    self.vel = Vec3(self.vel + right * slideDistance)
                self.vel = Vec3(rotMat.xform(self.vel))
                step = self.vel + self.priorParent * dt
                self.avatarNodePath.setFluidPos(Point3(self.avatarNodePath.getPos() + step))
            self.avatarNodePath.setH(self.avatarNodePath.getH() + rotation)
        else:
            self.vel.set(0.0, 0.0, 0.0)
        if self.moving or jump:
            messenger.send('avatarMoving')
        return Task.cont

    def doDeltaPos(self):
        self.needToDeltaPos = 1

    def setPriorParentVector(self):
        """Capture the velocity imparted by a (possibly moving) prior parent."""
        if self.__oldDt == 0:
            velocity = 0
        else:
            velocity = self.__oldPosDelta * (1.0 / self.__oldDt)
        self.priorParent = Vec3(velocity)

    def reset(self):
        self.lifter.setVelocity(0.0)
        self.priorParent = Vec3.zero()

    def getVelocity(self):
        return self.vel

    def enableAvatarControls(self):
        """Start the per-frame control task (and the indicator task if any)."""
        if self.controlsTask:
            self.controlsTask.remove()
        taskName = 'AvatarControls-%s' % (id(self),)
        self.controlsTask = taskMgr.add(self.handleAvatarControls, taskName, 25)
        self.isAirborne = 0
        self.mayJump = 1
        if self.physVelocityIndicator:
            if self.indicatorTask:
                self.indicatorTask.remove()
            self.indicatorTask = taskMgr.add(self.avatarPhysicsIndicator, 'AvatarControlsIndicator-%s' % (id(self),), 35)

    def disableAvatarControls(self):
        """Stop all tasks started by enableAvatarControls/startJumpDelay."""
        if self.controlsTask:
            self.controlsTask.remove()
            self.controlsTask = None
        if self.indicatorTask:
            self.indicatorTask.remove()
            self.indicatorTask = None
        if self.jumpDelayTask:
            self.jumpDelayTask.remove()
            self.jumpDelayTask = None
        return

    def flushEventHandlers(self):
        """Discard any queued collision events in all handlers."""
        if hasattr(self, 'cTrav'):
            self.pusher.flush()
            if self.wantFloorSphere:
                # Bug fix: this read self.floorPusher, which is never
                # assigned anywhere; the attribute is self.pusherFloor.
                self.pusherFloor.flush()
            self.event.flush()
        self.lifter.flush()

    def setCollisionRayHeight(self, height):
        self.cRay.setOrigin(0.0, 0.0, height)
<reponame>alpv95/MemeProject
import os
import numpy as np
from PIL import Image
import tensorflow as tf
from alexnet import AlexNet
from random import shuffle

# Mean of the ImageNet dataset in BGR, subtracted from every input image.
imagenet_mean = np.array([104., 117., 124.], dtype=np.float32)

current_dir = os.getcwd()
image_dir = os.path.join(current_dir, 'memes')
#image_dir = current_dir

# Placeholders for the input image and dropout keep probability.
x = tf.placeholder(tf.float32, [1, 227, 227, 3])
keep_prob = tf.placeholder(tf.float32)

# Create model with default config ( == no skip_layer and 1000 units in the last layer)
model = AlexNet(x, keep_prob, 1000, [], ['fc7', 'fc8'], 512)  # maybe need to put fc8 in skip_layers

# Use the fc6 activations as the image embedding.
score = model.fc6

img_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('jpg')]

# Load, de-duplicate, and lowercase all captions.
with open('captions.txt', 'r') as f:
    captions = f.readlines()
captions = list(set(captions))
captions = [s.lower() for s in captions]

data_memes = []
data_captions = []

# Doing everything in one script: (the fc6 vectors are quite sparse), will have to change this up to not get repeats
with tf.Session() as sess:
    # Initialize all variables
    sess.run(tf.global_variables_initializer())
    # Load the pretrained weights into the model
    model.load_initial_weights(sess)
    for i, meme in enumerate(img_files):
        # Derive the meme's name from its filename ("some-meme.jpg" -> "some meme").
        meme_name = meme.replace('/Users/ALP/Desktop/Stanford/CS224n/MemeProject/memes/', '')
        meme_name = meme_name.replace('.jpg', '').lower()
        meme_name = meme_name.replace('-', ' ')
        img = Image.open(meme)
        try:
            img.thumbnail((227, 227), Image.ANTIALIAS)
            #img = img.resize((227,227))
            #use img.thumbnail for square images, img.resize for non square
            assert np.shape(img) == (227, 227, 3)
        except AssertionError:
            img = img.resize((227, 227))
            print('sizing error')
        # Subtract the ImageNet mean
        img = img - imagenet_mean  # should probably change this
        # Reshape as needed to feed into model
        img = img.reshape((1, 227, 227, 3))
        meme_vector = sess.run(score, feed_dict={x: img, keep_prob: 1})  # [1,4096]
        meme_vector = np.reshape(meme_vector, [4096])
        assert np.shape(meme_vector) == (4096,)
        # Pair this meme's embedding with every caption mentioning its name.
        match = [s.split('-', 1)[-1].lstrip() for s in captions if meme_name in s]
        #now save in tfrecords format, or prepare for that action
        meme_vectors = [meme_vector for cap in match]
        assert len(meme_vectors) == len(match)
        data_memes.extend(meme_vectors)
        data_captions.extend(match)
        if i % 100 == 0:
            print(i, len(data_memes), len(data_captions))

# Delete examples whose caption came out empty.  Indices shift left by one
# for every prior deletion, hence the `ting - i` offset.
deleters = []
for i, ting in enumerate(data_captions):
    if ting == '':
        deleters.append(i)
for i, ting in enumerate(deleters):
    del data_captions[ting - i]
    del data_memes[ting - i]

# Split each caption into a list of word/punctuation tokens.
import re
word_captions = []
for capt in data_captions:
    words = re.findall(r'[\w]+|[.,!?;><(){}%$#£@-_+=|\/~`^&*]', capt)
    word_captions.append(words)

# Create vocabulary.
from collections import Counter
print("Creating vocabulary.")
counter = Counter()
for c in word_captions:
    counter.update(c)
print("Total words:", len(counter))

# Filter uncommon words and sort by descending count.
word_counts = [x for x in counter.items() if x[1] >= 3]
word_counts.sort(key=lambda x: x[1], reverse=True)
print("Words in vocabulary:", len(word_counts))

# Create the vocabulary dictionary.
reverse_vocab = [x[0] for x in word_counts]
#unk_id = len(reverse_vocab)
vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])

# LOAD PRE TRAINED GLOVE VECTORS and get tokenizer
EMBEDDING_DIMENSION = 300  # Available dimensions for 6B data is 50, 100, 200, 300
data_directory = '~/Desktop/Stanford/CS224n/MemeProject'

PAD_TOKEN = 0
word2idx = {'PAD': PAD_TOKEN}  # dict so we can lookup indices for tokenising our text later from string to sequence of integers
weights = []
index_counter = 0

# Renamed from `file` to avoid shadowing the builtin.
with open('glove.42B.300d.txt', 'r') as glove_file:
    for index, line in enumerate(glove_file):
        values = line.split()  # Word and weights separated by space
        word = values[0]  # Word is first symbol on each line
        if word in vocab_dict:
            index_counter += 1
            word_weights = np.asarray(values[1:], dtype=np.float32)  # Remainder of line is weights for word
            word2idx[word] = index_counter  # PAD is our zeroth index so shift by one
            weights.append(word_weights)
        if index % 20000 == 0:
            print(index)
        if index + 1 == 1500000:
            # Limit vocabulary to top 40k terms
            break

EMBEDDING_DIMENSION = len(weights[0])
# Insert the PAD weights at index 0 now we know the embedding dimension
weights.insert(0, np.random.randn(EMBEDDING_DIMENSION))

# Append unknown and pad to end of vocab and initialize as random #maybe include start and end token here
UNKNOWN_TOKEN = len(weights)
word2idx['UNK'] = UNKNOWN_TOKEN
word2idx['<S>'] = UNKNOWN_TOKEN + 1
word2idx['</S>'] = UNKNOWN_TOKEN + 2
weights.append(np.random.randn(EMBEDDING_DIMENSION))
weights.append(np.random.randn(EMBEDDING_DIMENSION))
weights.append(np.random.randn(EMBEDDING_DIMENSION))

# Construct our final vocab
weights = np.asarray(weights, dtype=np.float32)
VOCAB_SIZE = weights.shape[0]

# Save vocabulary.
with tf.gfile.FastGFile('vocab.txt', "w") as f:
    # Bug fix: dict.iteritems() does not exist in Python 3 (the rest of this
    # script already uses Python 3 print()); use items() instead.
    f.write("\n".join(["%s %d" % (w, c) for w, c in word2idx.items()]))
print("Wrote vocabulary file:", 'vocab.txt')

#save embedding matrix
#np.savetxt('embedding_matrix2',weights)
'''
#Tokenize all the captions
import re
token_captions = []
for capt in data_captions:
    token_caption = []
    token_caption.append(word2idx['<S>'])
    words = re.findall(r"[\w']+|[.,!?;'><(){}%$#£@-_+=|\/~`^&*]", capt)
    for word in words:
        try:
            token = word2idx[word]
        except KeyError:
            token = word2idx['UNK']
        token_caption.append(token)
    token_caption.append(word2idx['</S>'])
    token_captions.append(token_caption)
#potentially another filering step
deleters = []
for i,ting in enumerate(token_captions):
    if len(ting) == 2:
        deleters.append(i)
for i,ting in enumerate(deleters):
    del data_captions[ting-i]
    del data_memes[ting-i]
    del token_captions[ting-i]
#shuffle data
c = list(zip(data_memes, token_captions))
shuffle(c)
memes_shuffled, captions_shuffled = zip(*c)
def _int64_feature(value):
    """Wrapper for inserting an int64 Feature into a SequenceExample proto."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wrapper for inserting a bytes Feature into a SequenceExample proto."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature_list(values):
    """Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
    return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
#Tranform meme embeddings into integers for easy conversion to tfrecords file
memes_shuffled_int = []
for i,meme in enumerate(memes_shuffled):
    memes_shuffled_int.append(np.int_(meme*1000000000))
print(memes_shuffled_int[0][:100])
#write tfrecords file as joint sequence of images embeddings and captions
import sys
train_filename = 'train.tfrecords4'  # address to save the TFRecords file
# open the TFRecords file
writer = tf.python_io.TFRecordWriter(train_filename)
for i in range(len(memes_shuffled_int)):
    if not i % 20000:
        print 'Train data: {}/{}'.format(i, len(memes_shuffled_int))
        sys.stdout.flush()
    context = tf.train.Features(feature={
        "train/meme": _bytes_feature(memes_shuffled_int[i].tostring()), #this is the part that needs to be a float save
    })
    feature_lists = tf.train.FeatureLists(feature_list={
        "train/captions": _int64_feature_list(captions_shuffled[i])
    })
    sequence_example = tf.train.SequenceExample(
        context=context, feature_lists=feature_lists)
    writer.write(sequence_example.SerializeToString())
writer.close()
sys.stdout.flush()
'''
import os
import json
import joblib
import matplotlib.pyplot as plt
import numpy as np
import gym
import torch
from .data_structures.Config import Config
from environments import CPS_env
from environments.Open_AI_Wrappers import TimeLimit
def init_config(args):
    """Build a Config object for a training run from parsed CLI arguments.

    For args.task == 'cps' a custom CPS_env is built (wrapped in a TimeLimit);
    otherwise the task string is passed to gym.make().  The returned Config
    carries run settings, output paths, and the agent hyperparameter dict.
    """
    config = Config()
    config.seed = args.seed
    if args.task == 'cps':
        env = CPS_env(stage=args.stage, noise_sigma=args.noise_sigma, obs_noise_sigma=args.obs_noise_sigma,
                      init_nonoptimal=args.init_nonoptimal, action_scale=args.action_scale, obs_step=args.obs_step,
                      obs_signal=args.obs_signal, normalized_action=args.normalized, normalized_observation=args.normalized,
                      max_pzm=args.max_pzm, seed=args.seed, reward_threshold=args.reward_threshold,
                      spgd=args.spgd, perturb_scale=args.perturb_scale)
        # Cap episode length for the custom environment.
        env = TimeLimit(env, max_episode_steps=args.max_steps)
    else:
        env = gym.make(args.task)
    config.environment = env
    config.num_episodes_to_run = args.episodes
    # Output locations for raw results and the summary plot.
    config.file_to_save_data_results = os.path.join(args.save_path, 'data_results.pkl')
    config.file_to_save_results_graph = os.path.join(args.save_path, 'results_graph.png')
    config.show_solution_score = False  # args.render>0
    config.visualise_individual_results = False  # args.render>0
    config.visualise_overall_agent_results = args.visualize
    config.test_render = args.test_render
    config.save_per_episode = args.save_per_episode
    config.standard_deviation_results = 1.0
    config.runs_per_agent = args.runs_per_agent
    config.use_GPU = torch.cuda.is_available()
    config.overwrite_existing_results_file = False
    config.randomise_random_seed = True
    config.save_model = True
    # Hyperparameters shared by the actor-critic agents (DDPG/TD3/SAC-style);
    # several entries are algorithm-specific, as noted inline.
    config.hyperparameters = {
        "Actor_Critic_Agents": {
            "Actor": {
                "learning_rate": args.actor_lr,
                "linear_hidden_units": args.hidden_sizes,
                "final_layer_activation": "TANH",  # None,
                "batch_norm": False,
                "tau": args.tau,
                "gradient_clipping_norm": 5,
                "initialiser": "Xavier",
                # "y_range":(-1,1), # ()if args.agent=='sac': () else:
            },
            "Critic": {
                "learning_rate": args.critic_lr,
                "linear_hidden_units": args.hidden_sizes,
                "final_layer_activation": None,
                "batch_norm": False,
                "buffer_size": args.buffer_size,
                "tau": args.tau,
                "gradient_clipping_norm": 5,
                "initialiser": "Xavier"
            },
            "min_steps_before_learning": 1000,  # for SAC only
            "batch_size": args.batch_size,
            "discount_rate": args.discount_rate,
            "mu": 0.0,  # for O-H noise
            "theta": args.theta,  # for O-H noise
            "sigma": args.sigma,  # for O-H noise
            "action_noise_std": args.action_noise,  # for TD3
            "action_noise_clipping_range": args.noise_clip,  # for TD3
            "update_every_n_steps": args.update_actor_freq,
            "learning_updates_per_learning_session": args.update_per_session,
            "automatically_tune_entropy_hyperparameter": True,
            "entropy_term_weight": None,
            "add_extra_noise": True,
            "do_evaluation_iterations": True,
            "clip_rewards": False,
            "HER_sample_proportion": 0.8,
            "exploration_worker_difference": 1.0,
            "save_path": args.save_path,
            # SPGD (stochastic parallel gradient descent) mixing parameters.
            "spgd_factor": args.spgd_factor,
            "spgd_begin": args.spgd_begin,
            "spgd_warmup": args.spgd_warmup,
            "spgd_lr": args.spgd_lr,
            "spgd_momentum": args.spgd_momentum,
        }
    }
    return config
def init_trial_path(logdir):
    """Initialize the path for a hyperparameter setting

    Creates (if needed) `logdir`, then returns the first numbered
    subdirectory `logdir/<k>` (k starting at 1) that did not already exist,
    creating it before returning.
    """
    os.makedirs(logdir, exist_ok=True)
    trial_id = 1
    while os.path.exists(logdir + '/{:d}'.format(trial_id)):
        trial_id += 1
    save_path = logdir + '/{:d}'.format(trial_id)
    os.makedirs(save_path, exist_ok=True)
    return save_path
def subplot(R, P, Q, S, save_name=''):
    """Plot reward / policy-loss / Q-loss / max-steps curves in a 2x2 grid.

    Each of R, P, Q, S is a sequence of (value, x) pairs.  The raw data is
    also dumped next to the figure with a .pkl extension.
    """
    joblib.dump([R, P, Q, S], save_name.replace('png', 'pkl'))
    panels = [
        (R, (0, 0), 'r', 'Reward'),
        (P, (1, 0), 'b', 'Policy loss'),
        (Q, (0, 1), 'g', 'Q loss'),
        (S, (1, 1), 'k', 'Max steps'),
    ]
    plt.figure()
    fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
    for series, (row, col), color, title in panels:
        ys, xs = zip(*series)  # pairs are (value, x)
        ax[row, col].plot(list(xs), list(ys), color)
        ax[row, col].title.set_text(title)
    plt.savefig(save_name)
    plt.show()
def plot_pulse(pulses=[], save_dir='', name=''):
    """Overlay normalized pulse-intensity traces and optionally save the plot.

    Each pulse is a dict with 'T_ps' (time axis) and 'I' (intensity).  All
    traces are normalized by the peak intensity of the last pulse and drawn
    against the last pulse's time axis.
    """
    plt.figure()
    time_axis = pulses[-1]['T_ps']
    peak = max(pulses[-1]['I'])
    for idx, pulse in enumerate(pulses):
        plt.plot(time_axis, pulse['I'] / peak, label='Pulse_trains_' + str(idx))
    plt.legend()
    plt.title('Pulses')
    plt.xlabel('T_ps')
    plt.ylabel('A.U.')
    plt.title(name)  # NOTE: overrides the 'Pulses' title set above
    #plt.ylim((0,1))
    if len(save_dir) > 0 and len(name) > 0:
        plt.savefig(save_dir + name + 'pulse.png')
    plt.show()
    plt.close()
#!/usr/bin/env python3
"""Unit tests for spawneditor."""
import itertools
import os
import pathlib
import tempfile
import typing
import unittest
import unittest.mock
import spawneditor
class FakePosixPath(pathlib.PurePosixPath):
    """
    Fake version of `pathlib.PosixPath` that can run on non-POSIX systems.
    """
    def exists(self) -> bool:
        """
        Implementation of `pathlib.PosixPath.exists` that ignores the existence
        of `/usr/bin/editor`.
        """
        absolute = os.path.abspath(self)
        return absolute != "/usr/bin/editor"
def expect_edit_file(file_path: str,
                     *,
                     line_number: typing.Optional[int],
                     environment: typing.Dict[str, str],
                     expected_command: typing.Iterable[str],
                     editor: typing.Optional[str] = None,
                     os_name: str = "posix") -> None:
    """
    Verifies the behavior of `spawneditor.edit_file`, setting up necessary
    mocks.
    """
    patch = unittest.mock.patch
    # Patch the environment, platform name, path class, and subprocess.run so
    # the editor invocation can be inspected without actually spawning it.
    with patch("os.environ", environment), \
            patch("os.name", os_name), \
            patch("pathlib.Path", FakePosixPath), \
            patch("subprocess.run") as mock_run:
        spawneditor.edit_file(file_path,
                              line_number=line_number,
                              editor=editor)
        mock_run.assert_called_once_with(expected_command,
                                         stdin=None,
                                         check=True)
def expect_edit_temporary(
        test_case: unittest.TestCase,
        *,
        content_lines: typing.Optional[typing.Iterable[str]] = None,
        temporary_prefix: typing.Optional[str] = None,
        line_number: typing.Optional[int] = None,
        editor: typing.Optional[str] = None,
        stdin: typing.Optional[typing.TextIO] = None) -> None:
    """
    Verifies the behavior of `spawneditor.edit_temporary`, setting up necessary
    mocks.
    """
    # Captured by temp_file_wrapper so the assertions below can inspect the
    # temporary file's name and closed state.
    temp_file: typing.Optional[typing.IO[typing.Any]] = None
    original_temp_file = tempfile.NamedTemporaryFile
    # Snapshot the arguments so fake_edit_file can verify they are forwarded
    # unchanged (its own parameters shadow the outer names).
    expected_line_number = line_number
    expected_editor = editor
    expected_stdin = stdin
    # Text that the fake editor "writes"; edit_temporary should return it.
    output_lines = [
        "Lorem ipsum dolor sit amet,\n",
        "consectetur adipiscing elit.\n",
        "Cras dictum libero magna,\n",
        "at aliquet quam accumsan ultricies.\n",
        "Vestibulum efficitur eu.",  # Newline intentionally omitted.
    ]

    def temp_file_wrapper(*args: typing.Any,
                          **kwargs: typing.Any) -> typing.IO[typing.Any]:
        """
        A wrapper around `tempfile.NamedTemporaryFile` that captures the path
        to the temporary file.
        """
        nonlocal temp_file
        # pylint: disable=consider-using-with
        temp_file = original_temp_file(*args, **kwargs)
        return temp_file

    def fake_edit_file(file_path: str,
                       *,
                       line_number: typing.Optional[int] = None,
                       editor: typing.Optional[str] = None,
                       stdin: typing.Optional[typing.TextIO] = None) -> None:
        """
        Fake version of `spawneditor.edit_file` that verifies that it was
        called with the expected arguments, that the edited file has the
        expected content, and that writes predetermined output to the edited
        file.
        """
        if temporary_prefix is not None:
            test_case.assertTrue(
                os.path.basename(file_path).startswith(temporary_prefix))
        assert temp_file is not None
        test_case.assertEqual(file_path, temp_file.name)
        test_case.assertEqual(line_number, expected_line_number)
        test_case.assertEqual(editor, expected_editor)
        test_case.assertEqual(stdin, expected_stdin)

        # Verify the initial file contents.
        test_case.assertTrue(os.path.isfile(file_path))
        with open(file_path, "r") as f:
            test_case.assertEqual(
                f.read(),
                "\n".join(itertools.chain(content_lines, [""]))
                if content_lines else "")
        # Simulate the user editing the file.
        with open(file_path, "w") as f:
            f.writelines(output_lines)

    with unittest.mock.patch("tempfile.NamedTemporaryFile",
                             temp_file_wrapper), \
            unittest.mock.patch("spawneditor.edit_file",
                                side_effect=fake_edit_file,
                                autospec=True) as mock_edit:
        mock_manager = unittest.mock.Mock()
        mock_manager.attach_mock(mock_edit, "edit_file")
        lines = spawneditor.edit_temporary(content_lines,
                                           temporary_prefix=temporary_prefix,
                                           line_number=line_number,
                                           editor=editor)
        mock_manager.edit_file.assert_called_once()
    # The temporary file must be closed and removed after editing, and the
    # edited content returned as a list of lines.
    assert temp_file is not None
    test_case.assertTrue(temp_file.closed)
    test_case.assertFalse(os.path.isfile(temp_file.name))
    test_case.assertEqual(lines, output_lines)
# pylint: disable=no-self-use
class TestEditFile(unittest.TestCase):
    """Tests `spawneditor.edit_file`."""
    def test_basic_without_line(self) -> None:
        """Tests basic usage without a line number."""
        editor = "vi"
        file_path = "some_file.txt"
        expect_edit_file(file_path,
                         line_number=None,
                         environment={"EDITOR": editor},
                         expected_command=(editor, file_path))

    def test_basic_with_line(self) -> None:
        """Tests basic usage with a line number."""
        editor = "vi"
        file_path = "some_file.txt"
        line_number = 42
        # vi is a recognized editor, so the line number becomes a `+N` flag.
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment={"EDITOR": editor},
                         expected_command=(editor,
                                           f"+{line_number}",
                                           file_path))

    def test_unrecognized_editor_with_line(self) -> None:
        """Tests that line numbers are ignored for unrecognized editors."""
        editor = "some_unrecognized_editor"
        file_path = "some_file.txt"
        line_number = 42
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment={"EDITOR": editor},
                         expected_command=(editor, file_path))

    def test_arguments(self) -> None:
        """Tests that a full path and arguments to the editor are preserved."""
        editor = "/some/path with spaces/vi"
        file_path = "some_file.txt"
        line_number: typing.Optional[int] = None
        expect_edit_file(
            file_path,
            line_number=line_number,
            environment={"EDITOR": f"\"{editor}\" --one -2 three"},
            expected_command=(editor, "--one", "-2", "three", file_path))

        line_number = 42
        # The `+N` flag goes after the editor's own arguments.
        expect_edit_file(
            file_path,
            line_number=line_number,
            environment={"EDITOR": f"\"{editor}\" --one -2 three"},
            expected_command=(editor,
                              "--one",
                              "-2",
                              "three",
                              f"+{line_number}",
                              file_path))

    def test_hyphen_prefix(self) -> None:
        """
        Tests that file paths are tweaked to prevent file paths from starting
        with a hyphen.
        """
        editor = "vi"
        file_path = "-some_file.txt"
        line_number: typing.Optional[int] = None
        # A `./` prefix stops the path from being parsed as an option.
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment={"EDITOR": editor},
                         expected_command=(editor, f"./{file_path}"))

        line_number = 42
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment={"EDITOR": editor},
                         expected_command=(editor,
                                           f"+{line_number}",
                                           f"./{file_path}"))

    def test_editor_identification(self) -> None:
        """
        Tests that file extensions and directories are ignored when identifying
        editors.
        """
        editor = "C:/Program Files/Sublime Text/subl.exe"
        file_path = "some_file.txt"
        line_number = 42
        # Sublime Text uses `file:line` syntax rather than a `+N` flag.
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment={"EDITOR": f"\"{editor}\" --wait"},
                         expected_command=(editor,
                                           "--wait",
                                           f"{file_path}:{line_number}"))

    def test_precedence(self) -> None:
        """Tests that the editor is chosen in the expected order."""
        file_path = "some_file.txt"
        line_number = 42
        editor = "some_editor"
        visual = "some_visual_editor"
        explicit_editor = "explicit_editor"
        fake_environment: typing.Dict[str, str] = {}
        # No environment: fall back to vi (which honors line numbers).
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment=fake_environment,
                         expected_command=("vi", f"+{line_number}", file_path))

        # EDITOR set: used (unrecognized editor, so no line-number flag).
        fake_environment["EDITOR"] = editor
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment=fake_environment,
                         expected_command=("some_editor", file_path))

        # VISUAL set but no DISPLAY: EDITOR still wins.
        fake_environment["VISUAL"] = visual
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment=fake_environment,
                         expected_command=(editor, file_path))

        # DISPLAY present: VISUAL takes precedence over EDITOR.
        fake_environment["DISPLAY"] = ":0.0"
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment=fake_environment,
                         expected_command=(visual, file_path))

        # An explicit `editor` argument overrides the environment entirely.
        expect_edit_file(file_path,
                         line_number=line_number,
                         environment=fake_environment,
                         expected_command=(explicit_editor, file_path),
                         editor=explicit_editor)
class TestEditTemporary(unittest.TestCase):
    """Tests `spawneditor.edit_temporary`."""

    def test_basic(self) -> None:
        """Tests basic usage with default arguments."""
        expect_edit_temporary(self)

    def test_with_content(self) -> None:
        """Tests usage with initial instructions."""
        header = ["Do some stuff below the line.", "---"]
        # The caret should land on the first line after the instructions.
        expect_edit_temporary(self,
                              content_lines=header,
                              line_number=len(header) + 1)
# Allow this test module to be executed directly.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python
# Scratch visualisation script: draws the bounding box stored as attributes
# of a PyTables tree and scatters the points selected by index arrays.
import numpy as np
import tables as tb
from enthought.mayavi import mlab
# NOTE(review): tb.openFile is the PyTables 2.x API (renamed open_file in 3.x).
pippo = tb.openFile('prova.h5', 'r')
# Bounding-box corners stored as node attributes on /tree.
maxes = pippo.root.tree._v_attrs.maxes
mins = pippo.root.tree._v_attrs.mins
x_M = maxes[0]
y_M = maxes[1]
z_M = maxes[2]
x_m = mins[0]
y_m = mins[1]
z_m = mins[2]
# Closed 5-point polylines for the bottom and top faces of the box.
floor_x = np.array([x_m, x_M, x_M, x_m, x_m])
floor_y = np.array([y_m, y_m, y_M, y_M, y_m])
floor_z = np.array([z_m, z_m, z_m, z_m, z_m])
roof_x = np.array([x_m, x_M, x_M, x_m, x_m])
roof_y = np.array([y_m, y_m, y_M, y_M, y_m])
roof_z = np.array([z_M, z_M, z_M, z_M, z_M])
# The four vertical edges connecting floor to roof.
edge1x = np.array([x_m, x_m])
edge2x = np.array([x_M, x_M])
edge3x = np.array([x_M, x_M])
edge4x = np.array([x_m, x_m])
edge1y = np.array([y_m, y_m])
edge2y = np.array([y_m, y_m])
edge3y = np.array([y_M, y_M])
edge4y = np.array([y_M, y_M])
edge1z = np.array([z_m, z_M])
edge2z = np.array([z_m, z_M])
edge3z = np.array([z_m, z_M])
edge4z = np.array([z_m, z_M])
mlab.plot3d(floor_x, floor_y, floor_z)
mlab.plot3d(roof_x, roof_y, roof_z)
mlab.plot3d(edge1x, edge1y, edge1z)
mlab.plot3d(edge2x, edge2y, edge2z)
mlab.plot3d(edge3x, edge3y, edge3z)
mlab.plot3d(edge4x, edge4y, edge4z)
# NOTE(review): 'self' and 'node' are undefined at module level — the block
# below looks pasted from a class method and will raise NameError as-is.
idxs = np.array([])
for leaf in self.h5file.walkNodes(node, classname='Array'):
    idxs = np.hstack((idxs, leaf.read()))
points = self.h5file.root.data.read()[idxs.astype(int)]
mlab.points3d(points[:,0], points[:,1],points[:,2])
#####################################################################
# Second scratch script: same wireframe box (fixed 0..100 bounds) plus
# 1000-point random subsamples from five simulation snapshot files.
import numpy as np
import read_snapshots as rs
from enthought.mayavi import mlab
import random as rnd
#BOX
x_M = 100
y_M = 100
z_M = 100
x_m = 0
y_m = 0
z_m = 0
# Closed 5-point polylines for the bottom and top faces of the box.
floor_x = np.array([x_m, x_M, x_M, x_m, x_m])
floor_y = np.array([y_m, y_m, y_M, y_M, y_m])
floor_z = np.array([z_m, z_m, z_m, z_m, z_m])
roof_x = np.array([x_m, x_M, x_M, x_m, x_m])
roof_y = np.array([y_m, y_m, y_M, y_M, y_m])
roof_z = np.array([z_M, z_M, z_M, z_M, z_M])
# The four vertical edges connecting floor to roof.
edge1x = np.array([x_m, x_m])
edge2x = np.array([x_M, x_M])
edge3x = np.array([x_M, x_M])
edge4x = np.array([x_m, x_m])
edge1y = np.array([y_m, y_m])
edge2y = np.array([y_m, y_m])
edge3y = np.array([y_M, y_M])
edge4y = np.array([y_M, y_M])
edge1z = np.array([z_m, z_M])
edge2z = np.array([z_m, z_M])
edge3z = np.array([z_m, z_M])
edge4z = np.array([z_m, z_M])
mlab.plot3d(floor_x, floor_y, floor_z)
mlab.plot3d(roof_x, roof_y, roof_z)
mlab.plot3d(edge1x, edge1y, edge1z)
mlab.plot3d(edge2x, edge2y, edge2z)
mlab.plot3d(edge3x, edge3y, edge3z)
mlab.plot3d(edge4x, edge4y, edge4z)
# NOTE(review): 'xrange' is Python 2 only; each snapshot is loaded, 1000
# random rows of its 'pos' field kept, then freed with 'del' to bound memory.
snap_1=rs.read_snapshot('snap_newMillen_subidorder_067.0')
pos_1 = snap_1['pos'][rnd.sample(xrange(snap_1['pos'].shape[0]),1000), :]
del snap_1
snap_2=rs.read_snapshot('snap_newMillen_subidorder_067.100')
pos_2 = snap_2['pos'][rnd.sample(xrange(snap_2['pos'].shape[0]),1000), :]
del snap_2
snap_3=rs.read_snapshot('snap_newMillen_subidorder_067.200')
pos_3 = snap_3['pos'][rnd.sample(xrange(snap_3['pos'].shape[0]),1000), :]
del snap_3
snap_4=rs.read_snapshot('snap_newMillen_subidorder_067.300')
pos_4 = snap_4['pos'][rnd.sample(xrange(snap_4['pos'].shape[0]),1000), :]
del snap_4
snap_5=rs.read_snapshot('snap_newMillen_subidorder_067.511')
pos_5 = snap_5['pos'][rnd.sample(xrange(snap_5['pos'].shape[0]),1000), :]
del snap_5
mlab.points3d(pos_1[:,0], pos_1[:,1],pos_1[:,2], scale_factor=2)
mlab.points3d(pos_2[:,0], pos_2[:,1],pos_2[:,2], scale_factor=2)
mlab.points3d(pos_3[:,0], pos_3[:,1],pos_3[:,2], scale_factor=2)
mlab.points3d(pos_4[:,0], pos_4[:,1],pos_4[:,2], scale_factor=2)
mlab.points3d(pos_5[:,0], pos_5[:,1],pos_5[:,2], scale_factor=2)
# NOTE(review): 'pos' is read from the HDF5 node, yet indexed as pos['pos']
# on the next line — presumably a record array; verify the node's dtype.
h5file = tb.openFile('../mill2_web/mill2_fof_snap67.h5', mode = "r")
pos=h5file.root.all_fof_pos.read()
fof = pos['pos'][rnd.sample(xrange(pos.shape[0]),200000), :]
mlab.points3d(fof[:,0], fof[:,1],fof[:,2], scale_factor=2)
# ******************** (section separator)
# Third scratch script: matplotlib 3-D scatter of a 10k-point subsample.
import matplotlib.pyplot as plt
import random as rnd
from mpl_toolkits.mplot3d import Axes3D
# NOTE(review): relies on 'pos' from the previous section, and 'colours'
# and 'i' are undefined here — this snippet is not runnable standalone.
pos=pos[rnd.sample(xrange(pos.shape[0]),10000), :]
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(pos[:,0], pos[:,1], pos[:,2], color=colours[i%6])
|
# Source: joelnb/duckyPad — resources/color_tests/randomcolour.py
import os
import random
import colorsys
# for x in range(1,13):
# os.system("echo F" + str(x) + ' > key' + str(x) + '_F' + str(x) + '.txt')
# for x in range(1,13):
# os.system('touch key' + str(x) + '_test' + str(x) + '.txt')
# for x in range(128):
# print(chr(x), end='')
# ----
# special_keys = ["ESCAPE", "ESC", "ENTER", "UP", "DOWN", "LEFT", "RIGHT", "UPARROW", "DOWNARROW", "LEFTARROW", "RIGHTARROW", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10", "F11", "F12", "BACKSPACE", "TAB", "CAPSLOCK", "PRINTSCREEN", "SCROLLLOCK", "PAUSE", "BREAK", "INSERT", "HOME", "PAGEUP", "PAGEDOWN", "DELETE", "END", "SPACE", "SHIFT", "ALT", "GUI", "WINDOWS", "CONTROL", "CTRL"]
# print(special_keys)
# for x in range(1,13):
# filename = 'key' + str(x) + '_test' + str(x) + '.txt'
# os.system('touch ' + filename)
# content = ""
# for item in random.sample(special_keys, 4):
# content += item + ' '
# os.system('echo ' + content.rstrip() + ' > ' + filename)
# -----
# for x in range(1,13):
# filename = 'key' + str(x) + '_' + str(x) + '.txt'
# os.system('touch ' + filename)
# content = "GUI F" + str(x)
# os.system('echo ' + content.rstrip() + ' > ' + filename)
# for x in range(13,16):
# filename = 'key' + str(x) + '_' + str(x) + '.txt'
# os.system('touch ' + filename)
# content = "CONTROL F" + str(x-3)
# os.system('echo ' + content.rstrip() + ' > ' + filename)
# -----
# colour_values = [16, 127, 255]
# def make_color():
# return (random.choice(colour_values), random.choice(colour_values), random.choice(colour_values))
# already_done = set()
# this_color = set((0, 0, 0))
# for x in range(1,16):
# while 1:
# this_color = make_color()
# if this_color not in already_done:
# break
# print("SWCOLOR_" + str(x) + ' ' + str(this_color[0]) + ' ' + str(this_color[1]) + ' ' + str(this_color[2]))
# already_done.add(this_color)
# -----
# The palette is restricted to three intensity levels per channel.
colour_values = [16, 127, 255]

def make_color():
    """Pick a random RGB triple from ``colour_values`` and return it with
    its HSV representation as ``(r, g, b, h, s, v)``."""
    red, green, blue = (random.choice(colour_values) for _ in range(3))
    hue, sat, val = colorsys.rgb_to_hsv(red / 256, green / 256, blue / 256)
    return (red, green, blue, hue, sat, val)
# Generate 16 distinct non-grey colours, then print them (sorted by hue and
# saturation) as SWCOLOR_* config lines.
already_done = set()
# NOTE(review): set((0, 0, 0)) builds the set {0}, not an RGB triple; the
# value is overwritten before first use, so this initialiser is dead code.
this_color = set((0, 0, 0))
for x in range(1,17):
    while 1:
        this_color = make_color()
        # Reject grey/white/black shades where R == G == B.
        if this_color[0] == this_color[1] == this_color[2]:
            continue
        if this_color not in already_done:
            break
    already_done.add(this_color)
# Sort by hue, then saturation (tuple positions 3 and 4 from make_color).
sss = sorted(already_done, key=lambda x: (x[3], x[4]))
# NOTE(review): this prints sss[1]..sss[15], skipping sss[0] even though 16
# colours were generated and no SWCOLOR_16 is emitted — confirm whether the
# off-by-one is intentional.
for x in range(1,16):
    print("SWCOLOR_" + str(x) + ' ' + str(sss[x][0]) + ' ' + str(sss[x][1]) + ' ' + str(sss[x][2]))
|
from datalab_cohorts import (
StudyDefinition,
patients,
codelist_from_csv,
codelist,
filter_codes_by_category,
)
## CODE LISTS
# All codelist are held within the codelist/ folder.
# ICD-10 codes identifying COVID-19 on a death certificate.
covid_codelist = codelist(["U071", "U072"], system="icd10")
aplastic_codes = codelist_from_csv(
    "codelists/opensafely-aplastic-anaemia.csv", system="ctv3", column="CTV3ID"
)
hiv_codes = codelist_from_csv(
    "codelists/opensafely-hiv.csv", system="ctv3", column="CTV3ID"
)
permanent_immune_codes = codelist_from_csv(
    "codelists/opensafely-permanent-immunosuppression.csv",
    system="ctv3",
    column="CTV3ID",
)
temp_immune_codes = codelist_from_csv(
    "codelists/opensafely-temporary-immunosuppression.csv",
    system="ctv3",
    column="CTV3ID",
)
stroke = codelist_from_csv(
    "codelists/opensafely-stroke-updated.csv", system="ctv3", column="CTV3ID"
)
dementia = codelist_from_csv(
    "codelists/opensafely-dementia.csv", system="ctv3", column="CTV3ID"
)
# Smoking lists carry a category column ("S"/"E"/"N" — used by the
# smoking_status categorisation in the study definition below).
clear_smoking_codes = codelist_from_csv(
    "codelists/opensafely-smoking-clear.csv",
    system="ctv3",
    column="CTV3Code",
    category_column="Category",
)
unclear_smoking_codes = codelist_from_csv(
    "codelists/opensafely-smoking-unclear.csv",
    system="ctv3",
    column="CTV3Code",
    category_column="Category",
)
other_neuro = codelist_from_csv(
    "codelists/opensafely-other-neurological-conditions.csv",
    system="ctv3",
    column="CTV3ID",
)
# Ethnicity in 6-group and 16-group classifications (same CSV, different
# category column).
ethnicity_codes = codelist_from_csv(
    "codelists/opensafely-ethnicity.csv",
    system="ctv3",
    column="Code",
    category_column="Grouping_6",
)
ethnicity_codes_16 = codelist_from_csv(
    "codelists/opensafely-ethnicity.csv",
    system="ctv3",
    column="Code",
    category_column="Grouping_16",
)
chronic_respiratory_disease_codes = codelist_from_csv(
    "codelists/opensafely-chronic-respiratory-disease.csv",
    system="ctv3",
    column="CTV3ID",
)
asthma_codes = codelist_from_csv(
    "codelists/opensafely-asthma-diagnosis.csv", system="ctv3", column="CTV3ID"
)
# Medication code lists use SNOMED rather than CTV3.
salbutamol_codes = codelist_from_csv(
    "codelists/opensafely-asthma-inhaler-salbutamol-medication.csv",
    system="snomed",
    column="id",
)
ics_codes = codelist_from_csv(
    "codelists/opensafely-asthma-inhaler-steroid-medication.csv",
    system="snomed",
    column="id",
)
pred_codes = codelist_from_csv(
    "codelists/opensafely-asthma-oral-prednisolone-medication.csv",
    system="snomed",
    column="snomed_id",
)
chronic_cardiac_disease_codes = codelist_from_csv(
    "codelists/opensafely-chronic-cardiac-disease.csv", system="ctv3", column="CTV3ID"
)
diabetes_codes = codelist_from_csv(
    "codelists/opensafely-diabetes.csv", system="ctv3", column="CTV3ID"
)
lung_cancer_codes = codelist_from_csv(
    "codelists/opensafely-lung-cancer.csv", system="ctv3", column="CTV3ID"
)
haem_cancer_codes = codelist_from_csv(
    "codelists/opensafely-haematological-cancer.csv", system="ctv3", column="CTV3ID"
)
other_cancer_codes = codelist_from_csv(
    "codelists/opensafely-cancer-excluding-lung-and-haematological.csv",
    system="ctv3",
    column="CTV3ID",
)
bone_marrow_transplant_codes = codelist_from_csv(
    "codelists/opensafely-bone-marrow-transplant.csv", system="ctv3", column="CTV3ID"
)
chemo_radio_therapy_codes = codelist_from_csv(
    "codelists/opensafely-chemotherapy-or-radiotherapy-updated.csv",
    system="ctv3",
    column="CTV3ID",
)
chronic_liver_disease_codes = codelist_from_csv(
    "codelists/opensafely-chronic-liver-disease.csv", system="ctv3", column="CTV3ID"
)
gi_bleed_and_ulcer_codes = codelist_from_csv(
    "codelists/opensafely-gi-bleed-or-ulcer.csv", system="ctv3", column="CTV3ID"
)
inflammatory_bowel_disease_codes = codelist_from_csv(
    "codelists/opensafely-inflammatory-bowel-disease.csv",
    system="ctv3",
    column="CTV3ID",
)
# Individual CTV3 codes for lab measurements and vitals.
creatinine_codes = codelist(["XE2q5"], system="ctv3")
hba1c_new_codes = codelist(["XaPbt", "Xaeze", "Xaezd"], system="ctv3")
hba1c_old_codes = codelist(["X772q", "XaERo", "XaERp"], system="ctv3")
dialysis_codes = codelist_from_csv(
    "codelists/opensafely-chronic-kidney-disease.csv", system="ctv3", column="CTV3ID"
)
organ_transplant_codes = codelist_from_csv(
    "codelists/opensafely-solid-organ-transplantation.csv",
    system="ctv3",
    column="CTV3ID",
)
spleen_codes = codelist_from_csv(
    "codelists/opensafely-asplenia.csv", system="ctv3", column="CTV3ID"
)
sickle_cell_codes = codelist_from_csv(
    "codelists/opensafely-sickle-cell-disease.csv", system="ctv3", column="CTV3ID"
)
ra_sle_psoriasis_codes = codelist_from_csv(
    "codelists/opensafely-ra-sle-psoriasis.csv", system="ctv3", column="CTV3ID"
)
systolic_blood_pressure_codes = codelist(["2469."], system="ctv3")
diastolic_blood_pressure_codes = codelist(["246A."], system="ctv3")
hypertension_codes = codelist_from_csv(
    "codelists/opensafely-hypertension.csv", system="ctv3", column="CTV3ID"
)
## STUDY POPULATION
# Defines both the study population and points to the important covariates
study = StudyDefinition(
default_expectations={
"date": {"earliest": "1970-01-01", "latest": "today"},
"rate": "uniform",
"incidence": 0.2,
},
# This line defines the study population
population=patients.registered_with_one_practice_between(
"2019-02-01", "2020-02-01"
),
# Outcomes
icu_date_admitted=patients.admitted_to_icu(
on_or_after="2020-02-01",
include_day=True,
returning="date_admitted",
find_first_match_in_period=True,
),
died_date_cpns=patients.with_death_recorded_in_cpns(
on_or_before="2020-06-01",
returning="date_of_death",
include_month=True,
include_day=True,
),
died_ons_covid_flag_any=patients.with_these_codes_on_death_certificate(
covid_codelist, on_or_before="2020-06-01", match_only_underlying_cause=False,
return_expectations={"date": {"earliest": "2020-03-01"}},
),
died_ons_covid_flag_underlying=patients.with_these_codes_on_death_certificate(
covid_codelist, on_or_before="2020-06-01", match_only_underlying_cause=True,
return_expectations={"date": {"earliest": "2020-03-01"}},
),
died_date_ons=patients.died_from_any_cause(
on_or_before="2020-06-01",
returning="date_of_death",
include_month=True,
include_day=True,
return_expectations={"date": {"earliest": "2020-03-01"}},
),
# The rest of the lines define the covariates with associated GitHub issues
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/33
age=patients.age_as_of("2020-02-01",
return_expectations={
"rate": "universal",
"int": {"distribution": "population_ages"},
},
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/46
sex=patients.sex(
return_expectations={
"rate": "universal",
"category": {"ratios": {"M": 0.49, "F": 0.51}},
}
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/52
imd=patients.address_as_of(
"2020-02-01", returning="index_of_multiple_deprivation", round_to_nearest=100,
return_expectations={
"rate": "universal",
"category": {"ratios": {"100": 0.1, "200": 0.2, "300": 0.7}},
},
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/37
rural_urban=patients.address_as_of(
"2020-02-01", returning="rural_urban_classification",
return_expectations={
"rate": "universal",
"category": {"ratios": {"rural": 0.1, "urban": 0.9}},
},
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/54
stp=patients.registered_practice_as_of("2020-02-01", returning="stp_code",
return_expectations={
"rate": "universal",
"category": {
"ratios": {
"STP1": 0.1,
"STP2": 0.1,
"STP3": 0.1,
"STP4": 0.1,
"STP5": 0.1,
"STP6": 0.1,
"STP7": 0.1,
"STP8": 0.1,
"STP9": 0.1,
"STP10": 0.1,
}
},
},
),
# region - one of NHS England 9 regions
region=patients.registered_practice_as_of(
"2020-02-01", returning="nhse_region_name",
return_expectations={
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and the Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East of England": 0.1,
"London": 0.2,
"South East": 0.2,
},
},
},
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/10
bmi=patients.most_recent_bmi(
on_or_after="2010-02-01",
minimum_age_at_measurement=16,
include_measurement_date=True,
include_month=True,
return_expectations={
"date": {},
"float": {"distribution": "normal", "mean": 35, "stddev": 10},
"incidence": 0.95,
},
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/6
smoking_status=patients.categorised_as(
{
"S": "most_recent_smoking_code = 'S'",
"E": """
most_recent_smoking_code = 'E' OR (
most_recent_smoking_code = 'N' AND ever_smoked
)
""",
"N": "most_recent_smoking_code = 'N' AND NOT ever_smoked",
"M": "DEFAULT",
},
return_expectations={
"category": {"ratios": {"S": 0.6, "E": 0.1, "N": 0.2, "M": 0.1}}
},
most_recent_smoking_code=patients.with_these_clinical_events(
clear_smoking_codes,
find_last_match_in_period=True,
on_or_before="2020-02-01",
returning="category",
),
ever_smoked=patients.with_these_clinical_events(
filter_codes_by_category(clear_smoking_codes, include=["S", "E"]),
on_or_before="2020-02-01",
),
),
smoking_status_date=patients.with_these_clinical_events(
clear_smoking_codes,
on_or_before="2020-02-01",
return_last_date_in_period=True,
include_month=True,
),
most_recent_unclear_smoking_cat_date=patients.with_these_clinical_events(
unclear_smoking_codes,
on_or_before="2020-02-01",
return_last_date_in_period=True,
include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/27
ethnicity=patients.with_these_clinical_events(
ethnicity_codes,
returning="category",
find_last_match_in_period=True,
include_date_of_match=True,
return_expectations={
"category": {"ratios": {"1": 0.2, "2": 0.2, "3": 0.2, "4": 0.2, "5": 0.2}},
"incidence": 0.75,
},
),
ethnicity_16=patients.with_these_clinical_events(
ethnicity_codes_16,
returning="category",
find_last_match_in_period=True,
include_date_of_match=True,
return_expectations={
"category": {
"ratios": {
"1": 0.0625,
"2": 0.0625,
"3": 0.0625,
"4": 0.0625,
"5": 0.0625,
"6": 0.0625,
"7": 0.0625,
"8": 0.0625,
"9": 0.0625,
"10": 0.0625,
"11": 0.0625,
"12": 0.0625,
"13": 0.0625,
"14": 0.0625,
"15": 0.0625,
"16": 0.0625,
}
},
"incidence": 0.75,
},
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/21
chronic_respiratory_disease=patients.with_these_clinical_events(
chronic_respiratory_disease_codes,
return_first_date_in_period=True,
include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/55
asthma=patients.categorised_as(
{
"0": "DEFAULT",
"1": """
(
recent_asthma_code OR (
asthma_code_ever AND NOT
copd_code_ever
)
) AND (
prednisolone_last_year = 0 OR
prednisolone_last_year > 4
)
""",
"2": """
(
recent_asthma_code OR (
asthma_code_ever AND NOT
copd_code_ever
)
) AND
prednisolone_last_year > 0 AND
prednisolone_last_year < 5
""",
},
return_expectations={"category": {"ratios": {"0": 0.8, "1": 0.1, "2": 0.1}},},
recent_asthma_code=patients.with_these_clinical_events(
asthma_codes, between=["2017-02-01", "2020-02-01"],
),
asthma_code_ever=patients.with_these_clinical_events(asthma_codes),
copd_code_ever=patients.with_these_clinical_events(
chronic_respiratory_disease_codes
),
prednisolone_last_year=patients.with_these_medications(
pred_codes,
between=["2019-02-01", "2020-02-01"],
returning="number_of_matches_in_period",
),
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/7
chronic_cardiac_disease=patients.with_these_clinical_events(
chronic_cardiac_disease_codes,
return_first_date_in_period=True,
include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/30
diabetes=patients.with_these_clinical_events(
diabetes_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/32
lung_cancer=patients.with_these_clinical_events(
lung_cancer_codes, return_first_date_in_period=True, include_month=True,
),
haem_cancer=patients.with_these_clinical_events(
haem_cancer_codes, return_first_date_in_period=True, include_month=True,
),
other_cancer=patients.with_these_clinical_events(
other_cancer_codes, return_first_date_in_period=True, include_month=True,
),
bone_marrow_transplant=patients.with_these_clinical_events(
bone_marrow_transplant_codes,
return_last_date_in_period=True,
include_month=True,
),
chemo_radio_therapy=patients.with_these_clinical_events(
chemo_radio_therapy_codes, return_last_date_in_period=True, include_month=True,
),
# # https://github.com/ebmdatalab/tpp-sql-notebook/issues/12
chronic_liver_disease=patients.with_these_clinical_events(
chronic_liver_disease_codes,
return_first_date_in_period=True,
include_month=True,
),
# # https://github.com/ebmdatalab/tpp-sql-notebook/issues/14
other_neuro=patients.with_these_clinical_events(
other_neuro, return_first_date_in_period=True, include_month=True,
),
stroke=patients.with_these_clinical_events(
stroke, return_first_date_in_period=True, include_month=True,
),
dementia=patients.with_these_clinical_events(
dementia, return_first_date_in_period=True, include_month=True,
),
# # Chronic kidney disease
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/17
creatinine=patients.with_these_clinical_events(
creatinine_codes,
find_last_match_in_period=True,
on_or_before="2020-02-01",
returning="numeric_value",
include_date_of_match=True,
include_month=True,
return_expectations={
"float": {"distribution": "normal", "mean": 60.0, "stddev": 15},
"incidence": 0.95,
},
),
dialysis=patients.with_these_clinical_events(
dialysis_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/31
organ_transplant=patients.with_these_clinical_events(
organ_transplant_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/13
dysplenia=patients.with_these_clinical_events(
spleen_codes, return_first_date_in_period=True, include_month=True,
),
sickle_cell=patients.with_these_clinical_events(
sickle_cell_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/36
aplastic_anaemia=patients.with_these_clinical_events(
aplastic_codes, return_last_date_in_period=True, include_month=True,
),
hiv=patients.with_these_clinical_events(
hiv_codes, return_first_date_in_period=True, include_month=True,
),
permanent_immunodeficiency=patients.with_these_clinical_events(
permanent_immune_codes, return_first_date_in_period=True, include_month=True,
),
temporary_immunodeficiency=patients.with_these_clinical_events(
temp_immune_codes, return_last_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/23
# immunosuppressant_med=
# hypertension
hypertension=patients.with_these_clinical_events(
hypertension_codes, return_first_date_in_period=True, include_month=True,
),
# Blood pressure
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/35
bp_sys=patients.mean_recorded_value(
systolic_blood_pressure_codes,
on_most_recent_day_of_measurement=True,
on_or_before="2020-02-01",
include_measurement_date=True,
include_month=True,
return_expectations={
"float": {"distribution": "normal", "mean": 80, "stddev": 10},
"date": {"latest": "2020-02-15"},
"incidence": 0.95,
},
),
bp_dias=patients.mean_recorded_value(
diastolic_blood_pressure_codes,
on_most_recent_day_of_measurement=True,
on_or_before="2020-02-01",
include_measurement_date=True,
include_month=True,
return_expectations={
"float": {"distribution": "normal", "mean": 120, "stddev": 10},
"date": {"latest": "2020-02-15"},
"incidence": 0.95,
},
),
hba1c_mmol_per_mol=patients.with_these_clinical_events(
hba1c_new_codes,
find_last_match_in_period=True,
on_or_before="2020-02-01",
returning="numeric_value",
include_date_of_match=True,
include_month=True,
return_expectations={
"float": {"distribution": "normal", "mean": 40.0, "stddev": 20},
"incidence": 0.95,
},
),
hba1c_percentage=patients.with_these_clinical_events(
hba1c_old_codes,
find_last_match_in_period=True,
on_or_before="2020-02-01",
returning="numeric_value",
include_date_of_match=True,
include_month=True,
return_expectations={
"float": {"distribution": "normal", "mean": 5, "stddev": 2},
"incidence": 0.95,
},
),
# # https://github.com/ebmdatalab/tpp-sql-notebook/issues/49
ra_sle_psoriasis=patients.with_these_clinical_events(
ra_sle_psoriasis_codes, return_first_date_in_period=True, include_month=True,
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/51
gi_bleed_and_ulcer=patients.with_these_clinical_events(
gi_bleed_and_ulcer_codes, return_first_date_in_period=True, include_month=True
),
# https://github.com/ebmdatalab/tpp-sql-notebook/issues/50
inflammatory_bowel_disease=patients.with_these_clinical_events(
inflammatory_bowel_disease_codes,
return_first_date_in_period=True,
include_month=True,
),
)
|
<reponame>SergeyRom-23/MB-Lab-master-RU
# MB-Lab
#
# Сайт ветки MB-Lab: https://github.com/animate1978/MB-Lab
# Сайт ветки перевода на русский язык MB-Lab: https://github.com/SergeyRom-23/MB-Lab-master-RU
#
# ##### НАЧАЛО ЛИЦЕНЗИОННОГО БЛОКА GPL #####
#
# Эта программа является свободным программным обеспечением; Вы можете распространять его и / или
# изменить его в соответствии с условиями GNU General Public License
# как опубликовано Фондом свободного программного обеспечения; либо версия 3
# Лицензии или (по вашему выбору) любой более поздней версии.
#
# Эта программа распространяется в надежде, что она будет полезна,
# но БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ; даже без подразумеваемой гарантии
# ИЗДЕЛИЯ или ПРИГОДНОСТЬ ДЛЯ ОСОБЫХ ЦЕЛЕЙ. Смотрите
# GNU General Public License для более подробной информации.
#
# Вы должны были получить копию Стандартной общественной лицензии GNU
# вместе с этой программой; если нет, напишите в Фонд свободного программного обеспечения,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### КОНЕЦ ЛИЦЕНЗИОННОГО БЛОКА GPL #####
#
# ManuelbastioniLAB - Авторские права (C) 2015-2018 <NAME>
# Перевод (C) 2019 <NAME> 23
import bpy
from math import radians, degrees
# ------------------------------------------------------------------------
# Initializations
# ------------------------------------------------------------------------
# Bone names of the MB-Lab skeleton; the index comments are used by
# ragdoll_dict below to reference bones by position.
MB_list = [
    "root", #0
    "head", #1
    "neck", #2
    "clavicle_L", #3
    "clavicle_R", #4
    "breast_L", #5
    "breast_R", #6
    "spine03", #7
    "spine02", #8
    "spine01", #9
    "pelvis", #10
    "thigh_L", #11
    "calf_L", #12
    "foot_L", #13
    "thigh_R", #14
    "calf_R", #15
    "foot_R", #16
    "upperarm_L", #17
    "lowerarm_L", #18
    "hand_L", #19
    "upperarm_R", #20
    "lowerarm_R", #21
    "hand_R" #22
]
def _finger_chain(name, side, first=0):
    """Bone names for one finger, e.g. index00_L .. index03_L."""
    return ["%s%02d_%s" % (name, seg, side) for seg in range(first, 4)]

# Left hand (thumbs have no 00 segment).
thumb_l = _finger_chain("thumb", "L", first=1)
index_l = _finger_chain("index", "L")
middle_l = _finger_chain("middle", "L")
ring_l = _finger_chain("ring", "L")
pinky_l = _finger_chain("pinky", "L")
# Right hand.
thumb_r = _finger_chain("thumb", "R", first=1)
index_r = _finger_chain("index", "R")
middle_r = _finger_chain("middle", "R")
ring_r = _finger_chain("ring", "R")
pinky_r = _finger_chain("pinky", "R")
# All finger bones, left hand first.
fingers = (thumb_l + index_l + middle_l + ring_l + pinky_l
           + thumb_r + index_r + middle_r + ring_r + pinky_r)
# Per-bone ragdoll rotation limits, in degrees, in the order
# [min_x, max_x, min_y, max_y, min_z, max_z] (consumed by
# limit_bone_rotation below, which converts them with radians()).
ragdoll_dict = {
    MB_list[1]: [-22, 37, -45, 45, -30, 30],
    MB_list[2]: [-22, 37, -45, 45, -30, 30],
    MB_list[3]: [-30, 30, 0, 0, -30, 10],
    MB_list[4]: [-30, 30, 0, 0, -10, 30],
    MB_list[10]: [-22, 45, -45, 45, -15, 15],
    MB_list[9]: [-45, 68, -45, 45, -30, 30],
    MB_list[8]: [-45, 68, -45, 45, -30, 30],
    MB_list[7]: [-45, 22, -45, 45, -30, 30],
    MB_list[17]: [-58, 95, -30, 15, -60, 105],
    MB_list[20]: [-58, 95, -30, 15, -60, 105],
    MB_list[18]: [-146, 0, -15, 0, 0, 0],
    MB_list[21]: [-146, 0, -15, 0, 0, 0],
    MB_list[19]: [-45, 45, -90, 86, -25, 36],
    MB_list[22]: [-45, 45, -86, 90, -36, 25],
    MB_list[11]: [-90, 45, -15, 15, -22, 17],
    MB_list[14]: [-90, 45, -15, 15, -22, 17],
    MB_list[12]: [0, 150, 0, 0, 0, 0],
    MB_list[15]: [0, 150, 0, 0, 0, 0],
    MB_list[13]: [-44, 45, -26, 26, -15, 74],
    MB_list[16]: [-45, 44, -26, 26, -74, 15]
}
#Dictionary for fingers
def finger_dict(fingers):
    """Map each finger bone name to its rotation limit list
    [min_x, max_x, min_y, max_y, min_z, max_z] in degrees.

    Each entry gets its own list instance, so callers may mutate one
    finger's limits without affecting the others.
    """
    # Dict comprehension replaces the original loop of fd.update() calls;
    # a fresh list literal is built per finger, as before.
    return {finger: [-90, 0, 0, 0, -5, 5] for finger in fingers}
# Rotation limits for every finger bone of both hands.
fd = finger_dict(fingers)
###############################################################################################################################
#CONSTRAINT OPS
#************************************************************ADD LIMIT ROTATION
def limit_bone_rotation(Dict, pb):
    """Add a local-space LIMIT_ROTATION constraint to each pose bone.

    ``Dict`` maps bone names to [min_x, max_x, min_y, max_y, min_z, max_z]
    limits in degrees; bones absent from ``Dict`` are left untouched.
    """
    for bone in pb:
        limits = Dict.get(bone.name)
        if limits is None:
            continue
        con = bone.constraints.new(type='LIMIT_ROTATION')
        con.owner_space = 'LOCAL'
        con.use_limit_x = con.use_limit_y = con.use_limit_z = True
        con.min_x, con.max_x = radians(limits[0]), radians(limits[1])
        con.min_y, con.max_y = radians(limits[2]), radians(limits[3])
        con.min_z, con.max_z = radians(limits[4]), radians(limits[5])
def limit_finger_rotation(Dict, pb):
    """Add local-space LIMIT_ROTATION constraints to finger pose bones.

    The original body was a line-for-line copy of limit_bone_rotation,
    so this delegates to it to remove the duplication; behavior is
    unchanged.
    """
    limit_bone_rotation(Dict, pb)
#************************************************************REMOVE BONE CONSTRAINT
def remove_bone_constraints(constraint, pb):
    """Strip every constraint of type ``constraint`` from each pose bone."""
    for bone in pb:
        # Snapshot first: never mutate the collection while iterating it.
        doomed = tuple(c for c in bone.constraints if c.type == constraint)
        for con in doomed:
            bone.constraints.remove(con)
###############################################################################################################################
#MAIN
def get_skeleton():
    """Return the armature: the active object itself if it is an armature,
    otherwise the active object's parent."""
    active = bpy.context.object
    return active if active.type == 'ARMATURE' else active.parent
# def humanoid_rot_limits():
# armature = get_skeleton()
# pb = armature.pose.bones
# limit_bone_rotation(ragdoll_dict, pb)
# limit_finger_rotation(fd, pb)
|
from __future__ import annotations
from abc import abstractproperty, ABCMeta
from datetime import datetime, timedelta
from json import JSONDecodeError
import os
import typing as t
import warnings
from piccolo.apps.user.tables import BaseUser
from starlette.exceptions import HTTPException
from starlette.endpoints import HTTPEndpoint, Request
from starlette.responses import (
HTMLResponse,
RedirectResponse,
PlainTextResponse,
JSONResponse,
)
from starlette.status import HTTP_303_SEE_OTHER
from starlette.templating import Jinja2Templates
from piccolo_api.session_auth.tables import SessionsBase
if t.TYPE_CHECKING:
from starlette.responses import Response
# Jinja2 environment rooted at this package's bundled "templates" folder.
TEMPLATES = Jinja2Templates(
    directory=os.path.join(os.path.dirname(__file__), "templates")
)
class SessionLogoutEndpoint(HTTPEndpoint, metaclass=ABCMeta):
    """Abstract endpoint that logs a user out by destroying their session."""

    @abstractproperty
    def _session_table(self) -> t.Type[SessionsBase]:
        """The Piccolo table holding the active sessions."""
        raise NotImplementedError

    @abstractproperty
    def _cookie_name(self) -> str:
        """Name of the cookie carrying the session token."""
        raise NotImplementedError

    async def post(self, request: Request) -> PlainTextResponse:
        """Remove the session identified by the session cookie."""
        token = request.cookies.get(self._cookie_name)
        if not token:
            raise HTTPException(
                status_code=401, detail="The session cookie wasn't found."
            )
        await self._session_table.remove_session(token=token)
        response = PlainTextResponse("Successfully logged out")
        # Expire the cookie immediately so the browser discards it.
        response.set_cookie(self._cookie_name, "", max_age=0)
        return response
class SessionLoginEndpoint(HTTPEndpoint, metaclass=ABCMeta):
    """Abstract login endpoint: GET renders the login form, POST creates a session."""
    @abstractproperty
    def _auth_table(self) -> t.Type[BaseUser]:
        # Table used to verify the username/password pair.
        raise NotImplementedError
    @abstractproperty
    def _session_table(self) -> t.Type[SessionsBase]:
        # Table the created session row is stored in.
        raise NotImplementedError
    @abstractproperty
    def _session_expiry(self) -> timedelta:
        # Initial lifetime of a freshly created session.
        raise NotImplementedError
    @abstractproperty
    def _max_session_expiry(self) -> timedelta:
        # Hard upper bound on a session's lifetime across refreshes.
        raise NotImplementedError
    @abstractproperty
    def _cookie_name(self) -> str:
        # Name of the cookie carrying the session token.
        raise NotImplementedError
    @abstractproperty
    def _redirect_to(self) -> t.Optional[str]:
        """
        Where to redirect to after login is successful. It's the name of a
        Starlette route.
        """
        raise NotImplementedError
    @abstractproperty
    def _production(self) -> bool:
        """
        If True, apply more stringent security.
        """
        raise NotImplementedError
    async def get(self, request: Request) -> HTMLResponse:
        """Render the login form."""
        template = TEMPLATES.get_template("login.html")
        # If CSRF middleware is present, we have to include a form field with
        # the CSRF token. It only works if CSRFMiddleware has
        # allow_form_param=True, otherwise it only looks for the token in the
        # header.
        csrftoken = request.scope.get("csrftoken")
        csrf_cookie_name = request.scope.get("csrf_cookie_name")
        return HTMLResponse(
            template.render(
                csrftoken=csrftoken, csrf_cookie_name=csrf_cookie_name
            )
        )
    async def post(self, request: Request) -> Response:
        """Validate credentials, create a session, and set the session cookie.

        Raises HTTP 401 when credentials are missing or wrong; on success
        responds with a redirect (or JSON when ``_redirect_to`` is None).
        """
        # Some middleware (for example CSRF) has already awaited the request
        # body, and adds it to the request.
        body = request.scope.get("form")
        if not body:
            # Accept either a JSON payload or a classic form post.
            try:
                body = await request.json()
            except JSONDecodeError:
                body = await request.form()
        username = body.get("username", None)
        password = body.get("password", None)
        if (not username) or (not password):
            raise HTTPException(
                status_code=401, detail="Missing username or password"
            )
        user_id = await self._auth_table.login(
            username=username, password=password
        )
        if not user_id:
            raise HTTPException(status_code=401, detail="Login failed")
        # NOTE(review): naive local time is used for the expiry computation -
        # confirm the sessions table also stores naive local timestamps.
        now = datetime.now()
        expiry_date = now + self._session_expiry
        max_expiry_date = now + self._max_session_expiry
        session: SessionsBase = await self._session_table.create_session(
            user_id=user_id,
            expiry_date=expiry_date,
            max_expiry_date=max_expiry_date,
        )
        if self._redirect_to is not None:
            response: Response = RedirectResponse(
                url=self._redirect_to, status_code=HTTP_303_SEE_OTHER
            )
        else:
            response = JSONResponse(
                content={"message": "logged in"}, status_code=200
            )
        if not self._production:
            message = (
                "If running sessions in production, make sure 'production' "
                "is set to True, and serve under HTTPS."
            )
            warnings.warn(message)
        # Cookie lives as long as the maximum session lifetime; httponly
        # keeps it away from JavaScript, secure requires HTTPS in production.
        response.set_cookie(
            key=self._cookie_name,
            value=session.token,
            httponly=True,
            secure=self._production,
            max_age=int(self._max_session_expiry.total_seconds()),
            samesite="lax",
        )
        return response
def session_login(
    auth_table: t.Type[BaseUser] = BaseUser,
    session_table: t.Type[SessionsBase] = SessionsBase,
    session_expiry: timedelta = timedelta(hours=1),
    max_session_expiry: timedelta = timedelta(days=7),
    redirect_to: t.Optional[str] = "/",
    production: bool = False,
    cookie_name: str = "id",
) -> t.Type[SessionLoginEndpoint]:
    """
    Build a concrete endpoint class for creating a user session.

    :param auth_table:
        Table used to authenticate the username and password.
    :param session_table:
        Table the session rows are stored in.
    :param session_expiry:
        Lifetime of a freshly created session.
    :param max_session_expiry:
        Upper bound on a session's lifetime when it gets refreshed (see the
        ``increase_expiry`` parameter of ``SessionsAuthBackend``); beyond
        this limit the session is void.
    :param redirect_to:
        Where to send the user after a successful login.
    :param production:
        Enables additional security measures. Use this in production, when
        serving your app over HTTPS.
    :param cookie_name:
        Name of the cookie storing the session token. Only override this if
        it clashes with other cookies.
    """

    class _SessionLoginEndpoint(SessionLoginEndpoint):
        _auth_table = auth_table
        _cookie_name = cookie_name
        _max_session_expiry = max_session_expiry
        _production = production
        _redirect_to = redirect_to
        _session_expiry = session_expiry
        _session_table = session_table

    return _SessionLoginEndpoint
def session_logout(
    session_table: t.Type[SessionsBase] = SessionsBase,
    cookie_name: str = "id",
) -> t.Type[SessionLogoutEndpoint]:
    """
    Build a concrete endpoint class for clearing a user session.

    :param session_table:
        Table the session rows are stored in.
    :param cookie_name:
        Name of the cookie storing the session token. Only override this if
        it clashes with other cookies.
    """

    class _SessionLogoutEndpoint(SessionLogoutEndpoint):
        _cookie_name = cookie_name
        _session_table = session_table

    return _SessionLogoutEndpoint
|
#!/usr/bin/env python3
import argparse
import copy
import sys
import os
import hglib
import git
import logging
import importlib.machinery
import types
import re
from multiprocessing import Pool
# Root logger configured to emit debug-level messages to stdout with
# millisecond timestamps.
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
# Renamed from ``format`` to avoid shadowing the ``format`` builtin.
_LOG_FORMAT = '[%(asctime)s.%(msecs)03d] [%(name)s] [%(levelname)s] ' \
              '%(message)s'
formatter = logging.Formatter(_LOG_FORMAT, '%Y-%m-%d %H:%M:%S')
cons_handler = logging.StreamHandler(sys.stdout)
cons_handler.setFormatter(formatter)
LOGGER.addHandler(cons_handler)
def goto_version(repos_path, app_name, app_version, git_remote,
                 mercurial_remote):
    """Synchronise all repos under *repos_path* to a given app version.

    With no *app_name*, simply pulls every repo to tip. Otherwise looks up
    the ``<app_name>_<app_version>`` tag in the 'versions' mercurial repo
    and updates every dependent repo to the changesets recorded for that
    version. Returns 0 on success, 1 on failure (and None in the
    no-app_name path).
    """
    if app_name is None:
        # No specific version requested: pull everything to tip.
        _goto_version(
            repos_path,
            {'git': git_remote, 'mercurial': mercurial_remote}
        )
        return
    # The 'versions' repo records, via tags, which changesets make up each
    # released app version.
    versions_client = hglib.open('{0}/{1}'.format(repos_path, 'versions'))
    versions_client.revert([], all=True)
    versions_client.pull(update=True, branch='default')
    root = versions_client.root().decode()
    tags = versions_client.tags()
    # tip_app_tag: newest tag for this app; app_tag: the requested version.
    # NOTE(review): tag tuples come from hglib - element [0] is the tag
    # name; confirm index semantics against the hglib docs.
    tip_app_tag = None
    tip_app_tag_index = None
    app_tag = None
    app_tag_index = None
    for idx, tag in enumerate(tags):
        if not tag[0].decode().startswith(app_name):
            continue
        if tip_app_tag is None:
            tip_app_tag = tag
        tip_app_tag_index = idx
        tag_ver = tag[0].decode().split('{0}_'.format(app_name))[1]
        if app_version == tag_ver:
            app_tag = tag
            app_tag_index = idx
            break
    if app_tag is None:
        LOGGER.info('Tag with app: {0} with version: {1} was not found'.format(
            app_name, app_version
        ))
        return 1
    app_ver_path = None
    # Files touched by the newest tag of this app; used to locate the
    # version bookkeeping modules.
    paths = versions_client.status(change=tip_app_tag[0])
    for path in paths:
        if not path[1].decode().endswith('/main_version.py'):
            continue
        # Retrieve the tag of the service
        # NOTE(review): this rebinds app_tag/app_name/app_version from the
        # preceding tag entry - confirm the tag-ordering assumption.
        app_tag = tags[app_tag_index - 1]
        res = re.search('(.+)_(.+)', app_tag[0].decode())
        app_name = res.groups()[0]
        app_version = res.groups()[1]
        main_ver_path = '{0}/{1}'.format(root, path[1].decode())
        # Load main_version.py dynamically to read its `services` mapping.
        loader = importlib.machinery.SourceFileLoader(
            'main_version', main_ver_path)
        mod_ver = types.ModuleType(loader.name)
        loader.exec_module(mod_ver)
        services = mod_ver.services
        app_ver_path = '{0}/{1}'.format(root, services[app_name]['path'])
        break
    for path in paths:
        if app_ver_path is None:
            if not path[1].decode().endswith('/version.py'):
                continue
            app_ver_path = '{0}/{1}'.format(root, path[1].decode())
        app_ver_dir_path = os.path.dirname(app_ver_path)
        # version.py holds the changesets of the *current* (tip) version.
        loader = importlib.machinery.SourceFileLoader(
            'version', app_ver_path)
        mod_ver = types.ModuleType(loader.name)
        loader.exec_module(mod_ver)
        current_changesets = mod_ver.changesets
        if tip_app_tag_index == app_tag_index:
            # Requested version is the newest one: use current changesets.
            _goto_version(
                repos_path,
                {'git': git_remote, 'mercurial': mercurial_remote},
                current_changesets,
            )
            return 0
        # Older version requested: look it up in the history file.
        app_hist_ver_path = '{0}/{1}'.format(
            app_ver_dir_path, '_version_history.py')
        if not os.path.isfile(app_hist_ver_path):
            LOGGER.info(
                'Missing history version file: {0}'.format(app_hist_ver_path)
            )
            return 1
        loader = importlib.machinery.SourceFileLoader(
            '_version_history', app_hist_ver_path)
        mod_ver = types.ModuleType(loader.name)
        loader.exec_module(mod_ver)
        hist_changesets = mod_ver.changesets
        if app_version not in hist_changesets:
            LOGGER.info(
                'App: {0} with version: {1} was not found in hist file'.format(
                    app_name, app_version))
            return 1
        _goto_version(
            repos_path,
            {'git': git_remote, 'mercurial': mercurial_remote},
            hist_changesets[app_version],
        )
        return 0
    LOGGER.info('App: {0} with version: {1} was not found'.format(
        app_name, app_version
    ))
    return 1
def _mercurial_pull(cur_path, repo, changesets):
    """Pull and update a single mercurial working copy.

    Returns a result dict with ``repo``, ``status`` (0 success / 1 failure)
    and ``description`` keys, suitable for aggregation via ``Pool.map``.
    """
    try:
        client = hglib.open(cur_path)
    except hglib.error.ServerError as exc:
        # Not a usable mercurial repo - skip it rather than fail the batch.
        LOGGER.exception('Skipping "{0}" directory reason:\n{1}\n'.format(
            cur_path, exc)
        )
        return {'repo': repo, 'status': 0, 'description': None}
    # Bug fix: the client used to be closed in a ``finally`` right after the
    # dirty-check, yet was still used afterwards. Keep it open until the end.
    try:
        try:
            diff = client.diff()
        except hglib.error.ServerError as exc:
            LOGGER.exception('Skipping "{0}" directory reason:\n{1}\n'.format(
                cur_path, exc)
            )
            return {'repo': repo, 'status': 0, 'description': None}
        if diff != b'':
            err = 'Pending changes in {0}. Aborting'.format(cur_path)
            LOGGER.info('{0}. Will abort operation'.format(err))
            return {'repo': repo, 'status': 1, 'description': err}
        # ``summary`` rather than ``sum`` to avoid shadowing the builtin.
        summary = client.summary()
        cur_branch = summary['branch'.encode('utf-8')].decode()
        out = client.outgoing(branch=cur_branch)
        if out:
            err = 'Outgoing changes in {0}. Aborting'.format(cur_path)
            LOGGER.info('{0}. Will abort operation'.format(err))
            return {'repo': repo, 'status': 1, 'description': err}
        LOGGER.info('Pulling from {0}'.format(repo))
        client.pull(update=True)
        # If no changesets were given - update to default branch tip.
        if changesets is None:
            heads = client.heads()
            tip_changeset = None
            for head in heads:
                if head[3] != b'default':
                    continue
                tip_changeset = head[1]
                break
            client.update(rev=tip_changeset)
            LOGGER.info('Updated {0} to {1} tip'.format(repo, tip_changeset))
        elif repo in changesets:
            client.update(rev=changesets[repo]['hash'])
            LOGGER.info('Updated {0} to {1}'.format(
                repo, changesets[repo]['hash']))
        return {'repo': repo, 'status': 0, 'description': None}
    except Exception as exc:
        LOGGER.exception(
            'Aborting directory {0} '
            'PLEASE FIX. Reason:\n{1}\n'.format(cur_path, exc)
        )
        return {'repo': repo, 'status': 1, 'description': None}
    finally:
        client.close()
def _git_pull(cur_path, repo, changesets):
    """Pull and update a single git working copy (mirrors _mercurial_pull).

    Returns a result dict with ``repo``, ``status`` (0 success / 1 failure)
    and ``description`` keys.
    """
    client = None
    try:
        client = git.Repo(cur_path)
        dirty = client.is_dirty()
    except Exception as exc:
        # Not a usable git repo - skip it rather than fail the batch.
        LOGGER.exception('Skipping "{0}" directory reason:\n{1}\n'.format(
            cur_path, exc)
        )
        if client is not None:
            client.close()
        return {'repo': repo, 'status': 0, 'description': None}
    # Bug fix: the repo object used to be closed in ``finally`` clauses while
    # still being used afterwards. Keep it open until the very end instead.
    try:
        if dirty:
            err = 'Pending changes in {0}. Aborting'.format(cur_path)
            LOGGER.info('{0}. Will abort operation'.format(err))
            return {'repo': repo, 'status': 1, 'description': err}
        try:
            cur_branch = client.active_branch
        except TypeError:
            err = 'Failed to retrieve active_branch. Probably in detached ' \
                  'head in {0}'.format(cur_path)
            LOGGER.exception('{0}. Will abort operation'.format(err))
            return {'repo': repo, 'status': 1, 'description': err}
        commits_ahead = client.iter_commits(
            'origin/{0}..{0}'.format(cur_branch.name)
        )
        if sum(1 for c in commits_ahead):
            err = 'Outgoing changes in {0}. Aborting'.format(cur_path)
            LOGGER.info('{0}. Will abort operation'.format(err))
            return {'repo': repo, 'status': 1, 'description': err}
        LOGGER.info('Pulling from {0}'.format(repo))
        remote = client.remote(name='origin')
        remote.pull()
        # Guard against ``changesets is None`` (the mercurial counterpart
        # already handled it; here it used to raise TypeError).
        if changesets and repo in changesets:
            client.git.checkout(changesets[repo]['hash'])
            LOGGER.info('Checked out {0} to {1}'.format(
                repo, changesets[repo]['hash']))
        return {'repo': repo, 'status': 0, 'description': None}
    except Exception as exc:
        LOGGER.exception(
            'Aborting directory {0} '
            'PLEASE FIX. Reason:\n{1}\n'.format(cur_path, exc)
        )
        return {'repo': repo, 'status': 1, 'description': None}
    finally:
        client.close()
def _pull_repo(args):
repos_path, repo, changesets = args
if changesets is not None and repo not in changesets:
LOGGER.debug('Nothing to do for repo {0} because our application does '
'not depend on it'.format(repo))
return {'repo': repo, 'status': 0, 'description': None}
if repo == 'versions':
return {'repo': repo, 'status': 0, 'description': None}
cur_path = '{0}/{1}'.format(repos_path, repo)
if changesets[repo]['vcs_type'] == 'mercurial':
return _mercurial_pull(cur_path, repo, changesets)
elif changesets[repo]['vcs_type'] == 'git':
return _git_pull(cur_path, repo, changesets)
def _mercurial_clone(repos_path, repo, remote):
    """Clone *repo* from the mercurial *remote* into *repos_path*.

    Returns a status dict (``repo``/``status``/``description``).
    """
    LOGGER.info('Cloning {0}..'.format(repo))
    source = '{0}/{1}'.format(remote, repo)
    destination = '{0}/{1}'.format(repos_path, repo)
    try:
        hglib.clone(source, destination)
    except Exception as exc:
        err = 'Failed to clone {0} repository. ' \
              'Description: {1}'.format(repo, exc.args)
        return {'repo': repo, 'status': 1, 'description': err}
    return {'repo': repo, 'status': 0, 'description': None}
def _git_clone(repos_path, repo, remote):
    """Clone *repo* from the git *remote* into *repos_path*.

    Returns a status dict (``repo``/``status``/``description``).
    """
    LOGGER.info('Cloning {0}..'.format(repo))
    try:
        # Bug fix: use the classmethod directly. ``git.Repo()`` instantiated
        # a Repo on the current working directory first, which raises when
        # the cwd is not itself a git repository.
        git.Repo.clone_from(
            '{0}/{1}'.format(remote, repo),
            '{0}/{1}'.format(repos_path, repo)
        )
    except Exception as exc:
        err = 'Failed to clone {0} repository. ' \
              'Description: {1}'.format(repo, exc.args)
        return {'repo': repo, 'status': 1, 'description': err}
    return {'repo': repo, 'status': 0, 'description': None}
def _clone_repo(args):
repos_path, repo, remote, vcs_type = args
dirs = [name for name in os.listdir(repos_path)
if os.path.isdir(os.path.join(repos_path, name))]
if repo in dirs:
return {'repo': repo, 'status': 0, 'description': None}
if remote is None:
return {'repo': repo, 'status': 1,
'description': 'remote is None. Will not clone'}
if vcs_type == 'mercurial':
return _mercurial_clone(repos_path, repo, remote)
elif vcs_type == 'git':
return _git_clone(repos_path, repo, remote)
def _goto_version(repos_path, remotes, changesets=None):
    """Pull every repo under *repos_path* in parallel, then clone missing ones.

    ``remotes`` maps vcs type ('git'/'mercurial') to its remote base URL.
    When *changesets* is given, each repo listed in it is cloned if absent;
    otherwise only the pull phase runs. Raises RuntimeError if any
    pull/clone reports failure.
    """
    repos_path = os.path.abspath(repos_path)
    # Pull phase: one worker per repo directory (capped at 20).
    # NOTE(review): Pool(0) raises ValueError if repos_path contains no
    # subdirectories - confirm callers guarantee at least one repo.
    args = [[repos_path, name, changesets] for name in os.listdir(repos_path)
            if os.path.isdir(os.path.join(repos_path, name))]
    with Pool(min(len(args), 20)) as p:
        results = p.map(_pull_repo, args)
    err = False
    for res in results:
        if res['status'] == 1:
            err = True
        if res['repo'] is None and res['description'] is None:
            continue
        msg = 'Failed to pull '
        if res['repo'] is not None:
            msg += 'from {0} '.format(res['repo'])
        if res['description'] is not None:
            msg += 'because {0}'.format(res['description'])
        LOGGER.warning(msg)
    if err:
        raise RuntimeError(
            'Failed to pull all the required repos. See log above'
        )
    if changesets is None:
        return
    # Clone phase: fetch any repo referenced by the changesets that is not
    # yet present locally.
    args = [[repos_path, name, remotes[changesets[name]['vcs_type']],
             changesets[name]['vcs_type']] for name in changesets.keys()]
    with Pool(min(len(args), 10)) as p:
        results = p.map(_clone_repo, args)
    err = False
    for res in results:
        if res['status'] == 1:
            err = True
        if res['repo'] is None and res['description'] is None:
            continue
        msg = 'Failed to clone '
        if res['repo'] is not None:
            msg += 'from {0} '.format(res['repo'])
        if res['description'] is not None:
            msg += 'because {0}'.format(res['description'])
        LOGGER.warning(msg)
    if err:
        raise RuntimeError(
            'Failed to clone all the required repos. See log above'
        )
def main():
    """Parse the command line and run goto_version; returns its exit status."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--repos_path',
        required=True,
        help='The path to the repos base dir for changeset generation'
    )
    parser.add_argument(
        '--app_name', default=None, help="The application's name"
    )
    parser.add_argument(
        '--app_version', default=None, help="The application's version"
    )
    parser.add_argument(
        '--git_remote', default=None,
        help="The remote url to pull from git"
    )
    parser.add_argument(
        '--mercurial_remote', default=None,
        help="The remote url to pull from mercurial"
    )
    parsed = parser.parse_args(sys.argv[1:])
    # Each argparse attribute maps 1:1 onto a goto_version() parameter.
    params = copy.deepcopy(vars(parsed))
    return goto_version(**params)
if __name__ == '__main__':
sys.exit(main())
|
<filename>zstacklib/zstacklib/utils/jsonobject.py
'''
@author: frank
'''
import simplejson
import json
import types
import inspect
class NoneSupportedTypeError(Exception):
    '''Raised when a value's type cannot be parsed into / dumped from JSON by this module.'''
class JsonObject(object):
    '''A dynamic bag of attributes mirroring a JSON object.

    Missing attributes resolve to None (see __getattr__), so callers can
    probe optional fields without try/except.
    '''
    def __init__(self):
        pass
    def put(self, name, val):
        '''Set attribute *name* to *val*.'''
        setattr(self, name, val)
    def dump(self):
        '''Serialize the instance attributes to a JSON string.'''
        return simplejson.dumps(self.__dict__, ensure_ascii=True)
    def hasattr(self, name):
        '''Return True if *name* is set to a non-None value.

        Bug fix: this previously used plain truthiness, so falsy values
        such as 0, '' or [] were incorrectly reported as absent.
        '''
        return getattr(self, name) is not None
    def __getitem__(self, name):
        return getattr(self, name)
    def __getattr__(self, name):
        # Trailing-underscore alias: obj.foo_ resolves to obj.foo.
        # Any genuinely missing attribute resolves to None instead of
        # raising AttributeError.
        if name.endswith('_'):
            n = name[:-1]
            if hasattr(self, n):
                return getattr(self, n)
            else:
                return None
        else:
            return None
# covers long as well
def _is_int(val):
try:
int(val)
return True
except ValueError:
return False
def _is_float(val):
try:
float(val)
return True
except ValueError:
return False
def _is_bool(val):
return val in ['True', 'true', 'False', 'false']
def _to_proper_type(val):
if _is_bool(val):
return bool(val)
elif _is_float(val):
return float(val)
elif _is_int(val):
return int(val)
else:
return str(val)
def _parse_list(lst):
    '''Recursively convert a decoded JSON list into primitives / JsonObjects.'''
    out = []
    for item in lst:
        if _is_unsupported_type(item):
            raise NoneSupportedTypeError("Cannot parse object: %s, type: %s, list dump: %s" % (item, type(item), lst))
        if _is_primitive_types(item):
            out.append(item)
        elif isinstance(item, types.DictType):
            out.append(_parse_dict(item))
        elif isinstance(item, types.ListType):
            out.append(_parse_list(item))
        else:
            raise NoneSupportedTypeError("Cannot parse object: %s, type: %s, list dump: %s" % (item, type(item), lst))
    return out
def _parse_dict(d):
    '''Recursively convert a decoded JSON dict into a JsonObject.'''
    obj = JsonObject()
    for key, val in d.items():
        if _is_unsupported_type(val):
            raise NoneSupportedTypeError("Cannot parse object: %s, type: %s, dict dump: %s" % (val, type(val), d))
        if _is_primitive_types(val):
            setattr(obj, key, val)
        elif isinstance(val, types.ListType):
            setattr(obj, key, _parse_list(val))
        elif isinstance(val, types.DictType):
            setattr(obj, key, _parse_dict(val))
        else:
            raise NoneSupportedTypeError("Cannot parse object: %s, type: %s, dict dump: %s" % (val, type(val), d))
    return obj
def loads(jstr):
    '''Parse JSON string *jstr* into a JsonObject, list, or primitive.'''
    try:
        root = simplejson.loads(jstr)
    except Exception:
        raise NoneSupportedTypeError("Cannot compile string: %s to a jsonobject" % jstr)
    if isinstance(root, types.DictType):
        return _parse_dict(root)
    elif isinstance(root, types.ListType):
        return _parse_list(root)
    return root
def _new_json_object():
    '''Factory returning an empty JsonObject.'''
    return JsonObject()
def nj():
    '''Shorthand alias for _new_json_object().'''
    return _new_json_object()
def _is_unsupported_type(obj):
    '''Return True for values this module refuses to (de)serialize.

    Python 2 only: relies on types.* aliases removed in Python 3.
    '''
    return isinstance(obj, (types.ComplexType, types.TupleType, types.FunctionType, types.LambdaType,
        types.GeneratorType, types.MethodType, types.UnboundMethodType, types.BuiltinFunctionType, types.BuiltinMethodType, types.FileType,
        types.XRangeType, types.TracebackType, types.FrameType, types.DictProxyType, types.NotImplementedType, types.GetSetDescriptorType,
        types.MemberDescriptorType))
def _is_primitive_types(obj):
    '''Return True for JSON-primitive values (bool, int/long, float, str/unicode). Python 2 only.'''
    return isinstance(obj, (types.BooleanType, types.LongType, types.IntType, types.FloatType, types.StringType, types.UnicodeType))
def _dump_list(lst):
    '''Recursively convert list *lst* into JSON-serializable values.

    Dicts are passed through as-is (not recursed into - presumably they are
    assumed to already hold serializable values), None entries are dropped,
    and any other object is dumped via _dump().
    '''
    nlst = []
    for val in lst:
        if _is_unsupported_type(val):
            raise NoneSupportedTypeError('Cannot dump val: %s, type: %s, list dump: %s' % (val, type(val), lst))
        if _is_primitive_types(val):
            nlst.append(val)
        elif isinstance(val, types.DictType):
            # Dicts are appended without recursion.
            nlst.append(val)
        elif isinstance(val, types.ListType):
            tlst = _dump_list(val)
            nlst.append(tlst)
        elif isinstance(val, types.NoneType):
            # None entries are silently dropped.
            pass
        else:
            nmap = _dump(val)
            nlst.append(nmap)
    return nlst
def _dump_super(obj):
    # NOTE(review): dead code - builds the super() proxy, discards it, and
    # implicitly returns None. No caller in this module; kept only for
    # backward compatibility.
    s = super(type(obj), obj)
def _dump(obj):
    '''Recursively convert *obj* into plain JSON-serializable structures.

    Attribute names starting with '_' and None values are skipped.
    Python 2 only (iteritems, types.DictionaryType).
    '''
    if _is_primitive_types(obj):
        # Bug fix: return the value itself; dumps() performs the actual JSON
        # encoding. Previously this returned an already-encoded string, which
        # dumps() then encoded a second time (double-quoted output).
        return obj
    ret = {}
    items = obj.iteritems() if isinstance(obj, types.DictionaryType) else obj.__dict__.iteritems()
    for key, val in items:
        if key.startswith('_'): continue
        # Bug fix: check the value being dumped (was mistakenly checking
        # ``obj`` itself on every iteration).
        if _is_unsupported_type(val):
            raise NoneSupportedTypeError('cannot dump %s, type:%s, object dict: %s' % (val, type(val), obj.__dict__))
        if _is_primitive_types(val):
            ret[key] = val
        elif isinstance(val, types.DictType):
            ret[key] = val
        elif isinstance(val, types.ListType):
            ret[key] = _dump_list(val)
        elif isinstance(val, types.NoneType):
            pass
        else:
            ret[key] = _dump(val)
    return ret
def dumps(obj, pretty=False):
    '''Serialize *obj* (JsonObject / dict / primitive) to a JSON string.

    With ``pretty=True`` the output is indented and key-sorted.
    '''
    jsonmap = _dump(obj)
    if not pretty:
        return simplejson.dumps(jsonmap, ensure_ascii=True)
    return simplejson.dumps(jsonmap, ensure_ascii=True, sort_keys=True, indent=4)
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-04-05 23:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the austria election-results app.

    Auto-generated by Django 1.11 ``makemigrations``; once applied, do not
    reorder or hand-edit the operations below.
    """
    initial = True
    dependencies = [
    ]
    # Tables are created first; foreign keys and indexes are added
    # afterwards so every referenced model already exists.
    operations = [
        migrations.CreateModel(
            name='District',
            fields=[
                ('short_code', models.CharField(max_length=3, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100, unique=True)),
            ],
            options={
                'verbose_name_plural': 'districts',
                'verbose_name': 'district',
                'ordering': ['short_code'],
            },
        ),
        migrations.CreateModel(
            name='Election',
            fields=[
                ('short_name', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('short_name_text', models.CharField(max_length=50, null=True)),
                ('full_name', models.CharField(max_length=200)),
                ('election_type', models.CharField(max_length=100)),
                ('election_id', models.CharField(max_length=20, null=True)),
                ('wikidata_id', models.CharField(max_length=20, null=True, unique=True)),
                ('administrative_level', models.CharField(max_length=100)),
                ('election_day', models.DateTimeField(verbose_name='timestamp of election day')),
                ('status', models.CharField(default='init', max_length=200)),
            ],
            options={
                'verbose_name_plural': 'elections',
                'ordering': ['short_name'],
                'verbose_name': 'election',
                'get_latest_by': 'election_day',
            },
        ),
        migrations.CreateModel(
            name='List',
            fields=[
                ('short_name', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('short_name_text', models.CharField(default=None, max_length=50)),
                ('full_name', models.CharField(default=None, max_length=200)),
            ],
            options={
                'verbose_name_plural': 'lists',
                'verbose_name': 'list',
                'ordering': ['short_name'],
            },
        ),
        migrations.CreateModel(
            name='ListResult',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('votes', models.IntegerField(default=-1, null=True)),
            ],
            options={
                'verbose_name_plural': 'list results',
                'verbose_name': 'list result',
                'ordering': ['id'],
            },
        ),
        migrations.CreateModel(
            name='Municipality',
            fields=[
                ('code', models.CharField(max_length=5, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('kennzahl', models.CharField(default=None, max_length=5)),
            ],
            options={
                'verbose_name_plural': 'municipalities',
                'verbose_name': 'municipality',
                'ordering': ['code'],
            },
        ),
        migrations.CreateModel(
            name='Party',
            fields=[
                ('short_name', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('short_name_text', models.CharField(default=None, max_length=50, unique=True)),
                ('full_name', models.CharField(max_length=200, unique=True)),
                ('family', models.CharField(default=None, max_length=200, null=True)),
                ('wikidata_id', models.CharField(default=None, max_length=20, null=True, unique=True)),
                ('website', models.CharField(default=None, max_length=100, null=True)),
                ('location', models.CharField(default=None, max_length=100, null=True)),
            ],
            options={
                'verbose_name_plural': 'parties',
                'verbose_name': 'party',
                'ordering': ['short_name'],
            },
        ),
        migrations.CreateModel(
            name='PollingStation',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(default=None, max_length=200, null=True)),
                ('type', models.CharField(max_length=30)),
            ],
            options={
                'verbose_name_plural': 'polling stations',
                'verbose_name': 'polling station',
                'ordering': ['id'],
            },
        ),
        migrations.CreateModel(
            name='PollingStationResult',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('eligible_voters', models.IntegerField(default=-1, null=True)),
                ('votes', models.IntegerField(default=-1)),
                ('valid', models.IntegerField(default=-1)),
                ('invalid', models.IntegerField(default=-1)),
                ('ts_result', models.DateTimeField(verbose_name='timestamp of bmi result')),
            ],
            options={
                'verbose_name_plural': 'polling station results',
                'ordering': ['id'],
                'verbose_name': 'polling station result',
                'get_latest_by': 'ts_result',
            },
        ),
        migrations.CreateModel(
            name='RawData',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('ts_file', models.DateTimeField(null=True, verbose_name='creation date of original file')),
                ('ts_import', models.DateTimeField(default=None, verbose_name='import date of file into database')),
                ('hash', models.CharField(max_length=100)),
                ('content', models.TextField()),
                ('header', models.TextField(default=None, null=True)),
                ('dataformat', models.CharField(max_length=50)),
                ('description', models.TextField(default=None)),
            ],
            options={
                'verbose_name_plural': 'raw data',
                'ordering': ['id'],
                'verbose_name': 'raw data',
                'get_latest_by': 'ts_file',
            },
        ),
        migrations.CreateModel(
            name='RegionalElectoralDistrict',
            fields=[
                ('short_code', models.CharField(max_length=2, primary_key=True, serialize=False)),
                ('name', models.CharField(default=None, max_length=100)),
            ],
            options={
                'verbose_name_plural': 'regional electoral districts',
                'verbose_name': 'regional electoral district',
                'ordering': ['short_code'],
            },
        ),
        migrations.CreateModel(
            name='State',
            fields=[
                ('short_code', models.CharField(max_length=1, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100, unique=True)),
            ],
            options={
                'verbose_name_plural': 'states',
                'verbose_name': 'state',
                'ordering': ['short_code'],
            },
        ),
        # Cross-table links and composite indexes, added after all tables
        # exist. All foreign keys use on_delete=PROTECT.
        migrations.AddIndex(
            model_name='state',
            index=models.Index(fields=['name', 'short_code'], name='austria_sta_name_6993c1_idx'),
        ),
        migrations.AddIndex(
            model_name='regionalelectoraldistrict',
            index=models.Index(fields=['name', 'short_code'], name='austria_reg_name_b53223_idx'),
        ),
        migrations.AddField(
            model_name='rawdata',
            name='election',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.Election'),
        ),
        migrations.AddField(
            model_name='pollingstationresult',
            name='election',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.Election'),
        ),
        migrations.AddField(
            model_name='pollingstationresult',
            name='polling_station',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.PollingStation'),
        ),
        migrations.AddField(
            model_name='pollingstation',
            name='municipality',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.Municipality'),
        ),
        migrations.AddIndex(
            model_name='party',
            index=models.Index(fields=['full_name', 'short_name', 'short_name_text'], name='austria_par_full_na_2a1baa_idx'),
        ),
        migrations.AddField(
            model_name='municipality',
            name='district',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.District'),
        ),
        migrations.AddField(
            model_name='municipality',
            name='regional_electoral_district',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.RegionalElectoralDistrict'),
        ),
        migrations.AddField(
            model_name='listresult',
            name='election_list',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='austria.List'),
        ),
        migrations.AddField(
            model_name='listresult',
            name='polling_station_result',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='austria.PollingStationResult'),
        ),
        migrations.AddField(
            model_name='list',
            name='party',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='austria.Party'),
        ),
        migrations.AddIndex(
            model_name='election',
            index=models.Index(fields=['full_name', 'short_name', 'short_name_text'], name='austria_ele_full_na_bbb3c0_idx'),
        ),
        migrations.AddField(
            model_name='district',
            name='state',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='austria.State'),
        ),
        migrations.AddIndex(
            model_name='pollingstation',
            index=models.Index(fields=['name'], name='austria_pol_name_56e525_idx'),
        ),
        migrations.AddIndex(
            model_name='municipality',
            index=models.Index(fields=['name', 'code', 'kennzahl'], name='austria_mun_name_329503_idx'),
        ),
        migrations.AddIndex(
            model_name='list',
            index=models.Index(fields=['short_name', 'full_name', 'short_name_text'], name='austria_lis_short_n_4b5871_idx'),
        ),
        migrations.AddIndex(
            model_name='district',
            index=models.Index(fields=['name', 'short_code'], name='austria_dis_name_e39bb2_idx'),
        ),
    ]
|
<reponame>hmedal/speu2<filename>src/objects/experiments.py
'''
Created on Jul 11, 2016
@author: hmedal
'''
import os
import xml.etree.cElementTree as ET
import unittest
import json
from src.objects import computationalresource, outputtable
def convertHoursToTimeString(hours):
    """Format a duration given in (possibly fractional) hours as H:MM:SS."""
    total_seconds = hours * 3600
    minutes, secs = divmod(total_seconds, 60)
    hrs, mins = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hrs, mins, secs)
def get_params_string_name(parametersDictionary, paramsThatChanged = None):
    """Build a short name summarizing which parameters differ from the base.

    Returns 'base' when nothing changed; otherwise joins
    '_<shortName>-<value>' fragments using the short-name mapping loaded
    from params_shortNames.json.
    """
    if paramsThatChanged is None or len(paramsThatChanged) == 0:
        # Nothing changed: skip reading the short-names file entirely
        # (it used to be loaded unconditionally, even for the base case).
        return 'base'
    # Load lazily and close the handle deterministically (the file object
    # used to be left open).
    with open('../expr_scripts_for_paper/params_shortNames.json') as f:
        shortNamesDict = json.load(f)
    paramsString = ''
    for paramName in paramsThatChanged:
        paramsString += '_' + shortNamesDict[paramName] + '-' + str(parametersDictionary[paramName])
    return paramsString
def get_filename_from_params_dict(parametersDictionary, paramsThatChanged=None):
    """Compose the experiment base filename: <infModelName>_<params summary>.

    Bug fix: *paramsThatChanged* is now forwarded to
    get_params_string_name() - it was previously hard-coded to None, so
    every experiment file was named '<infModelName>_base' regardless of the
    changed parameters.
    """
    infModelName = parametersDictionary['signal']['infModelName']
    return infModelName + "_" + get_params_string_name(parametersDictionary, paramsThatChanged=paramsThatChanged)
def createOutputFileNameFromParamsDict(parametersDictionary, exprBatchName, paramsThatChanged = None):
    """Return the output-file path for the experiment described by the params.

    NOTE(review): *exprBatchName* is accepted but unused - kept for
    interface compatibility.
    """
    base_name = get_filename_from_params_dict(parametersDictionary, paramsThatChanged)
    return '../output/' + base_name + '.out'
def createExprFileFromParamsDict(paramsDict, exprBatchName, paramsThatChanged = None):
print "paramsDict before write", paramsDict
exprFileName = '../exprFiles/' + get_filename_from_params_dict(paramsDict, paramsThatChanged) + '.json'
with open(exprFileName, 'w') as exprFileObject:
json.dump(paramsDict, exprFileObject, indent=4, sort_keys=True)
return exprFileName
class OptimizationExperiment(object):
'''
For the purpose of printing out output
'''
def __init__(self, scriptCall, computationalResource, outputTable, exprName, parametersDictionary = None,
paramsThatChanged = None, outputFileName = None, exprFile = None):
'''
Constructor
'''
self.scriptCall = scriptCall
self.compResource = computationalResource
self.outputTable = outputTable
self.exprName = exprName
if outputFileName is None:
if parametersDictionary is None:
raise Exception("parametersDictionary may not be None if outputFileName is None")
self.outputFileName = createOutputFileNameFromParamsDict(parametersDictionary, exprName, paramsThatChanged)
else:
self.outputFileName = outputFileName
if exprFile is None:
if parametersDictionary is None:
raise Exception("parametersDictionary may not be None if exprFile is None")
self.exprFile = createExprFileFromParamsDict(parametersDictionary, exprName, paramsThatChanged)
else:
self.exprFile = exprFile
self.schedulerCommandFileOutputFilePath = '../runFiles/' + \
get_filename_from_params_dict(parametersDictionary,
paramsThatChanged) + '.pbs'
self.saveSchedulerCommandFile(self.schedulerCommandFileOutputFilePath)
def saveSchedulerCommandFile(self, schedulerCommandFileOutputFilePath, isLastJob = False, fileType = 'pbs'):
if fileType is 'pbs':
print "printing to ", schedulerCommandFileOutputFilePath, " ", self.compResource.orgFund
f = open(schedulerCommandFileOutputFilePath, 'w')
myStr = ""
if self.compResource.orgFund != 'unsponsored':
myStr += "#PBS -A " + self.compResource.orgFund + "\n"
myStr += "#PBS -N " + self.exprName + "\n"
myStr += "#PBS -q " + self.compResource.queue.name + "\n"
myStr += "\n"
myStr += "#PBS -j oe\n"
myStr += "\n"
myStr += "#PBS -M <EMAIL>\n" # send me email when job aborts (with an error)
if isLastJob:
myStr += "#PBS -m ae\n"
else:
myStr += "#PBS -m a\n"
myStr += "#PBS -o " + self.exprName +".$PBS_JOBID\n"
myStr += "#PBS -l nodes=1:ppn=" + str(self.compResource.numThreadsToUse) + "\n"
myStr += "#PBS -l walltime=" + str(convertHoursToTimeString(self.compResource.queue.maxtime)) + "\n"
myStr += "\n"
myStr += "cd $PBS_O_WORKDIR\n"
myStr += 'export PYTHONPATH="$PYTHONPATH:/work/hmedal/code/wnopt_cavs3"' + "\n"
myStr += self.scriptCall + " -e " + self.exprFile + " > " + self.outputFileName
f.write(myStr)
else:
raise Exception('invalid type')
class OptimizationExperimentBatch(object):
''''''
def __init__(self, computationalResource, filepathForBatch):
self.computationalResource = computationalResource
self.experimentsList = []
self.filepathForBatch = filepathForBatch
def addOptimizationExperiment(self, experiment):
self.experimentsList.append(experiment)
def writeBatchScript(self):
if self.computationalResource.type is 'local':
self.writeBatchScript_Local()
elif self.computationalResource.type is 'remote':
self.writeBatchScript_Remote()
else:
raise Exception('type is unknown')
def writeBatchScript_Local(self):
f = open(self.filepathForBatch, 'w')
myStr = "#!/bin/bash\n"
for experiment in self.experimentsList:
myStr += experiment.schedulerCommandFileOutputFilePath + "\n"
f.write(myStr)
os.system("chmod a+x " + self.filepathForBatch)
def writeBatchScript_Remote(self):
f = open(self.filepathForBatch, 'w')
myStr = "#!/bin/sh\n"
myStr += ". ~/.bashrc"
myStr += "\n"
print "printing " + str(len(self.experimentsList)) + " experiments in batch script"
for experiment in self.experimentsList:
print "experiment", experiment, experiment.compResource
myStr += experiment.compResource.schedulerCommand + " " + \
experiment.schedulerCommandFileOutputFilePath + "\n"
print "writing to file"
f.write(myStr)
def runBatchScript(self):
print "running batch script..."
os.system('ssh <EMAIL> "cd /work/hmedal/code/wnopt_cavs/exprBatchScripts; '
+ self.filepathForBatch)
print "...batch script ran"
def writeAndRun(self):
print "printing batch script..."
self.writeBatchScript()
print "...batch script printed"
self.runBatchScript() |
<reponame>Snewmy/swordie
# Staff <NAME> (2010000) | Orbis Park (200000200)
from net.swordie.ms.enums import InvType
import random
# Etc. drops this NPC accepts for exchange; every trade consumes 100 units
# of the chosen item.
exchangeList = [4000073, 4000070, 4000071, 4000072, 4000059, 4000060, 4000061, 4000058, 4000062,
                4000081, 4000048, 4000055, 4000050, 4000057, 4000051, 4000052,
                4000049, 4000056, 4000053, 4000076, 4000054, 4000069, 4000078, 4000079, 4000080]
# Maps each accepted (fodder) item id to its candidate rewards: a list of
# (reward item id, quantity) tuples. One tuple is drawn at random per trade.
rewardsDict = {
    4000073: [(2000001, 20), (2000003, 15), (2020001, 15), (2010004, 10), (2030000, 15), (4003001, 15)], # Solid Horns
    4000070: [(2000002, 30), (2000006, 15), (2020000, 20), (4003000, 5), (2041005, 1)], # Cellion Tails
    4000071: [(2000002, 30), (2000006, 15), (2020000, 20), (4003000, 5), (2041005, 1)], # Lioner Tails
    4000072: [(2000002, 30), (2000006, 15), (2020000, 20), (4003000, 5), (2041005, 1)], # Grupin Tails
    4000059: [(2000003, 20), (2000001, 30), (2010001, 40), (4003001, 20), (2020001, 15), (2040002, 1)], # Star Pixie's Starpieces
    4000060: [(2000002, 25), (2000006, 10), (2022000, 5), (4000030, 15), (2040902, 1)], # Lunar Pixie's Moonpieces
    4000061: [(2000002, 30), (2000006, 15), (2020000, 20), (4003000, 5), (2041016, 1)], # Luster Pixie's Sunpieces
    4000058: [(2000002, 15), (2010004, 15), (2000003, 25), (4003001, 30), (2040302, 1)], # Nependeath Seeds
    4000062: [(2000002, 30), (2000006, 15), (2020000, 20), (4003000, 5), (2040514, 1)], # Dark Nependeath Seeds
    4000081: [(2000006, 25), (2020006, 25), (4010004, 8), (4010005, 8), (4010006, 3), (4020007, 2), (4020008, 2), (2040705, 1)], # Firebomb Flames
    4000048: [(2000002, 30), (2000006, 15), (2020000, 20), (4003000, 5), (2040402, 1)], # Jr. Yeti Skins
    4000055: [(2020005, 30), (2020006, 15), (2022001, 30), (4003003, 1), (2040505, 1)], # Dark Jr. Yeti Skins
    4000050: [(2000006, 20), (4010002, 7), (4010001, 7), (4010000, 7), (4010006, 2), (4003000, 5), (2040708, 1)], # Pepe Beaks
    4000057: [(2000006, 20), (4010004, 7), (4010005, 7), (4010006, 3), (4020007, 2), (4020008, 2), (2040705, 1)], # Dark Pepe Beaks
    4000051: [(2002004, 15), (2002005, 15), (2002003, 10), (4001005, 1), (2040502, 1)], # Hector Tails
    4000052: [(2000006, 20), (4010004, 7), (4010003, 7), (4010005, 7), (4003002, 1), (2040602, 1)], # White Fang Tails
    4000049: [(2000006, 25), (2020000, 20), (4020000, 7), (4020001, 7), (4020002, 3), (4020007, 2), (2040708, 1)], # Yeti Horns
    4000056: [(2000006, 25), (4020005, 7), (4020003, 7), (4020004, 7), (4020008, 2), (2040802, 1)], # Dark Yeti Horns
    4000053: [(2000006, 30), (4020006, 7), (4020008, 2), (4020007, 2), (2070010, 1), (2040805, 1)], # Werewolf Toenails
    4000076: [(2000001, 30), (2000003, 20), (2010001, 40), (4003001, 20), (2040002, 1)], # Fly-Eye Wings
    4000054: [(2000006, 30), (4020006, 7), (4020008, 2), (4020007, 2), (2041020, 1)], # Lycanthrope Toenails
    4000069: [(2000006, 20), (2020005, 30), (2020006, 15), (2050004, 30), (4003003, 1), (2041005, 1)], # Zombie's Lost Teeth
    4000078: [(2000002, 15), (2010004, 15), (2000003, 25), (2050004, 30), (4003001, 30), (2040302, 1)], # Jr. <NAME>
    4000079: [(2000006, 25), (2050004, 30), (2022001, 35), (4020000, 8), (4020001, 8), (4020002, 8), (4020007, 2), (2041023, 1)], # <NAME>
    4000080: [(2000006, 35), (4020006, 9), (4020008, 4), (4020007, 4), (2041008, 1), (2070011, 1)] # <NAME>
}
def _menuOfExchangeItems(header, showCost):
    """Build the in-game selection menu listing every item in exchangeList.

    header: first line of the dialog text.
    showCost: when True, show the required " 100" count after each item icon.
    """
    cost = " 100" if showCost else ""
    text = header
    for slot, itemId in enumerate(exchangeList):
        text += "#L" + str(slot) + "##i" + str(itemId) + "#" + cost + " #z" + str(itemId) + "##l\r\n"
    return text

# Opening chatter, then the top-level menu: trade, or just browse the list.
sm.sendNext("Hey, got a little bit of time? Well, my job is to collect items here and sell them elsewhere, "
            "but these days the monsters have become much more hostile so it's been difficult getting good items... "
            "What do you think? Do you want to do some business with me?")
init = sm.sendNext("The deal is simple. You get me something I need, I get you something you need. "
                   "The problem is, I deal with a whole bunch of people, so the items I have to offer may change every time you see me. "
                   "What do you think? Still want to do it? #b\r\n"
                   "#L0# Exchange items #l\r\n"
                   "#L1# Check exchange list #l\r\n")
if init == 0:
    # Trade flow: pick the fodder item, confirm, validate, then reward.
    picked = sm.sendNext(_menuOfExchangeItems("Ok! First you need to choose the item that you'll trade with. #b\r\n", True))
    fodderId = exchangeList[picked]
    agreed = sm.sendAskYesNo("Let's see, you want to trade your #b100 #z" + str(fodderId) + "##k with my stuff right? "
                             "Before trading make sure you have an empty slot available on your use or etc. inventory. "
                             "Now, do you want to trade with me?")
    if not agreed:
        sm.sendSayOkay("I'll be here if you change your mind later.")
    elif not sm.hasItem(fodderId, 100):
        sm.sendSayOkay("Hmmm... are you sure you have #b100 #z" + str(fodderId) + "##k?")
    elif sm.getEmptyInventorySlots(InvType.CONSUME) == 0 or sm.getEmptyInventorySlots(InvType.ETC) == 0:
        sm.sendSayOkay("Please check and see if your Use and Etc. inventories are full or not.")
    else:
        # Draw one (item, quantity) reward at random for the chosen fodder.
        rewardItem, rewardQuant = random.choice(rewardsDict[fodderId])
        sm.consumeItem(fodderId, 100)
        sm.giveItem(rewardItem, rewardQuant)
        sm.giveExp(500)
        sm.sendNext("For your #b100 #z" + str(fodderId) + "##k, here's my #b" + str(rewardQuant) + " #z" + str(rewardItem) + "#(s)#k. "
                    "What do you think? Do you like the items I gave you in return? "
                    "I plan on being here for a while, so if you gather up more items, I'm always open for a trade...")
else:
    # Browse flow: show the possible rewards for one fodder item.
    picked = sm.sendNext(_menuOfExchangeItems("Choose the trade item that you want to check. #b\r\n", False))
    fodderId = exchangeList[picked]
    listing = "I can give you one of the following items for #b100 #z" + str(fodderId) + "##k: #b\r\n"
    for slot, (rewardItem, rewardQuant) in enumerate(rewardsDict[fodderId]):
        listing += "#L" + str(slot) + "##i" + str(rewardItem) + "# " + str(rewardQuant) + " #z" + str(rewardItem) + "#(s)#l\r\n"
    sm.sendNext(listing)
<reponame>h-tab/nighthawk
"""@package integration_test_fixtures
Base classes for Nighthawk integration tests
"""
import json
import logging
import os
import requests
import socket
import subprocess
import sys
import threading
import time
import pytest
from test.integration.common import IpVersion, NighthawkException
from test.integration.nighthawk_test_server import NighthawkTestServer
from test.integration.nighthawk_grpc_service import NighthawkGrpcService
def determineIpVersionsFromEnvironment():
  """Derive the IP versions to test from the ENVOY_IP_TEST_VERSIONS env var.

  Accepted values: "v4only", "v6only", "all" (the default).
  Returns a list of IpVersion values; raises NighthawkException for any
  other value.
  """
  env_versions = os.environ.get("ENVOY_IP_TEST_VERSIONS", "all")
  if env_versions == "v4only":
    versions = [IpVersion.IPV4]
  elif env_versions == "v6only":
    versions = [IpVersion.IPV6]
  elif env_versions == "all":
    versions = [IpVersion.IPV4, IpVersion.IPV6]
  else:
    # BUG FIX: the original interpolated 'versions', which is undefined on
    # this branch (NameError); report the offending env value instead.
    raise NighthawkException("Unknown ip version: '%s'" % env_versions)
  return versions
class IntegrationTestBase():
  """
  IntegrationTestBase facilitates testing against the Nighthawk test server, by determining a free port, and starting it up in a separate process in setUp().
  """

  def __init__(self, ip_version):
    """ip_version: IpVersion.IPV4 or IpVersion.IPV6; IpVersion.UNKNOWN is rejected."""
    super(IntegrationTestBase, self).__init__()
    self.test_rundir = os.path.join(os.environ["TEST_SRCDIR"], os.environ["TEST_WORKSPACE"])
    self.nighthawk_test_server_path = os.path.join(self.test_rundir, "nighthawk_test_server")
    self.nighthawk_test_config_path = None
    self.nighthawk_client_path = os.path.join(self.test_rundir, "nighthawk_client")
    assert ip_version != IpVersion.UNKNOWN
    self.server_ip = "::1" if ip_version == IpVersion.IPV6 else "127.0.0.1"
    self.socket_type = socket.AF_INET6 if ip_version == IpVersion.IPV6 else socket.AF_INET
    self.test_server = None
    self.parameters = {}
    self.ip_version = ip_version
    self.grpc_service = None

  # TODO(oschaaf): For the NH test server, add a way to let it determine a port by itself and pull that
  # out.
  def getFreeListenerPortForAddress(self, address):
    """
    Determines a free port and returns that. Theoretically it is possible that another process
    will steal the port before our caller is able to leverage it, but we take that chance.
    The upside is that we can push the port upon the server we are about to start through configuration
    which is compatible across servers.
    """
    with socket.socket(self.socket_type, socket.SOCK_STREAM) as sock:
      sock.bind((address, 0))
      port = sock.getsockname()[1]
    return port

  def setUp(self):
    """
    Performs sanity checks and starts up the server. Upon exit the server is ready to accept connections.
    """
    assert (os.path.exists(self.nighthawk_test_server_path))
    assert (os.path.exists(self.nighthawk_client_path))
    # Derive a filesystem-safe id from the currently running pytest test name.
    test_id = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0].replace(
        "[", "_").replace("]", "")
    self.parameters["test_id"] = test_id
    self.test_server = NighthawkTestServer(self.nighthawk_test_server_path,
                                           self.nighthawk_test_config_path, self.server_ip,
                                           self.ip_version, self.parameters)
    assert (self.test_server.start())

  def tearDown(self):
    """
    Stops the server (and the gRPC service, when one was started).
    """
    assert (self.test_server.stop() == 0)
    # Fix: 'x is not None' is the idiomatic form of 'not x is None'.
    if self.grpc_service is not None:
      assert (self.grpc_service.stop() == 0)

  def getNighthawkCounterMapFromJson(self, parsed_json):
    """
    Utility method to get the counters from the json indexed by name.
    The last entry of "results" holds the global (aggregated) counters.
    """
    global_results_index = len(parsed_json["results"]) - 1
    return {
        counter["name"]: int(counter["value"])
        for counter in parsed_json["results"][global_results_index]["counters"]
    }

  def getNighthawkGlobalHistogramsbyIdFromJson(self, parsed_json):
    """
    Utility method to get the global histograms from the json indexed by id.
    """
    return {statistic["id"]: statistic for statistic in parsed_json["results"][0]["statistics"]}

  def getTestServerRootUri(self, https=False):
    """
    Utility for getting the http://host:port/ that can be used to query the server we started in setUp()
    """
    uri_host = self.server_ip
    if self.ip_version == IpVersion.IPV6:
      uri_host = "[%s]" % self.server_ip
    uri = "%s://%s:%s/" % ("https" if https else "http", uri_host, self.test_server.server_port)
    return uri

  def getTestServerStatisticsJson(self):
    """
    Utility to grab a statistics snapshot from the test server.
    """
    return self.test_server.fetchJsonFromAdminInterface("/stats?format=json")

  def getServerStatFromJson(self, server_stats_json, name):
    """Return the named counter from a server /stats json snapshot, or None."""
    counters = server_stats_json["stats"]
    for counter in counters:
      if counter["name"] == name:
        return int(counter["value"])
    return None

  def runNighthawkClient(self, args, expect_failure=False, timeout=30, as_json=True):
    """
    Runs Nighthawk against the test server, returning a json-formatted result
    and logs. If the timeout is exceeded an exception will be raised.
    """
    # Copy the args so our modifications to it stay local.
    args = args.copy()
    if self.ip_version == IpVersion.IPV6:
      args.insert(0, "--address-family v6")
    if as_json:
      args.insert(0, "--output-format json")
    args.insert(0, self.nighthawk_client_path)
    logging.info("Nighthawk client popen() args: [%s]" % args)
    client_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = client_process.communicate()
    logs = stderr.decode('utf-8')
    output = stdout.decode('utf-8')
    logging.info("Nighthawk client output: [%s]" % output)
    if as_json:
      output = json.loads(output)
    if expect_failure:
      assert (client_process.returncode != 0)
    else:
      assert (client_process.returncode == 0)
    return output, logs

  def assertIsSubset(self, subset, superset):
    """Assert that every key/value pair of subset also occurs in superset."""
    # BUG FIX: this class does not derive from unittest.TestCase, so the
    # original call to self.assertLessEqual raised AttributeError whenever
    # the helper was used; assert on the dict-items partial order instead.
    assert subset.items() <= superset.items(), \
        "%r is not a subset of %r" % (subset, superset)

  def startNighthawkGrpcService(self):
    """Start the Nighthawk gRPC service bound to our server ip."""
    host = self.server_ip if self.ip_version == IpVersion.IPV4 else "[%s]" % self.server_ip
    self.grpc_service = NighthawkGrpcService(
        os.path.join(self.test_rundir, "nighthawk_service"), host, self.ip_version)
    assert (self.grpc_service.start())
class HttpIntegrationTestBase(IntegrationTestBase):
  """
  Fixture base class for plain-http tests against the Nighthawk test server.
  """

  def __init__(self, ip_version):
    """Point the test server at the http origin configuration."""
    super(HttpIntegrationTestBase, self).__init__(ip_version)
    config = "test/integration/configurations/nighthawk_http_origin.yaml"
    self.nighthawk_test_config_path = os.path.join(self.test_rundir, config)

  def getTestServerRootUri(self):
    """Return the server root URI with the http scheme."""
    return super(HttpIntegrationTestBase, self).getTestServerRootUri(https=False)
class HttpsIntegrationTestBase(IntegrationTestBase):
  """
  Fixture base class for https tests against the Nighthawk test server.
  """

  def __init__(self, ip_version):
    """Register the ssl key/cert and point at the https origin configuration."""
    super(HttpsIntegrationTestBase, self).__init__(ip_version)
    certs_dir = "external/envoy/test/config/integration/certs"
    self.parameters["ssl_key_path"] = os.path.join(self.test_rundir, certs_dir, "serverkey.pem")
    self.parameters["ssl_cert_path"] = os.path.join(self.test_rundir, certs_dir, "servercert.pem")
    config = "test/integration/configurations/nighthawk_https_origin.yaml"
    self.nighthawk_test_config_path = os.path.join(self.test_rundir, config)

  def getTestServerRootUri(self):
    """Return the server root URI with the https scheme."""
    return super(HttpsIntegrationTestBase, self).getTestServerRootUri(https=True)
@pytest.fixture(params=determineIpVersionsFromEnvironment())
def http_test_server_fixture(request):
  """Yield a started http test fixture; stop its server after the test."""
  fixture = HttpIntegrationTestBase(request.param)
  fixture.setUp()
  yield fixture
  fixture.tearDown()
@pytest.fixture(params=determineIpVersionsFromEnvironment())
def https_test_server_fixture(request):
  """Yield a started https test fixture; stop its server after the test."""
  fixture = HttpsIntegrationTestBase(request.param)
  fixture.setUp()
  yield fixture
  fixture.tearDown()
|
<filename>workflow/workflow.py
from os import path
import time
import logging
import argparse
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.feature import RegexTokenizer, Word2Vec
from transformers import StringConcatenator
def main(args):
    """Run the word2vec feature-extraction workflow on Spark.

    args: argparse.Namespace carrying input/output paths, an optional
    sampling fraction, the word2vec hyper-parameters and an optional
    logpath (see the CLI definition in __main__).
    """
    logging.basicConfig(level=logging.INFO, datefmt="%Y/%m/%d %H:%M:%S")
    formatter = logging.Formatter('%(asctime)-15s %(name)s [%(levelname)s] '
                                  '%(message)s')
    logger = logging.getLogger('WorkflowLogger')
    if args.logpath:
        # One log file per run, named after the start time.
        logname = time.ctime()+".log"
        logpath = path.join(args.logpath, logname)
        fh = logging.FileHandler(logpath)
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    logger.info('Beginning workflow')
    conf = SparkConf()
    sc = SparkContext(conf=conf)
    spark = SparkSession(sc)
    ########################
    # READING RAW FILES
    logger.info('Creating base df.')
    df = spark.read.json(args.input)
    df = df.dropDuplicates(['doi'])
    if args.sample:
        # Deterministic sample (seed 42), without replacement.
        df = df.sample(False, args.sample, 42)
    # BUG FIX: the CLI did not define a --debug flag, so args.debug raised
    # AttributeError; tolerate its absence by defaulting to False.
    if getattr(args, 'debug', False):
        df.printSchema()
        df.explain(True)
    # logger.info('Base df created, papers in sample: %d' % df.count())
    #########################
    # SPLITTING DF INTO METADATA AND FULLTEXT
    fulltexts = df.select('doi', 'abstract', 'fulltext')
    #########################
    # DEFINING TRANSFORMERS and ESTIMATORS
    logger.info('Initializing feature pipeline.')
    # concat abstract+fulltext -> tokenize on non-word chars -> word2vec
    stringconcat = StringConcatenator(inputCols=["abstract", "fulltext"],
                                      outputCol="texts")
    tokenizer = RegexTokenizer(inputCol="texts",
                               outputCol="words", pattern="\\W")
    word2Vec = Word2Vec(vectorSize=500, minCount=20,
                        maxIter=args.maxIter, numPartitions=args.numPartitions,
                        windowSize=args.windowSize,
                        stepSize=args.stepSize,
                        inputCol="words", outputCol="w2v")
    w2vpipeline = Pipeline(stages=[stringconcat,
                                   tokenizer,
                                   word2Vec])
    logger.info('Fitting feature pipeline.')
    w2vpipeline_model = w2vpipeline.fit(fulltexts)
    # The fitted Word2Vec model is the last pipeline stage.
    w2vmodel = w2vpipeline_model.stages[-1]
    vectors = w2vmodel.getVectors()
    # Fix: log *before* the (potentially long) write so the log reflects
    # what is actually happening.
    logger.info('Saving model.')
    vectors.write.save(args.output+"df.parquet")
    logger.info('Ending workflow, shutting down.')
    sc.stop()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='do stuff')
    parser.add_argument('--input', dest='input', help='relative or absolute '
                        'path of the input folder')
    parser.add_argument('--output', dest='output', help='relative or absolute '
                        'path of the output folder')
    parser.add_argument('--sample', dest='sample', help='fraction of dataset '
                        'to sample, e.g. 0.01', type=float)
    parser.add_argument('--maxIter', dest='maxIter', help='number of '
                        'maximum iterations, default 1', type=int, default=1)
    parser.add_argument('--numPartitions', dest='numPartitions',
                        help='number of partitions in word2vec step, '
                        'default 1', type=int, default=1)
    parser.add_argument('--windowSize', dest='windowSize',
                        help='size of window for surrounding words, '
                        'default 5', type=int, default=5)
    parser.add_argument('--stepSize', dest='stepSize',
                        help='Step size to be used for each iteration of '
                        'optimization (>= 0), default 0.025',
                        type=float, default=0.025)
    # BUG FIX: main() reads args.debug, but no such option was defined, so
    # reaching the debug branch crashed with AttributeError; expose it as an
    # explicit opt-in flag (default False keeps existing behavior).
    parser.add_argument('--debug', dest='debug', action='store_true',
                        help='print the schema and query plan of the base '
                        'dataframe')
    parser.add_argument('--logpath', dest='logpath', help='relative or '
                        'absolute path of the logfile')
    args = parser.parse_args()
    main(args)
|
<reponame>apuc/django-rest-framework<filename>userapi/settings.py<gh_stars>0
from datetime import timedelta
import os

# Django settings for the userapi project.

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# BUG FIX: the value was missing its opening quote, which made this module a
# SyntaxError; keep the (redacted) placeholder as a proper string literal.
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

SITE_ID = 1

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # Third-party
    'crispy_forms',
    'rest_framework',
    # Local
    'user',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'userapi.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            'templates',
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'userapi.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'

# Crispy forms
CRISPY_TEMPLATE_PACK = 'bootstrap4'

# Authenticate settings
# (FIX: LOGIN_URL was previously assigned twice with the same value; the
# redundant first assignment has been removed.)
LOGIN_URL = 'user:login'
LOGIN_REDIRECT_URL = 'user:home'
LOGOUT_REDIRECT_URL = 'user:home'
REGISTER_URL = 'user:register'
MIN_PASSWORD_LENGTH = 8

# Media places
MEDIA_URL = '/uploads/'
UPLOAD_ROOT = os.path.join(BASE_DIR, 'uploads')

# REST configuration
REST_FRAMEWORK = {
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
        'rest_framework.parsers.FormParser',
        'rest_framework.parsers.MultiPartParser',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ],
}
REST_USE_JWT = True
SIMPLE_JWT = {
    'ALGORITHM': 'HS256',
    'SIGNING_KEY': '<KEY>',
    'AUTH_HEADER_TYPES': ('Bearer',),
    'REFRESH_TOKEN_LIFETIME': timedelta(hours=1),
}

AUTH_USER_MODEL = 'user.UserProfile'
|
<filename>src/cipolla/game/room/room_map_mode_state.py
from twisted.internet import defer # type: ignore
from twisted.internet.defer import Deferred # type: ignore
from cube2common.constants import INTERMISSIONLEN # type: ignore
from cipolla.game.client.exceptions import GenericError
from cipolla.game.map.map_rotation import MapRotation
from cipolla.protocol import swh
from cipolla.game.map.async_map_meta_data_accessor import AsyncMapMetaDataAccessor
from cipolla.game.timing.game_clock import GameClock
# TODO: fix these
from cipolla.game.gamemode.insta import Insta # type: ignore
from cipolla.game.gamemode.gamemodes import gamemodes # type: ignore
from typing import Iterator, Optional, List, Dict
class RoomMapModeState(object):
    """Holds a room's current map/mode and drives asynchronous map changes.

    Before the first rotation completes, the map/mode properties peek at the
    upcoming entry of the map rotation instead of reporting current state.
    """
    def __init__(self, room, map_rotation: Optional[MapRotation] = None, map_meta_data_accessor: Optional[AsyncMapMetaDataAccessor] = None, game_clock: Optional[GameClock] = None) -> None:
        # Imported locally; presumably to avoid a circular import with room.py — TODO confirm.
        from cipolla.game.room.room import Room
        self.room: Room = room
        self._map_name = ""
        self._gamemode = None
        self._map_meta_data_accessor = map_meta_data_accessor
        self._map_rotation = map_rotation
        self._game_clock = game_clock
        # True once change_map_mode has completed at least once.
        self._initialized = False
        # True while an async map change is in flight; see await_map_mode_initialized.
        self._initializing = False
        # Deferreds of callers waiting for the in-flight change to finish.
        self._initializing_deferreds: List[Deferred] = []
    @property
    def initialized(self) -> bool:
        """Whether a map/mode has been fully set up at least once."""
        return self._initialized
    @property
    def map_name(self) -> str:
        """Current map name; peeks at the next rotation entry before first init."""
        assert self._map_rotation is not None
        if self.gamemode is None:
            map_name, _ = self._map_rotation.next_map_mode(peek=True)
            return map_name
        return self._map_name
    @property
    def gamemode(self): # return basemode
        return self._gamemode
    @property
    def mode_num(self) -> int:
        """Client-facing mode number; peeks at the rotation before first init."""
        assert self._map_rotation is not None
        if self.gamemode is None:
            _, mode_name = self._map_rotation.next_map_mode(peek=True)
            return gamemodes[mode_name].clientmodenum
        return self.gamemode.clientmodenum
    @property
    def mode_name(self):
        """Client-facing mode name; peeks at the rotation before first init."""
        assert self._map_rotation is not None
        if self.gamemode is None:
            _, mode_name = self._map_rotation.next_map_mode(peek=True)
            return gamemodes[mode_name].clientmodename
        return self.gamemode.clientmodename
    def get_map_names(self):
        """List the known map names via the map metadata accessor."""
        return self._map_meta_data_accessor.get_map_names()
    @property
    def rotate_on_first_player(self) -> bool:
        """Whether the rotation should advance when the first player joins."""
        assert self._map_rotation is not None
        return self._map_rotation.rotate_on_first_player
    @defer.inlineCallbacks
    def await_map_mode_initialized(self, player_count: int) -> Iterator[Deferred]:
        """Ensure the map/mode is initialized, rotating if needed.

        Concurrent callers while a rotation is in flight are queued and
        fired with the same map metadata once it completes.
        """
        if self._initializing:
            # A change is already in flight: queue up and wait for its result.
            deferred = defer.Deferred()
            self._initializing_deferreds.append(deferred)
            yield deferred
        else:
            if not self.initialized or (self.rotate_on_first_player and player_count == 0):
                self._initializing = True
                map_meta_data = yield self.rotate_map_mode()
                self._initializing = False
                # Wake every waiter queued while the rotation was running.
                while len(self._initializing_deferreds):
                    deferred = self._initializing_deferreds.pop()
                    deferred.callback(map_meta_data)
                defer.returnValue(map_meta_data)
    def rotate_map_mode(self) -> Deferred:
        """Advance the rotation and switch to its next map/mode."""
        assert self._map_rotation is not None
        map_name, mode_name = self._map_rotation.next_map_mode(peek=False)
        return self.change_map_mode(map_name, mode_name)
    @defer.inlineCallbacks
    def change_map_mode(self, map_name: str, mode_name: str) -> Iterator[Deferred]:
        """Load map metadata, instantiate the gamemode and (re)initialize the room.

        Raises GenericError for unknown mode names.
        """
        assert self._map_meta_data_accessor is not None
        if mode_name not in gamemodes:
            raise GenericError("Unsupported game mode.")
        self._map_name = map_name
        map_meta_data = yield self._map_meta_data_accessor.get_map_data(self._map_name)
        self._gamemode = gamemodes[mode_name](room=self.room, map_meta_data=map_meta_data)
        self._initialized = True
        self._new_map_mode_initialize()
        defer.returnValue(map_meta_data)
    def _new_map_mode_initialize(self) -> None:
        """Broadcast the map change and reset clock, players and teams."""
        assert self._game_clock is not None
        with self.room.broadcastbuffer(1, True) as cds:
            swh.put_mapchange(cds, self.map_name, self.gamemode.clientmodenum, hasitems=False)
            # Timed modes restart the match clock; untimed modes run open-ended.
            if self.gamemode.timed:
                self._game_clock.start(self.gamemode.timeout, INTERMISSIONLEN)
            else:
                self._game_clock.start_untimed()
            for player in self.room.players:
                self.gamemode.initialize_player(cds, player)
        self.room.resume()
        # TODO: remove deferred from map initialization
        # and avoid having room pause/resume continuously
        for player in self.room.players:
            player.state.map_change_reset()
            player.state.respawn()
            player._team = ""
            self.gamemode.spawn_loadout(player)
        if self.room.is_teammode:
            self.init_teams()
        # Per-client catch-up: time left, pause state and spawn states.
        for client in self.room.clients:
            with client.sendbuffer(1, True) as cds:
                if self.gamemode.timed and self.room.timeleft is not None:
                    swh.put_timeup(cds, self.room.timeleft)
                if self.room.is_paused:
                    swh.put_pausegame(cds, 1)
                for player in client.player_iter():
                    if not player.state.is_spectator:
                        swh.put_spawnstate(cds, player)
    def init_teams(self) -> None:
        """Randomly split the current players ~50/50 into 'evil' and 'good'."""
        from random import randint
        # NOTE(review): 'chain' is imported but unused here.
        from itertools import chain
        from math import floor, ceil
        allplayers = list(self.room.players)
        nplayers = len(allplayers)
        decreasing_list = list(range(nplayers))
        # Randomly round the half up or down so odd player counts don't
        # always favor the same team.
        to_int = (ceil, floor)[randint(0, 1)]
        for i in range(to_int(nplayers/2)):
            idx = randint(0, len(decreasing_list)-1)
            choice = decreasing_list.pop(idx)
            allplayers[choice]._team = 'evil'
        # rest
        for i in decreasing_list:
            allplayers[i]._team = 'good'
        for player in self.room.players:
            self.room.gamemode.on_player_try_set_team(player, player, player._team, player._team)
|
<filename>citysimtoenergyplus.py<gh_stars>1-10
'''
citysimtoenergyplus.py
Extract an EnergyPlus model (IDF) from a CitySim scene.
A template for the HVAC is provided, so this module is just
concerned with geometry, materials and construction.
'''
import numpy as np
from . import polygons
reload(polygons)
def extractidf(citysim, building, template):
    '''
    Main entry point of the module: build an EnergyPlus IDF (zones, geometry,
    constructions, windows and shading) for a single CitySim building.

    citysim: a lxml.etree.ElementTree containing the CitySim scene.
    building: a string containing the building/@Name|id to extract.
        first, the @Name is checked and if not found, fall back to @id.
    template: a eppy.IDF object representing the template to add the geometry
        to. the template may contain HVAC etc. extractidf only knows a
        single zone called "SINGLE_ZONE".
    '''
    bldg = find_building(building, citysim)
    idf = idf_from_template(template)
    constructions = add_constructions(citysim, bldg, idf)
    add_zones(bldg, idf)
    add_floors(bldg, idf, constructions)
    add_walls(bldg, idf, constructions)
    add_roofs(bldg, idf, constructions)
    add_windows(bldg, idf)
    add_shading(citysim, bldg, idf)
    return idf
def add_zones(building_xml, idf):
    '''currently only single-zone models are allowed. the zone name is
    derived from the CitySim zone id. if any zones are defined in the
    template, an error is raised.'''
    zones = building_xml.findall('Zone')
    assert len(zones) == 1, 'Exactly one zone required'
    assert len(idf.idfobjects['ZONE']) == 0, \
        'No zone definitions in template allowed'
    zone_xml = zones[0]
    zone_idf = idf.newidfobject('ZONE')
    zone_idf.Name = 'Zone%s' % zone_xml.get('id')
    zone_idf.Direction_of_Relative_North = 0
    zone_idf.X_Origin = 0
    zone_idf.Y_Origin = 0
    zone_idf.Z_Origin = 0
def add_shading(citysim, building_xml, idf):
shading_buildings = [s for s in citysim.findall('/*/Building')
if not s.get('id') == building_xml.get('id')]
for building in shading_buildings:
for surface_xml in building.findall('Zone/Wall'):
vertices = [v for v in surface_xml.getchildren()
if v.tag.startswith('V')]
npvertices = [np.array((float(v.get('x')),
float(v.get('y')),
float(v.get('z'))))
for v in vertices]
if np.isnan(polygons.np_poly_area(npvertices)):
print 'not exporting', surface_xml.get('id')
continue # don't export bad shading...
shading = idf.newidfobject('SHADING:BUILDING:DETAILED')
shading.Name = 'ShadingB%sW%s' % (building.get('id'),
surface_xml.get('id'))
shading.Number_of_Vertices = len(vertices)
for v in vertices:
shading.obj.append(v.get('x'))
shading.obj.append(v.get('y'))
shading.obj.append(v.get('z'))
# JK - adds the Roofs as shading for all buildings including the co-simulated one
shading_buildings = [s for s in citysim.findall('/*/Building')]
for building in shading_buildings:
for surface_xml in building.findall('Zone/Roof'):
vertices = [v for v in surface_xml.getchildren()
if v.tag.startswith('V')]
npvertices = [np.array((float(v.get('x')),
float(v.get('y')),
float(v.get('z'))))
for v in vertices]
if np.isnan(polygons.np_poly_area(npvertices)):
print 'not exporting', surface_xml.get('id')
continue # don't export bad shading...
shading = idf.newidfobject('SHADING:BUILDING:DETAILED')
shading.Name = 'ShadingB%sR%s' % (building.get('id'),
surface_xml.get('id'))
shading.Number_of_Vertices = len(vertices)
for v in vertices:
shading.obj.append(v.get('x'))
shading.obj.append(v.get('y'))
shading.obj.append(v.get('z'))
def add_floors(building_xml, idf, constructions):
    '''create one FLOOR:DETAILED object (ground boundary, no sun/wind
    exposure) per CitySim Zone/Floor surface.'''
    for floor_xml in building_xml.findall('Zone/Floor'):
        floor_idf = idf.newidfobject('FLOOR:DETAILED')
        floor_idf.Name = 'Floor%s' % floor_xml.get('id')
        floor_idf.Construction_Name = constructions[floor_xml.get('type')]
        floor_idf.Zone_Name = 'Zone%s' % floor_xml.getparent().get('id')
        floor_idf.Outside_Boundary_Condition = 'Ground'
        floor_idf.Outside_Boundary_Condition_Object = ''
        floor_idf.Sun_Exposure = 'NoSun'
        floor_idf.Wind_Exposure = 'NoWind'
        floor_idf.View_Factor_to_Ground = 'autocalculate'
        corners = [v for v in floor_xml.getchildren()
                   if v.tag.startswith('V')]
        floor_idf.Number_of_Vertices = len(corners)
        for corner in corners:
            floor_idf.obj.extend([corner.get('x'), corner.get('y'),
                                  corner.get('z')])
def add_roofs(building_xml, idf, constructions):
    '''create one ROOFCEILING:DETAILED object (outdoors boundary, sun and
    wind exposed) per CitySim Zone/Roof surface.'''
    for roof_xml in building_xml.findall('Zone/Roof'):
        roof_idf = idf.newidfobject('ROOFCEILING:DETAILED')
        roof_idf.Name = 'Roof%s' % roof_xml.get('id')
        roof_idf.Construction_Name = constructions[roof_xml.get('type')]
        roof_idf.Zone_Name = 'Zone%s' % roof_xml.getparent().get('id')
        roof_idf.Outside_Boundary_Condition = 'Outdoors'
        roof_idf.Outside_Boundary_Condition_Object = ''
        roof_idf.Sun_Exposure = 'SunExposed'
        roof_idf.Wind_Exposure = 'WindExposed'
        roof_idf.View_Factor_to_Ground = 'autocalculate'
        corners = [v for v in roof_xml.getchildren()
                   if v.tag.startswith('V')]
        roof_idf.Number_of_Vertices = len(corners)
        for corner in corners:
            roof_idf.obj.extend([corner.get('x'), corner.get('y'),
                                 corner.get('z')])
def add_walls(building_xml, idf, constructions):
    '''create one WALL:DETAILED object (outdoors boundary, sun and wind
    exposed) per CitySim Zone/Wall surface.'''
    for wall_xml in building_xml.findall('Zone/Wall'):
        wall_idf = idf.newidfobject('WALL:DETAILED')
        wall_idf.Name = 'Wall%s' % wall_xml.get('id')
        wall_idf.Construction_Name = constructions[wall_xml.get('type')]
        wall_idf.Zone_Name = 'Zone%s' % wall_xml.getparent().get('id')
        wall_idf.Outside_Boundary_Condition = 'Outdoors'
        wall_idf.Outside_Boundary_Condition_Object = ''
        wall_idf.Sun_Exposure = 'SunExposed'
        wall_idf.Wind_Exposure = 'WindExposed'
        wall_idf.View_Factor_to_Ground = 'autocalculate'
        corners = [v for v in wall_xml.getchildren()
                   if v.tag.startswith('V')]
        wall_idf.Number_of_Vertices = len(corners)
        for corner in corners:
            wall_idf.obj.extend([corner.get('x'), corner.get('y'),
                                 corner.get('z')])
def add_windows(building_xml, idf):
    """Add glazing to every wall that declares a positive GlazingRatio.

    For each glazed wall a CONSTRUCTION / WINDOWMATERIAL:SIMPLEGLAZINGSYSTEM
    pair keyed by U-value and g-value is created on demand (and reused), and
    the computed window polygon is written as two triangular
    FENESTRATIONSURFACE:DETAILED objects (vertices 0,1,2 and 0,2,3).

    :param building_xml: lxml element for the CitySim building.
    :param idf: eppy IDF object that already contains the WALL:DETAILED surfaces.
    """
    # FIX: removed leftover Python-2 debug `print` statements (they are syntax
    # errors under Python 3 and only echoed intermediate vertices).
    for wall_xml in building_xml.findall('Zone/Wall'):
        uvalue = float(wall_xml.get('GlazingUValue', default=0))
        gvalue = float(wall_xml.get('GlazingGValue', default=0))
        ratio = float(wall_xml.get('GlazingRatio', default=0))
        if ratio <= 0:
            continue  # no glazing requested on this wall
        wallid = 'Wall%s' % wall_xml.get('id')
        windowid = 'Window%s' % wall_xml.get('id')
        construction_name = 'WindowConstructionU%.2fG%.2f' % (uvalue, gvalue)
        material_name = 'WindowMaterialU%.2fG%.2f' % (uvalue, gvalue)
        construction = idf.getobject('CONSTRUCTION', construction_name)
        if not construction:
            construction = idf.newidfobject('CONSTRUCTION')
            construction.Name = construction_name
            construction.obj.append(material_name)
        material = idf.getobject('WINDOWMATERIAL:SIMPLEGLAZINGSYSTEM',
                                 material_name)
        if not material:
            material = idf.newidfobject(
                'WINDOWMATERIAL:SIMPLEGLAZINGSYSTEM')
            material.Name = material_name
            material.UFactor = uvalue
            material.Solar_Heat_Gain_Coefficient = gvalue
        wall = idf.getobject('WALL:DETAILED', wallid)
        # the last Number_of_Vertices*3 positional fields of the wall object
        # are its x/y/z coordinates
        wall_vertices = [float(w)
                         for w in wall.obj[wall.Number_of_Vertices * -3:]]
        wall_polygon = [np.array(p) for p in zip(
            wall_vertices[::3],
            wall_vertices[1::3],
            wall_vertices[2::3])]
        if len(wall_polygon) > 4:
            raise Exception(
                "Can't add windows to wall (too many vertices): %s"
                % wall.Name)
        window_polygon = polygons.get_vertices_by_area_ratio(
            wall_polygon, ratio)
        assert window_polygon, 'Could not calculate window vertices'
        # first triangle: window vertices 0, 1, 2
        window = idf.newidfobject('FENESTRATIONSURFACE:DETAILED', windowid + "_1")
        window.Surface_Type = 'Window'
        window.Construction_Name = construction.Name
        window.Building_Surface_Name = wallid
        window.Number_of_Vertices = 3
        for i, vertex in enumerate(window_polygon):
            if i < 3:
                window.obj.extend(vertex)
        # second triangle: window vertices 0, 2, 3
        window = idf.newidfobject('FENESTRATIONSURFACE:DETAILED', windowid + "_2")
        window.Surface_Type = 'Window'
        window.Construction_Name = construction.Name
        window.Building_Surface_Name = wallid
        window.Number_of_Vertices = 3
        for i, vertex in enumerate(window_polygon):
            if i == 0 or i > 1:
                window.obj.extend(vertex)
def add_constructions(citysim, building_xml, idf):
    '''
    Go through each wall, floor and roof in the building and create a
    CONSTRUCTION object (plus one MATERIAL per Layer) for each WallType
    referenced.

    :param citysim: lxml tree of the whole CitySim document (for WallType lookup).
    :param building_xml: lxml element for the building being converted.
    :param idf: eppy IDF object the constructions are added to.
    :return: mapping of CitySim WallType id -> IDF construction name.
    '''
    surfaces = [e for e in building_xml.findall('Zone/*')
                if e.tag in ('Wall', 'Roof', 'Floor')]
    constructions = {}
    for surface in surfaces:
        # FIX: renamed the local from `id`, which shadowed the builtin
        type_id = surface.get('type')
        if type_id in constructions:
            continue  # construction already created for this WallType
        construction_xml = citysim.find('//WallType[@id="%s"]' % type_id)
        if construction_xml is None:
            raise Exception('could not find //WallType[@id="%s"]' % type_id)
        construction_idf = idf.newidfobject(
            'CONSTRUCTION',
            construction_xml.get(
                'name',
                default='WallType%s' %
                type_id))
        for mnr, layer in enumerate(construction_xml.findall('Layer')):
            material_idf = idf.newidfobject(
                'MATERIAL', '%s_M%i' %
                (construction_idf.Name, mnr))
            material_idf.Thickness = float(layer.get('Thickness'))
            material_idf.Roughness = 'MediumSmooth'
            material_idf.Conductivity = float(layer.get('Conductivity'))
            material_idf.Density = float(layer.get('Density'))
            material_idf.Specific_Heat = float(layer.get('Cp'))
            construction_idf.obj.append(material_idf.Name)
        constructions[type_id] = construction_idf.Name
    return constructions
def find_building(building, citysim):
    """Locate a Building element by Name, falling back to lookup by id."""
    by_name = find_building_by_name(building, citysim)
    if by_name is not None:
        return by_name
    return find_building_by_id(building, citysim)
def find_building_by_name(building, citysim):
    """Return the Building element whose Name attribute matches, or None."""
    xpath = '/*/Building[@Name="%s"]' % building
    return citysim.find(xpath)
def find_building_by_id(building, citysim):
    """Return the Building element whose id attribute matches, or None."""
    xpath = '/*/Building[@id="%s"]' % building
    return citysim.find(xpath)
def idf_from_template(template):
    """Return a new IDF containing a copy of every object in *template*.

    eppy offers no whole-file deep copy, so each object is copied
    individually into a freshly initialised IDF.
    """
    from eppy import modeleditor
    idf = modeleditor.IDF()
    idf.initnew()
    for objects in template.idfobjects.values():
        for obj in objects:
            idf.copyidfobject(obj)
    return idf
|
"""
Utilities to manage different data input files.
"""
import logging
from collections import namedtuple
from xml.etree.ElementTree import ElementTree
from tqdm import tqdm
from .schema import MentionInstance
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Provenance(namedtuple("Provenance", ["doc_id", "start", "end"])):
    """A (document id, start offset, end offset) span within a corpus document."""

    @classmethod
    def from_str(cls, str_):
        """Parse a provenance string of the form 'DOCID:START-END'."""
        doc_id, _, span = str_.partition(':')
        start_str, _, end_str = span.partition('-')
        return cls(doc_id, int(start_str), int(end_str))

    def __str__(self):
        return "{}:{}-{}".format(self.doc_id, self.start, self.end)
def test_provenance_from_str():
    """Parse a known provenance string and verify every field."""
    parsed = Provenance.from_str("ENG_NW_001278_20130214_F00011JDX:1448-1584")
    assert parsed.doc_id == "ENG_NW_001278_20130214_F00011JDX"
    assert (parsed.start, parsed.end) == (1448, 1584)
def test_provenance_cmp():
    """Provenances compare lexicographically: doc_id first, then offsets."""
    base = Provenance("A", 10, 20)
    same = Provenance("A", 10, 20)
    later = Provenance("A", 15, 24)
    other_doc = Provenance("B", 10, 20)
    assert base == same
    assert base <= same
    assert base < later
    assert later > base
    assert base < other_doc
class EvaluationEntry(namedtuple("EvaluationEntry", ["id", "query_id", "ldc_id", "relation", "relation_provenances", "slot_value", "slot_provenances", "slot_value_label", "relation_label", "eq_class", "eq"])):
    """A single assessed line from a KBP evaluation (gold) file."""

    def __str__(self):
        return "{}: {} {} {}".format(self.id, self.query_id, self.relation, self.slot_value)

    def __repr__(self):
        return "<Entry: {}>".format(str(self))

    def is_true(self):
        """An entry is judged correct when its equivalence class id is positive."""
        return self.eq > 0

    @classmethod
    def from_line(cls, line):
        """Parse one tab-separated assessment line into an EvaluationEntry."""
        parts = line.split("\t")
        query_id, relation = parts[1].split(':', 1)
        return cls(
            parts[0],                                              # id
            query_id,
            None,                                                  # ldc_id, attached later via _replace
            relation,
            [Provenance.from_str(s) for s in parts[2].split(',')],
            parts[3],                                              # slot_value
            [Provenance.from_str(s) for s in parts[4].split(',')],
            parts[5],                                              # slot_value_label
            parts[6],                                              # relation_label
            parts[7],                                              # eq_class
            int(parts[7].split(":")[-1]),                          # numeric eq id
        )

    def to_list(self):
        """Columns used when rendering entries for reports."""
        return [
            self.query_id,
            self.relation,
            self.slot_value,
            self.slot_value_label,
            self.relation_label,
            self.eq_class,
        ]
def test_evaluation_entry():
    # Smoke test: parse a sample assessment line and check every parsed field.
    # NOTE(review): EvaluationEntry.from_line splits on "\t", but the sample
    # below appears space-separated here — confirm the literal contains real
    # tab characters in the original source, otherwise parts[1] would raise
    # IndexError.
    line = "CS15_ENG_0016_0_001 CS15_ENG_0016:gpe:births_in_country ENG_NW_001278_20130214_F00011JDX:1448-1584 Agriculture ENG_NW_001278_20130214_F00011JDX:1505-1515 W W 0"
    entry = EvaluationEntry.from_line(line)
    assert entry.id == "CS15_ENG_0016_0_001"
    assert entry.query_id == "CS15_ENG_0016"
    assert entry.relation == "gpe:births_in_country"
    assert entry.relation_provenances == [Provenance("ENG_NW_001278_20130214_F00011JDX", 1448, 1584)]
    assert entry.slot_value == "Agriculture"
    assert entry.slot_provenances == [Provenance("ENG_NW_001278_20130214_F00011JDX", 1505, 1515)]
    assert entry.slot_value_label == "W"
    assert entry.relation_label == "W"
    assert entry.eq_class == "0"
class OutputEntry(namedtuple("OutputEntry", ["query_id", "ldc_id", "relation", "run_id", "relation_provenances", "slot_value", "slot_type", "slot_provenances", "confidence"])):
    """A single line from a system output (submission) file.

    FIX: the underlying namedtuple was mistakenly named "EvaluationEntry"
    (copy-paste); renamed to "OutputEntry" so repr() and pickling identify
    the correct type. Field names and order are unchanged.
    """

    @classmethod
    def from_line(cls, line):
        """Parse one tab-separated system-output line into an OutputEntry."""
        parts = line.split("\t")
        query_id = parts[0]
        ldc_id = None  # attached later via _replace once queries are loaded
        relation = parts[1]
        run_id = parts[2]
        relation_provenances = [Provenance.from_str(s) for s in parts[3].split(',')]
        slot_value = parts[4]
        slot_type = parts[5]
        slot_provenances = [Provenance.from_str(s) for s in parts[6].split(',')]
        confidence = float(parts[7])
        return cls(query_id, ldc_id, relation, run_id, relation_provenances, slot_value, slot_type, slot_provenances, confidence)
def test_output_entry():
    # Smoke test: parse a sample system-output line and check the parsed fields.
    # NOTE(review): OutputEntry.from_line splits on "\t", but the sample below
    # appears space-separated here — confirm the literal contains real tab
    # characters in the original source. The "<NAME>" placeholder also looks
    # like a redaction artifact; verify against the original fixture.
    line = "CSSF15_ENG_001e2aa16f gpe:births_in_city KB_BBN1 NYT_ENG_20130513.0090:2476-2540 <NAME> PER NYT_ENG_20130513.0090:2476-2500 0.9"
    entry = OutputEntry.from_line(line)
    assert entry.query_id == "CSSF15_ENG_001e2aa16f"
    assert entry.relation == "gpe:births_in_city"
    assert entry.run_id == "KB_BBN1"
    assert entry.relation_provenances == [Provenance("NYT_ENG_20130513.0090",2476,2540)]
    assert entry.slot_value == "<NAME>"
    assert entry.slot_type == "PER"
    assert entry.slot_provenances == [Provenance("NYT_ENG_20130513.0090", 2476,2500)]
    assert entry.confidence == 0.9
def load_queries(fstream):
    """Map CSSF query ids to LDC query ids.

    Each line's first two whitespace-separated fields are the LDC id and
    the CSSF id. NOTE: partial assessments are kept because that's what
    KBP does too.
    """
    mapping = {}
    for raw_line in fstream:
        ldc_query, cssf_query = raw_line.split()[:2]
        mapping[cssf_query] = ldc_query
    return mapping
def load_gold(fstream, Q):
    """Load assessed (gold) entries, keeping only queries present in Q.

    Each kept entry gets its ldc_id filled in from the query mapping.
    """
    entries = []
    for line in tqdm(fstream):
        entry = EvaluationEntry.from_line(line)
        if entry.query_id in Q:
            entries.append(entry._replace(ldc_id=Q[entry.query_id]))
    logger.info("Loaded %d evaluation entries", len(entries))
    return entries
def load_gold_2016(fstream, Q):
    """Load 2016-format gold entries.

    The 2016 files carry one extra column (index 6), which is dropped so the
    line matches the layout EvaluationEntry.from_line expects.
    """
    entries = []
    for raw in tqdm(fstream):
        cols = raw.split("\t")
        normalized = "\t".join(cols[0:6] + cols[7:9])
        entry = EvaluationEntry.from_line(normalized)
        if entry.query_id in Q:
            entries.append(entry._replace(ldc_id=Q[entry.query_id]))
    logger.info("Loaded %d evaluation entries", len(entries))
    return entries
def load_output(fstream, Q):
    """Load system output entries, keeping only queries present in Q."""
    entries = []
    for line in tqdm(fstream, desc='Loading entries'):
        entry = OutputEntry.from_line(line)
        if entry.query_id in Q:
            entries.append(entry._replace(ldc_id=Q[entry.query_id]))
    return entries
QueryEntry = namedtuple("QueryEntry", ["id", "gloss", "mention_type", "prov", "slot"])

def load_query_entries(f):
    """Parse KBP query entries from an XML query file.

    :param f: Filename or file object containing the queries XML.
    :return: List of QueryEntry tuples.
    """
    tree = ElementTree()
    tree.parse(f)
    E = []
    # FIX: Element.getchildren() was deprecated and removed from
    # xml.etree.ElementTree in Python 3.9; iterating the root element is
    # the supported equivalent.
    for query in list(tree.getroot()):
        query_id = query.get('id')
        gloss = query.find("name").text
        mention_type = query.find("enttype").text.upper()
        docid = query.find("docid").text
        # <beg>/<end> hold inclusive offsets; +1 converts to a half-open span
        beg, end = int(query.find("beg").text), int(query.find("end").text)+1
        prov = Provenance(docid, beg, end)
        slot = query.find("slot0").text
        E.append(QueryEntry(query_id, gloss, mention_type, prov, slot))
    return E
|
<filename>scripts/crawling.py
# Authors: <NAME> <<EMAIL>>
import argparse
import sys
import logging
import os
from geol.utils import utils
from geol.geometry.squaregrid import SquareGrid
from geol.crawler import foursquare_crawler
os.environ['NO_PROXY'] = "nominatim.openstreetmap.org"
logger = logging.getLogger(__name__)
def write_grid(output, size, window_size, crs, area_name, base_shape):
    """Build a square tessellation from a base shapefile or a place name.

    :param output: Output path, used only in the error log message.
    :param size: Cell size in meters.
    :param window_size: Size of the window around the shape centroid.
    :param crs: CRS string for the grid (e.g. "epsg:4326").
    :param area_name: "Area name, Country" used when no base shape is given.
    :param base_shape: Optional path to a shapefile to build the grid over.
    :return: The constructed SquareGrid.
    """
    try:
        if base_shape is not None:
            grid = SquareGrid.from_file(base_shape, meters=size, window_size=window_size, grid_crs=crs)
        else:
            grid = SquareGrid.from_name(area_name, meters=size, window_size=window_size, grid_crs=crs)
    except Exception:
        # FIX: was a bare `except` (which also swallows SystemExit and
        # KeyboardInterrupt) combined with sys.exit(0), signalling SUCCESS
        # to the shell on failure. Exit non-zero instead.
        logger.error("Error in creating tessellation " + output, exc_info=True)
        sys.exit(1)
    return grid
def main(argv):
    """Crawl Foursquare POIs over a square-grid tessellation.

    Builds (or loads) the grid, reads API credentials from the keys file and
    starts the crawler, writing POIs to <outputfolder>/<prefix>_foursquare_pois.csv.

    :param argv: Command line arguments (without the program name).
    """
    parser = argparse.ArgumentParser('Foursquare crawler.')
    # FIX: required='True' passed a *string*; argparse only checks truthiness,
    # so it worked by accident — use the boolean True.
    parser.add_argument('-o', '--outputfolder',
                        help='Output folder where to save the grids.',
                        action='store',
                        dest='outputfolder',
                        required=True,
                        type=str)
    parser.add_argument('-k', '--keys',
                        help='File (Json) with Foursquare Keys.',
                        action='store',
                        dest='keys',
                        required=True,
                        type=str)
    parser.add_argument('-p', '--prefix',
                        action='store',
                        dest='prefix',
                        help='Prefix for the filename. By the default is <prefix>_<grid_type>_<cell_size>, by default is grid.',
                        default='grid',
                        type=str)
    parser.add_argument('-a', '--area',
                        action='store',
                        dest='area_name',
                        help='Area name in the format of "Area name, Country"',
                        default=None,
                        type=str)
    parser.add_argument('-ws', '--window_size',
                        help='Size of the window around the shape centroid.',
                        action='store',
                        dest='window_size',
                        default=None,
                        type=int)
    parser.add_argument('-r', '--restart',
                        help='Restarting point.',
                        action='store',
                        dest='restart',
                        default=None,
                        type=int)
    parser.add_argument('-b', '--base_shape', action='store',
                        help='Path to the shape file used as a base to build the grid over.',
                        dest='base_shape',
                        default=None,
                        type=str)
    parser.add_argument('-s', '--size',
                        help='Cell size, default = 50.',
                        dest='size',
                        default=50,
                        type=int)
    parser.add_argument('-an', '--accountNumber',
                        action='store',
                        dest='account_number',
                        help='Foursquare account number',
                        default="1",
                        type=str)
    parser.add_argument('-v', '--verbose',
                        help='Level of output verbosity.',
                        action='store',
                        dest='verbosity',
                        default=0,
                        type=int,
                        nargs="?")
    # FIX: parse the argv that was passed in instead of silently re-reading
    # sys.argv (the argv parameter was previously ignored).
    args = parser.parse_args(argv)

    # Get foursquare key from key's file.
    foursquare_keys = utils.read_foursqaure_keys(args.keys)
    outputfile = os.path.abspath(os.path.join(
        args.outputfolder, args.prefix + "_foursquare_pois.csv"))
    if (args.verbosity == 1):
        logging.basicConfig(
            format='[ %(levelname)s: %(message)s ]', level=logging.INFO)
    elif (args.verbosity == 2):
        logging.basicConfig(
            format='[ %(levelname)s: %(message)s ]', level=logging.DEBUG)
    logger.info("Outputfile: " + outputfile)

    # Create the tessellation if not passed in input. By default we use a square tessellation.
    grid = write_grid(outputfile, args.size, args.window_size,
                      "epsg:4326", args.area_name, args.base_shape)

    logger.info("Loading Foursquare credentials")
    client_id = foursquare_keys[args.account_number]['CLIENT_ID']
    client_secret = foursquare_keys[args.account_number]['FOURSQUARE_API_TOKEN']

    logger.info("Starting the crawler")
    c = foursquare_crawler.Foursquare(
        client_id=client_id, client_secret=client_secret)
    c.start(grid.grid, outputfile, restart=args.restart)

if __name__ == "__main__":
    main(sys.argv[1:])
|
"""
Misc utility functions and constants
"""
import functools
import inspect
import os
import warnings
from textwrap import dedent
import six
from hvac import exceptions
def raise_for_error(status_code, message=None, errors=None):
    """Helper method to raise exceptions based on the status code of a response received back from Vault.

    :param status_code: Status code received in a response from Vault.
    :type status_code: int
    :param message: Optional message to include in a resulting exception.
    :type message: str
    :param errors: Optional errors to include in a resulting exception.
    :type errors: list | str
    :raises: hvac.exceptions.InvalidRequest | hvac.exceptions.Unauthorized | hvac.exceptions.Forbidden |
        hvac.exceptions.InvalidPath | hvac.exceptions.RateLimitExceeded | hvac.exceptions.InternalServerError |
        hvac.exceptions.VaultNotInitialized | hvac.exceptions.BadGateway | hvac.exceptions.VaultDown |
        hvac.exceptions.UnexpectedError
    """
    # Dispatch table replaces the long if/elif chain; behavior is unchanged.
    status_to_exception = {
        400: exceptions.InvalidRequest,
        401: exceptions.Unauthorized,
        403: exceptions.Forbidden,
        404: exceptions.InvalidPath,
        429: exceptions.RateLimitExceeded,
        500: exceptions.InternalServerError,
        501: exceptions.VaultNotInitialized,
        502: exceptions.BadGateway,
        503: exceptions.VaultDown,
    }
    exception_class = status_to_exception.get(status_code)
    if exception_class is not None:
        raise exception_class(message, errors=errors)
    raise exceptions.UnexpectedError(message or errors)
def generate_method_deprecation_message(to_be_removed_in_version, old_method_name, method_name=None, module_name=None):
    """Build the deprecation warning text for a deprecated method.

    :param to_be_removed_in_version: Version of this module the deprecated method will be removed in.
    :type to_be_removed_in_version: str
    :param old_method_name: Deprecated method name.
    :type old_method_name: str
    :param method_name: Method intended to replace the deprecated method.
    :type method_name: str
    :param module_name: Name of the module containing the new method to use.
    :type module_name: str
    :return: Full deprecation warning message for the indicated method.
    :rtype: str
    """
    parts = [
        "Call to deprecated function '{0}'. This method will be removed in version '{1}'".format(
            old_method_name,
            to_be_removed_in_version,
        )
    ]
    # The usage hint is only added when both pieces of replacement info exist.
    if method_name is not None and module_name is not None:
        parts.append(
            " Please use the '{0}' method on the '{1}' class moving forward.".format(
                method_name,
                module_name,
            )
        )
    return "".join(parts)
def generate_property_deprecation_message(to_be_removed_in_version, old_name, new_name, new_attribute,
                                          module_name='Client'):
    """Build the deprecation warning text for a deprecated property.

    :param to_be_removed_in_version: Version of this module the deprecated property will be removed in.
    :type to_be_removed_in_version: str
    :param old_name: Deprecated property name.
    :type old_name: str
    :param new_name: Name of the new property to use.
    :type new_name: str
    :param new_attribute: The attribute where the new property can be found.
    :type new_attribute: str
    :param module_name: Name of the class owning the new attribute.
    :type module_name: str
    :return: Full deprecation warning message for the indicated property.
    :rtype: str
    """
    deprecation = "Call to deprecated property '{0}'. This property will be removed in version '{1}'".format(
        old_name, to_be_removed_in_version)
    guidance = " Please use the '{0}' property on the '{1}.{2}' attribute moving forward.".format(
        new_name, module_name, new_attribute)
    return deprecation + guidance
def getattr_with_deprecated_properties(obj, item, deprecated_properties):
    """__getattr__ helper that transparently forwards deprecated properties.

    :param obj: Instance of the class containing the deprecated properties.
    :type obj: object
    :param item: Name of the attribute being requested.
    :type item: str
    :param deprecated_properties: Mapping of deprecated property name to a dict
        with at least 'to_be_removed_in_version' and 'client_property' keys
        (optionally 'new_property') used in the emitted deprecation warning.
    :type deprecated_properties: dict
    :return: The value of the replacement property.
    :rtype: object
    """
    # Guard clause: anything not registered as deprecated is a genuine miss.
    if item not in deprecated_properties:
        raise AttributeError("'{class_name}' has no attribute '{item}'".format(
            class_name=obj.__class__.__name__,
            item=item,
        ))
    spec = deprecated_properties[item]
    new_property = spec.get('new_property', item)
    # Temporarily force DeprecationWarnings to always show, then restore.
    warnings.simplefilter('always', DeprecationWarning)
    warnings.warn(
        message=generate_property_deprecation_message(
            to_be_removed_in_version=spec['to_be_removed_in_version'],
            old_name=item,
            new_name=new_property,
            new_attribute=spec['client_property'],
        ),
        category=DeprecationWarning,
        stacklevel=2,
    )
    warnings.simplefilter('default', DeprecationWarning)
    replacement_owner = getattr(obj, spec['client_property'])
    return getattr(replacement_owner, new_property)
def deprecated_method(to_be_removed_in_version, new_method=None):
    """Decorator marking a method as deprecated; a DeprecationWarning is emitted
    whenever the wrapped function is called.

    :param to_be_removed_in_version: Version of this module the decorated method will be removed in.
    :type to_be_removed_in_version: str
    :param new_method: Method intended to replace the decorated method. This method's docstrings are
        included in the decorated method's docstring.
    :type new_method: function
    :return: Wrapped function that includes a deprecation warning and updated docstrings.
    :rtype: types.FunctionType
    """
    def decorator(method):
        # FIX: the original accessed new_method.__name__ unconditionally, so
        # using the decorator without a replacement (new_method=None, the
        # documented default) raised AttributeError. Only look up the
        # replacement's metadata when one was supplied.
        if new_method is not None:
            replacement_name = new_method.__name__
            replacement_module = inspect.getmodule(new_method).__name__
        else:
            replacement_name = None
            replacement_module = None
        deprecation_message = generate_method_deprecation_message(
            to_be_removed_in_version=to_be_removed_in_version,
            old_method_name=method.__name__,
            method_name=replacement_name,
            module_name=replacement_module,
        )

        @functools.wraps(method)
        def new_func(*args, **kwargs):
            warnings.simplefilter('always', DeprecationWarning)  # turn off filter
            warnings.warn(
                message=deprecation_message,
                category=DeprecationWarning,
                stacklevel=2,
            )
            warnings.simplefilter('default', DeprecationWarning)  # reset filter
            return method(*args, **kwargs)

        if new_method:
            new_func.__doc__ = """\
            {message}
            Docstring content from this method's replacement copied below:
            {new_docstring}
            """.format(
                message=deprecation_message,
                new_docstring=dedent(new_method.__doc__),
            )
        else:
            new_func.__doc__ = deprecation_message
        return new_func
    return decorator
def validate_list_of_strings_param(param_name, param_argument):
    """Validate that an argument is a list of strings.

    :param param_name: The name of the parameter being validated. Used in any resulting exception messages.
    :type param_name: str | unicode
    :param param_argument: The argument to validate. None is treated as an
        empty list and a comma-delimited string is split into a list first.
    :type param_argument: list | str | None
    :raises: hvac.exceptions.ParamValidationError if validation fails.
    :return: True if the argument is validated.
    :rtype: bool
    """
    if param_argument is None:
        param_argument = []
    if isinstance(param_argument, str):
        param_argument = param_argument.split(',')
    if not isinstance(param_argument, list) or not all(isinstance(p, str) for p in param_argument):
        error_msg = 'unsupported {param} argument provided "{arg}" ({arg_type}), required type: List[str]'
        raise exceptions.ParamValidationError(error_msg.format(
            param=param_name,
            arg=param_argument,
            arg_type=type(param_argument),
        ))
    # FIX: the docstring documents a bool return, but the function implicitly
    # returned None on success; return True explicitly.
    return True
def list_to_comma_delimited(list_param):
    """Join a list of strings into a single comma-delimited string.

    :param list_param: A list of strings; None is treated as an empty list.
    :type list_param: list | None
    :return: Comma-delimited string.
    :rtype: str
    """
    return ','.join(list_param if list_param is not None else [])
def get_token_from_env():
    """Get the token from the VAULT_TOKEN env var, falling back to ~/.vault-token.

    :return: The vault token if found, else None.
    :rtype: str | None
    """
    token = os.getenv('VAULT_TOKEN')
    if token:
        return token
    token_file_path = os.path.expanduser('~/.vault-token')
    if os.path.exists(token_file_path):
        with open(token_file_path, 'r') as f_in:
            token = f_in.read().strip()
    # An empty/missing token normalizes to None.
    return token or None
def comma_delimited_to_list(list_param):
    """Convert a comma-delimited string into a list of strings.

    Lists pass through unchanged; anything that is neither a list nor a
    string yields an empty list.

    :param list_param: Comma-delimited string or list.
    :type list_param: str | unicode | list
    :return: A list of strings.
    :rtype: list
    """
    if isinstance(list_param, list):
        return list_param
    return list_param.split(',') if isinstance(list_param, str) else []
def validate_pem_format(param_name, param_argument):
    """Validate that an argument is a PEM-formatted public key or certificate.

    :param param_name: The name of the parameter being validated. Used in any resulting exception messages.
    :type param_name: str | unicode
    :param param_argument: The argument to validate; a single PEM string or a list of them.
    :type param_argument: str | unicode | list
    :raises: hvac.exceptions.ParamValidationError if validation fails.
    :return: True if the argument is validated.
    :rtype: bool
    """
    def _check_pem(arg):
        # A minimal structural check: the payload must be wrapped in the
        # standard certificate BEGIN/END markers.
        arg = arg.strip()
        if not arg.startswith('-----BEGIN CERTIFICATE-----') \
                or not arg.endswith('-----END CERTIFICATE-----'):
            return False
        return True

    if isinstance(param_argument, str):
        param_argument = [param_argument]
    if not isinstance(param_argument, list) or not all(_check_pem(p) for p in param_argument):
        error_msg = 'unsupported {param} public key / certificate format, required type: PEM'
        raise exceptions.ParamValidationError(error_msg.format(param=param_name))
    # FIX: the docstring documents a bool return, but the function implicitly
    # returned None on success; return True explicitly.
    return True
def remove_nones(params):
    """Remove None values from optional arguments in a parameter dictionary.

    :param params: The dictionary of parameters to be filtered.
    :type params: dict
    :return: A filtered copy of the parameter dictionary.
    :rtype: dict
    """
    filtered = {}
    for key, value in params.items():
        if value is not None:
            filtered[key] = value
    return filtered
def format_url(format_str, *args, **kwargs):
    """Create a URL using the specified format after escaping the provided arguments.

    :param format_str: The URL containing replacement fields.
    :type format_str: str
    :param args: Positional replacement field values.
    :type args: list
    :param kwargs: Named replacement field values.
    :type kwargs: dict
    :return: The formatted URL path with escaped replacement fields.
    :rtype: str
    """
    # FIX: dropped the `six` indirection — on Python 3, six.text_type is str
    # and six.moves.urllib.parse.quote is urllib.parse.quote, which already
    # UTF-8-encodes str input before quoting. Behavior is unchanged.
    from urllib.parse import quote

    def url_quote(value):
        # Cast first so non-string values (ints, enums, ...) are accepted.
        return quote(str(value))

    escaped_args = [url_quote(value) for value in args]
    escaped_kwargs = {key: url_quote(value) for key, value in kwargs.items()}
    return format_str.format(
        *escaped_args,
        **escaped_kwargs
    )
|
<gh_stars>0
# pylint: disable=wildcard-import
# pylint: disable=unused-wildcard-import
import os
import random
import hashlib
import itertools
import pylev
import pytest
from sbk.mnemonic import *
def _print_words(wl):
    """Print the wordlist sorted, left-padded to 9 chars, eight words per row."""
    for count, word in enumerate(sorted(wl), start=1):
        print(f"{word:<9} ", end=" ")
        if count % 8 == 0:
            print()
    print()
def test_wordlist_constraints():
    """The wordlist must satisfy every property the mnemonic codec relies on."""
    if sorted(WORDLIST) != WORDLIST:
        _print_words(WORDLIST)
        raise Exception("Wordlist not sorted!")
    # no leading/trailing/embedded whitespace, everything lowercase
    assert [w.strip().replace(" ", "").lower() for w in WORDLIST] == WORDLIST
    # every word between 5 and 8 characters long
    assert all(5 <= len(w) <= 8 for w in WORDLIST)
    # the first three letters uniquely identify each of the 256 words
    assert len({w[:3] for w in WORDLIST}) == 256
    # no 3-letter prefix may occur inside any other word
    for w1, w2 in itertools.product(WORDLIST, WORDLIST):
        if w1 != w2:
            assert w1[:3] not in w2
    # all 256 words are distinct
    assert len(set(WORDLIST)) == 256
# Skipped when "slow" is listed in the PYTEST_SKIP env var; the check is
# expensive (256*256 edit-distance computations) and essentially never fails.
@pytest.mark.skipif("slow" in os.getenv('PYTEST_SKIP', ""), reason="Basically this can't fail")
def test_wordlist_distances():
    # Every pair of distinct words must have Damerau-Levenshtein distance >= 3
    # so single-character typos can still be matched to a unique word.
    for w1, w2 in itertools.product(WORDLIST, WORDLIST):
        if w1 != w2:
            d = pylev.damerau_levenshtein(w1, w2)
            assert d >= 3, (w1, w2)
def test_bytes2phrase_fail():
    """bytes2phrase must reject input it cannot encode (a lone byte)."""
    # Idiomatic pytest: pytest.raises replaces the manual
    # try/except/assert-False pattern (pytest is already imported above).
    with pytest.raises(ValueError):
        bytes2phrase(b"\x00")
def test_phrase2bytes_fail():
    """phrase2bytes fuzzy-matches close typos but rejects unknown words."""
    # exact words and typo variants within the fuzzy-match threshold decode
    assert phrase2bytes("abacus abraham", msg_len=2) == b"\x00\x01"
    assert phrase2bytes("abbakus abbrahame", msg_len=2) == b"\x00\x01"
    # Idiomatic pytest: pytest.raises with match= replaces the manual
    # try/except/assert pattern (pytest is already imported above).
    with pytest.raises(ValueError, match="Unknown word"):
        phrase2bytes("abbakuss xanadu", msg_len=2)
def test_fuzz_bytes2phrase():
    """Random even-length byte strings must round-trip through the codec."""
    for step in range(2, 100, 2):
        msg_len = step % 20
        payload = os.urandom(msg_len)
        phrase = bytes2phrase(payload)
        assert phrase2bytes(phrase, msg_len=msg_len) == payload
        # the generated phrase must be pure ASCII (provoke encoding errors)
        assert phrase.encode('ascii').decode('ascii') == phrase
def test_fuzz_phrase2bytes():
    """Random 16-word phrases must round-trip through the byte codec."""
    for _ in range(1, 100):
        left_column = random.sample(WORDLIST, 8)
        right_column = random.sample(WORDLIST, 8)
        phrase = "\n".join(
            a.ljust(9) + " " + b.ljust(9)
            for a, b in zip(left_column, right_column)
        )
        decoded = phrase2bytes(phrase, msg_len=16)
        assert len(decoded) == 16
        assert bytes2phrase(decoded) == phrase
# With one typo per word every word must be recovered (fail ratio 0.0);
# with two typos up to 10% of words may mismatch.
@pytest.mark.parametrize("num_typos, max_fail_ratio", [(1, 0.0), (2, 0.1)])
def test_fuzz_phrase2words_fuzzymatch(num_typos, max_fail_ratio):
    # Verify the fuzzy matcher: phrases with simulated typos should still
    # decode back to (mostly) the original words.
    def _sim_typo(word, num_typos):
        # Overwrite the first num_typos positions with characters drawn from
        # the word itself, simulating plausible transcription errors.
        for i in range(num_typos):
            word = word[:i] + random.choice(word) + word[i + 1 :]
        return word
    ok = 0
    fail = 0
    for _ in range(5):
        words_1 = random.sample(WORDLIST, 8)
        words_2 = random.sample(WORDLIST, 8)
        expected_phrase = "\n".join(w1.ljust(9) + " " + w2.ljust(9) for w1, w2 in zip(words_1, words_2))
        typo_words = "\n".join(
            _sim_typo(w1, num_typos=num_typos).ljust(9) + " " + _sim_typo(w2, num_typos=num_typos).ljust(9)
            for w1, w2 in zip(words_1, words_2)
        )
        data = phrase2bytes(typo_words, msg_len=16)
        result_phrase = bytes2phrase(data)
        result_words = result_phrase.replace("\n", " ").split()
        expected_words = expected_phrase.replace("\n", " ").split()
        for result_word, expected_word in zip(result_words, expected_words):
            if result_word == expected_word:
                ok += 1
            else:
                fail += 1
    # 5 rounds x 16 words = 80 comparisons in total
    assert ok + fail == 80
    assert fail <= 80 * max_fail_ratio
def test_phrase_hardcoded():
    """Guard against accidental edits to the wordlist via a pinned SHA-1."""
    joined = " ".join(WORDLIST)
    assert joined.count(" ") == 255
    digest = hashlib.sha1(joined.encode("ascii")).hexdigest()
    assert digest == "0f897109a5bf74607418c64c6dd85baaa2c210d2"
|
#!/usr/bin/env python
from genericpath import exists
import numpy as np
import glob
from distort_calibration import *
from cartesian import *
from registration_3d import *
from optical_tracking import *
from em_tracking import *
from eval import *
from pathlib import Path
import argparse
import csv
from improved_em_tracking import *
from fiducials import *
def parse_args() -> argparse.Namespace:
    """Parse command line arguments for the PA2 driver.

    :return: Namespace with data_dir, runtype, letter, output_dir and eval.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "data_dir",
        type=str,
        help="path to data directory",
    )
    parser.add_argument(
        "runtype",
        type=int,
        help="0 for debug, 1 for unknown",
    )
    parser.add_argument(
        "letter",
        type=int,
        help="index of the letter",
    )
    parser.add_argument(
        "output_dir",
        type=str,
        help="path to output directory(automatically created if does not exist)",
    )
    # FIX: `type=bool` is a classic argparse bug — bool("False") is True, so
    # any supplied value enabled evaluation. A store_true flag has the
    # intended semantics and still defaults to False.
    parser.add_argument(
        "--eval",
        action="store_true",
        default=False,
        help="Whether to evaluate or not",
    )
    return parser.parse_args()
def main():
args = parse_args()
runtype = args.runtype
run = args.letter
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
type = ['debug', 'unknown']
data_dir = args.data_dir
# first read in Calbody (Nd, Na, Nc)
# then readings (ND, NA, NC, nframes)
# the last output determines whether to show the result in terminal
em_pivot = em_tracking( data_dir , "pa2-", type[runtype] , letters[run] , output = 0)
optical_pivot = optical_tracking( data_dir , "pa2-", type[runtype] , letters[run] , output = 0)
#Added improved_em_tracking file instead of the original
em_pivot = improved_em_tracking(data_dir, "pa2-", type[runtype] , letters[run], output=0)[1]
# print(em_pivot)
tmp_ce = distort_calibration( data_dir , "pa2-", type[runtype] , letters[run] ,output = 0)
C_exp = tmp_ce[0]
Nc = tmp_ce[1]
Nframes = tmp_ce[2]
# print(optical_pivot.shape)
ep = np.transpose(em_pivot)
op = np.transpose(optical_pivot)
# print(em_pivot)
# print(optical_pivot)
em_rounded = np.round(em_pivot.reshape(3), decimals=2)
opt_rounded = np.round(optical_pivot.reshape(3), decimals=2)
C_exp_rounded = np.round(C_exp, decimals=2)
#Reading input from the output1 files instead to reduce possible errors in our own code
output_name = 'pa2-' + type[runtype] + '-' + letters[run] + '-output1.txt'
output_dir = args.output_dir
if not (os.path.exists(os.path.join(os.getcwd(), output_dir))):
os.mkdir(output_dir)
output_path = os.path.join(args.output_dir, output_name)
headers = [str(Nc), '\t' + str(Nframes), '\t' + str(output_name)]
with open(output_path, mode='a') as file:
writer = csv.writer(file)
writer.writerow(headers)
writer.writerow(em_rounded)
writer.writerow(opt_rounded)
writer.writerows(C_exp_rounded)
#Calculating correction
output_path = glob.glob(data_dir + "pa2-" + type[runtype] + '-' + letters[run] + '-output1.txt')
output_data = []
read_file = open(output_path[0], mode='r')
lines = read_file.read().splitlines()
for num in range(len(lines)):
output_data.append( lines[num].split(',') )
for i in range(len(output_data[-1])):
output_data[-1][i] = output_data[-1][i].strip()
Nc = int(output_data[0][0])
Nframes = int(output_data[0][1])
output_name = output_data[0][2]
C_expected = np.asarray(output_data[3:]).astype(float)
# print(output_data.shape)
#Calculating F_reg with improved tracking and em_ct
C_exp, Nc, Nframes,C = distort_calibration( data_dir ,"pa2-", type[runtype] , letters[run], output = 0 )
X, qmin, qmax = distortion_correction(C, C_expected, order=5)
#Producing results and saving the initial points for evaluation purposes.
empivot = improved_em_tracking(data_dir, "pa2-", type[runtype] , letters[run], output=0)
F_reg = em_ct(data_dir,"pa2-", type[runtype] , letters[run],X, qmin, qmax,empivot)
ct_results = em_nav2ct( data_dir,"pa2-", type[runtype] , letters[run], F_reg , X, qmin, qmax,empivot, output=0)
ct_points = ct_results[0]
pivot_set = ct_results[1]
ct_rounded = np.round(ct_points, decimals=2)
Nframes = len(ct_rounded)
output_name = 'pa2-' + type[runtype] + '-' + letters[run] + '-output2.txt'
output_dir = args.output_dir
output_path = os.path.join(args.output_dir, output_name)
headers = [str(Nframes), '\t' + str(output_name)]
with open(output_path, mode='a') as file:
writer = csv.writer(file, delimiter=",")
writer.writerow(headers)
# writer.writerow(em_rounded)
# writer.writerow(opt_rounded)
writer.writerows(ct_rounded)
# print(em_pivot, optical_pivot)
# print(Nc, Nframes)
#Evaluating with all obtained values
if args.eval:
tmp = eval( data_dir , type[runtype] , letters[run] , C_exp_rounded, em_rounded.reshape(3, 1), opt_rounded.reshape(3, 1), ct_rounded, F_reg, pivot_set)
str_tmp = [str(float(x)) for x in tmp]
log_dir = "eval_logs"
if not (os.path.exists(os.path.join(os.getcwd(), log_dir))):
os.mkdir(log_dir)
logs_name = 'pa2-' + type[runtype] + '-' + letters[run] + '-eval_logs.txt'
logs_lst = []
logs_lst.append(("Average for difference of C_exp = " + str_tmp[0]))
logs_lst.append(("Variance for difference of C_exp = " + str_tmp[1]))
logs_lst.append(("Max for difference of C_exp = " + str_tmp[2]))
logs_lst.append(("Min for difference of C_exp = " + str_tmp[3]))
logs_lst.append("\n")
logs_lst.append(("Average for difference of em_pivot = " + str_tmp[4]))
logs_lst.append(("Variance for difference of em_pivot = " + str_tmp[5]))
logs_lst.append(("Max for difference of em_pivot = " + str_tmp[6]))
logs_lst.append(("Min for difference of em_pivot = " + str_tmp[7]))
logs_lst.append("\n")
logs_lst.append(("Average for difference of opt_pivot = " + str_tmp[8]))
logs_lst.append(("Variance for difference of opt_pivot = " + str_tmp[9]))
logs_lst.append(("Max for difference of opt_pivot = " + str_tmp[10]))
logs_lst.append(("Min for difference of opt_pivot = " + str_tmp[11]))
logs_lst.append("\n")
logs_lst.append(("Average for difference of ct_points = " + str_tmp[12]))
logs_lst.append(("Variance for difference of ct_points = " + str_tmp[13]))
logs_lst.append(("Max for difference of ct_points = " + str_tmp[14]))
logs_lst.append(("Min for difference of ct_points = " + str_tmp[15]))
logs_lst.append("\n")
logs_lst.append(("Average for difference of F_reg = " + str_tmp[16]))
logs_lst.append(("Variance for difference of F_reg = " + str_tmp[17]))
logs_lst.append(("Max for difference of F_reg = " + str_tmp[18]))
logs_lst.append(("Min for difference of F_reg = " + str_tmp[19]))
logs_path = os.path.join(log_dir, logs_name)
with open(logs_path, mode='a') as file:
for line in logs_lst:
file.write(line)
file.write('\n')
# print("Average for difference of C_exp = " + str_tmp[0])
# print("Variance for difference of C_exp = " + str_tmp[1])
# print("Max for difference of C_exp = " + str_tmp[2])
# print("Min for difference of C_exp = " + str_tmp[3])
# print("Average for difference of em_pivot = " + str_tmp[4])
# print("Variance for difference of em_pivot = " + str_tmp[5])
# print("Max for difference of em_pivot = " + str_tmp[6])
# print("Min for difference of em_pivot = " + str_tmp[7])
# print("Average for difference of opt_pivot = " + str_tmp[8])
# print("Variance for difference of opt_pivot = " + str_tmp[9])
# print("Max for difference of opt_pivot = " + str_tmp[10])
# print("Min for difference of opt_pivot = " + str_tmp[11])
# Script entry point: run main() (defined earlier in this file) only when
# executed directly, not when imported.
if __name__ == '__main__':
    main()
|
import numpy as np
import math
import tempfile
import numba as nb
import pickle, gzip
import os
''' Important functions which can be used throughout the package '''
def num2vect(num, node_num):
    """Expand an integer into a spin vector of at least node_num entries.

    The binary digits of `num` (zero-padded on the left to `node_num`)
    are mapped digit-by-digit: '1' -> +1.0, '0' -> -1.0.  Returned as
    float32, matching the dtype used elsewhere in the package.
    """
    bits = format(num, 'b').zfill(node_num)
    spins = np.array([1.0 if bit == '1' else -1.0 for bit in bits])
    return spins.astype(np.float32)
def col2vect(arr, node_num):
    """Vectorize a column of integers: each entry becomes a +/-1 spin
    vector of length node_num (see num2vect)."""
    return np.array([num2vect(int(entry), node_num) for entry in arr])
################################################################################################################
@nb.jit(nopython=True, cache=True, nogil=True, fastmath=True)
def bin2num(arr):
    """Interpret arr as big-endian binary digits and return the integer.

    Uses Horner's scheme (acc = acc*2 + bit) instead of explicit powers
    of two; mathematically identical for the same inputs.
    """
    acc = 0
    for bit in arr:
        acc = acc * 2 + bit
    return acc
def vect2num(input):
    """Convert a +/-1 spin vector back to its integer encoding.

    Negative entries are clamped to 0, positives kept, and the result
    is read as big-endian binary by bin2num.  Inverse of num2vect.
    """
    bits = np.copy(input).astype(np.int8)
    bits = np.where(bits < 0, 0, bits)
    return bin2num(bits)
def rows2num(arr):
    """Map every row of arr to its integer encoding via vect2num."""
    return np.array([vect2num(row) for row in arr])
################################################################################################################
def load_data(file_name):
    """Yield every object pickled into file_name, one at a time.

    Supports files built by repeated pickle.dump calls (e.g. opened in
    'ab' mode by pickle_file-style writers); iteration ends cleanly at
    end of file instead of raising EOFError.
    """
    with open(file_name, "rb") as handle:
        while True:
            try:
                yield pickle.load(handle)
            except EOFError:
                # All appended objects consumed.
                return
def pickle_file(arr, file_name):
    """Serialize arr to file_name with pickle ('wb' truncates the file)."""
    with open(file_name, 'wb') as out:
        pickle.dump(arr, out)
################################################################################################################
@nb.jit(nopython=True, cache=True, nogil=True, fastmath=True)
def frust(boolvect1, inter_mat):
    """Relative frustration of a spin vector under interaction matrix.

    Entry (i, j) of the product is J_ij * S_i * S_j; a negative entry
    marks a frustrated interaction.  Returns frustrated count divided
    by the number of edges (non-zero entries of inter_mat).
    """
    edge_count = np.count_nonzero(inter_mat)
    # Reshape to a column because .T on a 1-D array is a no-op in numpy.
    column = boolvect1.reshape((-1, 1))
    pairwise = np.multiply(np.multiply(inter_mat, column), boolvect1)
    frustrated = (pairwise < 0).sum()
    return frustrated / edge_count
def Frustration(vect, inter_mat):
    """Frustration of the state encoded by integer `vect`.

    Decodes the integer to a spin vector sized to inter_mat and
    delegates to the jitted frust() kernel.
    """
    spins = num2vect(vect, inter_mat.shape[0])
    return frust(spins, inter_mat)
################################################################################
def highestPowerof2(n):
    """Return the exponent p of the highest power of 2 that is <= n.

    Bug fix: the previous ``int(math.log(n, 2))`` suffers float
    rounding — e.g. for n = 2**60 - 1 the log rounds to exactly 60.0
    and the wrong exponent 60 was returned.  For ints we now use the
    exact ``bit_length``; floats keep the original formula so prior
    float behavior is unchanged.

    Raises ValueError for n <= 0 (as math.log did).
    """
    if isinstance(n, int):
        if n <= 0:
            raise ValueError("math domain error")
        return n.bit_length() - 1
    # Non-integer input: preserve the original floating-point behavior.
    return int(math.log(n, 2))
################################################################################
class tempmap(np.memmap):
    """
    Extension of numpy memmap that automatically maps to a file stored
    in the OS temporary directory.  Useful as a fast storage option when
    numpy arrays become large and we just want to do some quick
    experimental stuff.
    """
    # np.memmap allocates in __new__, so the subclass must hook __new__
    # (not __init__) to substitute the temp-file backing store.
    def __new__(subtype, dtype=np.uint64, mode='w+', offset=0,
                shape=None, order='C'):
        # Keep the NamedTemporaryFile referenced on the instance so it
        # is not closed (and auto-deleted) while the map is alive.
        ntf = tempfile.NamedTemporaryFile()
        self = np.memmap.__new__(subtype, ntf, dtype, mode, offset, shape, order)
        self.temp_file_obj = ntf
        return self

    def __del__(self):
        # Close (and thereby delete) the backing temp file once.  The
        # hasattr guard matters because numpy views share this class but
        # may lack the attribute.
        if hasattr(self, 'temp_file_obj') and self.temp_file_obj is not None:
            self.temp_file_obj.close()
            del self.temp_file_obj
def np_as_tmp_map(nparray):
    """Copy nparray into a fresh temp-file-backed memmap and return it."""
    mapped = tempmap(dtype=nparray.dtype, mode='w+', shape=nparray.shape)
    mapped[...] = nparray
    return mapped
################################################################################
|
<reponame>TheCoder777/Python-Report-Booklet-Writer
# system modules
import datetime
import time
# internal modules
from ..defines import configs
def __current_year():
    """Return the current (local) calendar year as an int."""
    return datetime.date.today().year
def __calc_start(week: int, year: int) -> str:
    """Return the Monday of ISO week `week` in ISO year `year`.

    Result is formatted yyyy-mm-dd (the HTML date-input format).
    """
    monday = datetime.datetime.strptime(f"{week and year}" and f"{year}-W{week}-D1",
                                        "%G-W%V-D%w")
    return monday.strftime("%Y-%m-%d")
def __calc_end(week: int, year: int) -> str:
    """Return the Friday (weekday 5) of ISO week `week` in ISO year
    `year`, formatted yyyy-mm-dd."""
    friday = datetime.datetime.strptime(f"{year}-W{week}-D5",
                                        "%G-W%V-D%w")
    return friday.strftime("%Y-%m-%d")
def __calc_year(entered_year: int, beginning_year: int) -> int:
    """Return the 1-based apprenticeship year number.

    The first year yields 1, the third year yields 3 (the offset is
    zero-indexed, hence the +1).
    """
    offset = entered_year - beginning_year
    return offset + 1
def __calc_nr(entered_week: int, beginning_week: int, year: int) -> int:
    """Return the running report-booklet number (1-based).

    Counts 52 weeks per completed apprenticeship year plus the weeks
    elapsed in the current one.
    """
    weeks_from_full_years = (year - 1) * 52
    return weeks_from_full_years + entered_week - beginning_week + 1
def __start_date_edge_cases(start_date):
    """Patch known calendar edge cases in a yyyy-mm-dd start date.

    First edge case: the first apprenticeship week is only 4 days long
    and begins on a Tuesday, so Aug 31 is shifted to Sep 1.
    """
    year, month, day = start_date.split("-")
    if (month, day) == ("08", "31"):
        return year + "-09-01"
    return start_date
def calc_sign_date():
    """Return today's date as yyyy-mm-dd, used as the signature date."""
    return datetime.date.today().isoformat()
def get_current_year():
    """Public wrapper around the module-private __current_year()."""
    return __current_year()
def calc_beginning_year():
    """Return the year the apprenticeship begins.

    NOTE(review): this assumes the apprenticeship starts in the current
    calendar year — confirm against callers before relying on it for
    users who started in an earlier year.
    """
    return __current_year()
def get_current_week():
    """Return the current ISO week number as a zero-padded string
    (equivalent to strftime's %V)."""
    iso_week = datetime.date.today().isocalendar()[1]
    return f"{iso_week:02d}"
def calc_all(entered_year: int,
             entered_week: int,
             beginning_year: int = None,
             start_week: int = None):
    """Unified function to calculate pdf params either from config or
    from given values.

    :param entered_year: calendar year the report is for
    :param entered_week: ISO week number the report is for
    :param beginning_year: first year of the apprenticeship
        (defaults to the current year)
    :param start_week: first apprenticeship week
        (defaults to configs.START_WEEK)
    :returns: dict with "start"/"end" dates (yyyy-mm-dd), running
        report number "nr" and single-digit "year"

    Bug fix: the defaults were previously evaluated once at import time
    (``beginning_year=__current_year()``, ``start_week=configs.START_WEEK``),
    so a long-running process crossing New Year — or a config change —
    was silently ignored.  None sentinels resolve them per call.
    """
    if beginning_year is None:
        beginning_year = __current_year()
    if start_week is None:
        start_week = configs.START_WEEK
    # calculate start and end date
    # (strptime format is: year-calender_week-week_day(1-7))
    start_date = datetime.datetime.strptime(
        f"{entered_year}-{entered_week}-{configs.START_OF_WEEK}",
        "%G-%V-%w").strftime("%Y-%m-%d")
    end_date = datetime.datetime.strptime(
        f"{entered_year}-{entered_week}-{configs.END_OF_WEEK}",
        "%G-%V-%w").strftime("%Y-%m-%d")
    # some edge cases for the start_date
    start_date = __start_date_edge_cases(start_date)
    # calculate single digit year
    year = __calc_year(entered_year, beginning_year)
    # calculate number (see __calc_nr)
    nr = __calc_nr(entered_week, start_week, year)
    return {
        "start": start_date,
        "end": end_date,
        "nr": nr,
        "year": year
    }
def calc_user_defaults(year_from_db: int):
    """Default pdf values for a user: today's sign date and the absolute
    calendar year derived from the stored year offset."""
    return {
        "sign": calc_sign_date(),
        "year": __current_year() + year_from_db,
    }
|
<filename>openstackclient/identity/v3/user.py
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 User action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import utils
from openstackclient.identity import common
class CreateUser(show.ShowOne):
    """Create new user"""

    log = logging.getLogger(__name__ + '.CreateUser')

    def get_parser(self, prog_name):
        """Build the argument parser for the `user create` command."""
        parser = super(CreateUser, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<user-name>',
            help='New user name',
        )
        parser.add_argument(
            '--password',
            metavar='<user-password>',
            help='New user password',
        )
        parser.add_argument(
            '--password-prompt',
            dest="password_prompt",
            action="store_true",
            help='Prompt interactively for password',
        )
        parser.add_argument(
            '--email',
            metavar='<user-email>',
            help='New user email address',
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help='Set default project (name or ID)',
        )
        parser.add_argument(
            '--domain',
            metavar='<domain>',
            help='New default domain name or ID',
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help='Description for new user',
        )
        enable_group = parser.add_mutually_exclusive_group()
        enable_group.add_argument(
            '--enable',
            action='store_true',
            help='Enable user (default)',
        )
        enable_group.add_argument(
            '--disable',
            action='store_true',
            help='Disable user',
        )
        return parser

    def take_action(self, parsed_args):
        """Create the user and return (columns, values) for display."""
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity

        # Resolve optional project/domain references to their IDs.
        if parsed_args.project:
            project_id = utils.find_resource(
                identity_client.projects,
                parsed_args.project,
            ).id
        else:
            project_id = None

        if parsed_args.domain:
            domain_id = common.find_domain(identity_client,
                                           parsed_args.domain).id
        else:
            domain_id = None

        enabled = True
        if parsed_args.disable:
            enabled = False
        if parsed_args.password_prompt:
            parsed_args.password = utils.get_password(self.app.stdin)

        user = identity_client.users.create(
            name=parsed_args.name,
            domain=domain_id,
            default_project=project_id,
            # Bug fix: previously referenced an undefined name here; the
            # password comes from the parsed command-line arguments.
            password=parsed_args.password,
            email=parsed_args.email,
            description=parsed_args.description,
            enabled=enabled
        )

        # 'links' is server metadata, not a user property worth showing.
        user._info.pop('links')
        return zip(*sorted(six.iteritems(user._info)))
class DeleteUser(command.Command):
    """Delete user"""

    log = logging.getLogger(__name__ + '.DeleteUser')

    def get_parser(self, prog_name):
        """Build the argument parser for the `user delete` command."""
        parser = super(DeleteUser, self).get_parser(prog_name)
        parser.add_argument('user',
                            metavar='<user>',
                            help='User to delete (name or ID)')
        return parser

    def take_action(self, parsed_args):
        """Resolve the user reference and delete it."""
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity
        user = utils.find_resource(identity_client.users, parsed_args.user)
        identity_client.users.delete(user.id)
        return
class ListUser(lister.Lister):
    """List users"""

    log = logging.getLogger(__name__ + '.ListUser')

    def get_parser(self, prog_name):
        """Build the argument parser for the `user list` command."""
        parser = super(ListUser, self).get_parser(prog_name)
        parser.add_argument('--domain',
                            metavar='<domain>',
                            help='Filter user list by <domain> (name or ID)')
        parser.add_argument('--group',
                            metavar='<group>',
                            help='List memberships of <group> (name or ID)')
        parser.add_argument('--long',
                            action='store_true',
                            default=False,
                            help='List additional fields in output')
        return parser

    def take_action(self, parsed_args):
        """List users, optionally filtered by domain or group membership."""
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity

        # Resolve optional filters to IDs; None means "no filter".
        domain = None
        if parsed_args.domain:
            domain = common.find_domain(identity_client,
                                        parsed_args.domain).id
        group = None
        if parsed_args.group:
            group = utils.find_resource(identity_client.groups,
                                        parsed_args.group).id

        # Column set depends on the --long flag.
        if parsed_args.long:
            columns = ('ID', 'Name', 'Project Id', 'Domain Id',
                       'Description', 'Email', 'Enabled')
        else:
            columns = ('ID', 'Name')

        data = identity_client.users.list(domain=domain, group=group)
        rows = (utils.get_item_properties(user, columns, formatters={})
                for user in data)
        return (columns, rows)
class SetUser(command.Command):
    """Set user properties"""

    log = logging.getLogger(__name__ + '.SetUser')

    def get_parser(self, prog_name):
        """Build the argument parser for the `user set` command."""
        parser = super(SetUser, self).get_parser(prog_name)
        parser.add_argument(
            'user',
            metavar='<user>',
            help='User to change (name or ID)',
        )
        parser.add_argument(
            '--name',
            metavar='<new-user-name>',
            help='New user name',
        )
        parser.add_argument(
            '--password',
            metavar='<user-password>',
            help='New user password',
        )
        parser.add_argument(
            '--password-prompt',
            dest="password_prompt",
            action="store_true",
            help='Prompt interactively for password',
        )
        parser.add_argument(
            '--email',
            metavar='<user-email>',
            help='New user email address',
        )
        parser.add_argument(
            '--domain',
            metavar='<domain>',
            help='New domain name or ID',
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help='New project name or ID',
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help='New description',
        )
        enable_group = parser.add_mutually_exclusive_group()
        enable_group.add_argument(
            '--enable',
            action='store_true',
            help='Enable user (default)',
        )
        enable_group.add_argument(
            '--disable',
            action='store_true',
            help='Disable user',
        )
        return parser

    def take_action(self, parsed_args):
        """Apply the requested property changes to the user.

        A no-op (no options given) returns without contacting the server.
        """
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity

        if parsed_args.password_prompt:
            parsed_args.password = utils.get_password(self.app.stdin)

        # Nothing requested: avoid a pointless update call.
        # Bug fix: the condition previously tested parsed_args.name twice.
        if (not parsed_args.name
                and not parsed_args.password
                and not parsed_args.email
                and not parsed_args.domain
                and not parsed_args.project
                and not parsed_args.description
                and not parsed_args.enable
                and not parsed_args.disable):
            return

        user = utils.find_resource(
            identity_client.users,
            parsed_args.user,
        )

        # Build the update payload from only the options that were given.
        kwargs = {}
        if parsed_args.name:
            kwargs['name'] = parsed_args.name
        if parsed_args.email:
            kwargs['email'] = parsed_args.email
        if parsed_args.password:
            # Bug fix: previously assigned an undefined/garbled name here.
            kwargs['password'] = parsed_args.password
        if parsed_args.description:
            kwargs['description'] = parsed_args.description
        if parsed_args.project:
            project_id = utils.find_resource(
                identity_client.projects, parsed_args.project).id
            kwargs['project'] = project_id
        if parsed_args.domain:
            kwargs['domain'] = common.find_domain(identity_client,
                                                  parsed_args.domain).id
        # Preserve the current enabled state unless explicitly changed.
        kwargs['enabled'] = user.enabled
        if parsed_args.enable:
            kwargs['enabled'] = True
        if parsed_args.disable:
            kwargs['enabled'] = False

        identity_client.users.update(user.id, **kwargs)
        return
class SetPasswordUser(command.Command):
    """Change current user password"""

    log = logging.getLogger(__name__ + '.SetPasswordUser')

    def get_parser(self, prog_name):
        """Build the argument parser for the `user password set` command."""
        parser = super(SetPasswordUser, self).get_parser(prog_name)
        parser.add_argument(
            '--password',
            metavar='<new-password>',
            help='New user password'
        )
        return parser

    def take_action(self, parsed_args):
        """Prompt for the current password and update to the new one."""
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity

        # The current password is always prompted (never taken from argv).
        current_password = utils.get_password(
            self.app.stdin, prompt="Current Password:", confirm=False)

        # Bug fix: previously assigned a garbled/undefined name here; the
        # new password comes from --password or an interactive prompt.
        password = parsed_args.password
        if password is None:
            password = utils.get_password(
                self.app.stdin, prompt="New Password:")

        identity_client.users.update_password(current_password, password)
class ShowUser(show.ShowOne):
    """Show user details"""

    log = logging.getLogger(__name__ + '.ShowUser')

    def get_parser(self, prog_name):
        """Build the argument parser for the `user show` command."""
        parser = super(ShowUser, self).get_parser(prog_name)
        parser.add_argument('user',
                            metavar='<user>',
                            help='User to display (name or ID)')
        parser.add_argument('--domain',
                            metavar='<domain>',
                            help='Domain where user resides (name or ID)')
        return parser

    def take_action(self, parsed_args):
        """Look up the user (optionally scoped to a domain) and return
        its properties as (columns, values)."""
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity

        # Scope the lookup to a domain only when one was given.
        lookup_kwargs = {}
        if parsed_args.domain:
            domain = common.find_domain(identity_client, parsed_args.domain)
            lookup_kwargs['domain_id'] = domain.id
        user = utils.find_resource(identity_client.users,
                                   parsed_args.user,
                                   **lookup_kwargs)

        # 'links' is server metadata, not a user property worth showing.
        user._info.pop('links')
        return zip(*sorted(six.iteritems(user._info)))
|
<gh_stars>1-10
from time import sleep
from threading import Thread
import requests
import sys
import select
import telepot
def get_api_key() -> str:
    """Read the Telegram bot API key from api_key.txt.

    Returns the first line verbatim (any trailing newline included,
    matching the original behavior).
    """
    with open("api_key.txt", 'r') as key_file:
        return key_file.readline()
def get_id() -> int:
    """Read the Telegram chat id from telegram_id.txt (first line)."""
    with open("telegram_id.txt", 'r') as id_file:
        return int(id_file.readline())
# headers are used to fool AMD website (pretend to be a desktop Chrome
# browser so the product page is served normally)
headers = {"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36"}
# CSS class that appears in the page markup only when the product can be
# added to the cart
button = "btn-shopping-cart"  # the shopping button
# NOTE: both files are read at import time; missing files make the import fail.
bot = telepot.Bot(get_api_key())  # put your API key in a file called api_key.txt
telegram_id = get_id()  # put your ID in a file called telegram_id.txt
class URLS:
    """Registry of AMD product pages and their last-seen availability.

    The three parallel lists share an index: urls[i] / names[i] /
    availables[i] describe one product.
    """

    def __init__(self):
        self.urls = []
        self.availables = []
        self.names = []

    def add(self, url: str, name: str):
        """Register an AMD product page and the product's display name.

        New entries start out as unavailable.
        """
        self.urls.append(url)
        self.names.append(name)
        self.availables.append(False)

    def check_availability(self):
        """Re-fetch every registered page and record availability.

        A product counts as available when the shopping-cart button
        markup is present in the response body.
        """
        for idx, url in enumerate(self.urls):
            response = requests.get(url, headers=headers)
            self.availables[idx] = button in response.text

    def get_availables(self) -> list:
        """Return the names of all currently-available products."""
        return [name
                for name, in_stock in zip(self.names, self.availables)
                if in_stock]
class Updater(Thread):
    """Worker thread that refreshes availability of a URLS collection."""

    def __init__(self, urls: URLS):
        super().__init__()
        self.urls = urls

    def run(self):
        # One refresh pass; re-queries every registered product page.
        self.urls.check_availability()
if __name__ == "__main__":
update = True
run = True
urls = URLS()
urls.add("https://www.amd.com/en/direct-buy/5458374200/it", "Available now RX-6900XT\n Buy now: https://www.amd.com/en/direct-buy/5458374200/it")
urls.add("https://www.amd.com/en/direct-buy/5458374100/it", "Available now RX-6800XT\n Buy now: https://www.amd.com/en/direct-buy/5458374100/it")
urls.add("https://www.amd.com/en/direct-buy/5496921500/it", "Available now RX-6800XT Midnight\n Buy now: https://www.amd.com/en/direct-buy/5458374100/it")
urls.add("https://www.amd.com/en/direct-buy/5458374000/it", "Available now RX-6800\n Buy now: https://www.amd.com/en/direct-buy/5458374000/it")
urls.add("https://www.amd.com/en/direct-buy/5496921400/it", "Available now RX-6700XT\n Buy now: https://www.amd.com/en/direct-buy/5496921400/it")
urls.add("https://www.amd.com/en/direct-buy/5450881400/it", "Available now RYZEN 9 5950X\n Buy now: https://www.amd.com/en/direct-buy/5450881400/it")
urls.add("https://www.amd.com/en/direct-buy/5450881500/it", "Available now RYZEN 9 5900X\n Buy now: https://www.amd.com/en/direct-buy/5450881500/it")
urls.add("https://www.amd.com/en/direct-buy/5450881600/it", "Available now RYZEN 7 5800X\n Buy now: https://www.amd.com/en/direct-buy/5450881600/it")
urls.add("https://www.amd.com/en/direct-buy/5450881700/it", "Available now RYZEN 5 5600X\n Buy now: https://www.amd.com/en/direct-buy/5450881700/it")
updater = Updater(urls)
updater.start()
cached_availables = ["cached"]
while run:
# get input non-blocking
command = select.select([sys.stdin], [], [], 1)[0]
if command:
value = sys.stdin.readline().rstrip()
value = value.split(" ") # get the command
if value[0] == "q":
run = False # stop the loop
print("Exiting...")
elif value[0] == "add" and len(value) == 3: # add an url
urls.add(value[1], value[2])
else:
# every loop is like tic-toc
# tic = update
# toc = check availibility
if update:
updater.run()
update = False
else:
update = True
print("Waiting for the updater...")
updater.join()
urls.check_availability()
availables = urls.get_availables() # get all availables
if availables != cached_availables:
if len(availables) == 0:
bot.sendMessage(telegram_id, "Nothing is available")
else:
availables_str = availables[0] # instatiate a string with the first available
if len(availables) > 1: # if there are more than 1 available, add them to the string
for i in range(len(availables)-1):
availables_str = f"{availables_str}, {availables[i+1]}"
bot.sendMessage(telegram_id, availables_str)
cached_availables = availables
sleep(5)
|
<filename>tests/integration/cattletest/core/test_svc_volume_template.py
from common_fixtures import * # NOQA
from cattle import ApiError
import yaml
def test_create_volume_template(client):
    """A volume template created under a stack is listed with its
    attributes and is removed when the stack is removed."""
    driver_opts = {'foo': 'true', 'bar': 'true'}
    stack = client.wait_success(client.create_stack(name=random_str()))
    client.create_volumeTemplate(name="foo", driver="nfs",
                                 driverOpts=driver_opts,
                                 stackId=stack.id)

    # check volume template was created
    templates = client.list_volumeTemplate(stackId=stack.id)
    assert len(templates) == 1
    template = templates[0]
    assert template.name == 'foo'
    assert template.driver == "nfs"
    assert template.driverOpts == driver_opts

    # delete stack; removal cascades to the template
    client.wait_success(stack.remove())
    wait_for(lambda: client.reload(template).state == 'removed')
def test_create_dup_volume_template(client):
    """Creating a second template with the same name in one stack is
    rejected with a 422 NotUnique error on the name field."""
    driver_opts = {'foo': 'true', 'bar': 'true'}
    stack = client.wait_success(client.create_stack(name=random_str()))
    template_args = dict(name="foo", driver="nfs",
                         driverOpts=driver_opts,
                         stackId=stack.id)
    client.create_volumeTemplate(**template_args)

    with pytest.raises(ApiError) as e:
        client.create_volumeTemplate(**template_args)
    assert e.value.error.status == 422
    assert e.value.error.code == 'NotUnique'
    assert e.value.error.fieldName == 'name'
def test_stack_volume(client, context):
    """A stack-scoped volume template yields one shared volume that every
    service in the stack mounts, survives service removal, exports
    correctly to docker-compose, and is removed with the stack.

    NOTE: uses dict.iteritems() — this suite targets Python 2.
    """
    opts = {'foo': 'true', 'bar': 'true'}
    stack = client.create_stack(name=random_str())
    stack = client.wait_success(stack)
    t = client.create_volumeTemplate(name="foo", driver="nfs",
                                     driverOpts=opts,
                                     stackId=stack.id)
    # create service
    image_uuid = context.image_uuid
    launch_config = {"imageUuid": image_uuid, "dataVolumes": "foo:/bar"}
    svc1 = client.create_service(name=random_str(),
                                 stackId=stack.id,
                                 launchConfig=launch_config,
                                 scale=1)
    svc1 = client.wait_success(svc1)
    client.wait_success(svc1.activate())
    c1 = _validate_compose_instance_start(client, svc1, stack, "1")
    # The container mounts exactly one volume at /bar.
    path_to_mount = c1.dataVolumeMounts
    assert len(path_to_mount) == 1
    volume_id_1 = 0
    for key, value in path_to_mount.iteritems():
        assert key == '/bar'
        assert value is not None
        volume_id_1 = value
    # The created volume is named <stack>_foo_<suffix> and carries the
    # template's driver and options.
    volumes = client.list_volume(name_like=stack.name + "_foo_%")
    assert len(volumes) == 1
    volume = volumes[0]
    assert volume.id == volume_id_1
    assert volume.driver == 'nfs'
    assert volume.driverOpts == opts
    svc2 = client.create_service(name=random_str(),
                                 stackId=stack.id,
                                 launchConfig=launch_config,
                                 scale=1)
    svc2 = client.wait_success(svc2)
    client.wait_success(svc2.activate())
    # svc2 volume should be the same (stack scope => shared volume)
    c2 = _validate_compose_instance_start(client, svc2, stack, "1")
    path_to_mount = c2.dataVolumeMounts
    assert len(path_to_mount) == 1
    volume_id_2 = 0
    for key, value in path_to_mount.iteritems():
        assert key == '/bar'
        assert value is not None
        volume_id_2 = value
    assert volume_id_2 == volume_id_1
    # remove services, validate the volume stayed intact
    client.wait_success(svc1.remove())
    client.wait_success(svc2.remove())
    wait_for(lambda: client.reload(volume).state == 'inactive')
    # test export: the template appears in the compose 'volumes' section
    compose_config = stack.exportconfig()
    assert compose_config is not None
    docker_yml = yaml.load(compose_config.dockerComposeConfig)
    assert t.name in docker_yml['volumes']
    vol = docker_yml['volumes'][t.name]
    assert vol['driver'] == 'nfs'
    assert vol['driver_opts'] == opts
    assert 'external' not in vol
    assert 'per_container' not in vol
    # remove stack, validate its volume is removed
    client.wait_success(stack.remove())
    wait_for(lambda: client.reload(volume).state == 'removed')
def test_external_volume(client, context):
    """An external volume template does not create the volume itself: the
    service errors until the volume is created out-of-band, and the
    compose export marks the volume as external.
    """
    opts = {'foo': 'true', 'bar': 'true'}
    stack = client.create_stack(name=random_str())
    stack = client.wait_success(stack)
    t = client.create_volumeTemplate(name="foo", driver="nfs",
                                     driverOpts=opts,
                                     stackId=stack.id,
                                     external=True)
    image_uuid = context.image_uuid
    launch_config = {"imageUuid": image_uuid, "dataVolumes": "foo:/bar"}
    service = client.create_service(name=random_str(),
                                    stackId=stack.id,
                                    launchConfig=launch_config,
                                    scale=1)
    service = client.wait_success(service)
    assert service.state == "inactive"
    # Activation fails while the external volume does not exist yet.
    service.activate()
    wait_for(lambda: client.reload(service).state == 'error')
    # create volume out-of-band; activation can now succeed
    v = client.create_volume(name="foo", driver="nfs", stackId=stack.id)
    client.wait_success(v)
    service.activate()
    wait_state(client, service, 'active')
    # test export: external flag, driver and options round-trip
    compose_config = stack.exportconfig()
    assert compose_config is not None
    docker_yml = yaml.load(compose_config.dockerComposeConfig)
    assert t.name in docker_yml['volumes']
    vol = docker_yml['volumes'][t.name]
    assert vol['external'] is True
    assert vol['driver'] == 'nfs'
    assert vol['driver_opts'] == opts
    assert 'per_container' not in vol
def test_du_unhealthy_reuse_volume(new_context, super_client):
    """When an unhealthy container is replaced inside its deployment
    unit, the replacement reuses the unit's per-container volume; scale
    down keeps exactly one live volume, and stack removal removes all.
    """
    client = new_context.client
    # create storage driver (system stack) so 'nfs' volumes can be scheduled
    storage_stack = client.create_stack(name=random_str())
    super_client.update(storage_stack, system=True)
    storage = client.create_storage_driver_service(
        name=random_str(),
        stackId=storage_stack.id,
        storageDriver={
            'name': 'nfs',
            'volumeAccessMode': 'singleHostRW',
            'blockDevicePath': 'some path',
            'volumeCapabilities': [
                'superAwesome',
            ],
        })
    storage = client.wait_success(storage)
    client.wait_success(storage.activate())
    # create volume template (perContainer => one volume per deployment unit)
    stack = client.create_stack(name=random_str())
    stack = client.wait_success(stack)
    t = client.create_volumeTemplate(name=random_str(),
                                     driver="nfs",
                                     stackId=stack.id,
                                     perContainer=True)
    # create service with two containers mounting the template
    launch_config = {
        'imageUuid': new_context.image_uuid,
        'dataVolumes': t.name + ':/bar'
    }
    svc = client.create_service(name=random_str(),
                                stackId=stack.id,
                                launchConfig=launch_config,
                                scale=2)
    svc = client.wait_success(svc)
    svc = client.wait_success(svc.activate())
    assert len(svc.instanceIds) == 2
    c1 = client.by_id_container(svc.instanceIds[0])
    c1 = client.wait_success(c1)
    assert c1.state == 'running'
    c2 = client.by_id_container(svc.instanceIds[1])
    c2 = client.wait_success(c2)
    assert c2.state == 'running'
    assert c1.id != c2.id
    # Force c2 unhealthy so the orchestrator replaces it.
    super_client.update(c2, healthState='unhealthy')
    c2.restart()
    c2 = wait_for_condition(client, c2, lambda x: x.removed is not None)
    svc = wait_state(client, svc, 'active')
    svc = wait_for_condition(client, svc, lambda x: len(x.instanceIds) == 2)
    # Find the replacement container c3 (same unit as c2, new instance).
    c3 = None
    for i in svc.instanceIds:
        if i != c1.id:
            c3 = client.by_id_container(i)
    assert c3.id != c2.id
    # c3 reuses c2's volume mounts, which differ from c1's (per-container).
    assert dict(c1.dataVolumeMounts) != dict(c2.dataVolumeMounts)
    assert dict(c2.dataVolumeMounts) == dict(c3.dataVolumeMounts)
    # Scale down to 1: only one of the unit volumes stays alive.
    svc = client.update(svc, scale=1)
    wait_state(client, svc, 'active')
    count = set()
    for i in [c1, c2, c3]:
        volume = client.by_id_volume(dict(i.dataVolumeMounts).values()[0])
        volume = client.wait_success(volume)
        if volume.removed is None:
            count.add(volume.id)
    assert len(count) == 1
    # Removing the stack removes every unit volume.
    client.delete(stack)
    stack = client.wait_success(stack)
    assert stack.removed is not None
    volume = client.by_id_volume(dict(c2.dataVolumeMounts).values()[0])
    volume = client.wait_success(volume)
    assert volume.removed is not None
    volume = client.by_id_volume(dict(c1.dataVolumeMounts).values()[0])
    volume = client.wait_success(volume)
    assert volume.removed is not None
def test_du_volume(new_context, super_client):
    """Per-container volume templates create one volume per deployment
    unit: primary and secondary containers of a unit share it, separate
    units get distinct volumes, and scale-down removes the unit volume.
    """
    context = new_context
    client = context.client
    opts = {'foo': 'true', 'bar': 'true'}
    stack = client.create_stack(name=random_str())
    stack = client.wait_success(stack)
    t = client.create_volumeTemplate(name="foo", driver="nfs",
                                     driverOpts=opts,
                                     stackId=stack.id,
                                     perContainer=True)
    # create service with a secondary launch config sharing the mount
    image_uuid = context.image_uuid
    launch_config = {"imageUuid": image_uuid, "dataVolumes": "foo:/bar"}
    secondary_lc = {"imageUuid": image_uuid, "name": "secondary",
                    "dataVolumes": "foo:/bar"}
    svc = client.create_service(name=random_str(),
                                stackId=stack.id,
                                launchConfig=launch_config,
                                scale=2,
                                secondaryLaunchConfigs=[secondary_lc])
    svc = client.wait_success(svc)
    client.wait_success(svc.activate())
    # Unit 1, primary container: one mount at /bar.
    c11 = _validate_compose_instance_start(client, svc, stack, "1")
    path_to_mount = c11.dataVolumeMounts
    assert len(path_to_mount) == 1
    for key, value in path_to_mount.iteritems():
        assert key == '/bar'
        assert value is not None
    c11 = super_client.reload(c11)
    # Unit volumes are named <stack>_foo_<unit-number>_<suffix>.
    name = stack.name + "_foo_1"
    volumes = client.list_volume(name_like=name + "_%")
    assert len(volumes) == 1
    v11 = volumes[0]
    assert v11.driver == 'nfs'
    assert v11.driverOpts == opts
    # Unit 1, secondary container: shares the same unit volume.
    c12 = _validate_compose_instance_start(client, svc, stack,
                                           "1", "secondary")
    path_to_mount = c12.dataVolumeMounts
    assert len(path_to_mount) == 1
    for key, value in path_to_mount.iteritems():
        assert key == '/bar'
        assert value is not None
    name = stack.name + "_foo_1"
    volumes = client.list_volume(name_like=name + "_%")
    assert len(volumes) == 1
    v12 = volumes[0]
    assert v12.id == v11.id
    assert v12.driver == 'nfs'
    assert v12.driverOpts == opts
    # Unit 2 gets its own, distinct volume.
    c21 = _validate_compose_instance_start(client, svc, stack, "2")
    path_to_mount = c21.dataVolumeMounts
    assert len(path_to_mount) == 1
    for key, value in path_to_mount.iteritems():
        assert key == '/bar'
        assert value is not None
    c21 = super_client.reload(c21)
    name = stack.name + "_foo_2"
    volumes = client.list_volume(name_like=name + "_%")
    assert len(volumes) == 1
    v21 = volumes[0]
    assert v21.id != v11.id
    assert v21.driver == 'nfs'
    assert v21.driverOpts == opts
    # scale down,verify that the volume for du2 was removed
    svc = client.update(svc, scale=1)
    client.wait_success(svc, 120)
    wait_state(client, c21, 'removed')
    wait_state(client, v21, 'removed')
    # test export: per_container flag and driver options round-trip
    compose_config = stack.exportconfig()
    assert compose_config is not None
    docker_yml = yaml.load(compose_config.dockerComposeConfig)
    assert t.name in docker_yml['volumes']
    vol = docker_yml['volumes'][t.name]
    assert vol['per_container'] is True
    assert vol['driver'] == 'nfs'
    assert vol['driver_opts'] == opts
    assert 'external' not in vol
def _validate_compose_instance_start(client, service, env,
                                     number, launch_config_name=None):
    """Wait for container `<env>-<service>[-<lc>]-<number>` to be running
    and return it."""
    prefix = "" if launch_config_name is None else launch_config_name + "-"
    name = env.name + "-" + service.name + "-" + prefix + number

    def _one_running(service):
        running = client.list_container(name=name, state="running")
        return len(running) == 1

    wait_for(lambda: wait_for_condition(client, service, _one_running))
    return client.list_container(name=name, state="running")[0]
def test_upgrade_du_volume(client, context, super_client):
    """In-service upgrade must keep the per-container deployment-unit volume:
    the replacement container gets a new id but the same deployment unit and
    the same volume."""
    opts = {'foo': 'true', 'bar': 'true'}
    stack = client.create_stack(name=random_str())
    stack = client.wait_success(stack)
    client.create_volumeTemplate(name="foo", driver="nfs",
                                 driverOpts=opts,
                                 stackId=stack.id,
                                 perContainer=True)

    def assert_single_bar_mount(container):
        # Each container must mount exactly the template volume at /bar.
        # .items() instead of the Python-2-only .iteritems().
        path_to_mount = container.dataVolumeMounts
        assert len(path_to_mount) == 1
        for key, value in path_to_mount.items():
            assert key == '/bar'
            assert value is not None

    # create service
    image_uuid = context.image_uuid
    launch_config = {"imageUuid": image_uuid, "dataVolumes": "foo:/bar"}
    secondary_lc = {"imageUuid": image_uuid, "name": "secondary",
                    "dataVolumes": "foo:/bar"}
    svc = client.create_service(name="duvolume" + random_str(),
                                stackId=stack.id,
                                launchConfig=launch_config,
                                scale=1,
                                secondaryLaunchConfigs=[secondary_lc])
    svc = client.wait_success(svc)
    client.wait_success(svc.activate())
    c11 = _validate_compose_instance_start(client, svc, stack, "1")
    assert_single_bar_mount(c11)
    c11 = super_client.reload(c11)
    name = stack.name + "_foo_1"
    volumes = client.list_volume(name_like=name + "_%")
    assert len(volumes) == 1
    v11 = volumes[0]
    assert v11.driver == 'nfs'
    assert v11.driverOpts == opts
    # upgrade service
    strategy = {"launchConfig": launch_config,
                "intervalMillis": 100}
    svc.upgrade_action(inServiceStrategy=strategy)
    svc = client.wait_success(svc)
    client.wait_success(svc.finishupgrade())
    c12 = _validate_compose_instance_start(client, svc, stack, "1")
    # New container, same deployment unit.
    assert c11.id != c12.id
    assert c11.deploymentUnitUuid == c12.deploymentUnitUuid
    assert_single_bar_mount(c12)
    c12 = super_client.reload(c12)
    # The volume must have been reused, not recreated.
    # (An exact duplicate of this check was removed.)
    name = stack.name + "_foo_1"
    volumes = client.list_volume(name_like=name + "_%")
    assert len(volumes) == 1
    v12 = volumes[0]
    assert v11.id == v12.id
def test_classic_volume(client, context):
    """A v1-style named volume ("foo2:/bar") with no matching volumeTemplate
    must remain a plain dataVolume entry and never become a
    dataVolumeMount."""
    stack = client.wait_success(client.create_stack(name=random_str()))

    # Same classic volume on both primary and secondary launch configs.
    image_uuid = context.image_uuid
    primary = {"imageUuid": image_uuid, "dataVolumes": "foo2:/bar"}
    secondary = {"imageUuid": image_uuid, "name": "secondary",
                 "dataVolumes": "foo2:/bar"}
    svc = client.create_service(name=random_str(),
                                stackId=stack.id,
                                launchConfig=primary,
                                scale=1,
                                secondaryLaunchConfigs=[secondary])
    svc = client.wait_success(svc)
    client.wait_success(svc.activate())

    container = _validate_compose_instance_start(client, svc, stack, "1")
    assert len(container.dataVolumeMounts) == 0
    assert "foo2:/bar" in container.dataVolumes
def test_no_scope(client):
    """Creating a volumeTemplate without any scope (no stackId) must be
    rejected with a 422 validation error."""
    driver_opts = {'foo': 'true', 'bar': 'true'}
    with pytest.raises(ApiError) as excinfo:
        client.create_volumeTemplate(name="foo", driver="nfs",
                                     driverOpts=driver_opts)
    assert excinfo.value.error.status == 422
def test_v1_v2_mix_export(client, context):
    """Export a stack that mixes a v2 volumeTemplate ("foo") with v1-style
    volumes and verify both styles appear correctly in the compose YAML."""
    opts = {'foo': 'true', 'bar': 'true'}
    stack = client.create_stack(name=random_str())
    stack = client.wait_success(stack)
    t = client.create_volumeTemplate(name="foo", driver="nfs",
                                     driverOpts=opts,
                                     stackId=stack.id,
                                     external=True)
    image_uuid = context.image_uuid
    # "foo" matches the template, "bar" is a v1 named volume; "baz" and the
    # host-path mount must not be exported as volumes.
    data_volumes = ["foo:/bar", "bar:/foo", "baz", "/bar:/foo"]
    launch_config = {"imageUuid": image_uuid,
                     "dataVolumes": data_volumes, "volumeDriver": "nfs"}
    svc = client.create_service(name=random_str(),
                                stackId=stack.id,
                                launchConfig=launch_config,
                                scale=1)
    svc = client.wait_success(svc)
    assert svc.state == "inactive"
    # test export
    compose_config = stack.exportconfig()
    assert compose_config is not None
    # safe_load: the export is plain YAML; yaml.load() without an explicit
    # Loader can construct arbitrary objects and is deprecated.
    docker_yml = yaml.safe_load(compose_config.dockerComposeConfig)
    volumes = docker_yml['volumes']
    assert len(volumes) == 2
    # check volumeTemplate volume
    assert t.name in volumes
    vol = volumes[t.name]
    assert vol['external'] is True
    assert vol['driver'] == 'nfs'
    assert vol['driver_opts'] == opts
    assert 'per_container' not in vol
    # check v1 style named volume
    assert 'bar' in volumes
    vol = volumes["bar"]
    assert vol['external'] is True
    assert vol['driver'] == 'nfs'
    assert 'per_container' not in vol
    assert 'volume_driver' not in docker_yml["services"][svc.name]
def test_null_driver_export(client, context):
    """A service with a named volume but no volumeTemplate and no driver
    must export compose YAML with no 'volumes' section at all."""
    stack = client.create_stack(name=random_str())
    stack = client.wait_success(stack)
    image_uuid = context.image_uuid
    data_volumes = ["foo:/bar"]
    launch_config = {"imageUuid": image_uuid,
                     "dataVolumes": data_volumes,
                     }
    svc = client.create_service(name=random_str(),
                                stackId=stack.id,
                                launchConfig=launch_config,
                                scale=1)
    svc = client.wait_success(svc)
    assert svc.state == "inactive"
    # test export
    compose_config = stack.exportconfig()
    assert compose_config is not None
    # safe_load instead of the unsafe/deprecated bare yaml.load().
    docker_yml = yaml.safe_load(compose_config.dockerComposeConfig)
    assert 'volumes' not in docker_yml
def test_volume_template_without_driver_fields(client, context):
    """A volumeTemplate created without driver/driver_opts must export as an
    empty mapping under its name in the compose 'volumes' section."""
    stack = client.create_stack(name=random_str())
    stack = client.wait_success(stack)
    t = client.create_volumeTemplate(name="foo",
                                     stackId=stack.id,
                                     external=False)
    image_uuid = context.image_uuid
    data_volumes = ["foo:/bar"]
    launch_config = {"imageUuid": image_uuid,
                     "dataVolumes": data_volumes,
                     }
    svc = client.create_service(name=random_str(),
                                stackId=stack.id,
                                launchConfig=launch_config,
                                scale=1)
    svc = client.wait_success(svc)
    assert svc.state == "inactive"
    compose_config = stack.exportconfig()
    assert compose_config is not None
    # safe_load instead of the unsafe/deprecated bare yaml.load().
    docker_yml = yaml.safe_load(compose_config.dockerComposeConfig)
    volumes = docker_yml['volumes']
    assert len(volumes) == 1
    assert t.name in volumes
    vol = volumes[t.name]
    # No driver fields were given, so the exported entry is empty.
    assert len(vol) == 0
|
import logging
from abc import ABC
from autoconf import conf
from autofit.mapper.prior_model.abstract import AbstractPriorModel
from autofit.mapper.prior_model.collection import CollectionPriorModel
from autofit.non_linear.analysis.multiprocessing import AnalysisPool
from autofit.non_linear.paths.abstract import AbstractPaths
from autofit.non_linear.result import Result
from autofit.non_linear.samples import OptimizerSamples
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class Analysis(ABC):
    """
    Protocol for an analysis. Defines methods that can or
    must be implemented to define a class that computes the
    likelihood that some instance fits some data.
    """

    def log_likelihood_function(self, instance):
        """Return the log likelihood that ``instance`` fits the data.

        Must be overridden by concrete analyses.
        """
        raise NotImplementedError()

    def visualize(self, paths: AbstractPaths, instance, during_analysis):
        """Optional hook: visualise ``instance``. No-op by default."""
        pass

    def save_attributes_for_aggregator(self, paths: AbstractPaths):
        """Optional hook: persist analysis attributes for later aggregation. No-op by default."""
        pass

    def save_results_for_aggregator(self, paths: AbstractPaths, model: CollectionPriorModel,
                                    samples: OptimizerSamples):
        """Optional hook: persist results for later aggregation. No-op by default."""
        pass

    def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel):
        """
        Overwrite this method to modify the attributes of the `Analysis` class before the non-linear search begins.
        An example use-case is using properties of the model to alter the `Analysis` class in ways that can speed up
        the fitting performed in the `log_likelihood_function`.
        """
        return self

    def modify_after_fit(self, paths: AbstractPaths, model: AbstractPriorModel, result: Result):
        """
        Overwrite this method to modify the attributes of the `Analysis` class after the non-linear search has
        completed. An example use-case is using properties of the result to update the `Analysis` class (the
        original docstring was a copy-paste of `modify_before_fit`).
        """
        return self

    def make_result(self, samples, model, search):
        """Package ``samples``, ``model`` and ``search`` into a `Result`."""
        return Result(samples=samples, model=model, search=search)

    def profile_log_likelihood_function(self, paths: AbstractPaths, instance):
        """
        Overwrite this function for profiling of the log likelihood function to be performed every update of a
        non-linear search.
        This behaves analogously to overwriting the `visualize` function of the `Analysis` class, whereby the user
        fills in the project-specific behaviour of the profiling.
        Parameters
        ----------
        paths
            An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database).
        instance
            The maximum likelihood instance of the model so far in the non-linear search.
        """
        pass

    def __add__(
            self,
            other: "Analysis"
    ) -> "CombinedAnalysis":
        """
        Analyses can be added together. The resultant
        log likelihood function returns the sum of the
        underlying log likelihood functions.
        Parameters
        ----------
        other
            Another analysis class
        Returns
        -------
        A class that computes log likelihood based on both analyses
        """
        # Delegate to CombinedAnalysis.__add__ so the result is always a
        # flat collection of analyses (no nested CombinedAnalysis children).
        if isinstance(
                other,
                CombinedAnalysis
        ):
            return other + self
        return CombinedAnalysis(
            self, other
        )
class CombinedAnalysis(Analysis):
    def __init__(self, *analyses: Analysis):
        """
        Computes the summed log likelihood of multiple analyses
        applied to a single model.
        Either analyses are performed sequentially and summed,
        or they are mapped out to processes.
        If the number of cores is greater than one then the
        analyses are distributed across a number of processes
        equal to the number of cores.
        Parameters
        ----------
        analyses
            The child analyses whose log likelihoods are summed.
        """
        self.analyses = analyses
        # Read general.analysis.n_cores from the project configuration.
        n_cores = conf.instance[
            "general"
        ][
            "analysis"
        ][
            "n_cores"
        ]
        # NOTE: these assignments shadow the log_likelihood_function method
        # defined at the bottom of this class — the instance attribute wins
        # at lookup time, so the method body below is documentation only.
        if n_cores > 1:
            self.log_likelihood_function = AnalysisPool(
                analyses,
                n_cores
            )
        else:
            self.log_likelihood_function = lambda instance: sum(
                analysis.log_likelihood_function(
                    instance
                )
                for analysis in analyses
            )

    def _for_each_analysis(self, func, paths):
        """
        Convenience function to call an underlying function for each
        analysis with a paths object with an integer attached to the
        end.
        Parameters
        ----------
        func
            Some function of the analysis class
        paths
            An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database).
        """
        for i, analysis in enumerate(self.analyses):
            # Each child analysis gets its own sub-path, suffixed by index.
            child_paths = paths.for_sub_analysis(
                analysis_name=f"analyses/analysis_{i}"
            )
            func(child_paths, analysis)

    def save_attributes_for_aggregator(self, paths: AbstractPaths):
        """Persist attributes of every child analysis under its own sub-path."""
        def func(child_paths, analysis):
            analysis.save_attributes_for_aggregator(
                child_paths,
            )

        self._for_each_analysis(
            func,
            paths
        )

    def save_results_for_aggregator(
            self,
            paths: AbstractPaths,
            model: CollectionPriorModel,
            samples: OptimizerSamples
    ):
        """Persist results of every child analysis under its own sub-path."""
        def func(child_paths, analysis):
            analysis.save_results_for_aggregator(
                child_paths,
                model,
                samples
            )

        self._for_each_analysis(
            func,
            paths
        )

    def visualize(
            self,
            paths: AbstractPaths,
            instance,
            during_analysis
    ):
        """
        Visualise the instance according to each analysis.
        Visualisation output is distinguished by using an integer suffix
        for each analysis path.
        Parameters
        ----------
        paths
            An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database).
        instance
            The maximum likelihood instance of the model so far in the non-linear search.
        during_analysis
            Is this visualisation during analysis?
        """
        def func(child_paths, analysis):
            analysis.visualize(
                child_paths,
                instance,
                during_analysis
            )

        self._for_each_analysis(
            func,
            paths
        )

    def profile_log_likelihood_function(
            self,
            paths: AbstractPaths,
            instance,
    ):
        """
        Profile the log likelihood function of the maximum likelihood model instance using each analysis.
        Profiling output is distinguished by using an integer suffix for each analysis path.
        Parameters
        ----------
        paths
            An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database).
        instance
            The maximum likelihood instance of the model so far in the non-linear search.
        """
        def func(child_paths, analysis):
            analysis.profile_log_likelihood_function(
                child_paths,
                instance,
            )

        self._for_each_analysis(
            func,
            paths
        )

    def make_result(
            self, samples, model, search
    ):
        # NOTE(review): unlike Analysis.make_result this returns a *list*
        # of results, one per child analysis — callers must handle both.
        return [analysis.make_result(samples, model, search) for analysis in self.analyses]

    def __len__(self):
        """Number of child analyses."""
        return len(self.analyses)

    def __add__(self, other: Analysis):
        """
        Adding anything to a CombinedAnalysis results in another
        analysis containing all underlying analyses (no combined
        analysis children)
        Parameters
        ----------
        other
            Some analysis
        Returns
        -------
        An overarching analysis
        """
        if isinstance(
                other,
                CombinedAnalysis
        ):
            return CombinedAnalysis(
                *self.analyses,
                *other.analyses
            )
        return CombinedAnalysis(
            *self.analyses,
            other
        )

    def log_likelihood_function(
            self,
            instance
    ) -> float:
        """
        The implementation of this function is decided in the constructor
        based on the number of cores available: an instance attribute set
        in __init__ (AnalysisPool or a summing lambda) shadows this method.
        Parameters
        ----------
        instance
            An instance of a model
        Returns
        -------
        The likelihood that model corresponds to the data encapsulated
        by the child analyses
        """
|
<reponame>bodastage/bts-ce-api
from flask import Blueprint, request, render_template, \
flash, g, session, redirect, url_for, \
jsonify, make_response, send_file
from btsapi.modules.reports.models import Report, ReportCategory, ReportCategoryMASchema, DummyTable, ReportsTaskLog, ReportMASchema
from btsapi.extensions import db
import datetime
import math
from sqlalchemy import Table, MetaData, or_, text, Column, String
from sqlalchemy.orm import load_only, Query
from datatables import DataTables, ColumnDT
from btsapi import app
from flask_login import login_required
import csv
from .dttables import DataTables as DTTables
from sqlalchemy.dialects import postgresql
import pika
import json
import os
# Blueprint exposing all report CRUD/data/export endpoints under /api/reports.
mod_reports = Blueprint('reports', __name__, url_prefix='/api/reports')
@mod_reports.route('/', methods=['GET'], strict_slashes=False)
@login_required
def get_reports():
    """Return every report category with its list of reports.

    Fixed: removed a dead ``db.session.query(Report).all()`` that fetched
    all reports only to be overwritten inside the loop, and replaced the
    manual index counter with direct construction.
    """
    report_data = []
    for category in ReportCategory.query.all():
        reports = db.session.query(Report) \
            .filter_by(category_pk=category.pk).all()
        report_data.append({
            "cat_id": category.pk,
            "cat_name": category.name,
            # Only id/name are exposed per report.
            "reports": [{"id": r.pk, "name": r.name} for r in reports],
        })
    return jsonify(report_data)
@mod_reports.route('/tree/<cat_or_report>/<int:parent_pk>', methods=['GET'], strict_slashes=False)
@login_required
def get_audit_tree(cat_or_report, parent_pk):
    """Get report-category and report nodes for the navigation tree.

    cat_or_report: 'categories' for category nodes; NOTE the report branch
    matches the literal 'rules' (not 'reports') -- kept as-is for caller
    compatibility. Optional query args: search_term, search_categories,
    search_reports ("true"/"false" strings).
    """
    search_term = request.args.get('search_term', "")
    search_categories = request.args.get('search_categories', "false")
    search_reports = request.args.get('search_reports', "true")

    is_category = cat_or_report == 'categories'
    # The four cases are mutually exclusive, so an elif chain replaces the
    # original run of independent ifs. Any other flag value leaves query None.
    query = None
    if is_category and search_categories == "true":
        query = ReportCategory.query.filter(
            ReportCategory.name.ilike('%{}%'.format(search_term))
        ).filter_by(parent_pk=parent_pk)
    elif is_category and search_categories == "false":
        query = ReportCategory.query.filter_by(parent_pk=parent_pk)
    elif cat_or_report == 'rules' and search_reports == "true":
        query = Report.query.filter(
            Report.name.ilike('%{}%'.format(search_term))
        ).filter_by(category_pk=parent_pk)
    elif cat_or_report == 'rules' and search_reports == "false":
        query = Report.query.filter_by(category_pk=parent_pk)

    tree_nodes = []
    if query is not None:
        for r in query.all():
            tree_nodes.append({
                "id": r.pk,
                "label": r.name,
                # Categories are inner (expandable) nodes; reports are leaves.
                "inode": is_category,
                "open": False,
                "nodeType": "category" if is_category else "report"
            })
    return jsonify(tree_nodes)
@mod_reports.route('/fields/<int:report_id>', methods=['GET'])
@login_required
def get_report_data_fields(report_id):
    """Return the column names produced by a report's SQL query.

    Fixed: removed a dead ``fields = []`` assignment that was immediately
    overwritten.
    """
    report = db.session.query(Report).filter_by(pk=report_id).first()
    # Executes the stored report query; only the column names are used.
    fields = db.engine.execute(report.query).keys()
    return jsonify(fields)
def get_query_from_dt_request(request, report_query):
    """
    Build a DataTables response dict for an arbitrary report SQL query.

    Approach: compile a DataTables query against a dummy table, then
    string-substitute the report query in as a derived table ("FROM (...) dt").

    SECURITY NOTE(review): report_query is interpolated into SQL verbatim;
    it must only ever come from trusted, stored report definitions.

    :param request: Flask request carrying the DataTables GET parameters
    :param report_query: raw SQL text of the report
    :return: dict with keys draw, recordsTotal, recordsFiltered, data
    """
    q = Query(DummyTable)
    # Run the report once just to learn its column names.
    table_columns = db.engine.execute(text(report_query)).keys()
    params = request.args.to_dict()
    columns = []
    for c in table_columns:
        # Every column is declared String(255); values pass through as-is.
        columns.append(ColumnDT( Column(c, String(255)), column_name=c, mData=c))
    dt_table = DTTables(params, q, columns)
    # Unfiltered (but paged/sorted) SQL: swap the dummy table for the report query.
    dt_sql = dt_table.compile_query()
    dt_sql = dt_sql.replace('dummy_table.dummy_pk,','')
    dt_sql = dt_sql.replace('FROM dummy_table','FROM ({}) dt '.format(report_query))
    app.logger.info(dt_sql)
    # Same substitution for the filtered variant (search applied).
    dt_filtered_sql = dt_table.compile_query(query=dt_table.filtered_query)
    dt_filtered_sql = dt_filtered_sql.replace('dummy_table.dummy_pk,','')
    dt_filtered_sql = dt_filtered_sql.replace('FROM dummy_table','FROM ({}) dt '.format(report_query))
    app.logger.info(dt_filtered_sql)
    # NOTE(review): each rowcount below costs a full query execution.
    cardinality = db.engine.execute(text(report_query)).rowcount
    cardinality_filtered = db.engine.execute(text(dt_filtered_sql)).rowcount
    result = db.engine.execute(text(dt_sql))
    data_results = [{k: v for k, v in zip(
        table_columns, row)} for row in result.fetchall()]
    # DataTables protocol: counts are echoed back as strings.
    output = {}
    output['draw'] = str(int(params['draw']))
    output['recordsTotal'] = str(cardinality)
    output['recordsFiltered'] = str(cardinality_filtered)
    output['data'] = data_results
    return output
@mod_reports.route('/dt/<int:report_id>', methods=['GET'])
@login_required
def get_report_data(report_id):
    """Serve a stored report's rows in DataTables JSON format."""
    report = db.session.query(Report).filter_by(pk=report_id).first()
    return jsonify(get_query_from_dt_request(request, report.query))
@mod_reports.route('/download/<int:report_id>', methods=['GET'])
@login_required
def download_report(report_id):
    """Queue an asynchronous CSV export of a report.

    Logs a PENDING task row, then publishes the task id to the 'reports'
    RabbitMQ queue for a worker to pick up. Returns status/download URLs
    the client can poll. Any failure yields a 404 with the error text.
    """
    try:
        report = db.session.query(Report).filter_by(pk=report_id).one()
        category = db.session.query(ReportCategory).filter_by(pk=report.category_pk).one()
        # <category>__<report>, lower-cased, spaces replaced by underscores.
        sanitized_filename = "{}__{}".format(category.name.lower().replace(" ", "_"), report.name.lower().replace(" ", "_"))
        filename = "{}.csv".format(sanitized_filename)
        metadata =MetaData()  # NOTE(review): unused leftover of the commented-out reflection below
        # task_log = Table('reports_task_log', metadata, autoload=True, autoload_with=db.engine, schema="reports")
        task_log = ReportsTaskLog()
        task_log.action = 'reports.generate'
        task_log.status = 'PENDING'
        # The worker reads format/filename/query from this options payload.
        task_log.options = {'format': 'csv', 'filename': filename, 'query': report.query}
        db.session.add(task_log)
        db.session.commit()
        db.session.flush()
        task_id = task_log.pk
        # Send download task to reports queue
        mq_user = os.getenv("BTS_MQ_USER", "btsuser")
        mq_pass = os.getenv("BTS_MQ_PASS", "<PASSWORD>")
        mq_host = os.getenv("BTS_MQ_HOST", "192.168.99.100")
        mq_vhost = os.getenv("BTS_MQ_VHOST", "/bs")
        credentials = pika.PlainCredentials(mq_user, mq_pass)
        parameters = pika.ConnectionParameters(mq_host, virtual_host=mq_vhost, credentials=credentials)
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        # durable=True so queued jobs survive a broker restart.
        channel.queue_declare(queue='reports', durable=True)
        data = json.dumps({'task_id': task_id})
        channel.basic_publish(exchange='',
                              routing_key='reports',
                              body=data)
        channel.close()
        return jsonify({'status': 'PENDING', # Job status
                        'message': 'Download job logged.',
                        'status_url': '/api/reports/download/status/{}'.format(task_id),
                        'download_url': '/api/reports/file/{}'.format(task_id)} )
    except Exception as e:
        return jsonify({'status': 'error', 'message': 'Failed to start download', 'error': str(e)}),404
@mod_reports.route('/download/status/<int:job_id>')
@login_required
def get_download_status(job_id):
    """Return the status and log text of a report-generation task.

    Fixed: the original used ``.one()``, which raises NoResultFound for a
    missing job (an unhandled 500) and made the ``is None`` check dead code.
    ``.first()`` returns None so the intended 404 is actually reachable.
    """
    task = db.session.query(ReportsTaskLog).filter_by(pk=job_id).first()
    if task is None:
        return jsonify({'message': 'Message does not exist'}), 404
    return jsonify({'status': task.status, 'log': task.log})
@mod_reports.route('/file/<int:job_id>')
def get_download_file(job_id):
    """Stream a finished report export as a file download.

    Fixed: ``.one()`` raises for a missing job, so the None check was dead;
    ``.first()`` makes the 404 path reachable. The file is only served once
    the task reached FINISHED (its log field then holds the filename).
    """
    task = db.session.query(ReportsTaskLog).filter_by(pk=job_id).first()
    if task is None:
        return jsonify({'message': 'Download does not exist'}), 404
    if task.status != 'FINISHED':
        return jsonify({'message': 'Download does not exist'}), 404
    filename = task.log
    path_to_file = '/reports/{}'.format(filename)
    try:
        # NOTE(review): attachment_filename is the legacy Flask parameter
        # (renamed download_name in Flask 2.x) -- kept for this codebase.
        return send_file(path_to_file, attachment_filename=filename, mimetype='application/octet-stream', as_attachment=True, )
    except Exception as e:
        return jsonify({'message': str(e)}), 404
@mod_reports.route('/create/fields', methods=['POST'])
def get_new_report_columns():
    """Get the list of columns/fields produced by a new report query."""
    payload = request.get_json()
    sql = payload['qry']
    try:
        return jsonify(db.engine.execute(text(sql)).keys())
    except Exception as exc:
        return str(exc), 400
@mod_reports.route('/create/dt', methods=['POST'])
def get_new_report_data():
    """Takes a POST request with the report name and SQL; returns report data."""
    payload = request.get_json()
    try:
        return jsonify(get_query_from_dt_request(request, payload['qry']))
    except Exception as exc:
        return str(exc), 400
@mod_reports.route('/create', methods=['POST'])
def create_or_update_report():
    """Create a report, or update it when 'report_id' is in the payload.

    Expects JSON with name, category_id, qry, notes and options.
    Returns 201 on success, 400 with the error text otherwise.

    Fixed: the except path now rolls the session back so a failed commit
    does not leave the shared session in a broken state (matching
    delete_category).
    """
    content = request.get_json()
    try:
        if 'report_id' not in content:
            report = Report(name=content['name'],
                            category_pk=content['category_id'],
                            query=content['qry'],
                            notes=content['notes'],
                            options=content['options'])
            db.session.add(report)
        else:
            report = db.session.query(Report).filter_by(pk=content['report_id']).first()
            report.name = content['name']
            report.category_pk = content['category_id']
            report.notes = content['notes']
            report.query = content['qry']
            report.options = content['options']
        db.session.commit()
        return jsonify({}), 201
    except Exception as e:
        db.session.rollback()
        return str(e), 400
@mod_reports.route('/create/<int:report_id>', methods=['POST'])
def update_report(report_id):
    """Update an existing report's fields from the JSON payload.

    Fixed: rolls the session back on failure so a failed commit does not
    poison subsequent requests sharing the session.
    """
    content = request.get_json()
    try:
        report = db.session.query(Report).filter_by(pk=report_id).first()
        report.name = content['name']
        report.category_pk = content['category_id']
        report.notes = content['notes']
        report.query = content['qry']
        report.options = content['options']
        db.session.commit()
        return jsonify({}), 201
    except Exception as e:
        db.session.rollback()
        return str(e), 400
@mod_reports.route('/<int:report_id>', methods=['DELETE'])
def delete_report(report_id):
    """Delete a report by primary key.

    Fixed: rolls back on failure, consistent with delete_category.
    """
    try:
        db.session.query(Report).filter_by(pk=report_id).delete()
        db.session.commit()
        return jsonify({}), 200
    except Exception as e:
        db.session.rollback()
        return jsonify(str(e)), 400
@mod_reports.route('/<int:report_id>')
def get_report(report_id):
    """Return a single report serialised with its Marshmallow schema."""
    try:
        report = db.session.query(Report).get(report_id)
        return jsonify(ReportMASchema().dump(report).data)
    except Exception as exc:
        return jsonify(str(exc)), 400
@mod_reports.route('/categories', methods=['POST'])
def create_category():
    """Create a report category from JSON with name and notes.

    Fixed: rolls the session back on failure, consistent with
    delete_category.
    """
    content = request.get_json()
    try:
        category = ReportCategory(name=content['name'],
                                  notes=content['notes'])
        db.session.add(category)
        db.session.commit()
        return jsonify({}), 201
    except Exception as e:
        db.session.rollback()
        return str(e), 400
@mod_reports.route('/categories/<int:id>', methods=['POST'])
def update_category(id):
    """Update a report category's name and notes.

    Fixed: rolls the session back on failure, consistent with
    delete_category.
    """
    content = request.get_json()
    try:
        category = db.session.query(ReportCategory).filter_by(pk=id).first()
        category.name = content['name']
        category.notes = content['notes']
        db.session.commit()
        return jsonify({}), 201
    except Exception as e:
        db.session.rollback()
        return str(e), 400
@mod_reports.route('/categories/<int:id>', methods=['GET'])
def get_category(id):
    """Return a single report category serialised with its schema.

    Fixed: removed an unused request.get_json() call (this is a GET) and an
    unreachable second return statement after the first return.
    """
    try:
        category = db.session.query(ReportCategory).filter_by(pk=id).one()
        ma_schema = ReportCategoryMASchema()
        return jsonify(ma_schema.dump(category).data)
    except Exception as e:
        return str(e), 400
@mod_reports.route('/categories/<int:id>', methods=['DELETE'])
def delete_category(id):
    """Delete a category together with every report it contains."""
    try:
        # Reports reference the category, so remove them first.
        db.session.query(Report).filter_by(category_pk=id).delete()
        db.session.query(ReportCategory).filter_by(pk=id).delete()
        db.session.commit()
        return jsonify({}), 200
    except Exception as exc:
        db.session.rollback()
        return jsonify(str(exc)), 400
@mod_reports.route('/graphdata/<int:report_id>', methods=['GET'])
@login_required
def get_graph_data(report_id):
    """
    Get graph data for a report of type Graph.
    :param report_id:
    :return: JSON list of row dicts (column name -> value)
    """
    report = db.session.query(Report).filter_by(pk=report_id).first()
    # @TODO: Add check for report type
    # Execute the report query once; the original ran it twice (once for
    # the column names and again for the rows).
    result = db.engine.execute(text(report.query))
    table_columns = result.keys()
    data_results = [dict(zip(table_columns, row))
                    for row in result.fetchall()]
    return jsonify(data_results)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.