text stringlengths 957 885k |
|---|
<gh_stars>0
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.scheduler import enqueue_events
from frappe.celery_app import get_celery, celery_task, task_logger, LONGJOBS_PREFIX, ASYNC_TASKS_PREFIX
from frappe.utils import get_sites
from frappe.utils.error import make_error_snapshot
from frappe.utils.file_lock import create_lock, delete_lock
from frappe.handler import execute_cmd
from frappe.async import set_task_status, END_LINE, get_std_streams
from frappe.utils.scheduler import log
import frappe.utils.response
import sys
import time
import json
import os
import MySQLdb
@celery_task()
def sync_queues():
    """Broadcast queue subscriptions so every worker picks up newly added sites."""
    app = get_celery()
    worker_groups = get_workers(app)
    prefixes = ('', LONGJOBS_PREFIX, ASYNC_TASKS_PREFIX)
    for workers, prefix in zip(worker_groups, prefixes):
        for worker in (workers or []):
            sync_worker(app, worker, prefix=prefix)
def get_workers(app):
    """Partition the currently-active workers into (short-job, long-job, async)
    groups based on the prefix of the worker name."""
    shortjob_workers, longjob_workers, async_tasks_workers = [], [], []
    for worker in app.control.inspect().active_queues():
        if worker.startswith(LONGJOBS_PREFIX):
            bucket = longjob_workers
        elif worker.startswith(ASYNC_TASKS_PREFIX):
            bucket = async_tasks_workers
        else:
            bucket = shortjob_workers
        bucket.append(worker)
    return shortjob_workers, longjob_workers, async_tasks_workers
def sync_worker(app, worker, prefix=''):
    """Reconcile one worker's consumed queues with the set it should consume."""
    current = set(get_active_queues(app, worker))
    desired = set(get_required_queues(app, prefix=prefix))
    # subscribe to queues that are missing (skipping sites under maintenance)
    for queue in desired - current:
        if is_site_in_maintenance_mode(queue, prefix):
            continue
        app.control.broadcast('add_consumer', arguments={'queue': queue},
            reply=True, destination=[worker])
    # unsubscribe from queues that are no longer required
    for queue in current - desired:
        app.control.broadcast('cancel_consumer', arguments={'queue': queue},
            reply=True, destination=[worker])
def get_active_queues(app, worker):
    """Return the names of the queues `worker` currently consumes ([] if the
    worker is unknown or inspection returned nothing)."""
    active_queues = app.control.inspect().active_queues()
    queues = (active_queues or {}).get(worker) or []
    return [queue['name'] for queue in queues]
def get_required_queues(app, prefix=''):
    """One queue per site (optionally prefixed) plus the default celery queue."""
    queues = ['{}{}'.format(prefix, site) for site in get_sites()]
    queues.append(app.conf['CELERY_DEFAULT_QUEUE'])
    return queues
def is_site_in_maintenance_mode(queue, prefix):
    """Return True when the site behind `queue` should not get a consumer:
    not installed (no db_name), in maintenance mode, or scheduler disabled.

    Fix: the old code used queue.replace(prefix, ""), which removed the prefix
    anywhere in the queue name; only the leading prefix must be stripped.
    """
    site = queue[len(prefix):] if queue.startswith(prefix) else queue
    try:
        frappe.init(site=site)
        if not frappe.local.conf.db_name or frappe.local.conf.maintenance_mode or frappe.conf.disable_scheduler:
            # don't add site if in maintenance mode
            return True
    finally:
        frappe.destroy()
    return False
@celery_task()
def scheduler_task(site, event, handler, now=False):
    """Run one scheduled `handler` for `site`, serialized by a file lock.

    Fix: the lock is now deleted only when THIS task acquired it. Previously
    the `finally` block always called delete_lock(), so a task that bailed out
    because another worker held the lock would delete that worker's lock.
    """
    traceback = ""
    task_logger.info('running {handler} for {site} for event: {event}'.format(handler=handler, site=site, event=event))
    lock_acquired = False
    try:
        frappe.init(site=site)
        if not create_lock(handler):
            # another worker is already running this handler; leave its lock alone
            return
        lock_acquired = True
        if not now:
            frappe.connect(site=site)
        frappe.get_attr(handler)()
    except Exception:
        frappe.db.rollback()
        traceback = log(handler, "Method: {event}, Handler: {handler}".format(event=event, handler=handler))
        task_logger.warn(traceback)
        raise
    else:
        frappe.db.commit()
    finally:
        if lock_acquired:
            delete_lock(handler)
        if not now:
            frappe.destroy()
    task_logger.info('ran {handler} for {site} for event: {event}'.format(handler=handler, site=site, event=event))
@celery_task()
def enqueue_scheduler_events():
    """Fan out: queue one enqueue_events_for_site task per installed site."""
    for site in get_sites():
        enqueue_events_for_site.delay(site=site)
@celery_task()
def enqueue_events_for_site(site):
    """Enqueue scheduler events for one site, skipping sites in maintenance
    mode or with the scheduler disabled.

    Fix: narrowed the bare `except:` (which also traps SystemExit and
    KeyboardInterrupt) to `except Exception:`; the error is still logged
    and re-raised.
    """
    try:
        frappe.init(site=site)
        if frappe.local.conf.maintenance_mode or frappe.conf.disable_scheduler:
            return
        frappe.connect(site=site)
        enqueue_events(site)
    except Exception:
        task_logger.error('Exception in Enqueue Events for Site {0}'.format(site))
        raise
    finally:
        frappe.destroy()
@celery_task()
def pull_from_email_account(site, email_account):
    """Fetch new messages for one Email Account of `site` and commit them."""
    try:
        frappe.init(site=site)
        frappe.connect(site=site)
        email_account = frappe.get_doc("Email Account", email_account)
        email_account.receive()
        frappe.db.commit()
    finally:
        # always tear down the site connection, even if receive() raised
        frappe.destroy()
@celery_task(bind=True)
def run_async_task(self, site=None, user=None, cmd=None, form_dict=None, hijack_std=False):
    """Execute a whitelisted command `cmd` asynchronously for `site` as `user`.

    Task state is tracked through set_task_status keyed by the celery task id.
    When hijack_std is True, stdout/stderr are redirected to per-task streams
    (END_LINE marks the end of the stream for pollers) and restored afterwards.
    Returns the frappe response dict produced by the command.
    """
    ret = {}
    frappe.init(site)
    frappe.connect()
    # expose the celery task id so downstream code can tag its output
    frappe.local.task_id = self.request.id
    if hijack_std:
        # keep the real streams so they can be restored in the finally block
        original_stdout, original_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = get_std_streams(self.request.id)
        frappe.local.stdout, frappe.local.stderr = sys.stdout, sys.stderr
    try:
        set_task_status(self.request.id, "Running")
        frappe.db.commit()
        frappe.set_user(user)
        frappe.local.form_dict = frappe._dict(form_dict)
        execute_cmd(cmd, from_async=True)
        ret = frappe.local.response
    except Exception, e:
        frappe.db.rollback()
        ret = frappe.local.response
        # propagate the command's HTTP status code when it provides one
        http_status_code = getattr(e, "http_status_code", 500)
        ret['status_code'] = http_status_code
        frappe.errprint(frappe.get_traceback())
        frappe.utils.response.make_logs()
        set_task_status(self.request.id, "Error", response=ret)
        task_logger.error('Exception in running {}: {}'.format(cmd, ret['exc']))
    else:
        set_task_status(self.request.id, "Success", response=ret)
        if not frappe.flags.in_test:
            frappe.db.commit()
    finally:
        if not frappe.flags.in_test:
            frappe.destroy()
        if hijack_std:
            # terminate the per-task streams, then restore the real ones
            sys.stdout.write('\n' + END_LINE)
            sys.stderr.write('\n' + END_LINE)
            sys.stdout.close()
            sys.stderr.close()
            sys.stdout, sys.stderr = original_stdout, original_stderr
    return ret
@celery_task()
def sendmail(site, communication_name, print_html=None, print_format=None, attachments=None,
        recipients=None, cc=None, lang=None, session=None):
    """Send out the email(s) attached to a Communication record of `site`.

    Retries up to 3 times on a MySQL deadlock (error code 1213); any other
    failure is logged with the full call context and re-raised.
    """
    try:
        frappe.connect(site=site)
        if lang:
            frappe.local.lang = lang
        if session:
            # hack to enable access to private files in PDF
            session['data'] = frappe._dict(session['data'])
            frappe.local.session.update(session)
        # upto 3 retries
        for i in xrange(3):
            try:
                communication = frappe.get_doc("Communication", communication_name)
                communication._notify(print_html=print_html, print_format=print_format, attachments=attachments,
                    recipients=recipients, cc=cc)
            except MySQLdb.OperationalError, e:
                # deadlock, try again
                if e.args[0]==1213:
                    frappe.db.rollback()
                    time.sleep(1)
                    continue
                else:
                    raise
            else:
                break
    except:
        # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt;
        # consider narrowing to Exception
        traceback = log("frappe.tasks.sendmail", frappe.as_json({
            "site": site,
            "communication_name": communication_name,
            "print_html": print_html,
            "print_format": print_format,
            "attachments": attachments,
            "recipients": recipients,
            "cc": cc,
            "lang": lang
        }))
        task_logger.error(traceback)
        raise
    else:
        frappe.db.commit()
    finally:
        frappe.destroy()
|
<reponame>Patechoc/xyz2top
#!/usr/bin/env python
import sys, os
import argparse
import numpy as np
import xyz2molecule as xyz
import math
import elements
class atomEntity(object):
    """An atom plus its index in the molecule and the indices of its bonded
    neighbours.

    Fix: the local variable in __str__ used to be named `str`, shadowing the
    builtin; renamed to `text`.
    """
    def __init__(self, atomInfos, atomIndex):
        self.atomIndex = atomIndex        # position of the atom in the molecule's atom list
        self.atomInfos = atomInfos        # underlying atom data (symbol, coordinates, ...)
        self.neighbourIndices = []        # indices of atoms detected as covalently bonded
    def add_neighbourAtom(self, atomEntityObject):
        """Record `atomEntityObject` as a covalent neighbour (stores only its index)."""
        self.neighbourIndices.append(atomEntityObject.atomIndex)
    def get_object(self):
        """Plain-dict representation (for JSON serialization)."""
        obj = {}
        obj["atomIndex"] = self.atomIndex
        obj["atomInfos"] = self.atomInfos.get_object()
        obj["neighbourIndices"] = self.neighbourIndices
        return obj
    def __str__(self):
        text = "ATOM ENTITY: index={}, infos= {}".format(self.atomIndex, self.atomInfos)
        if len(self.neighbourIndices) > 0:
            text += ", #neighbours= {} ({})".format(len(self.neighbourIndices), self.neighbourIndices)
        else:
            text += ", #neighbours= 0"
        return text
class atomPair(object):
    """A pair of atomEntity objects together with their interatomic distance
    and the sum of their covalent radii (the reference bond distance)."""
    def __init__(self, atomEntity_i, atomEntity_j):
        self.atomEntity_i = atomEntity_i
        self.atomEntity_j = atomEntity_j
        # INTER-ATOMIC DISTANCE
        self.distance = get_interatomic_distance(atomEntity_i.atomInfos,
            atomEntity_j.atomInfos)
        # COVALENT BOND DISTANCE (sum of tabulated covalent radii)
        self.covDist = self.sum_covalent_radii()
    def get_object(self):
        """Plain-dict representation (for JSON serialization)."""
        return {
            "atomEntity_i": self.atomEntity_i.get_object(),
            "atomEntity_j": self.atomEntity_j.get_object(),
            "distance": self.distance,
            "covDist": self.covDist,
        }
    def __str__(self):
        rows = ["ATOM PAIR between:",
            "\t {}".format(self.atomEntity_i),
            "\t {}".format(self.atomEntity_j),
            "\t Covalent bond distance: {}".format(self.covDist),
            "\t Interatomic distance : {}".format(self.distance)]
        return "\n".join(rows)
    def sum_covalent_radii(self):
        """Sum of the covalent radii of both atoms, looked up by element symbol."""
        total = 0.0
        for entity in (self.atomEntity_i, self.atomEntity_j):
            total += elements.ELEMENTS[entity.atomInfos.atomSymbol].covrad
        return total
class atomTriple(object):
    """Bond angle I-J-K at the central atom J, built from the two bond
    vectors J->I and J->K."""
    def __init__(self, atomEntity_i, atomEntity_j, atomEntity_k):
        self.atomEntity_i = atomEntity_i
        self.atomEntity_j = atomEntity_j
        self.atomEntity_k = atomEntity_k
        center = np.array(atomEntity_j.atomInfos.coordinates())
        # bond vectors from the central atom J towards I and K
        self.vector_ji = np.array(atomEntity_i.atomInfos.coordinates()) - center
        self.vector_jk = np.array(atomEntity_k.atomInfos.coordinates()) - center
        self.distance_ji = math.sqrt(np.dot(self.vector_ji, self.vector_ji))
        self.distance_jk = math.sqrt(np.dot(self.vector_jk, self.vector_jk))
        # cos(angle) = (JI . JK) / (|JI| |JK|)
        self.cos_angle_ijk = np.dot(self.vector_ji, self.vector_jk)/self.distance_ji/self.distance_jk
        self.angle_ijk = np.arccos(self.cos_angle_ijk)
    def get_object(self):
        """Plain-dict representation (for JSON serialization)."""
        return {
            "atomEntity_i": self.atomEntity_i.get_object(),
            "atomEntity_j": self.atomEntity_j.get_object(),
            "atomEntity_k": self.atomEntity_k.get_object(),
            "angle_ijk": self.angle_ijk,
        }
    def __str__(self):
        rows = ["ATOM TRIPLE between:",
            "\tI: {}".format(self.atomEntity_i),
            "\tJ: {}".format(self.atomEntity_j),
            "\tK: {}".format(self.atomEntity_k),
            "\tdistance JI: {}".format(self.distance_ji),
            "\tdistance JK: {}".format(self.distance_jk),
            "\tangle IJK=acos(JI.JK): {} radians, {} degres".format(self.angle_ijk, self.get_angle())]
        return "\n".join(rows)
    def get_angle(self, inDegree=True):
        """Angle in degrees by default, in radians when inDegree is False."""
        if not inDegree:
            return self.angle_ijk
        return self.angle_ijk*180./math.pi
class atomQuadruple(object):
    """Dihedral angle I-J-K-L: the angle between plane (I,J,K) and plane
    (J,K,L), computed with the atan2 formulation for numerical stability."""
    def __init__(self, atomEntity_i, atomEntity_j, atomEntity_k, atomEntity_l):
        self.atomEntity_i = atomEntity_i
        self.atomEntity_j = atomEntity_j
        self.atomEntity_k = atomEntity_k
        self.atomEntity_l = atomEntity_l
        positions = [np.array(entity.atomInfos.coordinates())
            for entity in (atomEntity_i, atomEntity_j, atomEntity_k, atomEntity_l)]
        vector_ij = positions[1] - positions[0]
        vector_jk = positions[2] - positions[1]
        vector_kl = positions[3] - positions[2]
        ## The unit vectors B1 and B2 define the first plane,
        ## whereas B2 and B3 define the second plane.
        vector_B1 = vector_ij / self.get_distance(vector_ij)
        vector_B2 = vector_jk / self.get_distance(vector_jk)
        vector_B3 = vector_kl / self.get_distance(vector_kl)
        vector_N1 = np.cross(vector_B1, vector_B2)  # normal of the first plane
        vector_N2 = np.cross(vector_B2, vector_B3)  # normal of the second plane
        # Dihedral = atan2( dot(n1 x n2, b2/|b2|), dot(n1, n2) )
        self.dihedral = np.arctan2(np.dot(np.cross(vector_N1, vector_N2), vector_B2),
            np.dot(vector_N1, vector_N2))
    def get_distance(self, vector):
        """Euclidean norm of `vector`."""
        return math.sqrt(np.dot(vector, vector))
    def get_object(self):
        """Plain-dict representation (for JSON serialization)."""
        return {
            "atomEntity_i": self.atomEntity_i.get_object(),
            "atomEntity_j": self.atomEntity_j.get_object(),
            "atomEntity_k": self.atomEntity_k.get_object(),
            "atomEntity_l": self.atomEntity_l.get_object(),
            "dihedral_degree": self.get_dihedral_angle(),
            "dihedral_radian": self.get_dihedral_angle(inDegree=False),
        }
    def __str__(self):
        rows = ["ATOM QUADRUPLE between:",
            "\tI: {}".format(self.atomEntity_i),
            "\tJ: {}".format(self.atomEntity_j),
            "\tK: {}".format(self.atomEntity_k),
            "\tL: {}".format(self.atomEntity_l),
            "\tdihedral angle: {} radians, {} degres".format(
                self.get_dihedral_angle(inDegree=False), self.get_dihedral_angle())]
        return "\n".join(rows)
    def get_dihedral_angle(self, inDegree=True):
        """Dihedral angle in degrees by default, in radians when inDegree is False."""
        if not inDegree:
            return self.dihedral
        return self.dihedral*180./math.pi
class topology(object):
    """Molecular topology derived from an xyz geometry.

    Holds, per molecule: all atom pairs, the pairs detected as covalent bonds,
    the angles between bonds sharing a central atom, and the dihedral angles.
    A pair counts as covalently bonded when its interatomic distance is below
    covRadFactor * (sum of the two covalent radii).
    """
    def __init__(self, molecule, covRadFactor=1.3):
        # covRadFactor: tolerance factor applied to the sum of covalent radii
        self.molecule = molecule
        self.covRadFactor = covRadFactor
        # one atomEntity per atom, same order/index as molecule.listAtoms
        self.atomEntities = [atomEntity(ai,i) for i,ai in enumerate(self.molecule.listAtoms)]
        self.atomicPairs = [] # contains all atomPairs
        self.covalentBonds = [] # contains only atomPairs detected as connected
        self.covalentBondAngles = []
        self.covalentDihedralAngles = []
        # guard flags so each derivation step runs at most once
        self.covBonds_built = False
        self.covBondAngles_built = False
        self.covBondDihedrals_built = False
        self.build_topology()
    def get_object(self):
        """Plain-dict representation of the whole topology (for the JSON dump)."""
        obj = {}
        obj["molecule"] = self.molecule.get_object()
        obj["atomEntities"] = [e.get_object() for e in self.atomEntities]
        obj["atomicPairs"] = [p.get_object() for p in self.atomicPairs]
        obj["covalentBonds"] = [b.get_object() for b in self.covalentBonds]
        obj["covalentBondAngles"] = [b.get_object() for b in self.covalentBondAngles]
        obj["covalentDihedralAngles"] = [b.get_object() for b in self.covalentDihedralAngles]
        return obj
    def get_indices_neighbouringAtoms(self, indexAtomEntity):
        """Indices of the atoms covalently bonded to atom `indexAtomEntity`."""
        entity = self.get_atomEntity_by_index(indexAtomEntity)
        return entity.neighbourIndices
    def get_atomEntity_by_index(self, indexAtomEntity):
        """Return the atomEntity whose index is `indexAtomEntity`."""
        # NOTE(review): O(n) scan; `self.atomEntities[indexAtomEntity]` would be O(1)
        return [ai for i,ai in enumerate(self.atomEntities) if i == indexAtomEntity][0]
    def __str__(self):
        # multi-line summary; the backslashes keep it a single string literal
        return "TOPOLOGY summary:\
\n\tmolecule: {} ({} atoms)\
\n\tCovalent radius factor: {}\
\n\tTotal nb. of possible atomic pairs : {}\
\n\tTotal nb. of pairs detected as bonds: {}\
\n\tTotal nb. of angles between bonds: {}\
\n\tTotal nb. of dihedral angles: {}\
".format(self.molecule.shortname, self.molecule.nbAtomsInMolecule,
            self.covRadFactor, len(self.atomicPairs),
            len(self.covalentBonds),
            len(self.covalentBondAngles),
            len(self.covalentDihedralAngles))
    def is_connected(self, pair):
        """True when the pair's distance is below covRadFactor * covalent distance."""
        isConnected = False
        if ( get_interatomic_distance(pair.atomEntity_i.atomInfos,
                pair.atomEntity_j.atomInfos)
                < self.covRadFactor * pair.covDist):
            isConnected = True
        return isConnected
    def add_covalentBond(self, ai, aj, i, j):
        """Register the pair (i, j); when close enough, also record it as a
        covalent bond and mark both atoms as mutual neighbours."""
        entity_i = atomEntity(ai, i)
        entity_j = atomEntity(aj, j)
        pair = atomPair(entity_i, entity_j)
        # add the pair to the list of atomic pairs
        self.atomicPairs.append(pair)
        # if the atoms are 'close' enough, add the pair to the list of covalentBonds too
        if self.is_connected(pair):
            self.covalentBonds.append(pair)
            self.atomEntities[i].add_neighbourAtom(entity_j)
            self.atomEntities[j].add_neighbourAtom(entity_i)
    def get_covalentBonds(self):
        """Detect covalent bonds over all unique atom pairs (runs once)."""
        if not(self.covBonds_built):
            # go through all unique pairs of atoms (j > i)
            # compare distance to covalent bond distance (scaled)
            [[self.add_covalentBond(ai, aj, i, j) for j, aj in enumerate(self.molecule.listAtoms) if j>i] for i, ai in enumerate(self.molecule.listAtoms[:-1])]
            self.covBonds_built = True
        else:
            print "Covalent bonds have already been found!"
    def get_covalentBondAngles(self):
        """Collect one atomTriple per pair of bonds sharing a central atom (runs once)."""
        if not(self.covBondAngles_built):
            # reduce the search to the atoms that have 'at least' 2 neighbours
            indicesAtomsWithEnoughBonds =[j for j,aj in enumerate(self.atomEntities)
                if len(self.get_atomEntity_by_index(j).neighbourIndices) > 1]
            for j in indicesAtomsWithEnoughBonds:
                for i in self.get_indices_neighbouringAtoms(j):
                    for k in self.get_indices_neighbouringAtoms(j):
                        # k>i keeps only one orientation of each I-J-K angle
                        if k>i:
                            ai = self.get_atomEntity_by_index(i)
                            aj = self.get_atomEntity_by_index(j)
                            ak = self.get_atomEntity_by_index(k)
                            self.covalentBondAngles.append(atomTriple(ai,aj,ak))
            self.covBondAngles_built = True
        else:
            print "Covalent bond angles have already been found!"
    def get_covalentDihedralAngles(self):
        """Collect one atomQuadruple per central bond J-K whose both ends have
        at least one further neighbour (runs once)."""
        if not(self.covBondDihedrals_built):
            ### reduce the search to the bonding atoms that have 'at least' 2 neighbours each
            indicesPairsWithEnoughNeighbours = []
            for indPair,pair in enumerate(self.covalentBonds):
                indicesNeighboursAtom2 = self.get_indices_neighbouringAtoms(pair.atomEntity_i.atomIndex)
                indicesNeighboursAtom3 = self.get_indices_neighbouringAtoms(pair.atomEntity_j.atomIndex)
                if len(indicesNeighboursAtom2)>1 and len(indicesNeighboursAtom3)>1:
                    indicesPairsWithEnoughNeighbours.append(indPair)
            ### for each such bond, find all possible plans to compare and get dihedral angles
            for indPair in indicesPairsWithEnoughNeighbours:
                pairJK = self.covalentBonds[indPair]
                j = pairJK.atomEntity_i.atomIndex
                k = pairJK.atomEntity_j.atomIndex
                atomJ = self.get_atomEntity_by_index(j)
                atomK = self.get_atomEntity_by_index(k)
                # I: neighbours of J other than K; L: neighbours of K other than J
                indicesAtom1 = [i for i in self.get_indices_neighbouringAtoms(j) if i!=k]
                indicesAtom4 = [l for l in self.get_indices_neighbouringAtoms(k) if l!=j]
                for i in indicesAtom1:
                    atomI = self.get_atomEntity_by_index(i)
                    for l in indicesAtom4:
                        atomL = self.get_atomEntity_by_index(l)
                        self.covalentDihedralAngles.append(atomQuadruple(atomI,atomJ,atomK,atomL))
            self.covBondDihedrals_built = True
        else:
            print "Dihedral angles have already been found!"
    def build_topology(self):
        """Run the three derivation steps in order: bonds -> angles -> dihedrals."""
        self.get_covalentBonds()
        self.get_covalentBondAngles()
        self.get_covalentDihedralAngles()
    def get_as_JSON(self):
        """Serialize the whole topology to a pretty-printed JSON string."""
        topo = self.get_object()
        import json
        return json.dumps(topo, sort_keys=True, indent=4)
    def order_convalentBondDistances(self):
        """Bond rows sorted by a unique pair id (i*nbAtoms + j), header row first."""
        out = ""
        list_pairs = []
        # build list of unique IDs to order covalent bond distances
        for indPair,pair in enumerate(self.covalentBonds):
            [i,j] = sorted([pair.atomEntity_i.atomIndex, pair.atomEntity_j.atomIndex])
            id = i * self.molecule.nbAtomsInMolecule + j
            list_pairs.append([id, i, j, pair.covDist, pair.distance])
        # order pairs by their IDs
        ordered_list = sorted(list_pairs)
        ordered_list.insert(0, ["uniquePairID", "index_i", "index_j", "covBondDist [A]", "distance [A]"])
        return ordered_list
    def order_convalentBondDistances_string(self):
        """CSV-like text rendering of order_convalentBondDistances()."""
        ordered_list = self.order_convalentBondDistances()
        out = "\n".join(["".join(["{0}".format("".join([str(elem), ","]).ljust(20, ' ')) for elem in pair]) for pair in ordered_list])
        return out
    def order_angles(self):
        """Angle rows sorted by a unique triple id (base-nbAtoms digits i,j,k)."""
        out = ""
        list_triples = []
        # build list of unique IDs to order angles between covalent bonds
        for ind,triple in enumerate(self.covalentBondAngles):
            [i,k] = sorted([triple.atomEntity_i.atomIndex, triple.atomEntity_k.atomIndex])
            j = triple.atomEntity_j.atomIndex
            id = i * self.molecule.nbAtomsInMolecule**2 + j*self.molecule.nbAtomsInMolecule + k
            list_triples.append([id, i, j, k,
                triple.distance_ji, triple.distance_jk,
                triple.get_angle(inDegree=False),
                triple.get_angle(inDegree=True)])
        # order triples by their IDs
        ordered_list = sorted(list_triples)
        ordered_list.insert(0, ["uniqueID", "index_i", "index_j","index_k",
            "covBondDist IJ [A]", "covBondDist JK [A]",
            "Angle IJK [rad]",
            "Angle IJK [deg]"])
        return ordered_list
    def order_angles_string(self):
        """CSV-like text rendering of order_angles()."""
        ordered_list = self.order_angles()
        out = "\n".join(["".join(["{0}".format("".join([str(elem), ","]).ljust(22, ' ')) for elem in triple]) for triple in ordered_list])
        return out
    def order_dihedralAngles(self):
        """Dihedral rows sorted by a unique quadruple id (base-nbAtoms digits i,j,k,l)."""
        out = ""
        list_quads = []
        # build list of unique IDs to order dihedral angles between 3 covalent bonds
        for ind, quad in enumerate(self.covalentDihedralAngles):
            i = quad.atomEntity_i.atomIndex
            j = quad.atomEntity_j.atomIndex
            k = quad.atomEntity_k.atomIndex
            l = quad.atomEntity_l.atomIndex
            id = i * self.molecule.nbAtomsInMolecule**3 \
                + j*self.molecule.nbAtomsInMolecule**2 \
                + k*self.molecule.nbAtomsInMolecule \
                + l
            list_quads.append([id, i, j, k, l,
                quad.get_dihedral_angle(inDegree=False),
                quad.get_dihedral_angle(inDegree=True)])
        # order quads by their IDs
        ordered_list = sorted(list_quads)
        ordered_list.insert(0, ["uniqueID", "index_i", "index_j", "index_k", "index_l",
            "Dihedral IJ-KL [rad]",
            "Dihedral IJ-KL [deg]"])
        return ordered_list
    def order_dihedralAngles_string(self):
        """CSV-like text rendering of order_dihedralAngles()."""
        ordered_list = self.order_dihedralAngles()
        out = "\n".join(["".join(["{0}".format("".join([str(elem), ","]).ljust(22, ' ')) for elem in quad]) for quad in ordered_list])
        return out
    def write_topology_files(self, prefix = ""):
        """Write config + bonds + angles + dihedrals files and return the four
        file names (prefixed with the molecule short name when no prefix given)."""
        filename_config = "configTopology.txt"
        filename_bonds = "covBondDist.csv"
        filename_angles = "covAngles.csv"
        filename_dihedralAngles = "covDihedralAngles.csv"
        if prefix == "":
            prefix = self.molecule.shortname
        filename_config = prefix + "_" + filename_config
        filename_bonds = prefix + "_" + filename_bonds
        filename_angles = prefix + "_" + filename_angles
        filename_dihedralAngles = prefix + "_" + filename_dihedralAngles
        # config. file with arbitrary choice of covalent radius coefficient
        # (defining if an atom pair is a covalent bond)
        config = "covRadFactor = " + str(self.covRadFactor)
        with open(filename_config, 'w') as outfile:
            outfile.write(config)
        # covalent bonds distances
        str_bonds = self.order_convalentBondDistances_string()
        with open(filename_bonds, 'w') as outfile:
            outfile.write(str_bonds)
        # angles between bonds
        str_angles = self.order_angles_string()
        with open(filename_angles, 'w') as outfile:
            outfile.write(str_angles)
        # dihedrals angles between bonds
        str_dihedral = self.order_dihedralAngles_string()
        with open(filename_dihedralAngles, 'w') as outfile:
            outfile.write(str_dihedral)
        return [filename_config, filename_bonds, filename_angles, filename_dihedralAngles]
    def get_as_Zmatrix(self, useVariables=False):
        """Z-matrix export — stub: currently only locates the first atom that
        has at least one neighbour and returns an empty string."""
        atomIndicesWritten = []
        zmat=""
        debug_msg =""
        # find first atom with at least one neighbour
        # (if atoms no neighbours, they should be treated in the end)
        i = 0
        atomI = self.get_atomEntity_by_index(i)
        while(len(atomI.neighbourIndices) == 0):
            i+=1
            atomI = self.get_atomEntity_by_index(i)
        print debug_msg
        return zmat
def read_arguments():
    """Build and run the CLI parser: mandatory XYZ file plus optional output
    file, covalent-radius factor and verbosity flag."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("filename",
        help="molecular geometry in .XYZ format")
    arg_parser.add_argument('-out', nargs='?', type=argparse.FileType('w'),
        default=sys.stdout,
        help="optional output filename,\
if not, default is [filename].top")
    arg_parser.add_argument("-crf", "--covRadFactor", type=float,
        help="optional covalent radius factor,\
equal to 1 by default")
    arg_parser.add_argument("-v", "--verbose", action="store_true",
        help="increase output verbosity")
    return arg_parser.parse_args()
def get_interatomic_distance(atomInfos_i,atomInfos_j):
    """
    Euclidean distance between two atomInfos objects; only their
    xCoord/yCoord/zCoord attributes are read, nothing is mutated.
    """
    dx = atomInfos_i.xCoord - atomInfos_j.xCoord
    dy = atomInfos_i.yCoord - atomInfos_j.yCoord
    dz = atomInfos_i.zCoord - atomInfos_j.zCoord
    return math.sqrt(dx*dx + dy*dy + dz*dz)
def main():
# read inputs
args = read_arguments()
path_to_file = os.path.abspath(args.filename)
if (args.covRadFactor == None):
print "no factor for bond distance specified\n>> default covalent radius factor will apply.\n(Run './main.py --help' for more options.)"
else:
print "Covalent radius factor set to ", args.covRadFactor
if args.verbose:
print "Approximate the molecular topology stored in {} \n \
with connections detected as covalent bonds if pair-atomic \
distance goes below {} times the sum of the covalent radii.\
".format(args.filename, args.covRadFactor)
### parse_molecule_XYZ()
molecule = xyz.parse_XYZ(path_to_file)
#print molecule.get_object()
#print molecule
### compute the topology
if (args.covRadFactor != None):
molecular_topology = topology(molecule, args.covRadFactor)
else:
molecular_topology = topology(molecule)
molecular_topology.build_topology()
# print molecular_topology.get_as_JSON()
print molecular_topology
### print topology to file
jsonString = molecular_topology.get_as_JSON()
with open('./topology.json', 'w') as outfile:
outfile.write(jsonString)
print "\nZmatrix format: (not done yet)"
print molecular_topology.get_as_Zmatrix()
print "\nZmatrix format with variables: (not done yet)"
print molecular_topology.get_as_Zmatrix(useVariables=True)
print "\nCreate 3 topology files for bonds, angles and dihedrals + config.txt"
[filename_config, filename_bonds, filename_angles, filename_dihedralAngles] = molecular_topology.write_topology_files()
print "files generated:" \
+ "\n\t- " + filename_config \
+ "\n\t- " + filename_bonds \
+ "\n\t- " + filename_angles \
+ "\n\t- " + filename_dihedralAngles;
if __name__ == "__main__":
main()
|
<reponame>hardbyte/sorting-gym<gh_stars>1-10
import pytest
from gym.spaces import flatten
from sorting_gym.agents.scripted import bubble_sort_agent, insertion_sort_agent, quicksort_agent
from sorting_gym.envs.functional_neural_sort_interface import FunctionalNeuralSortInterfaceEnv
from tests.util import _test_sort_agent
def test_reset_gives_valid_observation():
    """A freshly reset env must produce an observation of the expected flat size."""
    env = FunctionalNeuralSortInterfaceEnv(k=4, number_of_functions=5)
    obs = flatten(env.nested_observation_space, env.reset())
    # expected flattened width for k=4 / 5 functions; the addends presumably
    # mirror the observation-space components — TODO confirm breakdown
    assert obs.shape[0] == 68 + 5 + 6 + 51 + 1
def test_function_env_preserves_function_id():
    """
    Create a functional environment with 2 functions taking 0 args and returning 0 args
    and verify that calls and returns maintain the current-function marker.
    """
    env = FunctionalNeuralSortInterfaceEnv(k=3, number_of_functions=2, function_inputs=0, function_returns=0)
    original_obs = env.reset()
    # -1 means "not inside any function"
    assert original_obs['current_function'] == -1
    assert env.action_space.contains((3, 0))
    # action (3, f): call function f
    obs, reward, done, info = env.step((3, 0))
    assert obs['current_function'] == 0
    obs, reward, done, info = env.step((3, 1))
    assert obs['current_function'] == 1
    # action (4,): return — pops back to the caller, then to the top level
    obs, reward, done, info = env.step((4,))
    assert obs['current_function'] == 0
    obs, reward, done, info = env.step((4,))
    assert obs['current_function'] == -1
def test_function_env_can_pass_through_arg():
    """
    Functional environment with 1 function taking 1 arg and returning 1 arg
    We will create a function that assigns the input to a local variable, and
    returns that local variable.
    """
    env = FunctionalNeuralSortInterfaceEnv(k=3, number_of_functions=1, function_inputs=1, function_returns=1)
    env.reset()
    n = len(env.A) - 1
    assert env.current_function == -1
    # v[1] starts as a pointer to the end of the array, v[2] is free
    assert env.v[1] == n
    assert env.v[2] == 0
    # Call the function 0 with:
    # local variable ID l=0
    # outer variable ID o=1 (pointing to end of array)
    # returning ID r=2
    obs, reward, done, info = env.step((3, 0, 0, 1, 2))
    assert obs['current_function'] == 0
    # inside the function the visible variables are fresh (zeroed)
    assert env.v[1] == 0
    assert env.v[2] == 0
    # Now inside the function assign "local" variable (id 1) with the function input (id 0)
    # Which should be our locally passed in end of array pointer
    obs, reward, done, info = env.step((2, 1, 0))
    assert env.v[1] == n
    assert env.v[2] == 0
    # Now return from the function with local variable (id 1).
    # Returning ID is 2, so now v[2] should be n
    obs, reward, done, info = env.step((4, 1))
    assert env.v[2] == n
def test_function_env_swap_args():
    """
    Functional environment with 1 function taking 2 arg and returning 2 args
    We will create a function that swaps the inputs.
    """
    env = FunctionalNeuralSortInterfaceEnv(k=3, number_of_functions=1, function_inputs=2, function_returns=2)
    env.reset()
    n = len(env.A) - 1
    assert env.current_function == -1
    # seed two distinguishable values that the function will swap
    env.v[1] = 1
    env.v[2] = 2
    # Call the function
    obs, reward, done, info = env.step((3, 0,
        0, 1, # local inputs
        1, 2, # outer variables
        1, 2 # write over inputs
        ))
    assert obs['current_function'] == 0
    assert env.v[0] == 1
    assert env.v[1] == 2
    # Swap the "local" variables
    # Save temp var (id 2) with the first function input (id 0)
    obs, reward, done, info = env.step((2, 2, 0))
    # Assign v0 = v1
    obs, reward, done, info = env.step((2, 0, 1))
    # Assign v1 = v2
    obs, reward, done, info = env.step((2, 1, 2))
    assert env.v[0] == 2
    assert env.v[1] == 1
    # Now return from the function.
    obs, reward, done, info = env.step((4, 0, 1))
    # Check that the outer scope has had the variables swapped
    assert env.v[1] == 2
    assert env.v[2] == 1
def test_function_env_swap_args_in_call():
    """
    Functional environment with 1 function taking 2 arg and returning 2 args
    We will create a nop function that swaps the inputs by swapping the return args.
    """
    env = FunctionalNeuralSortInterfaceEnv(k=3, number_of_functions=1, function_inputs=2, function_returns=2)
    env.reset()
    n = len(env.A) - 1
    assert env.current_function == -1
    # seed two distinguishable values that the return will swap
    env.v[1] = 1
    env.v[2] = 2
    # Call the function
    obs, reward, done, info = env.step((3, 0,
        0, 1, # local inputs
        1, 2, # outer variables
        1, 2 # write over inputs
        ))
    assert obs['current_function'] == 0
    assert env.v[0] == 1
    assert env.v[1] == 2
    # Now return from the function - swapping the return values around
    obs, reward, done, info = env.step((4, 1, 0))
    # Check that the outer scope has had the variables swapped
    assert env.v[1] == 2
    assert env.v[2] == 1
def test_bubble_sort_agent():
    """
    Functional environment should still work using the scripted
    Bubble Sort agent.
    """
    _test_sort_agent(bubble_sort_agent, FunctionalNeuralSortInterfaceEnv(k=3), 100)
def test_bubble_sort_agent_not_enough_pointers():
    """With only k=2 pointers the scripted bubble-sort agent must fail."""
    env = FunctionalNeuralSortInterfaceEnv(k=2)
    with pytest.raises(IndexError):
        _test_sort_agent(bubble_sort_agent, env, 100)
def test_quick_sort_agent():
    """
    Tests the environment using a Quick Sort agent.
    c.f. Algorithm 8 - pg 25
    """
    env = FunctionalNeuralSortInterfaceEnv(k=4, number_of_functions=2)
    # larger step budget than the other agent tests
    _test_sort_agent(quicksort_agent, env, number_of_problems=100, max_steps=10000, verbose=True)
|
<reponame>Vman45/app
import os
from email.message import EmailMessage, Message
from email.utils import make_msgid, formatdate
from smtplib import SMTP
import dkim
from jinja2 import Environment, FileSystemLoader
from app.config import (
SUPPORT_EMAIL,
ROOT_DIR,
POSTFIX_SERVER,
NOT_SEND_EMAIL,
DKIM_SELECTOR,
DKIM_PRIVATE_KEY,
DKIM_HEADERS,
ALIAS_DOMAINS,
SUPPORT_NAME,
)
from app.log import LOG
def render(template_name, **kwargs) -> str:
    """Render an email template from templates/emails with the given context."""
    loader = FileSystemLoader(os.path.join(ROOT_DIR, "templates", "emails"))
    return Environment(loader=loader).get_template(template_name).render(**kwargs)
def send_welcome_email(user):
    """Onboarding email sent right after signup."""
    context = dict(name=user.name, user=user)
    send_email(
        user.email,
        f"Welcome to SimpleLogin {user.name}",
        render("com/welcome.txt", **context),
        render("com/welcome.html", **context),
    )
def send_trial_end_soon_email(user):
    """Reminder sent shortly before a user's trial expires."""
    context = dict(name=user.name, user=user)
    send_email(
        user.email,
        f"Your trial will end soon {user.name}",
        render("transactional/trial-end.txt", **context),
        render("transactional/trial-end.html", **context),
    )
def send_activation_email(email, name, activation_link):
    """Account-activation email containing the activation link."""
    context = dict(name=name, activation_link=activation_link, email=email)
    send_email(
        email,
        f"Just one more step to join SimpleLogin {name}",
        render("transactional/activation.txt", **context),
        render("transactional/activation.html", **context),
    )
def send_reset_password_email(email, name, reset_password_link):
    """Password-reset email containing the reset link."""
    context = dict(name=name, reset_password_link=reset_password_link)
    send_email(
        email,
        "Reset your password on SimpleLogin",
        render("transactional/reset-password.txt", **context),
        render("transactional/reset-password.html", **context),
    )
def send_change_email(new_email, current_email, name, link):
    """Confirmation email sent to the new address when a user changes email."""
    context = dict(name=name, link=link, new_email=new_email, current_email=current_email)
    send_email(
        new_email,
        "Confirm email update on SimpleLogin",
        render("transactional/change-email.txt", **context),
        render("transactional/change-email.html", **context),
    )
def send_new_app_email(email, name):
    """Follow-up email asking the user for questions or feedback."""
    context = dict(name=name)
    send_email(
        email,
        f"Any question/feedback for SimpleLogin {name}?",
        render("com/new-app.txt", **context),
        render("com/new-app.html", **context),
    )
def send_test_email_alias(email, name):
    """Test email delivered to one of the user's aliases."""
    context = dict(name=name, alias=email)
    send_email(
        email,
        f"This email is sent to {email}",
        render("transactional/test-email.txt", **context),
        render("transactional/test-email.html", **context),
    )
def send_cannot_create_directory_alias(user, alias, directory):
    """when user cancels their subscription, they cannot create alias on the fly.
    If this happens, send them an email to notify
    """
    subject = f"Alias {alias} cannot be created"
    plaintext = render(
        "transactional/cannot-create-alias-directory.txt",
        name=user.name,
        alias=alias,
        directory=directory,
    )
    html = render(
        "transactional/cannot-create-alias-directory.html",
        name=user.name,
        alias=alias,
        directory=directory,
    )
    send_email(user.email, subject, plaintext, html)
def send_cannot_create_domain_alias(user, alias, domain):
    """when user cancels their subscription, they cannot create alias on the fly with custom domain.
    If this happens, send them an email to notify
    """
    subject = f"Alias {alias} cannot be created"
    plaintext = render(
        "transactional/cannot-create-alias-domain.txt",
        name=user.name,
        alias=alias,
        domain=domain,
    )
    html = render(
        "transactional/cannot-create-alias-domain.html",
        name=user.name,
        alias=alias,
        domain=domain,
    )
    send_email(user.email, subject, plaintext, html)
def send_reply_alias_must_use_personal_email(user, alias, sender):
    """
    The reply_email can be used only by user personal email.
    Notify user if it's used by someone else
    """
    subject = f"Reply from your alias {alias} only works with your personal email"
    plaintext = render(
        "transactional/reply-must-use-personal-email.txt",
        name=user.name,
        alias=alias,
        sender=sender,
        user_email=user.email,
    )
    html = render(
        "transactional/reply-must-use-personal-email.html",
        name=user.name,
        alias=alias,
        sender=sender,
        user_email=user.email,
    )
    send_email(user.email, subject, plaintext, html)
def send_email(to_email, subject, plaintext, html):
    """Deliver an email through the local Postfix relay.

    When NOT_SEND_EMAIL is set (dev/test config), the message is only logged.
    *html* may be None, in which case a plain-text-only message is sent.
    The message is DKIM-signed for the support address's domain.
    """
    if NOT_SEND_EMAIL:
        LOG.d(
            "send email with subject %s to %s, plaintext: %s",
            subject,
            to_email,
            plaintext,
        )
        return

    msg = EmailMessage()
    msg["Subject"] = subject
    msg["From"] = f"{SUPPORT_NAME} <{SUPPORT_EMAIL}>"
    msg["To"] = to_email

    msg.set_content(plaintext)
    if html is not None:
        msg.add_alternative(html, subtype="html")

    msg_id_header = make_msgid()
    LOG.d("message-id %s", msg_id_header)
    msg["Message-ID"] = msg_id_header

    date_header = formatdate()
    LOG.d("Date header: %s", date_header)
    msg["Date"] = date_header

    # add DKIM, signing with the domain of the support address
    email_domain = SUPPORT_EMAIL[SUPPORT_EMAIL.find("@") + 1 :]
    add_dkim_signature(msg, email_domain)

    msg_raw = msg.as_string().encode()
    # fix: the SMTP connection was never closed (socket leak). The context
    # manager sends QUIT and closes the socket when the block exits.
    # host IP, setup via Docker network
    with SMTP(POSTFIX_SERVER, 25) as smtp:
        smtp.sendmail(SUPPORT_EMAIL, to_email, msg_raw)
def get_email_name(email_from):
    """Return the display-name part of a From header.

    "First Last <a@b.c>" -> "First Last"
    "a@b.c" -> ""
    """
    bracket_pos = email_from.find("<")
    if bracket_pos == -1:
        return ""
    return email_from[:bracket_pos].strip()
def get_email_part(email_from):
    """Return the address part of a From header.

    "First Last <a@b.c>" -> "a@b.c"
    A bare address without angle brackets is returned unchanged.
    """
    open_pos = email_from.find("<")
    if open_pos == -1:
        return email_from
    close_pos = email_from.find(">")
    return email_from[open_pos + 1 : close_pos].strip()
def get_email_local_part(email):
    """Return the local part of an address: "ab@cd.com" -> "ab"."""
    at_pos = email.find("@")
    return email[:at_pos]
def get_email_domain_part(email):
    """Return the domain part of an address: "ab@cd.com" -> "cd.com"."""
    at_pos = email.find("@")
    return email[at_pos + 1 :]
def add_dkim_signature(msg: Message, email_domain: str):
    """DKIM-sign *msg* in place for *email_domain*.

    Any pre-existing DKIM-Signature header is removed first so the message
    is never double-signed.
    """
    if msg["DKIM-Signature"]:
        LOG.d("Remove DKIM-Signature %s", msg["DKIM-Signature"])
        del msg["DKIM-Signature"]

    # Specify headers in "byte" form
    # Generate message signature
    sig = dkim.sign(
        msg.as_string().encode(),
        DKIM_SELECTOR,
        email_domain.encode(),
        DKIM_PRIVATE_KEY.encode(),
        include_headers=DKIM_HEADERS,
    )
    sig = sig.decode()

    # remove linebreaks from sig so it fits on a single header line
    sig = sig.replace("\n", " ").replace("\r", "")

    # dkim.sign() returns the full "DKIM-Signature: ..." line; strip the
    # header name before adding it as a header value
    msg.add_header("DKIM-Signature", sig[len("DKIM-Signature: ") :])
def add_or_replace_header(msg: Message, header: str, value: str):
    """Set *header* to *value*, replacing an existing occurrence.

    fix: the old code relied on ``add_header`` raising ValueError when the
    header already exists — that only happens for EmailMessage singleton
    headers; on a legacy ``Message`` (compat32) ``add_header`` silently
    appends a duplicate. Checking membership first works for both APIs.
    """
    if header in msg:
        msg.replace_header(header, value)
    else:
        msg.add_header(header, value)
def delete_header(msg: Message, header: str):
    """Remove every occurrence of *header* (case-insensitive); no-op if absent.

    fix: the old loop removed entries from ``msg._headers`` while iterating
    it, which skips the element following each removal, so consecutive
    duplicate headers survived. ``Message.__delitem__`` already deletes all
    occurrences safely, without touching private state.
    """
    del msg[header]
def email_belongs_to_alias_domains(email: str) -> bool:
    """return True if an emails ends with one of the alias domains provided by SimpleLogin"""
    return any(email.endswith("@" + domain) for domain in ALIAS_DOMAINS)
def can_be_used_as_personal_email(email: str) -> bool:
    """Return True if *email* may serve as a personal (real) mailbox.

    An address is rejected when its domain is one of SimpleLogin's own
    ALIAS_DOMAINS or a verified custom domain.
    """
    domain = get_email_domain_part(email)
    if not domain:
        return False

    if domain in ALIAS_DOMAINS:
        return False

    # local import to avoid a circular dependency with app.models
    from app.models import CustomDomain

    if CustomDomain.get_by(domain=domain, verified=True):
        return False

    return True
|
# Lesson 6. Loops — practice script for while/for, range(), break/continue
# and simple string accumulation. Runs top-to-bottom and prompts the user
# twice (factorial input and the sum-until-zero exercise).
print("\n--- Print out numbers from 0, 1, ... 10")
x = 1
while x <= 10:
    print(x)
    x +=1
# while x > 0:  # always true -> endless loop
#     print(x)  # endless loop
#     x +=1
print("\n--- Print out num in order reverce 8,7 .... 1")
x = 8
while x >= 1:
    print(x)
    x -= 1
print()
# even numbers 2, 4, 6 ... 20
x =2
while x<=20:
    print(x)
    x +=2
print()
print("\n--- 1+2+3+ .... 10")
# running sum 1+2+3+ .... 10
s = 0
x = 1
while x <= 10:
    s = s + x
    x += 1
print(s)
print("\n--- Factorial - n!")
# n! = 1*2*3*4*5...*n - factorial
n = int(input("Enter number: "))
factorial = 1
x = 1
while x <= n:
    # f = factorial * x
    factorial *= x
    x += 1
# print(f"{n}! = {f}")
print(f"{n}! = {factorial}")
print("\n--- ")
# range(n) -> 0 ... n-1 (i.e. the half-open interval 0...n)
print("--- output 0,1, ... 10")
# 0,1, ... 10
for x in range(11):  # = 0-10
    print(x)
print("\n 5 6 7 8 9 10")
# range with an explicit start: 5..10
for x in range(5,11):
    print(x)
print("\n--- ")
# 1,2, ... 10
for x in range(1, 11):  # interval from 1 to 10
    print(x)
print("\n--- Print all odd numbers from 3, 5, 7, ... 15")
# third range argument is the step
for number in range(3, 16, 2):
    print(number)
print("\n--- Countdown 10,9,8, ... 1")
# 10,9,8, ... 1 — a negative step counts down
for x in range(10,0,-1):
    print(x)
print("\n--- Output 10,7,4,1")
for x in range(10,0,-3):
    print(x)
print("\n--- Output 1 2 3 4 5 6 7 8 9 10")
# build s = "1 2 3 4 5 6 7 8 9 10"
s = m = ''
x = 1
while x <= 10:
    s = s + str(x) + " "
    m = m + str(x) + "_"  # extra symbol at the end
    x += 1
print(m)
print(s[:-1])  # slice off the trailing space
print(s.strip())
print("\n--- Ptint out 1 sheep... 2 sheep... 30 sheep...")
sh = ''
for x in range(1,31):
    sh = sh + f"{x} sheep... "
print(sh)
# NOTE: sh is not reset here, so the second loop appends to the first result
for x in range(1,31):
    if x < 30:
        sh = sh + f"{x} sheep... "
    else:
        sh = sh + f"{x} sheep..."  # no space at the end
print(sh)
print("\nAdd all odd numbers 1+3+5+7...99")
s = 0
for x in range(1, 100, 2):
    s += x
print(s)
print("\n--- Break/continue in loop")
# break - exits the loop immediately
# continue - skips the rest of the current iteration
print("--- Print sum of two entered nunbers until they entred 0")
s = 0
while True:
    n = int(input("Enter number: "))
    if n == 0:
        break
    s = s + n  # same as s += n
print(s)
print("\n--- Print odd numbers 1 3 5 7 9 11")
s = ''
x = 1
while x < 12:
    if x % 2 == 0:  # x is even -> skip it
        x += 1
        continue
    s = s + str(x) + " "
    x += 1
print(s)
print("\n--- Sum 1+2+3+5+6+7+8+9+10")
a= 0
x =1
while x <=10:
    if x == 4:
        x += 1  # if this increment were skipped --> endless loop
        continue
    a += x
    x += 1
print(a)
print("\n--- Print od numbers 1 3 5 7 9 11")
s = ''
x = 1
while x < 12:
    if x % 2 != 0:  # x is odd
        s = s + str(x) + " "
    x += 1
print(s)
print("\n--- Print stars in Pyramid shape")
# *
# **
# ***
# ****
s = ''
n = 4
x = 1  # number of * on the current row
while (x <= n):
    print("*" * x)
    x = x +1
print(s)
# same pyramid with a for loop
n = 5
for x in range(1, n+1):
    print("*" * x)
print(s)
# same pyramid accumulated into one string
n = 4
x = 1
while (x <= n):
    if x < n:
        s = s + "*" * x + "\n"
    else:
        s = s + "*" * x  # no trailing newline on the last row
    x = x + 1
print(s)
print("\n--- Print numbers 10 in Pyramid shape")
# 10
# 1010
# ...
n = 10
s = ""
x = 1
while x <= n:
    s = s + str(n) * x + "\n"
    x += 1
print(s)
print("\n--- Pyramid lined up numbers")
# 1
# 22
# 333
# 4444
# 55555
s = ""
n = 5
for x in range(1, n+1):  # rows 1..n
    if x < n:
        s = s + str(x) * x + "\n"
    else:
        s = s + str(x) * x
print(s)
|
# -*- coding: utf-8 -*-
import os
"""
General Django settings for FST webservice
"""
# Absolute filesystem anchors for the rest of this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT = os.path.abspath(os.path.dirname(__file__))
# Helper that builds paths relative to this settings package.
make_root_path = lambda *args: os.path.join(ROOT, *args)
# Read SECRET_KEY from file at project level
# To replace secret key with a new one,
# run: 'python manage.py generate_secret_key --replace'
PARENT_DIR = os.path.abspath(os.path.join(ROOT, os.pardir))
SECRET_FILE = os.path.join(PARENT_DIR, 'secretkey.txt')
with open(SECRET_FILE) as f:
    SECRET_KEY = f.read().strip()
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Stockholm'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'sv-se'
DATE_FORMAT = 'Y-m-d'

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Generate this the same way for all rinfo instances!
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'uploads')

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')

# NOTE! In Django 1.4 this replaces "ADMIN_MEDIA_PREFIX"
# URL prefix for admin static files -- CSS, JavaScript and images.
STATIC_URL = '/static/'

# NOTE! This is deprecated in Django 1.4
# URL prefix for admin static files -- CSS, JavaScript and images.
ADMIN_MEDIA_PREFIX = '/media/'

# Additional locations of static files
STATICFILES_DIRS = ()

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Default. More documentaton here:
# http://docs.djangoproject.com/en/dev/ref/contrib/sites/
SITE_ID = 1

MIDDLEWARE_CLASSES = (
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.http.ConditionalGetMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'fst_web.urls'
WSGI_APPLICATION = 'fst_web.wsgi.application'

# Specify directory where logs can be found
LOG_DIR = (make_root_path('logs'))

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'whitenoise.runserver_nostatic',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    # Application specific here 'fst_web.fs_doc',
    'fst_web.fs_doc',
    'fst_web.adminplus',
)

# Ensure that users are logged out automatically if inactive for
# specified time.
SESSION_SAVE_EVERY_REQUEST = True  # Refresh cookie on new activity
SESSION_COOKIE_AGE = 30 * 60  # Cookie expires after this number of seconds

# Specify how detailed log output you want
LOG_LEVEL = "WARNING"
DB_DEBUG_LEVEL = "WARNING"  # Silence noisy debug output

EMAIL_HOST_USER = None  # Email notifications are enabled in local settings

# Debug-toolbar hookup, kept for reference:
# MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES +
#     ('debug_toolbar.middleware.DebugToolbarMiddleware',)
# INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar',)
# INTERNAL_IPS = ('127.0.0.1',) #

# New for Django 1.4: list all possible password algorithms.
# Unless your application has very special security needs, default is fine.
PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.SHA1PasswordHasher',
    'django.contrib.auth.hashers.MD5PasswordHasher',
    'django.contrib.auth.hashers.CryptPasswordHasher',
)
# Look for instance-specific settings.
# Star-imports let a deployment override any of the values above.
# TODO - declare specific imports
try:
    from .local_settings import *  # Use local settings if they exist
except ImportError:
    from .demo_settings import *  # else fall back to demo settings
# Django template engine configuration. DEBUG comes from the settings module
# imported just above (local_settings or demo_settings).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [make_root_path('templates')],
        'OPTIONS': {
            'debug': DEBUG,
            'loaders':
                ['django.template.loaders.filesystem.Loader',
                 'django.template.loaders.app_directories.Loader'
                 ],
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Project-specific:
                "fst_web.context_processors.add_request_vars",
            ],
        },
    },
]
# Setup standard logging: size-rotating files for requests, app logging,
# debugging DB calls etc.
# fix: the 'verbose' format contained a stray backslash (`\%(process)d`),
# which put a literal "\" into every formatted record; also dropped the
# redundant `'%s' % LOG_LEVEL` — LOG_LEVEL is already a string.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d '
                      '%(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(asctime)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': LOG_LEVEL,
            'class': 'logging.StreamHandler',
        },
        'app_handler': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOG_DIR, 'fst_web.app.log'),
            'maxBytes': 1024 * 1024 * 5,  # 5 MB
            'backupCount': 5,
            'formatter': 'verbose',
        },
        'db_handler': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOG_DIR, 'fst_web.db.log'),
            'maxBytes': 1024 * 1024 * 5,  # 5 MB
            'backupCount': 5,
            'formatter': 'simple',
        },
        'request_handler': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOG_DIR, 'django_request.log'),
            'maxBytes': 1024 * 1024 * 5,  # 5 MB
            'backupCount': 5,
            'formatter': 'simple',
        }
    },
    'loggers': {
        # root logger: everything not matched below goes to the app log
        '': {
            'handlers': ['app_handler'],
            'level': LOG_LEVEL,
            'propagate': False
        },
        'django.request': {
            'handlers': ['request_handler'],
            'level': LOG_LEVEL,
            'propagate': False
        },
        'django.db.backends': {
            'handlers': ['db_handler'],
            'level': DB_DEBUG_LEVEL,
            'propagate': False,
        }
    }
}
# When outgoing mail is configured, also email unhandled request errors
# to the site admins.
if EMAIL_HOST_USER:
    LOGGING['handlers']['mail_admins'] = {
        'level': 'ERROR',
        'class': 'django.utils.log.AdminEmailHandler',
        'include_html': False,
    }
    LOGGING['loggers']['django.request']['handlers'].append('mail_admins')
|
<gh_stars>0
# coding: utf-8
# In[1]:
import networkx as nx
# In[2]:
def createDict(dataset):
    """Index a publication dataset into four lookup dicts.

    Returns (authors_dict, authors_dict_reference, publications_dict,
    conferences_dict) where:
      - authors_dict: author_id -> [name, [(pub_id, pub_int), ...],
        [(conf_id, conf_int), ...]]
      - authors_dict_reference: author name -> author_id
      - publications_dict: publication id -> [author_id, ...]
      - conferences_dict: conference id -> [publication id, ...]
    """
    authors_dict = {}
    authors_dict_reference = {}
    publications_dict = {}
    conferences_dict = {}
    for publication in dataset:
        publications_dict[publication["id_publication"]] = []
        if publication["id_conference"] not in conferences_dict:
            conferences_dict[publication["id_conference"]] = [publication["id_publication"]]
        else:
            conferences_dict[publication["id_conference"]].append(publication["id_publication"])
        # NOTE(review): this stores an empty list under the literal string key
        # "id_conference" on every iteration — looks like a leftover/bug;
        # confirm whether it can be removed.
        conferences_dict["id_conference"] = []
        for aut in publication["authors"]:
            # clean ugly names: skip author strings containing "&"
            if "&" not in aut["author"]:
                publications_dict[publication["id_publication"]].append(aut["author_id"])
                if aut["author_id"] not in authors_dict:
                    # first time we see this author: record name, pubs, confs
                    authors_dict[aut["author_id"]] = [aut["author"],[(publication["id_publication"],publication["id_publication_int"])],
                        [(publication["id_conference"],publication["id_conference_int"])]]
                    authors_dict_reference[aut["author"]] = aut["author_id"]
                else:
                    # known author: append this publication, and the
                    # conference if not already recorded
                    authors_dict[aut["author_id"]][1].append((publication["id_publication"],publication["id_publication_int"]))
                    if (publication["id_conference"],publication["id_conference_int"]) not in authors_dict[aut["author_id"]][2]:
                        authors_dict[aut["author_id"]][2].append((publication["id_conference"],publication["id_conference_int"]))
    return authors_dict, authors_dict_reference, publications_dict, conferences_dict
# In[3]:
def Jaccard(id1, id2, authors_dict):
    """Jaccard similarity of the publication sets of two authors.

    authors_dict maps author_id -> [name, publications, conferences];
    index 1 holds the list of publication tuples compared here.
    """
    pubs_a = set(authors_dict[id1][1])
    pubs_b = set(authors_dict[id2][1])
    shared = pubs_a & pubs_b
    combined = pubs_a | pubs_b
    return len(shared) / len(combined)
# In[11]:
def createGraph(authors_dict, publications_dict):
    """Build the weighted co-authorship graph.

    Nodes are author ids (with name/publications/conferences attributes);
    an edge between two co-authors is weighted by 1 - Jaccard similarity of
    their publication sets. Authors at distance 0 (identical publication
    sets) are recorded in the *similar* dict mapping duplicate -> canonical
    id. Returns (G, similar).
    """
    import networkx as nx
    import itertools
    from Modules import Jaccard
    similar={}
    G = nx.Graph()
    for k,v in authors_dict.items():
        # NOTE: attribute name "pubblications" is misspelled but kept —
        # other code may look it up by this exact key.
        G.add_node(k, id = k, author_name = v[0], pubblications = v[1], conferences = v[2])
    for publication, authors in publications_dict.items():
        try:
            # connect every pair of co-authors of this publication
            for couple in itertools.combinations(authors, 2):
                author_1 = couple[0]
                author_2 = couple[1]
                w = 1 - Jaccard(author_1, author_2,authors_dict)
                if w == 0:
                    # identical publication sets: remember author_2 as a
                    # duplicate of author_1 (or of author_1's canonical id)
                    if author_1 not in similar:
                        similar[author_2] = author_1
                    else:
                        similar[author_2] = similar[author_1]
                G.add_edge(author_1, author_2, weight= w)
        # NOTE(review): bare except silently drops any failing publication —
        # consider narrowing to KeyError.
        except:
            pass
    return G, similar
# In[5]:
#function that returns the nodes at hop distance d
def neighbors(G, start, d):
visit = []
to_visit = [start]
for i in range(d):
temp = []
for n in to_visit:
if n not in visit:
temp += G.neighbors(n)
visit = list(set(visit).union(to_visit))
to_visit = temp
visit = list(set(visit).union(to_visit))
return visit
# In[6]:
#remove isolated nodes and identical nodes
def removeNodes(G, similar):
import networkx as nx
Gcon = G.copy()
#remove isolated nodes
for node in nx.nodes(Gcon):
if Gcon.degree(node)==0:
Gcon.remove_node(node)
#remove identical nodes (nodes whose distance is 0.0)
for node in similar.keys():
Gcon.remove_node(node)
return Gcon
# In[7]:
def Dijkstra(graph, start):
    """Single-source shortest weighted path lengths from *start*.

    Uses a binary heap; edge weights are read from the "weight" attribute of
    each edge dict. Returns a dict mapping every node of *graph* to its
    shortest distance from *start*; unreachable nodes keep the value None.

    fix: dropped the unused in-function ``import networkx as nx`` (the
    function only needs heapq and duck-typed graph access) and removed
    dead commented-out code.
    """
    from heapq import heappush, heappop

    # distance table; None marks "not yet finalized / unreachable"
    dist = {node: None for node in graph.nodes()}
    queue = [(0, start)]

    while queue:
        path_len, v = heappop(queue)
        if dist[v] is None:  # first pop of v yields its shortest distance
            dist[v] = path_len
            for w, edge in graph[v].items():
                if dist[w] is None:
                    heappush(queue, (path_len + edge["weight"], w))
    return dist
# In[8]:
def aris_subgraph(Gcon, similar):
    """Shortest distances from Aris's node (id 256176) to every node of Gcon.

    If Aris was merged as a duplicate, the canonical id from *similar* is
    used instead.

    fix: the original resolved the remapped id into ``aris`` but then called
    Dijkstra with the hard-coded literal 256176, ignoring the remapping.
    """
    aris = 256176
    if aris in similar:
        aris = similar[aris]
    return Dijkstra(Gcon, aris)
# In[9]:
def distances_aris(p, similar):
    """Prompt for an author id and print its distance from Aris.

    *p* is the distance dict produced by Dijkstra/aris_subgraph; ids merged
    as duplicates are first resolved through *similar*.
    """
    authorid = int(input("Enter Author id: "))
    if authorid in similar:
        authorid = similar[authorid]
    try:
        output = p[authorid]
    except KeyError:  # fix: narrowed from a bare except — only a missing id is expected
        print("The node does not exist")
        return
    if output is None:  # fix: identity check instead of `== None`
        print("The nodes are not connected")
    else:
        print(output)
# In[10]:
def groupNumber(G, Gcon, similar):
    """Assign every node of *G* to the closest of the user-entered seed nodes.

    Prompts for up to 21 space-separated author ids, runs Dijkstra from each
    seed on *Gcon*, and returns a dict mapping each node of *G* to
    (closest seed id, distance) — or an explanatory string when the node is
    not connected to any seed.
    """
    inp = list(map(int,input("Insert author id or enter to stop: ").split()))
    #inp = [234889, 523286, 523285, 256177, 114821]
    if len(inp)>21:
        print("Too many nodes!")
    else:
        GROUP_NUMBER = {}
        dijkstra_list = []  # list of Dijkstra distance dicts, one per seed author
        for author in inp:
            # resolve duplicates to their canonical id before searching
            if author in similar:
                author = similar[author]
            dijkstra_list.append(Dijkstra(Gcon, author))
        for nodeG in G.nodes():
            groups = []
            if nodeG in similar:
                node = similar[nodeG]
            else:
                node = nodeG
            try:
                for i in range(len(dijkstra_list)):
                    # distance of this node from seed i (None if unreachable)
                    shortest_path = dijkstra_list[i][node]
                    if shortest_path != None:
                        groups.append((shortest_path,inp[i]))
            except:  # for the isolated nodes (absent from the Dijkstra dicts)
                pass
            if len(groups) == 0:
                GROUP_NUMBER[nodeG] = "This node is not connected to any of these nodes."
            else:
                # closest seed wins; min compares by distance first
                result = min(groups)
                GROUP_NUMBER[nodeG] =(result[1],result[0])
        return GROUP_NUMBER
# In[ ]:
# In[ ]:
|
import numpy as np
import torch
import torch.nn as nn
from data.datasets.LowResHighResDataset import region_geometry
from networks.modular_downscaling_model.base_modules import ParametricModule
class LocalizedLinearModel(ParametricModule):
    """Bank of independent per-pixel linear models for downscaling.

    One linear model per selected high-resolution pixel, implemented as a
    single grouped ``nn.Conv1d`` (groups == num_models) so all models are
    evaluated in one call. The models and their locations are configured via
    :meth:`set_model_index`, which must be called before :meth:`forward`.
    """

    __options__ = {
        "input_channels": None,        # channels per low-res neighbor (or [lr, hr] pair)
        "output_channels": None,       # outputs per model
        "num_models": 30000,           # number of per-pixel models to train
        "num_nearest_neighbors_lr": 16,   # low-res neighbors fed to each model
        "num_nearest_neighbors_hr": None  # high-res neighbors; defaults to 12x the LR count
    }

    def __init__(self, **kwargs):
        super(LocalizedLinearModel, self).__init__(**kwargs)
        self._require_not_none('input_channels', 'output_channels')
        # geometry-dependent state; populated by set_model_index()
        self.shape_lr = None
        self.shape_hr = None
        self.input_index_lon_lr = None
        self.input_index_lat_lr = None
        self.input_index_lon_hr = None
        self.input_index_lat_hr = None
        self.model_index_lon = None
        self.model_index_lat = None
        self.model_index = None
        if self.num_nearest_neighbors_hr is None:
            self.num_nearest_neighbors_hr = 12 * self.num_nearest_neighbors_lr
        self.model = None

    def set_model_index(self, geometry_lr, geometry_hr, model_index=None):
        """Select the high-res pixels to model and build the grouped Conv1d.

        geometry_lr/geometry_hr may be region_geometry objects or dicts of
        them (in which case the first entry is used). *model_index* optionally
        pre-selects indices into the valid (unmasked) high-res pixels;
        otherwise up to ``num_models`` pixels are sampled at random.
        """
        assert isinstance(geometry_lr, (region_geometry, dict))
        assert isinstance(geometry_hr, (region_geometry, dict))
        if isinstance(geometry_lr, dict):
            geometry_lr = geometry_lr[list(geometry_lr.keys())[0]]
        self.shape_lr = geometry_lr[0].shape
        if isinstance(geometry_hr, dict):
            geometry_hr = geometry_hr[list(geometry_hr.keys())[0]]
        self.shape_hr = geometry_hr.mask.shape
        # lat/lon index grids for the high-res field; mask == 0 marks valid pixels
        index_lon_hr, index_lat_hr = np.meshgrid(np.arange(self.shape_hr[1]), np.arange(self.shape_hr[0]))
        valid_index_lon_hr = index_lon_hr[geometry_hr.mask == 0].astype(int)
        valid_index_lat_hr = index_lat_hr[geometry_hr.mask == 0].astype(int)
        max_num_models = np.sum(1 - geometry_hr.mask)
        if model_index is None:
            model_index = np.arange(max_num_models).astype(int)
            if self.num_models < len(model_index):
                # random subsample, kept sorted for reproducible ordering
                np.random.shuffle(model_index)
                model_index = np.sort(model_index[:self.num_models])
        else:
            assert len(model_index) <= max_num_models
            assert max(model_index) <= max_num_models
        self.model_index = model_index
        self.num_models = len(model_index)
        self.model_index_lon = valid_index_lon_hr[model_index]
        self.model_index_lat = valid_index_lat_hr[model_index]
        if isinstance(self.input_channels, list):
            # separate channel counts for low-res and high-res inputs
            num_features = self.num_nearest_neighbors_lr * self.input_channels[0]
            num_features += self.num_nearest_neighbors_hr * self.input_channels[1]
        else:
            num_features = self.num_nearest_neighbors_lr * self.input_channels
        # groups == num_models makes each output group an independent linear
        # model over its own num_features inputs
        self.model = nn.Conv1d(
            in_channels=self.num_models,
            out_channels=self.output_channels * self.num_models,
            groups=self.num_models,
            kernel_size=num_features
        )

    def forward(self, x):
        """Apply all per-pixel models.

        Assumes x is (batch, num_models, num_features) — TODO confirm against
        the dataset pipeline.
        """
        if self.model is None:
            raise Exception("[ERROR] Need to set geometry before applying the model.")
        output = self.model(x)
        return self._reshape_output(output)

    def _reshape_output(self, model_output):
        """Regroup conv output channels into (batch, output_channels, num_models)."""
        batch_size = model_output.size(0)
        # output = torch.zeros(batch_size, self.output_channels, *self.shape_hr, device=model_output.device)
        model_output = torch.cat(torch.split(model_output, self.output_channels, dim=1), dim=2)
        # output[:, :, self.model_index_lat, self.model_index_lon] = model_output
        return model_output
|
"""
Component that will help set the microsoft face for verify processing.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/image_processing.microsoft_face_identify/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import split_entity_id, callback
from homeassistant.const import STATE_UNKNOWN
from homeassistant.exceptions import HomeAssistantError
from homeassistant.components.microsoft_face import DATA_MICROSOFT_FACE
from homeassistant.components.image_processing import (
PLATFORM_SCHEMA, ImageProcessingEntity, CONF_CONFIDENCE, CONF_SOURCE,
CONF_ENTITY_ID, CONF_NAME, ATTR_ENTITY_ID, ATTR_CONFIDENCE)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async import run_callback_threadsafe
DEPENDENCIES = ['microsoft_face']
_LOGGER = logging.getLogger(__name__)
EVENT_IDENTIFY_FACE = 'identify_face'
ATTR_NAME = 'name'
ATTR_TOTAL_FACES = 'total_faces'
ATTR_KNOWN_FACES = 'known_faces'
CONF_GROUP = 'group'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_GROUP): cv.slugify,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the microsoft face identify platform.

    Creates one MicrosoftFaceIdentifyEntity per configured source camera,
    sharing the Microsoft Face API client stored in hass.data.
    """
    api = hass.data[DATA_MICROSOFT_FACE]
    face_group = config[CONF_GROUP]
    confidence = config[CONF_CONFIDENCE]

    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(MicrosoftFaceIdentifyEntity(
            camera[CONF_ENTITY_ID], api, face_group, confidence,
            camera.get(CONF_NAME)
        ))

    yield from async_add_devices(entities)
class ImageProcessingFaceIdentifyEntity(ImageProcessingEntity):
    """Base entity class for face identify/verify image processing."""

    def __init__(self):
        """Initialize base face identify/verify entity."""
        self.known_faces = {}  # last scan data: {name: confidence}
        self.total_faces = 0  # face count from the last scan

    @property
    def state(self):
        """Return the state of the entity.

        The state is the name of the known face with the highest confidence
        from the last scan, or STATE_UNKNOWN when no face passed the
        confidence threshold.
        """
        confidence = 0
        face_name = STATE_UNKNOWN

        # search the face with the highest verify confidence
        for i_name, i_co in self.known_faces.items():
            if i_co > confidence:
                confidence = i_co
                face_name = i_name
        return face_name

    @property
    def state_attributes(self):
        """Return device specific state attributes."""
        attr = {
            ATTR_KNOWN_FACES: self.known_faces,
            ATTR_TOTAL_FACES: self.total_faces,
        }

        return attr

    def process_faces(self, known, total):
        """Send event with detected faces and store data.

        Thread-safe wrapper around async_process_faces for use outside the
        event loop.
        """
        run_callback_threadsafe(
            self.hass.loop, self.async_process_faces, known, total
        ).result()

    @callback
    def async_process_faces(self, known, total):
        """Send event with detected faces and store data.

        known are a dict in follow format:
          { 'name': confidence }

        This method must be run in the event loop.
        """
        # keep only detections at or above the configured confidence
        detect = {name: confidence for name, confidence in known.items()
                  if confidence >= self.confidence}

        # send one identify event per confident detection
        for name, confidence in detect.items():
            self.hass.async_add_job(
                self.hass.bus.async_fire, EVENT_IDENTIFY_FACE, {
                    ATTR_NAME: name,
                    ATTR_ENTITY_ID: self.entity_id,
                    ATTR_CONFIDENCE: confidence,
                }
            )

        # update entity store
        self.known_faces = detect
        self.total_faces = total
class MicrosoftFaceIdentifyEntity(ImageProcessingFaceIdentifyEntity):
    """Microsoft face api entity for identify."""

    def __init__(self, camera_entity, api, face_group, confidence, name=None):
        """Initialize the Microsoft Face identify entity.

        fix: the docstring previously said "openalpr" — a copy-paste leftover
        from another platform.
        """
        super().__init__()

        self._api = api
        self._camera = camera_entity
        self._confidence = confidence
        self._face_group = face_group

        if name:
            self._name = name
        else:
            self._name = "MicrosoftFace {0}".format(
                split_entity_id(camera_entity)[1])

    @property
    def confidence(self):
        """Return minimum confidence for send events."""
        return self._confidence

    @property
    def camera_entity(self):
        """Return camera entity id from process pictures."""
        return self._camera

    @property
    def name(self):
        """Return the name of the entity."""
        return self._name

    @asyncio.coroutine
    def async_process_image(self, image):
        """Detect and identify faces on *image*.

        This method is a coroutine.
        """
        detect = None
        try:
            face_data = yield from self._api.call_api(
                'post', 'detect', image, binary=True)

            if face_data is None or len(face_data) < 1:
                return

            face_ids = [data['faceId'] for data in face_data]
            detect = yield from self._api.call_api(
                'post', 'identify',
                {'faceIds': face_ids, 'personGroupId': self._face_group})

        except HomeAssistantError as err:
            _LOGGER.error("Can't process image on microsoft face: %s", err)
            return

        # parse data — fix: local variable was misspelled "knwon_faces"
        known_faces = {}
        total = 0
        for face in detect:
            total += 1
            if len(face['candidates']) == 0:
                continue

            data = face['candidates'][0]
            name = ''
            # map the matched personId back to its stored display name
            for s_name, s_id in self._api.store[self._face_group].items():
                if data['personId'] == s_id:
                    name = s_name
                    break

            known_faces[name] = data['confidence'] * 100

        # process data
        self.async_process_faces(known_faces, total)
|
<reponame>MervmessInc/sfdx-scratch-org-builder<gh_stars>1-10
# org_manager.py
__version__ = '0.0.1'
import json
import logging
import os
import sys
import threading
import traceback
import sfdx_cli_utils as sfdx
# Set the working directory to the location of the file, so the cache files
# (org_list.json, sfdx-project.json) are resolved next to this script.
#
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)

# Set the Log level
#
logging.basicConfig(
    filename='debug_org_manager.log',
    level=logging.ERROR,
    format='%(asctime)s - %(message)s',
    datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger()

# Config: ANSI color escape codes used when printing org status
#
TGREEN = "\033[1;32m"
TRED = "\033[1;31m"
ENDC = "\033[m"
#
#
#
def clean_org_data(org):
    """Fill in any missing display fields on an sfdx org record (in place).

    Returns the same dict for convenience.
    """
    defaults = {
        "alias": "",
        "isDevHub": False,
        "defaultMarker": "",
        "status": "Active",
        "expirationDate": "",
    }
    for key, value in defaults.items():
        org.setdefault(key, value)
    return org
def get_org_list():
    """Return the org list, preferring the on-disk cache.

    When a cached org_list.json exists it is returned immediately and a
    background thread refreshes the cache for the next run; otherwise the
    sfdx CLI is queried synchronously.
    """
    if os.path.isfile("org_list.json"):
        org_list = json.load(open("org_list.json", "r"))
        # refresh the cache in the background; result is used next time
        t = threading.Thread(target=update_org_list)
        t.start()
    else:
        org_list = update_org_list()

    return org_list
def get_orgs_map(org_list):
    """Build a 1-based {index: org} map from an sfdx org-list result.

    Non-scratch orgs come first, followed by scratch orgs. Also returns the
    index of the default-username scratch org (defaultMarker "(U)"), falling
    back to 1 when none is marked.
    """
    orgs = {}
    defaultusername = 1
    index = 1

    for raw_org in org_list['result']['nonScratchOrgs']:
        orgs[index] = clean_org_data(raw_org)
        index += 1

    for raw_org in org_list['result']['scratchOrgs']:
        cleaned = clean_org_data(raw_org)
        if cleaned['defaultMarker'] == "(U)":
            defaultusername = index
        orgs[index] = cleaned
        index += 1

    return orgs, defaultusername
def parse_sfdx_project():
    """Return the default package directory path from ./sfdx-project.json.

    Returns '' when the file is absent or no directory is marked default.

    fix: the file handle from ``json.load(open(...))`` was never closed;
    a context manager guarantees it is.
    """
    defaultpath = ''
    if os.path.isfile("sfdx-project.json"):
        with open("sfdx-project.json", "r") as fh:
            sfdx_project = json.load(fh)
        for pkg_dir in sfdx_project['packageDirectories']:
            if pkg_dir['default']:
                defaultpath = pkg_dir['path']
    return defaultpath
def print_org_details(idx, o):
    """Print one formatted table row for org *o* at list position *idx*.

    The status column is colored green for "Active" orgs, red otherwise.
    """
    status_color = TRED if o['status'] != "Active" else TGREEN
    row = "{:>3} {:<3} {:<20} {:<45} {:<12} {:<10}".format(
        idx,
        o['defaultMarker'],
        o['alias'],
        o['username'],
        o['expirationDate'],
        status_color + o['status'] + ENDC,
    )
    print(row)
def print_org_list(orgs):
    """Print a header plus one formatted row per org in *orgs*."""
    row_fmt = "{:>3} {:<3} {:<20} {:<45} {:<12} {:<10}"
    print(row_fmt.format("idx", "", "Alias", "Username", "Expiration", "Status"))
    print(row_fmt.format("---", "", "-----", "--------", "----------", "------"))
    for idx, org in orgs.items():
        print_org_details(idx, org)
def show_org_list(orgs):
    """Display the org table and prompt for a selection.

    Returns the raw user input, or 'Q' when the input is empty.
    """
    print()
    print_org_list(orgs)
    print()
    return input("Enter choice 'idx' or 'U' > ") or 'Q'
def update_org_list():
    """Refresh the cached org list from the sfdx CLI and return it.

    fix: ``json.dump(org_list, open(...))`` leaked the file handle; the
    context manager flushes and closes it deterministically.
    """
    org_list = sfdx.org_list()
    with open("org_list.json", "w") as fh:
        json.dump(org_list, fh)
    return org_list
def main():
    """Interactive entry point: list orgs, let the user pick one, open it."""
    logging.debug("main()")
    try:
        org_list = get_org_list()
        orgs, defaultusername = get_orgs_map(org_list)
        choice = show_org_list(orgs)
        if choice.isnumeric():
            idx = int(choice)
        elif choice.upper() == 'U':
            # "(U)" marker: the default-username scratch org
            idx = defaultusername
        elif choice.isalpha():
            # any other letter (e.g. 'Q') quits
            sys.exit(0)
        # NOTE(review): a non-alphanumeric choice (e.g. "!") leaves idx
        # unbound; the resulting NameError is swallowed by the broad except.
        org = orgs.get(idx)
        defaultpath = parse_sfdx_project()  # NOTE(review): currently unused
        # prefer the alias for display when one is set
        username = org['username']
        if len(org['alias']) > 0:
            username = org['alias']
        print()
        action = input(f"[O]pen '{username}' > ") or 'O'
        if action.upper() == 'O' or action.upper() == 'OPEN':
            logging.error(f"~~~ Opening Org ({username}) ~~~")
            sfdx.org_open(org['username'])
        elif action.upper() == 'Q' or action.upper() == 'QUIT':
            sys.exit(0)
    except Exception:
        traceback.print_exc()


if __name__ == '__main__':
    main()
|
import unittest
import os
import shutil
from LF2CRLF import LF2CRLF
class LF2CRLF_Tests(unittest.TestCase):
    """Unit tests for the LF2CRLF line-ending converter.

    Each test loads a fixture from the 'Test Files' directory, checks the
    detected line-ending byte sequences, and verifies that convert()
    changes the content only for files that actually use Unix (LF) endings.
    """

    def _check_conversion(self, filename, win_line_end, unix_line_end,
                          unix_endings):
        """Assert detection results for *filename* and that convert()
        modifies the content iff the file uses Unix endings."""
        lf2crlf = LF2CRLF(os.path.join('Test Files', filename))
        self.assertEqual(lf2crlf.win_line_end, win_line_end)
        self.assertEqual(lf2crlf.unix_line_end, unix_line_end)
        if unix_endings:
            self.assertTrue(lf2crlf.unix_endings)
        else:
            self.assertFalse(lf2crlf.unix_endings)
        content = lf2crlf.content
        lf2crlf.convert()
        if unix_endings:
            self.assertNotEqual(content, lf2crlf.content)
        else:
            self.assertEqual(content, lf2crlf.content)

    def test_ansi_windows_line_ending(self):
        self._check_conversion('ANSI Windows.txt', b'\r\n', b'\n', False)

    def test_unix_read_only(self):
        self._check_conversion('Unix Read Only.txt', b'\r\0\n\0', b'\n\0', True)

    def test_utf8_single_line(self):
        self._check_conversion('UTF-8 Single Line.txt', b'\r\n', b'\n', False)

    def test_utf8_unix(self):
        self._check_conversion('UTF-8 Unix.txt', b'\r\n', b'\n', True)

    def test_utf8_windows(self):
        self._check_conversion('UTF-8 Windows.txt', b'\r\n', b'\n', False)

    def test_utf16_be_unix(self):
        self._check_conversion('UTF-16 BE Unix.txt', b'\0\r\0\n', b'\0\n', True)

    def test_utf16_be_windows(self):
        self._check_conversion('UTF-16 BE Windows.txt', b'\0\r\0\n', b'\0\n', False)

    def test_utf16_le_unix(self):
        self._check_conversion('UTF-16 LE Unix.txt', b'\r\0\n\0', b'\n\0', True)

    def test_utf16_le_windows(self):
        self._check_conversion('UTF-16 LE Windows.txt', b'\r\0\n\0', b'\n\0', False)

    def test_save(self):
        """Round-trip: converting and saving a Unix-ended file must change
        the bytes on disk."""
        source = os.path.join('Test Files', 'UTF-8 Unix.txt')
        target = os.path.join('Test Files', 'save test.txt')
        shutil.copy2(source, target)
        try:
            with open(target, 'rb') as file:
                content = file.read()
            lf2crlf = LF2CRLF(target)
            lf2crlf.convert()
            lf2crlf.save()
            with open(target, 'rb') as file:
                new_content = file.read()
        finally:
            # Always clean up the scratch copy, even if convert()/save()
            # raises or an assertion fails; the original left it behind.
            os.remove(target)
        self.assertNotEqual(content, new_content)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
|
<reponame>THU-luvision/Occuseg
import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp
from functools import partial
import torch.nn.functional as F
import logging
from sklearn.neighbors import KDTree
import pdb
from torch_scatter import scatter_mean,scatter_std,scatter_add,scatter_max
# Module-wide training logger: root handlers at INFO, this logger at DEBUG.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('training logger')
logger.setLevel(logging.DEBUG)
class ScanNet(object):
    """Dataset wrapper for preprocessed ScanNet point-cloud scenes.

    Loads .pth scene files matched by glob patterns, applies training-time
    augmentation (rotation, mirroring, elastic distortion, cropping) and
    collates batched sparse-tensor inputs plus instance/region targets.
    """

    def __init__(self,
                 train_pth_path,
                 val_pth_path,
                 config):
        # train_pth_path may be a single glob pattern or a list of patterns.
        if isinstance(train_pth_path,list):
            self.train_pths = []
            for train_pth in train_pth_path:
                self.train_pths += glob.glob(train_pth)
        else:
            self.train_pths = glob.glob(train_pth_path)
        self.val_pths = glob.glob(val_pth_path)
        self.train, self.val = [], []
        # 1-D averaging kernels used by elastic() for separable smoothing
        # along each of the three axes.
        self.blur0 = np.ones((3, 1, 1)).astype('float32') / 3
        self.blur1 = np.ones((1, 3, 1)).astype('float32') / 3
        self.blur2 = np.ones((1, 1, 3)).astype('float32') / 3
        # Hyper-parameters copied out of the config dict.
        self.scale = config['scale']
        self.val_reps = config['val_reps']
        self.batch_size = config['batch_size']
        self.dimension = config['dimension']
        self.full_scale = config['full_scale']
        self.use_normal = config['use_normal']
        self.use_elastic = config['use_elastic']
        self.use_feature = config['use_feature']
        self.use_rotation_noise = config['use_rotation_noise']
        self.regress_sigma = config['regress_sigma']
        self.PRINT_ONCE_FLAG = 0
        # Pin every RNG so augmentation sequences are reproducible.
        torch.manual_seed(100)  # cpu
        torch.cuda.manual_seed(100)  # gpu
        np.random.seed(100)  # numpy
        torch.backends.cudnn.deterministic = True  # cudnn
def elastic(self, x, gran, mag):
if not self.use_elastic:
return x
bb = np.abs(x).max(0).astype(np.int32) // gran + 3
noise = [np.random.randn(bb[0], bb[1], bb[2]).astype('float32') for _ in range(3)]
noise = [scipy.ndimage.filters.convolve(n, self.blur0, mode='constant', cval=0) for n in noise]
noise = [scipy.ndimage.filters.convolve(n, self.blur1, mode='constant', cval=0) for n in noise]
noise = [scipy.ndimage.filters.convolve(n, self.blur2, mode='constant', cval=0) for n in noise]
noise = [scipy.ndimage.filters.convolve(n, self.blur0, mode='constant', cval=0) for n in noise]
noise = [scipy.ndimage.filters.convolve(n, self.blur1, mode='constant', cval=0) for n in noise]
noise = [scipy.ndimage.filters.convolve(n, self.blur2, mode='constant', cval=0) for n in noise]
ax = [np.linspace(-(b - 1) * gran, (b - 1) * gran, b) for b in bb]
interp = [scipy.interpolate.RegularGridInterpolator(ax, n, bounds_error=0, fill_value=0) for n in noise]
def g(x_):
return np.hstack([i(x_)[:, None] for i in interp])
return x + g(x) * mag
    def trainMerge(self, tbl, train):
        """Collate function for the training DataLoader.

        *tbl* is a list of scene indices into *train*; each scene dict holds
        'coords', 'colors', 'w' (per-point [semantic, instance] labels) and
        optionally 'normals', 'depth' and 'region'. Applies random rotation,
        mirroring, elastic distortion and cube cropping, then assembles the
        batched sparse inputs, labels, instance masks/sizes, region indices
        and centre-offset regression targets into one dict.
        """
        locs = []
        feats = []
        labels = []
        normals = []
        pth_files = []
        index_list = []
        totalPoints = 0
        sizes = []
        masks = []
        offsets = []
        displacements = []
        regions = []
        region_masks = []
        region_indexs = []
        instance_masks = []
        instance_sizes = []
        for idx, i in enumerate(tbl):
            a, b, c = train[i]['coords'], train[i]['colors'], train[i]['w']
            # NOTE(review): the key tested here is 'normal' but the value
            # read is 'normals' -- if a scene ever contains 'normal' without
            # 'normals' this raises KeyError, and scenes storing 'normals'
            # fall into the else branch. Confirm against prepare_data.
            if 'normal' in train[i]:
                d = train[i]['normals']
            else:
                d = train[i]['coords']
            if ('depth' in train[i]):
                e = train[i]['depth']
            else:
                e = train[i]['coords']
            if('region' in train[i]):
                region_parts = train[i]['region']
            else:
                region_parts = c[:,1]
            pth_files.append(self.train_pths[i])
            # checked
            # logger.debug("CHECK RANDOM SEED(np seed): sample id {}".format(np.random.randn(3, 3)))
            # Build the augmentation matrix: optional rotation noise, random
            # x-mirroring, scaling to voxel units, random rotation about z.
            m = np.eye(3)
            if (self.use_rotation_noise):
                m = m + np.random.randn(3, 3) * 0.1
            m[0][0] *= np.random.randint(0, 2) * 2 - 1
            m *= self.scale
            # m *= (1 + np.random.randn(1) * 0.2) # add scale distortion, which might be useful?
            theta = np.random.rand() * 2 * math.pi
            m = np.matmul(m, [[math.cos(theta), math.sin(theta), 0], [-math.sin(theta), math.cos(theta), 0], [0, 0, 1]])
            # align with origin
            # a = a - np.mean(a,0)
            """
            theta=np.random.rand()*2*math.pi * 0.05
            m=np.matmul(m,[[math.cos(theta),0,math.sin(theta)],[0,1,0],[-math.sin(theta),0,math.cos(theta)]])
            theta=np.random.rand()*2*math.pi * 0.05
            m=np.matmul(m,[[1,0,0],[0,math.cos(theta),math.sin(theta)],[0,-math.sin(theta),math.cos(theta)]])
            """
            a = np.matmul(a, m)
            d = np.matmul(d, m) / self.scale
            # Two elastic distortion passes at different granularities.
            random_scale = np.random.rand()
            a = self.elastic(a, 6 * self.scale // 50, random_scale * 40 * self.scale / 50)
            random_scale = np.random.rand()
            a = self.elastic(a, 20 * self.scale // 50, random_scale * 160 * self.scale / 50)
            m = a.min(0)
            M = a.max(0)
            q = M - m
            # offset=-m+np.clip(full_scale-M+m-0.001,0,None)*np.random.rand(3)+np.clip(full_scale-M+m+0.001,None,0)*np.random.rand(3)
            # Shift the cloud near the grid origin with a small random jitter.
            offset = (np.min(a[:, 0]) - 10, np.min(a[:, 1]) - 10, np.min(a[:, 2]) - 10) + np.random.rand(3)
            a = a - offset
            # Keep only points inside the [0, full_scale) cube.
            idxs = (a.min(1) >= 0) * (a.max(1) < self.full_scale)
            # random drop part of the point clouds
            """
            pre_total_points = 0
            post_total_points = 0
            if (np.sum(idxs) > 400000):
                pre_total_points = np.sum(idxs)
                idxs = idxs * ((a[:, 0] < np.median(a[:, 0])) > 0)
                post_total_points = np.sum(idxs)
            if (np.sum(idxs) > 400000):
                idxs = idxs * ((a[:, 1] < np.median(a[:, 1])) > 0)
                post_total_points = np.sum(idxs)
            if (np.sum(idxs) > 400000):
                filtered_a = a.copy()
                filtered_a = a[idxs]
                idxs = idxs * ((a[:, 0] < np.median(filtered_a[:, 0]))> 0)
                post_total_points = np.sum(idxs)
            if (np.sum(idxs) > 400000):
                filtered_a = a.copy()
                filtered_a = a[idxs]
                idxs = idxs * ((a[:, 1] < np.median(filtered_a[:, 1]))> 0)
                post_total_points = np.sum(idxs)
            """
            # randomly zoom out one class
            # idxs[c == (3+np.random.randint(14))] = 0
            # idxs[c == (3-np.random.randint(14))] = 0
            a = a[idxs]
            b = b[idxs]
            c = c[idxs].astype(np.int32)
            d = d[idxs]
            e = e[idxs]
            region_numpy = region_parts[idxs]
            region = torch.from_numpy(region_numpy)
            [region_index, region_mask] = np.unique(region_numpy, False, True)
            region_mask = torch.from_numpy(region_mask)
            region_index = torch.from_numpy(region_index)
            # a=torch.from_numpy(a).long()
            # generate masks for each instance:
            # Densify instance ids to 0..K-1 after cropping.
            c[:,1] = np.unique(c[:,1], False, True)[1]
            instance_mask = c[:,1]
            instance_size = scatter_add(torch.ones([a.shape[0]]), torch.Tensor(instance_mask).long(), dim = 0)
            instance_size = torch.gather(instance_size, dim = 0, index = torch.Tensor(instance_mask).long())
            # One-hot point-to-instance assignment matrix (N x K).
            mask = torch.zeros((a.shape[0], np.max(c[:,1]) + 1), dtype=torch.float32)
            mask[torch.arange(a.shape[0]), c[:,1].astype(np.int32)] = 1
            a = torch.from_numpy(a).float()
            e = torch.from_numpy(e).float()
            displacement = torch.zeros([a.shape[0],3], dtype = torch.float32)
            offset = torch.zeros(a.shape[0], dtype = torch.float32)
            # Per-instance centre-offset regression targets: Gaussian
            # centredness score and displacement towards the instance mean.
            for count in range(mask.shape[1]):
                indices = mask[:,count] == 1
                # cls = torch.from_numpy(c[:,0])[indices][0]
                # if(cls > 1):
                # random_shift = (torch.rand(3)) * self.scale * 3 # randomly shift 3 meters
                # random_shift[2] = 0
                # a[indices,:] += random_shift
                mean = torch.mean(a[indices,:],dim = 0)
                distance = torch.norm(a[indices,:] - mean,dim = 1)
                offset[indices] = torch.exp(- (distance / self.scale/ self.regress_sigma ) ** 2 )
                displacement[indices,:] = (a[indices,:] - mean) / self.scale
            totalPoints = totalPoints + a.shape[0]
            # if totalPoints < 1500000:
            if True:
                # Append the batch index as a fourth coordinate column.
                locs.append(torch.cat([a, torch.FloatTensor(a.shape[0], 1).fill_(idx)], 1))
                lf = a - torch.mean(a, dim = 0).view(1,-1).expand_as(a)
                l_feature = lf.div(torch.norm(lf, p=2, dim=1).view(-1,1).expand_as(lf))
                color = torch.from_numpy(b).float() + torch.randn(3).float() * 0.1
                color = torch.clamp(color, -1, 1)
                # Assemble the per-point feature channels selected by the
                # use_feature flag string (l/c/n/d/h).
                tmp_feature = []
                if 'l' in self.use_feature:
                    tmp_feature.append(l_feature)
                if 'c' in self.use_feature:
                    tmp_feature.append(color)
                if 'n' in self.use_feature:
                    tmp_feature.append(torch.from_numpy(d).float())
                if 'd' in self.use_feature:
                    tmp_feature.append(e)
                if 'h' in self.use_feature:
                    tmp_feature.append(a[:, 2:3])
                # concat in channel dim
                tmp_feature = torch.cat(tmp_feature, dim=1)
                feats.append(tmp_feature)
                sizes.append(torch.tensor(np.unique(c[:,1]).size))
                masks.append(mask)
                regions.append(region)
                region_masks.append(region_mask)
                region_indexs.append(region_index)
                labels.append(torch.from_numpy(c))
                normals.append(torch.from_numpy(d).float().cpu())
                offsets.append(offset)
                displacements.append(displacement)
                instance_masks.append(torch.Tensor(instance_mask))
                instance_sizes.append(instance_size)
                index_list.append(torch.from_numpy(idxs.astype(int)))
            else:
                print("lost file for training: ", self.train_pths[i])
        # Concatenate the per-scene pieces into batch tensors.
        local_batch_size = len(locs)
        locs = torch.cat(locs, 0)
        feats = torch.cat(feats, 0)
        labels = torch.cat(labels, 0)
        sizes = torch.stack(sizes, 0)
        normals = torch.cat(normals, 0)
        offsets = torch.cat(offsets,0)
        displacements = torch.cat(displacements, 0)
        instance_masks = torch.cat(instance_masks, 0)
        instance_sizes = torch.log(torch.cat(instance_sizes, 0))
        regions = torch.cat(regions,0)
        region_masks = torch.cat(region_masks, 0)
        region_indexs = torch.cat(region_indexs, 0)
        if not self.use_normal:
            normals = torch.zeros(3, 3).float()
        return {'x': [locs, feats, normals, local_batch_size], 'y': labels.long(), 'id': tbl, 'elastic_locs': a,
                'pth_file': pth_files,
                'idxs': index_list,
                'masks': masks,
                'instance_masks':instance_masks,
                'instance_sizes':instance_sizes,
                'sizes':sizes,
                'offsets': offsets.view(-1,1),
                'displacements':displacements,
                'regions':regions,
                'region_masks':region_masks,
                'region_indexs':region_indexs}
    def valMerge(self, tbl, val, valOffsets):
        """Collate function for the validation DataLoader.

        Like trainMerge but with milder augmentation (no elastic distortion
        or rotation noise); additionally returns 'point_ids' mapping each
        surviving point back to its global index via *valOffsets*.
        """
        locs = []
        feats = []
        labels = []
        point_ids = []
        pth_files = []
        index_list = []
        normals = []
        sizes = []
        masks = []
        offsets = []
        displacements = []
        regions = []
        region_masks = []
        region_indexs = []
        totalPoints = 0
        instance_masks = []
        instance_sizes = []
        for idx, i in enumerate(tbl):
            a, b, c = val[i]['coords'], val[i]['colors'], val[i]['w']
            # NOTE(review): key tested is 'normal' but value read is
            # 'normals' -- same mismatch as trainMerge; confirm intent.
            if 'normal' in val[i]:
                d = val[i]['normals']
            else:
                d = val[i]['coords']
            if ('depth' in val[i]):
                e = val[i]['depth']
            else:
                e = val[i]['coords']
            if('region' in val[i]):
                region_parts = val[i]['region']
            else:
                region_parts = c[:,1]
            pth_files.append(self.val_pths[i])
            # Random x-mirror + rotation about z, scaled to voxel units.
            m = np.eye(3)
            m[0][0] *= np.random.randint(0, 2) * 2 - 1
            m *= self.scale
            theta = np.random.rand() * 2 * math.pi
            # theta = np.random.randint(4) * 0.5 * math.pi
            m = np.matmul(m, [[math.cos(theta), math.sin(theta), 0], [-math.sin(theta), math.cos(theta), 0], [0, 0, 1]])
            # align with origin
            # a = a - np.mean(a,0)
            a = np.matmul(a, m) + self.full_scale / 2 + np.random.uniform(-2, 2, 3)
            d = np.matmul(d, m) / self.scale
            m = a.min(0)
            M = a.max(0)
            offset = (np.min(a[:, 0]) - 10, np.min(a[:, 1]) - 10, np.min(a[:, 2]) - 10) + np.random.rand(3)
            a -= offset
            # idxs=(a.min(1)>=0)*(a.max(1)<full_scale)
            # offset = ( np.min(a[:,0])-10,np.min(a[:,1])-10,np.min(a[:,2])-10) + np.random.rand(3)
            # a = a - offset
            # Keep only points inside the [0, full_scale) cube.
            idxs = (a.min(1) >= 0) * (a.max(1) < self.full_scale)
            # if(np.max(a[:,0]) > 2000 or np.max(a[:,1]) > 2000 or np.max(a[:,2]) > 1000):
            # print("warning! scale too large!")
            a = a[idxs]
            b = b[idxs]
            c = c[idxs].astype(np.int32)
            d = d[idxs]
            e = e[idxs]
            region_numpy = region_parts[idxs]
            region = torch.from_numpy(region_numpy)
            [region_index, region_mask] = np.unique(region_numpy, False, True)
            region_mask = torch.from_numpy(region_mask)
            region_index = torch.from_numpy(region_index)
            # Densify instance ids to 0..K-1 after cropping.
            c[:,1] = np.unique(c[:,1], False, True)[1]
            instance_mask = c[:,1]
            instance_size = scatter_add(torch.ones([a.shape[0]]), torch.Tensor(instance_mask).long(), dim = 0)
            instance_size = torch.gather(instance_size, dim = 0, index = torch.Tensor(instance_mask).long())
            # One-hot point-to-instance assignment matrix (N x K).
            mask = torch.zeros((a.shape[0], np.max(c[:,1]) + 1), dtype=torch.float32)
            mask[torch.arange(a.shape[0]), c[:,1]] = 1
            a = torch.from_numpy(a).float()
            e = torch.from_numpy(e).float()
            displacement = torch.zeros([a.shape[0],3], dtype = torch.float32)
            offset = torch.zeros(a.shape[0], dtype = torch.float32)
            # Per-instance centre-offset regression targets.
            for count in range(mask.shape[1]):
                indices = mask[:,count] == 1
                mean = torch.mean(a[indices,:],dim = 0)
                distance = torch.norm(a[indices,:] - mean,dim = 1)
                offset[indices] = torch.exp(- (distance / self.scale/ self.regress_sigma ) ** 2 )
                displacement[indices,:] = (a[indices,:] - mean) / self.scale
            totalPoints = totalPoints + a.shape[0]
            # if totalPoints < 1500000:
            if True:
                # Append the batch index as a fourth coordinate column.
                locs.append(torch.cat([a, torch.FloatTensor(a.shape[0], 1).fill_(idx)], 1))
                lf = a - torch.mean(a, dim = 0).view(1,-1).expand_as(a)
                l_feature = lf.div(torch.norm(lf, p=2, dim=1).view(-1,1).expand_as(lf))
                color = torch.from_numpy(b).float() + torch.randn(3).float() * 0.1
                color = torch.clamp(color, -1, 1)
                # Assemble the per-point feature channels selected by the
                # use_feature flag string (l/c/n/d/h).
                tmp_feature = []
                if 'l' in self.use_feature:
                    tmp_feature.append(l_feature)
                if 'c' in self.use_feature:
                    tmp_feature.append(color)
                if 'n' in self.use_feature:
                    tmp_feature.append(torch.from_numpy(d).float())
                if 'd' in self.use_feature:
                    tmp_feature.append(e)
                if 'h' in self.use_feature:
                    tmp_feature.append(a[:, 2:3])
                # concat in channel dim
                tmp_feature = torch.cat(tmp_feature, dim=1)
                feats.append(tmp_feature.float())
                labels.append(torch.from_numpy(c))
                masks.append(mask)
                sizes.append(torch.tensor(np.unique(c[:,1]).size))
                normals.append(torch.from_numpy(d).float().cpu())
                point_ids.append(torch.from_numpy(np.nonzero(idxs)[0] + valOffsets[i]))
                index_list.append(torch.from_numpy(idxs.astype(int)))
                offsets.append(offset)
                displacements.append(displacement)
                regions.append(region)
                region_masks.append(region_mask)
                region_indexs.append(region_index)
                instance_sizes.append(instance_size)
                instance_masks.append(torch.Tensor(instance_mask))
            else:
                print("lost file for training: ", self.val_pths[i])
        # Concatenate the per-scene pieces into batch tensors.
        local_batch_size = len(locs)
        locs = torch.cat(locs, 0)
        feats = torch.cat(feats, 0)
        labels = torch.cat(labels, 0)
        sizes = torch.stack(sizes, 0)
        point_ids = torch.cat(point_ids, 0)
        normals = torch.cat(normals, 0)
        offsets = torch.cat(offsets,0)
        displacements = torch.cat(displacements,0)
        regions = torch.cat(regions,0)
        region_masks = torch.cat(region_masks,0)
        region_indexs = torch.cat(region_indexs,0)
        instance_masks = torch.cat(instance_masks, 0)
        instance_sizes = torch.log(torch.cat(instance_sizes, 0))
        if not self.use_normal:
            normals = torch.zeros(3, 3).float()
        return {'x': [locs, feats, normals, local_batch_size], 'y': labels.long(), 'id': tbl, 'point_ids': point_ids,
                'pth_file': pth_files,
                'idxs': index_list,
                'masks': masks,
                'instance_masks':instance_masks,
                'instance_sizes':instance_sizes,
                'sizes':sizes,
                'offsets': offsets.view(-1,1),
                'displacements': displacements,
                'regions':regions,
                'region_masks':region_masks,
                'region_indexs':region_indexs}
    def load_data(self):
        """Load all .pth scenes into memory and build the DataLoaders.

        Remaps instance ids (column 1 of 'w') to a dense 0..K range in both
        splits, records cumulative per-scene point offsets and flattened
        semantic labels for validation, and returns
        (valOffsets, train_data_loader, val_data_loader, valLabels).
        """
        # Each .pth file is loaded in a worker process; the pass-through
        # collate makes every loader element one raw scene dict.
        for x in torch.utils.data.DataLoader(
                self.train_pths,
                collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()):
            self.train.append(x)
        for x in torch.utils.data.DataLoader(
                self.val_pths,
                collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()):
            self.val.append(x)
        print('Training examples:', len(self.train))
        print('Validation examples:', len(self.val))
        if len(self.train) == 0 or len(self.val) == 0:
            raise ValueError('Please prepare_data.py to generate training files')
        # Densify instance ids in place and track the largest count seen.
        max_instance_train = 0
        for idx,x in enumerate(self.train):
            self.train[idx]['w'][:,1] = np.unique(x['w'][:,1], False, True)[1]
            max_instance_train = np.max([max_instance_train,np.max(self.train[idx]['w'][:,1]) ])
        self.max_instance_train = max_instance_train + 1
        # The loaders iterate over scene indices; the merge functions pull
        # the actual data out of self.train / self.val.
        train_data_loader = torch.utils.data.DataLoader(
            list(range(len(self.train))), batch_size=self.batch_size,
            collate_fn=partial(self.trainMerge, train=self.train), num_workers=10, shuffle=True)
        valOffsets = [0]
        valLabels = []
        for idx, x in enumerate(self.val):
            self.val[idx]['w'][:,1] = np.unique(x['w'][:,1], False, True)[1]
            valOffsets.append(valOffsets[-1] + x['w'].shape[0])
            valLabels.append(x['w'][:,0].astype(np.int32))
            # d = x[2].astype(np.int32)
        valLabels = np.hstack(valLabels)
        val_data_loader = torch.utils.data.DataLoader(
            list(range(len(self.val))), batch_size=self.batch_size,
            collate_fn=partial(self.valMerge, val=self.val, valOffsets=valOffsets), num_workers=10,
            shuffle=True)
        return valOffsets, train_data_loader, val_data_loader, valLabels
|
#!/usr/bin/env python
import sys, argparse
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
    """Build and parse the command-line arguments for this script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("csv_file", default=None, help="CSV file with level runtimes")
    arg_parser.add_argument("--scale_size", default=False, action="store_true",
                            help="Scale point size by serial level runtime")
    arg_parser.add_argument("--average", default=False, action="store_true",
                            help="Draw average lines")
    arg_parser.add_argument("-f", default=None, help="Output filename")
    return arg_parser.parse_args()
def main():
    """Plot parallel speed-up per level width from a CSV of level runtimes.

    Expects columns: Width, serial_fwd, serial_bck, parallel_fwd,
    parallel_bck. Produces a log-x scatter plot of speed-up vs. level
    width (optionally with per-width average lines) and either shows it
    or saves it to the -f filename.

    Ported from Python 2: print statements, dict.iteritems() and xrange
    are replaced by their Python 3 equivalents throughout.
    """
    args = parse_args()
    # Read every CSV column into a list of floats keyed by field name.
    data = {}
    with open(args.csv_file) as f:
        csv_reader = csv.DictReader(f)
        for field in csv_reader.fieldnames:
            data[field] = []
        for row in csv_reader:
            for field in csv_reader.fieldnames:
                data[field].append(float(row[field]))
    for series_name, data_values in data.items():
        print("\tSeries: ", series_name)
        print("\t# Values: ", len(data_values))
    # Calculate derived series: per-width (speedup, serial-fraction) samples.
    speedup_fwd = {}
    speedup_bck = {}
    size_factor = 0
    if args.scale_size:
        size_factor = 2000
    size_min = 10
    serial_total = sum(data['serial_fwd'][:] + data['serial_bck'][:])
    for i in range(len(data['serial_fwd'])):
        width = data['Width'][i]
        serial_fwd = data['serial_fwd'][i]
        serial_bck = data['serial_bck'][i]
        parrallel_fwd = data['parallel_fwd'][i]
        parrallel_bck = data['parallel_bck'][i]
        # Skip levels with no parallel measurement (zero runtime).
        if parrallel_fwd != 0.0:
            speedup = serial_fwd / parrallel_fwd
            serial_frac = serial_fwd / serial_total
            speedup_fwd.setdefault(width, []).append((speedup, serial_frac))
        if parrallel_bck != 0.0:
            speedup = serial_bck / parrallel_bck
            serial_frac = serial_bck / serial_total
            speedup_bck.setdefault(width, []).append((speedup, serial_frac))
    # Flatten to scatter coordinates; point size encodes serial fraction.
    fwd_x = []
    fwd_y = []
    fwd_s = []
    for width, values in speedup_fwd.items():
        for speedup, serial_frac in values:
            fwd_x.append(width)
            fwd_y.append(speedup)
            fwd_s.append(size_factor * serial_frac + size_min)
    bck_x = []
    bck_y = []
    bck_s = []
    for width, values in speedup_bck.items():
        for speedup, serial_frac in values:
            bck_x.append(width)
            bck_y.append(speedup)
            bck_s.append(size_factor * serial_frac + size_min)
    # Per-width averages, in increasing width order.
    fwd_x_avg = []
    fwd_y_avg = []
    for width, values in sorted(speedup_fwd.items()):
        speedups = [x[0] for x in values]
        fwd_x_avg.append(width)
        fwd_y_avg.append(sum(speedups) / len(speedups))
    bck_x_avg = []
    bck_y_avg = []
    for width, values in sorted(speedup_bck.items()):
        speedups = [x[0] for x in values]
        bck_x_avg.append(width)
        bck_y_avg.append(sum(speedups) / len(speedups))
    plt.scatter(fwd_x, fwd_y, fwd_s, c='b', label="speedup_fwd")
    plt.scatter(bck_x, bck_y, bck_s, c='g', label="speedup_bck")
    if args.average:
        plt.plot(fwd_x_avg, fwd_y_avg, c='b', label="Average FWD Speed-Up")
        plt.plot(bck_x_avg, bck_y_avg, c='g', label="Average BCK Speed-Up")
    plt.xscale("log")
    # Anchor the axes at y=0 / x=1 so speed-ups read against the baseline.
    _, xmax = plt.xlim()
    _, ymax = plt.ylim()
    plt.ylim(0, ymax)
    plt.xlim(1, xmax)
    plt.title(os.path.splitext(os.path.basename(args.csv_file))[0])
    plt.xlabel("Level Width")
    plt.ylabel("Parallel Speed-Up")
    plt.legend(loc='upper left')
    if args.f:
        plt.savefig(args.f, dpi=300)
    else:
        plt.show()
def runningMean(x, N):
    """Smooth *x* with a centred moving average of window size *N*."""
    window = np.ones(N) / N
    return np.convolve(x, window, mode='same')
# Generate the speed-up plot when executed as a script.
if __name__ == "__main__":
    main()
|
<reponame>aforalee/RRally<filename>tests/unit/verification/test_config.py
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
import requests
from rally import exceptions
from rally.verification.tempest import config
from tests.unit import fakes
from tests.unit import test
# Shortcut to the global oslo.config registry used throughout these tests.
CONF = cfg.CONF
class ConfigTestCase(test.TestCase):
@mock.patch("rally.common.objects.deploy.db.deployment_get")
@mock.patch("rally.osclients.Clients.services",
return_value={"test_service_type": "test_service"})
@mock.patch("rally.osclients.Clients.verified_keystone")
@mock.patch("rally.verification.tempest.config.os.path.isfile",
return_value=True)
def setUp(self, mock_isfile, mock_clients_verified_keystone,
mock_clients_services, mock_deployment_get):
super(ConfigTestCase, self).setUp()
self.endpoint = {
"username": "test",
"tenant_name": "test",
"password": "<PASSWORD>",
"auth_url": "http://test/v2.0/",
"permission": "admin",
"admin_domain_name": "Default"
}
mock_deployment_get.return_value = {"admin": self.endpoint}
self.deployment = "fake_deployment"
self.conf_generator = config.TempestConfig(self.deployment)
self.conf_generator.clients.services = mock_clients_services
self.context = config.TempestResourcesContext(self.deployment,
"/path/to/fake/conf")
self.context.conf.add_section("compute")
keystone_patcher = mock.patch("rally.osclients.create_keystone_client")
keystone_patcher.start()
self.addCleanup(keystone_patcher.stop)
@staticmethod
def _remove_default_section(items):
# Getting items from config parser by specified section name
# returns also values from DEFAULT section
defaults = (("log_file", "tempest.log"), ("debug", "True"),
("use_stderr", "False"))
return [item for item in items if item not in defaults]
@mock.patch("rally.verification.tempest.config.requests")
@mock.patch("rally.verification.tempest.config.os.rename")
@mock.patch("six.moves.builtins.open", side_effect=mock.mock_open(),
create=True)
def test__download_cirros_image_success(self, mock_open, mock_rename,
mock_requests):
mock_result = mock.MagicMock()
mock_result.status_code = 200
mock_requests.get.return_value = mock_result
self.conf_generator._download_cirros_image()
mock_requests.get.assert_called_once_with(CONF.image.cirros_img_url,
stream=True)
@mock.patch("rally.verification.tempest.config.requests")
def test__download_cirros_image_notfound(self, mock_requests):
mock_result = mock.MagicMock()
mock_result.status_code = 404
mock_requests.get.return_value = mock_result
self.assertRaises(exceptions.TempestConfigCreationFailure,
self.conf_generator._download_cirros_image)
def test__get_service_url(self):
service = "test_service"
service_type = "test_service_type"
url = "test_url"
# Mocked at setUp
self.conf_generator.keystone.auth_ref = {
"serviceCatalog": [
{
"name": service,
"type": service_type,
"endpoints": [{"publicURL": url}]
}
]
}
self.assertEqual(self.conf_generator._get_service_url(service), url)
@mock.patch("rally.verification.tempest."
"config.TempestConfig._get_service_url")
def test__configure_boto(self, mock_tempest_config__get_service_url):
url = "test_url"
mock_tempest_config__get_service_url.return_value = url
s3_materials_path = os.path.join(
self.conf_generator.data_dir, "s3materials")
self.conf_generator._configure_boto()
expected = (("ec2_url", url),
("s3_url", url),
("http_socket_timeout", "30"),
("s3_materials_path", s3_materials_path))
results = self._remove_default_section(
self.conf_generator.conf.items("boto"))
self.assertEqual(sorted(expected), sorted(results))
def test__configure_default(self):
self.conf_generator._configure_default()
expected = (("debug", "True"), ("log_file", "tempest.log"),
("use_stderr", "False"))
results = self.conf_generator.conf.items("DEFAULT")
self.assertEqual(sorted(expected), sorted(results))
def test__configure_identity(self):
self.conf_generator._configure_identity()
expected = (
("username", self.endpoint["username"]),
("password", self.endpoint["password"]),
("tenant_name", self.endpoint["tenant_name"]),
("admin_username", self.endpoint["username"]),
("admin_password", self.endpoint["password"]),
("admin_tenant_name", self.endpoint["username"]),
("admin_domain_name", self.endpoint["admin_domain_name"]),
("uri", self.endpoint["auth_url"]),
("uri_v3", self.endpoint["auth_url"].replace("/v2.0/", "/v3")))
results = self._remove_default_section(
self.conf_generator.conf.items("identity"))
self.assertEqual(sorted(expected), sorted(results))
def test__configure_option_flavor(self):
mock_novaclient = mock.MagicMock()
mock_novaclient.flavors.create.side_effect = [
fakes.FakeFlavor(id="id1"), fakes.FakeFlavor(id="id2")]
mock_nova = mock.MagicMock()
mock_nova.client.Client.return_value = mock_novaclient
self.context.conf.set("compute", "flavor_ref", "")
self.context.conf.set("compute", "flavor_ref_alt", "")
with mock.patch.dict("sys.modules", {"novaclient": mock_nova}):
self.context._configure_option("flavor_ref",
mock_novaclient.flavors.create, 64)
self.context._configure_option("flavor_ref_alt",
mock_novaclient.flavors.create, 128)
self.assertEqual(mock_novaclient.flavors.create.call_count, 2)
expected = ("id1", "id2")
results = (self.context.conf.get("compute", "flavor_ref"),
self.context.conf.get("compute", "flavor_ref_alt"))
self.assertEqual(sorted(expected), sorted(results))
@mock.patch("six.moves.builtins.open")
def test__configure_option_image(self, mock_open):
mock_glanceclient = mock.MagicMock()
mock_glanceclient.images.create.side_effect = [
fakes.FakeImage(id="id1"), fakes.FakeImage(id="id2")]
mock_glance = mock.MagicMock()
mock_glance.Client.return_value = mock_glanceclient
self.context.conf.set("compute", "image_ref", "")
self.context.conf.set("compute", "image_ref_alt", "")
with mock.patch.dict("sys.modules", {"glanceclient": mock_glance}):
self.context._configure_option("image_ref",
mock_glanceclient.images.create)
self.context._configure_option("image_ref_alt",
mock_glanceclient.images.create)
self.assertEqual(mock_glanceclient.images.create.call_count, 2)
expected = ("id1", "id2")
results = (self.context.conf.get("compute", "image_ref"),
self.context.conf.get("compute", "image_ref_alt"))
self.assertEqual(sorted(expected), sorted(results))
def test__configure_network_if_neutron(self):
fake_neutronclient = mock.MagicMock()
fake_neutronclient.list_networks.return_value = {
"networks": [
{
"status": "ACTIVE",
"id": "test_id",
"router:external": True
}
]
}
mock_neutron = mock.MagicMock()
mock_neutron.client.Client.return_value = fake_neutronclient
with mock.patch.dict("sys.modules", {"neutronclient.neutron":
mock_neutron}):
self.conf_generator.available_services = ["neutron"]
self.conf_generator._configure_network()
expected = (("public_network_id", "test_id"),)
results = self._remove_default_section(
self.conf_generator.conf.items("network"))
self.assertEqual(sorted(expected), sorted(results))
def test__configure_network_if_nova(self):
self.conf_generator.available_services = ["nova"]
mock_novaclient = mock.MagicMock()
mock_network = mock.MagicMock()
mock_network.human_id = "fake-network"
mock_novaclient.networks.list.return_value = [mock_network]
mock_nova = mock.MagicMock()
mock_nova.client.Client.return_value = mock_novaclient
with mock.patch.dict("sys.modules", {"novaclient": mock_nova}):
self.conf_generator._configure_network()
self.assertEqual("fake-network",
self.conf_generator.conf.get(
"compute", "fixed_network_name"))
self.assertEqual("fake-network",
self.conf_generator.conf.get(
"compute", "network_for_ssh"))
@mock.patch("rally.verification.tempest.config.os.path.exists",
return_value=False)
@mock.patch("rally.verification.tempest.config.os.makedirs")
def test__configure_oslo_concurrency(self, mock_makedirs, mock_exists):
self.conf_generator._configure_oslo_concurrency()
lock_path = os.path.join(
self.conf_generator.data_dir, "lock_files_%s" % self.deployment)
mock_makedirs.assert_called_once_with(lock_path)
expected = (("lock_path", lock_path),)
results = self._remove_default_section(
self.conf_generator.conf.items("oslo_concurrency"))
self.assertEqual(sorted(expected), sorted(results))
@mock.patch("rally.verification.tempest.config.requests")
def test__configure_service_available(self, mock_requests):
mock_result = mock.MagicMock()
mock_result.status_code = 404
mock_requests.get.return_value = mock_result
available_services = ("nova", "cinder", "glance", "sahara")
self.conf_generator.available_services = available_services
self.conf_generator._configure_service_available()
expected_horizon_url = "http://test"
expected_timeout = CONF.openstack_client_http_timeout
mock_requests.get.assert_called_once_with(
expected_horizon_url,
timeout=expected_timeout)
expected = (("neutron", "False"), ("heat", "False"),
("ceilometer", "False"), ("swift", "False"),
("cinder", "True"), ("nova", "True"),
("glance", "True"), ("horizon", "False"),
("sahara", "True"))
options = self._remove_default_section(
self.conf_generator.conf.items("service_available"))
self.assertEqual(sorted(expected), sorted(options))
    @mock.patch("rally.verification.tempest.config.requests")
    def test__configure_service_available_horizon(self, mock_requests):
        """An HTTP 200 from the horizon endpoint marks horizon as available."""
        mock_result = mock.MagicMock()
        mock_result.status_code = 200
        mock_requests.get.return_value = mock_result
        self.conf_generator._configure_service_available()
        self.assertEqual(
            self.conf_generator.conf.get(
                "service_available", "horizon"), "True")
    @mock.patch("rally.verification.tempest.config.requests.get")
    def test__configure_service_not_available_horizon(self, mock_get):
        """A requests.Timeout during the probe marks horizon as unavailable."""
        mock_get.side_effect = requests.Timeout()
        self.conf_generator._configure_service_available()
        self.assertEqual(
            self.conf_generator.conf.get(
                "service_available", "horizon"), "False")
    def test__configure_validation_if_neutron(self):
        """With neutron available the validation connect_method is 'floating'."""
        # if neutron is available
        self.conf_generator.available_services = ["neutron"]
        self.conf_generator._configure_validation()
        self.assertEqual("floating",
                         self.conf_generator.conf.get("validation",
                                                      "connect_method"))
    def test__configure_validation_if_novanetwork(self):
        """Without neutron (nova-network) the connect_method defaults to 'fixed'."""
        self.conf_generator._configure_validation()
        self.assertEqual("fixed",
                         self.conf_generator.conf.get("validation",
                                                      "connect_method"))
    @mock.patch("six.moves.builtins.open",
                side_effect=mock.mock_open(), create=True)
    def test__write_config(self, mock_open):
        """_write_config opens the path for writing and writes the conf into it."""
        conf_path = "/path/to/fake/conf"
        conf_data = mock.Mock()
        config._write_config(conf_path, conf_data)
        mock_open.assert_called_once_with(conf_path, "w+")
        # NOTE(review): mock_open.side_effect() returns the mock file handle,
        # so this asserts conf.write() received the opened file object.
        conf_data.write.assert_called_once_with(mock_open.side_effect())
|
from os import stat
from re import VERBOSE
from request_api.models.FOIRequestComments import FOIRequestComment
from request_api.models.FOIMinistryRequests import FOIMinistryRequest
from request_api.models.FOIRawRequestComments import FOIRawRequestComment
from request_api.models.FOIRawRequests import FOIRawRequest
import json
from dateutil.parser import parse
import datetime
from dateutil import parser
from dateutil import tz
from pytz import timezone
import pytz
import maya
class commentservice:
    """ FOI comment management service
        Supports creation, update and delete of comments for both unopened(raw) and opened(ministry) request
    """

    # NOTE: the parameter name `type` shadows the builtin; kept because callers
    # may pass it by keyword.
    def createministryrequestcomment(self, data, userid, type=1):
        """Save a comment against the current version of a ministry request."""
        version = FOIMinistryRequest.getversionforrequest(data["ministryrequestid"])
        return FOIRequestComment.savecomment(type, data, version, userid)

    def createrawrequestcomment(self, data, userid, type=1):
        """Save a comment against the current version of a raw (unopened) request."""
        version = FOIRawRequest.getversionforrequest(data["requestid"])
        return FOIRawRequestComment.savecomment(type, data, version, userid)

    def disableministryrequestcomment(self, commentid, userid):
        """Soft-delete a ministry request comment."""
        return FOIRequestComment.disablecomment(commentid, userid)

    def disablerawrequestcomment(self, commentid, userid):
        """Soft-delete a raw request comment."""
        return FOIRawRequestComment.disablecomment(commentid, userid)

    def updateministryrequestcomment(self, commentid, data, userid):
        """Update the text/metadata of a ministry request comment."""
        return FOIRequestComment.updatecomment(commentid, data, userid)

    def updaterawrequestcomment(self, commentid, data, userid):
        """Update the text/metadata of a raw request comment."""
        return FOIRawRequestComment.updatecomment(commentid, data, userid)

    def getministryrequestcomments(self, ministryrequestid):
        """Return ministry request comments threaded as parents with replies."""
        data = FOIRequestComment.getcomments(ministryrequestid)
        return self.__preparecomments(data)

    def getrawrequestcomments(self, requestid):
        """Return raw request comments threaded as parents with replies."""
        data = FOIRawRequestComment.getcomments(requestid)
        return self.__preparecomments(data)

    def copyrequestcomment(self, ministryrequestid, comments, userid):
        """Copy raw request comments (and their replies) onto a ministry request.

        Returns a list of {ministrycommentid, rawcommentid} mappings.
        NOTE(review): replies are mapped to the *parent's* rawcommentid,
        not the reply's own commentId -- presumably intentional; confirm.
        """
        _comments = []
        for comment in comments:
            # Copied comments always start at version 1 on the ministry side.
            commentresponse=FOIRequestComment.savecomment(comment['commentTypeId'], self.__copyparentcomment(ministryrequestid, comment), 1, userid,comment['dateUF'])
            _comments.append({"ministrycommentid":commentresponse.identifier,"rawcommentid":comment['commentId']})
            if comment['replies']:
                for reply in comment['replies']:
                    response=FOIRequestComment.savecomment(reply['commentTypeId'], self.__copyreplycomment(ministryrequestid, reply, commentresponse.identifier), 1, userid,reply['dateUF'])
                    _comments.append({"ministrycommentid":response.identifier,"rawcommentid":comment['commentId']})
        return _comments

    def __copyparentcomment(self, ministryrequestid, entry):
        """Build the payload for copying a top-level comment."""
        return {
            "ministryrequestid": ministryrequestid,
            "comment": entry['text'],
            "taggedusers": entry['taggedusers']
        }

    def __copyreplycomment(self, ministryrequestid, entry, parentcommentid):
        """Build the payload for copying a reply, linked to its new parent."""
        return {
            "ministryrequestid": ministryrequestid,
            "comment": entry['text'],
            "taggedusers": entry['taggedusers'],
            "parentcommentid":parentcommentid
        }

    def __preparecomments(self, data):
        """Thread flat comment rows: parents first, replies nested under them."""
        comments=[]
        comments = self.__parentcomments(data)
        for entry in data:
            if entry['parentcommentid'] is not None:
                for _comment in comments:
                    if entry['parentcommentid'] == _comment['commentId']:
                        _comment['replies'].append(self.__comment(entry))
        return comments

    def __parentcomments(self, data):
        """Return formatted top-level comments, each with an empty replies list."""
        parentcomments = []
        for entry in data:
            if entry['parentcommentid'] is None:
                _comment = self.__comment(entry)
                _comment['replies'] = []
                parentcomments.append(_comment)
        return parentcomments

    def __comment(self, comment):
        """Format one comment row for the API, rendering the created date
        in the America/Vancouver timezone."""
        commentcreateddate = maya.parse(comment["created_at"]).datetime(to_timezone='America/Vancouver', naive=False)
        return {
            "userId": comment['createdby'],
            "commentId": comment['commentid'],
            "text": comment['comment'],
            "dateUF":comment["created_at"],
            "date": commentcreateddate.strftime('%Y %b %d | %I:%M %p'),
            "parentCommentId":comment['parentcommentid'],
            "commentTypeId":comment['commenttypeid'],
            "taggedusers" : comment['taggedusers']
        }
<filename>tests/test_mqttplugin.py<gh_stars>10-100
"""test_mqttplugin.py :: Tests for Fauxmo's `MQTTPlugin`."""
import json
import time
from unittest.mock import MagicMock, patch
from mqttplugin import MQTTPlugin
config_path_str = "tests/test_mqttplugin_config.json"
def test_mqttplugin_mosquitto_dot_org() -> None:
    """Test MQTTPlugin against test.mosquitto.org.

    NOTE(review): live-network integration test; it requires internet access
    to the public test.mosquitto.org broker and will fail offline.
    """
    with open(config_path_str) as f:
        config: dict = json.load(f)
    mosquitto_devices = (
        device
        for device in config["PLUGINS"]["MQTTPlugin"]["DEVICES"]
        if device["mqtt_server"] == "test.mosquitto.org"
    )
    for device_conf in mosquitto_devices:
        device = MQTTPlugin(**device_conf)
        # Poll up to ~10s for the subscription to be established.
        for _ in range(100):
            if device.subscribed is True:
                break
            time.sleep(0.1)
        else:
            assert False, "Time out waiting for subscribe."
        assert device.on() is True
        # Poll up to ~2s for the new state to propagate back via MQTT.
        for _ in range(20):
            state = device.get_state()
            if state != "unknown":
                break
            time.sleep(0.1)
        assert state == "on"
        assert device.off() is True
        for _ in range(20):
            state = device.get_state()
            if state != "on":
                break
            time.sleep(0.1)
        assert state == "off"
@patch("mqttplugin.Client", autospec=True)
def test_mqtt_auth(mock_client: MagicMock) -> None:
    """Ensure auth is being used if available."""
    mock_instance = mock_client.return_value
    with open(config_path_str) as f:
        config: dict = json.load(f)
    # Pick the fixture device that carries credentials but no state topic.
    device_conf = next(
        device
        for device in config["PLUGINS"]["MQTTPlugin"]["DEVICES"]
        if device["mqtt_server"] == "mqtt.yes_auth.no_state"
    )
    MQTTPlugin(**device_conf)
    mock_instance.username_pw_set.assert_called_once_with(
        "MyUser", "MyPassword"
    )
@patch("mqttplugin.Client", autospec=True)
def test_mqtt_nostate(mock_client: MagicMock) -> None:
    """If state_cmd is not specified, loop_start is not called."""
    mock_instance = mock_client.return_value
    with open(config_path_str) as f:
        config: dict = json.load(f)
    device_conf = next(
        device
        for device in config["PLUGINS"]["MQTTPlugin"]["DEVICES"]
        if device["mqtt_server"] == "mqtt.yes_auth.no_state"
    )
    device = MQTTPlugin(**device_conf)
    # Without a state topic there is nothing to subscribe to or poll.
    mock_instance.loop_start.assert_not_called()
    mock_instance.subscribe.assert_not_called()
    assert device.subscribed is False
@patch("mqttplugin.Client", autospec=True)
def test_mqtt_noauth(mock_client: MagicMock) -> None:
    """Ensure auth is not being used if not configured."""
    mock_instance = mock_client.return_value
    with open(config_path_str) as f:
        config: dict = json.load(f)
    device_conf = next(
        device
        for device in config["PLUGINS"]["MQTTPlugin"]["DEVICES"]
        if device["mqtt_server"] == "mqtt.no_auth.yes_state"
    )
    MQTTPlugin(**device_conf)
    mock_instance.username_pw_set.assert_not_called()
@patch("mqttplugin.Client", autospec=True)
def test_mqtt_clientid(mock_client: MagicMock) -> None:
    """Ensure mqtt client id is properly set when configured."""
    with open(config_path_str) as f:
        config: dict = json.load(f)
    device_conf = next(
        device
        for device in config["PLUGINS"]["MQTTPlugin"]["DEVICES"]
        if device["mqtt_client_id"]
    )
    MQTTPlugin(**device_conf)
    # The configured id must be forwarded to the paho Client constructor.
    mock_client.assert_called_once_with(
        client_id=device_conf["mqtt_client_id"]
    )
|
<reponame>shivamashtikar/i3-dot-files<gh_stars>1-10
#!/usr/bin/env python3
# script copied from https://github.com/KJoke70/i3-tools
import i3ipc
import argparse

parser = argparse.ArgumentParser(
    description='rotate clockwise or counterclockwise.')
parser.add_argument('direction', type=int,
                    help='0 = clockwise, 1 = counterclockwise.')
parser.add_argument('--times', '-t', type=int, default=1,
                    help='how often to rotate.')
parser.add_argument('--no-multimonitor', '-m', action='store_true',
                    help='disables multi-monitor support.')
parser.add_argument('--enable-floating', '-f', action='store_true',
                    help='explicitly allow floating windows. May behave unexpectedly.')
args = parser.parse_args()

i3 = i3ipc.Connection()

# check if multiple displays attached
active_displays = 0
active_workspaces = list()
for d in i3.get_outputs():
    if d.active:
        active_displays += 1
        active_workspaces.append(int(d.current_workspace))
root = i3.get_tree()
focused = root.find_focused()
if args.no_multimonitor or active_displays == 1:
    # Single-monitor mode: rotate only the leaves of the focused workspace.
    leaves = focused.workspace().leaves()
else:
    # Multi-monitor mode: collect leaves of every visible workspace, with the
    # focused workspace's leaves first so rotation starts from it.
    focused_num = focused.workspace().num
    f_ind = active_workspaces.index(focused_num)
    active_workspaces[0], active_workspaces[f_ind] = active_workspaces[f_ind], active_workspaces[0]
    w_spaces = root.workspaces()
    leaves = list()
    for ws in root.workspaces():
        if ws.num in active_workspaces:
            if ws.num == focused_num:
                leaves = ws.leaves() + leaves
            else:
                leaves += ws.leaves()
if not args.enable_floating:
    # Drop floating windows; delete by index from the end to keep indices valid.
    to_remove = list()
    for i in range(len(leaves)):
        if 'on' in leaves[i].floating:
            to_remove.append(i)
    for i in range(len(to_remove)-1, -1, -1):
        del leaves[to_remove[i]]
number_of_leaves = len(leaves)
# NOTE(review): raises ZeroDivisionError when there are no (tiling) leaves --
# e.g. an empty workspace or all windows floating; consider guarding.
rotations = args.times % number_of_leaves
def clock():
    """Build the i3 command chain that rotates all leaves clockwise one slot.

    Returns ``(old_focus, comm)``: the index of the currently focused leaf
    (defaulting to the last leaf when the focused window is leaves[-1], which
    the loop never visits) and the concatenated swap commands.
    Relies on the module globals ``leaves``, ``focused`` and ``number_of_leaves``.
    """
    focus_index = number_of_leaves - 1
    swaps = []
    for idx in range(number_of_leaves - 1):
        if leaves[idx].id == focused.id:
            focus_index = idx
        swaps.append('[con_id=%s] swap container with con_id %s;'
                     % (str(leaves[idx].id), str(leaves[idx + 1].id)))
    return focus_index, "".join(swaps)
def counterclock():
    """Build the i3 command chain that rotates all leaves counterclockwise one slot.

    Returns ``(old_focus, comm)``: the index of the currently focused leaf
    (defaulting to 0, the one index the loop never visits) and the
    concatenated swap commands. Uses the module globals ``leaves``,
    ``focused`` and ``number_of_leaves``.
    """
    old_focus = 0
    comm = ""
    # range(number_of_leaves - 1, 0, -1) already stops at i == 1, so the
    # former `if i > 0:` guard around the swap was always true; removed.
    for i in range(number_of_leaves - 1, 0, -1):
        if leaves[i].id == focused.id:
            old_focus = i
        comm += '[con_id=%s] swap container with con_id %s;' % (str(leaves[i].id),
                                                                str(leaves[i - 1].id))
    return old_focus, comm
command = ""
if args.direction == 0:
    # Clockwise rotation.
    if rotations > 0:
        for i in range(rotations):
            old_focus, new_comm = clock()
            command += new_comm
        # Re-focus the originally focused container after the swaps; floating
        # windows keep their own id, tiling windows moved by `rotations` slots.
        if not args.enable_floating and 'on' in focused.floating:
            command += "[con_id=%s] focus;" % ( focused.id )
        else:
            command += "[con_id=%s] focus;" % ( leaves[(old_focus - rotations) %
                                                       number_of_leaves].id )
        i3.command(command)
elif args.direction == 1:
    # Counterclockwise rotation (mirror of the clockwise branch).
    if rotations > 0:
        for i in range(rotations):
            old_focus, new_comm = counterclock()
            command += new_comm
        if not args.enable_floating and 'on' in focused.floating:
            command += "[con_id=%s] focus;" % ( focused.id )
        else:
            command += "[con_id=%s] focus;" % ( leaves[(old_focus + rotations) %
                                                       number_of_leaves].id )
        i3.command(command)
|
#!/usr/bin/env python3
"""Easy installation and configuration of Linux/Mac/Windows apps.
"""
import os
import logging
import shutil
from pathlib import Path
from argparse import Namespace
from .utils import (
HOME,
USER,
BASE_DIR,
run_cmd,
add_subparser,
update_apt_source,
brew_install_safe,
is_debian_series,
is_linux,
is_macos,
is_fedora_series,
option_pip_bundle,
)
def ssh_server(args) -> None:
    """Install or uninstall the OpenSSH server (together with fail2ban).

    Only Debian-series distributions are supported; macOS and Fedora are
    currently no-ops.

    :param args: A Namespace object containing parsed command-line options.
    """
    if args.install and is_debian_series():
        update_apt_source(prefix=args.prefix)
        run_cmd(
            f"{args.prefix} apt-get install {args.yes_s} openssh-server fail2ban"
        )
    if args.uninstall:
        if is_debian_series():
            run_cmd(f"{args.prefix} apt-get purge {args.yes_s} openssh-server fail2ban")
        elif is_macos() or is_fedora_series():
            pass  # no uninstall steps implemented for these platforms
def _add_subparser_ssh_server(subparsers):
    """Register the `sshs` (SSH server) sub-command."""
    add_subparser(subparsers, "SSH server", func=ssh_server, aliases=["sshs"])
def _ignore_socket(dir_, files):
dir_ = Path(dir_)
return [file for file in files if (dir_ / file).is_socket()]
def _sshc_copy_from_host(ssh_home: Path):
    """Copy configuration files from /home_host/USER/.ssh if it exists.

    :param ssh_home: The home directory (~/.ssh) of SSH client configuration.
    """
    ssh_src = Path(f"/home_host/{USER}/.ssh")
    if ssh_src.is_dir():
        # inside a Docker container, use .ssh from host
        try:
            # Replace the container's ~/.ssh wholesale with the host copy.
            shutil.rmtree(ssh_home)
        except FileNotFoundError:
            pass
        # Sockets (e.g. ssh-agent control sockets) cannot be copied.
        shutil.copytree(ssh_src, ssh_home, ignore=_ignore_socket)
        logging.info("%s is copied to %s.", ssh_src, ssh_home)
def _sshc_copy_config(ssh_home: Path):
    """Install the bundled SSH client config file into *ssh_home*.

    :param ssh_home: The home directory (~/.ssh) of SSH client configuration.
    """
    src = BASE_DIR / "ssh/client/config"
    des = ssh_home / "config"
    shutil.copy2(src, des)
    logging.info("%s is copied to %s.", src, ssh_home)
def ssh_client(args) -> None:
    """Configure SSH client.

    Copies ~/.ssh from the host (when running inside Docker), installs the
    bundled config, creates the ControlMaster directory and tightens the
    ownership and permissions of everything under ~/.ssh.

    :param args: A Namespace object containing parsed command-line options.
    """
    if args.config:
        ssh_home = HOME / ".ssh"
        _sshc_copy_from_host(ssh_home)
        ssh_home.mkdir(exist_ok=True)
        _sshc_copy_config(ssh_home)
        control = ssh_home / "control"
        control.mkdir(exist_ok=True)
        control.chmod(0o700)
        if is_linux() or is_macos():
            cmd = f"{args.prefix} chown -R {USER}:`id -g {USER}` {HOME}/.ssh"
            run_cmd(cmd)
        # sshd refuses world/group-accessible key files: 600 for files, 700 for dirs.
        for path in ssh_home.glob("**/*"):
            if path.is_file():
                path.chmod(0o600)
            else:
                path.chmod(0o700)
        # Fixed typo in the log message ("corrected set" -> "correctly set").
        logging.info("The permissions of ~/.ssh and its contents are correctly set.")
def _add_subparser_ssh_client(subparsers):
    """Register the `sshc` (SSH client) sub-command."""
    add_subparser(subparsers, "SSH client", func=ssh_client, aliases=["sshc"])
def proxychains(args) -> None:
    """Install and configure ProxyChains.

    :param args: A Namespace object containing parsed command-line options.
    """
    if args.install:
        if is_debian_series():
            update_apt_source(prefix=args.prefix)
            # Also symlink proxychains4 to the conventional `proxychains` name.
            cmd = f"""{args.prefix} apt-get install {args.yes_s} proxychains4 \
                && {args.prefix} ln -svf /usr/bin/proxychains4 /usr/bin/proxychains"""
            run_cmd(cmd)
        elif is_macos():
            brew_install_safe(["proxychains-ng"])
        elif is_fedora_series():
            run_cmd(f"{args.prefix} yum install proxychains")
    if args.config:
        print("Configuring proxychains ...")
        src_file = BASE_DIR / "proxychains/proxychains.conf"
        des_dir = os.path.join(HOME, ".proxychains")
        os.makedirs(des_dir, exist_ok=True)
        shutil.copy2(src_file, des_dir)
        logging.info("%s is copied to the directory %s", src_file, des_dir)
    if args.uninstall:
        if is_debian_series():
            run_cmd(f"{args.prefix} apt-get purge {args.yes_s} proxychains4")
        elif is_macos():
            run_cmd("brew uninstall proxychains-ng")
        elif is_fedora_series():
            run_cmd(f"{args.prefix} yum remove proxychains")
def _add_subparser_proxychains(subparsers):
    """Register the `pchains`/`pc` (ProxyChains) sub-command."""
    add_subparser(
        subparsers, "ProxyChains", func=proxychains, aliases=["pchains", "pc"]
    )
def dryscrape(args):
    """Install and configure dryscrape.

    Only Debian-series installation is implemented; the config branch and
    the other platforms are currently no-ops.

    :param args: A Namespace object containing parsed command-line options.
    """
    if args.install:
        if is_debian_series():
            update_apt_source(prefix=args.prefix)
            # dryscrape needs the Qt5 WebKit headers and a virtual framebuffer.
            cmd = f"""{args.prefix} apt-get install {args.yes_s} qt5-default libqt5webkit5-dev build-essential xvfb \
                && {args.pip_install} dryscrape
                """
            run_cmd(cmd)
        elif is_macos():
            pass
        elif is_fedora_series():
            pass
    if args.config:
        pass
    if args.uninstall:
        if is_debian_series():
            pass
        elif is_macos():
            pass
        elif is_fedora_series():
            pass
def _dryscrape_args(subparser) -> None:
    """Add the shared pip-bundle options to the dryscrape sub-command."""
    option_pip_bundle(subparser)
def _add_subparser_dryscrape(subparsers):
    """Register the dryscrape sub-command."""
    add_subparser(
        subparsers,
        "dryscrape",
        func=dryscrape,
        aliases=[],
        add_argument=_dryscrape_args
    )
def download_tools(args: Namespace):
    """Install or uninstall downloading tools (wget, curl and aria2).

    Fedora-series systems are currently a no-op in both directions.

    :param args: An instance of Namespace containing arguments.
    """
    if args.install:
        if is_debian_series():
            update_apt_source(prefix=args.prefix)
            run_cmd(f"{args.prefix} apt-get install {args.yes_s} wget curl aria2")
        elif is_macos():
            brew_install_safe(["wget", "curl", "aria2"])
        elif is_fedora_series():
            pass
    if args.uninstall:
        if is_debian_series():
            run_cmd(f"{args.prefix} apt-get purge {args.yes_s} wget curl aria2")
        elif is_macos():
            run_cmd("brew uninstall wget curl aria2")
        elif is_fedora_series():
            pass
def _add_subparser_download_tools(subparsers):
    """Register the `dl`/`dlt` (download tools) sub-command."""
    add_subparser(
        subparsers, "download tools", func=download_tools, aliases=["dl", "dlt"]
    )
def iptables(args: Namespace):
    """Install or uninstall iptables (Debian-series only).

    :param args: An instance of Namespace containing arguments.
    """
    if args.install and is_debian_series():
        run_cmd(f"{args.prefix} apt-get install {args.yes_s} iptables")
    if args.config:
        pass  # nothing to configure
    if args.uninstall and is_debian_series():
        run_cmd(f"{args.prefix} apt-get purge {args.yes_s} iptables")
def _add_subparser_iptables(subparsers):
    """Register the `ipt` (iptables) sub-command."""
    add_subparser(subparsers, "iptables", func=iptables, aliases=["ipt"])
def sshuttle(args: Namespace):
    """Install or uninstall sshuttle via pip.

    :param args: An instance of Namespace containing arguments.
    """
    if args.install:
        # sshuttle depends on iptables, so make sure that is installed first.
        iptables(args)
        run_cmd(f"{args.pip_install} sshuttle")
    if args.config:
        pass  # nothing to configure
    if args.uninstall:
        run_cmd(f"{args.pip_uninstall} sshuttle")
def _sshuttle_args(subparser):
    """Add the shared pip-bundle options to the sshuttle sub-command."""
    option_pip_bundle(subparser)
def _add_subparser_sshuttle(subparsers):
    """Register the `sshu` (sshuttle) sub-command."""
    add_subparser(
        subparsers,
        "sshuttle",
        func=sshuttle,
        aliases=["sshu"],
        add_argument=_sshuttle_args
    )
def ngrok(args: Namespace):
    """Install and configures ngrok.

    Downloads the ngrok binary into ~/.local/bin on Linux/macOS, registers
    the auth token on --config, and removes the binary on --uninstall.

    :param args: An instance of Namespace containing arguments.
    """
    if args.install:
        if is_linux():
            cmd = f"""curl -sSL https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.tgz -o /tmp/ngrok.tgz \
                && tar -zxvf /tmp/ngrok.tgz -C {HOME}/.local/bin/"""
            run_cmd(cmd)
        elif is_macos():
            cmd = f"""curl -sSL https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-amd64.zip -o /tmp/ngrok.tgz \
                && unzip /tmp/ngrok.tgz -d {HOME}/.local/bin/"""
            run_cmd(cmd)
        else:
            pass
    if args.config:
        # SECURITY: hard-coded ngrok auth token committed to source control.
        # It should be rotated and read from an environment variable or a
        # config file instead of living in the repository.
        cmd = f"{HOME}/.local/bin/ngrok authtoken 23wmm8NwCToDsy0bXj27UqawhkR_4J8eN7aWgoekzXuFJGvQ4"
        run_cmd(cmd)
    if args.uninstall:
        # Fixed: unlink() used to raise FileNotFoundError when ngrok was not
        # installed; uninstalling is now idempotent.
        try:
            (HOME / ".local/bin/ngrok").unlink()
        except FileNotFoundError:
            pass
def _ngrok_args(subparser):
    """Add the shared pip-bundle options to the ngrok sub-command."""
    option_pip_bundle(subparser)
def _add_subparser_ngrok(subparsers):
    """Register the ngrok sub-command."""
    add_subparser(subparsers, "ngrok", func=ngrok, add_argument=_ngrok_args)
def _add_subparser_network(subparsers):
    """Register every network-related sub-command with the CLI parser."""
    _add_subparser_ssh_server(subparsers)
    _add_subparser_ssh_client(subparsers)
    _add_subparser_proxychains(subparsers)
    _add_subparser_dryscrape(subparsers)
    _add_subparser_download_tools(subparsers)
    _add_subparser_sshuttle(subparsers)
    _add_subparser_ngrok(subparsers)
|
import datetime
import PIL.Image as Image
from data import ImShow as I
import numpy as np
import tensorflow as tf
from model import l21RobustDeepAutoencoderOnST as l21RDA
import os
from collections import Counter
from sklearn.metrics import precision_score as precision
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score as recall
from sklearn.metrics import confusion_matrix as CM
import matplotlib.pyplot as plt
import pandas as pd
def l21RDAE(X, layers, lamda, folder, learning_rate=0.15, inner=100, outer=10, batch_size=133, inputsize=(28, 28)):
    """Train the l2,1 Robust Deep Autoencoder on X inside *folder* and dump
    the sparse (outlier) component S to "l21S.npk".

    The effective lambda is scaled by the number of samples (lamda * X.shape[0]).
    NOTE(review): uses os.chdir for output placement and does not restore the
    cwd on exception; `inputsize` is accepted but unused.
    """
    if not os.path.isdir(folder):
        os.makedirs(folder)
    os.chdir(folder)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            rael21 = l21RDA.RobustL21Autoencoder(sess=sess, lambda_=lamda * X.shape[0], layers_sizes=layers)
            # L = low-rank reconstruction target, S = sparse outlier part.
            l21L, l21S = rael21.fit(X=X, sess=sess, inner_iteration=inner, iteration=outer, batch_size=batch_size,
                                    learning_rate=learning_rate, verbose=True)
            l21R = rael21.getRecon(X=X, sess=sess)
            l21H = rael21.transform(X, sess)
            l21S.dump("l21S.npk")
    os.chdir("../")
def experiment_frame(X,elem_num,lamda_list):
    """Run l21RDAE once per lambda in *lamda_list*, writing each run's output
    under OutlierDetectionResult/lam<lambda>/.

    :param X: data matrix (samples x features); features presumably elem_num.
    :param elem_num: input dimensionality used as the first layer size.
    :param lamda_list: lambda values to sweep.
    """
    inner = 100
    outer = 8
    layers = [elem_num, 400, 200] ## S trans
    folder = r"OutlierDetectionResult"
    if not os.path.isdir(folder):
        os.makedirs(folder)
    os.chdir(folder)
    for lam in lamda_list:
        folder = "lam" + str(lam)
        # NOTE(review): learning_rate/batch_size here override l21RDAE defaults.
        l21RDAE(X=X, layers=layers, lamda=lam, folder=folder, learning_rate=0.005,
                inner=inner, outer=outer, batch_size=133, inputsize=(476, 166))
    os.chdir("../")
def binary_y(value):
    """Map an ISOLET class id to a binary label: 23 is outlier "o", else "m"."""
    return "o" if value == 23 else "m"
def binary_y2(value):
    """Map an MF-3 class id to a binary label: 3 is outlier "o", else "m"."""
    return "o" if value == 3 else "m"
def binary_y3(value):
    """Map a 0/1 class label to binary: 0 is normal "m", anything else "o"."""
    return "m" if value == 0 else "o"
def binary_y4(value):
    """Map an MF-5 class id to a binary label: 5 is outlier "o", else "m"."""
    return "o" if value == 5 else "m"
def binary_y5(value):
    """Map an MF-7 class id to a binary label: 7 is outlier "o", else "m"."""
    return "o" if value == 7 else "m"
def binary_y6(value):
    """Map an ionosphere class label to binary: 1 is normal "m", else "o"."""
    return "m" if value == 1 else "o"
if __name__ == "__main__":
    folder = 'OutlierDetectionResult'
    for n in range(1,8):
        dataset = n
        # 1-ISOLET,2-MF-3,3-Arrhythmia,4-MF-5,5-MF-7,6-ionosphere,7-Musk2
        if dataset==1:
            # ISOLET-23: 617 features; class 23 is the outlier class.
            elem_num=617
            filename=r"data/ISOLET-23/data_23.dat"
            print('当前数据集是:{0}'.format(filename))
            t1 = datetime.datetime.now()
            print('从当前时间开始:{0}'.format(t1))
            X = pd.read_csv(filename, header=None, index_col=None, skiprows=0, sep=',')
            X = X.iloc[:,:617].values
            lambda_list = [0.001] #0.0017,0.0018 ,0.0015
            experiment_frame(X,elem_num,lambda_list)
            lam_list = list(map(str, lambda_list))
            y_loc = r"data/ISOLET-23/classid_23.dat"
            y = pd.read_csv(y_loc, header=None, index_col=None, skiprows=0, sep=',')
            y = y.iloc[:,0].values
            print(Counter(y))
            bi_y = list(map(binary_y, y))
            print(Counter(bi_y))
            for i, lam in enumerate(lam_list):
                print("lambda:", lam)
                print('bi_y:{0}'.format(bi_y))
                print('bi_y:{0}'.format(Counter(bi_y)))
                # NOTE(review): Windows-style "\\" path separators -- this
                # load only works on Windows; consider os.path.join.
                S = np.load(folder + "\\" + "lam" + lam + "\\" + r"l21S.npk", allow_pickle=True)
                # Row-wise l2 norm of the sparse part is the outlier score.
                zscore = np.linalg.norm(S, axis=1)
                print('score:{0}'.format(zscore))
                zscore_abs = np.fabs(zscore)
                result_temp = []
                temp_list = [5,10,15,20,30,50,60,80,100,150]
                print('m的取值有:{0}'.format(temp_list))
                for m in temp_list:
                    # Count true outliers among the top-m scored samples.
                    count = 0
                    index = np.argpartition(zscore_abs, -m)[-m:]
                    for each_index in index:
                        if bi_y[each_index] == 'o':
                            count += 1
                    result_temp.append(count)
                print('result_temp:{0}'.format(result_temp))
                t2 = datetime.datetime.now()
                print('从当前时间结束:{0}'.format(t2))
                print('一共用时:{0}'.format(t2 - t1))
            print('第1个数据集完毕')
if dataset == 2:
elem_num = 649
filename = r"data/MF-3/data_3.dat"
print('当前数据集是:{0}'.format(filename))
t1 = datetime.datetime.now()
print('从当前时间开始:{0}'.format(t1))
X = pd.read_csv(filename, header=None, index_col=None, skiprows=0, sep=',')
X = X.iloc[:, :649].as_matrix()
lambda_list = [0.0001,0.001, 0.1, 1, 2,3] #3.7
experiment_frame(X, elem_num,lambda_list)
# lambda_list = [2.15, 2.3, 2.45, 2.6, 2.75,
# 3, 3.15, 3.3, 3.45, 3.6, 3.75, 4]
lam_list = list(map(str, lambda_list))
print(lam_list)
y_loc = r"data/MF-3/classid_3.dat"
y = pd.read_csv(y_loc, header=None, index_col=None, skiprows=0, sep=',')
y = y.iloc[:, 0].values
print(Counter(y))
bi_y = list(map(binary_y2, y))
print(Counter(bi_y))
for i, lam in enumerate(lam_list):
print("lambda:", lam)
print('bi_y:{0}'.format(bi_y))
print('bi_y:{0}'.format(Counter(bi_y)))
S = np.load(folder + "\\" + "lam" + lam + "\\" + r"l21S.npk", allow_pickle=True)
zscore = np.linalg.norm(S, axis=1)
zscore_abs = np.fabs(zscore)
result_temp = []
temp_list = [20,30,50,90,100,150]
print('m的取值有:{0}'.format(temp_list))
for m in temp_list:
count = 0
index = np.argpartition(zscore_abs, -m)[-m:]
for each_index in index:
if bi_y[each_index] == 'o':
count += 1
result_temp.append(count)
print('result_temp:{0}'.format(result_temp))
t2 = datetime.datetime.now()
print('从当前时间结束:{0}'.format(t2))
print('一共用时:{0}'.format(t2 - t1))
print('第2个数据集完毕')
        if dataset == 3:
            # Arrhythmia: 260 features; label column 260 is 0 for normal.
            elem_num = 260
            filename = r"data/Arrhythmia_withoutdupl_05_v03.dat"
            print('当前数据集是:{0}'.format(filename))
            t1 = datetime.datetime.now()
            print('从当前时间开始:{0}'.format(t1))
            X = pd.read_csv(filename, header=None, index_col=None, skiprows=0, sep=' ')
            X = X.iloc[:, :260].values
            lambda_list = [2.2,2.25,2.3,2.35,2.4] #2.45, 2.3
            experiment_frame(X, elem_num,lambda_list)
            # lambda_list = [2.15, 2.3, 2.45, 2.6, 2.75,
            #                3, 3.15, 3.3, 3.45, 3.6, 3.75, 4]
            lam_list = list(map(str, lambda_list))
            print(lam_list)
            y_loc = r"data/Arrhythmia_withoutdupl_05_v03.dat"
            y = pd.read_csv(y_loc, header=None, index_col=None, skiprows=0, sep=' ')
            y = y.iloc[:, 260].values
            print(Counter(y))
            bi_y = list(map(binary_y3, y))
            print(bi_y)
            print(Counter(bi_y))
            for i, lam in enumerate(lam_list):
                print("lambda:", lam)
                print('bi_y:{0}'.format(bi_y))
                print('bi_y:{0}'.format(Counter(bi_y)))
                S = np.load(folder + "\\" + "lam" + lam + "\\" + r"l21S.npk", allow_pickle=True)
                zscore = np.linalg.norm(S, axis=1)
                zscore_abs = np.fabs(zscore)
                result_temp = []
                temp_list = [5, 10, 15, 25, 30, 35, 40,45, 50, 55, 60, 80, 90, 100, 110, 120, 140, 150, 160, 170, 180, 190,
                             200]
                print('m的取值有:{0}'.format(temp_list))
                for m in temp_list:
                    count = 0
                    index = np.argpartition(zscore_abs, -m)[-m:]
                    for each_index in index:
                        if bi_y[each_index] == 'o':
                            count += 1
                    result_temp.append(count)
                print('result_temp:{0}'.format(result_temp))
                t2 = datetime.datetime.now()
                print('从当前时间结束:{0}'.format(t2))
                print('一共用时:{0}'.format(t2 - t1))
            print('第3个数据集完毕')
if dataset == 4:
elem_num = 649
filename = r"data/MF-5/data_5.dat"
print('当前数据集是:{0}'.format(filename))
t1 = datetime.datetime.now()
print('从当前时间开始:{0}'.format(t1))
X = pd.read_csv(filename, header=None, index_col=None, skiprows=0, sep=',')
X = X.iloc[:, :649].as_matrix()
lambda_list =[0.0001,0.001, 0.1, 1, 2]
experiment_frame(X, elem_num,lambda_list)
lam_list = list(map(str, lambda_list))
print(lam_list)
y_loc = r"data/MF-5/classid_5.dat"
y = pd.read_csv(y_loc, header=None, index_col=None, skiprows=0, sep=',')
y = y.iloc[:, 0].values
print(Counter(y))
bi_y = list(map(binary_y4, y))
print(Counter(bi_y))
for i, lam in enumerate(lam_list):
print("lambda:", lam)
print('bi_y:{0}'.format(bi_y))
print('bi_y:{0}'.format(Counter(bi_y)))
S = np.load(folder + "\\" + "lam" + lam + "\\" + r"l21S.npk", allow_pickle=True)
zscore = np.linalg.norm(S, axis=1)
zscore_abs = np.fabs(zscore)
result_temp = []
temp_list = [20,30,50,60,70,100,150]
print('m的取值有:{0}'.format(temp_list))
for m in temp_list:
count = 0
index = np.argpartition(zscore_abs, -m)[-m:]
for each_index in index:
if bi_y[each_index] == 'o':
count += 1
result_temp.append(count)
print('result_temp:{0}'.format(result_temp))
t2 = datetime.datetime.now()
print('从当前时间结束:{0}'.format(t2))
print('一共用时:{0}'.format(t2 - t1))
print('第4个数据集完毕')
if dataset == 5:
elem_num = 649
filename = r"data/MF-7/data_7.dat"
print('当前数据集是:{0}'.format(filename))
t1 = datetime.datetime.now()
print('从当前时间开始:{0}'.format(t1))
X = pd.read_csv(filename, header=None, index_col=None, skiprows=0, sep=',')
X = X.iloc[:, :649].as_matrix()
lambda_list = [3.85,3.9,3.95]
experiment_frame(X, elem_num, lambda_list)
lam_list = list(map(str, lambda_list))
print(lam_list)
y_loc = r"data/MF-7/classid_7.dat"
y = pd.read_csv(y_loc, header=None, index_col=None, skiprows=0, sep=',')
y = y.iloc[:, 0].values
print(Counter(y))
bi_y = list(map(binary_y5, y))
print(Counter(bi_y))
for i, lam in enumerate(lam_list):
print("lambda:", lam)
print('bi_y:{0}'.format(bi_y))
print('bi_y:{0}'.format(Counter(bi_y)))
S = np.load(folder + "\\" + "lam" + lam + "\\" + r"l21S.npk", allow_pickle=True)
zscore = np.linalg.norm(S, axis=1)
print('zscore:{0}'.format(zscore))
zscore_abs = np.fabs(zscore)
result_temp = []
temp_list = [20, 30, 50, 60, 90, 100, 150]
print('m的取值有:{0}'.format(temp_list))
for m in temp_list:
count = 0
index = np.argpartition(zscore_abs, -m)[-m:]
for each_index in index:
if bi_y[each_index] == 'o':
count += 1
result_temp.append(count)
print('result_temp:{0}'.format(result_temp))
t2 = datetime.datetime.now()
print('从当前时间结束:{0}'.format(t2))
print('一共用时:{0}'.format(t2 - t1))
print('第5个数据集完毕')
if dataset==6:
elem_num=34
filename = "data/ionosphere.txt"
print('当前数据集是:{0}'.format(filename))
t1 = datetime.datetime.now()
print('从当前时间开始:{0}'.format(t1))
X = np.loadtxt(filename, delimiter=",", usecols=np.arange(0, 34))
lambda_list = [0.003, 0.0035, 0.004, 0.0045] # 0.0045,0.0035
experiment_frame(X, elem_num, lambda_list)
lam_list = list(map(str, lambda_list))
print(lam_list)
y_loc = r"data/ionosphere.txt"
y = np.loadtxt(y_loc, delimiter=",", usecols=(-1,))
print(Counter(y))
bi_y = list(map(binary_y6, y))
print(Counter(bi_y))
for i, lam in enumerate(lam_list):
print("lambda:", lam)
print('bi_y:{0}'.format(bi_y))
print('bi_y:{0}'.format(Counter(bi_y)))
S = np.load(folder + "\\" + "lam" + lam + "\\" + r"l21S.npk", allow_pickle=True)
zscore = np.linalg.norm(S, axis=1)
zscore_abs = np.fabs(zscore)
result_temp = []
temp_list = [5, 10, 30, 60, 90, 120, 130, 140, 150, 200, 300, 340]
print('m的取值有:{0}'.format(temp_list))
for m in temp_list:
count = 0
index = np.argpartition(zscore_abs, -m)[-m:]
for each_index in index:
if bi_y[each_index] == 'o':
count += 1
result_temp.append(count)
print('result_temp:{0}'.format(result_temp))
t2 = datetime.datetime.now()
print('从当前时间结束:{0}'.format(t2))
print('一共用时:{0}'.format(t2 - t1))
print('第5个数据集完毕')
if dataset==7:
elem_num=166
filename = r"data/clean2.data"
print('当前数据集是:{0}'.format(filename))
t1 = datetime.datetime.now()
print('从当前时间开始:{0}'.format(t1))
X = pd.read_csv(filename, header=None, index_col=None, skiprows=0, sep=',')
X = X.iloc[:, 2:168].values
lambda_list = [0.24, 0.25, 0.255, 0.26, 0.265]
experiment_frame(X, elem_num, lambda_list)
lam_list = list(map(str, lambda_list))
print(lam_list)
y_loc = r"data/clean2.data"
y = pd.read_csv(y_loc, header=None, index_col=None, skiprows=0, sep=',')
y = y.iloc[:, 168].values
print(Counter(y))
bi_y = list(map(binary_y3, y))
print(Counter(bi_y))
for i, lam in enumerate(lam_list):
print("lambda:", lam)
print('bi_y:{0}'.format(bi_y))
print('bi_y:{0}'.format(Counter(bi_y)))
S = np.load(folder + "\\" + "lam" + lam + "\\" + r"l21S.npk", allow_pickle=True)
zscore = np.linalg.norm(S, axis=1)
zscore_abs = np.fabs(zscore)
result_temp = []
temp_list = [1000, 2000, 3000, 4000, 5000, 6000, 6598]
print('m的取值有:{0}'.format(temp_list))
for m in temp_list:
count = 0
index = np.argpartition(zscore_abs, -m)[-m:]
for each_index in index:
if bi_y[each_index] == 'o':
count += 1
result_temp.append(count)
print('result_temp:{0}'.format(result_temp))
t2 = datetime.datetime.now()
print('从当前时间结束:{0}'.format(t2))
print('一共用时:{0}'.format(t2 - t1))
print('第5个数据集完毕')
|
# encoding=utf8
from __future__ import unicode_literals
from ..log import user_data_log, log_with_user
from .base import boolean, date_time, to_string, string_agg, format_size
from .base import format_mimetype, parse_date
from io import BytesIO
from pyramid.response import FileIter
from pyramid.view import view_config
from sqlalchemy.sql import func
from sw.allotmentclub import Keylist, Key, KeylistAttachment, Member, Allotment
import collections
import sw.allotmentclub.browser.base
class KeylistQuery(sw.allotmentclub.browser.base.Query):
    """Query backing the keylist table: one row per keylist (id + subject)."""

    # CSS class applied per column in the rendered table.
    data_class = {
        'Schlüsselbuch': 'expand'
    }

    def select(self):
        return (
            self.db.query(
                Keylist.id.label('#'),
                Keylist.subject.label('Schlüsselbuch'),
            )
            .select_from(Keylist))
@view_config(route_name='keylists', renderer='json', permission='view')
class KeylistListView(sw.allotmentclub.browser.base.TableView):
    """Table of all keylists with toolbar actions for CRUD and drill-down."""

    query_class = KeylistQuery
    available_actions = [
        dict(url='keylist_add', btn_class='btn-success', icon='fa fa-plus',
             title='Neu'),
        dict(url='keylist_edit', btn_class='btn-success', icon='fa fa-pencil',
             title='Bearbeiten'),
        dict(url='keys', btn_class='btn-success', icon='fa fa-list',
             title='Schlüssel'),
        dict(url='keylist_attachment', btn_class='btn-success',
             icon='fa fa-list', title='Anlagen'),
        dict(url='keylist_delete', btn_class='btn-danger',
             icon='glyphicon glyphicon-trash', title='Löschen')]
@view_config(route_name='keylist_edit', renderer='json', permission='view')
class KeylistEditView(sw.allotmentclub.browser.base.EditJSFormView):
    """Edit form for a keylist (only its subject/name)."""

    title = 'Schlüsselbuch bearbeiten'

    @property
    def load_options(self):
        # Form field definitions for the edit dialog.
        return {
            'subject': {'label': 'Name'}
        }

    @property
    def load_data(self):
        # Current values, keyed like load_options.
        fields = [('subject', self.context.subject)]
        return collections.OrderedDict(fields)

    def save(self, key, value):
        # NOTE(review): a Keylist form has no 'rent' field; this branch looks
        # copied from KeyEditView and is presumably dead here -- confirm.
        if key == 'rent' and value:
            value = parse_date(value)
        return super(KeylistEditView, self).save(key, value)
@view_config(route_name='keylist_add', renderer='json', permission='view')
class KeylistAddView(KeylistEditView):
    """Create a new keylist, then reuse the edit form for it."""

    def __init__(self, context, request):
        # The freshly created Keylist replaces the routed context.
        context = Keylist.create()
        context.commit()
        super(KeylistAddView, self).__init__(context, request)
        log_with_user(
            user_data_log.info, self.request.user,
            'Schlüsselbuch %s hinzugefügt.', self.context.id)

    @property
    def route_name(self):
        # Subsequent saves go through the edit route.
        return 'keylist_edit'
@view_config(route_name='keylist_delete', renderer='json', permission='view')
class KeylistDeleteView(sw.allotmentclub.browser.base.DeleteView):
    """Delete a keylist."""

    model = Keylist
class KeyQuery(sw.allotmentclub.browser.base.Query):
    """Tabular query listing the keys of one key list (the context)."""

    # Column value formatters.
    formatters = {
        'Verliehen am': date_time,
        'Verloren': boolean
    }
    # CSS classes applied to the rendered columns.
    data_class = {
        'Seriennummer': 'expand'
    }
    # Columns hidden on small devices.
    data_hide = {
        'Verloren': 'phone,tablet',
        'Notiz': 'phone,tablet',
        'Mitglied': 'phone',
        'Verliehen am': 'phone',
    }

    def select(self):
        """Query all keys of ``self.context`` with a composed member column.

        The member column renders "Lastname, Firstname (allotment numbers)";
        allotment numbers are aggregated via ``string_agg`` with 'n/a' as
        fallback, and the whole expression is NULL for unassigned keys.
        """
        return (
            self.db.query(
                Key.id.label('#'),
                Key.serial.label('Seriennummer'),
                (func.coalesce(to_string(Member.lastname).concat(', ').concat(
                    to_string(Member.firstname).concat(' (')
                    .concat(func.coalesce(string_agg(Allotment.number), 'n/a')))
                    .concat(')'))).label('Mitglied'),
                Key.rent.label('Verliehen am'),
                Key.note.label('Notiz'),
                Key.lost.label('Verloren'))
            .select_from(Key)
            .outerjoin(Member)
            .outerjoin(Allotment, Allotment.member_id == Member.id)
            # group_by is required by the string_agg aggregate above.
            .group_by(Key.id, Member.lastname, Member.firstname)
            .filter(Key.keylist == self.context))
@view_config(route_name='keys', renderer='json', permission='view')
class KeysView(sw.allotmentclub.browser.base.TableView):
    """List of all keys of a key list.

    (The former docstring said "Liste aller Protokolle" — a copy/paste
    leftover; this view lists keys, not protocols.)
    """

    query_class = KeyQuery
    default_order_by = 'serial'

    # Toolbar buttons rendered above the table; each dict maps to a route.
    available_actions = [
        dict(url='key_add', btn_class='btn-success',
             icon='fa fa-plus', title='Neu'),
        dict(url='key_edit', btn_class='btn-success',
             icon='fa fa-pencil', title='Bearbeiten'),
        dict(url='key_delete', btn_class='btn-danger',
             icon='glyphicon glyphicon-trash', title='Löschen')]
@view_config(route_name='key_edit', renderer='json', permission='view')
class KeyEditView(sw.allotmentclub.browser.base.EditJSFormView):
    """JS form view for editing a single key (serial, member, rent, ...)."""

    title = 'Schlüssel bearbeiten'

    @property
    def load_options(self):
        # Form field definitions; member is a chosen-style select box,
        # rent gets a datetime picker, lost is a boolean toggle.
        return {
            'serial': {'label': 'Seriennummer'},
            'member_id': {
                'label': 'Mitglied',
                'source': self.member_source,
                'css_class': 'chosen'
            },
            'rent': {
                'label': 'Verliehen am',
                'css_class': 'datetimepicker'
            },
            'note': {'label': 'Notiz'},
            'lost': {'label': 'Verloren?', 'template': 'form_boolean'}
        }

    @property
    def load_data(self):
        # Initial form values taken from the context Key.
        fields = [
            ('serial', self.context.serial),
            ('member_id', self.context.member_id),
            ('rent', self.context.rent),
            ('note', self.context.note),
            ('lost', self.context.lost),
        ]
        return collections.OrderedDict(fields)

    def get_route(self, item, name):
        # Key routes are nested under their keylist; substitute the
        # placeholder with the context's keylist id.
        route = super(KeyEditView, self).get_route(item, name)
        return route.replace('{keylist_id}', str(self.context.keylist.id))
@view_config(route_name='key_add', renderer='json', permission='view')
class KeyAddView(KeyEditView):
    """Create a new key in the keylist given as context and edit it."""

    title = 'Schlüssel hinzufügen'

    def __init__(self, context, request):
        # The incoming context is the Keylist; the new Key replaces it.
        context = Key.create(keylist=context)
        context.commit()
        super(KeyAddView, self).__init__(context, request)
        log_with_user(user_data_log.info, self.request.user,
                      'Schlüssel %s hinzugefügt.', self.context.id)

    @property
    def route_name(self):
        # Subsequent form submissions go through the edit route.
        return 'key_edit'
@view_config(route_name='key_delete', renderer='json', permission='view')
class KeyDeleteView(sw.allotmentclub.browser.base.DeleteView):
    """Delete a single key and write an audit-log entry."""

    model = Key

    def log(self):
        # Only log when the deletion actually happened.
        if self.deleted is not None:
            deleted = self.context.id
            keylist = self.context.keylist.subject
            log_with_user(user_data_log.info,
                          self.request.user,
                          'Schlüssel %s aus Schlüsselbuch %s gelöscht.',
                          deleted, keylist)
class AttachmentQuery(sw.allotmentclub.browser.base.Query):
    """Tabular query listing the file attachments of one key list."""

    # Column value formatters (human-readable size and MIME type).
    formatters = {
        'Größe': format_size,
        'Dateityp': format_mimetype,
    }
    # CSS classes applied to the rendered columns.
    data_class = {
        'Name': 'expand'
    }
    # Columns hidden on small devices.
    data_hide = {
        'Dateityp': 'phone,tablet',
        'Größe': 'phone,tablet',
    }

    def select(self):
        """Query id, name, mimetype and size of the context's attachments."""
        return (
            self.db.query(
                KeylistAttachment.id.label('#'),
                KeylistAttachment.name.label('Name'),
                KeylistAttachment.mimetype.label('Dateityp'),
                KeylistAttachment.size.label('Größe'))
            .select_from(KeylistAttachment)
            .filter_by(keylist=self.context))
@view_config(route_name='keylist_attachment', renderer='json',
             permission='view')
class KeylistAttachmentsView(sw.allotmentclub.browser.base.TableView):
    """List of all attachments of a key list."""

    query_class = AttachmentQuery
    default_order_by = 'Name'

    # Toolbar buttons rendered above the table; each dict maps to a route.
    available_actions = [
        dict(url='keylist_attachment_add', btn_class='btn-success',
             icon='fa fa-plus', title='Neu'),
        dict(url='keylist_attachment_download', btn_class='btn-success',
             icon='fa fa-download', title='Herunterladen')]
@view_config(route_name='keylist_attachment_add', renderer='json',
             permission='view')
class KeylistAttachmentAddView(sw.allotmentclub.browser.base.AddView):
    """Upload a new file attachment for the key list given as context."""

    model = KeylistAttachment

    def parse_file(self):
        """Read the uploaded file from the request.

        Returns a ``(name, mimetype, size, data)`` tuple for the
        multipart form field ``file``.
        """
        file = self.request.params.get('file')
        data = file.file
        data.seek(0)
        data = data.read()
        name = file.filename
        mimetype = file.type
        size = len(data)
        return name, mimetype, size, data

    def log(self, id):
        # Audit-log hook used by the base AddView.
        log_with_user(user_data_log.info,
                      self.request.user,
                      'hat Ablage %s %s.', id, self.action)

    def __call__(self):
        # Without a submitted form, just acknowledge with empty data.
        if not self.form_submit():
            return {'status': 'success', 'data': {}}
        name, mimetype, size, data = self.parse_file()
        attachment = self.model.create(
            name=name, mimetype=mimetype, size=size, data=data,
            keylist=self.context)
        attachment.commit()
        # Bug fix: the audit message previously said "Protokoll" (protocol),
        # a copy/paste leftover — the attachment belongs to a Schlüsselbuch.
        log_with_user(user_data_log.info,
                      self.request.user,
                      'hat Anlage %s zu Schlüsselbuch %s hinzugefügt.',
                      attachment.id, self.context.id)
        return {'status': 'success'}
@view_config(route_name='keylist_attachment_download',
             permission='view')
class KeylistAttachmentDownloadView(object):
    """Stream a stored keylist attachment back to the browser."""

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self):
        attachment = self.context
        resp = self.request.response
        # Cookie used by the jQuery fileDownload plugin to detect completion.
        resp.set_cookie('fileDownload', value='true')
        resp.content_type = attachment.mimetype
        resp.content_length = int(attachment.size)
        resp.content_disposition = (
            'attachment; filename=' + attachment.name
        )
        # Serve the blob from memory via Pyramid's FileIter.
        resp.app_iter = FileIter(BytesIO(attachment.data))
        return resp
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import io
import os
import sys
import unittest
from datetime import datetime, timedelta
from subprocess import PIPE
from trac.core import TracError
from trac.test import EnvironmentStub, MockRequest, locate, mkdtemp, rmtree
from trac.util import create_file
from trac.util.compat import Popen, close_fds
from trac.util.datefmt import to_timestamp, utc
from trac.util.text import to_utf8
from trac.versioncontrol.api import Changeset, DbRepositoryProvider, \
InvalidRepository, Node, \
NoSuchChangeset, NoSuchNode, \
RepositoryManager
from trac.versioncontrol.web_ui.browser import BrowserModule
from trac.versioncontrol.web_ui.log import LogModule
from tracopt.versioncontrol.git.PyGIT import StorageFactory
from tracopt.versioncontrol.git.git_fs import GitCachedRepository, \
GitRepository, \
GitwebProjectsRepositoryProvider
class GitCommandMixin(object):
    """Helpers to drive the ``git`` command line from tests (Python 2)."""

    # Absolute path of the git executable, or None when git is missing.
    git_bin = locate('git')

    def _git_commit(self, *args, **kwargs):
        # ``date=`` pins committer and author date for reproducible hashes.
        env = kwargs.get('env') or os.environ.copy()
        if 'date' in kwargs:
            self._set_committer_date(env, kwargs.pop('date'))
        args = ('commit',) + args
        kwargs['env'] = env
        return self._git(*args, **kwargs)

    def _spawn_git(self, *args, **kwargs):
        """Spawn git with piped stdio; cwd defaults to the test repo."""
        args = map(to_utf8, (self.git_bin,) + args)
        kwargs.setdefault('stdin', PIPE)
        kwargs.setdefault('stdout', PIPE)
        kwargs.setdefault('stderr', PIPE)
        kwargs.setdefault('cwd', self.repos_path)
        return Popen(args, close_fds=close_fds, **kwargs)

    def _git(self, *args, **kwargs):
        """Run git to completion and assert a zero exit code."""
        with self._spawn_git(*args, **kwargs) as proc:
            stdout, stderr = proc.communicate()
            self.assertEqual(0, proc.returncode,
                             'git exits with %r, args %r, kwargs %r, stdout %r, '
                             'stderr %r' %
                             (proc.returncode, args, kwargs, stdout, stderr))
        return proc

    def _git_fast_import(self, data, **kwargs):
        """Feed a fast-import stream (str or unicode) into git."""
        if isinstance(data, unicode):
            data = data.encode('utf-8')
        with self._spawn_git('fast-import', stdin=PIPE, **kwargs) as proc:
            stdout, stderr = proc.communicate(input=data)
            self.assertEqual(0, proc.returncode,
                             'git exits with %r, stdout %r, stderr %r' %
                             (proc.returncode, stdout, stderr))

    def _git_date_format(self, dt):
        """Format a datetime as ``"<epoch> +HH:MM"`` for git env vars.

        Naive datetimes are treated as UTC.  ``rem / 60`` relies on
        Python 2 integer division.
        """
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=utc)
        offset = dt.utcoffset()
        secs = offset.days * 3600 * 24 + offset.seconds
        hours, rem = divmod(abs(secs), 3600)
        return '%d %c%02d:%02d' % (to_timestamp(dt), '-' if secs < 0 else '+',
                                   hours, rem / 60)

    def _set_committer_date(self, env, dt):
        # Accepts a preformatted string or a datetime (naive implies UTC).
        if not isinstance(dt, basestring):
            if dt.tzinfo is None:
                dt = dt.replace(tzinfo=utc)
            dt = self._git_date_format(dt)
        env['GIT_COMMITTER_DATE'] = dt
        env['GIT_AUTHOR_DATE'] = dt
class BaseTestCase(unittest.TestCase, GitCommandMixin):
    """Common fixture: a fresh Trac stub env plus a temp git repository."""

    def setUp(self):
        self.env = EnvironmentStub()
        self.tmpdir = mkdtemp()
        self.repos_path = os.path.join(self.tmpdir, 'gitrepos')
        os.mkdir(self.repos_path)
        if self.git_bin:
            self.env.config.set('git', 'git_bin', self.git_bin)

    def tearDown(self):
        # Close all open repositories before wiping caches, DB and files.
        for repos in self._repomgr.get_real_repositories():
            repos.close()
        self._repomgr.reload_repositories()
        StorageFactory._clean()
        self.env.reset_db()
        if os.path.isdir(self.tmpdir):
            rmtree(self.tmpdir)

    @property
    def _repomgr(self):
        return RepositoryManager(self.env)

    @property
    def _dbrepoprov(self):
        return DbRepositoryProvider(self.env)

    def _add_repository(self, reponame='gitrepos', bare=False, path=None):
        # Non-bare repositories are registered via their .git directory.
        if path is None:
            path = self.repos_path
        if not bare:
            path = os.path.join(path, '.git')
        self._dbrepoprov.add_repository(reponame, path, 'git')

    def _git_init(self, data=True, bare=False, **kwargs):
        """Initialize the repository, optionally with one initial commit."""
        if bare:
            self._git('init', '--bare', **kwargs)
        else:
            self._git('init', **kwargs)
        if not bare and data:
            self._git('config', 'user.name', 'Joe', **kwargs)
            self._git('config', 'user.email', '<EMAIL>', **kwargs)
            create_file(os.path.join(self.repos_path, '.gitignore'))
            self._git('add', '.gitignore', **kwargs)
            self._git_commit('-a', '-m', 'test',
                             date=datetime(2001, 1, 29, 16, 39, 56), **kwargs)
class SanityCheckingTestCase(BaseTestCase):
    """Opening a repository succeeds/fails depending on on-disk layout."""

    def test_bare(self):
        self._git_init(bare=True)
        self._dbrepoprov.add_repository('gitrepos', self.repos_path, 'git')
        self._repomgr.get_repository('gitrepos')

    def test_non_bare(self):
        # Both the .git directory and the work tree root are accepted.
        self._git_init(bare=False)
        self._dbrepoprov.add_repository('gitrepos.1',
                                        os.path.join(self.repos_path, '.git'),
                                        'git')
        self._repomgr.get_repository('gitrepos.1')
        self._dbrepoprov.add_repository('gitrepos.2', self.repos_path, 'git')
        self._repomgr.get_repository('gitrepos.2')

    def test_no_head_file(self):
        self._git_init(bare=True)
        os.unlink(os.path.join(self.repos_path, 'HEAD'))
        self._dbrepoprov.add_repository('gitrepos', self.repos_path, 'git')
        self.assertRaises(TracError, self._repomgr.get_repository, 'gitrepos')

    def test_no_objects_dir(self):
        self._git_init(bare=True)
        rmtree(os.path.join(self.repos_path, 'objects'))
        self._dbrepoprov.add_repository('gitrepos', self.repos_path, 'git')
        self.assertRaises(TracError, self._repomgr.get_repository, 'gitrepos')

    def test_no_refs_dir(self):
        self._git_init(bare=True)
        rmtree(os.path.join(self.repos_path, 'refs'))
        self._dbrepoprov.add_repository('gitrepos', self.repos_path, 'git')
        self.assertRaises(TracError, self._repomgr.get_repository, 'gitrepos')
class PersistentCacheTestCase(BaseTestCase):
    """Behavior of the `persistent_cache` option across repository reloads."""

    def test_persistent(self):
        # With a persistent cache, a new commit only becomes visible
        # after an explicit sync().
        self.env.config.set('git', 'persistent_cache', 'enabled')
        self._git_init()
        self._add_repository()
        youngest = self._repository.youngest_rev
        self._repomgr.reload_repositories()  # clear repository cache

        self._commit(datetime(2014, 1, 29, 16, 44, 54, 0, utc))
        self.assertEqual(youngest, self._repository.youngest_rev)
        self._repository.sync()
        self.assertNotEqual(youngest, self._repository.youngest_rev)

    def test_non_persistent(self):
        # Without a persistent cache the new commit is seen immediately
        # after the reload, and sync() does not change that.
        self.env.config.set('git', 'persistent_cache', 'disabled')
        self._git_init()
        self._add_repository()
        youngest = self._repository.youngest_rev
        self._repomgr.reload_repositories()  # clear repository cache

        self._commit(datetime(2014, 1, 29, 16, 44, 54, 0, utc))
        youngest_2 = self._repository.youngest_rev
        self.assertNotEqual(youngest, youngest_2)
        self._repository.sync()
        self.assertNotEqual(youngest, self._repository.youngest_rev)
        self.assertEqual(youngest_2, self._repository.youngest_rev)

    def _commit(self, date):
        # Touch .gitignore so there is always something to commit.
        gitignore = os.path.join(self.repos_path, '.gitignore')
        create_file(gitignore, date.isoformat())
        self._git_commit('-a', '-m', date.isoformat(), date=date)

    @property
    def _repository(self):
        return self._repomgr.get_repository('gitrepos')
class HistoryTimeRangeTestCase(BaseTestCase):
    """get_changesets() boundary semantics, with and without caching."""

    def test_without_cache(self):
        self._test_timerange('disabled')

    def test_with_cache(self):
        self._test_timerange('enabled')

    def _test_timerange(self, cached_repository):
        self.env.config.set('git', 'cached_repository', cached_repository)
        self._git_init()
        filename = os.path.join(self.repos_path, '.gitignore')
        start = datetime(2000, 1, 1, 0, 0, 0, 0, utc)
        # Three extra commits all share the same timestamp `ts`.
        ts = datetime(2014, 2, 5, 15, 24, 6, 0, utc)
        for idx in xrange(3):
            create_file(filename, 'commit-%d.txt' % idx)
            self._git_commit('-a', '-m', 'commit %d' % idx, date=ts)
        self._add_repository()
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()
        # Walk from HEAD to the root; 1 initial + 3 extra commits.
        revs = [repos.youngest_rev]
        while True:
            parents = repos.parent_revs(revs[-1])
            if not parents:
                break
            revs.extend(parents)
        self.assertEqual(4, len(revs))

        # The end of the range is exclusive: only the initial commit
        # (strictly before ts) is returned.
        csets = list(repos.get_changesets(start, ts))
        self.assertEqual(1, len(csets))
        self.assertEqual(revs[-1], csets[0].rev)  # is oldest rev

        csets = list(repos.get_changesets(start, ts + timedelta(seconds=1)))
        self.assertEqual(revs, [cset.rev for cset in csets])
class GitNormalTestCase(BaseTestCase):
    """Node lookup and empty-repository behavior of the plain backend."""

    def test_get_node(self):
        self.env.config.set('git', 'persistent_cache', 'false')
        self.env.config.set('git', 'cached_repository', 'false')
        self._git_init()
        self._add_repository()
        repos = self._repomgr.get_repository('gitrepos')
        rev = repos.youngest_rev
        self.assertIsNotNone(rev)
        self.assertEqual(40, len(rev))

        # Both full and abbreviated (7-char) revisions resolve.
        self.assertEqual(rev, repos.get_node('/').rev)
        self.assertEqual(rev, repos.get_node('/', rev[:7]).rev)
        self.assertEqual(rev, repos.get_node('/.gitignore').rev)
        self.assertEqual(rev, repos.get_node('/.gitignore', rev[:7]).rev)

        # Missing paths raise NoSuchNode; bad revisions raise
        # NoSuchChangeset, regardless of whether the path exists.
        self.assertRaises(NoSuchNode, repos.get_node, '/non-existent')
        self.assertRaises(NoSuchNode, repos.get_node, '/non-existent', rev[:7])
        self.assertRaises(NoSuchNode, repos.get_node, '/non-existent', rev)
        self.assertRaises(NoSuchChangeset,
                          repos.get_node, '/', 'invalid-revision')
        self.assertRaises(NoSuchChangeset,
                          repos.get_node, '/.gitignore', 'invalid-revision')
        self.assertRaises(NoSuchChangeset,
                          repos.get_node, '/non-existent', 'invalid-revision')

        # git_fs doesn't support non-ANSI strings on Windows
        if os.name != 'nt':
            self._git('branch', u'tïckét10605', 'master')
            repos.sync()
            self.assertEqual(rev, repos.get_node('/', u'tïckét10605').rev)
            self.assertEqual(rev, repos.get_node('/.gitignore',
                                                 u'tïckét10605').rev)

    def _test_on_empty_repos(self, cached_repository):
        self.env.config.set('git', 'persistent_cache', 'false')
        self.env.config.set('git', 'cached_repository',
                            'true' if cached_repository else 'false')
        self._git_init(data=False, bare=True)
        self._add_repository(bare=True)
        repos = self._repomgr.get_repository('gitrepos')
        if cached_repository:
            # call sync() thrice with empty repository (#11851)
            for i in xrange(3):
                repos.sync()
                rows = self.env.db_query("SELECT value FROM repository "
                                         "WHERE id=%s AND name=%s",
                                         (repos.id, 'youngest_rev'))
                self.assertEqual('', rows[0][0])
        else:
            repos.sync()

        # All revision accessors return None on an empty repository.
        youngest_rev = repos.youngest_rev
        self.assertIsNone(youngest_rev)
        self.assertIsNone(repos.oldest_rev)
        self.assertIsNone(repos.normalize_rev(''))
        self.assertIsNone(repos.normalize_rev(None))
        self.assertIsNone(repos.display_rev(''))
        self.assertIsNone(repos.display_rev(None))
        self.assertIsNone(repos.short_rev(''))
        self.assertIsNone(repos.short_rev(None))

        node = repos.get_node('/', youngest_rev)
        self.assertEqual([], list(node.get_entries()))
        self.assertEqual([], list(node.get_history()))
        self.assertRaises(NoSuchNode, repos.get_node, '/path', youngest_rev)

        # Browser and log modules must render without errors as well.
        req = MockRequest(self.env, path_info='/browser/gitrepos')
        browser_mod = BrowserModule(self.env)
        self.assertTrue(browser_mod.match_request(req))
        rv = browser_mod.process_request(req)
        self.assertEqual('browser.html', rv[0])
        self.assertIsNone(rv[1]['rev'])

        req = MockRequest(self.env, path_info='/log/gitrepos')
        log_mod = LogModule(self.env)
        self.assertTrue(log_mod.match_request(req))
        rv = log_mod.process_request(req)
        self.assertEqual('revisionlog.html', rv[0])
        self.assertEqual([], rv[1]['items'])

    def test_on_empty_and_cached_repos(self):
        self._test_on_empty_repos(True)

    def test_on_empty_and_non_cached_repos(self):
        self._test_on_empty_repos(False)
class GitRepositoryTestCase(BaseTestCase):
    """Tests against the direct (non-cached) GitRepository backend.

    The fast-import payloads below are byte-exact (the ``data <n>`` counts
    and the expected SHA-1s depend on them) — do not reformat them.
    """

    cached_repository = 'disabled'

    def setUp(self):
        BaseTestCase.setUp(self)
        self.env.config.set('git', 'cached_repository', self.cached_repository)

    def _create_merge_commit(self):
        # Two branches off master with interleaved commit dates, then
        # merge 'beta' into 'alpha' to obtain a merge changeset.
        for idx, branch in enumerate(('alpha', 'beta')):
            self._git('checkout', '-b', branch, 'master')
            for n in xrange(2):
                filename = 'file-%s-%d.txt' % (branch, n)
                create_file(os.path.join(self.repos_path, filename))
                self._git('add', filename)
                self._git_commit('-a', '-m', filename,
                                 date=datetime(2014, 2, 2, 17, 12,
                                               n * 2 + idx))
        self._git('checkout', 'alpha')
        self._git('merge', '-m', 'Merge branch "beta" to "alpha"', 'beta')

    def test_invalid_path_raises(self):
        def try_init(reponame):
            params = {'name': reponame}
            with self.assertRaises(InvalidRepository) as cm:
                GitRepository(self.env, '/the/invalid/path', params,
                              self.env.log)
            return cm.exception

        # The empty name is displayed as "(default)" in the message.
        e = try_init('')
        self.assertEqual('"(default)" is not readable or not a Git '
                         'repository.', unicode(e))

        e = try_init('therepos')
        self.assertEqual('"therepos" is not readable or not a Git repository.',
                         unicode(e))

    def test_repository_instance(self):
        self._git_init()
        self._add_repository('gitrepos')
        self.assertEqual(GitRepository,
                         type(self._repomgr.get_repository('gitrepos')))

    def test_reset_head(self):
        # After `git reset --hard HEAD~`, sync() must drop the discarded
        # commit from the history and change youngest_rev.
        self._git_init()
        create_file(os.path.join(self.repos_path, 'file.txt'), 'text')
        self._git('add', 'file.txt')
        self._git_commit('-a', '-m', 'test',
                         date=datetime(2014, 2, 2, 17, 12, 18))
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()
        youngest_rev = repos.youngest_rev
        entries = list(repos.get_node('').get_history())
        self.assertEqual(2, len(entries))
        self.assertEqual('', entries[0][0])
        self.assertEqual(Changeset.EDIT, entries[0][2])
        self.assertEqual('', entries[1][0])
        self.assertEqual(Changeset.ADD, entries[1][2])

        self._git('reset', '--hard', 'HEAD~')
        repos.sync()
        new_entries = list(repos.get_node('').get_history())
        self.assertEqual(1, len(new_entries))
        self.assertEqual(new_entries[0], entries[1])
        self.assertNotEqual(youngest_rev, repos.youngest_rev)

    def test_tags(self):
        # Quickjump entries follow tag creation and deletion.
        self._git_init()
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()
        self.assertEqual(['master'], self._get_quickjump_names(repos))
        self._git('tag', 'v1.0', 'master')  # add tag
        repos.sync()
        self.assertEqual(['master', 'v1.0'], self._get_quickjump_names(repos))
        self._git('tag', '-d', 'v1.0')  # delete tag
        repos.sync()
        self.assertEqual(['master'], self._get_quickjump_names(repos))

    def test_branchs(self):
        # Quickjump entries follow branch add, rename and delete.
        self._git_init()
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()
        self.assertEqual(['master'], self._get_quickjump_names(repos))
        self._git('branch', 'alpha', 'master')  # add branch
        repos.sync()
        self.assertEqual(['master', 'alpha'], self._get_quickjump_names(repos))
        self._git('branch', '-m', 'alpha', 'beta')  # rename branch
        repos.sync()
        self.assertEqual(['master', 'beta'], self._get_quickjump_names(repos))
        self._git('branch', '-D', 'beta')  # delete branch
        repos.sync()
        self.assertEqual(['master'], self._get_quickjump_names(repos))

    def test_changeset_branches_tags(self):
        self._git_init()
        self._git('tag', '0.0.1', 'master')
        self._git('tag', '-m', 'Root commit', 'initial', 'master')
        self._git('branch', 'root', 'master')
        self._git('checkout', '-b', 'dev', 'master')
        self._git_commit('-m', 'Summary', '--allow-empty')
        self._git('tag', '0.1.0dev', 'dev')
        self._git('tag', '0.1.0a', 'dev')
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()

        def get_branches(repos, rev):
            rev = repos.normalize_rev(rev)
            return list(repos.get_changeset(rev).get_branches())

        def get_tags(repos, rev):
            rev = repos.normalize_rev(rev)
            return list(repos.get_changeset(rev).get_tags())

        # get_branches() yields (name, is_head_of_branch) tuples.
        self.assertEqual([('dev', False), ('master', True), ('root', True)],
                         get_branches(repos, '0.0.1'))
        self.assertEqual([('dev', True)], get_branches(repos, '0.1.0dev'))
        self.assertEqual(['0.0.1', 'initial'], get_tags(repos, '0.0.1'))
        self.assertEqual(['0.0.1', 'initial'], get_tags(repos, 'initial'))
        self.assertEqual(['0.1.0a', '0.1.0dev'], get_tags(repos, '0.1.0dev'))

    def test_parent_child_revs(self):
        self._git_init()
        self._git('branch', 'initial')  # root commit
        self._create_merge_commit()
        self._git('branch', 'latest')
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()

        # Root commit: no parents, two children (one per branch).
        rev = repos.normalize_rev('initial')
        children = repos.child_revs(rev)
        self.assertEqual(2, len(children), 'child_revs: %r' % children)
        parents = repos.parent_revs(rev)
        self.assertEqual(0, len(parents), 'parent_revs: %r' % parents)
        self.assertEqual(1, len(repos.child_revs(children[0])))
        self.assertEqual(1, len(repos.child_revs(children[1])))
        self.assertEqual([('.gitignore', Node.FILE, Changeset.ADD, None,
                           None)],
                         sorted(repos.get_changeset(rev).get_changes()))

        # Merge commit: two parents, no children.
        rev = repos.normalize_rev('latest')
        cset = repos.get_changeset(rev)
        children = repos.child_revs(rev)
        self.assertEqual(0, len(children), 'child_revs: %r' % children)
        parents = repos.parent_revs(rev)
        self.assertEqual(2, len(parents), 'parent_revs: %r' % parents)
        self.assertEqual(1, len(repos.parent_revs(parents[0])))
        self.assertEqual(1, len(repos.parent_revs(parents[1])))

        # check the differences against the first parent
        def fn_repos_changes(entry):
            old_node, new_node, kind, change = entry
            if old_node:
                old_path, old_rev = old_node.path, old_node.rev
            else:
                old_path, old_rev = None, None
            return new_node.path, kind, change, old_path, old_rev
        self.assertEqual(sorted(map(fn_repos_changes,
                                    repos.get_changes('/', parents[0], '/',
                                                      rev))),
                         sorted(cset.get_changes()))

    _data_annotations = """\
blob
mark :1
data 14
one
two
three
reset refs/heads/master
commit refs/heads/master
mark :2
author Joe <<EMAIL>> 1467172510 +0000
committer Joe <<EMAIL>> 1467172510 +0000
data 6
blame
M 100644 :1 test.txt
blob
mark :3
data 49
one
two
three
four
five
six
seven
eight
nine
ten
commit refs/heads/master
mark :4
author Joe <<EMAIL>> 1467172511 +0000
committer Joe <<EMAIL>> 1467172511 +0000
data 10
add lines
from :2
M 100644 :3 test.txt
blob
mark :5
data 40
one
two
3
four
five
6
seven
eight
9
ten
commit refs/heads/master
mark :6
author Joe <<EMAIL>> 1467172512 +0000
committer Joe <<EMAIL>> 1467172512 +0000
data 13
modify lines
from :4
M 100644 :5 test.txt
reset refs/heads/master
from :6
"""

    def test_get_annotations(self):
        # Blame must attribute each line to the revision that last
        # touched it, for explicit revs, 'HEAD' and the default.
        self._git_init(data=False)
        self._git_fast_import(self._data_annotations)
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()
        rev1 = 'a7efe353630d02139f255220d71b76fa68eb7132'  # root commit
        rev2 = 'f928d1b36b8bedf64bcf08667428fdcccf36b21b'
        rev3 = '279a097f111c7cb1ef0b9da39735188051fd4f69'  # HEAD
        self.assertEqual([rev1] * 3,
                         repos.get_node('test.txt', rev1).get_annotations())
        self.assertEqual([rev1] * 3 + [rev2] * 7,
                         repos.get_node('test.txt', rev2).get_annotations())
        expected = [rev1, rev1, rev3, rev2, rev2, rev3, rev2, rev2, rev3, rev2]
        self.assertEqual(expected,
                         repos.get_node('test.txt', rev3).get_annotations())
        self.assertEqual(expected,
                         repos.get_node('test.txt', 'HEAD').get_annotations())
        self.assertEqual(expected,
                         repos.get_node('test.txt').get_annotations())

    # * 79dff4ccf842f8e2d2da2ee3e7a2149df63b099b Merge branch 'A'
    # |\
    # | * 86387120095e9e43573bce61b9da70a8c5d1c1b9 Merge branch 'B' into A
    # | |\
    # | | * 64e12f96b6b3040cd9edc225734ab2b26a03758b Changed a1
    # | * | 67fdcf11e2d083b123b9a79be4fce0600f313f81 Changed a2
    # * | | 42fbe758709b2a65aba33e56b2f53cd126c190e3 Changed b2
    # | |/
    # |/|
    # * | 24d94dc08eb77438e4ead192b3f7d1c7bdf1a9e1 Changed b2
    # * | 998bf23843c8fd982bbc23f88ec33c4d08114557 Changed b1
    # |/
    # * c5b01c74e125aa034a1d4ae31dc16f1897a73779 First commit
    _data_iter_nodes = """\
blob
mark :1
data 2
a1
blob
mark :2
data 2
a2
blob
mark :3
data 2
b1
blob
mark :4
data 2
b2
reset refs/heads/A
commit refs/heads/A
mark :5
author Joe <<EMAIL>> 1470744252 +0000
committer Joe <<EMAIL>> 1470744252 +0000
data 13
First commit
M 100644 :1 A/a1.txt
M 100644 :2 A/a2.txt
M 100644 :3 B/b1.txt
M 100644 :4 B/b2.txt
blob
mark :6
data 4
b1-1
commit refs/heads/master
mark :7
author Joe <<EMAIL>> 1470744253 +0000
committer Joe <<EMAIL>> 1470744253 +0000
data 11
Changed b1
from :5
M 100644 :6 B/b1.txt
blob
mark :8
data 4
b2-1
commit refs/heads/master
mark :9
author Joe <<EMAIL>> 1470744254 +0000
committer Joe <<EMAIL>> 1470744254 +0000
data 11
Changed b2
from :7
M 100644 :8 B/b2.txt
blob
mark :10
data 4
b2-2
commit refs/heads/master
mark :11
author Joe <<EMAIL>> 1470744255 +0000
committer Joe <<EMAIL>> 1470744255 +0000
data 11
Changed b2
from :9
M 100644 :10 B/b2.txt
blob
mark :12
data 4
a2-1
commit refs/heads/A
mark :13
author Joe <<EMAIL>> 1470744256 +0000
committer Joe <<EMAIL>> 1470744256 +0000
data 11
Changed a2
from :5
M 100644 :12 A/a2.txt
blob
mark :14
data 4
a1-1
commit refs/heads/B
mark :15
author Joe <<EMAIL>> 1470744257 +0000
committer Joe <<EMAIL>> 1470744257 +0000
data 11
Changed a1
from :9
M 100644 :14 A/a1.txt
commit refs/heads/A
mark :16
author Joe <<EMAIL>> 1470744258 +0000
committer Joe <<EMAIL>> 1470744258 +0000
data 24
Merge branch 'B' into A
from :13
merge :15
M 100644 :14 A/a1.txt
M 100644 :6 B/b1.txt
M 100644 :8 B/b2.txt
commit refs/heads/master
mark :17
author Joe <<EMAIL>> 1470744259 +0000
committer Joe <<EMAIL>> 1470744259 +0000
data 17
Merge branch 'A'
from :11
merge :16
M 100644 :14 A/a1.txt
M 100644 :12 A/a2.txt
reset refs/heads/master
from :17
"""

    def test_iter_nodes(self):
        # created_rev of each node must reflect the commit that last
        # changed it on the branch of the given root revision.
        self._git_init(data=False)
        self._git_fast_import(self._data_iter_nodes)
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()
        mod = BrowserModule(self.env)

        root_node = repos.get_node('')
        nodes = list(mod._iter_nodes(root_node))
        self.assertEqual(['79dff4ccf842f8e2d2da2ee3e7a2149df63b099b'] * 7,
                         [node.rev for node in nodes])
        self.assertEqual([
            ('79dff4ccf842f8e2d2da2ee3e7a2149df63b099b', ''),
            ('64e12f96b6b3040cd9edc225734ab2b26a03758b', 'A'),
            ('64e12f96b6b3040cd9edc225734ab2b26a03758b', 'A/a1.txt'),
            ('67fdcf11e2d083b123b9a79be4fce0600f313f81', 'A/a2.txt'),
            ('<KEY>', 'B'),
            ('998bf23843c8fd982bbc23f88ec33c4d08114557', 'B/b1.txt'),
            ('<KEY>', 'B/b2.txt'),
        ], [(node.created_rev, node.path) for node in nodes])

        root_node = repos.get_node('',
                                   '86387120095e9e43573bce61b9da70a8c5d1c1b9')
        nodes = list(mod._iter_nodes(root_node))
        self.assertEqual(['86387120095e9e43573bce61b9da70a8c5d1c1b9'] * 7,
                         [node.rev for node in nodes])
        self.assertEqual([
            ('86387120095e9e43573bce61b9da70a8c5d1c1b9', ''),
            ('64e12f96b6b3040cd9edc225734ab2b26a03758b', 'A'),
            ('64e12f96b6b3040cd9edc225734ab2b26a03758b', 'A/a1.txt'),
            ('67fdcf11e2d083b123b9a79be4fce0600f313f81', 'A/a2.txt'),
            ('24d94dc08eb77438e4ead192b3f7d1c7bdf1a9e1', 'B'),
            ('998bf23843c8fd982bbc23f88ec33c4d08114557', 'B/b1.txt'),
            ('24d94dc08eb77438e4ead192b3f7d1c7bdf1a9e1', 'B/b2.txt'),
        ], [(node.created_rev, node.path) for node in nodes])

        # At the root commit, every node was created there.
        root_commit = 'c5b01c74e125aa034a1d4ae31dc16f1897a73779'
        root_node = repos.get_node('', root_commit)
        nodes = list(mod._iter_nodes(root_node))
        self.assertEqual([root_commit] * 7, [node.rev for node in nodes])
        self.assertEqual([
            (root_commit, ''),
            (root_commit, 'A'),
            (root_commit, 'A/a1.txt'),
            (root_commit, 'A/a2.txt'),
            (root_commit, 'B'),
            (root_commit, 'B/b1.txt'),
            (root_commit, 'B/b2.txt'),
        ], [(node.created_rev, node.path) for node in nodes])

    def test_colon_character_in_filename(self):
        # Filenames containing ':' must not be confused with git's
        # "<mode> <sha> <path>" output format (#12758).
        self._git_init(data=False)
        self._git_fast_import(self._data_colon_character_in_filename)
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()
        rev1 = '382e1e6b85ba20ce8a84af1a875eaa50b8e1e092'  # root commit
        rev2 = 'd8001832aad079f85a39a54a388a8b15fe31093d'
        ADD = Changeset.ADD
        MOVE = Changeset.MOVE
        FILE = Node.FILE
        cset = repos.get_changeset(rev1)
        self.assertEqual({('0100644', FILE, ADD, None, None),
                          ('0100644.txt', FILE, ADD, None, None),
                          (':100644', FILE, ADD, None, None),
                          (':100644.txt', FILE, ADD, None, None),
                          ('a100644', FILE, ADD, None, None),
                          ('a100644.txt', FILE, ADD, None, None)},
                         set(cset.get_changes()))
        cset = repos.get_changeset(rev2)
        self.assertEqual({(':100666', FILE, MOVE, ':100644', rev1)},
                         set(cset.get_changes()))

    _data_colon_character_in_filename = """\
blob
mark :1
data 0
blob
mark :2
data 16
...............
reset refs/heads/master
commit refs/heads/master
mark :3
author Joe <<EMAIL>> 1491387182 +0000
committer Joe <<EMAIL>> 1491387182 +0000
data 9
(#12758)
M 100644 :1 0100644.txt
M 100644 :1 0100644
M 100644 :1 :100644.txt
M 100644 :2 :100644
M 100644 :1 a100644.txt
M 100644 :1 a100644
commit refs/heads/master
mark :4
author Joe <<EMAIL>> 1491387183 +0000
committer Joe <<EMAIL>> 1491387183 +0000
data 16
(#12758) rename
from :3
D :100644
M 100644 :2 :100666
reset refs/heads/master
from :4
"""

    def test_submodule(self):
        # A gitlink entry (mode 160000) shows up as a content-less
        # directory node carrying the submodule commit as a property.
        subrepos_path = os.path.join(self.tmpdir, 'subrepos')
        submodule_dir = os.path.join(self.repos_path, 'sub')
        os.mkdir(subrepos_path)
        self._git_init(data=False, bare=True, cwd=subrepos_path)
        self._git_fast_import(self._data_submodule, cwd=subrepos_path)
        submodule_rev1 = '3e733d786b3529d750ee39edacea2f1c4daadca4'
        self._git_init()
        self._git('submodule', 'add', subrepos_path, 'sub')
        self._git('checkout', submodule_rev1, cwd=submodule_dir)
        self._git('add', '.gitmodules')
        self._git_commit('-a', '-m', 'init submodule')
        self._git('tag', 'v1', 'master')
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        repos.sync()
        ADD = Changeset.ADD
        EDIT = Changeset.EDIT
        FILE = Node.FILE
        DIRECTORY = Node.DIRECTORY

        rev1 = repos.normalize_rev('v1')
        cset1 = repos.get_changeset(rev1)
        self.assertEqual([('.gitmodules', FILE, ADD, None, None),
                          ('sub', DIRECTORY, ADD, None, None)],
                         sorted(cset1.get_changes()))
        node1 = repos.get_node('sub', rev1)
        self.assertIsNone(node1.get_content())
        self.assertIsNone(node1.get_content_length())
        self.assertEqual(DIRECTORY, node1.kind)
        self.assertEqual({'mode': '160000', 'commit': submodule_rev1},
                         node1.get_properties())
        self.assertEqual([], list(node1.get_entries()))

        # Moving the submodule pointer registers as an EDIT of 'sub'.
        submodule_rev2 = '409058dc98500b5685c52a091cc9f44f3975113e'
        self._git('checkout', submodule_rev2, cwd=submodule_dir)
        self._git_commit('-a', '-m', 'change rev of the submodule')
        self._git('tag', 'v2', 'master')
        repos.sync()
        rev2 = repos.normalize_rev('v2')
        cset2 = repos.get_changeset(rev2)
        self.assertEqual([('sub', DIRECTORY, EDIT, 'sub', rev1)],
                         sorted(cset2.get_changes()))
        node2 = repos.get_node('sub', rev2)
        self.assertEqual({'mode': '160000', 'commit': submodule_rev2},
                         node2.get_properties())

    _data_submodule = """\
blob
mark :1
data 0
blob
mark :2
data 16
...............
# <= 3e733d786b3529d750ee39edacea2f1c4daadca4
reset refs/heads/master
commit refs/heads/master
mark :3
author Joe <<EMAIL>> 1512643825 +0000
committer Joe <<EMAIL>> 1512643825 +0000
data 12
root commit
M 100644 :1 001-001.txt
M 100644 :2 001-002.txt
M 100644 :2 001-003.txt
# <= 409058dc98500b5685c52a091cc9f44f3975113e
commit refs/heads/master
mark :4
author Joe <<EMAIL>> 1512643826 +0000
committer Joe <<EMAIL>> 1512643826 +0000
data 10
2nd commit
from :3
M 100644 :1 002-001.txt
M 100644 :2 002-002.txt
reset refs/heads/master
from :4
"""

    def _get_quickjump_names(self, repos):
        # Quickjump entries are (type, name, path, rev) tuples.
        return list(name for type, name, path, rev
                    in repos.get_quickjump_entries('HEAD'))
class GitCachedRepositoryTestCase(GitRepositoryTestCase):
    """Repeats the base repository tests with the cache-backed git wrapper
    enabled, plus cache-specific sync/resync scenarios.
    """

    # Makes the shared fixture hand out GitCachedRepository instances.
    cached_repository = 'enabled'

    def test_repository_instance(self):
        """The repository manager returns the cached wrapper class."""
        self._git_init()
        self._add_repository('gitrepos')
        self.assertEqual(GitCachedRepository,
                         type(self._repomgr.get_repository('gitrepos')))

    def test_sync(self):
        """sync() reports revisions oldest-first via the feedback callback,
        and an aborted clean resync (StopSync) can be restarted."""
        self._git_init()
        for idx in xrange(3):
            filename = 'file%d.txt' % idx
            create_file(os.path.join(self.repos_path, filename))
            self._git('add', filename)
            self._git_commit('-a', '-m', filename,
                             date=datetime(2014, 2, 2, 17, 12, idx))
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        revs = [entry[1] for entry in repos.repos.get_node('').get_history()]
        revs.reverse()  # oldest-first, to match the sync feedback order
        revs2 = []
        def feedback(rev):
            revs2.append(rev)
        repos.sync(feedback=feedback)
        self.assertEqual(revs, revs2)
        # 3 commits created above + 1 more (presumably from _git_init -- the
        # fixture is defined outside this chunk).
        self.assertEqual(4, len(revs2))

        revs2 = []
        def feedback_1(rev):
            revs2.append(rev)
            if len(revs2) == 2:
                raise StopSync
        def feedback_2(rev):
            revs2.append(rev)
        # Abort a clean resync part-way through, then resume it.
        try:
            repos.sync(feedback=feedback_1, clean=True)
        except StopSync:
            self.assertEqual(revs[:2], revs2)
            repos.sync(feedback=feedback_2)  # restart sync
        self.assertEqual(revs, revs2)

    def test_sync_file_with_invalid_byte_sequence(self):
        """A filename with undecodable bytes syncs without crashing; the bad
        bytes surface as U+FFFD replacement characters."""
        self._git_init(data=False)
        self._git_fast_import("""\
blob
mark :1
data 0
reset refs/heads/master
commit refs/heads/master
mark :2
author <NAME> <<EMAIL>> 1463639119 +0200
committer <NAME> <<EMAIL>> 1463639119 +0200
data 9
(#12322)
M 100644 :1 "\312\326\267\347\307\331.txt"
reset refs/heads/master
from :2
""")
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        revs = []
        def feedback(rev):
            revs.append(rev)
        repos.sync(feedback=feedback)
        changes = list(repos.repos.get_changeset(revs[0]).get_changes())
        self.assertEqual(1, len(changes))
        self.assertEqual(u'\ufffd\u05b7\ufffd\ufffd\ufffd.txt', changes[0][0])

    def test_sync_merge(self):
        """sync() visits every revision of a merged history exactly once,
        oldest first, and an aborted resync can be restarted."""
        self._git_init()
        self._create_merge_commit()
        self._add_repository('gitrepos')
        repos = self._repomgr.get_repository('gitrepos')
        youngest_rev = repos.repos.youngest_rev
        oldest_rev = repos.repos.oldest_rev

        revs = []
        def feedback(rev):
            revs.append(rev)
        repos.sync(feedback=feedback)
        self.assertEqual(6, len(revs))
        self.assertEqual(youngest_rev, revs[-1])
        self.assertEqual(oldest_rev, revs[0])

        revs2 = []
        def feedback_1(rev):
            revs2.append(rev)
            if len(revs2) == 3:
                raise StopSync
        def feedback_2(rev):
            revs2.append(rev)
        try:
            repos.sync(feedback=feedback_1, clean=True)
        except StopSync:
            self.assertEqual(revs[:3], revs2)
            repos.sync(feedback=feedback_2)  # restart sync
        self.assertEqual(revs, revs2)

    def test_sync_too_many_merges(self):
        """Syncing a deeply merged history must not exceed the (artificially
        lowered) recursion limit, i.e. the traversal must be iterative."""
        data = self._generate_data_many_merges(100)
        self._git_init(data=False, bare=True)
        self._git_fast_import(data)
        self._add_repository('gitrepos', bare=True)
        repos = self._repomgr.get_repository('gitrepos')
        reclimit = sys.getrecursionlimit()
        try:
            sys.setrecursionlimit(80)
            repos.sync()
        finally:
            sys.setrecursionlimit(reclimit)
        rows = self.env.db_query("SELECT COUNT(*) FROM revision "
                                 "WHERE repos=%s", (repos.id,))
        # 2 base commits + 100 * (one dev commit + one merge commit)
        self.assertEqual(202, rows[0][0])

    def _generate_data_many_merges(self, n, timestamp=1400000000):
        """Return a fast-import stream: a dev/master root pair followed by
        *n* dev-branch commits each merged back into master."""
        init = b"""\
blob
mark :1
data 0
reset refs/heads/dev
commit refs/heads/dev
mark :2
author Joe <<EMAIL>> %(timestamp)d +0000
committer Joe <<EMAIL>> %(timestamp)d +0000
data 5
root
M 100644 :1 .gitignore
commit refs/heads/master
mark :3
author Joe <<EMAIL>> %(timestamp)d +0000
committer Joe <<EMAIL>> %(timestamp)d +0000
data 7
master
from :2
M 100644 :1 master.txt
"""
        merge = b"""\
commit refs/heads/dev
mark :%(dev)d
author Joe <<EMAIL>> %(timestamp)d +0000
committer Joe <<EMAIL>> %(timestamp)d +0000
data 4
dev
from :2
M 100644 :1 dev%(dev)08d.txt
commit refs/heads/master
mark :%(merge)d
author Joe <<EMAIL>> %(timestamp)d +0000
committer Joe <<EMAIL>> %(timestamp)d +0000
data 19
Merge branch 'dev'
from :%(from)d
merge :%(dev)d
M 100644 :1 dev%(dev)08d.txt
"""
        data = io.BytesIO()
        data.write(init % {'timestamp': timestamp})
        # Marks advance by 2 per iteration: :4/:5, :6/:7, ... (dev, merge).
        for idx in xrange(n):
            data.write(merge % {'timestamp': timestamp,
                                'dev': 4 + idx * 2,
                                'merge': 5 + idx * 2,
                                'from': 3 + idx * 2})
        return data.getvalue()

    def test_sync_many_refs(self):
        """All 1500 refs are cached on the first sync(); a second sync()
        finds nothing new to report."""
        n_refs = 1500
        data = self._generate_data_many_refs(n_refs)
        self._git_init(data=False, bare=True)
        self._git_fast_import(data)
        self._add_repository('gitrepos', bare=True)
        repos = self._repomgr.get_repository('gitrepos')
        revs = []
        def feedback(rev):
            revs.append(rev)
        repos.sync(feedback)  # create cache
        self.assertEqual(n_refs + 1, len(revs))
        revs[:] = ()
        repos.sync(feedback)  # check whether all refs are cached
        self.assertEqual(0, len(revs))
        rows = self.env.db_query("SELECT COUNT(*) FROM revision "
                                 "WHERE repos=%s", (repos.id,))
        self.assertEqual(n_refs + 1, rows[0][0])

    def _generate_data_many_refs(self, n, timestamp=1400000000):
        """Return a fast-import stream: one root commit plus *n* refs,
        each a single commit branching directly off the root (:2)."""
        root_commit = """\
blob
mark :1
data 0
reset refs/heads/master
commit refs/heads/master
mark :2
author Joe <<EMAIL>> %(timestamp)d +0000
committer Joe <<EMAIL>> %(timestamp)d +0000
data 12
root commit
M 100644 :1 .gitignore
"""
        ref_commit = """
commit refs/heads/ref-%(ref)08d
mark :%(mark)d
author Joe <<EMAIL>> %(timestamp)d +0000
committer Joe <<EMAIL>> %(timestamp)d +0000
data 13
ref-%(ref)08d
from :2
"""
        data = io.BytesIO()
        data.write(to_utf8(root_commit % {'timestamp': timestamp}))
        for idx in xrange(n):
            data.write(to_utf8(ref_commit % {'timestamp': timestamp + idx,
                                             'mark': idx + 3, 'ref': idx}))
        return data.getvalue()
class GitwebProjectsRepositoryProviderTestCase(unittest.TestCase):
    """Tests for discovering repositories from a gitweb projects_list file."""

    def setUp(self):
        # Three entries: name with owner, name with .git suffix + encoded
        # owner, and a bare name without an owner field.
        self.env = EnvironmentStub()
        self.projects_base = mkdtemp()
        self.projects_list = os.path.join(self.projects_base, 'projects_list')
        with open(self.projects_list, 'w') as f:
            f.write("""
repos1 user1
repos2.git user+2+<<EMAIL>>
repos3
""")
        self.env.config.set('gitweb-repositories', 'projects_list',
                            self.projects_list)
        self.env.config.set('gitweb-repositories', 'projects_base',
                            self.projects_base)
        self.env.config.set('gitweb-repositories', 'projects_url',
                            'https://example.com/%s')

    def tearDown(self):
        self.env.shutdown()
        rmtree(self.projects_base)

    def test_project_list_path_not_found(self):
        """Warning is logged when projects_list file is not found, but
        exception is not raised.
        """
        os.remove(self.projects_list)
        provider = GitwebProjectsRepositoryProvider(self.env)
        repositories = list(provider.get_repositories())
        self.assertEqual([], repositories)

    def test_get_repositories(self):
        """Each entry yields (name, {'type', 'dir', 'url'}); the '.git'
        suffix is stripped from the name/url but kept in the dir path."""
        provider = GitwebProjectsRepositoryProvider(self.env)
        repositories = list(provider.get_repositories())
        self.assertEqual(3, len(repositories))
        self.assertEqual('repos1', repositories[0][0])
        self.assertEqual('git', repositories[0][1]['type'])
        self.assertEqual(os.path.join(self.projects_base, 'repos1'),
                         repositories[0][1]['dir'])
        self.assertEqual('https://example.com/repos1',
                         repositories[0][1]['url'])
        self.assertEqual('repos2', repositories[1][0])
        self.assertEqual('git', repositories[1][1]['type'])
        self.assertEqual(os.path.join(self.projects_base, 'repos2.git'),
                         repositories[1][1]['dir'])
        self.assertEqual('https://example.com/repos2',
                         repositories[1][1]['url'])
        self.assertEqual('repos3', repositories[2][0])
        self.assertEqual('git', repositories[2][1]['type'])
        self.assertEqual(os.path.join(self.projects_base, 'repos3'),
                         repositories[2][1]['dir'])
        self.assertEqual('https://example.com/repos3',
                         repositories[2][1]['url'])
class StopSync(Exception):
    """Raised from a sync() feedback callback to abort the sync mid-way.

    The cached-repository tests use it to verify that an interrupted sync
    can be restarted later without losing or repeating revisions.
    """
    pass
class GitConnectorTestCase(BaseTestCase):
    """Checks that the git connector reports its version in the system info."""

    def _git_version_from_system_info(self):
        """Return the version recorded under 'GIT', or None if absent.

        Keeps scanning to the end so a later duplicate entry wins, matching
        the original lookup behavior.
        """
        found = None
        for key, value in self.env.system_info:
            if key == 'GIT':
                found = value
        return found

    def test_get_system_info(self):
        self.assertIsNotNone(self._git_version_from_system_info())
def test_suite():
    """Build the suite for the git backend tests.

    The whole suite is skipped (with a console notice) when no ``git``
    command-line binary was detected.
    """
    suite = unittest.TestSuite()
    if GitCommandMixin.git_bin:
        cases = (SanityCheckingTestCase,
                 PersistentCacheTestCase,
                 HistoryTimeRangeTestCase,
                 GitNormalTestCase,
                 GitRepositoryTestCase,
                 GitCachedRepositoryTestCase,
                 GitConnectorTestCase,
                 GitwebProjectsRepositoryProviderTestCase)
        for case in cases:
            suite.addTest(unittest.makeSuite(case))
    else:
        print("SKIP: tracopt/versioncontrol/git/tests/git_fs.py (git cli "
              "binary, 'git', not found)")
    return suite
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
<gh_stars>0
import tkinter as tk # Note: for Python 2.x, use "import Tkinter as tk"
from PIL import ImageTk, Image
import math
import random
import os
import glob
# --- user-tweakable settings for the presenter-picker window ---
company_name = 'Company\nName\n' # replace with actual company name
canvas_color = '#FDF5E6' # choose background color of canvas
text_color = '#8B8B83' # choose text color
ctd_color = 'darkred' # choose countdown color
CTD_START = 3 # set the start of the countdown (seconds)
class Position:
    """Centre point and radius of an image travelling on a circular path."""

    def __init__(self, x, y, radius):
        self.x = x
        self.y = y
        self.radius = radius

    def boundaries(self):
        """Return bounding-box coords (right, top, left, bottom) of the circle."""
        right = self.x + self.radius*cos(0)
        top = self.y + self.radius*sin(270)
        left = self.x + self.radius*cos(180)
        bottom = self.y + self.radius*sin(90)
        return (right, top, left, bottom)
def circle(x, y, radius, delta_ang, start_ang=0):
    """Endlessly yield (px, py) points on a circle centred at (x, y).

    Successive points advance by delta_ang degrees, starting at start_ang.
    """
    ang = start_ang
    while True:
        ang %= 360
        yield x + radius*cos(ang), y + radius*sin(ang)
        ang += delta_ang
def update_position(canvas, id, pic_obj, path_iter):
    """Advance one image to the next point on its path, then reschedule.

    canvas.coords(id) returns [] once the item has been deleted, which stops
    the per-item update chain.
    """
    if canvas.coords(id) != []:
        pic_obj.x, pic_obj.y = next(path_iter)
        x0, y0 = canvas.coords(id)
        # NOTE(review): halving the current canvas coords to recover the "old"
        # position looks suspicious -- presumably compensating for how the
        # items/moves accumulate, but confirm against the intended motion.
        oldx, oldy = x0 // 2, y0 // 2
        dx, dy = pic_obj.x - oldx, pic_obj.y - oldy
        canvas.move(id, dx, dy)
        canvas.after(DELAY, update_position, canvas, id, pic_obj, path_iter)
def accel():
    """Restart every image on a 5x-faster circular path (shuffle effect)."""
    for j in range(len(pictures)):
        path_iter = circle(x_start[j], y_start[j],
                           orbital_radius[j], CP_INCR[j]*5)
        next(path_iter)  # prime the generator past its first point
        wdw.after(DELAY, update_position, canvas, img[j], img_obj[j], path_iter)
def reveal():
    """Pick a random presenter and display their name and enlarged photo."""
    ran = random.randrange(0,len(pictures))
    canvas.create_text(WD/2, HT/6, text='THE NEXT PRESENTER IS', fill=text_color,
                       font=('Arial','35'), justify='c')
    # Underscores in the file-derived name become spaces for display.
    canvas.create_text(WD/2, HT/1.25, text='%s' % presenters[ran].replace(
                       "_", " "), fill=text_color,
                       font=('Arial','30', 'bold'), justify='c')
    # NOTE: Image.ANTIALIAS is deprecated in newer Pillow (use LANCZOS).
    tmp = Image.open(path+'/Photos/'+pictures[ran]).resize((250, 300),
                     Image.ANTIALIAS)
    tmp1 = ImageTk.PhotoImage(tmp)
    canvas.create_image(WD/2, HT/2, image=tmp1)
    # mainloop() here keeps this frame (and thus tmp1's reference) alive so
    # the PhotoImage is not garbage-collected off the canvas.
    wdw.mainloop()
def ctd():
    """Create the countdown text item; stored on the function object so
    tick() can update it via ctd.txt."""
    ctd.txt = canvas.create_text(WD/2, HT/2, text=TIME, fill=ctd_color,
                                 font=('Arial','150', 'bold'))
def tick():
    """Decrement the global countdown once a second until it reaches zero."""
    global TIME
    TIME -= 1
    canvas.itemconfigure(ctd.txt, text=TIME)
    if TIME != 0:
        canvas.after(1000, tick)
def delete():
    """Remove every shuffling photo item from the canvas."""
    for item in img:
        canvas.delete(item)
def delay():
    """Start-button handler: schedule the whole shuffle/countdown/reveal run."""
    wdw.after(DELAY, accel)            # speed the photos up
    wdw.after(CTD_START*1000, reveal)  # show the winner when the countdown ends
    wdw.after(1, ctd)                  # draw the countdown digit
    wdw.after(1, tick)                 # start decrementing it each second
    wdw.after(CTD_START*1000, delete)  # clear the shuffling photos
    b1.config(state='disabled')        # prevent double-starting
# --- module-level setup: presenter data, constants, and the main window ---

# Photos/ is expected to hold one jpg per presenter named Presenter_<n>.jpg.
path = os.path.dirname(os.path.abspath(__file__))
PRESENTER_NUM = len(glob.glob(path+'/Photos/*.jpg'))
presenters = ['Presenter_'+str(num) for num in range(1, PRESENTER_NUM+1)]
pictures = [presenters[num]+'.jpg' for num in range(len(presenters))]

TIME = CTD_START+1   # countdown value; tick() decrements it once a second
DELAY = 12           # ms between animation frames
HT, WD = 700, 800    # canvas height and width in pixels

# Per-image random angular step so the photos drift at different speeds.
CP_INCR = [round(random.uniform(0.8,1.6),4) for i in range(len(pictures))]

# Degree-based trig helpers used by Position/circle above.
sin = lambda degs: math.sin(math.radians(degs))
cos = lambda degs: math.cos(math.radians(degs))

wdw = tk.Tk()
wdw.title('Next Presenter')

# Slightly jittered starting centres for each photo's orbit.
x_start, y_start = [], []
for i in range(len(pictures)):
    x_start.append(WD/4+round(random.uniform(-4, 4),2))
    y_start.append(HT/4+round(random.uniform(-4, 4),2))

canvas = tk.Canvas(wdw, bg=canvas_color, height=HT, width=WD)
canvas.pack()

# NOTE(review): 'logo.png' is opened relative to the CWD, unlike the photos
# which are resolved against this file's directory -- confirm this is intended.
logo_tmp = ImageTk.PhotoImage(Image.open('logo.png').resize((150, 95),
                              Image.ANTIALIAS))
canvas.create_image(WD-5, HT-2, image=logo_tmp, anchor='se')
canvas.create_image(5, HT-5, image=logo_tmp, anchor='sw')
canvas.create_text(WD/2, HT/2, text = company_name,
                   fill=text_color, font=('Arial','25', 'bold'), justify='c')

# One orbiting canvas image per presenter photo; tmp keeps the PhotoImage
# references alive so Tk does not garbage-collect them off the canvas.
img, img_obj, tmp = [], [], []
orbital_radius = []
for x in range(len(pictures)):
    orbital_radius.append(round(random.uniform(108,122),2))
for i in range(len(pictures)):
    img_obj.append(Position(WD/4, HT/4, 50))
    tmp.append(ImageTk.PhotoImage(Image.open(path+'/Photos/'+
                                  pictures[i]).resize((160, 200),
                                  Image.ANTIALIAS)))
    img.append(canvas.create_image(100, 200, image=tmp[i]))
    path_iter = circle(x_start[i], y_start[i], orbital_radius[i], CP_INCR[i])
    next(path_iter)
    wdw.after(DELAY, update_position, canvas, img[i], img_obj[i], path_iter)

# Start / Close buttons.
b1 = tk.Button(wdw, text='Start', width=10, height=3, command=delay,
               anchor='c', activeforeground='blue', font=('Arial','20'))
b1.pack(side='left', fill='both', expand=True, padx='20', pady='20')
ext = tk.Button(wdw, text='Close', width=10, height=3, command=wdw.destroy,
                anchor='c', activeforeground='blue', font=('Arial','20'))
ext.pack(side='left', fill='both', expand=True, padx='20', pady='20')

wdw.mainloop()
import sys
import pandas as pd
from relational_querier import RelationalQuerier
# Loads the DataSUS SRAG yearly extracts (2013-2018) into a single DataFrame
def loadcsv():
srag_2013 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/4919f202-083a-4fac-858d-99fdf1f1d765/download/influd13_limpo_final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2014 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/2182aff1-4e8b-4aee-84fc-8c9f66378a2b/download/influd14_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2015 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/97cabeb6-f09e-47a5-8358-4036fb10b535/download/influd15_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2016 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/dbb0fd9b-1345-47a5-86db-d3d2f4868a11/download/influd16_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2017 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/aab28b3c-f6b8-467f-af0b-44889a062ac6/download/influd17_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2018 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/a7b19adf-c6e6-4349-a309-7a1ec0f016a4/download/influd18_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_201314 = srag_2013.merge(srag_2014, how='outer')
srag_20131415 = srag_201314.merge(srag_2015, how='outer')
srag_2013141516 = srag_20131415.merge(srag_2016, how='outer')
srag_201314151617 = srag_2013141516.merge(srag_2017, how='outer')
srag_20131415161718 = srag_201314151617.merge(srag_2018, how='outer')
return srag_20131415161718
# Generates a .csv and saves it for quicker reruns
def gencsv():
    """Build the combined SRAG dataframe and cache it as srag_full.csv."""
    loadcsv().to_csv("srag_full.csv", index=True)
    print("srag_full.csv has been successfully generated")
def add_data_relational(db, df = None, csv = None):
    """Insert every row of *df* (or of the CSV at *csv*) into the SRAG table.

    Args:
        db: RelationalQuerier-like object with query(sql, params) and commit().
        df: dataframe whose row order matches the SRAG column list below;
            takes precedence over csv when both are given.
        csv: path of a CSV to load when df is not supplied.

    All rows are inserted in one transaction, committed at the end.
    """
    if df is None and csv is not None:
        df = pd.read_csv(csv)
    # Loop-invariant: build the (parameterized -- never string-formatted)
    # INSERT statement once instead of once per row, as the original did.
    query = """
INSERT INTO SRAG
(ID_MUNICIP ,SEM_NOT ,SG_UF_NOT ,DT_SIN_PRI ,DT_NASC ,NU_IDADE_N ,CS_SEXO ,CS_GESTANT ,
CS_RACA ,CS_ESCOL_N ,SG_UF ,ID_MN_RESI ,ID_OCUPA_N ,VACINA ,FEBRE ,TOSSE ,CALAFRIO ,DISPNEIA ,
GARGANTA ,ARTRALGIA ,MIALGIA ,CONJUNTIV ,CORIZA ,DIARREIA ,OUTRO_SIN ,OUTRO_DES ,CARDIOPATI ,
PNEUMOPATI ,RENAL ,HEMOGLOBI ,IMUNODEPRE ,TABAGISMO ,METABOLICA ,OUT_MORBI ,MORB_DESC ,HOSPITAL ,
DT_INTERNA ,CO_UF_INTE ,CO_MU_INTE ,DT_PCR ,PCR_AMOSTR ,PCR_OUT ,PCR_RES ,PCR_ETIOL ,PCR_TIPO_H ,
PCR_TIPO_N ,DT_CULTURA ,CULT_AMOST ,CULT_OUT ,CULT_RES ,DT_HEMAGLU ,HEMA_RES ,HEMA_ETIOL ,HEM_TIPO_H ,
HEM_TIPO_N ,DT_RAIOX ,RAIOX_RES ,RAIOX_OUT ,CLASSI_FIN ,CLASSI_OUT ,CRITERIO ,TPAUTOCTO ,DOENCA_TRA ,
EVOLUCAO ,DT_OBITO ,DT_ENCERRA ,DT_DIGITA ,SRAG2013FINAL ,OBES_IMC ,OUT_AMOST ,DS_OAGEETI ,DS_OUTMET ,
DS_OUTSUB ,OUT_ANTIV ,DT_COLETA ,DT_ENTUTI ,DT_ANTIVIR ,DT_IFI ,DT_OUTMET ,DT_PCR_1 ,DT_SAIDUTI ,
RES_ADNO ,AMOSTRA ,HEPATICA ,NEUROLOGIC ,OBESIDADE ,PUERPERA ,SIND_DOWN ,RES_FLUA ,RES_FLUB ,UTI ,
IFI ,PCR ,RES_OUTRO ,OUT_METODO ,RES_PARA1 ,RES_PARA2 ,RES_PARA3 ,DESC_RESP ,SATURACAO ,ST_TIPOFI ,
TIPO_PCR ,ANTIVIRAL ,SUPORT_VEN ,RES_VSR ,RES_FLUASU ,DT_UT_DOSE)
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,
?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,
?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);"""
    # enumerate replaces the original hand-rolled counter; the unused
    # `result =` binding from the original is dropped.
    for i, data in enumerate(df.values, start=1):
        db.query(query, data)
        print(i)  # crude progress indicator, one line per inserted row
    db.commit()
# Module-level database connection, shared with the __main__ block below.
db = RelationalQuerier()
def main():
    """Clean the combined SRAG data: map city codes to names, drop bad rows.

    Reads srag_full.csv if cached (rebuilding it otherwise), rewrites the
    ID_MUNICIP code column to city names using the IBGE table, drops rows
    whose code has no match, and caches the result as srag_full_cities.csv.

    Returns:
        pandas.DataFrame: the cleaned dataframe.
    """
    if("-gencsv" in sys.argv):
        gencsv()
    try:
        srag_full = pd.read_csv("srag_full.csv")
    except FileNotFoundError:
        srag_full = loadcsv()
    IBGE = pd.read_csv("IBGE_Municipios.csv")
    # Uses a dict for optimized city code to city name conversion
    municipdict = {}
    # Silence SettingWithCopyWarning for the chained assignments below.
    pd.options.mode.chained_assignment = None
    for i in range (len(IBGE['Código Município Completo'])):
        # Keep only the first six digits of the full municipality code.
        IBGE['Código Município Completo'][i] = str(IBGE['Código Município Completo'][i])[0:6]
        municipdict[IBGE['Código Município Completo'][i]] = IBGE['Nome_Município'][i]
    count = 0
    for i in range(len(srag_full['ID_MUNICIP'])):
        try:
            # NOTE(review): the dict keys built above are *strings*, but this
            # lookup key is int(...) -- an int key never matches a str key,
            # so this may always raise KeyError; confirm against real dtypes.
            srag_full['ID_MUNICIP'][i] = municipdict[int(srag_full['ID_MUNICIP'][i])]
        except KeyError: # city code not found: drop the row containing it
            print("Erro: Chave " + srag_full['ID_MUNICIP'][i] + " na linha " + str(i) + " nao encontrada, linha sera removida dos dados")
            srag_full.drop(i, inplace = True)
            count = count + 1
    print(str(count) + " linhas foram removidas da tabela pois continham cidades invalidas")
    # Resets index column and removes redundant columns
    srag_full.reset_index(inplace = True)
    srag_full.drop(srag_full.columns[[0, 1]], axis = 1, inplace = True)
    srag_full.drop(['NU_ANO', 'SRAG2014FINAL', 'SRAG2015FINAL', 'SRAG2012FINAL', 'SRAG2017FINAL', 'SRAG2018FINAL'], axis = 1, inplace = True)
    srag_full.to_csv("srag_full_cities.csv")
    return srag_full
if __name__ == '__main__':
    # Prefer the cached, already-cleaned CSV; rebuild it otherwise.
    try:
        srag_full = pd.read_csv("srag_full_cities.csv")
    except FileNotFoundError:
        # BUG FIX: main() returns the cleaned dataframe, but its result was
        # previously discarded, leaving srag_full undefined on this path and
        # crashing the insert below with a NameError.
        srag_full = main()
    print('adicionando dados')
    add_data_relational(db, df = srag_full)
|
<reponame>shenyunhang/CSC<filename>tools/ssd/generate_noise_gt.py<gh_stars>10-100
import argparse
import os
import shutil
import subprocess
import sys
import _init_paths
from caffe.proto import caffe_pb2
from google.protobuf import text_format
from xml.etree.ElementTree import parse, Element
import cv2
import numpy as np
import math
from easydict import EasyDict as edict
import utils.im_transforms
def random_sample(height, width):
    """Draw one random box over a (height, width) image.

    Delegates to utils.im_transforms.SampleBBox with a wide scale/aspect
    range and shifts the result to 1-based coordinates.
    """
    sampler = edict()
    sampler.max_scale = 1.0
    sampler.min_scale = 0.1
    sampler.max_aspect_ratio = 10.0
    sampler.min_aspect_ratio = 0.1
    x0, y0, x1, y1 = utils.im_transforms.SampleBBox(sampler, [height, width])[:4]
    return x0 + 1, y0 + 1, x1 + 1, y1 + 1
def random_sift(xmin, ymin, xmax, ymax, height, width):
    """Randomly translate a bounding box inside an image, keeping its size.

    The box centre is re-drawn uniformly wherever the whole box still fits;
    on an axis where the box is too large to move, the original centre is
    kept.  Coordinates are 1-based inclusive, matching the VOC-style XML
    this script rewrites.

    Args:
        xmin, ymin, xmax, ymax: 1-based box corners.
        height, width: image size in pixels.

    Returns:
        (xmin, ymin, xmax, ymax): the shifted box, clamped to the image.
    """
    h = 1.0 * (ymax - ymin + 1)
    w = 1.0 * (xmax - xmin + 1)
    yctr = ymin + h / 2.0
    xctr = xmin + w / 2.0
    half_w = math.ceil(w / 2.0)
    half_h = math.ceil(h / 2.0)
    if half_w < width - half_w:
        xctr = np.random.randint(half_w, width - half_w)
    if half_h < height - half_h:
        yctr = np.random.randint(half_h, height - half_h)
    # BUG FIX: removed the stray debug `print xctr, yctr` (a Python-2 print
    # statement that also broke parsing under Python 3).
    xmin = max(1, int(xctr - w / 2.0))
    ymin = max(1, int(yctr - h / 2.0))
    xmax = min(width, int(xctr + w / 2.0))
    ymax = min(height, int(yctr + h / 2.0))
    # Sanity checks: the shifted box must stay a valid, in-image box.
    assert xmin > 0
    assert ymin > 0
    assert ymin < ymax
    assert xmin < xmax
    assert ymax <= height
    assert xmax <= width
    return xmin, ymin, xmax, ymax
if __name__ == "__main__":
    # Rewrites VOC-style XML annotations, replacing each ground-truth box
    # with a random one with probability noise_prob (for noisy-label
    # experiments), and emits a companion "noise_" list file.
    parser = argparse.ArgumentParser(
        description="Create AnnotatedDatum database")
    parser.add_argument(
        "root",
        help="The root directory which contains the images and annotations.")
    parser.add_argument(
        "listfile",
        help="The file which contains image paths and annotation info.")
    parser.add_argument(
        "outdir", help="The output directory which stores the database file.")
    args = parser.parse_args()

    root_dir = args.root
    list_file = args.listfile
    out_dir = args.outdir

    # check if root directory exists
    if not os.path.exists(root_dir):
        print "root directory: {} does not exist".format(root_dir)
        sys.exit()
    # add "/" to root directory if needed
    if root_dir[-1] != "/":
        root_dir += "/"
    # check if list file exists
    if not os.path.exists(list_file):
        print "list file: {} does not exist".format(list_file)
        sys.exit()
    # check list file format is correct: "<image path> <annotation path>"
    with open(list_file, "r") as lf:
        for line in lf.readlines():
            img_file, anno = line.strip("\n").split(" ")
            if not os.path.exists(root_dir + img_file):
                print "image file: {} does not exist".format(
                    root_dir + img_file)
                sys.exit()
            if not os.path.exists(root_dir + anno):
                print "annofation file: {} does not exist".format(
                    root_dir + anno)
                sys.exit()
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # make some noise: randomize each bndbox with probability noise_prob
    noise_prob = 0.5
    num = 0
    with open(list_file, "r") as lf:
        for line in lf.readlines():
            img_file, anno = line.strip("\n").split(" ")
            print num, line
            img = cv2.imread(os.path.join(root_dir, img_file))
            img_height, img_width, _ = img.shape
            doc = parse(os.path.join(root_dir, anno))
            root = doc.getroot()
            height = int(root.find('size').find('height').text)
            width = int(root.find('size').find('width').text)
            print height, width
            # sanity: the XML's recorded size must match the actual image
            assert height == img_height, 'height mismatch {} vs {}: {}'.format(
                height, img_height, line)
            assert width == img_width, 'width mismatch {} vs {}: {}'.format(
                width, img_width, line)
            for v1 in root:
                if v1.tag != 'object':
                    continue
                print v1.tag, v1.attrib
                for v2 in v1:
                    if v2.tag != 'bndbox':
                        continue
                    # keep this box unchanged with probability 1 - noise_prob
                    if np.random.random() > noise_prob:
                        continue
                    print v2.tag, v2.attrib
                    xmin = int(v2.find('xmin').text)
                    ymin = int(v2.find('ymin').text)
                    xmax = int(v2.find('xmax').text)
                    ymax = int(v2.find('ymax').text)
                    print xmin, ymin, xmax, ymax
                    # xmin, ymin, xmax, ymax = random_sift(
                    # xmin, ymin, xmax, ymax, height, width)
                    xmin, ymin, xmax, ymax = random_sample(height, width)
                    v2.find('xmin').text = str(xmin)
                    v2.find('ymin').text = str(ymin)
                    v2.find('xmax').text = str(xmax)
                    v2.find('ymax').text = str(ymax)
                    print xmin, ymin, xmax, ymax
            # write the (possibly modified) annotation under out_dir
            _, anno_name = os.path.split(anno)
            save_path = os.path.join(out_dir, anno_name)
            print save_path
            doc.write(save_path)
            num = num + 1

    # emit a "noise_<list>" file pairing each image with its noisy annotation
    head, list_name = os.path.split(list_file)
    list_name_noise = 'noise_{}'.format(list_name)
    list_file_noise = os.path.join(head, list_name_noise)
    ori_dir = os.path.abspath(root_dir)
    with open(list_file, "r") as lf, open(list_file_noise, 'w') as lfn:
        for line in lf.readlines():
            img_file, anno = line.strip("\n").split(" ")
            _, anno_name = os.path.split(anno)
            save_path = os.path.join(out_dir, anno_name)
            abs_path = os.path.abspath(save_path)
            rel_path = os.path.relpath(abs_path, ori_dir)
            lfn.write('{} {}\n'.format(img_file, rel_path))
|
#!/usr/bin/env python
#
# Copyright 2005,2007,2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, audio, uhd
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import slider, powermate
from gnuradio.wxgui import stdgui2, fftsink2, form
from optparse import OptionParser
import sys
import math
import wx
#////////////////////////////////////////////////////////////////////////
# Control Stuff
#////////////////////////////////////////////////////////////////////////
class my_top_block (stdgui2.std_top_block):
    """wxGui NBFM receiver: parses command-line options, builds the receive
    path and control widgets, and wires an optional Powermate knob to
    frequency/volume adjustment.
    """

    def __init__(self,frame,panel,vbox,argv):
        stdgui2.std_top_block.__init__ (self,frame,panel,vbox,argv)

        parser=OptionParser(option_class=eng_option)
        parser.add_option("-a", "--args", type="string", default="",
                          help="UHD device address args [default=%default]")
        parser.add_option("", "--spec", type="string", default=None,
                          help="Subdevice of UHD device where appropriate")
        parser.add_option("-A", "--antenna", type="string", default=None,
                          help="select Rx Antenna where appropriate")
        parser.add_option("-f", "--freq", type="eng_float", default=146.585e6,
                          help="set frequency to FREQ", metavar="FREQ")
        parser.add_option("-g", "--gain", type="eng_float", default=None,
                          help="set gain in dB (default is midpoint)")
        parser.add_option("-V", "--volume", type="eng_float", default=None,
                          help="set volume (default is midpoint)")
        parser.add_option("-O", "--audio-output", type="string", default="default",
                          help="pcm device name. E.g., hw:0,0 or surround51 or /dev/dsp")
        parser.add_option("-N", "--no-gui", action="store_true", default=False)
        (options, args) = parser.parse_args()
        if len(args) != 0:
            parser.print_help()
            sys.exit(1)

        # Convenience: interpret small frequency values as MHz.
        if options.freq < 1e6:
            options.freq *= 1e6

        self.frame = frame
        self.panel = panel

        # Knob mode: "FREQ" tunes; anything else ("VOL") adjusts volume.
        self.state = "FREQ"
        self.freq = 0
        self.freq_step = 25e3

        self.rxpath = receive_path(options.args, options.spec, options.antenna,
                                   options.gain, options.audio_output)
        self.connect(self.rxpath)

        self._build_gui(vbox, options.no_gui)

        # set initial values
        if options.volume is not None:
            self.set_volume(options.volume)
        if not(self.set_freq(options.freq)):
            self._set_status_msg("Failed to set initial frequency")
        self.set_gain(self.rxpath.gain)            # update gui
        self.set_volume(self.rxpath.volume)        # update gui
        self.set_squelch(self.rxpath.threshold())  # update gui

    def _set_status_msg(self, msg, which=0):
        """Show *msg* in status-bar field *which*."""
        self.frame.GetStatusBar().SetStatusText(msg, which)

    def _build_gui(self, vbox, no_gui):
        """Create the FFT displays (unless no_gui) and the control form."""

        def _form_set_freq(kv):
            # Adapter: form callbacks receive the whole key/value dict.
            return self.set_freq(kv['freq'])

        self.src_fft = None
        # Disabled by the 'if 0' guard, kept as in the original source.
        if 0 and not(no_gui):
            self.src_fft = fftsink2.fft_sink_c(self.panel,
                                               title="Data from USRP",
                                               fft_size=512,
                                               sample_rate=self.rxpath.if_rate,
                                               ref_scale=32768.0,
                                               ref_level=0,
                                               y_per_div=10,
                                               y_divs=12)
            self.connect (self.rxpath.u, self.src_fft)
            vbox.Add (self.src_fft.win, 4, wx.EXPAND)
        if 1 and not(no_gui):
            rx_fft = fftsink2.fft_sink_c(self.panel,
                                         title="Post s/w Resampling",
                                         fft_size=512,
                                         sample_rate=self.rxpath.quad_rate,
                                         ref_level=80,
                                         y_per_div=20)
            self.connect (self.rxpath.resamp, rx_fft)
            vbox.Add (rx_fft.win, 4, wx.EXPAND)
        if 1 and not(no_gui):
            post_deemph_fft = fftsink2.fft_sink_f(self.panel,
                                                  title="Post Deemph",
                                                  fft_size=512,
                                                  sample_rate=self.rxpath.audio_rate,
                                                  y_per_div=10,
                                                  ref_level=-40)
            self.connect (self.rxpath.fmrx.deemph, post_deemph_fft)
            vbox.Add (post_deemph_fft.win, 4, wx.EXPAND)
        # NOTE(review): dead code -- disabled, and it references names that
        # are undefined here (audio_rate, self.guts, post_filt, fft_win4).
        if 0:
            post_filt_fft = fftsink2.fft_sink_f(self.panel,
                                                title="Post Filter",
                                                fft_size=512,
                                                sample_rate=audio_rate,
                                                y_per_div=10,
                                                ref_level=-40)
            self.connect (self.guts.audio_filter, post_filt)
            vbox.Add (fft_win4, 4, wx.EXPAND)

        # control area form at bottom
        self.myform = myform = form.form()
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)
        myform['freq'] = form.float_field(
            parent=self.panel, sizer=hbox, label="Freq", weight=1,
            callback=myform.check_input_and_call(_form_set_freq,
                                                 self._set_status_msg))
        #hbox.Add((5,0), 0)
        #myform['freq_slider'] = \
        #    form.quantized_slider_field(parent=self.panel, sizer=hbox, weight=3,
        #                                range=(87.9e6, 108.1e6, 0.1e6),
        #                                callback=self.set_freq)
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)

        myform['volume'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Volume",
                                        weight=3, range=self.volume_range(),
                                        callback=self.set_volume)
        hbox.Add((5,0), 0)
        myform['squelch'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Squelch",
                                        weight=3, range=self.rxpath.squelch_range(),
                                        callback=self.set_squelch)
        g = self.rxpath.u.get_gain_range()
        hbox.Add((5,0), 0)
        myform['gain'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Gain",
                                        weight=3, range=(g.start(), g.stop(), g.step()),
                                        callback=self.set_gain)
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)

        # Optional Griffin Powermate / Contour knob for hands-on control.
        try:
            self.knob = powermate.powermate(self.frame)
            self.rot = 0
            powermate.EVT_POWERMATE_ROTATE (self.frame, self.on_rotate)
            powermate.EVT_POWERMATE_BUTTON (self.frame, self.on_button)
        except:
            print "FYI: No Powermate or Contour Knob found"

    def on_rotate (self, event):
        """Accumulate knob clicks; every 3 clicks step freq or volume."""
        self.rot += event.delta
        if (self.state == "FREQ"):
            if self.rot >= 3:
                self.set_freq(self.freq + self.freq_step)
                self.rot -= 3
            elif self.rot <=-3:
                self.set_freq(self.freq - self.freq_step)
                self.rot += 3
        else:
            step = self.volume_range()[2]
            if self.rot >= 3:
                self.set_volume(self.rxpath.volume + step)
                self.rot -= 3
            elif self.rot <=-3:
                self.set_volume(self.rxpath.volume - step)
                self.rot += 3

    def on_button (self, event):
        """Knob press toggles between frequency and volume control."""
        if event.value == 0:        # button up
            return
        self.rot = 0
        if self.state == "FREQ":
            self.state = "VOL"
        else:
            self.state = "FREQ"
        self.update_status_bar ()

    def set_squelch(self, threshold_in_db):
        """Forward the squelch threshold to the receive path; refresh slider."""
        self.rxpath.set_squelch(threshold_in_db)
        self.myform['squelch'].set_value(self.rxpath.threshold())

    def set_volume (self, vol):
        """Forward the volume to the receive path; refresh slider and status."""
        self.rxpath.set_volume(vol)
        self.myform['volume'].set_value(self.rxpath.volume)
        self.update_status_bar ()

    def set_freq(self, target_freq):
        """Tune to *target_freq* in Hz; returns True on success."""
        r = self.rxpath.set_freq(target_freq)
        if r:
            self.freq = target_freq
            self.myform['freq'].set_value(target_freq)          # update displayed value
            #self.myform['freq_slider'].set_value(target_freq)  # update displayed value
            self.update_status_bar()
            self._set_status_msg("OK", 0)
            return True
        self._set_status_msg("Failed", 0)
        return False

    def set_gain(self, gain):
        """Apply RF gain and refresh the slider."""
        self.myform['gain'].set_value(gain)     # update displayed value
        self.rxpath.set_gain(gain)

    def update_status_bar (self):
        """Refresh the volume/mode status field and the FFT centre freq."""
        msg = "Volume:%r Setting:%s" % (self.rxpath.volume, self.state)
        self._set_status_msg(msg, 1)
        if self.src_fft:
            self.src_fft.set_baseband_freq(self.freq)

    def volume_range(self):
        """(min, max, step) of the volume control, in dB."""
        return (-20.0, 0.0, 0.5)
#////////////////////////////////////////////////////////////////////////
# Receive Path
#////////////////////////////////////////////////////////////////////////
# Squelch placement (see receive_path wiring): True applies simple_squelch_cc
# to the complex baseband before demodulation; False applies standard_squelch
# to the audio after demodulation.
USE_SIMPLE_SQUELCH = False
class receive_path(gr.hier_block2):
    """UHD source -> channel filter/resampler -> NBFM demod -> audio sink.

    Squelch is inserted either before the demodulator (complex baseband) or
    after it (audio), depending on USE_SIMPLE_SQUELCH.
    """

    def __init__(self, args, spec, antenna, gain, audio_output):
        gr.hier_block2.__init__(self, "receive_path",
                                gr.io_signature(0, 0, 0), # Input signature
                                gr.io_signature(0, 0, 0)) # Output signature

        self.u = uhd.usrp_source(device_addr=args, stream_args=uhd.stream_args('fc32'))

        # Set the subdevice spec
        if(spec):
            self.u.set_subdev_spec(spec, 0)

        # Set the antenna
        if(antenna):
            self.u.set_antenna(antenna, 0)

        self.if_rate = 256e3      # requested USRP sample rate
        self.quad_rate = 64e3     # rate into the FM demodulator
        self.audio_rate = 32e3    # sound-card rate

        self.u.set_samp_rate(self.if_rate)
        dev_rate = self.u.get_samp_rate()  # rate actually granted by the device

        # Create filter to get actual channel we want
        nfilts = 32
        chan_coeffs = filter.firdes.low_pass(nfilts,          # gain
                                             nfilts*dev_rate, # sampling rate
                                             8e3,             # low pass cutoff freq
                                             2e3,             # width of trans. band
                                             filter.firdes.WIN_HANN) # filter type

        rrate = self.quad_rate / dev_rate
        self.resamp = filter.pfb.arb_resampler_ccf(rrate, chan_coeffs, nfilts)

        if USE_SIMPLE_SQUELCH:
            self.squelch = analog.simple_squelch_cc(20)
        else:
            self.squelch = analog.standard_squelch(self.audio_rate)

        # instantiate the guts of the single channel receiver
        self.fmrx = analog.nbfm_rx(self.audio_rate, self.quad_rate)

        # audio gain / mute block
        self._audio_gain = blocks.multiply_const_ff(1.0)

        # sound card as final sink
        audio_sink = audio.sink (int(self.audio_rate), audio_output)

        # now wire it all together
        if USE_SIMPLE_SQUELCH:
            self.connect (self.u, self.resamp, self.squelch, self.fmrx,
                          self._audio_gain, audio_sink)
        else:
            self.connect (self.u, self.resamp, self.fmrx, self.squelch,
                          self._audio_gain, audio_sink)

        if gain is None:
            # if no gain was specified, use the mid-point in dB
            g = self.u.get_gain_range()
            gain = float(g.start()+g.stop())/2

        self.set_gain(gain)

        v = self.volume_range()
        self.set_volume((v[0]+v[1])/2)

        s = self.squelch_range()
        self.set_squelch((s[0]+s[1])/2)

    def volume_range(self):
        """(min, max, step) of the volume control, in dB."""
        return (-20.0, 0.0, 0.5)

    def set_volume (self, vol):
        """Clamp *vol* to volume_range() and apply it to the audio gain."""
        g = self.volume_range()
        self.volume = max(g[0], min(g[1], vol))
        self._update_audio_gain()

    def _update_audio_gain(self):
        # NOTE(review): 10**(dB/10) is a power ratio; an amplitude multiplier
        # would conventionally be 10**(dB/20) -- confirm the intended scale.
        self._audio_gain.set_k(10**(self.volume/10))

    def squelch_range(self):
        """(min, max, step) supported by the underlying squelch block."""
        r = self.squelch.squelch_range()
        #print "squelch_range: ", r
        return r

    def set_squelch(self, threshold):
        """Set the squelch threshold (units depend on the squelch block)."""
        #print "SQL =", threshold
        self.squelch.set_threshold(threshold)

    def threshold(self):
        """Return the current squelch threshold."""
        t = self.squelch.threshold()
        #print "t =", t
        return t

    def set_freq(self, target_freq):
        """
        Set the center frequency we're interested in.

        Args:
            target_freq: frequency in Hz

        Returns:
            bool: True if the device accepted the tune request.
        """
        r = self.u.set_center_freq(target_freq)
        if r:
            return True
        return False

    def set_gain(self, gain):
        """Record and apply the RF gain in dB."""
        self.gain = gain
        self.u.set_gain(gain)
# ////////////////////////////////////////////////////////////////////////
#                                 Main
# ////////////////////////////////////////////////////////////////////////

if __name__ == '__main__':
    # my_top_block and stdgui2 are defined/imported elsewhere in this file;
    # stdapp starts the wx GUI main loop hosting the receiver flowgraph.
    app = stdgui2.stdapp (my_top_block, "USRP NBFM RX")
    app.MainLoop ()
|
"""Miscellaneous morphology functions."""
import numpy as np
import functools
from scipy import ndimage as ndi
from .._shared.utils import warn
from .selem import _default_selem
# Our function names don't exactly correspond to ndimage's.
# This dictionary translates from our names to scipy's.
funcs = ('erosion', 'dilation', 'opening', 'closing')
skimage2ndimage = {x: 'grey_' + x for x in funcs}

# These function names are the same in ndimage.
funcs = ('binary_erosion', 'binary_dilation', 'binary_opening',
         'binary_closing', 'black_tophat', 'white_tophat')
skimage2ndimage.update({x: x for x in funcs})
def default_selem(func):
    """Decorator to add a default structuring element to morphology functions.

    Parameters
    ----------
    func : function
        A morphology function such as erosion, dilation, opening, closing,
        white_tophat, or black_tophat.

    Returns
    -------
    func_out : function
        The function, using a default structuring element of same dimension
        as the input image with connectivity 1.
    """
    @functools.wraps(func)
    def wrapper(image, selem=None, *args, **kwargs):
        # Fall back to a connectivity-1 element matching the image dimension.
        effective_selem = _default_selem(image.ndim) if selem is None else selem
        return func(image, selem=effective_selem, *args, **kwargs)

    return wrapper
def _check_dtype_supported(ar):
# Should use `issubdtype` for bool below, but there's a bug in numpy 1.7
if not (ar.dtype == bool or np.issubdtype(ar.dtype, np.integer)):
raise TypeError("Only bool or integer image types are supported. "
"Got %s." % ar.dtype)
def remove_small_objects(ar, min_size=64, connectivity=1, in_place=False):
    """Remove objects smaller than the specified size.

    Expects `ar` to be an array with labeled objects, and removes objects
    smaller than `min_size`. If `ar` is bool, the image is first labeled,
    so bool and 0-and-1 integer arrays can behave differently.

    Parameters
    ----------
    ar : ndarray (arbitrary shape, int or bool type)
        The array containing the objects of interest. If the array type is
        int, the ints must be non-negative.
    min_size : int, optional (default: 64)
        The smallest allowable object size.
    connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
        The connectivity defining the neighborhood of a pixel. Used during
        labelling if `ar` is bool.
    in_place : bool, optional (default: False)
        If ``True``, remove the objects in the input array itself.
        Otherwise, make a copy.

    Raises
    ------
    TypeError
        If the input array is of an invalid type, such as float or string.
    ValueError
        If the input array contains negative values.

    Returns
    -------
    out : ndarray, same shape and type as input `ar`
        The input array with small connected components removed.

    Examples
    --------
    >>> from skimage import morphology
    >>> a = np.array([[0, 0, 0, 1, 0],
    ...               [1, 1, 1, 0, 0],
    ...               [1, 1, 1, 0, 1]], bool)
    >>> b = morphology.remove_small_objects(a, 6)
    >>> b
    array([[False, False, False, False, False],
           [ True,  True,  True, False, False],
           [ True,  True,  True, False, False]], dtype=bool)
    """
    # Raising type error if not int or bool
    _check_dtype_supported(ar)

    out = ar if in_place else ar.copy()

    # shortcut for efficiency: nothing can be smaller than size 0
    if min_size == 0:
        return out

    if out.dtype == bool:
        # Boolean input: label connected components first.
        neighborhood = ndi.generate_binary_structure(ar.ndim, connectivity)
        ccs = np.zeros_like(ar, dtype=np.int32)
        ndi.label(ar, neighborhood, output=ccs)
    else:
        # Integer input is assumed to contain labels already.
        ccs = out

    try:
        component_sizes = np.bincount(ccs.ravel())
    except ValueError:
        raise ValueError("Negative value labels are not supported. Try "
                         "relabeling the input with `scipy.ndimage.label` or "
                         "`skimage.morphology.label`.")

    if len(component_sizes) == 2 and out.dtype != bool:
        warn("Only one label was provided to `remove_small_objects`. "
             "Did you mean to use a boolean array?")

    # Zero out every pixel belonging to an undersized component.
    undersized = component_sizes < min_size
    out[undersized[ccs]] = 0

    return out
def remove_small_holes(ar, area_threshold=64, connectivity=1, in_place=False,
                       min_size=None):
    """Remove contiguous holes smaller than the specified size.

    Parameters
    ----------
    ar : ndarray (arbitrary shape, int or bool type)
        The array containing the connected components of interest.
    area_threshold : int, optional (default: 64)
        The maximum area, in pixels, of a contiguous hole that will be filled.
        Replaces `min_size`.
    connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
        The connectivity defining the neighborhood of a pixel.
    in_place : bool, optional (default: False)
        If `True`, remove the connected components in the input array itself.
        Otherwise, make a copy.
    min_size : int, optional
        Deprecated alias for `area_threshold`; emits a deprecation warning
        and overrides `area_threshold` when given. Will be removed in 0.16.

    Raises
    ------
    TypeError
        If the input array is of an invalid type, such as float or string.
    ValueError
        If the input array contains negative values.

    Returns
    -------
    out : ndarray, same shape and type as input `ar`
        The input array with small holes within connected components removed.

    Examples
    --------
    >>> from skimage import morphology
    >>> a = np.array([[1, 1, 1, 1, 1, 0],
    ...               [1, 1, 1, 0, 1, 0],
    ...               [1, 0, 0, 1, 1, 0],
    ...               [1, 1, 1, 1, 1, 0]], bool)
    >>> b = morphology.remove_small_holes(a, 2)
    >>> b
    array([[ True,  True,  True,  True,  True, False],
           [ True,  True,  True,  True,  True, False],
           [ True, False, False,  True,  True, False],
           [ True,  True,  True,  True,  True, False]], dtype=bool)

    Notes
    -----
    If the array type is int, it is assumed that it contains already-labeled
    objects. The labels are not kept in the output image (this function always
    outputs a bool image). It is suggested that labeling is completed after
    using this function.
    """
    _check_dtype_supported(ar)

    # Creates warning if image is an integer image
    if ar.dtype != bool:
        warn("Any labeled images will be returned as a boolean array. "
             "Did you mean to use a boolean array?", UserWarning)

    if min_size is not None:
        warn("the min_size argument is deprecated and will be removed in " +
             "0.16. Use area_threshold instead.")
        area_threshold = min_size

    # Invert the image so holes become foreground objects. Inverting directly
    # from `ar` avoids the redundant `ar.copy()` the previous implementation
    # made before `np.logical_not` allocated a second array anyway.
    if in_place:
        out = np.logical_not(ar, ar)
    else:
        out = np.logical_not(ar)

    # removing small objects from the inverse of ar
    out = remove_small_objects(out, area_threshold, connectivity, in_place)

    # Invert back so the filled holes become foreground again.
    if in_place:
        out = np.logical_not(out, out)
    else:
        out = np.logical_not(out)

    return out
|
####################################
# Driftwood 2D Game Dev. Suite #
# areamanager.py #
# Copyright 2014-2017 #
# <NAME> & <NAME> #
####################################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
import math
from sdl2 import *
import tilemap
def int_greater_than_or_equal_to(x: float) -> int:
    """Return the smallest integer that is >= x (ceiling)."""
    return math.ceil(x)
def int_smaller_than(x: float) -> int:
    """Return the largest integer strictly smaller than x.

    A small epsilon (0.001) is subtracted first so that exact integer
    inputs map to the next integer down rather than to themselves.
    """
    return math.floor(x - 0.001)
class AreaManager:
    """The Area Manager

    This class manages the currently focused area.

    Attributes:
        driftwood: Base class instance.
        filename: Filename of the current area.
        tilemap: Tilemap instance for the area's tilemap.
        changed: Whether the area should be rebuilt. This is true if the area changed since last checked.
        offset: Offset at which to draw the area inside the viewport.
        refocused: Whether we have gone to a new area since last checked.
    """

    def __init__(self, driftwood):
        """AreaManager class initializer.

        Args:
            driftwood: Base class instance.
        """
        self.driftwood = driftwood
        self.filename = ""
        self.tilemap = None
        self.changed = False
        self.offset = [0, 0]
        self.refocused = False
        # Entities queued for insertion the next time an area gains focus.
        self._autospawns = []

        self.driftwood.tick.register(self._tick)

    def register(self) -> None:
        """Register our tick callback.

        NOTE(review): __init__ already registers the tick callback, so calling
        this too may register it twice -- confirm against TickManager semantics.
        """
        self.driftwood.tick.register(self._tick)

    def focus(self, filename: str) -> bool:
        """Load and make active a new area.

        Args:
            filename: Filename of the area's Tiled map file.

        Returns:
            True if succeeded, False if failed.
        """
        # Input Check. (CHECK/CheckFailure are presumably injected by the
        # engine at runtime; they are not imported in this module.)
        try:
            CHECK(filename, str)
        except CheckFailure as e:
            self.driftwood.log.msg("ERROR", "Area", "focus", "bad argument", e)
            return False

        # Ask the resource manager for the JSON map file.
        map_json = self.driftwood.resource.request_json(filename)

        if map_json:  # Did we successfully retrieve the map?
            self.tilemap = tilemap.Tilemap(self.driftwood, self)
            self.filename = filename  # Set out current filename.

            if not self.tilemap._read(filename, map_json):  # Read the tilemap.
                self.driftwood.log.msg("ERROR", "Area", "focus", "could not load tilemap", filename)
                self.tilemap = None
                return False
            self.driftwood.log.info("Area", "loaded", filename)

            # We have moved areas.
            self.refocused = True

            # Call world's global on_focus handlers.
            self.driftwood.script._call_global_triggers("on_focus")

            # If there is an on_focus function defined for this map, call it.
            if "on_focus" in self.tilemap.properties:
                args = self.tilemap.properties["on_focus"].split(',')
                if len(args) < 2:
                    # NOTE(review): this early return skips the autospawn
                    # handling below -- confirm that is intended.
                    self.driftwood.log.msg("ERROR", "Area", "Focus", "invalid on_focus event",
                                           self.tilemap.properties["on_focus"])
                    return True
                self.driftwood.script.call(*args)

            # Are we autospawning any entities?
            for ent in self._autospawns:
                self.driftwood.entity.insert(*ent)
            self._autospawns = []

            return True

        else:
            self.driftwood.log.msg("ERROR", "Area", "focus", "could not load area", filename)
            return False

    def _blur(self) -> None:
        """Call the global on_blur functions, and call the map's on_blur, if it has one. This is called when we leave
        an area.
        """
        # Call the world's global on_blur handlers.
        self.driftwood.script._call_global_triggers("on_blur")

        if "on_blur" in self.tilemap.properties:
            args = self.tilemap.properties["on_blur"].split(',')
            if len(args) < 2:
                self.driftwood.log.msg("ERROR", "Area", "blur", "invalid on_blur event",
                                       self.tilemap.properties["on_blur"])
                return
            self.driftwood.script.call(*args)

        self.tilemap = None

    def _tick(self, seconds_past: float) -> None:
        """Tick callback. Rebuilds the frame when the area has changed."""
        if self.changed:  # TODO: Only redraw portions that have changed.
            if self.refocused:
                # New area: size the frame to the whole map.
                self.driftwood.frame.prepare(self.tilemap.width * self.tilemap.tilewidth,
                                             self.tilemap.height * self.tilemap.tileheight)
                self.refocused = False
            else:
                self.driftwood.frame.clear()
            self.driftwood.frame.calc_rects()
            self.__build_frame()
            self.driftwood.frame.finish_frame()
            self.changed = False

    def __build_frame(self) -> None:
        """Build the frame and pass to WindowManager.

        For every tile and entity in each layer, copy its graphic onto the frame, then give the frame to WindowManager
        for display.
        """
        tilemap = self.tilemap
        tilewidth = tilemap.tilewidth
        tileheight = tilemap.tileheight
        offset = self.offset

        # Find the tiles that will show up if we draw them.
        x_begin, x_end, y_begin, y_end = self.calculate_visible_tile_bounds()

        # Start with the bottom layer and work up.
        for l in range(len(tilemap.layers)):
            layer = tilemap.layers[l]
            srcrect = [-1, -1, tilewidth, tileheight]
            dstrect = [-1, -1, tilewidth, tileheight]

            # Draw each tile in the layer into its position.
            for y in range(y_begin, y_end + 1):
                for x in range(x_begin, x_end + 1):
                    # Retrieve data about the tile.
                    tile = layer.tiles[y * tilemap.width + x]
                    tileset = tile.tileset

                    if not tileset and not tile.gid:
                        # This is a dummy tile, don't draw it.
                        continue

                    member = tile.members[tile._Tile__cur_member]
                    if member == -1:
                        # This tile is invisible at this point in its animation, don't draw it.
                        continue

                    # Get the source and destination rectangles needed by SDL_RenderCopy.
                    srcrect[0] = member % tileset.width * tilewidth
                    srcrect[1] = member // tileset.width * tileheight
                    dstrect[0] = x * tilewidth + offset[0]
                    dstrect[1] = y * tileheight + offset[1]

                    # Copy the tile onto our frame.
                    r = self.driftwood.frame.copy(tileset.texture, srcrect, dstrect)
                    if r < 0:
                        self.driftwood.log.msg("ERROR", "Area", "__build_frame", "SDL", SDL_GetError())

            # Draw the lights onto the layer.
            for light in self.driftwood.light.layer(l):
                srcrect = 0, 0, light.lightmap.width, light.lightmap.height
                dstrect = [light.x - light.w // 2, light.y - light.h // 2, light.w, light.h]
                dstrect[0] += self.offset[0]
                dstrect[1] += self.offset[1]
                r = self.driftwood.frame.copy(light.lightmap.texture, srcrect, dstrect, alpha=light.alpha,
                                              blendmode=light.blendmode, colormod=light.colormod)
                if r < 0:
                    self.driftwood.log.msg("ERROR", "Area", "__build_frame", "SDL", SDL_GetError())

            arearect = (
                0,
                0,
                self.tilemap.width * self.tilemap.tilewidth,
                self.tilemap.height * self.tilemap.tileheight,
            )

            # Parts of entities taller than a tile, drawn after everything
            # else on this layer so they overlap what is beneath them.
            tall_parts = []

            # Draw each entity on the layer into its position.
            for entity in self.driftwood.entity.layer(l):
                tall_amount = entity.height - self.tilemap.tileheight

                # Get the destination rectangle needed by SDL_RenderCopy.
                dstrect = [entity.x, entity.y - tall_amount, entity.width, entity.height]

                # Draw the layers of the entity.
                for srcrect in entity.srcrect():
                    srcrect = list(srcrect)

                    # Clip entities so they don't appear outside the area.
                    clip_left = 0 if arearect[0] <= dstrect[0] else arearect[0] - dstrect[0]
                    clip_top = 0 if arearect[1] <= dstrect[1] else arearect[1] - dstrect[1]
                    clip_right = 0 if dstrect[0] + dstrect[2] <= arearect[2] \
                        else (dstrect[0] + dstrect[2]) - arearect[2]
                    clip_bot = 0 if dstrect[1] + dstrect[3] <= arearect[3] \
                        else (dstrect[1] + dstrect[3]) - arearect[3]
                    srcrect[0] += clip_left
                    dstrect[0] += clip_left
                    srcrect[1] += clip_top
                    dstrect[1] += clip_top
                    srcrect[2] -= clip_left + clip_right
                    dstrect[2] -= clip_left + clip_right
                    srcrect[3] -= clip_top + clip_bot
                    dstrect[3] -= clip_top + clip_bot

                    # Area rumble et al.
                    dstrect[0] += self.offset[0]
                    dstrect[1] += self.offset[1]

                    # Copy the entity onto our frame.
                    r = self.driftwood.frame.copy(entity.spritesheet.texture, srcrect, dstrect)
                    if r < 0:
                        self.driftwood.log.msg("ERROR", "Area", "__build_frame", "SDL", SDL_GetError())

                    if tall_amount:  # It's taller than the tile. Figure out where to put the tall part.
                        dstrect = [entity.x, entity.y - tall_amount, entity.width,
                                   entity.height - (entity.height - tall_amount)]
                        srcrect[3] = dstrect[3]

                        # Clip entities so they don't appear outside the area.
                        clip_left = 0 if arearect[0] <= dstrect[0] else arearect[0] - dstrect[0]
                        clip_top = 0 if arearect[1] <= dstrect[1] else arearect[1] - dstrect[1]
                        clip_right = 0 if dstrect[0] + dstrect[2] <= arearect[2] \
                            else (dstrect[0] + dstrect[2]) - arearect[2]
                        clip_bot = 0 if dstrect[1] + dstrect[3] <= arearect[3] \
                            else (dstrect[1] + dstrect[3]) - arearect[3]
                        srcrect[0] += clip_left
                        dstrect[0] += clip_left
                        srcrect[1] += clip_top
                        dstrect[1] += clip_top
                        srcrect[2] -= clip_left + clip_right
                        dstrect[2] -= clip_left + clip_right
                        srcrect[3] -= clip_top + clip_bot
                        dstrect[3] -= clip_top + clip_bot

                        # Area rumble et al.
                        dstrect[0] += self.offset[0]
                        dstrect[1] += self.offset[1]

                        tall_parts.append([entity.spritesheet.texture, srcrect, dstrect])

            # Draw the tall bits here.
            for tall in tall_parts:
                r = self.driftwood.frame.copy(*tall)
                if r < 0:
                    self.driftwood.log.msg("ERROR", "Area", "__build_frame", "SDL", SDL_GetError())

    def calculate_visible_tile_bounds(self) -> tuple:
        """Return (x_begin, x_end, y_begin, y_end) tile indices visible in the viewport."""
        tilemap = self.tilemap
        tilewidth = tilemap.tilewidth
        tileheight = tilemap.tileheight
        offset = self.offset

        viewport_width, viewport_height = self.driftwood.window.resolution()
        viewport_left_bound = -self.driftwood.frame._frame[2].x
        viewport_top_bound = -self.driftwood.frame._frame[2].y
        viewport_right_bound = viewport_left_bound + viewport_width
        viewport_bottom_bound = viewport_top_bound + viewport_height

        # A tile will show up onscreen when it intersects the viewport rectangle. We can express the state of this
        # intersection with a set of four inequalities, all of which must be true for the intersection to occur.
        # --------------------------------------------------------------------------------------------------------
        # viewport_left_bound <= tile_right_bound
        # viewport_top_bound <= tile_bottom_bound
        # tile_left_bound < viewport_right_bound
        # tile_top_bound < viewport_bottom_bound

        # The following equations hold. (Unit of measurement is pixels.)
        # --------------------------------------------------------------
        # tile_left_bound = tile_x_pos * tilewidth + offset[0]
        # tile_top_bound = tile_y_pos * tileheight + offset[1]
        # tile_right_bound = (tile_x_pos + 1) * tilewidth + offset[0] - 1
        # tile_bottom_bound = (tile_y_pos + 1) * tileheight + offset[1] - 1

        # Substitute the equations into the inequalities.
        # -----------------------------------------------
        # viewport_left_bound <= (tile_x_pos + 1) * tilewidth + offset[0] - 1
        # viewport_top_bound <= (tile_y_pos + 1) * tileheight + offset[1] - 1
        # tile_x_pos * tilewidth + offset[0] < viewport_right_bound
        # tile_y_pos * tileheight + offset[1] < viewport_bottom_bound

        # Solve for tile_x_pos and tile_y_pos.
        # ------------------------------------
        # (viewport_left_bound - offset[0] + 1) / tilewidth - 1 <= tile_x_pos
        # (viewport_top_bound - offset[1] + 1) / tileheight - 1 <= tile_y_pos
        # tile_x_pos < (viewport_right_bound - offset[0]) / tilewidth
        # tile_y_pos < (viewport_bottom_bound - offset[1]) / tileheight

        # We can now compute the minimum and maximum X and Y coordinates for visible tiles.
        x_begin = int_greater_than_or_equal_to((viewport_left_bound - offset[0] + 1) / tilewidth - 1)
        y_begin = int_greater_than_or_equal_to((viewport_top_bound - offset[1] + 1) / tileheight - 1)
        x_end = int_smaller_than((viewport_right_bound - offset[0]) / tilewidth)
        y_end = int_smaller_than((viewport_bottom_bound - offset[1]) / tileheight)

        # Clamp to the actual extent of the map.
        x_begin = max(0, x_begin)
        y_begin = max(0, y_begin)
        x_end = min(x_end, tilemap.width - 1)
        y_end = min(y_end, tilemap.height - 1)

        return x_begin, x_end, y_begin, y_end
|
<reponame>menta78/alphaBetaLab<filename>alphaBetaLab/abBathyDataGridder.py
import numpy as np
import matplotlib
#from matplotlib.mlab import griddata
from scipy.interpolate import griddata
import multiprocessing as mp
from warnings import warn
from shapely import geometry as g
import sys
from .abUtils import *
class abBathyDataGridder:

  def __init__(self, xs, ys, zs, landPolygons = [], nParallelWorker = 4, verbose = True):
    """
    abBathyDataGridder: class to manage fast gridding of sparse bathymetry data.
    xs, ys, zs: 1d arrays of x, y and z
    landPolygons: list of polygons corresponding to land areas.
                  NOTE these must be polygons, just coastline is not enough.
                  If you load coastline from basemap.Basemap.coastsegs,
                  it should be in the correct format.
    If matloblib version <= 1.3 is used it is recommended
    an installation of natgrid, which provides bindings
    to the ncar interpolating procedures.
    """
    self.doGrid = self.doGridParallel
    self.x = np.array(xs)
    self.y = np.array(ys)
    self.z = np.array(zs)
    # nxInterpSteps: x size of each patch
    self.nxInterpSteps = 100
    self.nxInterpExtraSteps = 20
    # nyInterpSteps: y size of each patch
    self.nyInterpSteps = 100
    self.nyInterpExtraSteps = 20
    self.setLandPolygons(landPolygons)
    self.nParallelWorker = nParallelWorker
    # Minimum number of scattered points required to interpolate a patch.
    self.patchDataThresholdLength = 20
    self.verbose = verbose
    if (matplotlib.__version__ < '1.4'):
      try:
        from mpl_toolkits import natgrid
      except:
        warn('WE RECOMMEND INSTALLING natgrid: matplotlib version is <= 1.3 and natgrid is not installed')

  def _print(self, msg):
    # Console output, silenced when verbose is False.
    if self.verbose:
      print(msg)

  def setLandPolygons(self, landPolygons):
    """Store the land areas as shapely polygons."""
    self.landPolygons = [g.Polygon(p) for p in landPolygons]

  def _reduceXYZ(self, minx, maxx, dx, miny, maxy, dy):
    """Reduce the scattered input points to (at most) one averaged point
    per grid cell, and build the target grid axes (grdx, grdy)."""
    _x, _y, _z = self.x, self.y, self.z
    cond = np.bitwise_and(\
       np.bitwise_and(np.greater_equal(_x, minx - dx), np.less_equal(_x, maxx + dx)),\
       np.bitwise_and(np.greater_equal(_y, miny - dy), np.less_equal(_y, maxy + dy))\
       )
    x = _x[cond]
    y = _y[cond]
    z = _z[cond]

    self._print('reducing the number of points')
    self._print('  mapping bathy points by gridded cell')
    ptsmap = {}
    xymap = {}
    nx = int(np.floor((maxx - minx)/dx))
    ny = int(np.floor((maxy - miny)/dy))
    for xi, yi, zi in zip(x, y, z):
      xy = (xi, yi)
      # Skip exact duplicate coordinates: only the first z value is kept.
      if not (xy in xymap):
        ix = int(np.floor((xi - minx) / dx))
        iy = int(np.floor((yi - miny) / dy))
        if ix < -2 or ix > nx+2 or iy < -2 or iy > ny+2:
          continue
        ptslst = ptsmap.get((ix, iy), [])
        ptsmap[(ix, iy)] = ptslst
        ptslst.append([xi, yi, zi])
        xymap[(xi,yi)] = zi
    self._print('  creating reduced points list')
    self._print('  gathering reduced points')
    redx, redy, redz = [], [], []
    ptsiter = ptsmap.values()
    for cellpts in ptsiter:
      # One representative (mean) point per populated cell.
      cellpts = np.array(cellpts)
      clxs = cellpts[:,0]
      xr = np.mean(clxs)
      clys = cellpts[:,1]
      yr = np.mean(clys)
      clzs = cellpts[:,2]
      zr = np.mean(clzs)
      redx.append(xr)
      redy.append(yr)
      redz.append(zr)

    # Shrink the requested grid until it is covered by the reduced points.
    minredx, maxredx = min(redx), max(redx)
    while minx < minredx:
      minx += dx
    while maxx > maxredx:
      maxx -= dx
    if minx >= maxx:
      raise abException\
            ('x coords of interpolating points are outside of the grid')
    minredy, maxredy = min(redy), max(redy)
    while miny < minredy:
      miny += dy
    while maxy > maxredy:
      maxy -= dy
    if miny >= maxy:
      raise abException\
            ('y coords of interpolating points are outside of the grid')

    grdx = np.arange(minx, maxx, dx)
    grdy = np.arange(miny, maxy, dy)

    self.redx, self.redy, self.redz = np.array(redx), np.array(redy), np.array(redz)
    self.grdx, self.grdy = np.array(grdx), np.array(grdy)
    self.minx, self.maxx = minx, maxx
    self.miny, self.maxy = miny, maxy
    self.dx, self.dy = dx, dy

  def getNXNYInterpPatch(self):
    """Return the number of interpolation patches along x and y (>= 1 each)."""
    nx = np.floor(float( len(self.grdx) ) / float(self.nxInterpSteps))
    nx = max(nx, 1)
    ny = np.floor(float( len(self.grdy) ) / float(self.nyInterpSteps))
    ny = max(ny, 1)
    return nx, ny

  def getInterpPatch(self, ixstep, iystep):
    """Return the reduced points and grid axes for patch (ixstep, iystep).

    The scattered points are taken from a window extended by
    nx/nyInterpExtraSteps grid cells on each side so patches overlap and
    interpolation stays smooth across patch borders.
    """
    redx, redy, redz = self.redx, self.redy, self.redz
    grdx, grdy = self.grdx, self.grdy
    # BUGFIX: was `dx, dy = self.dy, self.dy`, which sized the x window of
    # each patch with the y grid spacing.
    dx, dy = self.dx, self.dy
    nxExtra = self.nxInterpExtraSteps
    nyExtra = self.nyInterpExtraSteps

    igrdxStart = ixstep*self.nxInterpSteps
    igrdxEnd = min( len(grdx), (ixstep + 1)*self.nxInterpSteps )
    if len(grdx) - igrdxEnd < self.nxInterpSteps:
      # The remainder would be a sliver: absorb it into this last patch.
      igrdxEnd = len(grdx) + 1
    igrdyStart = iystep*self.nyInterpSteps
    igrdyEnd = min( len(grdy), (iystep + 1)*self.nyInterpSteps )
    if len(grdy) - igrdyEnd < self.nyInterpSteps:
      igrdyEnd = len(grdy) + 1

    pgrdx = grdx[igrdxStart:igrdxEnd]
    pgrdy = grdy[igrdyStart:igrdyEnd]

    minx, maxx = min(pgrdx) - dx*nxExtra, max(pgrdx) + dx*(nxExtra + 1)
    miny, maxy = min(pgrdy) - dy*nyExtra, max(pgrdy) + dy*(nyExtra + 1)

    condx = np.logical_and(redx >= minx, redx <= maxx)
    condy = np.logical_and(redy >= miny, redy <= maxy)
    cond = np.logical_and(condx, condy)
    predx = redx[cond]
    predy = redy[cond]
    predz = redz[cond]

    return predx, predy, predz, pgrdx, pgrdy

  def _progress(self, percent):
    # In-place progress indicator on stdout.
    if self.verbose:
      sys.stdout.write('\r  progress: {:2.1f} %'.format(percent))
      sys.stdout.flush()

  def doGridSerial(self, minx, maxx, dx, miny, maxy, dy):
    """Grid the data patch by patch in the current process.
    Returns (grdx, grdy, grdz) with land cells set to nan."""
    self._print('')
    self._print('')
    self._print('reducing input data')
    self._reduceXYZ(minx, maxx, dx, miny, maxy, dy)
    self._print(' ... done')
    self._print('')
    grdx, grdy = self.grdx, self.grdy
    grdz = np.zeros((len(grdy), len(grdx)))
    npatchx, npatchy = self.getNXNYInterpPatch()
    npatchx, npatchy = int(npatchx), int(npatchy)
    ntot = npatchx*npatchy
    patchGenerator = ((ipx, ipy) for ipx in range(npatchx) for ipy in range(npatchy))
    self._print('interpolating and gethering the results ...')
    # _intpOnePatch is a module-level function (for picklability); it reaches
    # this instance through the btDtGridder module global.
    global btDtGridder
    btDtGridder = self
    intpPatches = map(_intpOnePatch, patchGenerator)
    for pgrdx, pgrdy, pgrdz, iix, iiy in intpPatches:
      perc = float(iix*npatchy + iiy)/ntot*100
      self._progress(perc)
      indxx = np.where(np.in1d(grdx, pgrdx))[0]
      indxy = np.where(np.in1d(grdy, pgrdy))[0]
      ix, iy = np.meshgrid(indxx, indxy)
      grdz[iy, ix] = pgrdz
    self._print(' ... done')
    self.doNanLandCells(grdx, grdy, grdz)
    return grdx, grdy, grdz

  def doGridParallel(self, minx, maxx, dx, miny, maxy, dy):
    """Grid the data patch by patch using a pool of nParallelWorker processes.
    Returns (grdx, grdy, grdz) with land cells set to nan."""
    self._print('')
    self._print('')
    self._print('reducing input data')
    self._reduceXYZ(minx, maxx, dx, miny, maxy, dy)
    self._print(' ... done')
    self._print('')
    grdx, grdy = self.grdx, self.grdy
    grdz = np.zeros((len(grdy), len(grdx)))
    npatchx, npatchy = self.getNXNYInterpPatch()
    npatchx, npatchy = int(npatchx), int(npatchy)
    ntot = npatchx*npatchy
    patchGenerator = ((ipx, ipy) for ipx in range(npatchx) for ipy in range(npatchy))
    self._print('interpolating and gethering results ...')
    global btDtGridder
    btDtGridder = self
    p = mp.Pool(self.nParallelWorker)
    if self.nParallelWorker > 1:
      intpPatches = p.imap(_intpOnePatch, patchGenerator)
    else:
      intpPatches = map(_intpOnePatch, patchGenerator)
    for pgrdx, pgrdy, pgrdz, iix, iiy in intpPatches:
      perc = float(iix*npatchy + iiy)/ntot*100
      self._progress(perc)
      indxx = np.where(np.in1d(grdx, pgrdx))[0]
      indxy = np.where(np.in1d(grdy, pgrdy))[0]
      ix, iy = np.meshgrid(indxx, indxy)
      grdz[iy, ix] = pgrdz
    p.close()
    self._print(' ... done')
    self.doNanLandCells(grdx, grdy, grdz)
    return grdx, grdy, grdz

  def _getLandPointIndexes(self, grdx, grdy, grdz):
    """Return (x_indexes, y_indexes) of the grid cells that fall on land."""
    # Globals used so _computePointInLand is picklable by multiprocessing.
    global landPolygons, xcs, ycs, dx, dy
    xcs, ycs = grdx, grdy
    dx, dy = self.dx, self.dy
    ixcs, iycs = range(len(xcs)), range(len(ycs))
    ixcs, iycs = np.meshgrid(ixcs, iycs)
    ixcs, iycs = ixcs.flatten(), iycs.flatten()
    if len(self.landPolygons) == 0:
      return [], []
    landPolygons = self.landPolygons
    p = mp.Pool(self.nParallelWorker)
    grdzflatten = grdz.flatten()
    if self.nParallelWorker > 1:
      inLandIxIy = p.imap(_computePointInLand, zip(ixcs, iycs, grdzflatten))
    else:
      inLandIxIy = map(_computePointInLand, zip(ixcs, iycs, grdzflatten))
    ntot = len(grdzflatten)
    nx = grdz.shape[1]
    landxindxs, landyindxs = [], []
    for inLand, ix, iy in inLandIxIy:
      ii = iy*nx + ix
      if ii % 50 == 0:
        perc = float(ii)/ntot*100.
        self._progress(perc)
      if inLand:
        landxindxs.append(ix)
        landyindxs.append(iy)
    p.close()
    return landxindxs, landyindxs

  def doNanLandCells(self, grdx, grdy, grdz):
    """Set grdz to nan (in place) for every cell intersecting a land polygon."""
    self._print('setting land points to nan')
    self._print('  getting land point indexes ...')
    landxindxs, landyindxs = self._getLandPointIndexes(grdx, grdy, grdz)
    self._print('  ... done')
    if len(landxindxs) > 0:
      grdz[np.array(landyindxs), np.array(landxindxs)] = np.nan
def _composeIntpErrorMessage(ix, iy, pgrdx, pgrdy):
minx, maxx = min(pgrdx), max(pgrdx)
miny, maxy = min(pgrdy), max(pgrdy)
msg = 'too few data to interpolate.'
msg += ' patch: ix, iy == ' + str(ix) + ', ' + str(iy) + '. '
msg += ' location: '
msg += ' minx, maxx == ' + str(minx) + ', ' + str(maxx) + '; '
msg += ' miny, maxy == ' + str(miny) + ', ' + str(maxy) + '. '
return msg
def _intpOnePatch(patch):
  """Interpolate one rectangular patch of the target grid.

  Module-level (rather than a method) so multiprocessing can pickle it; it
  reaches the active gridder through the btDtGridder module global.

  Args:
    patch: (ix, iy) patch indices.
  Returns:
    (pgrdx, pgrdy, pgrdz, ix, iy) -- the patch grid axes, the interpolated
    2d values (nan for all-land patches), and the patch indices.
  Raises:
    abException: when the patch has some points, but fewer than
    btDtGridder.patchDataThresholdLength.
  """
  ix, iy = patch
  predx, predy, predz, pgrdx, pgrdy = btDtGridder.getInterpPatch(*patch)
  if len(predx) < 3:
    # Not even enough points for a triangulation: treat the patch as land.
    msg = _composeIntpErrorMessage(ix, iy, pgrdx, pgrdy)
    msg += 'This patch looks on land. Setting the whole patch to nan'
    btDtGridder._print(msg)
    pgrdz = np.ones((len(pgrdy), len(pgrdx)))*np.nan
    return pgrdx, pgrdy, pgrdz, ix, iy
  elif len(predx) < btDtGridder.patchDataThresholdLength:
    msg = _composeIntpErrorMessage(ix, iy, pgrdx, pgrdy)
    msg += 'Try with a bigger value of gridder.nxInterpSteps, gridder.nyInterpSteps'
    raise abException(msg)
  # (removed an unused `pts = np.array([predx, predy]).T` left over here)
  pgrdxMtx, pgrdyMtx = np.meshgrid(pgrdx, pgrdy)
  pgrdzFlt = griddata((predx, predy), predz, (pgrdxMtx.flatten(), pgrdyMtx.flatten()))
  pgrdz = pgrdzFlt.reshape(pgrdxMtx.shape)
  return pgrdx, pgrdy, pgrdz, ix, iy
def _computePointInLand(pointCrds):
  """Decide whether one grid cell lies on land.

  Module-level worker for multiprocessing; reads the xcs/ycs/dx/dy and
  landPolygons module globals set up by _getLandPointIndexes.

  Args:
    pointCrds: (ix, iy, z) -- cell indices and the gridded value there.
  Returns:
    (inLand, ix, iy)
  """
  ix, iy, z = pointCrds
  if np.isnan(z):
    # Already nan: count it as land without touching the polygons.
    return True, ix, iy
  x0, y0 = xcs[ix], ycs[iy]
  x1, y1 = x0 + dx, y0 + dy
  cell = g.Polygon([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
  inLand = any(lp.intersects(cell) for lp in landPolygons)
  return inLand, ix, iy
|
import pytest
from django.core import mail as djmail
from django.utils.timezone import now
from pretix.base.models import Event, Organizer, Team, User
@pytest.fixture
def organizer():
    """A dummy organizer shared by the tests in this module."""
    return Organizer.objects.create(slug='dummy', name='Dummy')
@pytest.fixture
def event(organizer):
    """A dummy event belonging to the dummy organizer."""
    return Event.objects.create(
        organizer=organizer, name='Dummy', slug='dummy',
        date_from=now()
    )
@pytest.fixture
def admin_team(organizer):
    """A team that is allowed to change teams."""
    team = Team.objects.create(organizer=organizer, can_change_teams=True, name='Admin team')
    return team
@pytest.fixture
def admin_user(admin_team):
    """A user who is a member of the admin team."""
    user = User.objects.create_user('<EMAIL>', 'dummy')
    admin_team.members.add(user)
    return user
@pytest.mark.django_db
def test_list_of_teams(event, admin_user, client):
    """The team list page shows the existing teams."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    response = client.get('/control/organizer/dummy/teams')
    assert 'Admin team' in response.rendered_content
@pytest.mark.django_db
def test_team_detail_view(event, admin_user, admin_team, client):
    """The team detail page shows the team name and its members."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    url = '/control/organizer/dummy/team/{}/'.format(admin_team.pk)
    response = client.get(url)
    assert 'Admin team' in response.rendered_content
    assert admin_user.email in response.rendered_content
@pytest.mark.django_db
def test_team_add_user(event, admin_user, admin_team, client):
    """Posting an existing user's email adds that user to the team."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    new_member = User.objects.create_user('<EMAIL>', 'dummy')
    url = '/control/organizer/dummy/team/{}/'.format(admin_team.pk)
    response = client.post(url, {'user': new_member.email}, follow=True)
    assert 'Admin team' in response.rendered_content
    assert admin_user.email in response.rendered_content
    assert new_member.email in response.rendered_content
    assert new_member in admin_team.members.all()
@pytest.mark.django_db
def test_team_create_invite(event, admin_user, admin_team, client):
    """Posting an unknown email creates an invite and sends one mail."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    djmail.outbox = []
    url = '/control/organizer/dummy/team/{}/'.format(admin_team.pk)
    response = client.post(url, {'user': '<EMAIL>'}, follow=True)
    assert 'Admin team' in response.rendered_content
    assert admin_user.email in response.rendered_content
    assert '<EMAIL>' in response.rendered_content
    assert admin_team.invites.first().email == '<EMAIL>'
    assert len(djmail.outbox) == 1
@pytest.mark.django_db
def test_team_create_token(event, admin_user, admin_team, client):
    """Posting a token name creates an API token that is shown once."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    djmail.outbox = []
    url = '/control/organizer/dummy/team/{}/'.format(admin_team.pk)
    response = client.post(url, {'name': 'Test token'}, follow=True)
    token = admin_team.tokens.first()
    assert 'Test token' in response.rendered_content
    assert token.name == 'Test token'
    assert token.token in response.rendered_content
@pytest.mark.django_db
def test_team_remove_token(event, admin_user, admin_team, client):
    """Removing a token deactivates it; its name stays visible, its secret does not."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    token = admin_team.tokens.create(name='Test token')
    url = '/control/organizer/dummy/team/{}/'.format(admin_team.pk)
    response = client.post(url, {'remove-token': str(token.pk)}, follow=True)
    assert token.token not in response.rendered_content
    assert 'Test token' in response.rendered_content
    token.refresh_from_db()
    assert not token.active
@pytest.mark.django_db
def test_team_revoke_invite(event, admin_user, admin_team, client):
    """Revoking a pending invite deletes it without touching the membership."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    invite = admin_team.invites.create(email='<EMAIL>')
    url = '/control/organizer/dummy/team/{}/'.format(admin_team.pk)
    response = client.post(url, {'remove-invite': str(invite.pk)}, follow=True)
    content = response.rendered_content
    assert 'Admin team' in content
    assert admin_user.email in content
    assert not admin_team.invites.exists()
@pytest.mark.django_db
def test_team_remove_user(event, admin_user, admin_team, client):
    """Removing a (non-critical) member takes them out of the team."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    member = User.objects.create_user('<EMAIL>', 'dummy')
    admin_team.members.add(member)
    url = '/control/organizer/dummy/team/{}/'.format(admin_team.pk)
    response = client.post(url, {'remove-member': member.pk}, follow=True)
    content = response.rendered_content
    assert 'Admin team' in content
    assert admin_user.email in content
    assert member not in admin_team.members.all()
@pytest.mark.django_db
def test_team_remove_last_admin(event, admin_user, admin_team, client):
    """Removing the last user with full admin rights must be rejected until
    another team actually grants those rights to the same user."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    # Stage 1: user is the only member of the only admin team -> rejected.
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'remove-member': admin_user.pk
    }, follow=True)
    assert 'alert-danger' in resp.rendered_content
    assert admin_user in admin_team.members.all()
    # Stage 2: a second team exists but has no members -> still rejected.
    t2 = Team.objects.create(organizer=event.organizer, name='Admin team 2')
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'remove-member': admin_user.pk
    }, follow=True)
    assert 'alert-danger' in resp.rendered_content
    assert admin_user in admin_team.members.all()
    # Stage 3: user is in the second team, but it lacks the
    # can_change_teams permission -> still rejected.
    t2.members.add(admin_user)
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'remove-member': admin_user.pk
    }, follow=True)
    assert 'alert-danger' in resp.rendered_content
    assert admin_user in admin_team.members.all()
    # Stage 4: the second team can change teams, so removal from the
    # first team is now allowed.
    t2.can_change_teams = True
    t2.save()
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'remove-member': admin_user.pk
    }, follow=True)
    assert 'alert-danger' not in resp.rendered_content
    assert admin_user not in admin_team.members.all()
@pytest.mark.django_db
def test_create_team(event, admin_user, admin_team, client):
    """Creating a team stores the posted permissions and auto-adds the creator."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    client.post('/control/organizer/dummy/team/add', {
        'name': 'Foo',
        'can_create_events': 'on',
        'limit_events': str(event.pk),
        'can_change_event_settings': 'on'
    }, follow=True)
    team = Team.objects.last()
    assert team.can_change_event_settings
    assert team.can_create_events
    assert not team.can_change_organizer_settings
    assert list(team.limit_events.all()) == [event]
    assert list(team.members.all()) == [admin_user]
@pytest.mark.django_db
def test_update_team(event, admin_user, admin_team, client):
    """Editing a team persists the posted permission set and event limits."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    url = '/control/organizer/dummy/team/{}/edit'.format(admin_team.pk)
    client.post(url, {
        'name': 'Admin',
        'can_change_teams': 'on',
        'limit_events': str(event.pk),
        'can_change_event_settings': 'on'
    }, follow=True)
    admin_team.refresh_from_db()
    assert admin_team.can_change_event_settings
    assert not admin_team.can_change_organizer_settings
    assert list(admin_team.limit_events.all()) == [event]
@pytest.mark.django_db
def test_update_last_team_to_be_no_admin(event, admin_user, admin_team, client):
    """Stripping the admin rights from the only admin team must be rejected."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    url = '/control/organizer/dummy/team/{}/edit'.format(admin_team.pk)
    response = client.post(url, {
        'name': 'Admin',
        'can_change_event_settings': 'on'
    }, follow=True)
    assert 'alert-danger' in response.rendered_content
@pytest.mark.django_db
def test_remove_team(event, admin_user, admin_team, client):
    """A non-essential team can be deleted, leaving only the admin team."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    extra_team = Team.objects.create(organizer=event.organizer, name='Admin team 2')
    url = '/control/organizer/dummy/team/{}/delete'.format(extra_team.pk)
    response = client.post(url, {}, follow=True)
    assert Team.objects.count() == 1
    assert 'alert-success' in response.rendered_content
@pytest.mark.django_db
def test_remove_last_admin_team(event, admin_user, admin_team, client):
    """Deleting the only admin team must be rejected."""
    client.login(email='<EMAIL>', password='<PASSWORD>')
    url = '/control/organizer/dummy/team/{}/delete'.format(admin_team.pk)
    response = client.post(url, {}, follow=True)
    assert Team.objects.count() == 1
    assert 'alert-danger' in response.rendered_content
@pytest.mark.django_db
def test_invite_invalid_token(event, admin_team, client):
    """A mangled invite token produces an 'invalid link' error page."""
    invite = admin_team.invites.create(email='<EMAIL>')
    response = client.get('/control/invite/foo{}bar'.format(invite.token), follow=True)
    assert b'alert-danger' in response.content
    assert b'invalid link' in response.content
@pytest.mark.django_db
def test_invite_existing_team_member(event, admin_team, client):
    """Following an invite while already being a team member shows an error."""
    member = User.objects.create_user('<EMAIL>', 'dummy')
    admin_team.members.add(member)
    client.login(email='<EMAIL>', password='<PASSWORD>')
    invite = admin_team.invites.create(email='<EMAIL>')
    response = client.get('/control/invite/{}'.format(invite.token), follow=True)
    assert b'alert-danger' in response.content
    assert b'already are part of' in response.content
@pytest.mark.django_db
def test_invite_authenticated(event, admin_team, client):
    """A logged-in user following an invite joins the team and consumes the invite."""
    invited_user = User.objects.create_user('<EMAIL>', 'dummy')
    client.login(email='<EMAIL>', password='<PASSWORD>')
    invite = admin_team.invites.create(email='<EMAIL>')
    response = client.get('/control/invite/{}'.format(invite.token), follow=True)
    assert b'alert-success' in response.content
    assert invited_user in admin_team.members.all()
    assert not admin_team.invites.exists()
@pytest.mark.django_db
def test_invite_new_user(event, admin_team, client):
    """An anonymous invitee gets a signup form and joins after registering."""
    invite = admin_team.invites.create(email='<EMAIL>')
    url = '/control/invite/{}'.format(invite.token)
    response = client.get(url, follow=True)
    assert b'<form' in response.content
    response = client.post(url, {
        'email': '<EMAIL>',
        'password': '<PASSWORD>',
        'password_repeat': '<PASSWORD>'
    }, follow=True)
    assert b'alert-success' in response.content
    assert admin_team.members.filter(email='<EMAIL>').exists()
    assert not admin_team.invites.exists()
|
#!/usr/bin/env python
# coding: utf-8
# # Bimodal distribution (mixture of two 1d Gaussians)
# In[1]:
import os
try:
import seaborn as sns
except:
get_ipython().run_line_magic('pip', 'install seaborn')
import seaborn as sns
try:
import matplotlib.pyplot as plt
except:
get_ipython().run_line_magic('pip', 'install matplotlib')
import matplotlib.pyplot as plt
try:
import scipy
except:
get_ipython().run_line_magic('pip', 'install scipy')
import scipy
from scipy.stats import norm
try:
import jax
except:
get_ipython().run_line_magic('pip', 'install jax jaxlib')
import jax
import jax.numpy as jnp
# In[2]:
# Development-mode toggle: when the DEV_MODE environment variable is set,
# pull in the pyprobml helper scripts and apply LaTeX-friendly figure sizing.
dev_mode = "DEV_MODE" in os.environ
if dev_mode:
    import sys

    # the helper modules live in the repository's scripts/ directory
    sys.path.append("scripts")
    import pyprobml_utils as pml
    from latexify import latexify

    latexify(width_scale_factor=2, fig_height=1.5)
# In[3]:
# Mixture of two Gaussians: a broad component N(0, 1) and a narrow
# "spike" N(2, 0.05), mixed with equal weights.
LINE_WIDTH = 2
mu = [0, 2]
sigma = [1, 0.05]
distributions = [norm(loc=mu[i], scale=sigma[i]) for i in range(2)]
weights = [0.5, 0.5]

# Evaluation grid for plotting the density.
x = jnp.linspace(-2, 2 * mu[1], 600)

# Mixture density: weighted sum of the component pdfs at the grid points.
p = sum(weights[i] * distributions[i].pdf(x) for i in range(2))

# Mean of the mixture, estimated on the grid as E[x] = sum(x*p) / sum(p).
# (The previous code used jnp.mean(x * p), which is not the distribution
# mean; the true value here is 0.5*0 + 0.5*2 = 1.)
mean_p = jnp.sum(x * p) / jnp.sum(p)

# Plot the mixture density and its mean.  The first component label now
# matches the parameters above (mu=0, sigma=1; it previously read N(x|0,2)).
plt.plot(
    x,
    p,
    "black",
    linewidth=LINE_WIDTH,
    label="$0.5\mathcal{N}(x|0,1) + 0.5\mathcal{N}(x|2,0.05)$",
)
plt.vlines(mean_p, ymin=0, ymax=max(p), color="red", linewidth=LINE_WIDTH, label="mean")
plt.xlabel("$x$")
plt.ylabel("$p(x)$")
plt.legend(bbox_to_anchor=(1, 1))
# format axes
sns.despine()
if dev_mode:
    pml.savefig("bimodalSpike_latexified.pdf")
# In[4]:
# Another example, with two modes
def make_graph(
    ax, data, color=None, linestyle=None, label=None, xlabel=None, ylabel=None
):
    """Plot the weighted mixture density described by ``data`` onto ``ax``.

    ``data`` must provide "x" (grid), "weights" and "distributions"
    (objects with a ``.pdf`` method).  Axis labels are only drawn when
    xlabel/ylabel are truthy; the label text itself is fixed to $x$/$p(x)$.
    """
    width = 2
    grid = data["x"]
    n_components = len(data["distributions"])
    density = sum(
        data["weights"][i] * data["distributions"][i].pdf(grid)
        for i in range(n_components)
    )
    ax.plot(
        grid,
        density,
        color=color,
        linestyle=linestyle,
        linewidth=width,
        label=label,
    )
    plt.legend(bbox_to_anchor=(1, 1))
    if xlabel:
        plt.xlabel("$x$")
    if ylabel:
        plt.ylabel("$p(x)$")
# Data for the full two-mode mixture: equal-weight N(0, 0.5) and N(2, 0.5)
# components evaluated on a shared grid.  (The previous version assigned
# `data = dict()` and `data2 = dict()` only to overwrite them immediately;
# those dead stores are removed.)
mu = [0, 2]
sigma = [0.5, 0.5]
weights = [0.5, 0.5]
data = {
    "distributions": [norm(loc=mu[i], scale=sigma[i]) for i in range(2)],
    "weights": weights,
    "x": jnp.linspace(-2, 2 * mu[1], 600),
}

# Plot the first component alone, scaled by its mixture weight.
plt.figure()
ax = plt.gca()
mu = [0]
sigma = [0.5]
data1 = {
    "distributions": [norm(loc=mu[i], scale=sigma[i]) for i in range(1)],
    "weights": [data["weights"][0]],
    "x": data["x"],
}
make_graph(ax, data1, color="g", linestyle="dashdot", label="$0.5\mathcal{N}(x|0,0.5)$")

# Plot the second component alone.
mu = [2]
sigma = [0.5]
data2 = {
    "distributions": [norm(loc=mu[i], scale=sigma[i]) for i in range(1)],
    "weights": [data["weights"][0]],
    "x": data["x"],
}
make_graph(ax, data2, color="r", linestyle="dashdot", label="$0.5\mathcal{N}(x|2,0.5)$")

# Plot the full mixture on the same axes.
make_graph(
    ax,
    data,
    color="k",
    linestyle="dashed",
    label="$0.5\mathcal{N}(x|0,0.5) + 0.5\mathcal{N}(x|2,0.5)$",
    xlabel="$x$",
    ylabel="$p(x)$",
)
# format axes
sns.despine()
if dev_mode:
    pml.savefig("bimodalDistribution_latexified.pdf")
|
<reponame>EulerWong/director
from director import lcmUtils
from director import objectmodel as om
from director import visualization as vis
from director.utime import getUtime
from director import transformUtils
from director.debugVis import DebugData
from director import ioUtils
from director import robotstate
from director import applogic as app
from director import vtkAll as vtk
from director.lcmframe import frameFromPositionMessage, positionMessageFromFrame
from director.simpletimer import SimpleTimer
from director.shallowCopy import shallowCopy
from director import roboturdf
from director import filterUtils
import director.vtkNumpy as vnp
import os
import math
import numpy as np
from director import drcargs
import drc as lcmdrc
from bot_core.pose_t import pose_t
from bot_core.robot_state_t import robot_state_t
import functools
import json
from PythonQt import QtGui, QtCore
# Cached foot meshes; populated lazily by getFootMeshes().
_footMeshes = None
# [leftFiles, rightFiles] lists of absolute mesh paths, filled from the config.
_footMeshFiles = []
_modelName = "valkyrie" # either atlas_v3/v4/v5 or valkyrie
_pelvisLink = '' # pelvis
_leftFootLink = '' # l_foot
_rightFootLink = '' # r_foot
# Read the director config at import time: model name, foot mesh files
# (made absolute relative to the config file) and the relevant link names.
with open(drcargs.args().directorConfigFile) as directorConfigFile:
    directorConfig = json.load(directorConfigFile)
    _modelName = directorConfig['modelName']
    directorConfigDirectory = os.path.dirname(os.path.abspath(directorConfigFile.name))
    if 'leftFootMeshFiles' in directorConfig:
        _footMeshFiles.append( directorConfig['leftFootMeshFiles'] )
        _footMeshFiles.append( directorConfig['rightFootMeshFiles'] )
        # make every mesh path absolute, relative to the config file location
        for j in range(0,2):
            for i in range(len(_footMeshFiles[j])):
                _footMeshFiles[j][i] = os.path.join(directorConfigDirectory, _footMeshFiles[j][i])
    if 'pelvisLink' in directorConfig:
        _pelvisLink = directorConfig['pelvisLink']
    if 'leftFootLink' in directorConfig:
        _leftFootLink = directorConfig['leftFootLink']
        _rightFootLink = directorConfig['rightFootLink']
DEFAULT_PARAM_SET = 'Drake Nominal'

# Named footstep-parameter presets.  Keys must match the property names
# created in FootstepsDriver._setupProperties.
DEFAULT_STEP_PARAMS = {
    'BDI': {
        'Min Num Steps': 0,
        'Max Num Steps': 12,
        'Min Step Width': 0.20,
        'Nominal Step Width': 0.26,
        'Nominal Forward Step': 0.15,
        'Max Forward Step': 0.40,
        'Max Step Width': 0.4,
        'Max Upward Step': 0.18,
        'Max Downward Step': 0.18,
        'Behavior': 0,
        'Leading Foot': 0,
        'Swing Height': 0.05,
        'Drake Swing Speed': 0.2,
        'Drake Instep Shift': 0.0275,
        'Drake Min Hold Time': 2.0,
        'Support Contact Groups': 0,
        'Prevent Swing Undershoot': 0,
        'Prevent Swing Overshoot': 0,
        'Map Mode': 0,
        'IHMC Transfer Time': 1.0,
        'IHMC Swing Time': 1.2,
    },
    'Drake Nominal': {
        'Min Num Steps': 0,
        'Max Num Steps': 16,
        'Min Step Width': 0.20,
        'Nominal Step Width': 0.26,
        'Nominal Forward Step': 0.26,
        'Max Forward Step': 0.30,
        'Max Step Width': 0.32,
        'Max Upward Step': 0.18,
        'Max Downward Step': 0.18,
        'Behavior': 2,
        'Leading Foot': 0,
        'Swing Height': 0.03,
        'Drake Swing Speed': 0.6,
        'Drake Instep Shift': 0.005,
        'Drake Min Hold Time': 1.0,
        'Support Contact Groups': 0,
        'Prevent Swing Undershoot': 0,
        'Prevent Swing Overshoot': 0,
        'Map Mode': 0,
        'IHMC Transfer Time': 1.0,
        'IHMC Swing Time': 1.2,
    },
    'IHMC Nominal': {
        'Min Num Steps': 0,
        'Max Num Steps': 16,
        'Min Step Width': 0.20,
        'Nominal Step Width': 0.26,
        'Nominal Forward Step': 0.26,
        'Max Forward Step': 0.30,
        'Max Step Width': 0.32,
        'Max Upward Step': 0.18,
        'Max Downward Step': 0.18,
        'Behavior': 2,
        'Leading Foot': 0,
        'Swing Height': 0.05,
        'Drake Swing Speed': 0.2,
        'Drake Instep Shift': 0.0275,
        'Drake Min Hold Time': 2.0,
        'Support Contact Groups': 0,
        'Prevent Swing Undershoot': 0,
        'Prevent Swing Overshoot': 0,
        'Map Mode': 0,
        'IHMC Transfer Time': 1.0,
        'IHMC Swing Time': 1.2,
    },
}

# Derived presets: copies of 'Drake Nominal' with selected overrides.
DEFAULT_STEP_PARAMS['Terrain'] = DEFAULT_STEP_PARAMS['Drake Nominal'].copy()
DEFAULT_STEP_PARAMS['Terrain'].update({
    'Drake Min Hold Time': 1.0,
    'Drake Swing Speed': 0.6,
    'Swing Height': 0.05,
    'Max Forward Step': 0.36,
    'Max Num Steps': 6,
    'Nominal Step Width': 0.22,
    'Map Mode': 1,
})
DEFAULT_STEP_PARAMS['Stairs'] = DEFAULT_STEP_PARAMS['Drake Nominal'].copy()
DEFAULT_STEP_PARAMS['Stairs'].update({
    'Drake Min Hold Time': 2.0,
    'Swing Height': 0.05,
    'Max Num Steps': 8,
    'Min Num Steps': 8,
    'Drake Swing Speed': 0.6,
    'Support Contact Groups': lcmdrc.footstep_params_t.SUPPORT_GROUPS_MIDFOOT_TOE,
    'Map Mode': 2,
})
DEFAULT_STEP_PARAMS['Polaris Platform'] = DEFAULT_STEP_PARAMS['Drake Nominal'].copy()
DEFAULT_STEP_PARAMS['Polaris Platform'].update({
    'Drake Min Hold Time': 2.0,
    'Prevent Swing Undershoot': 1,
    'Swing Height': 0.05,
    'Map Mode': 1,
})

# x/y corner coordinates for each walking-volume slice, keyed by the
# (lower, upper) bound of the slice; consumed by drawContactVolumes().
DEFAULT_CONTACT_SLICES = {
    (0.05, 0.3): np.array([[-0.13, -0.13, 0.13, 0.13],
                           [0.0562, -0.0562, 0.0562, -0.0562]]),
    (0.3, 0.75): np.array([[-0.13, -0.13, 0.25, 0.25],
                           [0.25, -0.25, 0.25, -0.25]]),
    (0.75, 1.05): np.array([[-0.2, -0.2, 0.25, 0.25],
                            [0.4, -0.4, 0.4, -0.4]]),
    (1.05, 1.85): np.array([[-0.35, -0.35, 0.28, 0.28],
                            [0.4, -0.4, 0.4, -0.4]]),
}
def loadFootMeshes():
    """Load the foot meshes listed in _footMeshFiles.

    Returns a two-element list (index 0 is used as the left foot, index 1 as
    the right) of vtkPolyData, each uniformly scaled by 0.98.
    """
    meshes = []
    for side in range(2):
        builder = DebugData()
        for meshFile in _footMeshFiles[side]:
            builder.addPolyData(ioUtils.readPolyData(meshFile, computeNormals=True))
        shrink = vtk.vtkTransform()
        shrink.Scale(0.98, 0.98, 0.98)
        meshes.append(filterUtils.transformPolyData(builder.getPolyData(), shrink))
    return meshes
def getLeftFootMesh():
    """Return a shallow copy of the cached left-foot mesh."""
    return shallowCopy(getFootMeshes()[0])
def getRightFootMesh():
    """Return a shallow copy of the cached right-foot mesh."""
    return shallowCopy(getFootMeshes()[1])
def getLeftFootColor():
    """RGB triple (yellow) used to render left footsteps."""
    yellow = [1.0, 1.0, 0.0]
    return yellow
def getRightFootColor():
    """RGB triple (green) used to render right footsteps."""
    green = [0.33, 1.0, 0.0]
    return green
def getFootMeshes():
    """Return the [left, right] foot meshes, loading them on first use."""
    global _footMeshes
    if not _footMeshes:
        _footMeshes = loadFootMeshes()
    return _footMeshes
def getFootstepsFolder():
    """Return the 'footstep plan' container, creating it under 'planning' on first use."""
    existing = om.findObjectByName('footstep plan')
    if existing is not None:
        return existing
    folder = om.getOrCreateContainer('footstep plan', parentObj=om.getOrCreateContainer('planning'))
    folder.setIcon(om.Icons.Feet)
    om.collapse(folder)
    return folder
def getWalkingVolumesFolder():
    """Return the 'walking volumes' container, creating it under the footstep plan on first use."""
    existing = om.findObjectByName('walking volumes')
    if existing is not None:
        return existing
    folder = om.getOrCreateContainer('walking volumes', parentObj=getFootstepsFolder())
    om.collapse(folder)
    return folder
def getTerrainSlicesFolder():
    """Return the 'terrain slices' container (hidden by default), creating it on first use."""
    existing = om.findObjectByName('terrain slices')
    if existing is not None:
        return existing
    folder = om.getOrCreateContainer('terrain slices', parentObj=getFootstepsFolder())
    folder.setProperty('Visible', False)
    om.collapse(folder)
    return folder
def getBDIAdjustedFootstepsFolder():
    """Return the 'BDI adj footstep plan' container, creating it on first use."""
    existing = om.findObjectByName('BDI adj footstep plan')
    if existing is not None:
        return existing
    folder = om.getOrCreateContainer('BDI adj footstep plan')
    folder.setIcon(om.Icons.Feet)
    om.collapse(folder)
    return folder
class FootstepsDriver(object):
    def __init__(self, jointController):
        """Set up footstep-plan state, LCM subscriptions, the parameter
        property panel and the walk-plan toolbar widget."""
        self.jointController = jointController
        # latest plan/request messages received over LCM
        self.lastFootstepPlan = None
        self.lastFootstepRequest = None
        self.goalSteps = None
        self.lastWalkingPlan = None
        # optional hook invoked with each new walking plan (see onWalkingPlan)
        self.walkingPlanCallback = None
        self.default_step_params = DEFAULT_STEP_PARAMS
        self.contact_slices = DEFAULT_CONTACT_SLICES
        self.show_contact_slices = False
        self.toolbarWidget = None
        ### Stuff pertaining to rendering BDI-frame steps
        self.poseAlt = None
        self.bdi_plan = None
        self.bdi_plan_adjusted = None
        view = app.getDRCView()
        # hidden second robot model used to render the alternate (BDI) pose
        self.altRobotModel, self.altJointController = roboturdf.loadRobotModel('alt model', view, parent='alt model', color=roboturdf.getRobotOrangeColor(), visible=False)
        self.altRobotModel.setProperty('Visible', False)
        self.showBDIPlan = False # hide the BDI plans when created
        self.altChannel = "POSE_BODY_ALT"
        self.altSubscribe = None
        #enable this to used the alt model to render a different state
        #self.altJointController.addLCMUpdater("EST_ROBOT_STATE_ALT")
        self._setupSubscriptions()
        self._setupProperties()
        self.showToolbarWidget()
        # If we're a consoleapp and have no main window execButton won't exist
        if hasattr(self, 'execButton'):
            self.execButton.setEnabled(False)
        self.committedPlans = []
    def _setupProperties(self):
        """Create the 'Footstep Params' property panel and the tables that
        map its enum indices to LCM message constants, then load the
        default parameter set."""
        self.params = om.ObjectModelItem('Footstep Params')
        self.defaults_map = ['Drake Nominal', 'BDI', 'IHMC Nominal', 'Terrain', 'Stairs', 'Polaris Platform']
        self.params.addProperty('Defaults', 0, attributes=om.PropertyAttributes(enumNames=self.defaults_map))
        self.params.addProperty('Behavior', 0, attributes=om.PropertyAttributes(enumNames=['BDI Stepping', 'BDI Walking', 'Drake Walking']))
        self.params.addProperty('Leading Foot', 1, attributes=om.PropertyAttributes(enumNames=['Auto', 'Left', 'Right']))
        # enum index -> LCM constant for the 'Leading Foot' property
        self.leading_foot_map = [lcmdrc.footstep_plan_params_t.LEAD_AUTO,
                                 lcmdrc.footstep_plan_params_t.LEAD_LEFT,
                                 lcmdrc.footstep_plan_params_t.LEAD_RIGHT]
        # self.params.addProperty('Map Command', 0, attributes=om.PropertyAttributes(enumNames=['Full Heightmap', 'Flat Ground', 'Z Normals']))
        self.params.addProperty('Map Mode', 0, attributes=om.PropertyAttributes(enumNames=['Foot Plane', 'Terrain Heights & Normals', 'Terrain Heights, Z Normals', 'Horizontal Plane']))
        # enum index -> LCM constant for the 'Map Mode' property
        self.map_mode_map = [
                             lcmdrc.footstep_plan_params_t.FOOT_PLANE,
                             lcmdrc.footstep_plan_params_t.TERRAIN_HEIGHTS_AND_NORMALS,
                             lcmdrc.footstep_plan_params_t.TERRAIN_HEIGHTS_Z_NORMALS,
                             lcmdrc.footstep_plan_params_t.HORIZONTAL_PLANE
                             ]
        # self.params.addProperty('Heights Source', attributes=om.PropertyAttributes(enumNames=['Map Data', 'Foot Plane']))
        # self.params.addProperty('Normals Source', attributes=om.PropertyAttributes(enumNames=['Map Data', 'Foot Plane']))
        # Numeric parameters start as None; applyDefaults() below fills them
        # from DEFAULT_STEP_PARAMS.
        self.params.addProperty('Min Num Steps', None, attributes=om.PropertyAttributes(decimals=0, minimum=0, maximum=30, singleStep=1))
        self.params.addProperty('Max Num Steps', None, attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=30, singleStep=1))
        self.params.addProperty('Min Step Width', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.1, maximum=0.35, singleStep=0.01))
        self.params.addProperty('Nominal Step Width', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.21, maximum=0.4, singleStep=0.01))
        self.params.addProperty('Max Step Width', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.22, maximum=0.5, singleStep=0.01))
        self.params.addProperty('Nominal Forward Step', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.01))
        self.params.addProperty('Max Forward Step', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.01))
        self.params.addProperty('Swing Height', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.005))
        self.params.addProperty('Max Upward Step', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.01))
        self.params.addProperty('Max Downward Step', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=0.5, singleStep=0.01))
        self.params.addProperty('Drake Swing Speed', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.05, maximum=5.0, singleStep=0.05))
        self.params.addProperty('Drake Min Hold Time', None, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=10.0, singleStep=0.05))
        self.params.addProperty('Drake Instep Shift', None, attributes=om.PropertyAttributes(decimals=4, minimum=-0.3, maximum=0.3, singleStep=0.0005))
        # enum index -> LCM constant for the 'Behavior' property
        self.behavior_lcm_map = {
                              0: lcmdrc.footstep_plan_params_t.BEHAVIOR_BDI_STEPPING,
                              1: lcmdrc.footstep_plan_params_t.BEHAVIOR_BDI_WALKING,
                              2: lcmdrc.footstep_plan_params_t.BEHAVIOR_WALKING}
        self.params.addProperty('Planner Mode', 0, attributes=om.PropertyAttributes(enumNames=['Fast MIQP', 'Slow MISOCP']))
        self.params.addProperty('Support Contact Groups', 0, attributes=om.PropertyAttributes(enumNames=['Whole Foot', 'Front 2/3', 'Back 2/3']))
        self.params.addProperty('Prevent Swing Undershoot', 0, attributes=om.PropertyAttributes(enumNames=['False', 'True']))
        self.params.addProperty('Prevent Swing Overshoot', 0, attributes=om.PropertyAttributes(enumNames=['False', 'True']))
        self.params.addProperty('IHMC Transfer Time', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.6, maximum=5.0, singleStep=0.05))
        self.params.addProperty('IHMC Swing Time', None, attributes=om.PropertyAttributes(decimals=2, minimum=0.6, maximum=5.0, singleStep=0.05))
        self.applyDefaults(DEFAULT_PARAM_SET)
def applyDefaults(self, set_name):
defaults = self.default_step_params[set_name]
for k, v in defaults.iteritems():
self.params.setProperty(k, v)
    def _setupSubscriptions(self):
        """Subscribe to the footstep/walking planner LCM channels and the
        alternate (BDI-frame) pose feed."""
        useHistoricalLoader = False
        historicalLoader = lcmUtils.HistoricalLCMLoader('drc', 'software/drc_lcmtypes/lcmtypes', os.getenv('DRC_BASE')) if useHistoricalLoader else None
        lcmUtils.addSubscriber('FOOTSTEP_PLAN_RESPONSE', lcmdrc.footstep_plan_t, self.onFootstepPlan, historicalLoader)
        lcmUtils.addSubscriber('WALKING_TRAJ_RESPONSE', lcmdrc.robot_plan_t, self.onWalkingPlan)
        lcmUtils.addSubscriber('WALKING_SIMULATION_TRAJ_RESPONSE', lcmdrc.robot_plan_t, self.onWalkingPlan)
        ### Related to BDI-frame adjustment:
        self.altSubscribe = lcmUtils.addSubscriber( self.altChannel , pose_t, self.onPoseAlt)
        self.altSubscribe.setSpeedLimit(60)
        sub2 = lcmUtils.addSubscriber('BDI_ADJUSTED_FOOTSTEP_PLAN', lcmdrc.footstep_plan_t, self.onBDIAdjustedFootstepPlan)
        sub2.setSpeedLimit(1) # was 5 but was slow rendering
def changeSubscriptionAlt(self, newAltChannel="POSE_BODY_ALT"):
# used to monitor a different pose e.g. POSE_BODY_LOGGED in playback
self.altChannel = newAltChannel
lcmUtils.removeSubscriber ( self.altSubscribe )
self.altSubscribe = lcmUtils.addSubscriber( self.altChannel , pose_t, self.onPoseAlt)
self.altSubscribe.setSpeedLimit(60)
##############################
    def getDefaultStepParams(self):
        """Build a footstep_params_t message from the current values of the
        property panel plus fixed BDI tuning constants."""
        default_step_params = lcmdrc.footstep_params_t()
        default_step_params.step_speed = self.params.properties.drake_swing_speed
        default_step_params.drake_min_hold_time = self.params.properties.drake_min_hold_time
        default_step_params.drake_instep_shift = self.params.properties.drake_instep_shift
        default_step_params.step_height = self.params.properties.swing_height
        default_step_params.constrain_full_foot_pose = True
        # Fixed BDI controller tuning values; not exposed in the UI.
        default_step_params.bdi_step_duration = 2.0
        default_step_params.bdi_sway_duration = 0.0
        default_step_params.bdi_lift_height = 0.065
        default_step_params.bdi_toe_off = 1
        default_step_params.bdi_knee_nominal = 0.0
        default_step_params.bdi_max_foot_vel = 0.0
        default_step_params.bdi_sway_end_dist = 0.02
        default_step_params.bdi_step_end_dist = 0.02
        default_step_params.mu = 1.0
        default_step_params.ihmc_transfer_time = self.params.properties.ihmc_transfer_time
        default_step_params.ihmc_swing_time = self.params.properties.ihmc_swing_time
        default_step_params.support_contact_groups = self.params.properties.support_contact_groups
        default_step_params.prevent_swing_undershoot = self.params.properties.prevent_swing_undershoot
        default_step_params.prevent_swing_overshoot = self.params.properties.prevent_swing_overshoot
        return default_step_params
def onWalkingPlan(self, msg):
self.lastWalkingPlan = msg
if self.walkingPlanCallback:
self.walkingPlanCallback(self.lastWalkingPlan)
def onBDIAdjustedFootstepPlan(self, msg):
folder = getBDIAdjustedFootstepsFolder()
om.removeFromObjectModel(folder)
folder = getBDIAdjustedFootstepsFolder()
self.drawFootstepPlan(msg, folder)
def onFootstepPlan(self, msg):
#self.clearFootstepPlan()
self.lastFootstepPlan = msg
planFolder = getFootstepsFolder()
self.drawFootstepPlan( self.lastFootstepPlan , planFolder)
self.transformPlanToBDIFrame( self.lastFootstepPlan )
self.showToolbarWidget()
    def showToolbarWidget(self):
        """Create the 'Walk plan' toolbar (play/stop/clear) on first call;
        on later calls just re-enable the play button."""
        if app.getMainWindow() is None:
            # console app without a main window: nothing to show
            return
        if self.toolbarWidget:
            self.execButton.setEnabled(True)
            return
        w = QtGui.QWidget()
        l = QtGui.QHBoxLayout(w)
        label = QtGui.QLabel('Walk plan:')
        execButton = QtGui.QPushButton('')
        execButton.setIcon(QtGui.QApplication.style().standardIcon(QtGui.QStyle.SP_MediaPlay))
        clearButton = QtGui.QPushButton('')
        clearButton.setIcon(QtGui.QApplication.style().standardIcon(QtGui.QStyle.SP_TrashIcon))
        stopButton = QtGui.QPushButton('')
        stopButton.setIcon(QtGui.QApplication.style().standardIcon(QtGui.QStyle.SP_MediaStop))
        l.addWidget(label)
        l.addWidget(execButton)
        l.addWidget(stopButton)
        l.addWidget(clearButton)
        l.setContentsMargins(0, 0, 0, 0)
        execButton.setShortcut(QtGui.QKeySequence('Ctrl+Return'))
        execButton.connect('clicked()', self.onExecClicked)
        clearButton.connect('clicked()', self.onClearClicked)
        stopButton.connect('clicked()', self.sendStopWalking)
        self.execButton = execButton
        self.stopButton = stopButton
        self.toolbarWidget = app.getMainWindow().toolBar().addWidget(w)
        self.execButton.show()
def onExecClicked(self):
self.commitFootstepPlan(self.lastFootstepPlan)
om.removeFromObjectModel(om.findObjectByName('footstep widget'))
walkGoal = om.findObjectByName('walking goal')
if walkGoal:
walkGoal.setProperty('Edit', False)
self.execButton.setEnabled(False)
def onClearClicked(self):
om.removeFromObjectModel(om.findObjectByName('walking goal'))
om.removeFromObjectModel(om.findObjectByName('footstep widget'))
om.removeFromObjectModel(om.findObjectByName('LCM GL'))
self.clearFootstepPlan()
if self.toolbarWidget:
self.execButton.setEnabled(False)
    def clearFootstepPlan(self):
        """Forget the last received footstep plan and delete its rendering."""
        self.lastFootstepPlan = None
        om.removeFromObjectModel(getFootstepsFolder())
    def drawFootstepPlan(self, msg, folder, left_color=None, right_color=None, alpha=1.0):
        """Render a footstep_plan_t message as foot meshes under `folder`.

        The first two footsteps are skipped (presumably the current stance
        feet -- TODO confirm against the planner message definition); later
        steps are shaded progressively brighter toward the goal.  Also draws
        per-step terrain-height slices and, when show_contact_slices is set,
        the walking volumes.
        """
        for step in folder.children():
            om.removeFromObjectModel(step)
        allTransforms = []
        volFolder = getWalkingVolumesFolder()
        map(om.removeFromObjectModel, volFolder.children())
        slicesFolder = getTerrainSlicesFolder()
        map(om.removeFromObjectModel, slicesFolder.children())
        for i, footstep in enumerate(msg.footsteps):
            trans = footstep.pos.translation
            trans = [trans.x, trans.y, trans.z]
            quat = footstep.pos.rotation
            quat = [quat.w, quat.x, quat.y, quat.z]
            footstepTransform = transformUtils.transformFromPose(trans, quat)
            allTransforms.append(footstepTransform)
            # skip the first two steps (not rendered)
            if i < 2:
                continue
            if footstep.is_right_foot:
                mesh = getRightFootMesh()
                if (right_color is None):
                    color = getRightFootColor()
                else:
                    color = right_color
            else:
                mesh = getLeftFootMesh()
                if (left_color is None):
                    color = getLeftFootColor()
                else:
                    color = left_color
            # add gradual shading to steps to indicate destination
            frac = float(i)/ float(msg.num_steps-1)
            this_color = [0,0,0]
            this_color[0] = 0.25*color[0] + 0.75*frac*color[0]
            this_color[1] = 0.25*color[1] + 0.75*frac*color[1]
            this_color[2] = 0.25*color[2] + 0.75*frac*color[2]
            if self.show_contact_slices:
                self.drawContactVolumes(footstepTransform, color)
            contact_pts_left, contact_pts_right = FootstepsDriver.getContactPts()
            if footstep.is_right_foot:
                sole_offset = np.mean(contact_pts_right, axis=0)
            else:
                sole_offset = np.mean(contact_pts_left, axis=0)
            # frame of the previous step of the same foot, shifted to the sole
            t_sole_prev = frameFromPositionMessage(msg.footsteps[i-2].pos)
            t_sole_prev.PreMultiply()
            t_sole_prev.Translate(sole_offset)
            t_sole = transformUtils.copyFrame(footstepTransform)
            t_sole.Translate(sole_offset)
            # terrain slice is drawn in a frame yawed to point from the
            # previous same-side sole toward this one
            yaw = np.arctan2(t_sole.GetPosition()[1] - t_sole_prev.GetPosition()[1],
                             t_sole.GetPosition()[0] - t_sole_prev.GetPosition()[0])
            T_terrain_to_world = transformUtils.frameFromPositionAndRPY([t_sole_prev.GetPosition()[0], t_sole_prev.GetPosition()[1], 0],
                                                                        [0, 0, math.degrees(yaw)])
            path_dist = np.array(footstep.terrain_path_dist)
            height = np.array(footstep.terrain_height)
            # if np.any(height >= trans[2]):
            # polyline of the terrain profile along the step path
            terrain_pts_in_local = np.vstack((path_dist, np.zeros(len(footstep.terrain_path_dist)), height))
            d = DebugData()
            for j in range(terrain_pts_in_local.shape[1]-1):
                d.addLine(terrain_pts_in_local[:,j], terrain_pts_in_local[:,j+1], radius=0.01)
            obj = vis.showPolyData(d.getPolyData(), 'terrain slice', parent=slicesFolder, visible=slicesFolder.getProperty('Visible'), color=[.8,.8,.3])
            obj.actor.SetUserTransform(T_terrain_to_world)
            # optional debug arrows showing per-step planner infeasibility
            renderInfeasibility = False
            if renderInfeasibility and footstep.infeasibility > 1e-6:
                d = DebugData()
                start = allTransforms[i-1].GetPosition()
                end = footstepTransform.GetPosition()
                d.addArrow(start, end, 0.02, 0.005,
                           startHead=True,
                           endHead=True)
                vis.showPolyData(d.getPolyData(), 'infeasibility %d -> %d' % (i-2, i-1), parent=folder, color=[1, 0.2, 0.2])
            stepName = 'step %d' % (i-1)
            obj = vis.showPolyData(mesh, stepName, color=this_color, alpha=alpha, parent=folder)
            obj.setIcon(om.Icons.Feet)
            frameObj = vis.showFrame(footstepTransform, stepName + ' frame', parent=obj, scale=0.3, visible=False)
            obj.actor.SetUserTransform(footstepTransform)
            # expose the support group as an editable per-step property;
            # edits trigger a replan via onFootstepPropertyChanged
            obj.addProperty('Support Contact Groups', footstep.params.support_contact_groups, attributes=om.PropertyAttributes(enumNames=['Whole Foot', 'Front 2/3', 'Back 2/3']))
            obj.properties.setPropertyIndex('Support Contact Groups', 0)
            obj.footstep_index = i
            obj.footstep_property_callback = obj.properties.connectPropertyChanged(functools.partial(self.onFootstepPropertyChanged, obj))
            self.drawContactPts(obj, footstep, color=this_color)
    def drawContactVolumes(self, footstepTransform, color):
        """Render the stacked walking volumes around one footstep.

        Each entry of self.contact_slices maps a (z_low, z_high) pair to the
        x/y corner coordinates of a slice; the corners at both heights are
        meshed into a convex volume via Delaunay triangulation.
        """
        volFolder = getWalkingVolumesFolder()
        for zs, xy in self.contact_slices.iteritems():
            points0 = np.vstack((xy, zs[0] + np.zeros((1,xy.shape[1]))))
            points1 = np.vstack((xy, zs[1] + np.zeros((1,xy.shape[1]))))
            points = np.hstack((points0, points1))
            # fixed offset between the foot frame and the volume origin
            # (presumably model-specific -- TODO confirm)
            points = points + np.array([[0.05],[0],[-0.0811]])
            points = points.T
            polyData = vnp.getVtkPolyDataFromNumpyPoints(points.copy())
            vol_mesh = filterUtils.computeDelaunay3D(polyData)
            obj = vis.showPolyData(vol_mesh, 'walking volume', parent=volFolder, alpha=0.5, visible=self.show_contact_slices, color=color)
            obj.actor.SetUserTransform(footstepTransform)
def onFootstepPropertyChanged(self, obj, propertySet, propertyName):
if propertyName == "Support Contact Groups":
self.lastFootstepPlan.footsteps[obj.footstep_index].params.support_contact_groups = obj.properties.support_contact_groups
self.sendUpdatePlanRequest()
def drawContactPts(self, obj, footstep, **kwargs):
leftPoints, rightPoints = FootstepsDriver.getContactPts(footstep.params.support_contact_groups)
contact_pts = rightPoints if footstep.is_right_foot else leftPoints
d = DebugData()
for pt in contact_pts:
d.addSphere(pt, radius=0.01)
d_obj = vis.showPolyData(d.getPolyData(), "contact points", parent=obj, **kwargs)
d_obj.actor.SetUserTransform(obj.actor.GetUserTransform())
    @staticmethod
    def getContactPts(support_contact_groups = lcmdrc.footstep_params_t.SUPPORT_GROUPS_HEEL_TOE):
        '''
        Hard-coded locations of the Drake contact points relative to the foot
        frame; this should really be read from the URDF.

        Returns (contact_pts_left, contact_pts_right), each a (4, 3) numpy
        array of x/y/z points in meters.  The points depend on the requested
        support group: full sole (heel+toe), front 2/3 (midfoot+toe) or
        back 2/3 (heel+midfoot).  Raises ValueError for an unknown support
        group or an unrecognised _modelName.
        '''
        contact_pts_left = np.zeros((4,3))
        contact_pts_right = np.zeros((4,3))
        if "atlas" in _modelName: # atlas_v3/v4/v5
            if support_contact_groups == lcmdrc.footstep_params_t.SUPPORT_GROUPS_HEEL_TOE:
                contact_pts_left[0,:] = [-0.0876, 0.0626, -0.07645]
                contact_pts_left[1,:] = [-0.0876, -0.0626, -0.07645]
                contact_pts_left[2,:] = [0.1728, 0.0626, -0.07645]
                contact_pts_left[3,:] = [0.1728, -0.0626, -0.07645]
            elif support_contact_groups == lcmdrc.footstep_params_t.SUPPORT_GROUPS_MIDFOOT_TOE:
                contact_pts_left[0,:] = [-0.0008, 0.0626, -0.07645]
                contact_pts_left[1,:] = [-0.0008, -0.0626, -0.07645]
                contact_pts_left[2,:] = [0.1728, 0.0626, -0.07645]
                contact_pts_left[3,:] = [0.1728, -0.0626, -0.07645]
            elif support_contact_groups == lcmdrc.footstep_params_t.SUPPORT_GROUPS_HEEL_MIDFOOT:
                contact_pts_left[0,:] = [-0.0876, 0.0626, -0.07645]
                contact_pts_left[1,:] = [-0.0876, -0.0626, -0.07645]
                contact_pts_left[2,:] = [0.086, 0.0626, -0.07645]
                contact_pts_left[3,:] = [0.086, -0.0626, -0.07645]
            else:
                raise ValueError("Unrecognized support contact group: {:d}".format(support_contact_groups))
            # the Atlas feet are symmetric, so the right foot reuses the left points
            contact_pts_right = contact_pts_left.copy()
        elif (_modelName == "valkyrie"): #valkyrie
            #these values were taken from ihmc code: ValkyriePhysicalProperties.java
            #they are also used in createFootstepList in lcm2ros_ihmc.cpp
            if support_contact_groups == lcmdrc.footstep_params_t.SUPPORT_GROUPS_HEEL_TOE:
                contact_pts_left[0,:] = [-0.038, 0.055, -0.09]
                contact_pts_left[1,:] = [-0.038, -0.055, -0.09]
                contact_pts_left[2,:] = [0.172, 0.055, -0.09]
                contact_pts_left[3,:] = [0.172, -0.055, -0.09]
            elif support_contact_groups == lcmdrc.footstep_params_t.SUPPORT_GROUPS_MIDFOOT_TOE:
                contact_pts_left[0,:] = [0.032, 0.055, -0.09]
                contact_pts_left[1,:] = [0.032, -0.055, -0.09]
                contact_pts_left[2,:] = [0.172, 0.055, -0.09]
                contact_pts_left[3,:] = [0.172, -0.055, -0.09]
            elif support_contact_groups == lcmdrc.footstep_params_t.SUPPORT_GROUPS_HEEL_MIDFOOT:
                contact_pts_left[0,:] = [-0.038, 0.055, -0.09]
                contact_pts_left[1,:] = [-0.038, -0.055, -0.09]
                contact_pts_left[2,:] = [0.102, 0.055, -0.09]
                contact_pts_left[3,:] = [0.102, -0.055, -0.09]
            else:
                raise ValueError("Unrecognized support contact group: {:d}".format(support_contact_groups))
            contact_pts_right = contact_pts_left.copy()
        else:
            print _modelName
            raise ValueError("modelName not recognised")
        return contact_pts_left, contact_pts_right
@staticmethod
def getFeetMidPoint(model, useWorldZ=True):
    '''
    Returns a frame in world coordinate system that is the average of the left
    and right foot reference point positions in world frame, the average of the
    left and right foot yaw in world frame, and Z axis aligned with world Z.
    The foot reference point is the average of the foot contact points in the foot frame.
    '''
    contact_pts_left, contact_pts_right = FootstepsDriver.getContactPts()
    # Per-foot reference point: mean of the contact points, in the foot frame.
    contact_pts_mid_left = np.mean(contact_pts_left, axis=0)
    contact_pts_mid_right = np.mean(contact_pts_right, axis=0)
    t_lf_mid = model.getLinkFrame(_leftFootLink)
    t_lf_mid.PreMultiply()
    t_lf_mid.Translate(contact_pts_mid_left)
    t_rf_mid = model.getLinkFrame(_rightFootLink)
    t_rf_mid.PreMultiply()
    t_rf_mid.Translate(contact_pts_mid_right)
    # The atlas and valkyrie branches of the original code were identical
    # (frameInterpolate at 0.5); keep a single code path but preserve the
    # loud failure for unrecognized models.
    if ("atlas" not in _modelName) and (_modelName != "valkyrie"):
        raise ValueError("Model Name not recognised")
    t_feet_mid = transformUtils.frameInterpolate(t_lf_mid, t_rf_mid, 0.5)
    if not useWorldZ:
        return t_feet_mid
    # Keep only the yaw component; roll/pitch are aligned with world axes.
    rpy = [0.0, 0.0, np.degrees(transformUtils.rollPitchYawFromTransform(t_feet_mid)[2])]
    return transformUtils.frameFromPositionAndRPY(t_feet_mid.GetPosition(), rpy)
@staticmethod
def debugDrawFootPoints(model):
    """Draw per-foot contact points, foot frames, and the feet mid-point
    frame into the 'debug' folder for visual inspection.
    """
    pts_left, pts_right = FootstepsDriver.getContactPts()
    d = DebugData()
    for linkName in [_leftFootLink, _rightFootLink]:
        t = model.getLinkFrame(linkName)
        d.addFrame(t, scale=0.2)
        # Fixed: was 'linkName is _leftFootLink' — identity comparison on
        # strings only works by interning accident; use equality.
        if linkName == _leftFootLink:
            pts = pts_left
        else:
            pts = pts_right
        footMidPoint = np.mean(pts, axis=0)
        for p in pts.tolist() + [footMidPoint.tolist()]:
            t.TransformPoint(p, p)  # transform in place into world frame
            d.addSphere(p, radius=0.015)
    midpt = FootstepsDriver.getFeetMidPoint(model)
    d.addFrame(midpt, scale=0.2)
    vis.showPolyData(d.getPolyData(), 'foot points debug', parent='debug', colorByName='RGB255')
def createGoalSteps(self, model, pose):
    """Build a short hard-coded sequence of alternating goal steps ahead of
    the robot and send a footstep plan request for them.

    :param model: robot model used to query current link frames
    :param pose: robot pose forwarded into the plan request
    """
    distanceForward = 1.0
    # NOTE(review): 'fr' is read from the LEFT foot link and 'fl' from the
    # RIGHT one — either the names or the frames look swapped; confirm intent
    # before relying on which foot each goal step is anchored to.
    fr = model.getLinkFrame(_leftFootLink)
    fl = model.getLinkFrame(_rightFootLink)
    pelvisT = model.getLinkFrame(_pelvisLink)
    # Forward direction: pelvis x-axis, re-orthogonalized so z stays world-up.
    xaxis = [1.0, 0.0, 0.0]
    pelvisT.TransformVector(xaxis, xaxis)
    xaxis = np.array(xaxis)
    zaxis = np.array([0.0, 0.0, 1.0])
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    numGoalSteps = 3
    is_right_foot = True
    self.goalSteps = []
    for i in range(numGoalSteps):
        t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
        t.PostMultiply()
        if is_right_foot:
            t.Translate(fr.GetPosition())
        else:
            t.Translate(fl.GetPosition())
        t.Translate(xaxis*distanceForward)
        distanceForward += 0.15  # each step lands slightly further ahead
        is_right_foot = not is_right_foot  # alternate feet
        step = lcmdrc.footstep_t()
        step.pos = positionMessageFromFrame(t)
        step.is_right_foot = is_right_foot
        step.params = self.getDefaultStepParams()
        self.goalSteps.append(step)
    request = self.constructFootstepPlanRequest(pose)
    request.num_goal_steps = len(self.goalSteps)
    request.goal_steps = self.goalSteps
    self.sendFootstepPlanRequest(request)
def onStepModified(self, ndx, frameObj):
    """Pin a user-moved footstep to its new frame and re-check the plan.

    The plan index is offset by 2 from the widget index — presumably the
    first two plan entries are the current stance feet (TODO confirm).
    """
    step = self.lastFootstepPlan.footsteps[ndx + 2]
    step.pos = positionMessageFromFrame(frameObj.transform)
    for attr in ("fixed_x", "fixed_y", "fixed_yaw"):
        setattr(step, attr, True)
    self.sendUpdatePlanRequest()
def sendUpdatePlanRequest(self):
    """Re-validate the last footstep plan by sending a check request that
    snaps the steps to terrain."""
    check = lcmdrc.footstep_check_request_t()
    check.initial_state = self.lastFootstepRequest.initial_state
    check.footstep_plan = self.lastFootstepPlan
    check.snap_to_terrain = True
    check.compute_infeasibility = False
    self.sendFootstepPlanCheckRequest(check)
def updateRequest(self):
    """Re-send the previous footstep plan request with the current
    planner parameters applied; no-op if nothing was requested yet."""
    if self.lastFootstepRequest is None:
        return
    self.sendFootstepPlanRequest(self.applyParams(self.lastFootstepRequest))
def constructFootstepPlanRequest(self, pose, goalFrame=None):
    """Build a footstep_plan_request_t for the given robot pose.

    :param pose: drake pose converted into the request's initial state
    :param goalFrame: goal transform; identity when omitted
    :return: request with planner params and safe regions applied
    """
    request = lcmdrc.footstep_plan_request_t()
    request.utime = getUtime()
    request.initial_state = robotstate.drakePoseToRobotState(pose)
    target = goalFrame if goalFrame is not None else vtk.vtkTransform()
    request.goal_pos = positionMessageFromFrame(target)
    return self.applySafeRegions(self.applyParams(request))
def applyParams(self, msg):
    """Copy the user-adjustable planner properties onto msg.params and set
    the default per-step parameters. Returns msg for chaining."""
    params = lcmdrc.footstep_plan_params_t()
    # (destination field on params, source property name) pairs.
    for dest, src in [('max_num_steps', 'max_num_steps'),
                      ('min_num_steps', 'min_num_steps'),
                      ('min_step_width', 'min_step_width'),
                      ('nom_step_width', 'nominal_step_width'),
                      ('max_step_width', 'max_step_width'),
                      ('nom_forward_step', 'nominal_forward_step'),
                      ('max_forward_step', 'max_forward_step'),
                      # NOTE(review): nominal up/down steps are sourced from
                      # the *max* properties — looks deliberate, but confirm.
                      ('nom_upward_step', 'max_upward_step'),
                      ('nom_downward_step', 'max_downward_step'),
                      ('planning_mode', 'planner_mode')]:
        setattr(params, dest, getattr(self.params.properties, src))
    params.behavior = self.behavior_lcm_map[self.params.properties.behavior]
    params.map_mode = self.map_mode_map[self.params.properties.map_mode]
    params.leading_foot = self.leading_foot_map[self.params.properties.leading_foot]
    msg.params = params
    msg.default_step_params = self.getDefaultStepParams()
    return msg
def applySafeRegions(self, msg):
    """Attach every walking-enabled IRIS safe-terrain region to the request.

    Regions come from the 'Safe terrain regions' object-model folder; if the
    folder is missing, zero regions are attached. Returns msg for chaining.
    """
    folder = om.findObjectByName('Safe terrain regions')
    regions = []
    if folder:
        regions = [child.safe_region for child in folder.children()
                   if child.getProperty('Enabled for Walking')]
    msg.num_iris_regions = len(regions)
    for region in regions:
        msg.iris_regions.append(region.to_iris_region_t())
    return msg
def sendFootstepPlanCheckRequest(self, request, waitForResponse=False, waitTimeout=5000):
    """Publish a footstep plan check request.

    When waitForResponse is False, publish and return immediately. With
    waitTimeout == 0, publish and return a MessageResponseHelper the caller
    can poll; otherwise block up to waitTimeout ms for the plan response.
    """
    assert isinstance(request, lcmdrc.footstep_check_request_t)
    requestChannel = 'FOOTSTEP_CHECK_REQUEST'
    responseChannel = 'FOOTSTEP_PLAN_RESPONSE'
    if not waitForResponse:
        lcmUtils.publish(requestChannel, request)
        return
    if waitTimeout == 0:
        helper = lcmUtils.MessageResponseHelper(responseChannel, lcmdrc.footstep_plan_t)
        lcmUtils.publish(requestChannel, request)
        return helper
    return lcmUtils.MessageResponseHelper.publishAndWait(
        requestChannel, request, responseChannel, lcmdrc.footstep_plan_t, waitTimeout)
def sendFootstepPlanRequest(self, request, waitForResponse=False, waitTimeout=5000):
    """Publish a footstep plan request and remember it as the last request.

    Same wait semantics as sendFootstepPlanCheckRequest: immediate return,
    pollable helper (waitTimeout == 0), or blocking wait.
    """
    assert isinstance(request, lcmdrc.footstep_plan_request_t)
    self.lastFootstepRequest = request
    requestChannel = 'FOOTSTEP_PLAN_REQUEST'
    responseChannel = 'FOOTSTEP_PLAN_RESPONSE'
    if not waitForResponse:
        lcmUtils.publish(requestChannel, request)
        return
    if waitTimeout == 0:
        helper = lcmUtils.MessageResponseHelper(responseChannel, lcmdrc.footstep_plan_t)
        lcmUtils.publish(requestChannel, request)
        return helper
    return lcmUtils.MessageResponseHelper.publishAndWait(
        requestChannel, request, responseChannel, lcmdrc.footstep_plan_t, waitTimeout)
def sendWalkingPlanRequest(self, footstepPlan, startPose, waitForResponse=False, waitTimeout=5000, req_type='traj'):
    """Request a walking plan for footstepPlan starting from startPose.

    :param req_type: backend selector — 'traj', 'controller', or
        'simulate_drake'; raises ValueError otherwise.
    Wait semantics match sendFootstepPlanRequest.
    """
    msg = lcmdrc.walking_plan_request_t()
    msg.utime = getUtime()
    msg.initial_state = robotstate.drakePoseToRobotState(startPose)
    msg.new_nominal_state = msg.initial_state
    msg.use_new_nominal_state = True
    msg.footstep_plan = footstepPlan
    # Dispatch table: request channel, response channel, response LCM type.
    channels = {
        'traj': ('WALKING_TRAJ_REQUEST', 'WALKING_TRAJ_RESPONSE', lcmdrc.robot_plan_t),
        'controller': ('WALKING_CONTROLLER_PLAN_REQUEST', 'WALKING_CONTROLLER_PLAN_RESPONSE', lcmdrc.walking_plan_t),
        'simulate_drake': ('WALKING_SIMULATION_DRAKE_REQUEST', 'WALKING_SIMULATION_TRAJ_RESPONSE', lcmdrc.robot_plan_t),
    }
    if req_type not in channels:
        raise ValueError("Invalid request type: {:s}".format(req_type))
    requestChannel, responseChannel, response_type = channels[req_type]
    if not waitForResponse:
        lcmUtils.publish(requestChannel, msg)
        return
    if waitTimeout == 0:
        helper = lcmUtils.MessageResponseHelper(responseChannel, response_type)
        lcmUtils.publish(requestChannel, msg)
        return helper
    return lcmUtils.MessageResponseHelper.publishAndWait(
        requestChannel, msg, responseChannel, response_type, waitTimeout)
def sendStopWalking(self):
    """Command the controller to terminate the walking plan in progress."""
    stop = lcmdrc.plan_control_t()
    stop.utime = getUtime()
    stop.control = lcmdrc.plan_control_t.TERMINATE
    lcmUtils.publish('STOP_WALKING', stop)
def commitFootstepPlan(self, footstepPlan):
    """Execute a footstep plan, dispatching on its behavior mode.

    Each plan (identified by utime) may be committed only once; a repeat
    commit raises for safety. The committed plan is redrawn faded.
    """
    if any(previous.utime == footstepPlan.utime for previous in self.committedPlans):
        raise Exception("Footstep plan was already executed. Execution of the plan is no longer allowed for safety reasons. You should request a new footstep plan.")
    self.committedPlans.append(footstepPlan)
    self.drawFootstepPlan(footstepPlan, getFootstepsFolder(), alpha=0.3)
    bdi_behaviors = (lcmdrc.footstep_plan_params_t.BEHAVIOR_BDI_STEPPING,
                     lcmdrc.footstep_plan_params_t.BEHAVIOR_BDI_WALKING)
    if footstepPlan.params.behavior in bdi_behaviors:
        self._commitFootstepPlanBDI(footstepPlan)
    elif footstepPlan.params.behavior == lcmdrc.footstep_plan_params_t.BEHAVIOR_WALKING:
        self._commitFootstepPlanDrake(footstepPlan)
def _commitFootstepPlanDrake(self, footstepPlan):
    """Execute via the Drake walking controller, starting from the current
    estimated robot state."""
    estimatedPose = self.jointController.getPose('EST_ROBOT_STATE')
    self.sendWalkingPlanRequest(footstepPlan, estimatedPose, req_type='controller')
def _commitFootstepPlanBDI(self, footstepPlan):
    """Execute via the BDI behavior by publishing the plan with a fresh
    timestamp on the committed-plan channel."""
    footstepPlan.utime = getUtime()
    lcmUtils.publish('COMMITTED_FOOTSTEP_PLAN', footstepPlan)
def sendHaltSimulationDrakeRequest(self):
    """Ask the Drake simulator to halt."""
    halt = lcmdrc.utime_t()
    halt.utime = getUtime()
    lcmUtils.publish('HALT_DRAKE_SIMULATION', halt)
####################### BDI Adjustment Logic and Visualization ##################
def onPoseAlt(self, msg):
    """Cache the BDI pose estimate and mirror it into the alt joint controller."""
    self.poseAlt = msg
    # Overwrite the floating-base xyz and rpy of our pose with BDI's estimate.
    pose = self.jointController.q.copy()
    pose[0:3] = msg.pos
    pose[3:6] = transformUtils.quaternionToRollPitchYaw(msg.orientation)
    self.altJointController.setPose("ERS Alt", pose)
def onBDIAdjustedFootstepPlan(self, msg):
    """Store BDI's adjusted footstep plan and redraw it when enabled."""
    # decode(encode()) is a cheap deep copy of the LCM message
    self.bdi_plan_adjusted = msg.decode(msg.encode())
    if self.showBDIPlan is True:
        self.drawBDIFootstepPlanAdjusted()
def transformPlanToBDIFrame(self, plan):
    """Re-express a footstep plan in the BDI (POSE_BODY_ALT) world frame.

    Stores a deep copy of *plan* in self.bdi_plan with every footstep
    position rewritten, then redraws it if showBDIPlan is set. No-op until
    the first POSE_BODY_ALT message has been received.
    """
    if (self.poseAlt is None):
        # print "haven't received POSE_BODY_ALT"
        return
    # TODO: This transformation should be rewritten using the LOCAL_TO_LOCAL_ALT frame
    # instead of using FK here
    # World-from-body transform as estimated by BDI.
    t_bodybdi = transformUtils.transformFromPose(self.poseAlt.pos, self.poseAlt.orientation)
    t_bodybdi.PostMultiply()
    # World-from-body transform from our own state estimate.
    current_pose = self.jointController.q
    t_bodymain = transformUtils.transformFromPose( current_pose[0:3] , transformUtils.rollPitchYawToQuaternion(current_pose[3:6]) )
    t_bodymain.PostMultiply()
    # iterate and transform
    self.bdi_plan = plan.decode( plan.encode() ) # decode and encode ensures deepcopy
    for i, footstep in enumerate(self.bdi_plan.footsteps):
        step = footstep.pos
        t_step = frameFromPositionMessage(step)
        # Express the step relative to our body estimate ...
        t_body_to_step = vtk.vtkTransform()
        t_body_to_step.DeepCopy(t_step)
        t_body_to_step.PostMultiply()
        t_body_to_step.Concatenate(t_bodymain.GetLinearInverse())
        # ... then re-anchor it on the BDI body estimate.
        t_stepbdi = vtk.vtkTransform()
        t_stepbdi.DeepCopy(t_body_to_step)
        t_stepbdi.PostMultiply()
        t_stepbdi.Concatenate(t_bodybdi)
        footstep.pos = positionMessageFromFrame(t_stepbdi)
    if (self.showBDIPlan is True):
        self.drawBDIFootstepPlan()
    #else:
    # print "not showing bdi plan"
def drawBDIFootstepPlan(self):
    """Redraw the transformed BDI footstep plan (blue/red colors)."""
    if self.bdi_plan is None:
        return
    # Remove and recreate the folder so stale geometry is cleared.
    om.removeFromObjectModel(om.getOrCreateContainer("BDI footstep plan"))
    folder = om.getOrCreateContainer("BDI footstep plan")
    folder.setIcon(om.Icons.Feet)
    om.collapse(folder)
    self.drawFootstepPlan(self.bdi_plan, folder, [0.0, 0.0, 1.0], [1.0, 0.0, 0.0])
def drawBDIFootstepPlanAdjusted(self):
    """Redraw the BDI-adjusted footstep plan (yellow/cyan colors)."""
    if self.bdi_plan_adjusted is None:
        return
    # Remove and recreate the folder so stale geometry is cleared.
    om.removeFromObjectModel(om.getOrCreateContainer('BDI adj footstep plan'))
    folder = om.getOrCreateContainer('BDI adj footstep plan')
    folder.setIcon(om.Icons.Feet)
    om.collapse(folder)
    self.drawFootstepPlan(self.bdi_plan_adjusted, folder, [1.0, 1.0, 0.0], [0.0, 1.0, 1.0])
class FootstepRequestGenerator(object):
    """Builds footstep plan requests from explicit lists of step frames."""

    def __init__(self, footstepsDriver):
        self.footstepsDriver = footstepsDriver

    @staticmethod
    def getRobotStanceFrame(robotModel):
        """Return the mid-feet stance frame for the given robot model."""
        return FootstepsDriver.getFeetMidPoint(robotModel)

    @staticmethod
    def makeStepFrames(stepFrames, relativeFrame=None, showFrames=False):
        """Convert [x, y, z] step positions into vtk transforms.

        Each position is optionally expressed relative to *relativeFrame* and,
        when *showFrames* is set, visualized under the 'step frames' folder.
        """
        frames = []
        for idx, position in enumerate(stepFrames):
            frame = transformUtils.frameFromPositionAndRPY(position, [0, 0, 0])
            frame.PostMultiply()
            if relativeFrame:
                frame.Concatenate(relativeFrame)
            if showFrames:
                frameObj = vis.updateFrame(frame, 'step frame %d' % idx, parent='step frames', scale=0.2)
                frame = frameObj.transform
            frames.append(frame)
        return frames

    def makeStepMessages(self, stepFrames, leadingFoot, snapToTerrain=False):
        """Turn step frames into footstep_t messages with alternating feet.

        :param leadingFoot: 'left' or 'right'; selects the first foot
        :param snapToTerrain: when True, leave z/roll/pitch free so the
            planner can snap each step onto the terrain
        """
        assert leadingFoot in ('left', 'right')
        rightFootOffset = 0 if leadingFoot == 'left' else 1
        leftPoints, rightPoints = FootstepsDriver.getContactPts()
        # note, assumes symmetrical feet. the loop below should be
        # updated to alternate between left/right contact point sets
        footOriginToSole = -np.mean(leftPoints, axis=0)
        stepMessages = []
        for stepIndex, stepFrame in enumerate(stepFrames):
            soleFrame = transformUtils.copyFrame(stepFrame)
            soleFrame.PreMultiply()
            soleFrame.Translate(footOriginToSole)
            step = lcmdrc.footstep_t()
            step.pos = positionMessageFromFrame(soleFrame)
            step.is_right_foot = (stepIndex + rightFootOffset) % 2
            step.params = self.footstepsDriver.getDefaultStepParams()
            step.fixed_x = True
            step.fixed_y = True
            step.fixed_yaw = True
            # Fully pinned unless terrain snapping frees the vertical dofs.
            step.fixed_z = not snapToTerrain
            step.fixed_roll = not snapToTerrain
            step.fixed_pitch = not snapToTerrain
            stepMessages.append(step)
        return stepMessages

    def makeFootstepRequest(self, startPose, stepFrames, leadingFoot, numberOfFillSteps=0, snapToTerrain=False):
        """Build a full plan request whose goal steps are *stepFrames*."""
        stepMessages = self.makeStepMessages(stepFrames, leadingFoot, snapToTerrain=snapToTerrain)
        request = self.footstepsDriver.constructFootstepPlanRequest(startPose)
        request.num_goal_steps = len(stepMessages)
        request.goal_steps = stepMessages
        if leadingFoot == 'left':
            request.params.leading_foot = lcmdrc.footstep_plan_params_t.LEAD_LEFT
        else:
            request.params.leading_foot = lcmdrc.footstep_plan_params_t.LEAD_RIGHT
        request.params.max_num_steps = len(stepMessages) + numberOfFillSteps
        return request
|
<gh_stars>1-10
import random
import threading
from coapthon.messages.message import Message
from coapthon import defines
from coapthon.client.coap import CoAP
from coapthon.messages.request import Request
from coapthon.utils import generate_random_token
__author__ = '<NAME>'
class _RequestContext(object):
def __init__(self, request, callback=None):
self.request = request
if callback:
self.callback = callback
else:
self.response = None
self.responded = threading.Event()
class HelperClient(object):
    """
    Helper Client class to perform requests to remote servers in a simplified way.
    """
    def __init__(self, server, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
        """
        Initialize a client to perform request to a server.

        :param server: the remote CoAP server
        :param sock: if a socket has been created externally, it can be used directly
        :param cb_ignore_read_exception: Callback function to handle exception raised during the socket read operation
        :param cb_ignore_write_exception: Callback function to handle exception raised during the socket write operation
        """
        self.server = server
        self.protocol = CoAP(self.server, random.randint(1, 65535), self._wait_response, sock=sock,
                             cb_ignore_read_exception=cb_ignore_read_exception, cb_ignore_write_exception=cb_ignore_write_exception)
        # Maps outstanding request tokens -> _RequestContext; guarded by requests_lock.
        self.requests_lock = threading.RLock()
        self.requests = dict()

    def _wait_response(self, message):
        """
        Private function to get responses from the server.

        :param message: the received message
        """
        if message.code == defines.Codes.CONTINUE.number:
            return
        with self.requests_lock:
            if message.token not in self.requests:
                return
            context = self.requests[message.token]
            # Capture the token before we may null out `message` below;
            # the original code dereferenced message.token after setting
            # message = None, which crashed on timeouts.
            token = message.token
            if message.timeouted:
                # Message is actually the original timed out request (not the response), discard content
                message = None
            if hasattr(context, 'callback'):
                if not hasattr(context.request, 'observe'):
                    # OBSERVE stays until cancelled, for all others we're done
                    del self.requests[token]
                context.callback(message)
            else:
                # Signal that a response is available to blocking call
                context.response = message
                context.responded.set()

    def stop(self):
        """
        Stop the client.

        Closes the transport and unblocks or notifies every pending request
        with a ``None`` response.
        """
        self.protocol.close()
        with self.requests_lock:
            # Unblock/signal waiters
            for token in self.requests:
                context = self.requests[token]
                if hasattr(context, 'callback'):
                    context.callback(None)
                else:
                    context.responded.set()

    def close(self):
        """
        Close the client (alias for :meth:`stop`).
        """
        self.stop()

    def cancel_observe_token(self, token, explicit, timeout=None):  # pragma: no cover
        """
        Delete observing on the remote server.

        :param token: the observe token
        :param explicit: if True, also send an explicit RFC 7641 cancel
            (GET with OBSERVE=1 reusing the same token)
        :type explicit: bool
        :param timeout: timeout for the explicit cancel request
        """
        with self.requests_lock:
            if token not in self.requests:
                return
            if not hasattr(self.requests[token].request, 'observe'):
                return
            context = self.requests[token]
            del self.requests[token]
            self.protocol.end_observation(token)
            if not explicit:
                return
            request = self.mk_request(defines.Codes.GET, context.request.uri_path)
            # RFC7641 explicit cancel is by sending OBSERVE=1 with the same token,
            # not by an unsolicited RST (which would be ignored)
            request.token = token
            request.observe = 1
            self.send_request(request, callback=None, timeout=timeout)

    def cancel_observing(self, response, explicit):  # pragma: no cover
        """
        Delete observing on the remote server.

        :param response: the last received response
        :param explicit: if explicitly cancel using token
        :type explicit: bool
        """
        # Fixed: 'self' was previously passed as the token argument,
        # shifting every parameter by one position.
        self.cancel_observe_token(response.token, explicit)

    @staticmethod
    def _apply_kwargs(request, kwargs):
        """Copy recognised keyword arguments onto the request object."""
        # .items() works on both Python 2 and 3 (iteritems is py2-only).
        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)

    def get(self, path, callback=None, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a GET on a certain path.

        :param path: the path
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response
        """
        request = self.mk_request(defines.Codes.GET, path)
        request.token = generate_random_token(2)
        self._apply_kwargs(request, kwargs)
        return self.send_request(request, callback, timeout)

    def observe(self, path, callback, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a GET with observe on a certain path.

        :param path: the path
        :param callback: the callback function to invoke upon notifications
        :param timeout: the timeout of the request
        :return: the response to the observe request
        """
        request = self.mk_request(defines.Codes.GET, path)
        request.token = generate_random_token(2)
        request.observe = 0
        self._apply_kwargs(request, kwargs)
        return self.send_request(request, callback, timeout)

    def delete(self, path, callback=None, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a DELETE on a certain path.

        :param path: the path
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response
        """
        request = self.mk_request(defines.Codes.DELETE, path)
        request.token = generate_random_token(2)
        self._apply_kwargs(request, kwargs)
        return self.send_request(request, callback, timeout)

    def post(self, path, payload, callback=None, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a POST on a certain path.

        :param path: the path
        :param payload: the request payload
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response
        """
        request = self.mk_request(defines.Codes.POST, path)
        request.token = generate_random_token(2)
        request.payload = payload
        self._apply_kwargs(request, kwargs)
        return self.send_request(request, callback, timeout)

    def put(self, path, payload, callback=None, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a PUT on a certain path.

        :param path: the path
        :param payload: the request payload
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response
        """
        request = self.mk_request(defines.Codes.PUT, path)
        request.token = generate_random_token(2)
        request.payload = payload
        self._apply_kwargs(request, kwargs)
        return self.send_request(request, callback, timeout)

    def discover(self, callback=None, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a Discover request on the server.

        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response
        """
        request = self.mk_request(defines.Codes.GET, defines.DISCOVERY_URL)
        request.token = generate_random_token(2)
        self._apply_kwargs(request, kwargs)
        return self.send_request(request, callback, timeout)

    def send_request(self, request, callback=None, timeout=None):  # pragma: no cover
        """
        Send a request to the remote server.

        :param request: the request to send
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response (synchronous), or the token (for asynchronous callback)
        """
        with self.requests_lock:
            # Same requests from the same endpoint must have different tokens
            # Ensure there is a unique token in case the other side issues a
            # delayed response after a standalone ACK
            while request.token in self.requests:
                request.token = generate_random_token(2)
            context = _RequestContext(request, callback)
            self.requests[request.token] = context
        self.protocol.send_message(request)
        if callback:
            # So that requester can cancel asynchronous OBSERVE
            return request.token
        # Wait for response (or timeout)
        context.responded.wait(timeout)
        with self.requests_lock:
            # pop() tolerates the entry already being removed by stop()/cancel
            # (the original unconditional del could raise KeyError).
            self.requests.pop(request.token, None)
        return context.response

    def send_empty(self, empty):  # pragma: no cover
        """
        Send empty message.

        :param empty: the empty message
        """
        self.protocol.send_message(empty)

    def mk_request(self, method, path):
        """
        Create a request.

        :param method: the CoAP method
        :param path: the path of the request
        :return: the request
        """
        request = Request()
        request.destination = self.server
        request.code = method.number
        request.uri_path = path
        return request
|
<filename>src/runners.py<gh_stars>0
from tqdm import trange
from src import metrics
import numpy as np
import torch
import os
from torch.utils.tensorboard import SummaryWriter
def train(
    net,
    criterion,
    optimizer,
    lr_scheduler,
    train_dataloader,
    test_dataloader,
    n_epochs,
    device,
    save_path,
    save_every_n_epochs,
):
    """Trains the model on the supplied train data during n_epochs.

    Args:
        net (torch.nn.Module): model to train. normally defined in the models module
        criterion (torch.nn.modules.loss.Module): loss function
        optimizer (torch.optim): optimizer function
        lr_scheduler (torch.optim.lr_scheduler): learning rate scheduler
        train_dataloader (torch.utils.data.dataloader.DataLoader): training set
        test_dataloader (torch.utils.data.dataloader.DataLoader): test set
        n_epochs (int): number of epochs to train
        device (str): 'cuda' or 'cpu'
        save_path (str): folder where the checkpoints of the models will be dumped
        save_every_n_epochs (int): number of epochs after checkpoint

    Returns:
        model (torch.nn.Module): model trained
    """
    log_dir = os.path.join(save_path, "logs")
    os.makedirs(log_dir, exist_ok=True)  # race-free replacement for exists()+makedirs()
    sw = SummaryWriter(log_dir=log_dir)
    net = net.to(device)
    # n_epochs + 1 iterations: epoch 0 logs the initial (untrained) state.
    pb = trange(n_epochs + 1, desc="", leave=True)
    for epoch in pb:
        net.train()
        log_params_distribution(writer=sw, model=net, epoch=epoch)
        # Checkpoint model + optimizer state periodically.
        if epoch % save_every_n_epochs == 0:
            torch.save(
                {
                    "alias": os.path.split(save_path)[-1],
                    "epoch": epoch,
                    "model_state_dict": net.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                },
                os.path.join(save_path, f"e_{epoch:03d}.pt"),
            )
        # Log train metrics
        metrics_dict_train = eval_epoch(
            net=net,
            dataloader=train_dataloader,
            device=device,
            metric_names=["crossentropy", "accuracy"],
        )
        metrics_train = [
            name + ": " + str(round(value, 3))
            for (name, value) in metrics_dict_train.items()
        ]
        sw.add_scalar("train/crossentropy", metrics_dict_train["crossentropy"], epoch)
        sw.add_scalar("train/accuracy", metrics_dict_train["accuracy"], epoch)
        # Log test metrics
        metrics_dict_test = eval_epoch(
            net=net,
            dataloader=test_dataloader,
            device=device,
            metric_names=["crossentropy", "accuracy"],
        )
        metrics_test = [
            name + ": " + str(round(value, 3))
            for (name, value) in metrics_dict_test.items()
        ]
        sw.add_scalar("test/crossentropy", metrics_dict_test["crossentropy"], epoch)
        sw.add_scalar("test/accuracy", metrics_dict_test["accuracy"], epoch)
        # Build the progress bar description
        pb.set_description(
            f"[EPOCH: {epoch}] [LR: {round(lr_scheduler.get_last_lr()[0], 8)}] Train_{' | Train_'.join(metrics_train)} | Test_{' | Test_'.join(metrics_test)}"
        )
        train_epoch(
            net=net,
            criterion=criterion,
            optimizer=optimizer,
            dataloader=train_dataloader,
            device=device,
        )
        # Fixed: step the scheduler AFTER the epoch's optimizer steps, as
        # required since PyTorch 1.1 (stepping first skipped the initial LR
        # and advanced the schedule one epoch early).
        lr_scheduler.step()
    return net
def log_params_distribution(writer, model, epoch):
    """Write a histogram of every named parameter of *model* to *writer*,
    tagged with the current epoch."""
    for param_name, param_tensor in model.named_parameters():
        writer.add_histogram(param_name, param_tensor, epoch)
def train_epoch(net, criterion, optimizer, dataloader, device):
    """Run one optimization pass over *dataloader*.

    Args:
        net: model being trained
        criterion: loss function
        optimizer: optimizer stepping the model parameters
        dataloader: iterable of (inputs, labels) batches
        device: 'cuda' or 'cpu'

    Returns:
        float: mean per-batch loss. Fixed: returns 0.0 for an empty
        dataloader instead of raising NameError on the undefined loop index.
    """
    total_loss = 0.0
    num_batches = 0
    for inputs, labels in dataloader:
        inputs = inputs.to(device)
        labels = labels.to(device)
        loss = train_step(
            net=net,
            criterion=criterion,
            optimizer=optimizer,
            inputs=inputs,
            labels=labels,
        )
        total_loss += loss.cpu().item()
        num_batches += 1
    return total_loss / num_batches if num_batches else 0.0
def eval_epoch(net, dataloader, device, metric_names=("accuracy",)):
    """Evaluate *net* over *dataloader* and compute the requested metrics.

    Fixed: runs in eval mode under torch.no_grad() so dropout/batch-norm
    behave deterministically and no autograd graph is retained. (The train
    loop re-enables train mode before each optimization step.)

    Args:
        net: model to evaluate
        dataloader: iterable of (inputs, labels) batches
        device: 'cuda' or 'cpu'
        metric_names: names of functions looked up on the metrics module

    Returns:
        dict: metric name -> value over the whole dataloader
    """
    y_true = []
    y_pred = []
    net.eval()
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = net(inputs)
            y_true.append(labels.cpu().data.numpy())
            y_pred.append(outputs.cpu().data.numpy())
    y_true = np.concatenate(y_true, axis=0)
    y_pred = np.concatenate(y_pred, axis=0)
    return {
        name: getattr(metrics, name)(y_true=y_true, y_pred=y_pred)
        for name in metric_names
    }
def train_step(net, criterion, optimizer, inputs, labels):
    """Perform a single optimization step and return the loss tensor."""
    net.train()  # ensure training-mode behavior (dropout, batch-norm)
    optimizer.zero_grad()
    # Forward, backward, then parameter update.
    loss = criterion(net(inputs), labels)
    loss.backward()
    optimizer.step()
    return loss
|
# Copyright 2018 Amazon Research Cambridge
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import datetime
import json
from decimal import Decimal
from annotation.schema.annotations_rds import create_session
# Open a database session and collect every distinct user that has created
# either a claim or an annotation.
session = create_session()
users = session.execute("""
SELECT user
FROM claim
GROUP BY user
UNION
SELECT user
FROM annotation
GROUP BY user;
""")
# Manually exclude users from the reports (in case they are not currently active)
# NB: This is a duplicate of the list in oracle_eval.py (line 278)
exclude_list = ['esservis', 'flynna', 'guest', 'hjingnin',
                'hokathle', 'mpearsal', 'stefom', 'chrchrs']
# Rows come back as 1-tuples; keep only the bare usernames.
user_list = [user[0] for user in users if user[0] not in exclude_list]
def get_user_cube():
    """Return SQL pivot columns: one summed per-user total per known user."""
    column_template = "sum(case user when '{0}' then total else 0 end) as '{0}'"
    return ",\n".join(column_template.format(user) for user in user_list)
def get_stats(name, testing):
    """Export claim-annotation throughput reports as CSV files.

    Queries the `claim` table for weekly and daily counts and per-user
    annotation times (overall and pivoted per user), then writes one CSV per
    breakdown, prefixed with *name*.

    :param name: filename prefix for the generated CSVs
    :param testing: value matched against the claim.testing column
    """
    testing = {"testing": testing}
    # Total claims per ISO week.
    weekly_live_breakdown = session.execute("""
    SELECT
    date_format(created, '%Y-%U') AS week_number,
    str_to_date(concat(date_format(created, '%Y-%U'),'-',0),'%Y-%U-%w') as week_commencing,
    count(*) as total
    FROM claim
    WHERE testing = :testing
    GROUP BY week_number
    ORDER BY week_number;
    """, testing)
    # Total claims per day.
    daily_live_breakdown = session.execute("""
    SELECT
    date_format(created, '%Y-%m-%d') as Date,
    count(*) as total
    FROM claim
    WHERE testing = :testing
    GROUP BY date
    ORDER BY date;
    """, testing)
    # One pivoted SUM(...) column per active user (see get_user_cube).
    user_cube = get_user_cube()
    # Claims per week, pivoted by user.
    weekly_user_live_breakdown = session.execute("""
    select week_number,
    week_commencing,
    {0}
    from (
    SELECT
    date_format(created, '%Y-%U') AS week_number,
    str_to_date(concat(date_format(created, '%Y-%U'),'-',0),'%Y-%U-%w') as week_commencing,
    user,
    count(*) as total
    FROM claim
    WHERE testing = :testing
    GROUP BY week_number,user
    ORDER BY week_number) as a
    group by week_number
    ORDER BY week_number;
    """.format(user_cube), testing)
    # Annotation time per week, pivoted by user.
    weekly_time_user_live_breakdown = session.execute("""
    select week_number,
    week_commencing,
    {0}
    from (
    SELECT
    date_format(created, '%Y-%U') AS week_number,
    str_to_date(concat(date_format(created, '%Y-%U'),'-',0),'%Y-%U-%w') as week_commencing,
    user,
    sum(timeTakenToAnnotate) as total
    FROM claim
    WHERE testing = :testing
    GROUP BY week_number,user
    ORDER BY week_number) as a
    group by week_number
    ORDER BY week_number;
    """.format(user_cube), testing)
    # Claims per day, pivoted by user.
    daily_user_live_breakdown = session.execute("""
    select
    Date,
    {0}
    from (
    SELECT
    date_format(created, '%Y-%m-%d') as Date,
    user,
    count(*) as total
    FROM claim
    WHERE testing = :testing
    GROUP BY Date,user
    ORDER BY Date) as a
    group by Date
    ORDER BY Date;
    """.format(user_cube), testing)
    # Annotation time per day, pivoted by user.
    daily_time_user_live_breakdown = session.execute("""
    select
    Date,
    {0}
    from (
    SELECT
    date_format(created, '%Y-%m-%d') as Date,
    user,
    sum(timeTakenToAnnotate) as total
    FROM claim
    WHERE testing = :testing
    GROUP BY Date,user
    ORDER BY Date) as a
    group by Date
    ORDER BY Date;
    """.format(user_cube), testing)
    # coerce_all / save_csv are defined elsewhere in this module; presumably
    # they normalise row values and write (keys, rows) to CSV — verify there.
    lwk, lw = coerce_all(weekly_user_live_breakdown)
    ldk, ld = coerce_all(daily_user_live_breakdown)
    lwtk, ltw = coerce_all(weekly_time_user_live_breakdown, time=True)
    ldtk, ltd = coerce_all(daily_time_user_live_breakdown, time=True)
    dbk, db = coerce_all(daily_live_breakdown)
    wbk, wb = coerce_all(weekly_live_breakdown)
    save_csv(name+"_weekly", lwk, lw)
    save_csv(name+"_time_weekly", lwtk, ltw)
    save_csv(name+"_daily", ldk, ld)
    save_csv(name+"_time_daily", ldtk, ltd)
    save_csv(name+"_daily_totals", dbk, db)
    save_csv(name+"_weekly_totals", wbk, wb)
def get_stats_wf2(name, testing):
    """Generate workflow-2 statistics (from the `annotation` table) and save
    them as CSV reports plus an HTML page listing flagged claims.

    name -- prefix for the generated report files (e.g. "live" / "sandbox").
    testing -- matched against annotation.isTestMode (0 = live, 1 = sandbox).

    Relies on the module-level SQLAlchemy `session` and the helper functions
    get_user_cube/coerce_all/save_csv/save_html defined in this file.
    """
    # Re-bind `testing` as the bound-parameter dict used by every query below.
    testing = {"testing": testing}
    # Weekly annotation totals across all users.
    weekly_live_breakdown = session.execute("""
        SELECT
        date_format(created, '%Y-%U') AS week_number,
        str_to_date(concat(date_format(created, '%Y-%U'),'-',0),'%Y-%U-%w') as week_commencing,
        count(*) as total
        FROM annotation
        WHERE isTestMode = :testing
        GROUP BY week_number
        ORDER BY week_number;
    """, testing)
    # Daily annotation totals across all users.
    daily_live_breakdown = session.execute("""
        SELECT
        date_format(created, '%Y-%m-%d') as Date,
        count(*) as total
        FROM annotation
        WHERE isTestMode = :testing
        GROUP BY date
        ORDER BY date;
    """, testing)
    # SQL fragment producing one aggregate column per user; spliced into the
    # outer SELECT of the per-user queries below via str.format.
    user_cube = get_user_cube()
    # Weekly per-user annotation counts.
    weekly_user_live_breakdown = session.execute("""
        select week_number,
        week_commencing,
        {0}
        from (
        SELECT
        date_format(created, '%Y-%U') AS week_number,
        str_to_date(concat(date_format(created, '%Y-%U'),'-',0),'%Y-%U-%w') as week_commencing,
        user,
        count(*) as total
        FROM annotation
        WHERE isTestMode = :testing
        GROUP BY week_number,user
        ORDER BY week_number) as a
        group by week_number
        ORDER BY week_number;
    """.format(user_cube), testing)
    # Weekly per-user time spent annotating (seconds).
    weekly_time_user_live_breakdown = session.execute("""
        select week_number,
        week_commencing,
        {0}
        from (
        SELECT
        date_format(created, '%Y-%U') AS week_number,
        str_to_date(concat(date_format(created, '%Y-%U'),'-',0),'%Y-%U-%w') as week_commencing,
        user,
        sum(timeTakenToAnnotate) as total
        FROM annotation
        WHERE isTestMode = :testing
        GROUP BY week_number,user
        ORDER BY week_number) as a
        group by week_number
        ORDER BY week_number;
    """.format(user_cube), testing)
    # Daily per-user annotation counts.
    daily_user_live_breakdown = session.execute("""
        select
        Date,
        {0}
        from (
        SELECT
        date_format(created, '%Y-%m-%d') as Date,
        user,
        count(*) as total
        FROM annotation
        WHERE isTestMode = :testing
        GROUP BY Date,user
        ORDER BY Date) as a
        group by Date
        ORDER BY Date;
    """.format(user_cube), testing)
    # Daily per-user time spent annotating (seconds).
    daily_time_user_live_breakdown = session.execute("""
        select
        Date,
        {0}
        from (
        SELECT
        date_format(created, '%Y-%m-%d') as Date,
        user,
        sum(timeTakenToAnnotate) as total
        FROM annotation
        WHERE isTestMode = :testing
        GROUP BY Date,user
        ORDER BY Date) as a
        group by Date
        ORDER BY Date;
    """.format(user_cube), testing)
    # Claims flagged by annotators (negative `verifiable` marks a flag here).
    flagged_claims = session.execute("""
        SELECT claim_id, user
        FROM annotation
        WHERE isTestMode = :testing
        AND annotation.verifiable < 0
    """, testing)
    save_html(name+"_flagged_claims", flagged_claims)
    # Coerce DB types (Decimal/date) to strings; time=True formats durations.
    lwk, lw = coerce_all(weekly_user_live_breakdown)
    ldk, ld = coerce_all(daily_user_live_breakdown)
    lwtk, ltw = coerce_all(weekly_time_user_live_breakdown, time=True)
    ldtk, ltd = coerce_all(daily_time_user_live_breakdown, time=True)
    dbk, db = coerce_all(daily_live_breakdown)
    wbk, wb = coerce_all(weekly_live_breakdown)
    save_csv(name+"_weekly_wf2", lwk, lw)
    save_csv(name+"_time_weekly_wf2", lwtk, ltw)
    save_csv(name+"_daily_wf2", ldk, ld)
    save_csv(name+"_time_daily_wf2", ldtk, ltd)
    save_csv(name+"_daily_totals_wf2", dbk, db)
    save_csv(name+"_weekly_totals_wf2", wbk, wb)
def coerce(line, is_time_spent):
    """Coerce one result row into CSV-friendly values.

    line -- an iterable row (SQLAlchemy RowProxy or plain sequence).
    is_time_spent -- when True, Decimal values are seconds and are rendered
        as decimal hours via convert(); otherwise Decimals become plain strings.
    Returns a new list; dates become ISO strings, other values pass through.
    """
    newline = []
    for item in line:
        # isinstance is the idiomatic type check (type(x) == T breaks on
        # subclasses and identity semantics).
        if isinstance(item, Decimal):
            if is_time_spent:
                newline.append(convert(item, add_days=False, to_hours_dec=True))
            else:
                newline.append(str(item))
        # Exact-type check on purpose: datetime.datetime subclasses date and
        # must NOT be caught here (preserves the original behaviour).
        elif type(item) is datetime.date:
            newline.append(str(item))
        else:
            newline.append(item)
    return newline
def coerce_all(records, time=False):
    """Coerce every row of a result set.

    Returns a (column_names, coerced_rows) pair; `time` is forwarded to
    coerce() so Decimal durations are rendered as hours.
    """
    coerced_rows = [coerce(row, time) for row in records]
    return records.keys(), coerced_rows
def save_csv(filename, header, report_lines):
    """Write a report to data/reports/<filename>.csv (header row + data rows)."""
    # makedirs(exist_ok=True) is race-free, unlike the exists()/mkdir() pair,
    # and also creates the intermediate 'data' directory if missing.
    os.makedirs('data/reports', exist_ok=True)
    # newline='' is required by the csv module so the writer controls line
    # endings itself (otherwise every row is followed by a blank line on Windows).
    with open("data/reports/" + filename + ".csv", "w", newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(report_lines)
def save_html(filename, flagged_claims):
    """Write flagged claims to data/reports/<filename>.html, one link per claim.

    flagged_claims -- iterable of (claim_id, user) rows.
    """
    # BUG FIX: unlike save_csv, this function never created the output
    # directory, so it crashed with FileNotFoundError on a fresh checkout.
    os.makedirs('data/reports', exist_ok=True)
    with open('data/reports/' + filename + '.html', 'w', encoding='utf-8') as f:
        f.write('<!DOCTYPE html>\n<html>\n')
        f.write('<body style="margin-left: 2%;">\n')
        for claim in flagged_claims:
            claim_link = 'https://fever-annotate.corp.amazon.com/#!/label-claims/%d' % claim[0]
            user = claim[1]
            # Quote the href value: unquoted attribute values are invalid HTML.
            f.write('%s: <a href="%s">%s</a><br/>\n' % (user, claim_link, claim_link))
        f.write('\n</body>')
        f.write('\n</html>')
def done_today(testing):
    """Number of claims created today (server time) for the given mode."""
    params = {"testing": testing}
    qry = """
        SELECT
        date_format(created, '%Y-%m-%d') as Date,
        count(*) as total
        FROM claim
        WHERE testing = :testing
        AND date_format(created, '%Y-%m-%d') = date_format(NOW(), '%Y-%m-%d')
        GROUP BY date
        order by Date
    """
    result = session.execute(qry, params)
    return 0 if result.rowcount == 0 else result.first()[1]
def convert(sec_dec, add_days=True, to_hours_dec=False):
    """Format a duration given in seconds.

    sec_dec -- seconds (Decimal/int/float) or None (e.g. SQL SUM over no rows).
    add_days -- include a days component in the human-readable string.
    to_hours_dec -- instead return decimal hours with two digits (e.g. "1.50").
    Returns 'N/A' when no duration is available, '0' for a zero duration.
    """
    # BUG FIX: the original guard was `if not sec_dec`, which also matched 0,
    # so a zero duration returned 'N/A' and the `== 0` branch was unreachable.
    if sec_dec is None:
        return 'N/A'
    if sec_dec == 0:
        return '0'
    sec = int(sec_dec)
    if to_hours_dec:
        return "%.2f" % (float(sec) / 60 / 60)
    m, s = divmod(sec, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    if add_days:
        time_spent_str = "%d days; %d hours, %d minutes" % (d, h, m)
    else:
        time_spent_str = "%d hours, %d minutes" % (h, m)
    return time_spent_str
def time_today(testing):
    """Human-readable total time spent on claims created today."""
    params = {"testing": testing}
    qry = """
        SELECT
        date_format(created, '%Y-%m-%d') as Date,
        sum(timeTakenToAnnotate) as total
        FROM claim
        WHERE testing = :testing
        AND date_format(created, '%Y-%m-%d') = date_format(NOW(), '%Y-%m-%d')
        GROUP BY date
        order by Date
    """
    result = session.execute(qry, params)
    if result.rowcount == 0:
        return 0
    # Render the summed seconds as "d days; h hours, m minutes".
    return convert(result.first()[1])
def done_week(testing, offset):
    """Number of claims created in the week `offset` weeks from now (0 = this week)."""
    week = datetime.datetime.utcnow() + datetime.timedelta(weeks=offset)
    print(week.strftime("%Y-%U"))
    params = {"testing": testing, "dwk": week.strftime("%Y-%U")}
    qry = """
        SELECT
        count(*) as total
        FROM claim
        WHERE testing = :testing
        AND date_format(created, '%Y-%U') = :dwk
    """
    result = session.execute(qry, params)
    return 0 if result.rowcount == 0 else result.first()[0]
def time_week(testing, offset):
    """Human-readable time spent on claims created in the given week (offset in weeks)."""
    week = datetime.datetime.utcnow() + datetime.timedelta(weeks=offset)
    print(week.strftime("%Y-%U"))
    params = {"testing": testing, "dwk": week.strftime("%Y-%U")}
    qry = """
        SELECT
        sum(timeTakenToAnnotate) as total
        FROM claim
        WHERE testing = :testing
        AND date_format(created, '%Y-%U') = :dwk
    """
    result = session.execute(qry, params)
    if result.rowcount == 0:
        return 0
    # Render the summed seconds as "d days; h hours, m minutes".
    return convert(result.first()[0])
def done(testing):
    """Total number of claims for the given mode (0 = live, 1 = sandbox)."""
    result = session.execute("""
        SELECT
        count(*) as total
        FROM claim
        WHERE testing = :testing
    """, {"testing": testing})
    return 0 if result.rowcount == 0 else result.first()[0]
def done_today_wf2(testing):
    """Number of workflow-2 annotations created today for the given mode."""
    params = {"testing": testing}
    qry = """
        SELECT
        date_format(created, '%Y-%m-%d') as Date,
        count(*) as total
        FROM annotation
        WHERE isTestMode = :testing
        AND date_format(created, '%Y-%m-%d') = date_format(NOW(), '%Y-%m-%d')
        GROUP BY date
        order by Date
    """
    result = session.execute(qry, params)
    return 0 if result.rowcount == 0 else result.first()[1]
def total_left_reval_wf2():
    """Estimate how many live workflow-2 annotations are still outstanding.

    Counts claims grouped by how many annotations they already have; each
    re-validation claim with k annotations is weighted by the annotations it
    presumably still needs (NOTE(review): the 4/3/2/1 weights imply a target
    of five annotations per reval claim — confirm against the workflow spec).
    """
    reval_qry = """
        SELECT count(done)
        FROM (SELECT count(annotation.id) AS done
        FROM claim
        LEFT JOIN annotation ON annotation.claim_id = claim.id
        AND annotation.isForReportingOnly = 0
        AND annotation.isTestMode = 0
        WHERE claim.isReval = 1
        AND claim.isOracle = 0
        AND claim.testing = 0
        GROUP BY claim.id
        HAVING done = :num) AS cad;
    """
    remaining = 0
    for annotations_done, still_needed in ((1, 4), (2, 3), (3, 2), (4, 1)):
        claim_count = session.execute(reval_qry, {"num": annotations_done}).first()[0]
        remaining += claim_count * still_needed
    noreval_qry = """
        SELECT count(done)
        FROM (SELECT count(annotation.id) AS done
        FROM claim
        LEFT JOIN annotation ON annotation.claim_id = claim.id
        AND annotation.isForReportingOnly = 0
        AND annotation.isTestMode = 0
        WHERE claim.isReval = 0
        AND claim.isOracle = 0
        AND claim.testing = 0
        GROUP BY claim.id
        HAVING done = :num) AS cad;
    """
    # Non-reval claims need exactly one annotation each.
    remaining += session.execute(noreval_qry, {"num": 0}).first()[0]
    return remaining
def time_today_wf2(testing):
    """Human-readable time spent on workflow-2 annotations created today."""
    params = {"testing": testing}
    qry = """
        SELECT
        date_format(created, '%Y-%m-%d') as Date,
        sum(timeTakenToAnnotate) as total
        FROM annotation
        WHERE isTestMode = :testing
        AND date_format(created, '%Y-%m-%d') = date_format(NOW(), '%Y-%m-%d')
        GROUP BY date
        order by Date
    """
    result = session.execute(qry, params)
    if result.rowcount == 0:
        return 0
    # Render the summed seconds as "d days; h hours, m minutes".
    return convert(result.first()[1])
def done_week_wf2(testing, offset):
    """Number of workflow-2 annotations created in the given week (offset in weeks)."""
    week = datetime.datetime.utcnow() + datetime.timedelta(weeks=offset)
    print(week.strftime("%Y-%U"))
    params = {"testing": testing, "dwk": week.strftime("%Y-%U")}
    qry = """
        SELECT
        count(*) as total
        FROM annotation
        WHERE isTestMode = :testing
        AND date_format(created, '%Y-%U') = :dwk
    """
    result = session.execute(qry, params)
    return 0 if result.rowcount == 0 else result.first()[0]
def time_week_wf2(testing, offset):
    """Human-readable time spent on workflow-2 annotations in the given week."""
    week = datetime.datetime.utcnow() + datetime.timedelta(weeks=offset)
    print(week.strftime("%Y-%U"))
    params = {"testing": testing, "dwk": week.strftime("%Y-%U")}
    qry = """
        SELECT
        sum(timeTakenToAnnotate) as total
        FROM annotation
        WHERE isTestMode = :testing
        AND date_format(created, '%Y-%U') = :dwk
    """
    result = session.execute(qry, params)
    if result.rowcount == 0:
        return 0
    # Render the summed seconds as "d days; h hours, m minutes".
    return convert(result.first()[0])
def done_wf2(testing):
    """Total number of workflow-2 annotations for the given mode."""
    result = session.execute("""
        SELECT
        count(*) as total
        FROM annotation
        WHERE isTestMode = :testing
    """, {"testing": testing})
    return 0 if result.rowcount == 0 else result.first()[0]
# --- report generation entry point (runs when the module is executed) ---
get_stats("live", 0)
get_stats("sandbox", 1)
get_stats_wf2("live", 0)
get_stats_wf2("sandbox", 1)

# Workflow-1 summary written for the dashboard.
report = {"live_count": done(0),
          "sandbox_count": done(1),
          "last_run": str(datetime.datetime.now()),
          "done_today": done_today(0),
          "time_today": time_today(0),
          "done_this_week": done_week(0, 0),
          "time_this_week": time_week(0, 0),
          "done_last_week": done_week(0, -1),
          "time_last_week": time_week(0, -1),
          }
# BUG FIX: json.dump(report, open(...)) leaked the file handle; a context
# manager guarantees it is flushed and closed.
with open("data/state.json", "w+") as f:
    json.dump(report, f)

# Workflow-2 summary, including the outstanding-annotations estimate.
report = {"live_count": done_wf2(0),
          "sandbox_count": done_wf2(1),
          "last_run": str(datetime.datetime.now()),
          "done_today": done_today_wf2(0),
          "time_today": time_today_wf2(0),
          "total_left": total_left_reval_wf2(),
          "done_this_week": done_week_wf2(0, 0),
          "time_this_week": time_week_wf2(0, 0),
          "done_last_week": done_week_wf2(0, -1),
          "time_last_week": time_week_wf2(0, -1),
          }
with open("data/state_wf2.json", "w+") as f:
    json.dump(report, f)
|
# Copyright (c) 2010-2013, Regents of the University of California.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import logging
import threading
from openvisualizer.bspemulator.bspmodule import BspModule
class BspUart(BspModule):
    """ Emulates the 'uart' BSP module.

    Bridges the emulated mote's UART to the moteProbe: bytes the mote writes
    appear in an RX buffer that read() drains, and bytes written with write()
    are fed back to the mote one at a time via RX interrupts.  XON/XOFF flow
    control bytes are escaped on the mote->probe path.
    """

    _name = 'BspUart'

    INTR_TX = 'uart.tx'
    INTR_RX = 'uart.rx'

    BAUDRATE = 115200

    # XON/XOFF software flow-control bytes and the escape scheme used so they
    # can appear in the payload (escaped byte is XORed with XONXOFF_MASK).
    XOFF = 0x13
    XON = 0x11
    XONXOFF_ESCAPE = 0x12
    XONXOFF_MASK = 0x10

    def __init__(self, motehandler):
        # initialize the parent
        super(BspUart, self).__init__(motehandler)

        # local variables
        self.timeline = self.engine.timeline
        # mirrors the mote's UART interrupt-enable state
        self.interrupts_enabled = False
        self.tx_interrupt_flag = False
        self.rx_interrupt_flag = False
        # bytes the mote has sent towards the probe, drained by read()
        self.uart_rx_buffer = []
        # semaphore starts acquired: read() blocks until data is produced
        self.uart_rx_buffer_sem = threading.Semaphore()
        self.uart_rx_buffer_sem.acquire()
        self.uart_rx_buffer_lock = threading.Lock()
        self.uart_tx_buffer = []  # the bytes to be sent over UART
        self.uart_tx_next = None  # the byte that was just signaled to mote
        self.uart_tx_buffer_lock = threading.Lock()
        # lock starts acquired: producers block until done_reading() is called
        self.wait_for_done_reading = threading.Lock()
        self.wait_for_done_reading.acquire()
        # pending-escape state for XON/XOFF escaping on the mote->probe path
        self.f_xon_xoff_escaping = False
        self.xon_xoff_escaped_byte = 0

    # ======================== public ==========================================

    # === interact with UART

    def read(self):
        """ Read a byte from the mote.

        Blocks until the mote has produced data, then returns and clears the
        whole RX buffer as a list of 1-char strings.
        """

        # wait for something to appear in the RX buffer
        self.uart_rx_buffer_sem.acquire()

        # copy uart_rx_buffer
        with self.uart_rx_buffer_lock:
            assert len(self.uart_rx_buffer) > 0
            return_val = [chr(b) for b in self.uart_rx_buffer]
            self.uart_rx_buffer = []

        # return that element
        return return_val

    def write(self, bytes_to_write):
        """ Write a string of bytes to the mote.

        Returns the number of bytes queued, or 0 if a previous write is still
        being drained.
        """

        assert len(bytes_to_write)

        # NOTE(review): this emptiness check happens outside the TX lock --
        # presumably only one writer thread exists; confirm.
        if len(self.uart_tx_buffer) != 0:
            return 0

        with self.uart_tx_buffer_lock:
            self.uart_tx_buffer = [ord(b) for b in bytes_to_write]

        # pause the engine while scheduling so the timeline is consistent
        self.engine.pause()
        self._schedule_next_tx()
        self.engine.resume()

        return len(bytes_to_write)

    def done_reading(self):
        # unblocks the producer side (cmd_write_byte & co.) after read()
        self.wait_for_done_reading.release()

    # === commands

    def cmd_init(self):
        """ Emulates: void uart_init() """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('cmd_init')

        # remember that module has been intialized
        self.is_initialized = True

    def cmd_enable_interrupts(self):
        """ Emulates: void uart_enableInterrupts() """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('cmd_enable_interrupts')

        # update variables
        self.interrupts_enabled = True

    def cmd_disable_interrupts(self):
        """ Emulates: void cmd_disable_interrupts() """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('cmd_disableInterrupts')

        # update variables
        self.interrupts_enabled = False

    def cmd_clear_rx_interrupts(self):
        """ Emulates: void uart_clearRxInterrupts() """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('cmd_clear_rx_interrupts')

        # update variables
        self.rx_interrupt_flag = False

    def cmd_clear_tx_interrupts(self):
        """ Emulates: void uart_clearTxInterrupts() """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('cmd_clear_tx_interrupts')

        # update variables
        self.tx_interrupt_flag = False

    def cmd_write_byte(self, byte_to_write):
        """ Emulates: void uart_writeByte(uint8_t byte_to_write)

        Queues one mote->probe byte; XON/XOFF control values are escaped over
        two interrupts (escape byte now, masked value in intr_tx).
        """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('cmd_write_byte byte_to_write=' + str(byte_to_write))

        # set tx interrupt flag
        self.tx_interrupt_flag = True

        # calculate the time at which the byte will have been sent
        done_sending_time = self.timeline.get_current_time() + float(1.0 / float(self.BAUDRATE))

        # schedule uart TX interrupt in 1/BAUDRATE seconds
        self.timeline.schedule_event(done_sending_time, self.motehandler.get_id(), self.intr_tx, self.INTR_TX)

        if byte_to_write == self.XON or byte_to_write == self.XOFF or byte_to_write == self.XONXOFF_ESCAPE:
            # remember the byte; intr_tx will emit it XORed with the mask
            self.f_xon_xoff_escaping = True
            self.xon_xoff_escaped_byte = byte_to_write
            # add to receive buffer
            with self.uart_rx_buffer_lock:
                self.uart_rx_buffer += [self.XONXOFF_ESCAPE]
        else:
            # add to receive buffer
            with self.uart_rx_buffer_lock:
                self.uart_rx_buffer += [byte_to_write]

        # release the semaphore indicating there is something in RX buffer
        self.uart_rx_buffer_sem.release()

        # wait for the moteProbe to be done reading
        self.wait_for_done_reading.acquire()

    def cmd_set_cts(self, state):
        """ Emulates: void uart_setCTS(bool state)

        Signals flow control to the probe by emitting XON (clear to send) or
        XOFF (stop sending).
        """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('cmd_set_cts state=' + str(state))

        # set tx interrupt flag
        self.tx_interrupt_flag = True

        # calculate the time at which the byte will have been sent
        done_sending_time = self.timeline.get_current_time() + float(1.0 / float(self.BAUDRATE))

        # schedule uart TX interrupt in 1/BAUDRATE seconds
        self.timeline.schedule_event(done_sending_time, self.motehandler.get_id(), self.intr_tx, self.INTR_TX)

        # add to receive buffer
        with self.uart_rx_buffer_lock:
            if state:
                self.uart_rx_buffer += [self.XON]
            else:
                self.uart_rx_buffer += [self.XOFF]

        # release the semaphore indicating there is something in RX buffer
        self.uart_rx_buffer_sem.release()

        # wait for the moteProbe to be done reading
        self.wait_for_done_reading.acquire()

    def cmd_write_circular_buffer_fastsim(self, buf):
        """ Emulates: void uart_writeCircularBuffer_FASTSIM(uint8_t* buffer, uint8_t len) """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('cmd_write_circular_buffer_fastsim buffer=' + str(buf))

        self._write_buffer(buf)

    def uart_write_buffer_by_len_fastsim(self, buf):
        """ Emulates: void uart_writeBufferByLen_FASTSIM(uint8_t* buffer, uint8_t len) """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('uart_write_buffer_by_len_fastsim buffer=' + str(buf))

        self._write_buffer(buf)

    def _write_buffer(self, buf):
        # Fast-sim path: deliver a whole buffer in one interrupt, escaping
        # XON/XOFF bytes in place.

        # set tx interrupt flag
        self.tx_interrupt_flag = True

        # calculate the time at which the buffer will have been sent
        done_sending_time = self.timeline.get_current_time() + float(float(len(buf)) / float(self.BAUDRATE))

        # schedule uart TX interrupt in len(buffer)/BAUDRATE seconds
        self.timeline.schedule_event(done_sending_time, self.motehandler.get_id(), self.intr_tx, self.INTR_TX)

        # add to receive buffer
        with self.uart_rx_buffer_lock:
            i = 0
            while i != len(buf):
                # expand control bytes into (ESCAPE, byte ^ MASK) pairs;
                # note buf is mutated and grows as escapes are inserted
                if buf[i] == self.XON or buf[i] == self.XOFF or buf[i] == self.XONXOFF_ESCAPE:
                    new_item = (self.XONXOFF_ESCAPE, buf[i] ^ self.XONXOFF_MASK)
                    buf[i:i + 1] = new_item
                    i += 1
                i += 1
            self.uart_rx_buffer += buf

        # release the semaphore indicating there is something in RX buffer
        self.uart_rx_buffer_sem.release()

        # wait for the moteProbe to be done reading
        self.wait_for_done_reading.acquire()

    def cmd_read_byte(self):
        """ Emulates: uint8_t uart_readByte()"""

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('cmd_read_byte')

        # retrieve the byte last sent
        with self.uart_tx_buffer_lock:
            return self.uart_tx_next

    # ======================== interrupts ======================================

    def intr_tx(self):
        """ Mote is done sending a byte over the UART.

        If an escape is pending, emits the masked byte first and re-arms the
        TX interrupt; otherwise raises the TX ISR in the mote.
        """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('intr_tx')

        if self.f_xon_xoff_escaping:
            self.f_xon_xoff_escaping = False

            # set tx interrupt flag
            self.tx_interrupt_flag = True

            # calculate the time at which the byte will have been sent
            done_sending_time = self.timeline.get_current_time() + float(1.0 / float(self.BAUDRATE))

            # schedule uart TX interrupt in 1/BAUDRATE seconds
            self.timeline.schedule_event(done_sending_time, self.motehandler.get_id(), self.intr_tx, self.INTR_TX)

            # add to receive buffer
            with self.uart_rx_buffer_lock:
                self.uart_rx_buffer += [self.xon_xoff_escaped_byte ^ self.XONXOFF_MASK]

            # release the semaphore indicating there is something in RX buffer
            self.uart_rx_buffer_sem.release()

            # wait for the moteProbe to be done reading
            self.wait_for_done_reading.acquire()
        else:
            # send interrupt to mote
            self.motehandler.mote.uart_isr_tx()

        # do *not* kick the scheduler
        return False

    def intr_rx(self):
        """ Interrupt to indicate to mote it received a byte from the UART. """

        # log the activity
        if self.log.isEnabledFor(logging.DEBUG):
            self.log.debug('intr_rx')

        with self.uart_tx_buffer_lock:

            # make sure there is a byte to TX
            assert len(self.uart_tx_buffer)

            # get the byte that is being transmitted
            self.uart_tx_next = self.uart_tx_buffer.pop(0)

            # schedule the next interrupt, if any bytes left
            if len(self.uart_tx_buffer):
                self._schedule_next_tx()

        # send RX interrupt to mote
        self.motehandler.mote.uart_isr_rx()

        # do *not* kick the scheduler
        return False

    # ======================== private =========================================

    def _schedule_next_tx(self):
        # Schedule delivery of the next probe->mote byte one bit-period away.

        # calculate time at which byte will get out
        time_next_tx = self.timeline.get_current_time() + float(1.0 / float(self.BAUDRATE))

        # schedule that event
        self.timeline.schedule_event(
            time_next_tx,
            self.motehandler.get_id(),
            self.intr_rx,
            self.INTR_RX,
        )
|
from django.db import models
from django.utils import timezone
from users.models import User
from teams.models import Team
from datetime import datetime, timedelta
class ContestManager(models.Manager):
    """Manager with helpers to fetch unstarted, active, or past contests."""

    def unstarted(self):
        """Contests whose start time has not been set yet."""
        return {contest
                for contest in super(ContestManager, self).get_queryset()
                if contest.contest_start is None}

    def active(self):
        """Started contests that have not ended yet."""
        return {contest
                for contest in super(ContestManager, self).get_queryset()
                if contest.contest_start is not None
                and contest.contest_end() > timezone.now()}

    def past(self):
        """Started contests whose end time has already passed."""
        return {contest
                for contest in super(ContestManager, self).get_queryset()
                if contest.contest_start is not None
                and contest.contest_end() <= timezone.now()}
class Contest(models.Model):
    """
    A Contest object has any number of Problem objects associated with it as well as the fields below
    """
    title = models.CharField(max_length=128)
    date_created = models.DateTimeField(auto_now_add=True)
    # Deleting the creating user cascades to the contest.
    creator = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE, related_name="contest_creator")
    # NOTE(review): free-form string; presumably a delimited list of allowed
    # languages -- confirm the format against the views that parse it.
    languages = models.CharField(max_length=64)
    # Duration stored as a time-of-day value; contest_end() reads only its
    # hour and minute components.
    contest_length = models.TimeField(null=True, blank=True)
    # None until the contest is started (see ContestManager.unstarted).
    contest_start = models.DateTimeField(null=True, blank=True)
    time_penalty = models.CharField(max_length=20, null=True, blank=True)
    autojudge_enabled = models.BooleanField(max_length=1, default=False)
    autojudge_review = models.CharField(max_length=128, null=True, blank=True)
    problem_description = models.FileField(upload_to='uploads/', null=True, blank=True)
    contest_admins = models.ManyToManyField(User, related_name="contest_admins", blank=True)
    contest_participants = models.ManyToManyField(Team, related_name="contest_participants", blank=True)

    objects = ContestManager()

    def contest_end(self):
        """Return the contest end as an aware datetime.

        Unstarted contests report the maximum representable datetime so they
        always compare as "not yet ended".
        """
        if self.contest_start is None:
            return datetime.max.replace(tzinfo=timezone.utc)
        # Seconds are intentionally ignored: only hours and minutes of
        # contest_length contribute to the duration.
        return self.contest_start + timedelta(seconds=self.contest_length.hour*3600+self.contest_length.minute*60)

    '''def time_remaining(self):
    if self.contest_start is None:
    return "Not started"
    seconds = (self.contest_end() - timezone.now()).seconds
    return "%d:%02d:%02d remaining"%(seconds//3600, seconds%3600//60, seconds%60)'''

    def __str__(self):
        return self.title
class Problem(models.Model):
    """
    A Problem object has any number of ProblemInput and ProblemSolution objects associated with it as well as the fields below
    """
    name = models.CharField(max_length=2048, null=True, blank=True)
    input_description = models.TextField(null=True, blank=True)
    output_description = models.TextField(null=True, blank=True)
    sample_input = models.FileField(upload_to='uploads/', null=True, blank=True)
    sample_output = models.FileField(upload_to='uploads/', null=True, blank=True)
    # Deleting the contest deletes its problems.
    contest = models.ForeignKey(Contest, null=True, blank=True, on_delete=models.CASCADE)
    # Per-problem execution time limit in seconds.
    timeout = models.IntegerField(default=5, blank=True)
class ProblemInput(models.Model):
    """An input file (test case) attached to a Problem."""
    # on_delete=CASCADE matches the pre-Django-2.0 implicit default and is
    # required explicitly from Django 2.0 onwards.
    problem = models.ForeignKey(Problem, related_name='problem_input', on_delete=models.CASCADE)
    program_input = models.FileField(upload_to='uploads/', null=True, blank=False)
class ProblemSolution(models.Model):
    """A reference solution file attached to a Problem."""
    # on_delete=CASCADE matches the pre-Django-2.0 implicit default and is
    # required explicitly from Django 2.0 onwards.
    problem = models.ForeignKey(Problem, related_name='problem_solution', on_delete=models.CASCADE)
    solution = models.FileField(upload_to='uploads/', null=True, blank=True)
class Participant(models.Model):
    """
    A Participant object is created when a team accepts the invitation to participate in a contest.
    """
    contest = models.ForeignKey(Contest, null=True, blank=True, on_delete=models.CASCADE)
    team = models.ForeignKey(Team, null=True, blank=True, on_delete=models.CASCADE)
    # BUG FIX: the field class was assigned without being instantiated
    # (`score = models.IntegerField`), so Django never created a `score`
    # column.  default=0 keeps existing rows and call sites valid.
    score = models.IntegerField(default=0)
class Submission(models.Model):
    """A single code submission by a team for a problem."""
    # run_id is not the primary key of submission.  It should be unique within
    # a contest, counting from 1 and incrementing as later submissions come in.
    run_id = models.IntegerField(default=0)
    # on_delete=CASCADE matches the pre-Django-2.0 implicit default and is
    # required explicitly from Django 2.0 onwards.
    team = models.ForeignKey(Team, null=True, on_delete=models.CASCADE)
    problem = models.ForeignKey(Problem, on_delete=models.CASCADE, null=True)
    code_file = models.FileField(upload_to='uploads/', null=True, blank=True)
    # auto_now: refreshed on every save, not only on creation.
    timestamp = models.DateTimeField(auto_now=True)
    original_filename = models.CharField(max_length=128, null=True, blank=True)
    # Judge verdicts as (stored value, human-readable label) pairs.
    JUDGE_RESULT = (
        ('YES', 'Yes'),
        ('WRONG', 'Wrong Answer'),
        ('OFE', 'Output Format Error'),
        ('IE', 'Incomplete Error'),
        ('EO', 'Excessive Output'),
        ('CE', 'Compilation Error'),
        ('RTE', 'Run-Time Error'),
        ('TLE', 'Time-Limit Exceeded'),
        ('OTHER', 'Other-Contact Staff'),
    )
    # Lifecycle of a submission while it waits for / receives a verdict.
    SUBMISSION_STATE_CHOICES = (
        ('NEW', 'New'),
        ('YES', 'Yes'),
        ('NO', 'No'),
    )
    state = models.CharField(max_length=20, choices=SUBMISSION_STATE_CHOICES, default='NEW')
    result = models.CharField(max_length=20, choices=JUDGE_RESULT, null=True)

    def __str__(self):
        return str(self.run_id)
class ContestTemplate(models.Model):
    """Reusable preset of contest settings (mirrors the editable fields of Contest)."""
    title = models.CharField(max_length=128)
    creator = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE, related_name="contesttemplate_creator")
    languages = models.CharField(max_length=64)
    contest_length = models.TimeField(null=True, blank=True)
    time_penalty = models.CharField(max_length=20, null=True, blank=True)
    autojudge_enabled = models.BooleanField(max_length=1, default=False)
    autojudge_review = models.CharField(max_length=128, null=True, blank=True)
    contest_admins = models.ManyToManyField(User, related_name="contesttemplate_admins", blank=True)
    contest_participants = models.ManyToManyField(Team, related_name="contesttemplate_participants", blank=True)

    def __str__(self):
        return self.title
class Notification(models.Model):
    """
    A Notification object is created when judge return the result of a submission to participant.
    It is deleted when participant close the notification.
    """
    submission = models.ForeignKey(Submission, on_delete=models.CASCADE)
class ContestInvite(models.Model):
    """
    A ContestInvite object is created when contest admin add a team as participant of a contest.
    It is deleted when team accept or decline the invitation.
    """
    # on_delete=CASCADE matches the pre-Django-2.0 implicit default and is
    # required explicitly from Django 2.0 onwards.
    contest = models.ForeignKey(Contest, on_delete=models.CASCADE)
    team = models.ForeignKey(Team, on_delete=models.CASCADE)
|
#! /bin/env python
"""
This script will return the paths that distutils will use for installing
a package. To use this script, execute it the same way that you would
execute setup.py, but instead of providing 'install' or 'build' as the
command, specify 'purelib' or 'platlib' and the corresponding path
will be printed. The 'purelib' command will print the install location
for .py files, while the 'platlib' command will print the install location
of binary modules (.so or .dll).
Written by <NAME>, Feb 25, 2006.
"""
import string
import sys
import os
def get_install_path(command, *args):
    """Return the module install path, given the arguments that were
    provided to setup.py.

    command -- one of 'purelib' (.py install dir), 'platlib' (binary module
        install dir), 'scripts' (executables) or 'data' (data files).
    args -- the remaining setup.py command-line arguments ('--opt=value' or
        bare '--flag' forms; everything after a literal '--' is ignored).
    Returns None for an unknown command.
    """
    # convert setup args into an option dictionary
    options = {}
    for arg in args:
        if arg == '--':
            break
        if arg[0:2] == "--":
            try:
                # BUG FIX: the original did `string.split(arg, "=")` and, on
                # ValueError, stored `options[option] = 1` -- but `option` is
                # unbound when the split fails, so any valueless flag (e.g.
                # '--force') raised NameError.  maxsplit=1 also keeps '=' in
                # option values intact.
                option, value = arg.split("=", 1)
                options[option] = value
            except ValueError:
                options[arg] = 1
    # check for the prefix and exec_prefix
    prefix = options.get("--prefix")
    exec_prefix = options.get("--exec-prefix", prefix)
    # if prefix or exec_prefix aren't set, use default system values
    if prefix is None:
        prefix = sys.prefix
    if exec_prefix is None:
        exec_prefix = sys.exec_prefix
    # replace backslashes with slashes (distutils works with forward slashes)
    if os.name != 'posix':
        prefix = prefix.replace(os.sep, "/")
        exec_prefix = exec_prefix.replace(os.sep, "/")
    # get rid of trailing separator
    if prefix != "" and prefix[-1] == "/":
        prefix = prefix[0:-1]
    if exec_prefix != "" and exec_prefix[-1] == "/":
        exec_prefix = exec_prefix[0:-1]
    # check for "home" install scheme
    home = options.get("--home")
    if home is not None:
        if os.name != 'posix':
            home = home.replace(os.sep, "/")
        if home != "" and home[-1] == "/":
            home = home[0:-1]
    # apply "home" install scheme, but not for Windows with python < 2.4
    # (distutils didn't allow home scheme for windows until 2.4)
    if home is not None and not (os.name != 'posix' and sys.version < '2.4'):
        purelib = home + '/lib/python'
        platlib = home + '/lib/python'
        scripts = home + '/bin'
        data = home
    elif os.name == 'posix':
        # BUG FIX: sys.version[0:3] truncates "3.10.4" to "3.1"; build the
        # major.minor string from sys.version_info instead (identical result
        # for all versions below 3.10).
        ver = "%d.%d" % sys.version_info[:2]
        purelib = prefix + '/lib/python' + ver + '/site-packages'
        platlib = exec_prefix + '/lib/python' + ver + '/site-packages'
        scripts = prefix + '/bin'
        data = prefix
    elif sys.version < '2.2':
        purelib = prefix
        platlib = prefix
        scripts = prefix + '/Scripts'
        data = prefix
    else:
        purelib = prefix + '/Lib/site-packages'
        platlib = prefix + '/Lib/site-packages'
        scripts = prefix + '/Scripts'
        data = prefix
    # allow direct setting of install directories
    purelib = options.get("--install-purelib", purelib)
    platlib = options.get("--install-platlib", platlib)
    scripts = options.get("--install-scripts", scripts)
    data = options.get("--install-data", data)
    # return the information that was asked for
    if command == 'purelib':
        return purelib
    elif command == 'platlib':
        return platlib
    elif command == 'scripts':
        return scripts
    elif command == 'data':
        return data
if __name__ == "__main__":
    # BUG FIX: the original `print apply(get_install_path, sys.argv[1:])` is
    # Python-2-only syntax (print statement + removed `apply` builtin); this
    # form works on both Python 2 and 3.
    print(get_install_path(*sys.argv[1:]))
|
#!/usr/bin/env python
# coding: utf-8
# # >>>>>>>>>>>>>>>>>>>>Tarea número 3 <<<<<<<<<<<<<<<<<<<<<<<<
# # Estudiante: <NAME>
# # Ejercicio #1
# In[2]:
import os
import pandas as pd
import numpy as np
from math import pi
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, ward, single, complete,average,linkage, fcluster
import scipy.cluster.hierarchy as sch
from scipy.spatial.distance import pdist
from sklearn.preprocessing import StandardScaler
# In[3]:
# Función para calcular los centroides de cada cluster¶
def centroide(num_cluster, datos, clusters):
    """Return a one-row DataFrame holding the column means of the rows of
    `datos` assigned to cluster `num_cluster` by the `clusters` label array."""
    mask = clusters == num_cluster
    return pd.DataFrame(datos[mask].mean()).T
# In[4]:
# Función para graficar los gráficos de Barras para la interpretación de clústeres
def bar_plot(centros, labels, cluster=None, var=None):
    """Draw horizontal bar charts to interpret cluster centroids.

    centros -- 2-D array, one row of centroid values per cluster.
    labels -- variable names matching the columns of `centros`.
    cluster -- optional iterable of cluster indices to plot (default: all).
    var -- optional subset of variable names to restrict the plot to.
    Draws one subplot per selected cluster on the current matplotlib figure.
    """
    from math import ceil, floor
    from seaborn import color_palette
    colores = color_palette()
    # Common x-axis lower bound across subplots (0 unless values go negative).
    minimo = floor(centros.min()) if floor(centros.min()) < 0 else 0

    def inside_plot(valores, labels, titulo):
        # One horizontal bar per variable, shared x-limits for comparability.
        plt.barh(range(len(valores)), valores, 1/1.5, color=colores)
        plt.xlim(minimo, ceil(centros.max()))
        plt.title(titulo)

    if var is not None:
        # Keep only the requested variables (and their matching colours).
        centros = np.array([n[[x in var for x in labels]] for n in centros])
        colores = [colores[x % len(colores)] for x, i in enumerate(labels) if i in var]
        labels = labels[[x in var for x in labels]]
    if cluster is None:
        # Plot every cluster side by side; y tick labels only on the first.
        for i in range(centros.shape[0]):
            plt.subplot(1, centros.shape[0], i + 1)
            inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
            plt.yticks(range(len(labels)), labels) if i == 0 else plt.yticks([])
    else:
        # Plot only the requested clusters, in the order given.
        pos = 1
        for i in cluster:
            plt.subplot(1, len(cluster), pos)
            inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
            plt.yticks(range(len(labels)), labels) if pos == 1 else plt.yticks([])
            pos += 1
# In[5]:
# Función para graficar los gráficos tipo Radar para la interpretación de clústeres
def radar_plot(centros, labels):
    """Draw a radar (spider) chart to interpret cluster centroids.

    centros -- 2-D array, one row of centroid values per cluster.
    labels -- variable names, one per column of `centros`.
    Each variable is min-max scaled to [0, 100] across clusters before
    plotting; a constant variable is pinned at 50 for every cluster.
    """
    from math import pi
    # Column-wise min-max scaling to percentages (note: result is transposed,
    # so rows are variables and columns are clusters from here on).
    centros = np.array([((n - min(n)) / (max(n) - min(n)) * 100) if
                        max(n) != min(n) else (n/n * 50) for n in centros.T])
    # One spoke per variable, evenly spaced around the circle; the first
    # angle is repeated to close each polygon.
    angulos = [n / float(len(labels)) * 2 * pi for n in range(len(labels))]
    angulos += angulos[:1]
    ax = plt.subplot(111, polar=True)
    # Start at the top and go clockwise.
    ax.set_theta_offset(pi / 2)
    ax.set_theta_direction(-1)
    plt.xticks(angulos[:-1], labels)
    ax.set_rlabel_position(0)
    plt.yticks([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
               ["10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"],
               color="grey", size=8)
    plt.ylim(-10, 100)
    # Draw one closed, translucent polygon per cluster.
    for i in range(centros.shape[1]):
        valores = centros[:, i].tolist()
        valores += valores[:1]
        ax.plot(angulos, valores, linewidth=1, linestyle='solid',
                label='Cluster ' + str(i))
        ax.fill(angulos, valores, alpha=0.3)
    plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
# ### a) Cargue la tabla de datos SpotifyTop2018 40 V2.csv
# In[7]:
# NOTE(review): machine-specific hard-coded working directory.
os.chdir("/Users/heinerleivagmail.com")
print(os.getcwd())
data = pd.read_csv('SpotifyTop2018_40_V2.csv',delimiter=',',decimal=".")
print(data)
# In[8]:
# Standardize every column (zero mean, unit variance) in place.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(data)
data.loc[:,:] = scaled_values
print(data)
datos = data
# In[9]:
# Hierarchical linkage matrices on the standardized data.
ward_res = ward(datos)          # Ward
single_res = single(datos)      # single linkage (minimum jump)
complete_res = complete(datos)  # complete linkage (maximum jump)
average_res = average(datos)    # average linkage
# ### b) Ejecute un Clustering Jerarquico con la agregacion del Salto Maximo, Salto Mınimo, Promedio y Ward. Grafique el dendograma con cortes para dos y tres clusteres.
# In[10]:
# Draw one dendrogram per linkage method; the cut lines below apply to the
# last (Ward) figure, which is the current axes.
dendrogram(average_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(complete_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(single_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(ward_res,labels= datos.index.tolist())
# Add horizontal cuts for 2 and 3 clusters on the Ward dendrogram.
ax = plt.gca()
limites = ax.get_xbound()
ax.plot(limites, [11, 11], '--', c='k')
ax.plot(limites, [9.4, 9.4], '--', c='k')
ax.text(limites[1], 11, ' dos clústeres', va='center', fontdict={'size': 15})
ax.text(limites[1], 9.4, ' tres clústeres', va='center', fontdict={'size': 15})
# BUG FIX: the label used a literal "/n" where a newline escape "\n" was
# clearly intended.
plt.xlabel("Orden en el eje X.\nPor hacer la normalizacion de los datos el cluster 3 quedo muy cerca del 2")
plt.ylabel("Distancia o Agregación")
# ### c) Usando tres clusteres interprete los resultados del ejercicio anterior para el caso de agregacion de Ward usando graficos de barras y graficos tipo Radar.
# In[11]:
# Cut the Ward tree into exactly 3 clusters (euclidean distances).
grupos = fcluster(linkage(pdist(datos), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1  # relabel clusters 0..K-1, Python style
# Show which cluster each individual fell into.
print(grupos)
# Stack the three cluster centroids (uses the module-level centroide helper).
centros = np.array(pd.concat([centroide(0, datos, grupos),
                              centroide(1, datos, grupos),
                              centroide(2, datos, grupos)]))
print(centros)
plt.figure(1, figsize = (20, 8))
bar_plot(centros, datos.columns)
# In[12]:
# Same 3-cluster Ward partition, visualized as a radar chart.
grupos = fcluster(linkage(pdist(datos), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1
print(grupos)
centros = np.array(pd.concat([centroide(0, datos, grupos),
                              centroide(1, datos, grupos),
                              centroide(2, datos, grupos)]))
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# ### Interpretacion
# In[31]:
# Analisis:
# Cluster 1 (azul), este cluster se caracteriza por tener los niveles mas altos (100) en accousticness, es decir, las
# canciones en este cluster son las mas acusticas, tambien, tiene el mayor speechiness, es decir, hay muchas palabras
# en las canciones que estan en este cluster, ademas cuenta con el mayor numero en liveness (es decir hay publico en
# la cancion), tambien tiene los niveles mas altos de valence (mucha postitividad en las canciones), el time_signature
# que representa la cantidad de beats que hay en cada barra de medida y por ultimo danceability, que son las canciones
# que tienen mayor potencial para ser bailable, a modo general en este cluster se agrupan las canciones que son mas
# positivas, mas aptas para bailar, con mayor sonido, mayor presencia de publico, es decir, son las canciones mas "alegres",
# por otro lado este cluster se caracteriza por tener canciones que tienen niveles 0 de instrumentalidad, su duracion en
# milisegundos es baja, su energy es moderada baja al igual que su loudness, es decir su sonoridad en la pista es baja.
# Cluster 2 (naranja): este se representa por tener las canciones que tienen mayor duracion en milisegundos, asi como
# las canciones que se encuentran en este cluster cuentan con tempo son las que tienen mayores beats por minuto (variable
# tempo). Ademas su acousticness es moderado, es decir estas canciones presentan algo de acustica y su speechiness, que
# es la presencia de palabras en las canciones tiende a ser bajo. En las demas variables este cluster presenta bajos niveles
# entonces se puede decir que este cluster se caracteriza por tener las canciones con mayor duracion, con mas beats por
# minuto y son canciones que combinan acustica y letras en sus estrofas.
#Cluster 3 (verde): en este caso las canciones que pertenecen a este cluster se caracterizan por tener los mas altos
# beats por minuto, presentan mucha instrumentalidad, su time_signature es alto, lo que representa altos beats en cada
# barra o medida, su intensidad es bastante alta (energy) y su sonoridad en decibiles tambien es bastante alta. Las
# canciones en este grupo se caracterizan por altamente instrumentales con nula cantidad de voces en sus piezas, y son
# canciones bastante intensas y con los beats mas altos por minuto, son canciones que son relativamente bailables, y su
# positividad musical es moderada y no presenta publico en sus piezas. Son canciones por asi decirlo, meramente instrumen-
# tales con poco o nulo registro de voz por parte de tun cantante.
# ### d) Grafique usando colores sobre las dos primeras componentes del plano principal en el Analisis en Componentes Principales los clusteres obtenidos segun la clasificacion Jerarquica (usando tres clusteres).
# In[13]:
# Reload the raw table and standardize it again.
campo = pd.read_csv('SpotifyTop2018_40_V2.csv',delimiter=',',decimal=".")
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(campo)
campo.loc[:,:] = scaled_values
datosx = campo  # same object as campo, not a copy
# Numeric feature matrix (all 11 variables).
cal = datosx.iloc[:,[0,1,2,3,4,5,6,7,8,9,10]].values
# In[14]:
# Ward linkage for the dendrogram-based cut.
clustering_jerarquico = linkage(cal, 'ward')
# In[15]:
# BUG FIX: the result was assigned to the name `dendrogram`, shadowing the
# scipy dendrogram() function and breaking every later cell that calls it.
# Store the returned layout dict under a different name instead.
dendrograma_info = sch.dendrogram(clustering_jerarquico)
# In[16]:
# Cut at height t=9.4, which yields the 3 clusters seen in part b).
clusters = fcluster(clustering_jerarquico, t=9.4, criterion = 'distance')
clusters
# In[17]:
# Attach the cluster label to each row (also visible through `campo`).
datosx['target'] = clusters
# In[18]:
# Persist the labeled table (index is written as the first CSV column).
campo.to_csv("/Users/heinerleivagmail.com/SpotifyTop2018_40_V3.csv")
# In[19]:
# Reload the labeled table; column 0 is the saved index.
df = pd.read_csv('SpotifyTop2018_40_V3.csv',delimiter=',',decimal=".")
x = df.iloc[:,[0,1,2,3,4,5,6,7,8,9,10]].values
y = df.iloc[:,[11]].values
# In[20]:
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
# NOTE(review): datosx now includes the 'target' column, so the cluster
# label itself participates in the PCA projection — confirm this is intended
# (projecting `cal` instead would use only the 11 features).
principalComponents = pca.fit_transform(datosx)
principalDf = pd.DataFrame(data = principalComponents,
                           columns = ['Componente 0', 'Componente 1'])
# Column 12 of the reloaded CSV is 'target' (index + 11 features precede it).
finalDf = pd.concat([principalDf, df.iloc[:,[12]]], axis = 1)
finalDf.head(10)
# In[21]:
# Scatter the individuals on the first principal plane, one color per cluster.
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Componente 0', fontsize = 15)
ax.set_ylabel('Componente 1', fontsize = 15)
ax.set_title('Plano Principal', fontsize = 20)
targets = [1, 2, 3]  # fcluster labels are 1..K here (no -1 relabeling)
colors = ['g', 'r', 'b']
for target, color in zip(targets,colors):
    indicesToKeep = finalDf['target'] == target
    ax.scatter(finalDf.loc[indicesToKeep, 'Componente 0'],
               finalDf.loc[indicesToKeep, 'Componente 1'],
               c = color,
               s = 50)
ax.legend(targets)
ax.grid()
# # Ejercicio 2
# ### a) Efectue un Clustering Jerarquico usando solo las variables numericas y de una interpretacion usando 3 clusteres.
# In[6]:
# NOTE(review): machine-specific hard-coded working directory.
os.chdir("/Users/heinerleivagmail.com")
print(os.getcwd())
corazon = pd.read_csv('SAheart.csv',delimiter=';',decimal=".")
print(corazon.head())
print(corazon.shape)
# In[7]:
# Keep only the numeric columns for this part of the exercise.
corazon2 = pd.DataFrame(data=corazon, columns=['sbp', 'tobacco', 'ldl',
                        'adiposity','typea','obesity','alcohol','age'])
print(corazon2)
print(corazon2.shape)
corazon2.describe()
# In[8]:
# Standardize every column (zero mean, unit variance) in place.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(corazon2)
corazon2.loc[:,:] = scaled_values
print(corazon2)
datos = corazon2
# In[9]:
# Hierarchical linkage matrices on the standardized numeric data.
ward_res = ward(datos)          # Ward
single_res = single(datos)      # single linkage (minimum jump)
complete_res = complete(datos)  # complete linkage (maximum jump)
average_res = average(datos)    # average linkage
# In[10]:
dendrogram(average_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(complete_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(single_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(ward_res,labels= datos.index.tolist())
# Add a single cut at 3 clusters on the Ward dendrogram (current axes).
ax = plt.gca()
limites = ax.get_xbound()
ax.plot(limites, [20.7, 20.7], '--', c='k')
ax.text(limites[1], 20.7, ' tres clústeres', va='center', fontdict={'size': 15})
plt.xlabel("Orden en el eje X")
plt.ylabel("Distancia o Agregación")
# In[11]:
# Bar charts of the 3 Ward cluster centroids.
grupos = fcluster(linkage(pdist(datos), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1  # relabel clusters 0..K-1, Python style
# Show which cluster each individual fell into.
print(grupos)
centros = np.array(pd.concat([centroide(0, datos, grupos),
                              centroide(1, datos, grupos),
                              centroide(2, datos, grupos)]))
print(centros)
plt.figure(1, figsize = (30, 10))
bar_plot(centros, datos.columns)
# In[12]:
# Same partition, visualized as a radar chart.
grupos = fcluster(linkage(pdist(datos), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1
print(grupos)
centros = np.array(pd.concat([centroide(0, datos, grupos),
                              centroide(1, datos, grupos),
                              centroide(2, datos, grupos)]))
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# ### Interpretacion
# In[32]:
# Para este segundo caso se puede ver como el cluster 1 (azul): son los individuos que estan sanos, ya que solo presentan
# un comportamiento tipo A alto muy alto, que los hace mas competitivos, orientados al trabajo, etc., en lo demas
# no presentan ninguna otra caracteristica.
# Cluster 2 (naranja): se caracteriza por tener a los individuos que tienen las edades mas altas, asi como la presion
# cardiaca, adiposidad y obesidad mas altas, asi como el colesterol, mientras que en otros parametros como el comporta-
# miento del tipo A (menos de 40%) y los niveles de alcohol estan bajos, es decir, no son consumidores de alcohol.
# En este cluster se pueden agrupar a todas aquellas personas que ya son avanzadas de edad y que presentan altos
# grados de obesidad y con ello colesterol y una presion cardiaca mas alta, y que ademas tienen una ligera tendencia
# a ser del comportamiento tipo A.
# En el cluster 3 (verde) se puede ver como los individuos de este grupo son los que tienen mas vicios (consumen mayores
# indices de alcohol y fuman mucho) ademas, presentan las edades altas de igual forma y su adiposidad tambien alcanza
# casi el 90%, por otro lado, presentan mas de un 60% de obesidad, y mas de un 40% de colesterol, ademas su presion
# cardiaca tambien es muy alta, pero su comportamiento tipo A es muy bajo, al parecer en este grupo estan las personas
# que son mayores tienen vicios, y ademas cuentan con presiones sanguineas altas.
# ### b) Efectue un Clustering Jerarquico usando las variables numericas y las variables categoricas. Luego de una interpretacion usando 3 clusteres.
# In[13]:
os.chdir("/Users/heinerleivagmail.com")
print(os.getcwd())
datos2 = pd.read_csv('SAheart.csv',delimiter=';',decimal=".")
# BUG FIX: the prints referenced the stale `datos` (the scaled numeric table
# from part a) instead of the freshly loaded `datos2`.
print(datos2.head())
print(datos2.shape)
# In[14]:
def recodificar(col, nuevo_codigo):
    """Return a copy of *col* with each key of *nuevo_codigo* replaced by its value.

    The input series is never mutated; replacements are applied one mapping
    entry at a time, in dict iteration order.
    """
    recodificada = pd.Series(col, copy=True)
    for valor_viejo, valor_nuevo in nuevo_codigo.items():
        recodificada.replace(valor_viejo, valor_nuevo, inplace=True)
    return recodificada
# In[15]:
# One-hot encode the categorical columns (famhist, chd) alongside the
# numeric ones.
datos_dummies = pd.get_dummies(datos2)
print(datos_dummies.head())
print(datos_dummies.dtypes)
# In[16]:
# Standardize all columns, dummies included, in place.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(datos_dummies)
datos_dummies.loc[:,:] = scaled_values
print(datos_dummies)
datos_dummy = datos_dummies
# In[17]:
# Ward dendrogram over the mixed (numeric + dummy) table.
ward_res = ward(datos_dummy)
dendrogram(ward_res,labels= datos_dummy.index.tolist())
# Cut at 3 clusters.
ax = plt.gca()
limites = ax.get_xbound()
ax.plot(limites, [28, 28], '--', c='k')
ax.text(limites[1], 28, ' tres clústeres', va='center', fontdict={'size': 15})
plt.xlabel("Orden en el eje X")
plt.ylabel("Distancia o Agregación")
# In[18]:
# Bar charts of the 3 cluster centroids.
# NOTE(review): metric='binary' on standardized continuous data is unusual,
# and Ward linkage normally assumes euclidean distances — confirm the metric
# choice (part a used metric='euclidean').
grupos = fcluster(linkage(pdist(datos_dummy), method = 'ward', metric='binary'), 3, criterion = 'maxclust')
grupos = grupos-1  # relabel clusters 0..K-1, Python style
# Show which cluster each individual fell into.
print(grupos)
centros = np.array(pd.concat([centroide(0, datos_dummy, grupos),
                              centroide(1, datos_dummy, grupos),
                              centroide(2, datos_dummy, grupos)]))
print(centros)
plt.figure(1, figsize = (12, 8))
bar_plot(centros, datos_dummy.columns)
# In[26]:
# Same partition, visualized as a radar chart.
grupos = fcluster(linkage(pdist(datos_dummy), method = 'ward', metric='binary'), 3, criterion = 'maxclust')
grupos = grupos-1
print(grupos)
centros = np.array(pd.concat([centroide(0, datos_dummy, grupos),
                              centroide(1, datos_dummy, grupos),
                              centroide(2, datos_dummy, grupos)]))
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos_dummy.columns)
# ### Interpretacion
# In[91]:
# Incluyendo las variables cualitativas convertidas en Dummies y una vez que se normalizaron todas las variables se
# tiene lo siguiente:
# Cluster 1 (azul): se sigue manteniendo la variable de typea A como alta, sin embargo otras como adiposidad, edad,
# colesterol, consumo de tabaco, presion arterial alta y diagnostico de enfermedad del corazon positivo se han ahora
# agregado, generando que este grupo sea de los individuos que tienen mayor edad y que tienen enfermedades adicionales
# y que ademas consumen bastante tabaco y consumen alcohol y es importante mencionar que ya han sido diagnosticados
# con enfermedad cardiaca, ademas que de que tienen historial medico de que se les ha diagnosticado a familiares este
# mismo padecimiento. Este grupo va a estar conformada por personas con diagnostico en pie de enfermedades cardiacas,
# que ademas han tenido familiares con esta misma condicion y que ademas tienen otras enfermedades adicionales y
# presentan algun vicio.
# Cluster 2 (Naranja): para este cluster, los pacientes presentan los mas altos indices de obesidad, tienen adicional
# los mayores indices de herencia familiar en cuanto a enfermedades del corazon, pero no han sido diagnosticados por
# este padecimiento, pero presentan alta adiposidad, otros factores como colesterol se encuentra presenta en algunos
# individuos pero es bajo, y algunos consumen tabaco, en cuanto a la edad no es tan alta como en el cluster 1, pero ya
# individuos que pueden estar en una edad media.
# Cluster 3 (verde): finalmente se tienen a los individuos del cluster verde, que estos son los que tienen las menores
# edades, pero presentan altos indices de toma de alcohol, cuentan con un ligero comportamiento tipo A, que los vuelve
# individuos mas competitivos, orientados a resultados, etc., ademas no han tenido ni diagnostico de enfermedad cardiacana
# ni en su familia se ha reportado historial medico de esta enfermedad, son por decirlo, los pacientes "sanos"
# ### c) Explique las diferencias de los dos ejercicios anteriores ¿Cual le parece mas interesante? ¿Por que?
# In[93]:
# Las diferencias radican en que sabiendo si la persona ha tenido historial familiar y si se le ha diagnosticado de
# enfermedad coronaria se puede hacer un mejor analisis y mas completo, que solo con las variables numericas, ya que
# ellas por si solas representan ciertos niveles, pero no se pueden sacar conclusiones ni entender excesivamente bien
# como se puede comportar esta enfermedad tomando en cuenta diferentes variables como la edad, los vicios, la obesidad
# presion alta, el typea, etc., con la intromision de las variables categoricas se ve que hay correlacion entre haber
# tenido historial familiar con esta enfermedad y sumado a un estilo de vida no saludable, se puede llegar a padecer
# esta enfermedad con el transcurso de los anos, otra como la typea A, que si una persona tienen este comportamiento
# y ademas, ingiere alcohol, aunque no haya tenido historial familiar ni han sido diagnosticados es mas probable
# que pueda tener en algun momento alguna enfermedad cardiaca o padecerla, por otro lado se ve que las personas que tienen
# los valores mas altos (cluster azul) solo poco menos de un 40% de los casos han sido hereditarias, mientras que todos
# en este grupo ya presentan enfermedad cardiaca y ademas, hay variables que la agravan como la obesidad, tener tipo A,
# la edad, tener colesterol, adiposidad y fumar, se ve como esta combinacion de factores pueden agravar la salud de las
# personas y que no en todos los casos estas enfermedades son hereditarias, sino que tambien van relacionadas con la
# alimentacion y estilo de vida de las personas.
# Indudablemente me parece mas interesante el segundo analisis, ya que en realidad con este se puede ver,
# que cualquier persona puede tener un infarto al corazon y que no van ligados necesariamente a historial familiar
# (es algo que potencia) y que si las personas no practican habitos saludables de alimentacion, y ademas, tienen vicios
# y son personas que pasan muy estresadas y ansiosas (typea A) pueden tener un infarto o ser diganosticados con esta
# enfermedad. Aqui la leccion es tener habitos alimenticios saludables, no ingerir alcohol ni tabaco en exceso y tener
# una vida mas tranquila.
# # Ejercicio # 3
# ### Dendrogramas construidos a ¨pie¨, en orden respectivo: Salto Mínimo, Salto Máximo y Promedio.
# In[19]:
# Display settings so the embedded images render compactly in the notebook.
import pandas as pd
pd.options.display.max_rows = 10
# In[20]:
# Hand-drawn dendrograms scanned as PNGs (single linkage first).
from IPython.display import Image
Image(filename='/Users/heinerleivagmail.com/Minimo.png')
# In[114]:
from IPython.display import Image
Image(filename='/Users/heinerleivagmail.com/Maximo.png')
# In[115]:
from IPython.display import Image
Image(filename='/Users/heinerleivagmail.com/Promedio.png')
# # Ejercicio #4
# ### a) Programe una clase en Python que tiene un atributo tipo DataFrame, ademas de los metodos usuales que tiene toda clase, tendra un metodo que calcula la matriz de distancias, para esto usara la distancia de Chebychev entre dos vectores que se definio arriba
# In[164]:
class Chebychev:
    """Wrap a numeric DataFrame and cluster its rows into 3 groups using
    Ward aggregation over the Chebyshev distance.

    Exposes the same plotting helpers as the module-level functions
    (bar_plot / radar_plot) but driven by the object's own clustering.
    """

    def __init__(self, data):
        # data: pandas DataFrame of numeric observations (rows = individuals).
        self.__data = data

    @property
    def data(self):
        return self.__data

    def __chebychev(self, x, y):
        # Chebyshev distance: the maximum coordinate-wise absolute difference.
        # BUG FIX: this previously returned abs(max(x) - max(y)), which only
        # compares the largest components and is not the Chebyshev metric.
        return float(np.max(np.abs(np.asarray(x) - np.asarray(y))))

    def __grupos(self):
        # Cluster labels 0..2 for each row.
        # BUG FIX: fclusterdata(..., method='ward', metric=<callable>) is
        # rejected by scipy (Ward requires euclidean on raw observations), so
        # the Chebyshev distances are precomputed with pdist and the Ward
        # linkage is built from the condensed distance matrix instead.
        enlaces = linkage(pdist(self.data, metric=self.__chebychev), method='ward')
        return fcluster(enlaces, 3, criterion='maxclust') - 1

    def __centroide(self, num_cluster, datos, clusters):
        # Mean row of the observations assigned to num_cluster, as a 1xP frame.
        ind = clusters == num_cluster
        return(pd.DataFrame(datos[ind].mean()).T)

    def centros(self):
        """Return the 3 cluster centroids stacked as a (3, n_columns) array."""
        grupos = self.__grupos()
        return np.array(pd.concat([self.__centroide(0, self.data, grupos),
                                   self.__centroide(1, self.data, grupos),
                                   self.__centroide(2, self.data, grupos)]))

    def bar_plot(self, cluster = None, var = None):
        """Horizontal bar charts of the centroids, one subplot per cluster.

        cluster: optional iterable of cluster indices to draw (default: all).
        var: optional iterable of column names to restrict the variables.
        """
        from math import ceil, floor
        from seaborn import color_palette
        centros = self.centros()
        labels = self.data.columns
        colores = color_palette()
        minimo = floor(centros.min()) if floor(centros.min()) < 0 else 0
        def inside_plot(valores, labels, titulo):
            # One cluster's centroid as a horizontal bar chart.
            plt.barh(range(len(valores)), valores, 1/1.5, color = colores)
            plt.xlim(minimo, ceil(centros.max()))
            plt.title(titulo)
        if var is not None:
            # Restrict centroids/colors/labels to the requested variables.
            centros = np.array([n[[x in var for x in labels]] for n in centros])
            colores = [colores[x % len(colores)] for x, i in enumerate(labels) if i in var]
            labels = labels[[x in var for x in labels]]
        if cluster is None:
            for i in range(centros.shape[0]):
                plt.subplot(1, centros.shape[0], i + 1)
                inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
                plt.yticks(range(len(labels)), labels) if i == 0 else plt.yticks([])
        else:
            pos = 1
            for i in cluster:
                plt.subplot(1, len(cluster), pos)
                inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
                plt.yticks(range(len(labels)), labels) if pos == 1 else plt.yticks([])
                pos += 1

    def radar_plot(self):
        """Radar chart of the centroids, rescaled per variable to 0-100."""
        from math import pi
        centros = self.centros()
        labels = self.data.columns
        # Constant variables collapse to 50 (n/n * 50 — NaN if the constant is 0).
        centros = np.array([((n - min(n)) / (max(n) - min(n)) * 100) if
                            max(n) != min(n) else (n/n * 50) for n in centros.T])
        angulos = [n / float(len(labels)) * 2 * pi for n in range(len(labels))]
        angulos += angulos[:1]
        ax = plt.subplot(111, polar = True)
        ax.set_theta_offset(pi / 2)
        ax.set_theta_direction(-1)
        plt.xticks(angulos[:-1], labels)
        ax.set_rlabel_position(0)
        plt.yticks([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
                   ["10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"],
                   color = "grey", size = 8)
        plt.ylim(-10, 100)
        for i in range(centros.shape[1]):
            valores = centros[:, i].tolist()
            valores += valores[:1]
            ax.plot(angulos, valores, linewidth = 1, linestyle = 'solid',
                    label = 'Cluster ' + str(i))
            ax.fill(angulos, valores, alpha = 0.3)
        plt.legend(loc='upper right', bbox_to_anchor = (0.1, 0.1))
# ### b) Calcule la matriz de distancias usando la distancia de Chebychev para la tabla de datos EjemploEstudiantes.csv.
# In[182]:
# Load the students example table (semicolon-separated, comma decimals,
# first column as the row index).
estudiantes = pd.read_csv('EjemploEstudiantes.csv', delimiter=';', decimal=',', header=0, index_col=0)
# Wrap it in the Chebychev helper class defined above.
test = Chebychev(estudiantes)
# ### c) Para la tabla de datos EjemploEstudiantes.csv ejecute un Clustering Jerarquico usando la distancia de Chebychev programada por usted y la agregacion Ward, compare el resultado respecto a usar distancia euclidiana y agregacion de Ward. (Debe investigar como usar una distancia propia en scipy.cluster.hierarchy)
# ### Clustering de Barras usando distancia de Chebyshev
# In[183]:
# Bar chart of the 3 Chebyshev/Ward centroids via the helper class.
val = Chebychev(estudiantes)
val.bar_plot()
# ### Radar Plot usando la distancia de Chebyshev
# In[184]:
val = Chebychev(estudiantes)
val.radar_plot()
# ### Comparando resultados usando distancia euclidea y agregacion de Ward
# ### Clustering de Barras usando distancia euclidea y agregacion de Ward
# In[185]:
# Baseline: euclidean distances with Ward aggregation, 3 clusters.
grupos = fcluster(linkage(pdist(estudiantes), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1  # relabel clusters 0..K-1, Python style
# Show which cluster each individual fell into.
print(grupos)
centros = np.array(pd.concat([centroide(0, estudiantes, grupos),
                              centroide(1, estudiantes, grupos),
                              centroide(2, estudiantes, grupos)]))
print(centros)
plt.figure(1, figsize = (12, 8))
bar_plot(centros, estudiantes.columns)
# ### Radar Plot usando distancia Euclidea y agregacion de Ward
# In[186]:
grupos = fcluster(linkage(pdist(estudiantes), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1
print(grupos)
centros = np.array(pd.concat([centroide(0, estudiantes, grupos),
                              centroide(1, estudiantes, grupos),
                              centroide(2, estudiantes, grupos)]))
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, estudiantes.columns)
# #### Resultados de la programacion propia con Chevishev y con distancia Euclidea y agregacion de Ward son diferentes.
|
# Generated by Django 2.2.14 on 2020-09-01 10:25
from django.db import migrations
def create_initial_owners(apps, schema_editor):
    """Seed the Owner table with the built-in owner rows (idempotent)."""
    Owner = apps.get_model("traffic_control", "Owner")
    # NOTE(review): "<NAME>" looks like a redacted placeholder for the
    # Finnish name of the City of Helsinki — confirm the intended value.
    for name_en, name_fi in (
        ("City of Helsinki", "<NAME>"),
        ("State", "Valtio"),
        ("Private", "Yksityinen"),
        ("Unknown", "Ei tiedossa"),
    ):
        Owner.objects.get_or_create(name_en=name_en, name_fi=name_fi)
def _set_owner_relations_for_model(Owner, Model):
    """
    Point owner_obj at an Owner row for every instance of Model, deriving
    the match from the legacy free-text owner field; the string is then
    replaced with a sentinel marking the row as migrated.
    """
    # Fetch the fallback owner up front so a missing seed row fails fast.
    unknown = Owner.objects.get(name_en="Unknown")
    post_migrate_owner_str = "__migrated_to_use_owner_relation__"
    # Substring heuristics for the three well-known owners, applied in order.
    known_fragments = (
        ("helsin", Owner.objects.get(name_en="City of Helsinki")),
        ("valtio", Owner.objects.get(name_en="State")),
        ("yksityi", Owner.objects.get(name_en="Private")),
    )
    for fragment, owner in known_fragments:
        Model.objects.filter(owner__icontains=fragment).update(
            owner_obj=owner, owner=post_migrate_owner_str
        )
    # Any other non-empty owner string becomes its own Owner row, flagged as
    # lacking an English translation.
    for instance in Model.objects.filter(owner_obj=None).exclude(owner=""):
        owner, _created = Owner.objects.get_or_create(
            name_fi=instance.owner, name_en="translation missing"
        )
        instance.owner_obj = owner
        instance.owner = post_migrate_owner_str
        instance.save(update_fields=["owner_obj", "owner"])
    # Whatever is still unmatched (empty owner string) falls back to Unknown.
    Model.objects.filter(owner_obj=None).update(owner_obj=unknown)
def set_owner_relations(apps, schema_editor):
    """Forward migration: fill owner_obj on every Plan/Real model from the
    legacy owner string (see _set_owner_relations_for_model)."""
    Owner = apps.get_model("traffic_control", "Owner")
    # Every device type has a Plan and a Real variant.
    model_names = [
        base + variant
        for base in (
            "AdditionalSign",
            "Barrier",
            "Mount",
            "RoadMarking",
            "Signpost",
            "TrafficLight",
            "TrafficSign",
        )
        for variant in ("Plan", "Real")
    ]
    for model_name in model_names:
        _set_owner_relations_for_model(
            Owner, apps.get_model("traffic_control", model_name)
        )
def set_owner_strings(apps, schema_editor):
    """
    Reverse migration: write each Owner's Finnish name back into the legacy
    owner string field on every model that points at the Owner table.
    """
    Owner = apps.get_model("traffic_control", "Owner")
    # Every device type has a Plan and a Real variant.
    model_names = [
        base + variant
        for base in (
            "AdditionalSign",
            "Barrier",
            "Mount",
            "RoadMarking",
            "Signpost",
            "TrafficLight",
            "TrafficSign",
        )
        for variant in ("Plan", "Real")
    ]
    for model_name in model_names:
        Model = apps.get_model("traffic_control", model_name)
        # One bulk update per (model, owner) pair.
        for owner in Owner.objects.all():
            Model.objects.filter(owner_obj=owner).update(owner=owner.name_fi)
class Migration(migrations.Migration):
    # Must run after the Owner model itself has been created.
    dependencies = [
        ("traffic_control", "0021_owner_model"),
    ]

    operations = [
        # Seed the built-in owners; reversing leaves the rows in place.
        migrations.RunPython(create_initial_owners, migrations.RunPython.noop),
        # Forward: derive owner_obj from the legacy owner string.
        # Reverse: restore the owner string from the related Owner.name_fi.
        migrations.RunPython(set_owner_relations, set_owner_strings),
    ]
|
# Code works stable with environment.yml
from pylab import *
import numpy as np
import copy
# Load scalar field U sampled on an N x N grid (file columns: value, x, y).
z,x,y = genfromtxt('U.dat').T
N = int(sqrt(x.shape[0]))
x = x.reshape(N, N)
y = y.reshape(N, N)
z = z.reshape(N, N)
# mark_11[i][j] = 1 where U changes sign between grid rows i and i+1
# (vertical edge crossing of the U=0 level curve); mark_12 is the analogue
# for the transposed grid (horizontal edge crossings).
mark_11 = copy.deepcopy(z)
mark_12 = copy.deepcopy(z)
for i in range (0, N-1):
    z_point = z[i] * z[i+1]  # negative product => sign change on this edge
    for j in range (0, N):
        if (z_point[j] < 0) :
            mark_11[i][j] = 1.0
        else:
            mark_11[i][j] = 0.0
# NOTE(review): row 253 is zeroed by hand — presumably masks a boundary
# artifact for this specific grid size; verify against N. Also note the
# last row (N-1) is never written by the loop and keeps raw z values.
mark_11[253] = mark_11[253] * 0.0
# Repeat along the other axis by transposing the grid.
z = z.T
x = x.T
y = y.T
for i in range (0, N-1):
    z_point = z[i] * z[i+1]
    for j in range (0, N):
        if (z_point[j] < 0) :
            mark_12[i][j] = 1.0
        else:
            mark_12[i][j] = 0.0
mark_12[253] = mark_12[253] * 0.0
mark_12 = mark_12.T
#print mark1[0][226] + mark1[0][227] + mark2[0][226] + mark2[1][226]
# cell1[i][j] counts how many of cell (i,j)'s four edges are crossed by U=0.
cell1 = copy.deepcopy(mark_11)
for i in range (0, N-1):
    for j in range (0, N-1):
        cell1[i][j] = mark_11[i][j] + mark_11[i][j+1] + mark_12[i][j] + mark_12[i+1][j]
# Same edge-crossing analysis for the second scalar field Q.
z,x,y = genfromtxt('Q.dat').T
N = int(sqrt(x.shape[0]))
x = x.reshape(N, N)
y = y.reshape(N, N)
z = z.reshape(N, N)
mark_21 = copy.deepcopy(z)
mark_22 = copy.deepcopy(z)
cell2 = copy.deepcopy(z)
for i in range (0, N-1):
    z_point = z[i] * z[i+1]  # negative product => Q changes sign on this edge
    for j in range (0, N):
        if (z_point[j] < 0) :
            mark_21[i][j] = 1.0
        else:
            mark_21[i][j] = 0.0
# NOTE(review): hand-zeroed row 253 — same boundary mask as for U; verify.
mark_21[253] = mark_21[253] * 0.0
z = z.T
x = x.T
y = y.T
for i in range (0, N-1):
    z_point = z[i] * z[i+1]
    for j in range (0, N):
        if (z_point[j] < 0) :
            mark_22[i][j] = 1.0
        else:
            mark_22[i][j] = 0.0
mark_22[253] = mark_22[253] * 0.0
mark_22 = mark_22.T
# cell2[i][j] counts how many of cell (i,j)'s four edges are crossed by Q=0.
cell2 = copy.deepcopy(mark_21)
for i in range (0, N-1):
    for j in range (0, N-1):
        cell2[i][j] = mark_21[i][j] + mark_21[i][j+1] + mark_22[i][j] + mark_22[i+1][j]
# Non-zero only where BOTH level curves U=0 and Q=0 cross the same cell.
cell = cell1*cell2
# Accumulators for the detected intersection points and the interpolated
# first derivatives of U and Q at those points.
answer_x = []
answer_y = []
answer_Ux = []
answer_Uy = []
answer_Qx = []
answer_Qy = []
# Reload the raw fields (z1 = U values, z2 = Q values) for interpolation.
z1,x1,y1 = genfromtxt('U.dat').T
x1 = x1.reshape(N,N)
y1 = y1.reshape(N,N)
z1 = z1.reshape(N,N)
z2,x2,y2 = genfromtxt('Q.dat').T
x2 = x2.reshape(N,N)
y2 = y2.reshape(N,N)
z2 = z2.reshape(N,N)
z1 = z1.T
z2 = z2.T
# Precomputed derivative grids. NOTE(review): the Uxx/Uyy (and Qxx/Qyy)
# second-derivative columns loaded here are later shadowed by scalar
# variables of the same name inside the main loop — only Ux/Uy/Qx/Qy
# survive as grids; confirm the second derivatives are intentionally unused.
Ux, Uy, Uxx, Uyy, Uxy = genfromtxt('derivativesU.dat').T
Qx, Qy, Qxx, Qyy, Qxy = genfromtxt('derivativesQ.dat').T
M = int(sqrt(Ux.shape[0]))
Ux = Ux.reshape(M, M)
Uy = Uy.reshape(M, M)
Qx = Qx.reshape(M, M)
Qy = Qy.reshape(M, M)
# Main scan: for every cell crossed by both level curves, approximate each
# curve by the straight chord between its two edge-crossing points (found by
# linear interpolation of the field values along the crossed edges, in local
# cell coordinates [0,1]x[0,1]), intersect the two chords, and keep the
# point (plus derivative values interpolated there) if it lies inside the
# cell.
#
# NOTE(review): several branches below appear to contain copy-paste slips —
# they reuse a variable belonging to the other endpoint or the other field
# (flagged inline). Code is kept byte-identical here; verify each flagged
# line against the intended interpolation formula. Also note k1/k2 and the
# *_2 - *_1 denominators can be zero (vertical chords / parallel chords),
# which would raise ZeroDivisionError.
for i in range (0, N-1):
    for j in range (0, N-1):
        if (cell[i][j] != 0): #& (cell[i][j] != 16):
            # Chord endpoints for U: (Ux1,Uy1)-(Ux2,Uy2); for Q likewise.
            Ux1 = 0.0
            Uy1 = 0.0
            Ux2 = 0.0
            Uy2 = 0.0
            Qx1 = 0.0
            Qy1 = 0.0
            Qx2 = 0.0
            Qy2 = 0.0
            Uxx1 = 0.0
            Uyy1 = 0.0
            Uxx2 = 0.0
            Uyy2 = 0.0
            Qxx1 = 0.0
            Qyy1 = 0.0
            Qxx2 = 0.0
            # NOTE(review): likely a typo for Qyy2 — as written, Qyy2 is
            # uninitialized the first time a Q-branch below is skipped.
            Qyx2 = 0.0
            # --- U chord: pick the two crossed edges of this cell ---
            if (mark_11[i][j] != 0.0):
                if (mark_11[i][j+1] != 0.0):
                    # Left and right vertical edges crossed.
                    Ux1 = 0.0
                    Uy1 = fabs(float(z1[i][j]))/(fabs(float(z1[i][j]))+fabs(float(z1[i+1][j])))
                    Uxx1 = (Ux[i][j] - Ux[i+1][j])*Uy1 + Ux[i+1][j]
                    Uyy1 = (Uy[i][j] - Uy[i+1][j])*Uy1 + Uy[i+1][j]
                    Ux2 = 1.0
                    Uy2 = fabs(float(z1[i][j+1]))/(fabs(float(z1[i][j+1]))+fabs(float(z1[i+1][j+1])))
                    Uxx2 = (Ux[i][j+1] - Ux[i+1][j+1])*Uy2 + Ux[i+1][j+1]
                    Uyy2 = (Uy[i][j+1] - Uy[i+1][j+1])*Uy2 + Uy[i+1][j+1]
                if (mark_12[i][j] != 0.0):
                    # Left vertical and bottom horizontal edges crossed.
                    Ux1 = 0.0
                    Uy1 = fabs(float(z1[i][j]))/(fabs(float(z1[i][j]))+fabs(float(z1[i+1][j])))
                    Uxx1 = (Ux[i][j] - Ux[i+1][j])*Uy1 + Ux[i+1][j]
                    Uyy1 = (Uy[i][j] - Uy[i+1][j])*Uy1 + Uy[i+1][j]
                    Uy2 = 0.0
                    Ux2 = fabs(float(z1[i][j]))/(fabs(float(z1[i][j+1]))+fabs(float(z1[i][j])))
                    Uxx2 = (Ux[i][j+1] - Ux[i][j])*Ux2 + Ux[i][j]
                    Uyy2 = (Uy[i][j+1] - Uy[i][j])*Ux2 + Uy[i][j]
                if (mark_12[i+1][j] != 0.0):
                    # Left vertical and top horizontal edges crossed.
                    Ux1 = 0.0
                    Uy1 = fabs(float(z1[i][j]))/(fabs(float(z1[i][j]))+fabs(float(z1[i+1][j])))
                    Uxx1 = (Ux[i][j] - Ux[i+1][j])*Uy1 + Ux[i+1][j]
                    Uyy1 = (Uy[i][j] - Uy[i+1][j])*Uy1 + Uy[i+1][j]
                    Uy2 = 1.0
                    Ux2 = fabs(float(z1[i+1][j]))/(fabs(float(z1[i+1][j+1]))+fabs(float(z1[i+1][j])))
                    Uxx2 = (Ux[i+1][j+1] - Ux[i+1][j])*Ux2 + Ux[i+1][j]
                    Uyy2 = (Uy[i+1][j+1] - Uy[i+1][j])*Ux2 + Uy[i+1][j]
            if (mark_11[i][j+1] != 0.0):
                if (mark_12[i][j] != 0.0):
                    # Right vertical and bottom horizontal edges crossed.
                    Ux1 = 1.0
                    Uy1 = fabs(float(z1[i][j+1]))/(fabs(float(z1[i][j+1]))+fabs(float(z1[i+1][j+1])))
                    # NOTE(review): Uy2 used below, but Uy1 was just computed
                    # for this endpoint — suspected copy-paste slip.
                    Uxx1 = (Ux[i][j+1] - Ux[i+1][j+1])*Uy2 + Ux[i+1][j+1]
                    Uyy1 = (Uy[i][j+1] - Uy[i+1][j+1])*Uy2 + Uy[i+1][j+1]
                    Uy2 = 0.0
                    Ux2 = fabs(float(z1[i][j]))/(fabs(float(z1[i][j+1]))+fabs(float(z1[i][j])))
                    Uxx2 = (Ux[i][j+1] - Ux[i][j])*Ux2 + Ux[i][j]
                    Uyy2 = (Uy[i][j+1] - Uy[i][j])*Ux2 + Uy[i][j]
                if (mark_12[i+1][j] != 0.0):
                    # Right vertical and top horizontal edges crossed.
                    Ux1 = 1.0
                    Uy1 = fabs(float(z1[i][j+1]))/(fabs(float(z1[i][j+1]))+fabs(float(z1[i+1][j+1])))
                    # NOTE(review): Uy2 here too — Uy1 expected (see above).
                    Uxx1 = (Ux[i][j+1] - Ux[i+1][j+1])*Uy2 + Ux[i+1][j+1]
                    Uyy1 = (Uy[i][j+1] - Uy[i+1][j+1])*Uy2 + Uy[i+1][j+1]
                    Uy2 = 1.0
                    Ux2 = fabs(float(z1[i+1][j]))/(fabs(float(z1[i+1][j+1]))+fabs(float(z1[i+1][j])))
                    Uxx2 = (Ux[i+1][j+1] - Ux[i+1][j])*Ux2 + Ux[i+1][j]
                    Uyy2 = (Uy[i+1][j+1] - Uy[i+1][j])*Ux2 + Uy[i+1][j]
            if (mark_12[i][j] != 0.0):
                if (mark_12[i+1][j] != 0.0):
                    # Bottom and top horizontal edges crossed.
                    Uy1 = 0.0
                    Ux1 = fabs(float(z1[i][j]))/(fabs(float(z1[i][j+1]))+fabs(float(z1[i][j])))
                    # NOTE(review): Ux2 used below but not yet set in this
                    # branch — Ux1 (just computed) is the expected factor.
                    Uxx1 = (Ux[i][j+1] - Ux[i][j])*Ux2 + Ux[i][j]
                    Uyy1 = (Uy[i][j+1] - Uy[i][j])*Ux2 + Uy[i][j]
                    Uy2 = 1.0
                    Ux2 = fabs(float(z1[i+1][j]))/(fabs(float(z1[i+1][j+1]))+fabs(float(z1[i+1][j])))
                    Uxx2 = (Ux[i+1][j+1] - Ux[i+1][j])*Ux2 + Ux[i+1][j]
                    Uyy2 = (Uy[i+1][j+1] - Uy[i+1][j])*Ux2 + Uy[i+1][j]
            #------------------------------------------------------------------------
            # --- Q chord: same edge case analysis for the second field ---
            if (mark_21[i][j] != 0.0):
                if (mark_21[i][j+1] != 0.0):
                    Qx1 = 0.0
                    Qy1 = fabs(float(z2[i][j]))/(fabs(float(z2[i][j]))+fabs(float(z2[i+1][j])))
                    Qxx1 = (Qx[i][j] - Qx[i+1][j])*Qy1 + Qx[i+1][j]
                    Qyy1 = (Qy[i][j] - Qy[i+1][j])*Qy1 + Qy[i+1][j]
                    Qx2 = 1.0
                    Qy2 = fabs(float(z2[i][j+1]))/(fabs(float(z2[i][j+1]))+fabs(float(z2[i+1][j+1])))
                    Qxx2 = (Qx[i][j+1] - Qx[i+1][j+1])*Qy2 + Qx[i+1][j+1]
                    Qyy2 = (Qy[i][j+1] - Qy[i+1][j+1])*Qy2 + Qy[i+1][j+1]
                if (mark_22[i][j] != 0.0):
                    Qx1 = 0.0
                    Qy1 = fabs(float(z2[i][j]))/(fabs(float(z2[i][j]))+fabs(float(z2[i+1][j])))
                    Qxx1 = (Qx[i][j] - Qx[i+1][j])*Qy1 + Qx[i+1][j]
                    Qyy1 = (Qy[i][j] - Qy[i+1][j])*Qy1 + Qy[i+1][j]
                    Qy2 = 0.0
                    Qx2 = fabs(float(z2[i][j]))/(fabs(float(z2[i][j+1]))+fabs(float(z2[i][j])))
                    # NOTE(review): Ux2 (from the U section) used here —
                    # Qx2 is the expected factor; suspected copy-paste slip.
                    Qxx2 = (Qx[i][j+1] - Qx[i][j])*Ux2 + Qx[i][j]
                    Qyy2 = (Qy[i][j+1] - Qy[i][j])*Ux2 + Qy[i][j]
                if (mark_22[i+1][j] != 0.0):
                    Qx1 = 0.0
                    Qy1 = fabs(float(z2[i][j]))/(fabs(float(z2[i][j]))+fabs(float(z2[i+1][j])))
                    Qxx1 = (Qx[i][j] - Qx[i+1][j])*Qy1 + Qx[i+1][j]
                    Qyy1 = (Qy[i][j] - Qy[i+1][j])*Qy1 + Qy[i+1][j]
                    Qy2 = 1.0
                    Qx2 = fabs(float(z2[i+1][j]))/(fabs(float(z2[i+1][j+1]))+fabs(float(z2[i+1][j])))
                    Qxx2 = (Qx[i+1][j+1] - Qx[i+1][j])*Qx2 + Qx[i+1][j]
                    Qyy2 = (Qy[i+1][j+1] - Qy[i+1][j])*Qx2 + Qy[i+1][j]
            if (mark_21[i][j+1] != 0.0):
                if (mark_22[i][j] != 0.0):
                    Qx1 = 1.0
                    Qy1 = fabs(float(z2[i][j+1]))/(fabs(float(z2[i][j+1]))+fabs(float(z2[i+1][j+1])))
                    # NOTE(review): Qy2 used below — Qy1 expected (mirrors
                    # the U-section slip flagged above).
                    Qxx1 = (Qx[i][j+1] - Qx[i+1][j+1])*Qy2 + Qx[i+1][j+1]
                    Qyy1 = (Qy[i][j+1] - Qy[i+1][j+1])*Qy2 + Qy[i+1][j+1]
                    Qy2 = 0.0
                    Qx2 = fabs(float(z2[i][j]))/(fabs(float(z2[i][j+1]))+fabs(float(z2[i][j])))
                    # NOTE(review): Ux2 again — Qx2 expected.
                    Qxx2 = (Qx[i][j+1] - Qx[i][j])*Ux2 + Qx[i][j]
                    Qyy2 = (Qy[i][j+1] - Qy[i][j])*Ux2 + Qy[i][j]
                if (mark_22[i+1][j] != 0.0):
                    Qx1 = 1.0
                    Qy1 = fabs(float(z2[i][j+1]))/(fabs(float(z2[i][j+1]))+fabs(float(z2[i+1][j+1])))
                    # NOTE(review): Qy2 used below — Qy1 expected.
                    Qxx1 = (Qx[i][j+1] - Qx[i+1][j+1])*Qy2 + Qx[i+1][j+1]
                    Qyy1 = (Qy[i][j+1] - Qy[i+1][j+1])*Qy2 + Qy[i+1][j+1]
                    Qy2 = 1.0
                    Qx2 = fabs(float(z2[i+1][j]))/(fabs(float(z2[i+1][j+1]))+fabs(float(z2[i+1][j])))
                    Qxx2 = (Qx[i+1][j+1] - Qx[i+1][j])*Qx2 + Qx[i+1][j]
                    Qyy2 = (Qy[i+1][j+1] - Qy[i+1][j])*Qx2 + Qy[i+1][j]
            if (mark_22[i][j] != 0.0):
                if (mark_22[i+1][j] != 0.0):
                    Qy1 = 0.0
                    Qx1 = fabs(float(z2[i][j]))/(fabs(float(z2[i][j+1]))+fabs(float(z2[i][j])))
                    # NOTE(review): Ux2 used here — Qx1 (just computed) is
                    # the expected factor; suspected copy-paste slip.
                    Qxx1 = (Qx[i][j+1] - Qx[i][j])*Ux2 + Qx[i][j]
                    Qyy1 = (Qy[i][j+1] - Qy[i][j])*Ux2 + Qy[i][j]
                    Qy2 = 1.0
                    Qx2 = fabs(float(z2[i+1][j]))/(fabs(float(z2[i+1][j+1]))+fabs(float(z2[i+1][j])))
                    Qxx2 = (Qx[i+1][j+1] - Qx[i+1][j])*Qx2 + Qx[i+1][j]
                    Qyy2 = (Qy[i+1][j+1] - Qy[i+1][j])*Qx2 + Qy[i+1][j]
            # Intersect the two chords in local coordinates.
            k1 = (Uy2 - Uy1)/(Ux2 - Ux1)
            k2 = (Qy2 - Qy1)/(Qx2 - Qx1)
            x_solve = (k1*(Ux1) - k2*(Qx1) + Qy1 - Uy1)/(k1 - k2)
            y_solve = k1*(x_solve - Ux1) + Uy1
            # Interpolate the derivative values at the intersection point
            # (these scalars shadow the Uxx/Uyy/Qxx/Qyy grids loaded above).
            Uxx = (Uxx2 - Uxx1)/(Ux2 - Ux1)*x_solve + Uxx1
            Uyy = (Uyy2 - Uyy1)/(Uy2 - Uy1)*y_solve + Uyy1
            Qxx = (Qxx2 - Qxx1)/(Qx2 - Qx1)*x_solve + Qxx1
            Qyy = (Qyy2 - Qyy1)/(Qy2 - Qy1)*y_solve + Qyy1
            # Accept only intersections that fall inside the unit cell.
            if (x_solve > -0.0) & (x_solve < 1.0):
                if (y_solve > -0.0) & (y_solve < 1.0):
                    answer_x.append(float(i)/N)
                    answer_y.append(float(j)/N)
                    answer_Ux.append(Uxx)
                    answer_Uy.append(Uyy)
                    answer_Qx.append(Qxx)
                    answer_Qy.append(Qyy)
#pcolor(x,y,sqrt(z1*z1 + z2*z2))
#plot(answer_x, answer_y, 'kx', ms=20)
#a1, b1 = genfromtxt('../data_mnk_f/lev1.dat').T
#plot(a1, b1, 'ko', ms=1)
#a2, b2 = genfromtxt('../data_mnk_f/lev2.dat').T
#plot(a2, b2, 'ko', ms=1)
#show()
# Dump every located degenerate point to disk as integer grid indices.
# Fixes: the original `range(0, size(answer_x) - 1)` silently dropped the
# last point, shadowed the `file` builtin, and relied on GC to close the
# handle; iterate all points and use a context manager instead.
with open("pointsP.dat", 'w') as points_file:
    for k in range(len(answer_x)):
        points_file.write('{} {}\n'.format(int(N * answer_x[k]), int(N * answer_y[k])))
#-----------------------------------------
from numpy import roots, random
from math import atan, fabs
def cubic (Qx, Qy, Ux, Uy):
    """Classify the point by the roots of the cubic built from (Qx, Qy, Ux, Uy).

    Returns 'c' when the discriminant is negative (one real root),
    'a' or 'b' when it is positive (three real roots, split on whether the
    normalized angular spread exceeds pi), and falls through (None) for the
    degenerate discriminant == 0 case, exactly as the original did.
    """
    # Coefficients of c3*t^3 + c2*t^2 + c1*t + c0.
    c3, c2, c1, c0 = Uy, Ux + 2 * Qy, 2 * Qx - Uy, -Ux
    # Standard cubic discriminant.
    disc = (-4 * c2 ** 3 * c0 + c2 ** 2 * c1 ** 2
            - 4 * c3 * c1 ** 3 + 18 * c3 * c2 * c1 * c0
            - 27 * c3 ** 2 * c0 ** 2)
    if disc < 0:
        return 'c'
    if disc > 0:
        real_roots = roots([c3, c2, c1, c0]).real
        angles = [atan(real_roots[0]), atan(real_roots[1]), atan(real_roots[2])]
        for idx in range(3):
            # Flip a direction by pi when the field component along it
            # disagrees in sign with sin(2*theta).
            forward = Ux * cos(angles[idx]) + Uy * sin(angles[idx])
            if forward * (2 * sin(angles[idx]) * cos(angles[idx])) < 0:
                angles[idx] = angles[idx] + pi
        angles.sort()
        # Normalize so the smallest angle is zero; only the total spread matters.
        spread = [0.0, angles[1] - angles[0], angles[2] - angles[0]]
        return 'a' if spread[2] > pi else 'b'
#-----------------------------------------
# Classify each located point with cubic() and tally the three types.
# Fixes: cubic() was evaluated four times per point; the three history files
# were opened in append mode and never closed; range(0, size-1) dropped the
# last point. NOTE: 'type' shadows the builtin name; kept so any downstream
# reader of this module-level list keeps working.
type = []
number = [0, 0, 0]  # counts for types 'a', 'b', 'c' respectively
_TYPE_SLOT = {'a': 0, 'b': 1, 'c': 2}
for k in range(len(answer_x)):
    # Evaluate the classification exactly once per point.
    point_kind = cubic(answer_Qx[k], answer_Qy[k], answer_Ux[k], answer_Uy[k])
    type.append(point_kind)
    if point_kind in _TYPE_SLOT:
        number[_TYPE_SLOT[point_kind]] += 1
#print np.array(number) /float(number[0] + number[1] + number[2])
# Append the per-type fractions to their running history files.
# NOTE(review): this still divides by zero when no points were found, as the
# original did -- confirm whether an empty run is possible upstream.
total = float(number[0] + number[1] + number[2])
with open("type_A.dat", 'a') as out:
    out.write('{}\n'.format(number[0] / total))
with open("type_B.dat", 'a') as out:
    out.write('{}\n'.format(number[1] / total))
with open("type_C.dat", 'a') as out:
    out.write('{}\n'.format(number[2] / total))
# Write every point together with its classification (all points, not n-1).
with open("points_typesP.dat", 'w') as out:
    for k in range(len(answer_x)):
        out.write('{} {} {}\n'.format(int(N * answer_x[k]), int(N * answer_y[k]), type[k]))
#------------------------------------------
|
import base64
import datetime
import io
import json
import urllib.parse
import zipfile
from functools import wraps

import flask

import config
import hapi_passthrough
from api import make_eps_api_convert_message_request, make_eps_api_metadata_request
from app import app, fernet
from auth import exchange_code_for_token, get_access_token, login, set_access_token_cookies, get_authorize_url
from client import render_rivets_client, render_react_client
from cookies import (
    set_previous_prescription_id_cookie,
    set_current_prescription_id_cookie,
    set_next_prescription_id_cookie,
    reset_previous_prescription_id_cookie,
    reset_next_prescription_id_cookie,
    get_auth_method_from_cookie,
    set_auth_method_cookie,
    set_session_cookie
)
from helpers import (
    pr_redirect_required,
    pr_redirect_enabled,
    get_pr_branch_url,
    parse_oauth_state,
    get_pr_number,
    create_oauth_state
)
def exclude_from_auth(*args, **kw):
    """Decorator marking a view function as exempt from the auth_check
    before_request hook.

    auth_check only tests hasattr(view_func, "_exclude_from_auth"), so the
    flag's value never mattered; it is now set to True (the original set
    False) so the attribute reads consistently if inspected directly.
    functools.wraps copies __dict__, so the marker survives the wrapping.
    """
    def wrapper(endpoint_method):
        endpoint_method._exclude_from_auth = True
        @wraps(endpoint_method)
        def wrapped(*endpoint_args, **endpoint_kw):
            return endpoint_method(*endpoint_args, **endpoint_kw)
        return wrapped
    return wrapper
@app.before_request
def auth_check():
    """Require a decryptable Access-Token cookie on every request, except for
    static assets and endpoints decorated with exclude_from_auth.

    Returns the login response when authentication is missing or invalid;
    returns None to let the request proceed.
    """
    # Static assets are always served without authentication.
    if config.STATIC_URL in flask.request.path:
        return
    flask.g.skip_auth = False
    if flask.request.endpoint in app.view_functions:
        view_func = app.view_functions[flask.request.endpoint]
        # Presence of the marker attribute is what exempts the view;
        # its value is not inspected.
        flask.g.skip_auth = hasattr(view_func, "_exclude_from_auth")
    if not flask.g.skip_auth:
        access_token_encrypted = flask.request.cookies.get("Access-Token")
        if access_token_encrypted is None:
            return login()
        try:
            # Decryption doubles as validation; the plaintext is not needed.
            fernet.decrypt(access_token_encrypted.encode("utf-8"))
        except Exception:
            # Narrowed from a bare except, which would also have swallowed
            # SystemExit/KeyboardInterrupt.
            return login()
@app.route("/_healthcheck", methods=["GET"])
@exclude_from_auth()
def get_healthcheck():
    """Unauthenticated liveness probe, delegated to the HAPI passthrough."""
    return hapi_passthrough.get_healthcheck()


@app.route("/_status", methods=["GET"])
@exclude_from_auth()
def get_status():
    """Unauthenticated status report, delegated to the HAPI passthrough."""
    return hapi_passthrough.get_status()


@app.route("/change-auth", methods=["GET"])
@exclude_from_auth()
def get_change_auth():
    """Serve the login page so the user can pick an auth method."""
    return render_rivets_client("login")


@app.route("/change-auth", methods=["POST"])
@exclude_from_auth()
def post_change_auth():
    """Record the chosen auth method in a cookie and return the authorize URL.

    Sandbox environments have no real OAuth server, so the client is sent
    straight to the local /callback handler instead.
    """
    login_request = flask.request.json
    auth_method = login_request["authMethod"]
    if config.ENVIRONMENT.endswith("-sandbox"):
        authorize_url = "/callback"
    else:
        state = create_oauth_state(get_pr_number(config.BASE_PATH), "home")
        authorize_url = get_authorize_url(state, auth_method)
    response = app.make_response({"redirectUri": f'{authorize_url}'})
    set_auth_method_cookie(response, auth_method)
    return response


@app.route("/", methods=["GET"])
def get_home():
    """Render the home page."""
    return render_rivets_client("home")


@app.route("/search", methods=["GET"])
def get_search():
    """Render the (React) search page."""
    return render_react_client("search")


@app.route("/prescribe/load", methods=["GET"])
def get_load():
    """Render the prescription load page."""
    return render_rivets_client("load")
@app.route("/download", methods=['GET'])
def download():
    """Return a ZIP of every prescription in the current session: each
    prepare request as JSON, plus its converted XML when an access token
    is available.

    Fix: the original referenced make_eps_api_convert_message_request
    without importing it, raising NameError at runtime whenever an access
    token was present; the name is now imported from api alongside
    make_eps_api_metadata_request.
    """
    zip_buffer = io.BytesIO()
    access_token = get_access_token()
    hapi_session = hapi_passthrough.get_hapi_session()
    short_prescription_ids = hapi_session["prescriptionIds"]
    with zipfile.ZipFile(zip_buffer, 'w') as zip_file:
        for index, short_prescription_id in enumerate(short_prescription_ids):
            bundle = hapi_passthrough.get_prescription(short_prescription_id)
            zip_file.writestr(f"prepare_request_{index + 1}.json", json.dumps(bundle, indent=2))
            if access_token:
                xml, _status_code = make_eps_api_convert_message_request(access_token, bundle)
                zip_file.writestr(f"prepare_request_{index + 1}.xml", xml)
    zip_buffer.seek(0)
    return flask.send_file(
        zip_buffer,
        mimetype='application/zip',
        as_attachment=True,
        attachment_filename='messages.zip')
def update_pagination(response, short_prescription_ids, current_short_prescription_id):
    """Set the previous/current/next prescription-id cookies on *response*
    based on where the current id sits in the session's id list.

    Raises ValueError (from list.index) if the current id is not in the list,
    matching the original behavior.
    """
    position = short_prescription_ids.index(current_short_prescription_id)
    previous_index = position - 1
    next_index = position + 1
    if previous_index >= 0:
        set_previous_prescription_id_cookie(response, short_prescription_ids[previous_index])
    else:
        reset_previous_prescription_id_cookie(response)
    if next_index < len(short_prescription_ids):
        set_next_prescription_id_cookie(response, short_prescription_ids[next_index])
    else:
        reset_next_prescription_id_cookie(response)
    set_current_prescription_id_cookie(response, current_short_prescription_id)
@app.route("/metadata", methods=["GET"])
@exclude_from_auth()
def get_metadata():
    """Proxy the (unauthenticated) EPS metadata request."""
    return make_eps_api_metadata_request()


@app.route("/prescription/<short_prescription_id>", methods=["GET"])
def get_prescription(short_prescription_id):
    """Fetch a single prescription bundle by its short id via the passthrough."""
    response = hapi_passthrough.get_prescription(str(short_prescription_id))
    return app.make_response(response)


@app.route("/tracker", methods=["GET"])
def get_tracker_prescription():
    """Forward the raw query string to the prescription tracker endpoint."""
    hapi_response = hapi_passthrough.get_tracker_prescription(flask.request.query_string.decode("utf-8"))
    return app.make_response(hapi_response)
@app.route("/prescribe/edit", methods=["GET"])
def get_edit():
    """Load the requested prescription into the session and render the edit
    page, updating the pagination cookies for the session's id list."""
    # handles '+' in query_string where flask.request.args.get does not
    short_prescription_id = flask.request.query_string.decode("utf-8")[len("prescription_id="):]
    # Fix: slicing a str can never yield None, so the original `is None`
    # check was dead code -- a missing id produces an empty string, which is
    # what we test for here.
    if not short_prescription_id:
        return flask.redirect(f"{config.PUBLIC_APIGEE_URL}{config.BASE_URL}change-auth")
    hapi_passthrough.get_prescription(short_prescription_id)
    response = app.make_response(render_react_client("edit"))
    # Re-read the session: the passthrough may have normalized the id.
    hapi_session = hapi_passthrough.get_hapi_session()
    short_prescription_ids = hapi_session["prescriptionIds"]
    short_prescription_id = hapi_session["prescriptionId"]
    update_pagination(response, short_prescription_ids, short_prescription_id)
    return response
@app.route("/prescribe/edit", methods=["POST"])
def post_edit():
    """Store edited prescription bundles and redirect back to the edit page
    for the session's current prescription."""
    request_bundles = flask.request.json
    hapi_passthrough.post_edit(request_bundles)
    hapi_session = hapi_passthrough.get_hapi_session()
    short_prescription_ids = hapi_session["prescriptionIds"]
    short_prescription_id = hapi_session["prescriptionId"]
    # quote_plus keeps '+' characters in the id intact across the redirect.
    redirect_url = f'{config.PUBLIC_APIGEE_URL}{config.BASE_URL}prescribe/edit?prescription_id={urllib.parse.quote_plus(short_prescription_id)}'
    response = app.make_response({"redirectUri": redirect_url})
    update_pagination(response, short_prescription_ids, short_prescription_id)
    return response


@app.route("/prescribe/sign", methods=["POST"])
def post_sign():
    """Forward a signing request for the current session's prescriptions."""
    hapi_response = hapi_passthrough.post_sign()
    return app.make_response(hapi_response)


@app.route("/prescribe/send", methods=["GET"])
def get_send():
    """Render the send page."""
    return render_react_client("send")


@app.route("/prescribe/send", methods=["POST"])
def post_send():
    """Forward the send request body to the HAPI passthrough."""
    return hapi_passthrough.post_send(flask.request.json)


@app.route("/prescribe/cancel", methods=["GET"])
def get_cancel():
    """Render the cancel page."""
    return render_react_client("cancel")


@app.route("/prescribe/cancel", methods=["POST"])
def post_cancel():
    """Forward a cancellation request; disabled in production."""
    if (config.ENVIRONMENT == "prod"):
        return app.make_response("Bad Request", 400)
    response = hapi_passthrough.post_cancel(flask.request.json)
    return app.make_response(response)
@app.route("/dispense/release", methods=["GET"])
def get_release():
    """Render the release page; disabled in production."""
    if (config.ENVIRONMENT == "prod"):
        return app.make_response("Bad Request", 400)
    return render_react_client("release")


@app.route("/dispense/release", methods=["POST"])
def post_release():
    """Forward a release request; disabled in production."""
    if (config.ENVIRONMENT == "prod"):
        return app.make_response("Bad Request", 400)
    response = hapi_passthrough.post_release(flask.request.json)
    return app.make_response(response)


@app.route("/dispense/release/<short_prescription_id>", methods=["GET"])
def get_released_prescriptions(short_prescription_id):
    """Return the released prescriptions for the given short id as JSON."""
    response = hapi_passthrough.get_released_prescriptions(str(short_prescription_id))
    return app.make_response(json.dumps(response))


@app.route("/dispense/dispense", methods=["GET"])
def get_dispense():
    """Render the dispense page; disabled in production."""
    if (config.ENVIRONMENT == "prod"):
        return app.make_response("Bad Request", 400)
    return render_react_client("dispense")


@app.route("/dispense/dispense", methods=["POST"])
def post_dispense():
    """Forward a dispense notification; disabled in production."""
    if (config.ENVIRONMENT == "prod"):
        return app.make_response("Bad Request", 400)
    response = hapi_passthrough.post_dispense(flask.request.json)
    return app.make_response(response)


@app.route("/dispenseNotifications/<short_prescription_id>", methods=["GET"])
def get_dispense_notifications(short_prescription_id):
    """Return the dispense notifications for the given short id as JSON."""
    response = hapi_passthrough.get_dispense_notifications(str(short_prescription_id))
    return app.make_response(json.dumps(response))


@app.route("/dispense/claim", methods=["GET"])
def get_claim():
    """Render the claim page; disabled in production."""
    if config.ENVIRONMENT == "prod":
        return app.make_response("Bad Request", 400)
    return render_react_client("claim")


@app.route("/dispense/claim", methods=["POST"])
def post_claim():
    """Forward a claim request; disabled in production."""
    if (config.ENVIRONMENT == "prod"):
        return app.make_response("Bad Request", 400)
    response = hapi_passthrough.post_claim(flask.request.json)
    return app.make_response(response)
@app.route("/logout", methods=["GET"])
def get_logout():
    """Clear the auth/session cookies and redirect the user to the home page."""
    redirect_url = f'{config.PUBLIC_APIGEE_URL}{config.BASE_URL}'
    response = flask.redirect(redirect_url)
    # NOTE(review): blank value + 0 expiry appears to clear the cookies --
    # confirm against the cookie helper implementations.
    set_access_token_cookies(response, "", 0)
    set_session_cookie(response, "", 0)
    return response
@app.route("/callback", methods=["GET"])
@exclude_from_auth()
def get_callback():
    """OAuth redirect endpoint: exchange the auth code for tokens, start a
    HAPI session, and set the session/access-token cookies.

    Sandbox environments skip OAuth entirely and issue a mock token.
    """
    # local development
    if config.ENVIRONMENT.endswith("-sandbox"):
        hapi_session_cookie, _ = hapi_passthrough.post_login("", "")
        # Fixed 10-minute session for the mock login.
        session_expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=float(600))
        response = flask.redirect(config.BASE_URL)
        set_session_cookie(response, hapi_session_cookie, session_expiry)
        mock_access_token_encrypted = fernet.encrypt("mock_access_token".encode("utf-8")).decode("utf-8")
        set_access_token_cookies(response, mock_access_token_encrypted, session_expiry)
        return response
    # deployed environments
    state = parse_oauth_state(flask.request.args.get("state"))
    # If the login started from a PR deployment, bounce the callback to that
    # PR's own URL (when enabled for this environment).
    if pr_redirect_required(config.BASE_PATH, state):
        if pr_redirect_enabled(config.ENVIRONMENT):
            return flask.redirect(
                get_pr_branch_url(state["prNumber"], "callback", flask.request.query_string.decode("utf-8")))
        else:
            return app.make_response("Bad Request", 400)
    code = flask.request.args.get("code")
    auth_method = get_auth_method_from_cookie()
    token_response_json = exchange_code_for_token(code, auth_method)
    access_token = token_response_json["access_token"]
    refresh_token = token_response_json["refresh_token"]
    access_token_expires_in = token_response_json["expires_in"]
    refresh_token_expires_in = token_response_json["refresh_token_expires_in"]
    # Tokens live in cookies on the client, so encrypt them first.
    access_token_encrypted = fernet.encrypt(access_token.encode("utf-8")).decode("utf-8")
    refresh_token_encrypted = fernet.encrypt(refresh_token.encode("utf-8")).decode("utf-8")
    access_token_expires = datetime.datetime.utcnow() + datetime.timedelta(seconds=float(access_token_expires_in))
    refresh_token_expires = datetime.datetime.utcnow() + datetime.timedelta(seconds=float(refresh_token_expires_in))
    # NOTE(review): refresh_token_encrypted/refresh_token_expires are computed
    # but never attached to the response -- confirm whether token refresh is
    # handled elsewhere or this is dead code.
    hapi_session_cookie, _ = hapi_passthrough.post_login(auth_method, access_token)
    redirect_url = f'{config.PUBLIC_APIGEE_URL}{config.BASE_URL}'
    response = flask.redirect(redirect_url)
    set_session_cookie(response, hapi_session_cookie, access_token_expires)
    set_access_token_cookies(response, access_token_encrypted, access_token_expires)
    return response
|
#!/usr/bin/env python
import os, sys

# Refuse to run on interpreters older than 2.4.4.
# NOTE(review): 0x2040400 leaves hexversion's release-level/serial bytes at
# zero, so 2.4.4 pre-releases also pass -- presumably close enough here.
if sys.hexversion < 0x2040400:
    sys.stderr.write("pysync.py needs python version at least 2.4.4.\n")
    sys.stderr.write("You are using %s\n" % sys.version)
    sys.stderr.write("Here is a guess at where the python executable is--\n")
    # 'type python' in a shell shows the user which binary was picked up.
    os.system("/bin/sh -c 'type python>&2'");
    sys.exit(1)
import cPickle
import inspect
import hashlib
import signal
import socket
import subprocess
import threading
import zlib
import pysync_remote
from pysync_remote import Options
from pysync_remote import ProgressUpdate, ProgressCounters
from pysync_remote import statToTuple
from gppylib.commands.gp import PySync
# MPP-13617
import re
# Matches "[host]:path" destinations so bracketed hosts (presumably IPv6
# literals, per MPP-13617) keep their embedded colons.
RE1 = re.compile('\\[([^]]+)\\]:(.+)')
# Source piped to the remote python -u -c process: it reads a byte-count
# line, then exec's exactly that many bytes (the pysync_remote module
# source) from stdin. See LocalPysync.run().
bootstrapSource = """
import os,sys
exec(sys.stdin.read(int(sys.stdin.readline())))
"""
class PysyncProxy:
    '''
    The PysyncProxy class is used to initiate a third-party synchronization operation.
    An instance of PysyncProxy is used to start a LocalPysync instance on a remote host
    to be used as the source of the synchronization operation. The "remote" LocalPysync
    instance then runs RemotePysync on the destination as usual. Progress information
    is fed from the destination host, through the remote LocalPysync instance and on to
    this instance for reporting.
    Lines written by LocalPysync to stdout are recorded in the list self.stdout; lines
    written by LocalPysync to stderr are recorded in self.stderr. Progress information
    is handled only by the functions set for the recordProgressCallback and
    recordRawProgressCallback properties.
    '''

    class _Quit(SystemExit):
        # Internal control-flow exception raised to leave the command loop;
        # the exit code travels in SystemExit.code.
        def __init__(self, *info):
            SystemExit.__init__(self, *info)
    def __init__(self, sourceHost, sourceDir, destHost, destDir, syncOptions, verbose=False,
                 progressBytes=None, progressTime=None,
                 recordProgressCallback=None, recordRawProgressCallback=None, progressTimestamp=False):
        '''
        Initialize a new PysyncProxy instance.
        sourceHost - the host from which data is to be copied.
        sourceDir - the directory on sourceHost from which data is to be copied.
        destHost - the host to which data is to be copied.
        destDir - the directory on destHost to which data is to be copied.
        syncOptions - a list of command-line options as described by LocalPysync.usage();
            other options may be added based on the following arguments.
        verbose - indicates whether or not debugging output is generated.
        progressBytes - the number of bytes moved for a volume-based progress message;
            maps to the LocalPysync --progress-bytes option.
        progressTime - the amount of time for a time-based progress message; maps to
            the LocalPysync --progress-time option.
        recordProgressCallback - function to call to present a printable progress
            message generated by RemotePysync; the function must accept a single
            argument of type str. If not set, progress messages are ignored.
        recordRawProgressCallback - function to call to handle raw progress information
            generated by RemotePysync; the function must accept a single argument
            of type pysync_remote.ProgressUpdate. If not set, raw progress
            information is ignored.
        progressTimestamp - indicates whether or not RemotePysync should include the
            observation timestamp on messages it creates.
        '''
        self.ppid = 0
        self.sourceHost = sourceHost
        self.sourceDir = sourceDir
        self.destHost = destHost
        self.destDir = destDir
        self.recordProgressCallback = recordProgressCallback
        self.recordRawProgressCallback = recordRawProgressCallback
        # Translate the keyword conveniences into LocalPysync command-line
        # options; --proxy makes LocalPysync stream pickled progress/status
        # commands back over stdout (parsed in _work()).
        self.syncOptions = syncOptions
        if verbose:
            self.syncOptions += ["-v"]
        if progressBytes:
            self.syncOptions += ["--progress-bytes", progressBytes]
        if progressTime:
            self.syncOptions += ["--progress-time", progressTime]
        self.syncOptions += ["--proxy"]
        if not progressTimestamp:
            self.syncOptions += ["--omit-progress-timestamp"]
        self.stderr = []        # lines LocalPysync wrote to stderr
        self.stdout = []        # non-command lines LocalPysync wrote to stdout
        self.cmd = None         # remote command string, set by run()
        self.returncode = None  # ssh process exit status, set by run()
    def run(self):
        '''
        Initiate and wait for completion of a directory synchronization operation.
        Stderr output is appended to the self.stderr list. Stdout output is appended
        to the self.stdout list. Progress messages are written to stdout unless a
        callback is set.
        Returns the code produced by _work() (or raises on ssh launch failure).
        '''
        pysyncCmd = PySync('pysync', self.sourceDir, self.destHost, self.destDir,
                           options=' '.join(self.syncOptions))
        self.cmd = '. %s/greenplum_path.sh && %s' % (os.environ.get('GPHOME'), pysyncCmd.cmdStr)
        # save of ppid to allow the process to be stopped.
        self.ppid = os.getppid()
        pidFilename = '/tmp/pysync.py.%s.%s.ppid' % (self.destHost, self.destDir.replace('/', '_'))
        pidFile = open(pidFilename, 'w')
        pidFile.write('%d' % (self.ppid))
        pidFile.close()
        code = 0
        self.p = None
        stderrThread = None
        try:
            try:
                # Run LocalPysync on the source host over ssh: its stdout is
                # the command stream parsed by _work(); stderr is drained by a
                # daemon ReaderThread so the pipe cannot fill and block.
                args = []
                args.append("ssh")
                args.extend(["-o", "BatchMode=yes"])
                args.extend(["-o", "StrictHostKeyChecking=no"])
                args.append(self.sourceHost)
                args.append(self.cmd)
                self.p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stderrThread = ReaderThread("pysync_stderr", self.p.stderr, self.stderr)
                stderrThread.start()
                code = self._work()
            except OSError, e:
                self.stderr.append(str(e))
                raise
        finally:
            os.remove(pidFilename)
            if self.p:
                # Give the ssh process two seconds to exit on its own before
                # hanging it up with SIGHUP.
                timer = threading.Timer(2.0, (lambda: os.kill(self.p.pid, signal.SIGHUP)))
                timer.start()
                self.returncode = self.p.wait()
                timer.cancel()
                if stderrThread:
                    stderrThread.join(2.0)
        return code
    def _work(self):
        '''
        Wait for and process commands from the LocalPysync instance connected
        to the Popened SSH process.
        Command processing continues until EOF is reached on Popen.stdout (the
        command input stream from LocalPysync) or a "quit" command is processed.
        Because standard command output may be interleaved with serialized command
        objects, command objects are prefixed with "pKl:<length>\n". Non-command
        object lines are appended to the self.stdout buffer.
        Returns 2 if the parent process died, 3 on unexpected EOF, otherwise the
        code carried by the "quit" command.
        '''
        while True:
            try:
                # check if parent still alive
                os.kill(self.ppid, 0)
            except:
                # parent gone, exit
                return 2
            # Get the length of the next serialized command
            a = self.p.stdout.readline()
            if len(a) == 0:
                # End the command loop if EOF
                self.stderr.append("[FATAL]:-Unexpected EOF on LocalPysync output stream")
                return 3
            # If not a pickled command object, just record it
            if not a.startswith("pKl:"):
                self.stdout.append(a.rstrip())
                continue
            size = int(a[4:])
            # Read the serialized command and process it.
            data = self.p.stdout.read(size)
            assert len(data) == size
            try:
                self._doCommand(cPickle.loads(data))
            except PysyncProxy._Quit, e:
                return e.code
    def _doCommand(self, what):
        '''
        Perform the command requested by the remote side and prepare any
        result.
        what - a (command_name, argument) tuple unpickled from the
            LocalPysync command stream.
        '''
        if what[0] == 'recordProgress':
            # Printable progress line; silently dropped without a callback.
            if self.recordProgressCallback:
                self.recordProgressCallback(what[1].rstrip())
            return None
        elif what[0] == 'recordRawProgress':
            # Raw progress object; silently dropped without a callback.
            if self.recordRawProgressCallback:
                self.recordRawProgressCallback(what[1])
            return None
        elif what[0] == 'quit':
            raise PysyncProxy._Quit(what[1])
        else:
            assert 0
class ReaderThread(threading.Thread):
    '''
    Appends all output read from a file handle to the lines buffer.
    Runs as a daemon thread so a stuck pipe cannot keep the process alive.
    '''
    def __init__(self, name, file, lines):
        # NOTE: 'file' shadows the builtin, but renaming it would change the
        # keyword interface for callers.
        self.file = file
        self.lines = lines
        threading.Thread.__init__(self, name=name)
        self.setDaemon(True)

    def run(self):
        # Blocks on the pipe until EOF, recording each line with trailing
        # whitespace stripped.
        for line in self.file:
            self.lines.append(line.rstrip())
class LocalPysync:
    '''
    The LocalPysync class initiates a directory synchronization task by starting
    the pysync_remote module on a target system then processes commands from that
    system to accomplish directory synchronization. Once the pysync_remote module
    is started on the remote system, this LocalPysync instance acts as the remote
    system's agent.
    When invoked through PysyncProxy, stdout is used to return pickled objects
    representing status information from this LocalPysync instance.
    '''
    # Multipliers for the scale suffixes accepted by --progress-bytes.
    NUMBER_SCALES = {'M': 1024 * 1024, 'G': 1024 * 1024 * 1024, 'T': 1024 * 1024 * 1024 * 1024}

    class _Quit(SystemExit):
        # Internal control-flow exception used to leave the command loop;
        # the exit code travels in SystemExit.code.
        def __init__(self, *info):
            SystemExit.__init__(self, *info)
def __init__(self, argv, recordProgressCallback=None, recordRawProgressCallback=None, progressTimestamp=False):
'''
Initialize a new LocalPysync instance.
argv - a command-line style list of arguments as described by self.usage()
recordProgressCallback - function to call to present a printable progress
message generated by RemotePysync; the function must accept a single
argument of type str.
recordRawProgressCallback - function to call to handle raw progress information
generated by RemotePysync; the function must accept a single argument
of type pysync_remote.ProgressUpdate.
progressTimestamp - indicates whether or not RemotePysync should include the
observation timestamp on messages it creates.
'''
self.options = Options()
self.usingProxy = False
self.sshargs = []
self.cache = [None]
self.exclude = set()
self.include = set()
self.recordProgressCallback = recordProgressCallback
if self.recordProgressCallback:
self.options.sendProgress = True
self.recordRawProgressCallback = recordRawProgressCallback
if self.recordRawProgressCallback:
self.options.sendRawProgress = True
self.options.progressTimestamp = progressTimestamp
a = argv[1:]
while a:
if a[0] == '-v':
self.options.verbose = True
elif a[0] == '-?':
self.usage(argv)
elif a[0] == '-compress':
self.options.compress = True
elif a[0] == '-n':
self.options.minusn = True
elif a[0] == '--insecure':
self.options.insecure = True
elif a[0] == '--ssharg':
a.pop(0)
self.sshargs.append(a[0])
elif a[0] == '--delete':
self.options.delete = True
elif a[0] == '-x':
a.pop(0)
name = a[0]
if name[0] == '/':
raise Exception('Please do not use absolute path with -x.')
if name[0:2] != './':
name = os.path.join('.', name)
self.exclude.add(name)
elif a[0] == '-i':
a.pop(0)
name = a[0]
if name[0] == '/':
raise Exception('Please do not use absolute path with -i.')
if name[0:2] != './':
name = os.path.join('.', name)
self.include.add(name)
elif a[0] == '--progress-bytes':
a.pop(0)
try:
scale = a[0][-1]
if scale == '%':
# Ensure number part is convertable; otherwise pass the whole value
factor = float(a[0][:-1])
self.options.progressBytes = a[0]
elif scale.upper() in LocalPysync.NUMBER_SCALES:
# Real numeric value followed by a supported scale identifier
progressBytes = int(float(a[0][:-1]) * LocalPysync.NUMBER_SCALES[scale.upper()])
self.options.progressBytes = progressBytes
else:
# If the value isn't a percent or scaled, it must be an integer number of bytes
progressBytes = int(a[0])
self.options.progressBytes = self.options.progressBytes
except ValueError:
raise ValueError("--progress-bytes value is not supported", a[0])
if type(
self.options.progressBytes) != str and progressBytes < pysync_remote.SyncProgress.MINIMUM_VOLUME_INTERVAL:
raise ValueError(
"--progress-bytes value must be at least %d" % pysync_remote.SyncProgress.MINIMUM_VOLUME_INTERVAL,
a[0])
elif a[0] == '--progress-time':
a.pop(0)
try:
progressSeconds = int(60 * float(a[0]))
self.options.progressTime = progressSeconds
except ValueError:
raise ValueError("--progress-time value is not supported", a[0])
if progressSeconds < pysync_remote.SyncProgress.MINIMUM_TIME_INTERVAL:
raise ValueError("--progress-time value must be at least %f" % (
pysync_remote.SyncProgress.MINIMUM_TIME_INTERVAL / 60))
elif a[0] == '--proxy':
self.usingProxy = True
self.options.sendProgress = True
self.recordProgressCallback = self._recordProgress
self.options.sendRawProgress = True
self.recordRawProgressCallback = self._recordRawProgress
elif a[0] == '--omit-progress-timestamp':
self.options.progressTimestamp = False
else:
break
a.pop(0)
if len(a) != 2:
self.usage(argv)
self.sourceDir = os.path.abspath(a[0])
if not os.path.exists(self.sourceDir):
raise ValueError("Source path \"%s\" not found" % self.sourceDir)
if not os.path.isdir(self.sourceDir):
raise ValueError("Source path \"%s\" is not a directory" % self.sourceDir)
if not os.access(self.sourceDir, os.F_OK | os.R_OK | os.X_OK):
raise ValueError("Source path) \"%s\" is not accessible" % self.sourceDir)
dest = a[1]
# MPP-13617
m = re.match(RE1, dest)
if m:
self.userAndHost, self.destDir = m.groups()
else:
i = dest.find(':')
if i == -1:
self.usage(argv)
self.userAndHost, self.destDir = dest[:i], dest[i + 1:]
self.connectAddress = None
self.sendData = None
hostname = self.userAndHost[self.userAndHost.find('@') + 1:]
try:
addrinfo = socket.getaddrinfo(hostname, None)
except:
print 'dest>>%s<<' % dest, ' hostname>>%s<<' % hostname
raise
if addrinfo:
self.options.addrinfo = addrinfo[0]
else:
raise Exception("Unable to determine address for %s" % self.userAndHost)
    def usage(self, argv):
        # Write the command-line usage text to stderr and exit(1); also used
        # as the error path for malformed argument lists.
        sys.stderr.write("""usage:
python """ + argv[0] + """ [-v] [-?] [-n]
[--ssharg arg] [-x exclude_file] [-i include_file] [--insecure] [--delete]
[--progress-time seconds] [--progress-bytes { n[.n]{% | G | T} }
[--proxy] [--omit-progress-timestamp]
sourcedir [user@]host:destdir
-v: verbose output
-?: Print this message.
--ssharg arg: pass arg to ssh. Use many times to pass many args.
-n: Do not do any work. Just print how many bytes will need to be
transferred over the network per file and a total.
-x name: Do not transfer named file or directory. Don't be too
creative with the name. For example, "directory/./file" will not
work--use "directory/file". Name is relative to sourcedir.
-i name: Only transfer named file or directory. Don't be too
creative with the name. For example, "directory/./file" will not
work--use "directory/file". Name is relative to sourcedir.
--insecure: Do not check SHA256 digest after transfering data.
This makes pysync.py run faster, but a bad guy can forge TCP
packets and put junk of his choice into your files.
--delete: Delete things in dst that do not exist in src.
--progress-time minutes: the number of minutes to elapse before a
time-based progress message is issued. Progress messages may
appear more frequently than specified due to the --progress-bytes
value.
--progress-bytes count: the number of bytes processed before a
volume-based progress message is issued. The count may be a
number followed by 'G' or 'T' or number followed by '%'. If
specified as a percent, the count is calculated as the specified
percent of the total bytes expected to be processed.
--proxy: Internal option indicating a call from PysyncProxy.
--omit-progress-timestamp: Omit the timestamp from progress messages.
""")
        sys.exit(1)
    def readFile(self, filename, offset, size):
        '''
        Read a chunk of the specified size at the specified offset from the
        file identified. The last chunk read is cached for possible re-reading.
        The file is opened only for the duration of the seek and read operations.
        filename - path relative to self.sourceDir.
        Returns the chunk as a byte string of exactly `size` bytes.
        '''
        key = (filename, offset, size)
        # Serve a repeat of the most recent request from the one-slot cache
        # (getDigest followed by getData hits this path).
        if self.cache[0] == key:
            return self.cache[1]
        absfilename = os.path.join(self.sourceDir, filename)
        f = open(absfilename, 'rb')
        f.seek(offset)
        a = f.read(size)
        f.close()
        # A short read means offset+size ran past EOF.
        assert len(a) == size
        self.cache = (key, a)
        return a
    def getList(self):
        '''
        Gets a map of {name:stat} pairs to be processed. The stat value
        is generally the tuple returned from pysync_remote.statToTuple.
        Hard links (an entry with an inode equal to another in the list)
        are represented by a ('L', linked_name) tuple.
        '''
        # NOTE: 'list' shadows the builtin within this method.
        list = dict()
        inomap = dict()
        for root, dirs, files in os.walk(self.sourceDir):
            for i in dirs + files:
                absname = os.path.join(root, i)
                # Name relative to sourceDir, './'-prefixed to match the
                # normalized -x/-i option values.
                relname = '.' + absname[len(self.sourceDir):]
                if relname in self.exclude:
                    # Removing from dirs stops os.walk from descending into it.
                    if i in dirs:
                        dirs.remove(i)
                    continue
                if len(self.include) > 0:
                    """ Check if the file or dir is in the include list """
                    if relname in self.include:
                        pass
                    else:
                        """ Make sure we include any files or dirs under a dir in the include list."""
                        foundPrefix = False
                        for j in self.include:
                            if relname.startswith(j + '/') == True:
                                foundPrefix = True
                                continue
                        if foundPrefix == False:
                            if i in dirs:
                                dirs.remove(i)
                            continue
                # lstat so symlinks are recorded as links, not their targets.
                s = os.lstat(absname)
                if s.st_ino in inomap:
                    # Inode already seen: record a hard link to the first name.
                    list[relname] = ('L', inomap[s.st_ino])
                    continue
                inomap[s.st_ino] = relname
                list[relname] = statToTuple(s, absname)
        return list
    def doCommand(self, what):
        '''
        Perform the command requested by the remote side and prepare any
        result.
        what - a tuple whose first element names the command; remaining
            elements are its arguments.
        '''
        if what[0] == 'connect':
            # Defer the actual socket connect to work(), after the reply is sent.
            self.connectAddress = what[1]
        elif what[0] == 'getOptions':
            return self.options
        elif what[0] == 'getDestDir':
            return self.destDir
        elif what[0] == 'getList':
            return self.getList()
        elif what[0] == 'getDigest':
            # SHA256 of the chunk identified by (filename, offset, size).
            m = hashlib.sha256()
            m.update(self.readFile(what[1], what[2], what[3]))
            return m.digest()
        elif what[0] == 'getData':
            # Stage the chunk; work() sends it over the data socket after the
            # length reply. Compression level 1 trades ratio for speed.
            self.sendData = self.readFile(what[1], what[2], what[3])
            if self.options.compress:
                self.sendData = zlib.compress(self.sendData, 1)
            return len(self.sendData)
        elif what[0] == 'recordProgress':
            # Without a callback, progress lines go straight to stdout.
            if self.recordProgressCallback:
                self.recordProgressCallback(what[1].rstrip())
            else:
                sys.stdout.write(what[1].rstrip())
                sys.stdout.write('\n')
            return None
        elif what[0] == 'recordRawProgress':
            if self.recordRawProgressCallback:
                self.recordRawProgressCallback(what[1])
            else:
                sys.stdout.write("raw: " + str(what[1]))
                sys.stdout.write('\n')
            return None
        elif what[0] == 'quit':
            raise LocalPysync._Quit(what[1])
        else:
            assert 0
    def _recordProgress(self, message):
        '''
        Send progress information to associated PysyncProxy instance.
        '''
        if message:
            self._sendCommand('recordProgress', message)

    def _recordRawProgress(self, progressUpdate):
        '''
        Send raw progress data to associated PysyncProxy instance.
        '''
        if progressUpdate:
            self._sendCommand('recordRawProgress', progressUpdate)

    def _sendCommand(self, *args):
        '''
        Serialize the command & arguments using cPickle and write to stdout.
        This method is used for communication with the initiating PysyncProxy
        instance.
        '''
        a = cPickle.dumps(args)
        # The "pKl:<len>\n" prefix lets PysyncProxy._work() separate pickled
        # commands from ordinary stdout lines on the shared stream.
        sys.stdout.write('pKl:%d\n%s' % (len(a), a))
        sys.stdout.flush()
    def work(self):
        '''
        Wait for and process commands from the RemotePysync instance connected
        to the Popened SSH process.
        Command processing continues until EOF is reached on Popen.stdout (the
        command input stream from RemotePysync) or a "quit" command is processed.
        Command response objects are serialized and written to Popen.stdin (the
        command output stream to RemotePysync).
        Returns 2 if the parent process died, 3 on unexpected EOF, otherwise the
        code carried by the "quit" command.
        '''
        while True:
            try:
                # check if parent still alive
                os.kill(os.getppid(), 0)
            except:
                # parent gone, exit
                return 2
            # Get the length of the next serialized command
            a = self.p.stdout.readline()
            if len(a) == 0:
                # End the command loop if EOF
                print >> sys.stderr, "[FATAL]:-Unexpected EOF on RemotePysync output stream"
                return 3
            size = int(a)
            # Read the serialized command and process it.
            data = self.p.stdout.read(size)
            assert len(data) == size
            try:
                answer = cPickle.dumps(self.doCommand(cPickle.loads(data)))
            except LocalPysync._Quit, e:
                return e.code
            # Send the serialized command response
            self.p.stdin.write("%d\n%s" % (len(answer), answer))
            self.p.stdin.flush()
            # If the command was a connect order, open a socket to
            # the remote side for data transfer
            if self.connectAddress != None:
                self.socket = socket.socket(self.options.addrinfo[0])
                self.socket.connect(self.connectAddress)
                self.connectAddress = None
            # If the command was a getData order, send the prepared
            # data over the socket.
            if self.sendData != None:
                self.socket.sendall(self.sendData)
                self.sendData = None
    def run(self):
        '''
        Start the pysync_remote module on the remote host and call self.work() to process
        commands presented by the remote host.

        Returns the exit code produced by self.work(), after forwarding a
        'quit' command to the proxy if one is attached.
        '''
        # save of ppid to allow the process to be stopped.
        # NOTE(review): destDir is interpolated into a shell command here; a
        # directory name containing shell metacharacters would be unsafe.
        os.system('echo %d > /tmp/pysync.py.%s.ppid' % (os.getppid(), self.destDir.replace('/', '_')))
        # NOTE(review): PATH and LIBPATH are computed but never used in this
        # method — possibly leftovers; confirm before removing.
        PATH = os.environ.get('PATH') or '.'
        LIBPATH = os.environ.get('LD_LIBRARY_PATH') or '.'
        # Remote bootstrap: source the Greenplum environment, then run the
        # bootstrap program, which reads pysync_remote's source from stdin.
        cmd = ('''. %s/greenplum_path.sh && bash -c "python -u -c '%s'"'''
            % (os.environ.get('GPHOME'),
            bootstrapSource))
        args = []
        args.append('ssh')
        args.extend(["-o", "BatchMode=yes"])  # never prompt for a password
        args.extend(["-o", "StrictHostKeyChecking=no"])
        args.extend(self.sshargs)
        args.append(self.userAndHost)
        args.append(cmd)
        code = 0
        self.p = None
        try:
            try:
                # Ship pysync_remote's source to the bootstrap, length-prefixed.
                pysyncSource = inspect.getsource(pysync_remote)
                self.p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
                self.p.stdin.write("%d\n%s" % (len(pysyncSource), pysyncSource))
                code = self.work()
            except OSError, e:
                sys.stderr.write(str(e))
                raise
        finally:
            os.remove('/tmp/pysync.py.%s.ppid' % (self.destDir.replace('/', '_')))
            if self.p:
                # Give the SSH child 2 seconds to exit on its own before
                # hanging it up with SIGHUP.
                timer = threading.Timer(2.0, (lambda: os.kill(self.p.pid, signal.SIGHUP)))
                timer.start()
                rc = self.p.wait()
                timer.cancel()
        if self.usingProxy:
            self._sendCommand('quit', code)
        return code
# Fail fast at import time: the remote bootstrap command depends on GPHOME.
if os.environ.get('GPHOME') is None:
    print >> sys.stderr, '[FATAL]:- Please specify environment variable GPHOME'
    sys.exit(1)
if __name__ == '__main__':
    # Exit with the code returned by the remote command loop.
    sys.exit(LocalPysync(sys.argv, progressTimestamp=True).run())
|
from __future__ import division
import sys
import albow # used for translation update
from pygame import Rect, Surface, image
from pygame.locals import K_RETURN, K_KP_ENTER, K_ESCAPE, K_TAB, KEYDOWN, SRCALPHA
from pygame.mouse import set_cursor
from pygame.cursors import arrow as arrow_cursor
from pygame.transform import rotozoom
from vectors import add, subtract
from utils import frame_rect
import theme
from theme import ThemeProperty, FontProperty
import resource
from numpy import fromstring
from OpenGL import GL, GLU
debug_rect = False
debug_tab = True
root_widget = None
current_cursor = None
def overridable_property(name, doc=None):
    """Creates a property which calls methods get_xxx and set_xxx of
    the underlying object to get and set the property value, so that
    the property's behaviour may be easily overridden by subclasses."""
    # intern() is the Python 2 builtin; it only speeds up the repeated
    # attribute-name lookups performed by the lambdas below.
    getter_name = intern('get_' + name)
    setter_name = intern('set_' + name)
    return property(
        lambda self: getattr(self, getter_name)(),
        lambda self, value: getattr(self, setter_name)(value),
        None,
        doc)
def rect_property(name):
    """Return a property delegating attribute *name* to the widget's _rect.

    Writing through the property triggers self._resized(old_size) whenever
    the assignment changes the rect's size (e.g. setting 'size' or 'width').
    """
    def _get(self):
        return getattr(self._rect, name)

    def _set(self, value):
        rect = self._rect
        previous_size = rect.size
        setattr(rect, name, value)
        if rect.size != previous_size:
            self._resized(previous_size)

    return property(_get, _set)
# noinspection PyPropertyAccess
class Widget(object):
    """Base class for all albow GUI widgets.

    A widget owns a pygame Rect (in its parent's coordinate system), a list
    of subwidgets, and a focus_switch pointing at the subwidget that should
    receive key events.  Appearance attributes (font, colors, border, margin)
    are ThemeProperties resolved through the theme system.
    """
    # rect Rect bounds in parent's coordinates
    # parent Widget containing widget
    # subwidgets [Widget] contained widgets
    # focus_switch Widget subwidget to receive key events
    # fg_color color or None to inherit from parent
    # bg_color color to fill background, or None
    # visible boolean
    # border_width int width of border to draw around widget, or None
    # border_color color or None to use widget foreground color
    # tab_stop boolean stop on this widget when tabbing
    # anchor string of 'ltrb'
    font = FontProperty('font')
    fg_color = ThemeProperty('fg_color')
    bg_color = ThemeProperty('bg_color')
    bg_image = ThemeProperty('bg_image')
    scale_bg = ThemeProperty('scale_bg')
    border_width = ThemeProperty('border_width')
    border_color = ThemeProperty('border_color')
    sel_color = ThemeProperty('sel_color')
    margin = ThemeProperty('margin')
    menu_bar = overridable_property('menu_bar')
    is_gl_container = overridable_property('is_gl_container')
    tab_stop = False
    enter_response = None
    cancel_response = None
    anchor = 'ltwh'
    debug_resize = False
    _menubar = None
    _visible = True
    _is_gl_container = False
    redraw_every_event = True
    tooltip = None
    tooltipText = None
    doNotTranslate = False
    # 'name' is used to track widgets without parent
    name = 'Widget'
    def __init__(self, rect=None, **kwds):
        """Create the widget; *rect* defaults to (0, 0, 100, 100).
        Remaining keyword arguments are assigned via self.set()."""
        if rect and not isinstance(rect, Rect):
            raise TypeError("Widget rect not a pygame.Rect")
        self._rect = Rect(rect or (0, 0, 100, 100))
        #-# Translation live update preparation
        self.__lang = albow.translate.getLang()
        self.__update_translation = False
        self.shrink_wrapped = False
        #-#
        self.parent = None
        self.subwidgets = []
        self.focus_switch = None
        self.is_modal = False
        self.set(**kwds)
        self.root = self.get_root()  # may be None until a root widget exists
        self.setup_spacings()
#-# Translation live update preparation
@property
def get_update_translation(self):
return self.__update_translation
def set_update_ui(self, v):
if v:
self.font = self.predict_font({})
for widget in self.subwidgets:
widget.set_update_ui(v)
if self.shrink_wrapped:
self.shrink_wrap()
if hasattr(self, 'calc_size'):
self.calc_size()
self.invalidate()
self.__update_translation = v
#-#
def setup_spacings(self):
def new_size(size):
size = float(size * 1000) / float(100)
size = int(size * resource.font_proportion / 1000)
return size
self.margin = new_size(self.margin)
if hasattr(self, 'spacing'):
self.spacing = new_size(self.spacing)
    def set(self, **kwds):
        """Assign keyword arguments to already-existing attributes only;
        an unknown name raises TypeError.  (iteritems is Python 2.)"""
        for name, value in kwds.iteritems():
            if not hasattr(self, name):
                raise TypeError("Unexpected keyword argument '%s'" % name)
            setattr(self, name, value)
def get_rect(self):
return self._rect
def set_rect(self, x):
old_size = self._rect.size
self._rect = Rect(x)
self._resized(old_size)
resizing_axes = {'h': 'lr', 'v': 'tb'}
resizing_values = {'': [0], 'm': [1], 's': [0, 1]}
def set_resizing(self, axis, value):
chars = self.resizing_axes[axis]
anchor = self.anchor
for c in chars:
anchor = anchor.replace(c, '')
for i in self.resizing_values[value]:
anchor += chars[i]
self.anchor = anchor + value
    def _resized(self, (old_width, old_height)):
        # (Python 2 tuple parameter.)  Fire resized() only if the size
        # actually changed.
        new_width, new_height = self._rect.size
        dw = new_width - old_width
        dh = new_height - old_height
        if dw or dh:
            self.resized(dw, dh)
    def resized(self, dw, dh):
        """Called when this widget's size changed by (dw, dh);
        propagates the change to all subwidgets."""
        if self.debug_resize:
            print "Widget.resized:", self, "by", (dw, dh), "to", self.size
        for widget in self.subwidgets:
            widget.parent_resized(dw, dh)
    def parent_resized(self, dw, dh):
        """Adjust this widget when its parent grew/shrank by (dw, dh).

        The anchor string decides what is pinned: 'l'/'r'/'w' control the
        horizontal edges and width, 't'/'b'/'h' the vertical ones.  Each
        anchored factor is zeroed; the parent's size delta is then split
        evenly across the remaining (unpinned) factors.
        """
        debug_resize = self.debug_resize or getattr(self.parent, 'debug_resize', False)
        if debug_resize:
            print "Widget.parent_resized:", self, "by", (dw, dh)
        left, top, width, height = self._rect
        move = False
        resize = False
        anchor = self.anchor
        if dw:
            factors = [1, 1, 1] # left, width, right
            if 'r' in anchor:
                factors[2] = 0
            if 'w' in anchor:
                factors[1] = 0
            if 'l' in anchor:
                factors[0] = 0
            if any(factors):
                resize = factors[1]
                move = factors[0] or factors[2]
                #print "lwr", factors
                left += factors[0] * dw / sum(factors)
                width += factors[1] * dw / sum(factors)
                #left = (left + width) + factors[2] * dw / sum(factors) - width
        if dh:
            factors = [1, 1, 1] # bottom, height, top
            if 't' in anchor:
                factors[2] = 0
            if 'h' in anchor:
                factors[1] = 0
            if 'b' in anchor:
                factors[0] = 0
            if any(factors):
                resize = factors[1]
                move = factors[0] or factors[2]
                #print "bht", factors
                top += factors[2] * dh / sum(factors)
                height += factors[1] * dh / sum(factors)
                #top = (top + height) + factors[0] * dh / sum(factors) - height
        if resize:
            if debug_resize:
                print "Widget.parent_resized: changing rect to", (left, top, width, height)
            # Full rect assignment goes through set_rect and fires _resized.
            self.rect = Rect((left, top, width, height))
        elif move:
            if debug_resize:
                print "Widget.parent_resized: moving to", (left, top)
            # Pure move: bypass the property so no resize event fires.
            self._rect.topleft = (left, top)
rect = property(get_rect, set_rect)
left = rect_property('left')
right = rect_property('right')
top = rect_property('top')
bottom = rect_property('bottom')
width = rect_property('width')
height = rect_property('height')
size = rect_property('size')
topleft = rect_property('topleft')
topright = rect_property('topright')
bottomleft = rect_property('bottomleft')
bottomright = rect_property('bottomright')
midleft = rect_property('midleft')
midright = rect_property('midright')
midtop = rect_property('midtop')
midbottom = rect_property('midbottom')
center = rect_property('center')
centerx = rect_property('centerx')
centery = rect_property('centery')
def get_visible(self):
return self._visible
def set_visible(self, x):
self._visible = x
visible = overridable_property('visible')
def add(self, arg, index=None):
if arg:
if isinstance(arg, Widget):
if index is not None:
arg.set_parent(self, index)
else:
arg.set_parent(self)
else:
for item in arg:
self.add(item)
def add_centered(self, widget):
w, h = self.size
widget.center = w // 2, h // 2
self.add(widget)
def remove(self, widget):
if widget in self.subwidgets:
widget.set_parent(None)
def set_parent(self, parent, index=None):
if parent is not self.parent:
if self.parent:
self.parent._remove(self)
self.parent = parent
if parent:
parent._add(self, index)
def all_parents(self):
widget = self
parents = []
while widget.parent:
parents.append(widget.parent)
widget = widget.parent
return parents
def _add(self, widget, index=None):
if index is not None:
self.subwidgets.insert(index, widget)
else:
self.subwidgets.append(widget)
if hasattr(widget, "idleevent"):
#print "Adding idle handler for ", widget
self.root.add_idle_handler(widget)
def _remove(self, widget):
if hasattr(widget, "idleevent"):
#print "Removing idle handler for ", widget
self.root.remove_idle_handler(widget)
self.subwidgets.remove(widget)
if self.focus_switch is widget:
self.focus_switch = None
    def draw_all(self, surface):
        """Draw this widget and its subwidget tree onto *surface*.

        Order: background (image or color fill), self.draw(), border,
        subwidgets (each clipped to a subsurface), then self.draw_over().
        """
        if self.visible:
            surf_rect = surface.get_rect()
            bg_image = self.bg_image
            if bg_image:
                assert isinstance(bg_image, Surface)
                if self.scale_bg:
                    bg_width, bg_height = bg_image.get_size()
                    width, height = self.size
                    if width > bg_width or height > bg_height:
                        # Scale uniformly so the image covers the widget.
                        hscale = width / bg_width
                        vscale = height / bg_height
                        bg_image = rotozoom(bg_image, 0.0, max(hscale, vscale))
                r = bg_image.get_rect()
                r.center = surf_rect.center
                surface.blit(bg_image, r)
            else:
                bg = self.bg_color
                if bg:
                    surface.fill(bg)
            self.draw(surface)
            bw = self.border_width
            if bw:
                bc = self.border_color or self.fg_color
                frame_rect(surface, bc, surf_rect, bw)
            for widget in self.subwidgets:
                sub_rect = widget.rect
                if debug_rect:
                    print "Widget: Drawing subwidget %s of %s with rect %s" % (
                        widget, self, sub_rect)
                sub_rect = surf_rect.clip(sub_rect)
                if sub_rect.width > 0 and sub_rect.height > 0:
                    try:
                        sub = surface.subsurface(sub_rect)
                    except ValueError as e:
                        # pygame raises this when the clipped rect still falls
                        # outside the surface; paint a diagnostic instead.
                        if str(e) == "subsurface rectangle outside surface area":
                            self.diagnose_subsurface_problem(surface, widget)
                        else:
                            raise
                    else:
                        widget.draw_all(sub)
            self.draw_over(surface)
def diagnose_subsurface_problem(self, surface, widget):
mess = "Widget %s %s outside parent surface %s %s" % (
widget, widget.rect, self, surface.get_rect())
sys.stderr.write("%s\n" % mess)
surface.fill((255, 0, 0), widget.rect)
def draw(self, surface):
pass
def draw_over(self, surface):
pass
def find_widget(self, pos):
for widget in self.subwidgets[::-1]:
if widget.visible:
r = widget.rect
if r.collidepoint(pos):
return widget.find_widget(subtract(pos, r.topleft))
return self
def handle_mouse(self, name, event):
self.augment_mouse_event(event)
self.call_handler(name, event)
self.setup_cursor(event)
def mouse_down(self, event):
self.call_parent_handler("mouse_down", event)
def mouse_up(self, event):
self.call_parent_handler("mouse_up", event)
def augment_mouse_event(self, event):
event.dict['local'] = self.global_to_local(event.pos)
def setup_cursor(self, event):
global current_cursor
cursor = self.get_cursor(event) or arrow_cursor
if cursor is not current_cursor:
set_cursor(*cursor)
current_cursor = cursor
    def dispatch_key(self, name, event):
        """Route a keyboard event: menu bar command keys first, then the
        focused subwidget chain, falling back to this widget's own handler
        (or the parent's if this widget is hidden)."""
        if self.visible:
            if event.type == KEYDOWN:
                menubar = self._menubar
                if menubar and menubar.handle_command_key(event):
                    return
            widget = self.focus_switch
            if widget:
                widget.dispatch_key(name, event)
            else:
                self.call_handler(name, event)
        else:
            self.call_parent_handler(name, event)
def get_focus(self):
widget = self
while 1:
focus = widget.focus_switch
if not focus:
break
widget = focus
return widget
def notify_attention_loss(self):
widget = self
while 1:
if widget.is_modal:
break
parent = widget.parent
if not parent:
break
focus = parent.focus_switch
if focus and focus is not widget:
self.root.notMove = False
focus.dispatch_attention_loss()
widget = parent
def dispatch_attention_loss(self):
widget = self
while widget:
widget.attention_lost()
widget = widget.focus_switch
def attention_lost(self):
pass
def handle_command(self, name, *args):
method = getattr(self, name, None)
if method:
return method(*args)
else:
parent = self.next_handler()
if parent:
return parent.handle_command(name, *args)
def next_handler(self):
if not self.is_modal:
return self.parent
def call_handler(self, name, *args):
method = getattr(self, name, None)
if method:
return method(*args)
else:
return 'pass'
def call_parent_handler(self, name, *args):
parent = self.next_handler()
if parent:
parent.call_handler(name, *args)
    def global_to_local(self, p):
        """Convert screen coordinates *p* to this widget's local coordinates."""
        return subtract(p, self.local_to_global_offset())
    def local_to_global(self, p):
        """Convert local coordinates *p* to screen coordinates."""
        return add(p, self.local_to_global_offset())
    def local_to_global_offset(self):
        """Offset of this widget's topleft relative to the screen,
        accumulated recursively over all ancestors."""
        d = self.topleft
        parent = self.parent
        if parent:
            d = add(d, parent.local_to_global_offset())
        return d
def key_down(self, event):
k = event.key
#print "Widget.key_down:", k ###
if k == K_RETURN or k == K_KP_ENTER:
if self.enter_response is not None:
self.dismiss(self.enter_response)
return
elif k == K_ESCAPE:
self.root.fix_sticky_ctrl()
if self.cancel_response is not None:
self.dismiss(self.cancel_response)
return
elif k == K_TAB:
self.tab_to_next()
return
self.call_parent_handler('key_down', event)
def key_up(self, event):
self.call_parent_handler('key_up', event)
def is_inside(self, container):
widget = self
while widget:
if widget is container:
return True
widget = widget.parent
return False
@property
def is_hover(self):
return self.root.hover_widget is self
def present(self, centered=True):
#print "Widget: presenting with rect", self.rect
if self.root is None:
self.root = self.get_root()
if "ControlPanel" not in str(self):
self.root.notMove = True
if centered:
self.center = self.root.center
self.root.add(self)
try:
self.root.run_modal(self)
self.dispatch_attention_loss()
finally:
self.root.remove(self)
#print "Widget.present: returning", self.modal_result
if "ControlPanel" not in str(self):
self.root.notMove = False
return self.modal_result
def dismiss(self, value=True):
self.root.notMove = False
self.modal_result = value
def get_root(self):
return root_widget
def get_top_widget(self):
top = self
while top.parent and not top.is_modal:
top = top.parent
return top
def focus(self):
parent = self.next_handler()
if parent:
parent.focus_on(self)
def focus_on(self, subwidget):
old_focus = self.focus_switch
if old_focus is not subwidget:
if old_focus:
old_focus.dispatch_attention_loss()
self.focus_switch = subwidget
self.focus()
def has_focus(self):
return self.is_modal or (self.parent and self.parent.focused_on(self))
def focused_on(self, widget):
return self.focus_switch is widget and self.has_focus()
def focus_chain(self):
result = []
widget = self
while widget:
result.append(widget)
widget = widget.focus_switch
return result
def shrink_wrap(self):
contents = self.subwidgets
if contents:
rects = [widget.rect for widget in contents]
#rmax = Rect.unionall(rects) # broken in PyGame 1.7.1
rmax = rects.pop()
for r in rects:
rmax = rmax.union(r)
self._rect.size = add(rmax.topleft, rmax.bottomright)
#-# Translation live update preparation
self.shrink_wrapped = True
#-#
def invalidate(self):
if self.root:
self.root.bonus_draw_time = False
@staticmethod
def get_cursor(event):
return arrow_cursor
def predict(self, kwds, name):
try:
return kwds[name]
except KeyError:
return theme.root.get(self.__class__, name)
def predict_attr(self, kwds, name):
try:
return kwds[name]
except KeyError:
return getattr(self, name)
def init_attr(self, kwds, name):
try:
return kwds.pop(name)
except KeyError:
return getattr(self, name)
def predict_font(self, kwds, name='font'):
return kwds.get(name) or theme.root.get_font(self.__class__, name)
def get_margin_rect(self):
r = Rect((0, 0), self.size)
d = -2 * self.margin
r.inflate_ip(d, d)
return r
    def set_size_for_text(self, width, nlines=1):
        """Size the widget to hold *nlines* lines of text.

        *width* may be a pixel width, or a sample string whose rendered
        width (plus margins) is used.  (basestring is Python 2.)
        """
        if width is not None:
            font = self.font
            d = 2 * self.margin
            if isinstance(width, basestring):
                width, height = font.size(width)
                width += d + 2
            else:
                height = font.size("X")[1]
            self.size = (width, height * nlines + d)
def tab_to_first(self):
chain = self.get_tab_order()
if chain:
chain[0].focus()
def tab_to_next(self):
top = self.get_top_widget()
chain = top.get_tab_order()
try:
i = chain.index(self)
except ValueError:
return
target = chain[(i + 1) % len(chain)]
target.focus()
def get_tab_order(self):
result = []
self.collect_tab_order(result)
return result
def collect_tab_order(self, result):
if self.visible:
if self.tab_stop:
result.append(self)
for child in self.subwidgets:
child.collect_tab_order(result)
# def tab_to_first(self, start = None):
# if debug_tab:
# print "Enter Widget.tab_to_first:", self ###
# print "...start =", start ###
# if not self.visible:
# if debug_tab: print "...invisible" ###
# self.tab_to_next_in_parent(start)
# elif self.tab_stop:
# if debug_tab: print "...stopping here" ###
# self.focus()
# else:
# if debug_tab: print "...tabbing to next" ###
# self.tab_to_next(start or self)
# if debug_tab: print "Exit Widget.tab_to_first:", self ###
#
# def tab_to_next(self, start = None):
# if debug_tab:
# print "Enter Widget.tab_to_next:", self ###
# print "...start =", start ###
# sub = self.subwidgets
# if sub:
# if debug_tab: print "...tabbing to first subwidget" ###
# sub[0].tab_to_first(start or self)
# else:
# if debug_tab: print "...tabbing to next in parent" ###
# self.tab_to_next_in_parent(start)
# if debug_tab: print "Exit Widget.tab_to_next:", self ###
#
# def tab_to_next_in_parent(self, start):
# if debug_tab:
# print "Enter Widget.tab_to_next_in_parent:", self ###
# print "...start =", start ###
# parent = self.parent
# if parent and not self.is_modal:
# if debug_tab: print "...telling parent to tab to next" ###
# parent.tab_to_next_after(self, start)
# else:
# if self is not start:
# if debug_tab: print "...wrapping back to first" ###
# self.tab_to_first(start)
# if debug_tab: print "Exit Widget.tab_to_next_in_parent:", self ###
#
# def tab_to_next_after(self, last, start):
# if debug_tab:
# print "Enter Widget.tab_to_next_after:", self, last ###
# print "...start =", start ###
# sub = self.subwidgets
# i = sub.index(last) + 1
# if debug_tab: print "...next index =", i, "of", len(sub) ###
# if i < len(sub):
# if debug_tab: print "...tabbing there" ###
# sub[i].tab_to_first(start)
# else:
# if debug_tab: print "...tabbing to next in parent" ###
# self.tab_to_next_in_parent(start)
# if debug_tab: print "Exit Widget.tab_to_next_after:", self, last ###
def inherited(self, attribute):
value = getattr(self, attribute)
if value is not None:
return value
else:
parent = self.next_handler()
if parent:
return parent.inherited(attribute)
def __contains__(self, event):
r = Rect(self._rect)
r.left = 0
r.top = 0
p = self.global_to_local(event.pos)
return r.collidepoint(p)
def get_mouse(self):
return self.root.get_mouse_for(self)
def get_menu_bar(self):
return self._menubar
def set_menu_bar(self, menubar):
if menubar is not self._menubar:
if self._menubar:
self.remove(self._menubar)
self._menubar = menubar
if menubar:
if menubar.width == 0:
menubar.width = self.width
menubar.anchor = 'lr'
self.add(menubar)
def get_is_gl_container(self):
return self._is_gl_container
def set_is_gl_container(self, x):
self._is_gl_container = x
    def gl_draw_all(self, root, offset):
        """Draw this widget tree inside an OpenGL context.

        GL-container widgets draw themselves and recurse into subwidgets;
        ordinary widgets are rendered onto an SRCALPHA pygame surface and
        blitted into the framebuffer with glDrawPixels.
        """
        if not self.visible:
            return
        #from OpenGL import GL, GLU
        rect = self.rect.move(offset)
        if self.is_gl_container:
            self.gl_draw_self(root, offset)
            suboffset = rect.topleft
            for subwidget in self.subwidgets:
                subwidget.gl_draw_all(root, suboffset)
        else:
            try:
                surface = Surface(self.size, SRCALPHA)
            except:
                #size error?
                return
            self.draw_all(surface)
            data = image.tostring(surface, 'RGBA', 1)
            w, h = root.size
            # 1:1 orthographic projection in window coordinates.
            GL.glViewport(0, 0, w, h)
            GL.glMatrixMode(GL.GL_PROJECTION)
            GL.glLoadIdentity()
            GLU.gluOrtho2D(0, w, 0, h)
            GL.glMatrixMode(GL.GL_MODELVIEW)
            GL.glLoadIdentity()
            # Raster origin is bottom-left in GL, hence the h - bottom flip.
            GL.glRasterPos2i(max(rect.left, 0), max(h - rect.bottom, 0))
            GL.glPushAttrib(GL.GL_COLOR_BUFFER_BIT)
            GL.glEnable(GL.GL_BLEND)
            GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
            # NOTE(review): numpy.fromstring is deprecated in favor of
            # frombuffer — confirm before changing.
            GL.glDrawPixels(self.width, self.height,
                            GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, fromstring(data, dtype='uint8'))
            GL.glPopAttrib()
            GL.glFlush()
    def gl_draw_self(self, root, offset):
        """Hook for GL-container subclasses; default draws nothing."""
        pass
|
<filename>nnet/core_layers.py
from .layer import Layer
import numpy as np
import math
class Linear(Layer):
    """Fully connected layer: y = x . W + b applied over the last input axis."""

    def __init__(self, nout, name=None, lid=None):
        Layer.__init__(self, name=name, lid=lid)
        self.NOut = nout

    def init(self, inputs):
        """Initialize W with Glorot/Xavier-uniform scaling and b with zeros.

        (Removed an unused np.random.RandomState local and the dead
        commented-out normal-init line that referenced it.)
        """
        assert len(inputs) == 1
        inp = inputs[0]
        self.NIn = inp.Shape[-1]
        W_shape = (self.NIn, self.NOut)
        # Glorot (Xavier) uniform: U(-limit, limit), limit = sqrt(6/(fan_in+fan_out)).
        limit = math.sqrt(6.0 / (self.NIn + self.NOut))
        self.W = (np.random.random(W_shape) * 2 - 1) * limit
        self.b = np.zeros(self.NOut)
        self.Shape = inp.Shape[:-1] + (self.NOut,)
        self.Params = (self.W, self.b)
        self.Regularizable = (self.W,)

    def FP(self, xs, state):
        """Forward pass: affine transform of the single input."""
        assert len(xs) == 1
        x = xs[0]
        y = np.dot(x, self.W) + self.b
        return y, state

    def BP(self, gy):
        """Backward pass: return ([grad wrt input], [gW, gb]).

        Parameter gradients are averaged over the minibatch.
        """
        x = self.Xs[0]  # input saved by the forward pass
        n_mb = len(x)   # minibatch size
        inp_flat = x.reshape((-1, self.NIn))
        g_flat = gy.reshape((-1, self.NOut))
        gW = np.dot(inp_flat.T, g_flat) / n_mb  # [n_in, n_out]
        gb = np.mean(g_flat, axis=0)
        gx = np.dot(gy, self.W.T)
        return [gx], [gW, gb]

    def config(self):
        cfg = Layer.config(self)
        cfg["type"] = "linear"
        cfg["nout"] = self.NOut
        return cfg
class Merge(Layer):
    """Concatenate the outputs of several layers along one per-sample axis."""

    def __init__(self, axis=0, name=None, lid=None):
        Layer.__init__(self, name=name, lid=lid)
        # Only axis 0 is supported for now — the first axis after the
        # minibatch axis; FP/BP below rely on this.
        assert axis == 0
        self.Axis = axis

    def init(self, inputs):
        """Compute the merged per-sample Shape; inputs must agree on every
        axis except self.Axis.

        Fixes vs. the original: inp.Shape is an attribute (a tuple), not a
        method, and must not be mutated in place while comparing shapes.
        """
        n_total = 0
        ref_rest = None
        for inp in inputs:
            shape = tuple(inp.Shape)
            n_total += shape[self.Axis]
            rest = shape[:self.Axis] + shape[self.Axis + 1:]  # non-merge dims
            if ref_rest is None:
                ref_rest = rest
            assert rest == ref_rest
        self.Shape = (n_total,) + ref_rest  # valid since self.Axis == 0
        self.Params = ()

    def FP(self, xs, state):
        # Arrays carry the minibatch on axis 0 (see Flatten.FP), so
        # per-sample axis self.Axis is array axis self.Axis + 1; the original
        # concatenated along the minibatch axis, which contradicted BP.
        return np.concatenate(xs, axis=self.Axis + 1), state

    def BP(self, gy):
        """Split the incoming gradient back into one slice per input.

        Fixes the original, which indexed gy[:, i + n] (a single column)
        instead of slicing, and never advanced the offset.
        """
        gxs = []
        offset = 0
        for inp in self.Inputs:
            n = inp.Shape[self.Axis]
            gxs.append(gy[:, offset:offset + n])  # axis 1 since Axis == 0
            offset += n
        return gxs, None

    def config(self):
        cfg = Layer.config(self)
        cfg["type"] = "merge"
        cfg["axis"] = self.Axis
        return cfg
class Input(Layer):
    """Source layer: values are injected externally via set(); forward()
    replaces the usual FP."""
    def __init__(self, shape, name=None, lid=None):
        Layer.__init__(self, name=name, lid=lid)
        # Shape of one sample — presumably excludes the minibatch axis;
        # confirm against Flatten/Linear usage.
        self.Shape = shape
    def set(self, value, t):
        """Store the minibatch *value* for tick *t*."""
        self.newTick(t)
        self.Xs = [value]
    def forward(self, t=None):
        # Must be called with the same tick that set() established.
        assert t == self.Tick
        self.Y = self.Xs[0]
        return self.Y
    def BP(self, gy):
        # Inputs have no upstream layers and no parameters.
        return (), ()
    def config(self):
        cfg = Layer.config(self)
        cfg["type"] = "input"
        cfg["shape"] = self.Shape
        return cfg
class Flatten(Layer):
    """Collapse each sample to a 1-D vector, preserving the minibatch axis."""

    def init(self, inputs):
        assert len(inputs) == 1
        source = inputs[0]
        self.InShape = source.Shape
        total = 1
        for dim in self.InShape:
            total *= dim
        self.Shape = (total,)

    def FP(self, xs, state):
        assert len(xs) == 1
        batch = xs[0]
        flat = batch.reshape((batch.shape[0], -1))
        return flat, state

    def BP(self, gy):
        restored = gy.reshape((gy.shape[0],) + self.InShape)
        return [restored], []

    def config(self):
        cfg = Layer.config(self)
        cfg["type"] = "flatten"
        return cfg
def core_layer(cfg):
    """Instantiate a core layer (Flatten/Input/Merge/Linear) from a config
    dict as produced by the corresponding config() method."""
    kind = cfg["type"]
    if kind == "flatten":
        return Flatten(name=cfg.get("name"), lid=cfg["id"])
    if kind == "input":
        return Input(tuple(cfg["shape"]), name=cfg.get("name"), lid=cfg["id"])
    if kind == "merge":
        return Merge(axis=cfg["axis"], name=cfg.get("name"), lid=cfg["id"])
    if kind == "linear":
        return Linear(cfg["nout"], name=cfg.get("name"), lid=cfg["id"])
    raise ValueError("Unknown core layer type: %s" % (kind,))
|
import os
import yaml
from string import Template
from copy import deepcopy
from .plugins import ArgcountChecker, OptionalArguments, ArgumentReferences, \
BeforeAfterCall, ConstantArguments, ReturnArguments, GILRelease
from ..shared import cwrap_common
class cwrap(object):
BASE_INDENT_SIZE = 6
RETURN_WRAPPERS = {
'void': Template('Py_RETURN_NONE;'),
'long': Template('return PyLong_FromLong($result);'),
'int64_t': Template('return PyLong_FromLong($result);'),
'bool': Template('return PyBool_FromLong($result);'),
'void*': Template('return PyLong_FromVoidPtr($result);'),
}
OPTION_TEMPLATE = Template("""
${els}if ($arg_check) {
$pre_arg_assign
$arg_assign
$code
""")
ARG_ASSIGN_TEMPLATE = Template("""${type} ${name} = ${unpack};""")
OPTION_CODE_TEMPLATE = [
'$call',
'$return_result',
]
FUNCTION_CALL_TEMPLATE = Template("$capture_result$cname($call_arg);")
DEFAULT_PLUGIN_CLASSES = [ArgcountChecker, ConstantArguments, OptionalArguments,
ArgumentReferences, BeforeAfterCall, ReturnArguments, GILRelease]
def __init__(self, source, destination=None, plugins=None, default_plugins=True, template_path=None):
if destination is None:
destination = source.replace('.cwrap', '.cpp')
self.plugins = [] if plugins is None else plugins
if default_plugins:
defaults = [cls() for cls in self.DEFAULT_PLUGIN_CLASSES]
self.plugins = defaults + self.plugins
for plugin in self.plugins:
plugin.initialize(self)
self.base_path = os.path.dirname(os.path.abspath(source))
with open(source, 'r') as f:
declarations = f.read()
# wrap all the declarations in the source .cwrap file
wrapper = self.wrap_declarations(declarations)
# let each plugin do any post-processing of the wrapped file
for plugin in self.plugins:
wrapper = plugin.process_full_file(wrapper, template_path)
# See Note [Unchanging results for ninja]
try:
with open(destination, 'r') as f:
old_wrapper = f.read()
except IOError:
old_wrapper = None
if old_wrapper != wrapper:
with open(destination, 'w') as f:
print("Writing {}".format(destination))
f.write(wrapper)
else:
print("Skipped writing {}".format(destination))
    def wrap_declarations(self, declarations):
        """Scan the .cwrap source line by line, replacing each ``[[ ... ]]``
        YAML declaration with generated wrapper code and expanding
        ``!!inc <path>`` includes in place; other lines pass through."""
        lines = declarations.split('\n')
        declaration_lines = []
        output = []
        in_declaration = False
        i = 0
        while i < len(lines):
            line = lines[i]
            if line == '[[':
                declaration_lines = []
                in_declaration = True
            elif line == ']]':
                in_declaration = False
                # NOTE(review): yaml.load without an explicit Loader can
                # construct arbitrary Python objects; input here is the
                # project's own .cwrap files, but consider yaml.safe_load —
                # confirm the declarations use no Python-specific tags.
                declaration = yaml.load('\n'.join(declaration_lines))
                cwrap_common.set_declaration_defaults(declaration)
                # Pass declaration in a list - maybe some plugins want to add
                # multiple wrappers
                declarations = [declaration]
                for plugin in self.plugins:
                    declarations = plugin.process_declarations(declarations)
                # Generate wrappers for all declarations and append them to
                # the output
                for declaration in declarations:
                    wrapper = self.generate_wrapper(declaration)
                    for plugin in self.plugins:
                        wrapper = plugin.process_wrapper(wrapper, declaration)
                    output.append(wrapper)
            elif in_declaration:
                declaration_lines.append(line)
            elif '!!inc ' == line[:6]:
                fname = os.path.join(self.base_path, line[6:].strip())
                with open(fname, 'r') as f:
                    included = f.read().split('\n')
                # insert it into lines at position i+1
                lines[i + 1:i + 1] = included
            else:
                output.append(line)
            i += 1
        return '\n'.join(output)
def parse_arguments(self, args):
new_args = []
for arg in args:
# Simple arg declaration of form "<type> <name>"
if isinstance(arg, str):
t, _, name = arg.partition(' ')
new_args.append({'type': t, 'name': name})
elif isinstance(arg, dict):
if 'arg' in arg:
arg['type'], _, arg['name'] = arg['arg'].partition(' ')
del arg['arg']
new_args.append(arg)
else:
assert False
return new_args
def search_plugins(self, fnname, args, fallback):
"""Search plugins for the given function to call with args.
If not found, call fallback with args.
"""
for plugin in self.plugins:
wrapper = getattr(plugin, fnname)(*args)
if wrapper is not None:
return wrapper
return fallback(*args)
def get_type_check(self, arg, option):
return self.search_plugins('get_type_check', (arg, option), lambda arg, _: None)
def get_type_unpack(self, arg, option):
return self.search_plugins('get_type_unpack', (arg, option), lambda arg, _: None)
def get_return_wrapper(self, option):
return self.search_plugins('get_return_wrapper', (option,), lambda _: self.RETURN_WRAPPERS[option['return']])
def get_wrapper_template(self, declaration):
return self.search_plugins('get_wrapper_template', (declaration,), lambda _: None)
def get_assign_args(self, arguments):
return self.search_plugins('get_assign_args', (arguments,), lambda _: arguments)
def get_arg_accessor(self, arg, option):
def wrap_accessor(arg, _):
if arg.get('idx') is None:
raise RuntimeError("Missing accessor for '{} {}'".format(
arg['type'], arg['name']))
return 'PyTuple_GET_ITEM(args, {})'.format(arg['idx'])
return self.search_plugins('get_arg_accessor', (arg, option), wrap_accessor)
def generate_wrapper(self, declaration):
wrapper = ''
for i, option in enumerate(declaration['options']):
option_wrapper = self.generate_option(option, is_first=(i == 0))
for plugin in self.plugins:
option_wrapper = plugin.process_option_code(option_wrapper, option)
wrapper += option_wrapper
return self.get_wrapper_template(declaration).substitute(name=declaration['name'], options=wrapper)
def map_selected_arguments(self, base_fn_name, plugin_fn_name, option, arguments):
result = []
for arg in arguments:
accessor = self.get_arg_accessor(arg, option)
tmpl = getattr(self, base_fn_name)(arg, option)
if tmpl is None:
fn = 'check' if base_fn_name == 'get_type_check' else 'unpack'
raise RuntimeError("Missing type {} for '{} {}'".format(
fn, arg['type'], arg['name']))
res = tmpl.substitute(arg=accessor, idx=arg.get('idx'))
for plugin in self.plugins:
res = getattr(plugin, plugin_fn_name)(res, arg, accessor)
result.append(res)
return result
def build_option_args(self, arguments, arg_unpack):
assignement = []
call_arg = []
# If types or names needs to be changed
arguments = self.get_assign_args(arguments)
for arg, unpack in zip(arguments, arg_unpack):
if arg['type'] == 'CONSTANT':
call_arg.append(unpack)
else:
var_name = "arg_" + str(arg.get('assign_name', arg['name']))
res = self.ARG_ASSIGN_TEMPLATE.substitute(
type=arg['type'],
name=var_name,
unpack=unpack)
if var_name not in call_arg:
assignement.append(res)
call_arg.append(var_name)
return assignement, call_arg
def indent_code(self, code):
if code == '':
return code
code_lines = map(lambda s: s.strip(), code.split('\n'))
code = '\n'
depth = self.BASE_INDENT_SIZE
for line in code_lines:
depth -= line.count('}') * 2
code += ' ' * depth + line + '\n'
depth += line.count('{') * 2
depth += line.count('(') * 4
depth -= line.count(')') * 4
return code[:-1]
def generate_option(self, option, is_first):
checked_args = list(filter(
lambda arg: 'ignore_check' not in arg or not arg['ignore_check'],
option['arguments']))
option['num_checked_args'] = len(checked_args)
idx_args = list(filter(
lambda arg: not arg.get('ignore_check') and not arg.get('no_idx'),
option['arguments']))
for i, arg in enumerate(idx_args):
arg['idx'] = i
# Generate checks
arg_checks = self.map_selected_arguments('get_type_check',
'process_single_check', option, checked_args)
arg_checks = ' &&\n '.join(arg_checks)
for plugin in self.plugins:
arg_checks = plugin.process_all_checks(arg_checks, option)
# Generate pre_arg assign
pre_arg_assign = []
for plugin in self.plugins:
pre_arg_assign = plugin.process_pre_arg_assign(pre_arg_assign, option)
# Generate arg assignment and call arguments
arg_unpack = self.map_selected_arguments('get_type_unpack',
'process_single_unpack', option, option['arguments'])
arg_assign, call_arg = self.build_option_args(option['arguments'], arg_unpack)
call_arg = ', '.join(call_arg)
for plugin in self.plugins:
call_arg = plugin.process_all_call_arg(call_arg, option)
# Generate call
try:
return_result = self.get_return_wrapper(option).substitute()
call = self.FUNCTION_CALL_TEMPLATE.substitute(capture_result='',
cname=option['cname'], call_arg=call_arg)
except KeyError:
return_result = self.get_return_wrapper(option).substitute(result='__result')
call = self.FUNCTION_CALL_TEMPLATE.substitute(capture_result=(option['return'] + ' __result = '),
cname=option['cname'], call_arg=call_arg)
code_template = deepcopy(self.OPTION_CODE_TEMPLATE)
for plugin in self.plugins:
code_template = plugin.process_option_code_template(code_template,
option)
code_template = Template('\n'.join(code_template))
code = code_template.substitute(call=call, return_result=return_result)
code = self.indent_code(code)
pre_arg_assign = self.indent_code('\n'.join(pre_arg_assign))
arg_assign = self.indent_code('\n'.join(arg_assign))
# Put everything together
return self.OPTION_TEMPLATE.substitute(
els=('} else ' if not is_first else ''),
arg_check=arg_checks,
pre_arg_assign=pre_arg_assign,
arg_assign=arg_assign,
code=code,
)
|
import datetime
import os
import requests
from flask import Flask, jsonify, abort
from flask_sqlalchemy import SQLAlchemy
import opentracing
from flask_opentracing import FlaskTracing
from sqlalchemy.sql import func
from elasticapm.contrib.flask import ElasticAPM
from elasticapm.contrib.opentracing import Tracer
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SQLITE_DB_PATH = 'sqlite:///' + os.path.abspath(os.path.join(BASE_DIR, 'demo', 'db.sql'))
DJANGO_API_URL = os.environ.get("DJANGO_API_URL", "http://localhost:8000")
from logging.config import dictConfig
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://flask.logging.wsgi_errors_stream',
'formatter': 'default'
}},
'root': {
'level': 'INFO',
'handlers': ['wsgi']
}
})
opentracing_tracer = Tracer(config={"SERVICE_NAME": "opbeans-flask-ot"})
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', SQLITE_DB_PATH)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['ELASTIC_APM'] = {
'SERVICE_NAME': os.environ.get('ELASTIC_APM_SERVICE_NAME', 'opbeans-flask'),
'SERVER_URL': os.environ.get('ELASTIC_APM_SERVER_URL', 'http://localhost:8200'),
'DEBUG': True,
}
db = SQLAlchemy(app)
apm = ElasticAPM(app, logging=True)
#tracing = FlaskTracing(opentracing_tracer, trace_all_requests=True, app=app)
class Customer(db.Model):
    """A customer record, mapped to the ``customers`` table."""
    __tablename__ = 'customers'
    id = db.Column(db.Integer, primary_key=True)
    full_name = db.Column(db.String(1000))
    company_name = db.Column(db.String(1000))
    email = db.Column(db.String(1000))
    address = db.Column(db.String(1000))
    postal_code = db.Column(db.String(1000))
    city = db.Column(db.String(1000))
    country = db.Column(db.String(1000))
class Order(db.Model):
    """An order placed by a customer; line items live in ``order_lines``."""
    __tablename__ = 'orders'
    id = db.Column(db.Integer, primary_key=True)
    customer_id = db.Column(db.Integer, db.ForeignKey('customers.id'), nullable=False)
    customer = db.relationship('Customer', backref=db.backref('orders', lazy=True))
    # callable default: evaluated per row at insert time, not at class definition
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
    # many-to-many to Product via the order_lines association table
    products = db.relationship('Product', secondary='order_lines')
class ProductType(db.Model):
    """Lookup table of product categories (``product_types``)."""
    __tablename__ = 'product_types'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(1000), unique=True)
    def __str__(self):
        return self.name
class Product(db.Model):
    """A product for sale, mapped to the ``products`` table."""
    __tablename__ = 'products'
    id = db.Column(db.Integer, primary_key=True)
    sku = db.Column(db.String(1000), unique=True)
    name = db.Column(db.String(1000))
    description = db.Column(db.Text)
    # Python attribute is product_type_id, but the physical column is type_id
    product_type_id = db.Column('type_id', db.Integer, db.ForeignKey('product_types.id'), nullable=False)
    product_type = db.relationship('ProductType', backref=db.backref('products', lazy=True))
    stock = db.Column(db.Integer)
    # prices appear to be stored as integers (presumably cents) — TODO confirm
    cost = db.Column(db.Integer)
    selling_price = db.Column(db.Integer)
    orders = db.relationship('Order', secondary='order_lines')
class OrderLine(db.Model):
    """Association object linking orders and products (composite primary key)."""
    __tablename__ = 'order_lines'
    product_id = db.Column(db.Integer, db.ForeignKey('products.id'), primary_key=True)
    product = db.relationship('Product')
    order_id = db.Column(db.Integer, db.ForeignKey('orders.id'), primary_key=True)
    order = db.relationship('Order')
    # quantity of this product in the order
    amount = db.Column(db.Integer)
@app.route('/api/products')
def products():
    """Return every product with its id, sku, name, stock and type name."""
    return jsonify([
        {
            'id': prod.id,
            'sku': prod.sku,
            'name': prod.name,
            'stock': prod.stock,
            'type_name': prod.product_type.name
        }
        for prod in Product.query.all()
    ])
@app.route('/api/products/top')
def top_products():
    """Return the three best-selling products, ordered by units sold (desc).

    Fix: order by the labelled SUM expression instead of the textual
    ``'-sold'`` string — SQLAlchemy 1.4+ no longer coerces arbitrary
    strings in ``order_by``, and ``-sold`` only worked by accident via
    SQL numeric negation.
    """
    sold = func.sum(OrderLine.amount).label('sold')
    product_list = db.session.query(
        Product.id,
        Product.sku,
        Product.name,
        Product.stock,
        sold
    ).join(OrderLine).group_by(Product.id).order_by(sold.desc()).limit(3)
    return jsonify([{
        'id': p.id,
        'sku': p.sku,
        'name': p.name,
        'stock': p.stock,
        'sold': p.sold,
    } for p in product_list])
@app.route('/api/products/<int:pk>')
def product(pk):
    """Proxy a single-product lookup to the Django backend API."""
    upstream = requests.get(DJANGO_API_URL + "/api/products/{}".format(pk))
    return jsonify(upstream.json())
@app.route("/api/products/<int:pk>/customers")
def product_customers(pk):
    """Proxy the product-customers lookup to the Django backend API."""
    upstream = requests.get(DJANGO_API_URL + "/api/products/{}/customers".format(pk))
    return jsonify(upstream.json())
@app.route('/api/types')
def product_types():
    """Return every product type as ``{id, name}`` pairs."""
    return jsonify([
        {'id': ptype.id, 'name': ptype.name}
        for ptype in ProductType.query.all()
    ])
@app.route('/api/types/<int:pk>')
def product_type(pk):
    """Return one product type and its products.

    Fix: an unknown id previously raised an unhandled IndexError (HTTP
    500); now responds 404, consistent with ``customer()``.
    """
    try:
        ptype = ProductType.query.filter_by(id=pk)[0]
    except IndexError:
        app.logger.warning('Product type with ID %s not found', pk, exc_info=True)
        abort(404)
    products = Product.query.filter_by(product_type=ptype)
    return jsonify({
        "id": ptype.id,
        "name": ptype.name,
        "products": [{
            "id": product.id,
            "name": product.name,
        } for product in products]
    })
@app.route("/api/customers")
def customers():
    """Return every customer with all their address/contact fields."""
    fields = ("id", "full_name", "company_name", "email",
              "address", "postal_code", "city", "country")
    return jsonify([
        {name: getattr(cust, name) for name in fields}
        for cust in Customer.query.all()
    ])
@app.route("/api/customers/<int:pk>")
def customer(pk):
    """Return a single customer by id, or 404 if the id is unknown."""
    try:
        cust = Customer.query.filter_by(id=pk)[0]
    except IndexError:
        app.logger.warning('Customer with ID %s not found', pk, exc_info=True)
        abort(404)
    fields = ("id", "full_name", "company_name", "email",
              "address", "postal_code", "city", "country")
    return jsonify({name: getattr(cust, name) for name in fields})
if __name__ == '__main__':
    # Development entry point only; run under a WSGI server in production.
    app.run(debug=True, port=5000)
|
<filename>MaximizeHField.py
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
# Highly modified by <NAME> <EMAIL>
# maximize H field in a sample and maximize the uniformity
# this is used to find non-obvious solutions to the planar micro resonator
# turns elements to silver (1) or vacuum (0)
import random
from deap import base
from deap import creator
from deap import tools
import hycohanz as hfss
import shutil
from datetime import datetime
# Wall-clock reference used for elapsed-time reporting throughout the run.
startTime = datetime.now()
# Single-objective maximisation fitness (one weight of +1.0) and a
# list-based individual type carrying that fitness.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, typecode='f', fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Attribute generator
#                      define 'attr_bool' to be an attribute ('gene')
#                      which corresponds to integers sampled uniformly
#                      from the range [0,1] (i.e. 0 or 1 with equal
#                      probability)
toolbox.register("attr_bool", random.randint, 0, 1)
# Structure initializers
#                         define 'individual' to be an individual
#                         consisting of 1767 'attr_bool' elements ('genes')
toolbox.register("individual", tools.initRepeat, creator.Individual,
    toolbox.attr_bool, 1767)
# define the population to be a list of individuals
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# colorize the solution for visual of generation
def colorize_best(individual):
    """Colour the HFSS elements of *individual*: silver genes orange, vacuum white."""
    [oAnsoftApp, oDesktop] = hfss.setup_interface()
    oProject = hfss.get_active_project(oDesktop)
    oDesign = hfss.set_active_design(oProject, 'HFSSDesign1')
    oEditor = hfss.set_active_editor(oDesign)
    vacuum_names = []
    silver_names = []
    for idx, gene in enumerate(individual):
        bucket = silver_names if gene == 1 else vacuum_names
        bucket.append("Elm_" + str(idx))
    hfss.assign_White(oEditor, vacuum_names)
    hfss.assign_Orange(oEditor, silver_names)
# the goal ('fitness') function to be maximized
def evalOneMax(individual):
    """Fitness of one individual: run an HFSS solve and integrate the H field.

    Each gene maps element ``Elm_<i>`` to PEC "silver" (1) or vacuum (0);
    the fitness is the pre-defined field-calculator expression
    ``IntH1r2dVs`` evaluated on the last adaptive pass.  Returns a
    1-tuple, as DEAP expects.  Simulation failures return a large
    negative fitness instead of raising, so the evolution keeps running.

    Fixes: the two bare ``except:`` clauses are narrowed to
    ``except Exception:`` so KeyboardInterrupt/SystemExit can still stop
    the run; a large block of commented-out dead code was removed.
    """
    [oAnsoftApp, oDesktop] = hfss.setup_interface()
    oProject = hfss.get_active_project(oDesktop)
    oDesign = hfss.set_active_design(oProject, 'HFSSDesign1')
    oEditor = hfss.set_active_editor(oDesign)
    oFieldsReporter = hfss.get_module(oDesign, 'FieldsReporter')
    oSolution = oDesign.GetModule("Solutions")
    # Shut off autosave to minimize the .adresults folder
    oDesktop.EnableAutoSave(False)
    # Partition the elements into vacuum / silver groups from the genome.
    index = 0
    Vac = []
    Silv = []
    for i in individual:
        if i == 1:
            Silv.append("Elm_"+str(index))
        else:
            Vac.append("Elm_"+str(index))
        index += 1
    # Check if list is empty before assigning materials
    if Vac:
        hfss.assign_material(oEditor, Vac, MaterialName="vacuum", SolveInside=True)
    if Silv:
        hfss.assign_material(oEditor, Silv, MaterialName="pec", SolveInside=False)
    oDesktop.ClearMessages("", "", 3)
    # Purge previous solution results so the RAMDisk does not fill up
    # (a full disk makes every subsequent solve fail).
    folder = "B:\\GA_PlanarResonator.aedtresults\\HFSSDesign1.results"
    shutil.rmtree(folder)
    # Try to solve; if there is an error, give a strongly negative fitness.
    try:
        oDesign.Analyze("Setup1")
    except Exception:
        print("Simulation Error Set Fitness -10000, ")
        return -10000,
    oFieldsReporter.CalcStack('clear')
    # Load the pre-solved calculator expression.
    # Remember to set Ple to zero unless you are solving for the losses
    # in the substrate.
    oFieldsReporter.CopyNamedExprToStack("IntH1r2dVs")
    # Is there a solution present? If so evaluate it; if not, run Analyze
    # again.  If there is still no solution, give a negative fitness.
    if oSolution.HasFields("Setup1:LastAdaptive", "x_size=2mm") == 1:
        hfss.clc_eval(
            oFieldsReporter,
            'Setup1',
            'LastAdaptive',
            9.7e9,
            0,
            {},
        )
    else:
        oDesign.Analyze("Setup1")
        try:
            hfss.clc_eval(
                oFieldsReporter,
                'Setup1',
                'LastAdaptive',
                9.7e9,
                0,
                {},
            )
        except Exception:
            print("Simulation Error Set Fitness -1000, ")
            return -1000,
    outH = hfss.get_top_entry_value(
        oFieldsReporter,
        'Setup1',
        'LastAdaptive',
        9.7e9,
        0,
        {},
    )
    print(outH[0])
    print("Time: " + str(datetime.now() - startTime))
    return float(outH[0]),
#----------
# Operator registration
#----------
# register the goal / fitness function
toolbox.register("evaluate", evalOneMax)
# register the crossover operator (two-point crossover)
toolbox.register("mate", tools.cxTwoPoint)
# register a mutation operator with a probability to
# flip each attribute/gene of 0.05
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
# operator for selecting individuals for breeding the next
# generation: each individual of the current generation
# is replaced by the 'fittest' (best) of three individuals
# drawn randomly from the current generation (tournament selection).
toolbox.register("select", tools.selTournament, tournsize=3)
#----------
#----------
def main():
    """Run the genetic algorithm: evaluate, select, mate, mutate, repeat.

    Writes the best individual of every generation (and the final best)
    under ``./Solutions/``, then colorizes the final best in HFSS.

    Fix: result files are now written with ``with open(...)`` so the
    handles are closed even if a write fails (previously bare
    ``open``/``close`` pairs leaked on exception).
    """
    random.seed(42)  # fixed seed -> reproducible runs
    # create an initial population of 40 individuals (where
    # each individual is a list of integers)
    pop = toolbox.population(n=40)
    # CXPB: probability with which two individuals are crossed
    # MUTPB: probability for mutating an individual
    # NGEN: number of generations the evolution runs
    CXPB, MUTPB, NGEN = 0.55, 0.3, 30
    print("Start of evolution")
    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    print("  Evaluated %i individuals" % len(pop))
    # Begin the evolution
    for g in range(NGEN):
        print("-- Generation %i --" % g)
        # Select the next generation individuals and clone them
        offspring = toolbox.select(pop, len(pop))
        offspring = list(map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            # cross two individuals with probability CXPB
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                # fitness values of the children must be recalculated later
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            # mutate an individual with probability MUTPB
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        print("  Evaluated %i individuals" % len(invalid_ind))
        # The population is entirely replaced by the offspring
        pop[:] = offspring
        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)
        # Save progress for this generation
        best_ind = tools.selBest(pop, 1)[0]
        with open('./Solutions/' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + '_best_individual_Gen_' + str(g), 'w') as f:
            f.write("%s\n" % (best_ind))
            f.write(" Max %s" % max(fits))
        print("Time: " + str(datetime.now() - startTime))
    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
    print(datetime.now() - startTime)
    # Save best individual final
    with open('./Solutions/' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + '_best_individual_Final', 'w') as f:
        f.write("%s\n" % (best_ind))
        f.write(" Max %s" % max(fits))
    # Colorize the final best individual
    colorize_best(best_ind)
if __name__ == "__main__":
    # Entry point: run the GA when executed as a script.
    main()
|
<reponame>NIC619/pyethereum
import pytest
import ethereum.messages as messages
import ethereum.transactions as transactions
import ethereum.meta as meta
from ethereum.transaction_queue import TransactionQueue
import rlp
from rlp.utils import decode_hex, encode_hex
import ethereum.pow.ethpow as ethpow
import ethereum.utils as utils
from ethereum.pow.chain import Chain
from ethereum.db import EphemDB
from ethereum.tests.utils import new_db
from ethereum.state import State
from ethereum.block import Block
from ethereum.consensus_strategy import get_consensus_strategy
from ethereum.genesis_helpers import mk_basic_state
from ethereum.slogging import get_logger
logger = get_logger()
_db = new_db()
# from ethereum.slogging import LogRecorder, configure_logging, set_level
# config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
# configure_logging(config_string=config_string)
@pytest.fixture(scope='function')
def db():
    # function scope: every test gets a fresh, empty in-memory database
    return EphemDB()
# alias so tests can declare a second database parameter (``alt_db``)
alt_db = db
def accounts():
    """Return two deterministic (private key, address) pairs for the tests.

    Fix: dropped the ``@pytest.fixture(scope="module")`` decorator — this
    function is only ever *called directly* (``accounts()``) throughout
    the module, which modern pytest forbids for fixture functions; it is
    a plain helper.
    """
    k = utils.sha3(b'cow')
    v = utils.privtoaddr(k)
    k2 = utils.sha3(b'horse')
    v2 = utils.privtoaddr(k2)
    return k, v, k2, v2
def mine_on_chain(chain, parent=None, transactions=None,
                  coinbase=None, timestamp=None):
    """Mine the next block on a chain.

    The newly mined block will be considered to be the head of the chain,
    regardless of its total difficulty.

    Fix: ``transactions`` default changed from the mutable ``[]`` to
    ``None`` (classic shared-mutable-default pitfall); behavior is
    unchanged for all callers.

    :param parent: the parent of the block to mine, or `None` to use the
                   current chain head
    :param transactions: a list of transactions to include in the new block
    :param coinbase: optional coinbase to replace ``chain.coinbase``
    :param timestamp: optional timestamp; defaults to parent timestamp + 1
    """
    txqueue = TransactionQueue()
    for t in (transactions or []):
        txqueue.add_transaction(t)
    parent_timestamp = parent.timestamp if parent else chain.state.timestamp
    hc, _ = meta.make_head_candidate(chain, txqueue, parent,
                                     timestamp or parent_timestamp + 1, coinbase or b'\x00' * 20)
    # tests run at difficulty 1 so proof-of-work is found quickly
    assert hc.difficulty == 1
    m = ethpow.Miner(hc)
    rounds = 100
    nonce = 0
    # keep mining in batches of `rounds` nonces until a valid block is found
    while True:
        b = m.mine(rounds=rounds, start_nonce=nonce)
        if b:
            break
        nonce += rounds
    assert chain.add_block(b)
    return b
def mine_next_block(chain, coinbase=None, transactions=None):
    """Mine one block on top of *chain*'s current head and return it.

    Fix: mutable default ``transactions=[]`` replaced with ``None``.
    """
    return mine_on_chain(chain, coinbase=coinbase, transactions=transactions or [])
def test_mining(db):
    """Consecutively mined blocks receive sequential block numbers."""
    chain = Chain({}, difficulty=1)
    assert chain.state.block_number == 0
    assert chain.state.block_difficulty == 1
    for expected_number in (1, 2):
        block = mine_next_block(chain)
        assert block.number == expected_number
def get_transaction(gasprice=0, nonce=0):
    """Build a signed 10-finney transfer from the ``cow`` to the ``horse`` account.

    Fix: removed the ``@pytest.fixture(scope="module")`` decorator — the
    function is always called directly with arguments (e.g.
    ``get_transaction(gasprice=10)``), which is illegal for pytest
    fixtures; it never worked as one (fixtures take no call arguments).
    """
    k, v, k2, v2 = accounts()
    tx = transactions.Transaction(
        nonce, gasprice, startgas=100000,
        to=v2, value=utils.denoms.finney * 10, data='').sign(k)
    return tx
def test_transfer(db):
    """A state-level value transfer moves exactly the requested amount."""
    _, sender, _, receiver = accounts()
    chain = Chain({sender: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    sender_before = chain.state.get_balance(sender)
    receiver_before = chain.state.get_balance(receiver)
    amount = 42
    assert chain.state.transfer_value(sender, receiver, amount)
    assert chain.state.get_balance(sender) == sender_before - amount
    assert chain.state.get_balance(receiver) == receiver_before + amount
def test_failing_transfer(db):
    """An overdraft transfer fails and leaves both balances untouched."""
    _, sender, _, receiver = accounts()
    chain = Chain({sender: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    sender_before = chain.state.get_balance(sender)
    receiver_before = chain.state.get_balance(receiver)
    # twice the funded balance — must be rejected
    assert not chain.state.transfer_value(sender, receiver, utils.denoms.ether * 2)
    assert chain.state.get_balance(sender) == sender_before
    assert chain.state.get_balance(receiver) == receiver_before
def test_mine_block(db):
    """Each mined block credits the coinbase with exactly one BLOCK_REWARD."""
    k, v, k2, v2 = accounts()
    chain = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    genesis_hash = chain.state.prev_headers[0].hash
    blk2 = mine_next_block(chain, coinbase=v)
    blk3 = mine_next_block(chain, coinbase=v)
    blk4 = mine_next_block(chain, coinbase=v)
    # blk5 is mined only to advance the head past blk4
    blk5 = mine_next_block(chain, coinbase=v)
    # Head balance equals the balance at each earlier block plus one
    # BLOCK_REWARD per block mined since that block.
    assert chain.state.get_balance(
        v) == chain.env.config['BLOCK_REWARD'] + chain.mk_poststate_of_blockhash(blk4.hash).get_balance(v)
    assert chain.state.get_balance(
        v) == chain.env.config['BLOCK_REWARD'] * 2 + chain.mk_poststate_of_blockhash(blk3.hash).get_balance(v)
    assert chain.state.get_balance(
        v) == chain.env.config['BLOCK_REWARD'] * 3 + chain.mk_poststate_of_blockhash(blk2.hash).get_balance(v)
    assert chain.state.get_balance(
        v) == chain.env.config['BLOCK_REWARD'] * 4 + chain.mk_poststate_of_blockhash(genesis_hash).get_balance(v)
    assert blk2.prevhash == genesis_hash
def test_block_serialization_with_transaction_empty_genesis(db):
    """A tx whose gasprice the (unfunded) sender cannot pay is left out of the block."""
    k, v, k2, v2 = accounts()
    chain = Chain({}, difficulty=1)
    unaffordable = get_transaction(gasprice=10)  # must fail, as there is no balance
    mined = mine_next_block(chain, transactions=[unaffordable])
    assert unaffordable.hash not in [included.hash for included in mined.transactions]
    assert len(mined.transactions) == 0
def test_mine_block_with_transaction(db):
    """A funded transfer is included in the mined block and moves balances."""
    k, v, k2, v2 = accounts()
    chain = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    transfer = get_transaction()
    mined = mine_next_block(chain, transactions=[transfer])
    included_hashes = [included.hash for included in mined.transactions]
    assert transfer.hash in included_hashes
    assert mined.transactions[0] == transfer
    assert len(mined.transactions) == 1
    assert chain.state.get_balance(v) == utils.denoms.finney * 990
    assert chain.state.get_balance(v2) == utils.denoms.finney * 10
def test_mine_block_with_transaction2(db):
    """Mining a tx to one's own coinbase nets one block reward minus the tx value.

    Fix: removed a literally duplicated ``assert tx in blk2.transactions``.
    """
    k, v, k2, v2 = accounts()
    chain = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    genesis_hash = chain.state.prev_headers[0].hash
    tx = get_transaction()
    blk2 = mine_next_block(chain, coinbase=v, transactions=[tx])
    assert tx in blk2.transactions
    assert chain.get_block(blk2.hash) == blk2
    assert tx.gasprice == 0
    # v both sends the tx value and earns the block reward
    assert chain.state.get_balance(
        v) == chain.env.config['BLOCK_REWARD'] + chain.mk_poststate_of_blockhash(genesis_hash).get_balance(v) - tx.value
def test_mine_block_with_transaction3(db):
    """Balances reflect a single mined 10-finney transfer."""
    k, v, k2, v2 = accounts()
    chain = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    transfer = get_transaction()
    mined = mine_next_block(chain, transactions=[transfer])
    assert transfer in mined.transactions
    assert chain.state.get_balance(v) == utils.denoms.finney * 990
    assert chain.state.get_balance(v2) == utils.denoms.finney * 10
def test_transaction(db):
    """Applying a transaction directly to the state moves the value."""
    k, v, k2, v2 = accounts()
    chain = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    mined = mine_next_block(chain)
    transfer = get_transaction()
    # the tx was created after mining, so it cannot be in the block
    assert transfer not in mined.transactions
    messages.apply_transaction(chain.state, transfer)
    assert chain.state.get_balance(v) == utils.denoms.finney * 990
    assert chain.state.get_balance(v2) == utils.denoms.finney * 10
def test_transaction_serialization():
    """A transaction survives an RLP encode/decode round-trip and is hashable.

    Fix: removed the duplicated ``assert tx in set([tx])`` and used a set
    literal.
    """
    k, v, k2, v2 = accounts()
    tx = get_transaction()
    assert tx in {tx}
    assert tx.hash == rlp.decode(rlp.encode(tx), transactions.Transaction).hash
def test_invalid_transaction(db):
    """A tx from an unfunded sender is rejected and changes no balances."""
    k, v, k2, v2 = accounts()
    # only the *recipient* is funded; the sender (v) has nothing
    chain = Chain({v2: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    rejected = get_transaction()
    mined = mine_next_block(chain, transactions=[rejected])
    assert rejected not in mined.transactions
    assert chain.state.get_balance(v) == 0
    assert chain.state.get_balance(v2) == utils.denoms.ether * 1
def test_prevhash(db):
    """After one mined block, hashes exist for blocks 0 and 1 but not 2.

    Fix: dropped the unused local binding ``L1``.
    """
    chain = Chain({}, difficulty=1)
    mine_on_chain(chain)
    assert chain.state.get_block_hash(0) != b'\x00' * 32
    assert chain.state.get_block_hash(1) != b'\x00' * 32
    assert chain.state.get_block_hash(2) == b'\x00' * 32
def test_genesis_chain(db):
    """A single mined block becomes the head of a fresh chain.

    Fix: removed the leftover debug ``print('blook', blk)``.
    """
    k, v, k2, v2 = accounts()
    chain = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    blk = mine_on_chain(chain)
    assert chain.has_block(blk.hash)
    assert blk.hash in chain
    assert chain.get_block(blk.hash) == blk
    assert chain.head == blk
    assert chain.get_children(blk) == []
    assert chain.get_chain() == [blk]
    assert chain.get_block_by_number(1) == blk
    assert not chain.get_block_by_number(2)
def test_simple_chain(db):
    """A two-block chain exposes lookup, children, ordering and tx position."""
    k, v, k2, v2 = accounts()
    chain = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    tx = get_transaction()
    blk2 = mine_next_block(chain, transactions=[tx])
    blk3 = mine_next_block(chain)
    assert blk2.hash in chain
    assert blk3.hash in chain
    assert chain.has_block(blk2.hash)
    assert chain.has_block(blk3.hash)
    assert chain.get_block(blk2.hash) == blk2
    assert chain.get_block(blk3.hash) == blk3
    assert chain.head == blk3
    assert chain.get_children(blk2) == [blk3]
    assert chain.get_chain() == [blk2, blk3]
    assert chain.get_block_by_number(1) == blk2
    assert chain.get_block_by_number(2) == blk3
    assert not chain.get_block_by_number(3)
    # (block number, index within block) of the mined transaction
    assert chain.get_tx_position(tx.hash) == (blk2.number, 0)
def test_add_side_chain(db, alt_db):
    """Importing a *shorter* remote chain does not displace the local head.

    Local:  L0, L1, L2
    Remote: R0, R1

    Fix: docstring opened with a malformed ``\"\"\"\"`` (stray fourth quote).
    """
    k, v, k2, v2 = accounts()
    # Remote: mine one block
    chainR = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    tx0 = get_transaction(nonce=0)
    R1 = mine_next_block(chainR, transactions=[tx0])
    assert tx0.hash in [x.hash for x in R1.transactions]
    # Local: mine two blocks
    chainL = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    tx0 = get_transaction(nonce=0)
    L1 = mine_next_block(chainL, transactions=[tx0])
    tx1 = get_transaction(nonce=1)
    L2 = mine_next_block(chainL, transactions=[tx1])
    # receive serialized remote blocks, newest first
    rlp_blocks = [rlp.encode(R1)]
    for rlp_block in rlp_blocks:
        block = rlp.decode(rlp_block, Block)
        chainL.add_block(block)
    # the local (longer) chain keeps the head
    assert L2.hash in chainL
    assert chainL.head == L2
def test_add_longer_side_chain(db, alt_db):
    """Importing a *longer* remote chain replaces the local head.

    Local:  L0, L1, L2
    Remote: R0, R1, R2, R3

    Fix: docstring opened with a malformed ``\"\"\"\"`` (stray fourth quote).
    """
    k, v, k2, v2 = accounts()
    # Remote: mine three blocks
    chainR = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    remote_blocks = []
    for i in range(3):
        tx = get_transaction(nonce=i)
        blk = mine_next_block(chainR, transactions=[tx])
        remote_blocks.append(blk)
    # Local: mine two blocks
    chainL = Chain({v: {"balance": utils.denoms.ether * 1}}, difficulty=1)
    tx0 = get_transaction(nonce=0)
    L1 = mine_next_block(chainL, transactions=[tx0])
    tx1 = get_transaction(nonce=1)
    L2 = mine_next_block(chainL, transactions=[tx1])
    # receive serialized remote blocks, newest first
    rlp_blocks = [rlp.encode(x) for x in remote_blocks]
    for rlp_block in rlp_blocks:
        block = rlp.decode(rlp_block, Block)
        chainL.add_block(block)
    # the longer remote chain wins
    assert chainL.head == remote_blocks[-1]
def test_reward_uncles(db):
    """
    B0 B1 B2
    B0 Uncle
    We raise the block's coinbase account by Rb, the block reward,
    and also add uncle and nephew rewards
    """
    k, v, k2, v2 = accounts()
    chain = Chain({}, difficulty=1)
    blk0 = mine_on_chain(chain, coinbase=decode_hex('0' * 40))
    local_coinbase = decode_hex('1' * 40)
    uncle_coinbase = decode_hex('2' * 40)
    # Mine the uncle (a sibling of blk1, both children of blk0)
    uncle = mine_on_chain(chain, blk0, coinbase=uncle_coinbase)
    assert chain.state.get_balance(
        uncle_coinbase) == 1 * chain.env.config['BLOCK_REWARD']
    # Mine the first block in the "intended main chain"
    blk1 = mine_on_chain(chain, blk0, coinbase=local_coinbase)
    # next block should reward uncles
    blk2 = mine_on_chain(chain, blk1, coinbase=local_coinbase)
    assert blk1.hash in chain
    assert uncle.header.hash in [u.hash for u in blk2.uncles]
    assert chain.head == blk2
    assert chain.get_chain() == [blk0, blk1, blk2]
    # local coinbase: two block rewards plus one nephew reward
    assert chain.state.get_balance(local_coinbase) == \
        2 * chain.env.config['BLOCK_REWARD'] + \
        chain.env.config['NEPHEW_REWARD']
    # uncle coinbase: 7/8 of a block reward (per the 7 // 8 factor below)
    assert chain.state.get_balance(
        uncle_coinbase) == chain.env.config['BLOCK_REWARD'] * 7 // 8
def test_genesis_from_state_snapshot():
    """
    Test that a Chain can be initialized from a State snapshot
    """
    # Customize a state
    k, v, k2, v2 = accounts()
    alloc = {v: {"balance": utils.denoms.ether * 1}}
    state = mk_basic_state(alloc, None)
    state.block_difficulty = 1
    # Initialize another chain from state.to_snapshot()
    genesis = state.to_snapshot()
    new_chain = Chain(genesis=genesis)
    # trie root, difficulty and head number must survive the round-trip
    assert new_chain.state.trie.root_hash == state.trie.root_hash
    assert new_chain.state.block_difficulty == state.block_difficulty
    assert new_chain.head.number == state.block_number
# TODO ##########################################
#
# test for remote block with invalid transaction
# test for multiple transactions from same address received
# in arbitrary order mined in the same block
# test_db = None
# test_transfer = None
# test_failing_transfer = None
# test_transient_block = None
# test_genesis = None
# test_deserialize = None
# test_deserialize_commit = None
# test_genesis_db = None
# test_mine_block = None
# test_mine_block_with_transaction = None
# test_block_serialization_with_transaction_empty_genesis = None
# test_mine_block_with_transaction = None
# test_block_serialization_same_db = None
# test_block_serialization_other_db = None
# test_block_serialization_with_transaction_other_db = None
# test_transaction = None
# test_transaction_serialization = None
# test_mine_block_with_transaction = None
# test_invalid_transaction = None
# test_prevhash = None
# test_genesis_chain = None
# test_simple_chain = None
# test_add_side_chain = None
# test_add_longer_side_chain = None
# test_reward_uncles = None
|
<reponame>skandupmanyu/facet
"""
Projection of SHAP contribution scores (i.e, SHAP importance) of all possible
pairings of features onto the SHAP importance vector in partitions of for synergy,
redundancy, and independence.
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Iterable, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
from pytools.api import AllTracker, inheritdoc
from pytools.fit import FittableMixin
from ._shap import ShapCalculator
log = logging.getLogger(__name__)
__all__ = [
"AffinityMatrix",
"ShapContext",
"ShapGlobalExplainer",
"ShapInteractionGlobalExplainer",
"ShapInteractionValueContext",
"ShapValueContext",
"cov",
"cov_broadcast",
"diagonal",
"ensure_last_axis_is_fast",
"fill_diagonal",
"make_symmetric",
"sqrt",
"transpose",
]
#: if ``True``, optimize numpy arrays to ensure pairwise partial summation.
#: But given that we will add floats of the same order of magnitude and only up
#: to a few thousand of them in the base case, the loss of accuracy with regular
#: (sequential) summation will be negligible in practice
_PAIRWISE_PARTIAL_SUMMATION = False
#
# Type variables
#
T_Self = TypeVar("T_Self")
T_ShapCalculator = TypeVar("T_ShapCalculator", bound=ShapCalculator)
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# Class definitions
#
class AffinityMatrix:
"""
Stores all variations of a feature affinity matrix.
"""
# shape: (2, 2, n_outputs, n_features, n_features)
_matrices: np.ndarray
# shape: (2, 2, n_outputs, n_features, n_features)
_matrices_std: Optional[np.ndarray]
def __init__(
self, matrices: np.ndarray, matrices_std: Optional[np.ndarray] = None
) -> None:
shape = matrices.shape
assert len(shape) == 5
assert shape[:2] == (2, 2)
assert shape[3] == shape[4]
assert matrices_std is None or matrices_std.shape == matrices.shape
self._matrices = matrices
self._matrices_std = matrices_std
@staticmethod
def from_relative_affinity(
affinity_rel_ij: np.ndarray, std_p_i: np.ndarray
) -> "AffinityMatrix":
"""
:param affinity_rel_ij: the affinity matrix from which to create all variations,
shaped `(n_outputs, n_features, n_features)`
:param std_p_i: SHAP vector magnitudes for all outputs and features,
shaped `(n_outputs, n_features, 1)`
"""
assert affinity_rel_ij.ndim == 3
assert std_p_i.ndim == 3
assert affinity_rel_ij.shape[:2] == std_p_i.shape[:2]
assert affinity_rel_ij.shape[1] == affinity_rel_ij.shape[2]
assert std_p_i.shape[2] == 1
# normalize SHAP vector magnitudes to get feature importance in %
importance_ij = std_p_i / std_p_i.sum(axis=1).reshape(std_p_i.shape[0], 1, 1)
# absolute affinity is relative affinity scaled by feature importance (row-wise)
affinity_abs_ij = importance_ij * affinity_rel_ij
# absolute symmetrical affinity is the mean of unilateral absolute affinity
affinity_abs_sym_ij_2x = affinity_abs_ij + transpose(affinity_abs_ij)
# relative symmetrical affinity is absolute symmetrical affinity scaled back
# from total feature importance per feature pair
affinity_rel_sym_ij = np.zeros(affinity_rel_ij.shape)
np.divide(
affinity_abs_sym_ij_2x,
importance_ij + transpose(importance_ij),
out=affinity_rel_sym_ij,
# do not divide where the nominator is 0 (the denominator will be 0 as well)
where=affinity_abs_sym_ij_2x > 0.0,
)
# re-set the diagonal to 1.0 in case of rounding errors
fill_diagonal(affinity_rel_sym_ij, 1.0)
# return the AffinityMatrices object
return AffinityMatrix(
matrices=np.vstack(
(
affinity_rel_ij,
affinity_abs_ij,
affinity_rel_sym_ij,
affinity_abs_sym_ij_2x / 2,
)
).reshape((2, 2, *affinity_rel_ij.shape))
)
@staticmethod
def aggregate(affinity_matrices: Iterable["AffinityMatrix"]) -> "AffinityMatrix":
"""
Aggregate several sets of affinity matrices (obtained from different splits)
into one, by calculating the mean and standard deviation for each value in the
provided iterable of affinity matrices.
:param affinity_matrices: sets of affinity matrices to aggregate
:return: the aggregated set of affinity matrices
"""
matrix_values = np.stack(
tuple(affinity_matrix._matrices for affinity_matrix in affinity_matrices)
)
return AffinityMatrix(
matrices=matrix_values.mean(axis=0), matrices_std=matrix_values.std(axis=0)
)
def get_values(
    self, symmetrical: bool, absolute: bool, std: bool
) -> Optional[np.ndarray]:
    """
    Get the matrix matching the given criteria.

    :param symmetrical: if ``True``, get the symmetrical version of the matrix
    :param absolute: if ``True``, get the absolute version of the matrix
    :param std: if ``True``, return standard deviations instead of (mean) values;
        return ``None`` if only a single affinity matrix had been calculated and
        thus the standard deviation is not known
    :return: the affinity matrix
    """
    # the four matrix variants are indexed by (symmetrical?, absolute?)
    selector = (int(symmetrical), int(absolute))
    if not std:
        return self._matrices[selector]
    # standard deviations exist only if several matrices were aggregated
    return None if self._matrices_std is None else self._matrices_std[selector]
@inheritdoc(match="""[see superclass]""")
class ShapGlobalExplainer(FittableMixin[ShapCalculator], metaclass=ABCMeta):
    """
    Derives feature association as a global metric of SHAP values for multiple
    observations.
    """

    def __init__(self) -> None:
        super().__init__()
        # index of the features seen by the fitted SHAP calculator;
        # None while this explainer is not fitted
        self.feature_index_: Optional[pd.Index] = None

    @property
    def is_fitted(self) -> bool:
        """[see superclass]"""
        return self.feature_index_ is not None

    def fit(self: T_Self, shap_calculator: ShapCalculator, **fit_params: Any) -> T_Self:
        """
        Calculate the SHAP decomposition for the shap values produced by the
        given SHAP calculator.

        :param shap_calculator: the fitted calculator from which to get the shap values
        :return: ``self``
        :raise ValueError: if any fit parameters are passed (none are supported)
        """
        self: ShapGlobalExplainer  # support type hinting in PyCharm

        try:
            if len(fit_params) > 0:
                # fix: report the names of the unsupported parameters; the previous
                # code joined fit_params.values(), which listed the argument values
                # instead of the parameter names and raised TypeError for
                # non-string values
                raise ValueError(
                    f'unsupported fit parameters: {", ".join(fit_params)}'
                )
            self._fit(shap_calculator=shap_calculator)
            self.feature_index_ = shap_calculator.feature_index_

        except Exception:
            # reset fit in case we get an exception along the way
            self._reset_fit()
            raise

        return self

    @abstractmethod
    def association(
        self, absolute: bool, symmetrical: bool, std: bool = False
    ) -> Optional[np.ndarray]:
        """
        The association matrix for all feature pairs.

        Raises an error if this global explainer has not been fitted.

        :param absolute: if ``False``, return relative association as a percentage of
            total feature importance;
            if ``True``, return absolute association as a portion of feature importance
        :param symmetrical: if ``False``, return an asymmetrical matrix
            quantifying unilateral association of the features represented by rows
            with the features represented by columns;
            if ``True``, return a symmetrical matrix quantifying mutual association
        :param std: if ``True``, return a matrix of estimated standard deviations
            instead of (mean) values; return ``None`` if the matrix was determined
            from a single model and thus no standard deviation could be estimated
        :returns: the matrix as an array of shape (n_outputs, n_features, n_features)
        """

    def to_frames(self, matrix: np.ndarray) -> List[pd.DataFrame]:
        """
        Transforms one or more affinity matrices into a list of data frames.

        :param matrix: an array of shape `(n_outputs, n_features, n_features)`,
            representing one or more affinity matrices
        :return: a list of `n_outputs` data frames of shape `(n_features, n_features)`
        """
        index = self.feature_index_
        n_features = len(index)

        assert matrix.ndim == 3
        assert matrix.shape[1:] == (n_features, n_features)

        # one frame per output, rows and columns both labelled by feature
        return [
            pd.DataFrame(m, index=index, columns=index)
            for m in matrix
        ]

    @abstractmethod
    def _fit(self, shap_calculator: ShapCalculator) -> None:
        """Fit this explainer from the given (already fitted) SHAP calculator."""

    def _reset_fit(self) -> None:
        # revert this explainer to its unfitted state
        self.feature_index_ = None
class ShapInteractionGlobalExplainer(ShapGlobalExplainer, metaclass=ABCMeta):
    """
    Derives feature association, synergy, and redundancy as a global metric of SHAP
    interaction values for multiple observations.

    Subclasses implement :meth:`synergy` and :meth:`redundancy` in addition to the
    inherited :meth:`.ShapGlobalExplainer.association`.
    """

    @abstractmethod
    def synergy(
        self, symmetrical: bool, absolute: bool, std: bool = False
    ) -> Optional[np.ndarray]:
        """
        The synergy matrix for all feature pairs.

        Raises an error if this global explainer has not been fitted.

        :param absolute: if ``False``, return relative synergy as a percentage of
            total feature importance;
            if ``True``, return absolute synergy as a portion of feature importance
        :param symmetrical: if ``False``, return an asymmetrical matrix
            quantifying unilateral synergy of the features represented by rows
            with the features represented by columns;
            if ``True``, return a symmetrical matrix quantifying mutual synergy
        :param std: if ``True``, return a matrix of estimated standard deviations
            instead of (mean) values; return ``None`` if the matrix was determined
            from a single model and thus no standard deviation could be estimated
        :returns: the matrix as an array of shape (n_outputs, n_features, n_features)
        """

    @abstractmethod
    def redundancy(
        self, symmetrical: bool, absolute: bool, std: bool = False
    ) -> Optional[np.ndarray]:
        """
        The redundancy matrix for all feature pairs.

        Raises an error if this global explainer has not been fitted.

        :param absolute: if ``False``, return relative redundancy as a percentage of
            total feature importance;
            if ``True``, return absolute redundancy as a portion of feature importance
        :param symmetrical: if ``False``, return an asymmetrical matrix
            quantifying unilateral redundancy of the features represented by rows
            with the features represented by columns;
            if ``True``, return a symmetrical matrix quantifying mutual redundancy
        :param std: if ``True``, return a matrix of estimated standard deviations
            instead of (mean) values; return ``None`` if the matrix was determined
            from a single model and thus no standard deviation could be estimated
        :returns: the matrix as an array of shape (n_outputs, n_features, n_features)
        """
#
# Utility functions
#
def ensure_last_axis_is_fast(array: np.ndarray) -> np.ndarray:
    """
    For future implementations, ensure that the last axis of the given array is
    `fast` to allow for `partial summation`.

    This will be relevant once ``np.matmul`` and ``np.einsum`` support partial
    summation.

    :param array: a numpy array
    :return: an equivalent array where the last axis is guaranteed to be `fast`
    """
    # a no-op unless partial summation has been enabled module-wide
    if not _PAIRWISE_PARTIAL_SUMMATION:
        return array

    # a contiguous last axis has stride == item size; copy to enforce this
    if array.strides[-1] != array.itemsize:
        array = array.copy()
    assert array.strides[-1] == array.itemsize
    return array
def sqrt(array: np.ndarray) -> np.ndarray:
    """
    Get the square root of each element in the given array.

    Negative values are replaced by `0` before calculating the square root, to
    prevent errors from minimally negative values due to rounding errors.

    :param array: an arbitrary array
    :return: array of same shape as arg ``array``, with all values replaced by
        their square root
    """
    # clip tiny negative rounding artefacts to 0 so np.sqrt never yields NaN
    clipped = np.clip(array, a_min=0, a_max=None)
    return np.sqrt(clipped)
def make_symmetric(m: np.ndarray) -> np.ndarray:
    """
    Enforce matrix symmetry by transposing the `feature x feature` matrix for each
    output and averaging it with the original matrix.

    :param m: array of shape `(n_outputs, n_features, n_features)`
    :return: array of shape `(n_outputs, n_features, n_features)` with `n_outputs`
        symmetrical `feature x feature` matrices
    """
    m_t = transpose(m)
    # the average of a matrix and its transpose is symmetric by construction
    return (m + m_t) / 2
def transpose(m: np.ndarray, ndim: int = 3) -> np.ndarray:
    """
    Transpose the `feature x feature` matrix for each output by swapping the two
    feature axes (axes 1 and 2).

    Supports matrices with identical values per row, represented as a broadcastable
    `numpy` array of shape `(n_features, 1)`.

    :param m: array of shape `(n_outputs, n_features, n_features)`
        or shape `(n_outputs, n_features, n_features, n_observations)`
        or shape `(n_outputs, n_features, 1)`
        or shape `(n_outputs, n_features, 1, n_observations)`
    :param ndim: expected dimensions of ``m`` for validation purposes
    :return: array of same shape as arg ``m``, with both feature axes swapped
    """
    assert m.ndim == ndim
    # axis 2 is either square with axis 1, or a broadcastable singleton
    assert m.shape[2] in (m.shape[1], 1)
    return np.swapaxes(m, 1, 2)
def diagonal(m: np.ndarray) -> np.ndarray:
    """
    Get the diagonal of the `feature x feature` matrix for each output.

    :param m: array of shape `(n_outputs, n_features, n_features)`
    :return: array of shape `(n_outputs, n_features)`, with the diagonals of
        arg ``m``
    """
    # one square matrix per output
    assert m.ndim == 3 and m.shape[1] == m.shape[2]
    return m.diagonal(axis1=1, axis2=2)
def fill_diagonal(m: np.ndarray, value: Union[float, np.ndarray]) -> None:
    """
    Fill, in place, the diagonal of the `feature x feature` matrix for each output
    with the given value.

    :param m: array of shape `(n_outputs, n_features, n_features)`
    :param value: scalar, or array of shape `(n_outputs, n_features)` providing
        one diagonal per output
    """
    assert m.ndim == 3 and m.shape[1] == m.shape[2]
    if not isinstance(value, np.ndarray):
        # same scalar on every output's diagonal
        for matrix in m:
            np.fill_diagonal(matrix, value, wrap=True)
    else:
        # one row of diagonal values per output
        assert value.ndim == 2 and value.shape[:2] == m.shape[:2]
        for matrix, diag in zip(m, value):
            np.fill_diagonal(matrix, diag, wrap=True)
def cov(vectors: np.ndarray, weight: Optional[np.ndarray]) -> np.ndarray:
    """
    Calculate the covariance matrix of pairs of vectors along the observations axis
    and for each output, assuming all vectors are centered (µ=0).

    :param vectors: a sequence of `n_features` vectors per output,
        shaped `(n_outputs, n_features, n_observations)`
    :param weight: an optional array with weights per observation,
        shaped `(n_observations)`
    :return: covariance matrices for each output,
        shaped `(n_outputs, n_features, n_features)`
    """
    assert vectors.ndim == 3
    assert weight is None or vectors.shape[2:] == weight.shape

    if _PAIRWISE_PARTIAL_SUMMATION:
        raise NotImplementedError("max precision matmul not yet implemented")

    # with µ=0 the covariance reduces to a weighted inner product, normalized by
    # the total weight (or the plain observation count when unweighted)
    if weight is None:
        weighted, total = vectors, vectors.shape[2]
    else:
        weighted = vectors * weight.reshape((1, 1, -1))
        total = weight.sum()

    return np.matmul(weighted, vectors.swapaxes(1, 2)) / total
def cov_broadcast(
    vector_sequence: np.ndarray,
    vector_grid: np.ndarray,
    weight: Optional[np.ndarray],
) -> np.ndarray:
    """
    Calculate the covariance matrix between a sequence of vectors and a grid of
    vectors along the observations axis and for each output, assuming all vectors
    are centered (µ=0).

    :param vector_sequence: a sequence of `n_features` vectors per output,
        shaped `(n_outputs, n_features, n_observations)`
    :param vector_grid: a grid of `n_features x n_features` vectors per output,
        shaped `(n_outputs, n_features, n_features, n_observations)`
    :param weight: an optional array with weights per observation,
        shaped `(n_observations)`
    :return: covariance matrices for each output,
        shaped `(n_outputs, n_features, n_features)`
    """
    assert vector_sequence.ndim == 3
    assert vector_grid.ndim == 4
    # fix: the assertion message is printed when the check FAILS, so it must say
    # the shapes are INcompatible (previously claimed they "are compatible")
    assert (
        tuple(vector_sequence.shape[i] for i in (0, 1, 1, 2)) == vector_grid.shape
    ), f"shapes {vector_sequence.shape} and {vector_grid.shape} are incompatible"
    assert weight is None or vector_sequence.shape[2:] == weight.shape

    if _PAIRWISE_PARTIAL_SUMMATION:
        raise NotImplementedError(
            "max precision Einstein summation not yet implemented"
        )

    # with µ=0 the covariance reduces to a weighted inner product, normalized by
    # the total weight (or the plain observation count when unweighted)
    if weight is None:
        vectors_weighted = vector_sequence
        weight_total = vector_sequence.shape[2]
    else:
        vectors_weighted = vector_sequence * weight.reshape((1, 1, -1))
        weight_total = weight.sum()

    # contract the observations axis of the sequence against each grid cell
    return (
        np.einsum("...io,...ijo->...ij", vectors_weighted, vector_grid) / weight_total
    )
class ShapContext(metaclass=ABCMeta):
    """
    Contextual data for global SHAP calculations.
    """

    #: SHAP vectors,
    #: with shape `(n_outputs, n_features, n_observations)`
    p_i: np.ndarray

    #: observation weights (optional),
    #: with shape `(n_observations)`
    weight: Optional[np.ndarray]

    #: covariance matrix for p[i],
    #: with shape `(n_outputs, n_features, n_features)`
    cov_p_i_p_j: np.ndarray

    #: variances for p[i],
    #: with shape `(n_outputs, n_features, 1)`
    var_p_i: np.ndarray

    #: SHAP interaction vectors,
    #: with shape `(n_outputs, n_features, n_features, n_observations)`
    p_ij: Optional[np.ndarray]

    def __init__(
        self,
        p_i: np.ndarray,
        p_ij: Optional[np.ndarray],
        weight: Optional[np.ndarray],
    ) -> None:
        assert p_i.ndim == 3
        if weight is not None:
            # weights must align with the observations axis of p_i
            assert weight.ndim == 1
            assert p_i.shape[2] == len(weight)

        self.p_i = p_i
        self.p_ij = p_ij
        self.weight = weight

        # covariance matrix of the SHAP vectors,
        # shape: (n_outputs, n_features, n_features)
        covariance = cov(p_i, weight)
        self.cov_p_i_p_j = covariance

        # variances of the SHAP vectors (the covariance diagonal), with a trailing
        # singleton axis added so the result broadcasts correctly;
        # shape: (n_outputs, n_features, 1)
        self.var_p_i = diagonal(covariance)[:, :, np.newaxis]
class ShapValueContext(ShapContext):
    """
    Contextual data for global SHAP calculations based on SHAP values.
    """

    def __init__(self, shap_calculator: ShapCalculator, split_id: int) -> None:
        # SHAP values for the given split: one row per observation,
        # one column per (output, feature) pair
        shap_values: pd.DataFrame = shap_calculator.get_shap_values(
            aggregation=None
        ).xs(split_id, level=0)

        n_outputs = len(shap_calculator.output_names_)
        n_features = len(shap_calculator.feature_index_)
        n_observations = len(shap_values)

        # p[i]: the vector of SHAP values for every output and feature,
        # shape (n_outputs, n_features, n_observations); the last (observations)
        # axis is made fast for numerically precise summation
        p_i = ensure_last_axis_is_fast(
            np.transpose(
                shap_values.values.reshape((n_observations, n_outputs, n_features)),
                axes=(1, 2, 0),
            )
        )

        # optional per-observation weights, shape (n_observations),
        # aligned with the observations axis of the SHAP values tensor
        weight_sr = shap_calculator.sample_.weight
        weight = (
            None
            if weight_sr is None
            else weight_sr.loc[shap_values.index.get_level_values(-1)].values
        )

        super().__init__(p_i=p_i, p_ij=None, weight=weight)
class ShapInteractionValueContext(ShapContext):
    """
    Contextual data for global SHAP calculations based on SHAP interaction values.
    """

    def __init__(self, shap_calculator: ShapCalculator, split_id: int) -> None:
        # SHAP interaction values for the given split, as a frame of shape
        # (n_observations * n_features, n_outputs * n_features)
        shap_values: pd.DataFrame = shap_calculator.get_shap_interaction_values(
            aggregation=None
        ).xs(split_id, level=0)

        n_features: int = len(shap_calculator.feature_index_)
        n_outputs: int = len(shap_calculator.output_names_)
        # each observation contributes n_features rows to the interaction frame
        n_observations: int = len(shap_values) // n_features

        assert shap_values.shape == (
            n_observations * n_features,
            n_outputs * n_features,
        )
        # shape of one set of feature-by-feature matrices
        self.matrix_shape = (n_outputs, n_features, n_features)

        # weights
        # shape: (n_observations)
        # return a 1d array of weights that aligns with the observations axis of the
        # SHAP values tensor (axis 1)
        weight: Optional[np.ndarray]
        _weight_sr = shap_calculator.sample_.weight
        if _weight_sr is not None:
            # each observation spans n_features consecutive rows; take the
            # observation id from the first row of each such group
            _observation_indices = shap_values.index.get_level_values(
                -2
            ).values.reshape((n_observations, n_features))[:, 0]
            weight = ensure_last_axis_is_fast(
                _weight_sr.loc[_observation_indices].values
            )
        else:
            weight = None

        # p[i, j]
        # shape: (n_outputs, n_features, n_features, n_observations)
        # the vector of interaction values for every output and feature pairing
        # for improved numerical precision, we ensure the last axis is the fast axis
        # i.e. stride size equals item size (see documentation for numpy.sum)
        p_ij = ensure_last_axis_is_fast(
            np.transpose(
                shap_values.values.reshape(
                    (n_observations, n_features, n_outputs, n_features)
                ),
                axes=(2, 1, 3, 0),
            )
        )

        # p[i]
        # shape: (n_outputs, n_features, n_observations)
        # summing the interaction vectors over axis 2 recovers the per-feature
        # SHAP vectors
        super().__init__(
            p_i=ensure_last_axis_is_fast(p_ij.sum(axis=2)),
            p_ij=ensure_last_axis_is_fast(
                self.__get_orthogonalized_interaction_vectors(p_ij=p_ij, weight=weight)
            ),
            weight=weight,
        )

    @staticmethod
    def __get_orthogonalized_interaction_vectors(
        p_ij: np.ndarray, weight: Optional[np.ndarray]
    ) -> np.ndarray:
        # Orthogonalize the interaction vectors against the main-effect vectors.
        # p_ij: shape: (n_outputs, n_features, n_features, n_observations)

        assert p_ij.ndim == 4
        n_features = p_ij.shape[1]
        assert p_ij.shape[2] == n_features

        # p[i, i]
        # shape: (n_outputs, n_features, n_observations)
        # independent feature contributions;
        # this is the diagonal of p[i, j], i.e., the main effects p[i, i]
        p_ii = p_ij.diagonal(axis1=1, axis2=2).swapaxes(1, 2)

        # cov[p[i, i], p[j, j]]
        # shape: (n_outputs, n_features, n_features)
        # covariance matrix of the main effects p[i, i]
        cov_p_ii_p_jj = cov(p_ii, weight=weight)

        # var[p[i, i]]
        # shape: (n_outputs, n_features, 1)
        # variance of the main effects p[i, i] as a broadcastable matrix where each
        # column is identical
        var_p_ii = diagonal(cov_p_ii_p_jj)[:, :, np.newaxis]

        # var[p[j, j]]
        # shape: (n_outputs, 1, n_features)
        # variance of the main effects p[j, j] as a broadcastable matrix where each
        # row is identical
        var_p_jj = transpose(var_p_ii)

        # cov[p[i, i], p[i, j]]
        # shape: (n_outputs, n_features, n_features)
        # covariance matrix of the main effects p[i, i] with interaction effects p[i, j]
        cov_p_ii_p_ij = cov_broadcast(p_ii, p_ij, weight=weight)

        # adjustment_factors[i, j]
        # shape: (n_outputs, n_features, n_features)
        # multiple of p[i, i] to be subtracted from p[i, j] and added to p[i, i]
        # to orthogonalize the SHAP interaction vectors
        _nominator = cov_p_ii_p_jj * transpose(cov_p_ii_p_ij) - cov_p_ii_p_ij * var_p_jj
        # the diagonal is meaningless here (a vector against itself); zero it out
        fill_diagonal(_nominator, 0.0)
        _denominator = cov_p_ii_p_jj ** 2 - var_p_ii * var_p_jj
        # The denominator is <= 0 due to the Cauchy-Schwarz inequality.
        # It is 0 only if the variance of p_ii or p_jj are zero (i.e., no main effect).
        # In that fringe case, the numerator will also be zero and we set the adjustment
        # factor to 0 (intuitively, there is nothing to adjust in a zero-length vector)
        adjustment_factors_ij = np.zeros(_nominator.shape)
        # todo: prevent catastrophic cancellation where numerator/denominator are ~0.0
        np.divide(
            _nominator,
            _denominator,
            out=adjustment_factors_ij,
            # divide only where the denominator is strictly negative (see note above)
            where=_denominator < 0.0,
        )

        # the diagonal never contributes to the correction below; mark it NaN
        fill_diagonal(adjustment_factors_ij, np.nan)

        # delta[i, j]: the main-effect component to remove from each interaction
        # vector (and, transposed, from its mirror image)
        delta_ij = (
            adjustment_factors_ij[:, :, :, np.newaxis] * p_ii[:, :, np.newaxis, :]
        )
        return p_ij - delta_ij - transpose(delta_ij, ndim=4)
# Project-internal API tracker: verifies that all public symbols defined above are
# exported consistently (presumably an AllTracker instance created earlier in this
# module — confirm upstream).
__tracker.validate()
|
# Copyright (c) 2010-2013, Regents of the University of California.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import logging
import os
import sys
import threading
from openvisualizer.opentun.opentun import OpenTun
from openvisualizer.utils import format_buf, format_crash_message, format_critical_message, format_ipv6_addr
log = logging.getLogger('OpenTunMacOS')
log.setLevel(logging.ERROR)
log.addHandler(logging.NullHandler())
# ============================ helper classes ==================================
class TunReadThread(threading.Thread):
    """
    Thread which continuously reads input from a TUN interface.

    When data is received from the interface, it calls a callback configured during
    instantiation.
    """

    # maximum number of bytes requested per os.read() call on the TUN device
    ETHERNET_MTU = 1500
    # fixed size of the IPv6 header, in bytes
    IPv6_HEADER_LENGTH = 40

    def __init__(self, tun_if, callback):
        """
        :param tun_if: file descriptor of the opened TUN device
        :param callback: callable invoked with each complete IPv6 packet (bytes)
        """
        # store params
        self.tun_if = tun_if
        self.callback = callback

        # local variables
        # cleared by close() to request thread termination
        self.goOn = True

        # initialize parent
        super(TunReadThread, self).__init__()

        # give this thread a name
        self.name = 'TunReadThread'

        # start myself
        self.start()

    def run(self):
        # NOTE(review): any exception aborts the loop and calls sys.exit(1), which
        # terminates only this thread, not the whole process.
        try:
            while self.goOn:
                # wait for data; this call blocks, so close() only takes effect
                # after the next packet arrives
                p = os.read(self.tun_if, self.ETHERNET_MTU)

                # debug info
                log.debug('packet captured on tun interface: {0}'.format(format_buf(p)))

                # make sure it's an IPv6 packet (i.e., starts with 0x6x)
                if (p[0] & 0xf0) != 0x60:
                    continue

                # os.read may return up to ETHERNET_MTU bytes; cut at the length of
                # the IPv6 packet (header + payload length from header bytes 4-5)
                p = p[:self.IPv6_HEADER_LENGTH + 256 * p[4] + p[5]]

                # call the callback
                self.callback(p)
        except Exception as err:
            err_msg = format_crash_message(self.name, err)
            log.critical(err_msg)
            sys.exit(1)

    # ======================== public ==========================================

    def close(self):
        # request the read loop to stop (takes effect after the next packet)
        self.goOn = False
# ============================ main class ======================================
@OpenTun.record_os('darwin')
class OpenTunMACOS(OpenTun):
    """ Class which interfaces between a TUN virtual interface and an EventBus. """

    # insert 4-octet virtual interface ID for compatibility (it'll be discarded
    # by the receiving side)
    VIRTUAL_TUN_ID = [0x00, 0x00, 0x86, 0xdd]

    IFF_TUN = 0x0001
    # NOTE(review): this is the Linux TUNSETIFF ioctl number and appears unused in
    # this macOS implementation — confirm before removing
    TUN_SET_IFF = 0x400454ca

    def __init__(self):
        # log
        log.debug("create instance")

        # initialize parent class
        super(OpenTunMACOS, self).__init__()

    # ======================== public ==========================================

    # ======================== private =========================================

    def _v6_to_internet_notif(self, sender, signal, data):
        """
        Called when receiving data from the EventBus.

        This function forwards the data to the TUN interface.

        :param sender: EventBus sender (used for logging only)
        :param signal: EventBus signal name (used for logging only)
        :param data: the IPv6 packet as a sequence of byte values (0-255)
        """
        try:
            # write over tuntap interface.
            # fix: write the raw octets directly; the previous implementation
            # round-tripped the packet through ''.join(chr(b) ...) followed by
            # .encode('utf-8'), which corrupted any octet >= 0x80 by expanding it
            # into a two-byte UTF-8 sequence
            os.write(self.tun_if, bytes(data))
            log.debug("data dispatched to tun correctly {0}, {1}".format(signal, sender))
        except Exception as err:
            err_msg = format_critical_message(err)
            log.critical(err_msg)

    def _create_tun_if(self):
        """
        Open a TUN/TAP interface and switch it to TUN mode.

        :returns: The handler of the interface, which can be used for later
            read/write operations.
        :raises OSError: if none of /dev/tun0 .. /dev/tun15 can be opened
        """
        # =====
        log.info("opening tun interface")
        tun_counter = 0
        # probe /dev/tun0 .. /dev/tun15 until one opens successfully
        while tun_counter < 16:
            try:
                if_name = 'tun{0}'.format(tun_counter)
                f = os.open("/dev/{0}".format(if_name), os.O_RDWR)
                break
            except OSError:
                tun_counter += 1

        if tun_counter == 16:
            raise OSError('TUN device not found: check if it exists or if it is busy')
        else:
            # =====
            log.debug("configuring IPv6 address")
            prefix_str = format_ipv6_addr(self.IPV6PREFIX)
            host_str = format_ipv6_addr(self.IPV6HOST)

            # global address <prefix>:<host> plus a link-local fe80:: address
            _ = os.system('ifconfig {0} inet6 {1}:{2} prefixlen 64'.format(if_name, prefix_str, host_str))
            _ = os.system('ifconfig {0} inet6 fe80::{1} prefixlen 64 add'.format(if_name, host_str))

            # =====
            log.debug("adding static route route")
            # added 'metric 1' for router-compatibility constraint
            # (show ping packet on wireshark but don't send to mote at all)
            os.system('route add -inet6 {0}:1415:9200::/96 -interface {1}'.format(prefix_str, if_name))

            # =====
            log.debug("enabling IPv6 forwarding")
            os.system('sysctl -w net.inet6.ip6.forwarding=1')

            # =====
            log.info('created following virtual interfaces')
            os.system('ifconfig {0}'.format(if_name))

            return f

    def _create_tun_read_thread(self):
        """
        Creates and starts the thread to read messages arriving from the
        TUN interface.
        """
        return TunReadThread(self.tun_if, self._v6_to_mesh_notif)
|
import argparse
from pathlib import Path
import numpy as np
from model.embedder import SpeechEmbedder
import torch
from utils.hparams import HParam
import librosa
from utils.audio import Audio
#python encoder_inference.py --in_dir ../vox1_test/wav/ --out_dir spkid --gpu_str 0 (eval)
#python encoder_inference.py --in_dir ../datasets/raw_libri/LibriSpeech --out_dir '' --gpu_str 0 (generate ls)
#python encoder_inference.py --in_dir training_prepared/train --out_dir '' --gpu_str 0 (for speaker extraction of convtasnet, voicefilter will produce embedding on the fly)
if __name__ == '__main__':
    # Extract speaker embeddings (d-vectors) for reference wav files and save each
    # embedding next to its source file as a .npy array.
    #
    # Example invocations:
    #   python encoder_inference.py --in_dir ../vox1_test/wav/ --out_dir spkid --gpu_str 0          (eval)
    #   python encoder_inference.py --in_dir ../datasets/raw_libri/LibriSpeech --out_dir '' --gpu_str 0  (generate ls)
    #   python encoder_inference.py --in_dir training_prepared/train --out_dir '' --gpu_str 0
    #     (speaker extraction for convtasnet; voicefilter produces embeddings on the fly)
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("-e", "--enc_model_fpath", type=Path,
                        default="embedder.pt",
                        help="Path to a saved encoder")
    parser.add_argument('-c', '--config', type=str,
                        default="config/config.yaml",
                        help="yaml file for configuration")
    parser.add_argument("--in_dir", type=str, required=True, help="input data(pickle) dir")
    parser.add_argument("--out_dir", type=str, required=True, help="input data(pickle) dir")
    parser.add_argument('--gpu_str', default='0')
    args = parser.parse_args()

    # restrict CUDA to the requested GPU(s); the env vars must be set before the
    # first CUDA call initializes the device
    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_str)

    # load the speaker encoder and switch it to evaluation mode
    print("Preparing the encoder...")
    hp = HParam(args.config)
    embedder_pt = torch.load(args.enc_model_fpath)
    embedder = SpeechEmbedder(hp).cuda()
    embedder.load_state_dict(embedder_pt)
    embedder.eval()
    audio = Audio(hp)

    from glob import glob
    from tqdm import tqdm

    # reference wavs for speaker extraction (convtasnet mode); other historical
    # modes globbed '**/*.wav' or '*dvec3.wav' instead
    wav_list = glob('%s/*dvec.wav' % args.in_dir, recursive=True)

    for wav_file in tqdm(wav_list):
        # mel-spectrogram of the reference utterance, moved to the GPU
        dvec_wav, _ = librosa.load(wav_file, sr=hp.audio.sample_rate)
        dvec_mel = audio.get_mel(dvec_wav)
        dvec_mel = torch.from_numpy(dvec_mel).float().cuda()

        # inference only: no_grad avoids building the autograd graph, reducing
        # GPU memory usage during the batch run
        with torch.no_grad():
            norm_mean_dvector = embedder(dvec_mel)

        # save the embedding next to the source wav: foo-dvec.wav -> foo-dvec.npy
        filename = '%s.npy' % os.path.basename(wav_file).replace('.wav', '')
        npy_save_path = '%s/%s' % (args.in_dir, filename)
        np.save(npy_save_path, norm_mean_dvector.detach().cpu().numpy())
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 6 11:42:29 2019
@author:
"""
import math
import numpy
#from scipy.linalg.blas import daxpy
#from scipy.linalg.blas import ddot
#from scipy.linalg.blas import dscal
#from scipy.linalg.blas import idamax
#from Documents.ChromaStarPy.GAS.blas.Daxpy import daxpy
#from Documents.ChromaStarPy.GAS.blas.Ddot import ddot
#from Documents.ChromaStarPy.GAS.blas.Dscal import dscal
#from Documents.ChromaStarPy.GAS.blas.Idamax import idamax
import Daxpy
import Ddot
import Dscal
import Idamax
def dgesl(a, lda, n, ipvt, b, job):
    # Python translation of the LINPACK DGESL routine; solves a*x = b (job == 0)
    # or trans(a)*x = b (job != 0) using the LU factors from dgeco/dgefa.
    # The right-hand side ``b`` is overwritten in place with the solution x, and
    # is also returned for convenience.
    #integer lda,n,ipvt(1),job
    #double precision a(lda,1),b(1)
    """
    c
    c     dgesl solves the double precision system
    c     a * x = b  or  trans(a) * x = b
    c     using the factors computed by dgeco or dgefa.
    c
    c     on entry
    c
    c        a       double precision(lda, n)
    c                the output from dgeco or dgefa.
    c
    c        lda     integer
    c                the leading dimension of the array  a .
    c
    c        n       integer
    c                the order of the matrix  a .
    c
    c        ipvt    integer(n)
    c                the pivot vector from dgeco or dgefa.
    c
    c        b       double precision(n)
    c                the right hand side vector.
    c
    c        job     integer
    c                = 0         to solve  a*x = b ,
    c                = nonzero   to solve  trans(a)*x = b  where
    c                            trans(a)  is the transpose.
    c
    c     on return
    c
    c        b       the solution vector  x .
    c
    c     error condition
    c
    c        a division by zero will occur if the input factor contains a
    c        zero on the diagonal.  technically this indicates singularity
    c        but it is often caused by improper arguments or improper
    c        setting of lda .  it will not occur if the subroutines are
    c        called correctly and if dgeco has set rcond .gt. 0.0
    c        or dgefa has set info .eq. 0 .
    c
    c     to compute  inverse(a) * c  where  c  is a matrix
    c     with  p  columns
    c           call dgeco(a,lda,n,ipvt,rcond,z)
    c           if (rcond is too small) go to ...
    c           do 10 j = 1, p
    c              call dgesl(a,lda,n,ipvt,c(1,j),0)
    c        10 continue
    c
    c     linpack. this version dated 08/14/78 .
    c     <NAME>, university of new mexico, argonne national lab.
    c
    c     subroutines and functions
    c
    c     blas daxpy,ddot
    c
    c     internal variables
    c
    """
    #double precision ddot,t
    #integer k,kb,l,nm1
    #c
    nm1 = n - 1
    if (job == 0):
        #c
        #c        job = 0 , solve  a * x = b
        #c        first solve  l*y = b
        #c
        if (nm1 >= 1):
            # forward elimination: apply the recorded row interchanges and the
            # multipliers stored below the diagonal of a
            for k in range(nm1):
                l = ipvt[k]
                t = b[l]
                if (l != k):
                    # swap b[l] and b[k] as recorded by the pivot vector
                    #print("DGESL if triggered")
                    b[l] = b[k]
                    b[k] = t
                #print("DGESL 1: l ", l, " k, ", k, " b ", b[k])
                #FORTRAN call: call daxpy(n-k, t, a[k+1][k], 1, b[k+1], 1)
                #5th parameter is in/out, so the result must be copied back into b:
                daxpyOut =\
                Daxpy.daxpy(n-k-1, t, [a[kk][k] for kk in range(k+1, n)], 1, [b[kk] for kk in range(k+1, n)], 1)
                daxpyCount = 0
                for kk in range(k+1, n):
                    b[kk] = daxpyOut[daxpyCount]
                    daxpyCount+=1
                #print("DGESL 2: k ", k, " b ", b[k])
                #scipy: b[k+1] = daxpy(t, a[k+1][k], n-k, 1, 1)
        #c
        #c        now solve  u*x = y
        #c
        # back substitution over the upper triangle, from the last row upward
        #print("DGESL: Before 2nd DAXPY call n ", n)
        for kb in range(n):
            #k = n + 1 - kb  (1-based FORTRAN index, converted to 0-based below)
            k = (n-1) - kb
            #print("DGESL: kb ", kb, " k ", k, " b ", b[k], " a ", a[k][k])
            b[k] = b[k]/a[k][k]
            t = -b[k]
            #FORTRAN call: call daxpy(k-1, t, a[1][k], 1, b[1], 1)
            #print("DGESL: Before DAPXPY 2:")
            #print("a ", [a[kk][k] for kk in range(0, k+1)])
            #print("b ", [b[kk] for kk in range(0, k+1)])
            daxpyOut =\
            Daxpy.daxpy(k, t, [a[kk][k] for kk in range(0, k+1)], 1, [b[kk] for kk in range(0, k+1)], 1)
            daxpyCount = 0
            for kk in range(0, k+1):
                b[kk] = daxpyOut[daxpyCount]
                daxpyCount+=1
            #print("DGESL: After DAPXPY 2:")
            #print("b ", [b[kk] for kk in range(0, k+1)])
            #scipy: b[0] = daxpy(t, a[0][k], k-1, 1, 1)
    # **** goto 100 in the FORTRAN original: the transpose branch below is
    # skipped when job == 0, which the guard on the next line reproduces
    #c
    #c        job = nonzero, solve  trans(a) * x = b
    #c        first solve  trans(u)*y = b
    #c
    if (job != 0):
        for k in range(n):
            #t = ddot(k-1, a[1][k], 1, b[1], 1)
            t = Ddot.ddot(k, [a[kk][k] for kk in range(0, k)],\
                          1, [b[kk] for kk in range(0, k)], 1)
            b[k] = (b[k] - t)/a[k][k]
            #print("DDOT 1: t ", t)
        #c
        #c        now solve trans(l)*x = y
        #c
        if (nm1 >= 1):
            for kb in range(nm1):
                #k = n - kb  (1-based FORTRAN index, converted to 0-based below)
                k = n - kb - 1
                #b[k] = b[k] + ddot(n-k, a[k+1][k], 1, b[k+1], 1)
                b[k] = b[k] + Ddot.ddot(n-k, [a[kk][k] for kk in range(k, n)],\
                                        1, [b[kk] for kk in range(k, n)], 1)
                #print("DDOT 2: t ", t)
                l = ipvt[k]
                if (l != k):
                    # undo the row interchange recorded by the pivot vector
                    t = b[l]
                    b[l] = b[k]
                    b[k] = t
    return b
<reponame>tycoer/rfvision-1
from rfvision.models.builder import HEADS
from rfvision.components.utils.dct_utils import dct_2d, idct_2d
from rfvision.components.roi_heads.mask_heads import FCNMaskHead
from rflib.cnn import ConvModule
import torch.nn as nn
import numpy as np
import torch
from torch.nn import functional as F
from .fcn_mask_head import GPU_MEM_LIMIT, BYTES_PER_FLOAT, _do_paste_mask, warn
class DctMaskEncoding(object):
    """
    Encode binary masks with the 2-D discrete cosine transform (DCT), keeping the
    leading coefficients in zigzag order as a compact mask representation for
    instance segmentation.
    """

    def __init__(self, vec_dim, mask_size=128):
        """
        vec_dim: the dimension of the encoded vector, int
        mask_size: the resolution of the initial binary mask representaiton.
        """
        self.vec_dim = vec_dim
        self.mask_size = mask_size
        assert vec_dim <= mask_size * mask_size
        # precompute the zigzag scan order once; encode/decode reuse it
        self.dct_vector_coords = self.get_dct_vector_coords(r=mask_size)

    def encode(self, masks, dim=None):
        """
        Encode the mask to vector of vec_dim or specific dimention.
        """
        n_coeffs = self.vec_dim if dim is None else dim
        coords = self.dct_vector_coords[:n_coeffs]

        # [N, H, W] float masks, then their 2-D DCT spectrum
        masks = masks.view([-1, self.mask_size, self.mask_size]).to(dtype=float)
        spectrum = dct_2d(masks, norm='ortho')

        # gather the leading coefficients along the zigzag order
        xs, ys = coords[:, 0], coords[:, 1]
        return spectrum[:, xs, ys]  # [N, n_coeffs]

    def decode(self, dct_vectors, dim=None):
        """
        intput: dct_vector numpy [N,dct_dim]
        output: mask_rc mask reconstructed [N, mask_size, mask_size]
        """
        device = dct_vectors.device
        n_coeffs = self.vec_dim if dim is None else dim
        coords = self.dct_vector_coords[:n_coeffs]
        dct_vectors = dct_vectors[:, :dim]

        # scatter the coefficients back into an empty spectrum …
        n_masks = dct_vectors.shape[0]
        spectrum = torch.zeros(
            [n_masks, self.mask_size, self.mask_size], dtype=dct_vectors.dtype
        ).to(device)
        xs, ys = coords[:, 0], coords[:, 1]
        spectrum[:, xs, ys] = dct_vectors

        # … and invert the transform to reconstruct the masks
        return idct_2d(spectrum, norm='ortho')  # [N, mask_size, mask_size]

    def get_dct_vector_coords(self, r=128):
        """
        Get the coordinates with zigzag order.
        """
        coords = []
        # anti-diagonals of the upper-left triangle (including the main one);
        # even diagonals run top-right to bottom-left, odd ones the reverse
        for s in range(r):
            js = range(s + 1)
            coords.extend(
                [(s - j, j) for j in js] if s % 2 == 0 else [(j, s - j) for j in js]
            )
        # anti-diagonals of the lower-right triangle
        for s in range(r, 2 * r - 1):
            js = range(s - r + 1, r)
            coords.extend(
                [(s - j, j) for j in js] if s % 2 == 0 else [(j, s - j) for j in js]
            )
        return np.asarray(coords)
@HEADS.register_module()
class MaskRCNNDCTHead(FCNMaskHead):
    """DCT-Mask head. Refers to https://github.com/aliyun/DCT-Mask.

    A mask head with several conv layers followed by three fully connected
    layers that regress a DCT vector encoding of the instance mask instead
    of a per-pixel mask.
    """

    def __init__(self,
                 num_convs=4,
                 in_channels=256,
                 dct_vector_dim=300,
                 mask_size=128,
                 dct_loss_type='l1',
                 mask_loss_para=0.007,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 init_cfg=None):
        """
        Args:
            num_convs (int): number of conv layers before the FC predictor.
            in_channels (int): channels of the incoming ROI feature map.
            dct_vector_dim (int): dimension of the regressed DCT vector.
            mask_size (int): resolution of the decoded mask.
            dct_loss_type (str): one of 'l1', 'sl1' (smooth L1) or 'l2'.
            mask_loss_para (float): scale factor applied to the mask loss.
            conv_cfg / norm_cfg / act_cfg: ConvModule configuration dicts.
            init_cfg: mmcv-style init config forwarded to the base class.
        """
        super().__init__(init_cfg=init_cfg)
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.dct_vector_dim = dct_vector_dim
        self.dct_loss_type = dct_loss_type
        self.mask_loss_para = mask_loss_para
        self.mask_size = mask_size
        # Encoder/decoder between binary masks and DCT coefficient vectors.
        self.coder = DctMaskEncoding(vec_dim=dct_vector_dim, mask_size=mask_size)
        self.convs = nn.ModuleList()
        for i in range(num_convs):
            self.convs.append(
                ConvModule(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    **cfg))
        # ROI features are expected to be 256 x 14 x 14 after the convs
        # (hard-wired by the flattened FC input size).
        self.predictor_fc1 = nn.Linear(256 * 14 * 14, 1024)
        self.predictor_fc2 = nn.Linear(1024, 1024)
        self.predictor_fc3 = nn.Linear(1024, dct_vector_dim)

    def init_weights(self, init_cfg=None):
        """Initialize weights; the final FC layer gets a small-std normal init."""
        super().init_weights()
        if self.init_cfg is not None:
            nn.init.normal_(self.predictor_fc3.weight, std=0.001)
            nn.init.constant_(self.predictor_fc3.bias, 0)

    def forward(self, x):
        """
        Args:
            x: input region feature(s) provided by :class:`ROIHeads`.

        Returns:
            Tensor of shape (num_rois, dct_vector_dim) holding the
            regressed DCT vectors.
        """
        for layer in self.convs:
            x = layer(x)
        x = torch.flatten(x, start_dim=1)
        x = F.relu(self.predictor_fc1(x))
        x = F.relu(self.predictor_fc2(x))
        x = self.predictor_fc3(x)
        return x

    def loss(self, mask_pred, mask_targets, labels):
        """Compute the DCT-vector regression loss.

        Args:
            mask_pred: predicted DCT vectors, (num_pos, dct_vector_dim).
            mask_targets: ground-truth binary masks; DCT-encoded here
                before the loss is taken.
            labels: class labels of the positive ROIs (unused by this loss).

        Returns:
            dict with a single ``'loss_mask'`` entry.

        Raises:
            ValueError: for an unsupported ``dct_loss_type``.
        """
        # All three variants share the same scaling/normalization, so
        # dispatch through a table instead of three duplicated branches.
        loss_funcs = {
            'l1': F.l1_loss,
            'sl1': F.smooth_l1_loss,
            'l2': F.mse_loss,
        }
        loss = dict()
        if mask_pred.size(0) == 0:
            # Keep the graph connected so backward() still works.
            mask_loss = mask_pred.sum() * 0
        else:
            if self.dct_loss_type not in loss_funcs:
                # Original message omitted 'sl1' although it is supported.
                raise ValueError(
                    "Loss Type Only Support : l1, sl1, l2; yours: {}".format(
                        self.dct_loss_type))
            mask_targets = self.coder.encode(mask_targets)  # (num_pos, dct_vector_dim)
            num_instance = mask_targets.size()[0]
            mask_loss = loss_funcs[self.dct_loss_type](
                mask_pred, mask_targets, reduction="none")
            mask_loss = self.mask_loss_para * mask_loss / num_instance
            mask_loss = torch.sum(mask_loss)
        loss['loss_mask'] = mask_loss
        return loss

    def dct_style_to_fcn_style(self, mask_pred, det_labels):
        """Convert DCT-vector predictions to FCNMaskHead-style mask tensors.

        Decodes each DCT vector back to a (mask_size, mask_size) mask and
        scatters it into the channel of its predicted class so the inherited
        ``get_seg_masks`` post-processing can be reused unchanged.
        """
        mask_pred = self.coder.decode(mask_pred.detach())  # (num_insts, mask_size, mask_size)
        device = mask_pred.device
        # get_seg_masks expects (num_insts, num_classes, H, W). Use
        # self.num_classes — as get_seg_masks itself already does — instead
        # of the previously hard-coded 80, so non-COCO class counts work.
        mask_pred_temp = torch.zeros((mask_pred.shape[0], self.num_classes,
                                      self.mask_size, self.mask_size),
                                     dtype=torch.float,
                                     device=device)
        for i, label in enumerate(det_labels):
            mask_pred_temp[i, label, :, :] = mask_pred[i]
        return mask_pred_temp

    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                      ori_shape, scale_factor, rescale):
        """Paste decoded instance masks into full-image masks.

        Mirrors ``FCNMaskHead.get_seg_masks`` except the DCT vectors are
        decoded first and no sigmoid is applied (DCT masks are already
        real-valued mask estimates).
        """
        mask_pred = self.dct_style_to_fcn_style(mask_pred, det_labels)
        if not isinstance(mask_pred, torch.Tensor):
            # In AugTest predictions may arrive as ndarray; note that in
            # dct mask sigmoid is not used.
            mask_pred = det_bboxes.new_tensor(mask_pred)
        device = mask_pred.device
        cls_segms = [[] for _ in range(self.num_classes)
                     ]  # BG is not included in num_classes
        bboxes = det_bboxes[:, :4]
        labels = det_labels
        # In most cases, scale_factor should have been
        # converted to Tensor when rescale the bbox
        if not isinstance(scale_factor, torch.Tensor):
            if isinstance(scale_factor, float):
                scale_factor = np.array([scale_factor] * 4)
                warn('Scale_factor should be a Tensor or ndarray '
                     'with shape (4,), float would be deprecated. ')
            assert isinstance(scale_factor, np.ndarray)
            scale_factor = torch.Tensor(scale_factor)
        if rescale:
            img_h, img_w = ori_shape[:2]
            bboxes = bboxes / scale_factor
        else:
            w_scale, h_scale = scale_factor[0], scale_factor[1]
            img_h = np.round(ori_shape[0] * h_scale.item()).astype(np.int32)
            img_w = np.round(ori_shape[1] * w_scale.item()).astype(np.int32)
        N = len(mask_pred)
        # The actual implementation splits the input into chunks,
        # and pastes them chunk by chunk.
        if device.type == 'cpu':
            # CPU is most efficient when they are pasted one by one with
            # skip_empty=True, so that it performs minimal number of
            # operations.
            num_chunks = N
        else:
            # GPU benefits from parallelism for larger chunks,
            # but may have memory issue.
            # Cast img_w/img_h (np.int32) to int so the num_chunks
            # calculation cannot overflow for large images.
            # See https://github.com/open-mmlab/mmdetection/pull/5191
            num_chunks = int(
                np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /
                        GPU_MEM_LIMIT))
            assert (num_chunks <=
                    N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
        chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
        threshold = rcnn_test_cfg.mask_thr_binary
        im_mask = torch.zeros(
            N,
            img_h,
            img_w,
            device=device,
            dtype=torch.bool if threshold >= 0 else torch.uint8)
        if not self.class_agnostic:
            mask_pred = mask_pred[range(N), labels][:, None]
        for inds in chunks:
            masks_chunk, spatial_inds = _do_paste_mask(
                mask_pred[inds],
                bboxes[inds],
                img_h,
                img_w,
                skip_empty=device.type == 'cpu')
            if threshold >= 0:
                masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
            else:
                # for visualization and debugging
                masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
            im_mask[(inds, ) + spatial_inds] = masks_chunk
        for i in range(N):
            cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
        return cls_segms
if __name__ == '__main__':
    num_instances = 13
    # Smoke-test the head: forward pass on random ROI features.
    head = MaskRCNNDCTHead()
    feats = torch.rand(num_instances, 256, 14, 14)
    mask_pred = head(feats)  # shape (num_instances, 300)
    # Smoke-test the loss against random mask targets.
    mask_target = torch.rand(num_instances, 128, 128)
    gt_labels = torch.randint(low=0, high=80, size=(num_instances, 1))
    loss = head.loss(mask_pred, mask_target, gt_labels)
|
# pylint: disable=R0201
# R0201: For testing methods which could be functions are fine.
#
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Test case for the ItemVisitorBase and ItemTreeWalkerBase classes.
"""
import unittest
from datafinder.core.item.base import ItemBase
from datafinder.core.item.collection import ItemRoot, ItemCollection
from datafinder.core.item.leaf import ItemLeaf
from datafinder.core.item.link import ItemLink
from datafinder.core.item.visitor.base import ItemTreeWalkerBase, VisitSlot
from datafinder_test.mocks import SimpleMock
__version__ = "$Revision-Id:$"
class _TestItem(object):
"""
Simple class definition to test failure of visitor class.
"""
def __init__(self):
""" Constructor. """
pass
class _TestItemVisitor(object):
    """
    Mock visitor to test L{ItemVisitorBase<datafinder.core.item.visitor.base.ItemVisitorBase>}.
    Two visit slots are defined: C{test1} and C{test2} both of which only have a valid implementation
    for L{SimpleMock<datafinder_test.mocks.SimpleMock>}.
    """

    def test1ReturnsNodeValue(self, node):
        """
        Visitor slot implementation for C{SimpleMock}.
        @param node: The visited node.
        @type node: C{SimpleMock}
        @return: The value of the node (C{node.value}).
        """
        return node.value

    def test1ReturnsFalse(self, _):
        """
        Visitor slot implementation for C{_TestItem} (node argument unused).
        @return: C{False}
        """
        return False

    # Dispatch table: VisitSlot picks the implementation registered for
    # the visited node's type.
    test1 = VisitSlot((test1ReturnsNodeValue, [SimpleMock]),
                      (test1ReturnsFalse, [_TestItem]))

    def test2ChecksNodeValueForPara(self, node, para):
        """
        Visitor slot implementation for C{SimpleMock} with extra parameter which is compared to
        the mocks value.
        @param node: The visited node.
        @type node: C{SimpleMock}
        @param para: A parameter to be passed.
        @type para: Boolean
        @return: Returns whether the node value (C{node.value}) equals the given
        parameter.
        @rtype: Boolean
        """
        return node.value == para

    # Only a SimpleMock handler is registered for test2; any other node
    # type has no valid slot.
    test2 = VisitSlot((test2ChecksNodeValueForPara, [SimpleMock]))
class _DerivedTestItemVisitor(_TestItemVisitor, object):
    """ Visitor subclass used to test slot overriding and hiding. """

    def test1Overridden(self, _):
        """ Overriding C{SimpleMock} handler; always returns C{False}. """
        return False

    # Overrides test1 for SimpleMock while inheriting the parent's other
    # registrations via inherits="test1".
    test1 = VisitSlot((test1Overridden, (SimpleMock, )), inherits="test1")

    def test2Hidden(self, node, para):
        """ Handler registered for C{_TestItem} only. """
        return node.value == para

    # Hides the parent's test2: no SimpleMock handler and no inheritance,
    # so SimpleMock nodes now have no valid slot.
    test2 = VisitSlot((test2Hidden, [_TestItem]))
class ItemVisitorBaseTestCase(unittest.TestCase):
    """
    Test case for L{ItemVisitorBase<datafinder.core.item.visitor.base.ItemVisitorBase>}.
    """

    def setUp(self):
        """
        Unittest setup. Initializes a C{SimpleMock}, a C{_TestItem} and
        the two test visitors for later use in the tests.
        """
        self.mockItem = SimpleMock(True)
        self.testItem = _TestItem()
        self.visitor = _TestItemVisitor()
        self.visitor2 = _DerivedTestItemVisitor()

    def testAllFine(self):
        """
        These tests simply check whether calling a visitor slot works with and without parameters.
        """
        self.assertTrue(self.visitor.test1(self.mockItem))
        self.assertTrue(self.visitor.test2(self.mockItem, True))
        self.assertFalse(self.visitor.test2(self.mockItem, False))

    def testDispatch(self):
        """
        This method checks whether the slots really only respond to the data they
        are registered for.
        """
        self.assertTrue(self.visitor.test1(self.mockItem))
        self.assertFalse(self.visitor.test1(self.testItem))
        self.assertRaises(TypeError, self.visitor.test2, self.mockItem)  # too few parameters
        self.assertTrue(self.visitor.test2(self.mockItem, True))
        self.assertRaises(AttributeError, self.visitor.test2, self.testItem)  # no valid slot
        # Derived visitor: test1 is overridden for SimpleMock (always False)
        # while the _TestItem handler is inherited from the parent.
        self.assertFalse(self.visitor2.test1(self.mockItem))
        self.assertFalse(self.visitor2.test1(self.testItem))
        # test2 is hidden in the subclass: no SimpleMock handler remains.
        self.assertRaises(AttributeError, self.visitor2.test2, self.mockItem)
class _TestItemTreeWalker(ItemTreeWalkerBase):
    """
    Mock tree walker class to test
    L{ItemTreeWalkerBase<datafinder.core.item.visitor.base.ItemTreeWalkerBase>}.
    """

    def __init__(self, mode=-1):
        """
        Constructor.

        @param mode: Traversal order; -1 selects pre-order, 1 post-order.
        """
        ItemTreeWalkerBase.__init__(self, mode=mode)
        # Records the names of the visited items in visit order.
        self.sequence = list()

    def reset(self):
        """
        Reset the list of walked items.
        """
        self.sequence = list()

    def handleData(self, node):
        """
        Visitor slot C{handle} for all nodes except links: records the
        node's name.
        """
        self.sequence.append(node.name)

    def handleLink(self, node):
        """
        Visitor slot C{handle} for link nodes: records the node's name
        prefixed with "*" so links are distinguishable in the sequence.
        """
        self.sequence.append("*" + node.name)

    # Dispatch: plain items go to handleData, links to handleLink.
    handle = VisitSlot((handleData, [ItemBase, ItemRoot, ItemCollection, ItemLeaf]),
                       (handleLink, [ItemLink]))
class _EmptyItemTreeWalker(ItemTreeWalkerBase):
    """
    Another mock up tree walker where the C{handle} slot has been disabled.
    """

    def __init__(self, mode=-1):
        """ Constructor. """
        # No handle slot is defined here, so walking any tree is expected
        # to fail with an AttributeError (exercised in testExceptions).
        ItemTreeWalkerBase.__init__(self, mode=mode)
class ItemTreeWalkerTestCase(unittest.TestCase):
    """
    Test case for L{ItemTreeWalkerBase<datafinder.item.visitor.base.ItemTreeWalkerBase>}.
    """

    # Expected visit orders for the tree built in setUp.
    PREORDER_RESULT = ("root", "collection", "leaf", "base", "*link")
    POSTORDER_RESULT = ("leaf", "base", "*link", "collection", "root")
    NODEONLY_RESULT = PREORDER_RESULT[1:]

    def setUp(self):
        """ Builds two walkers and a tree: root -> collection -> {leaf, base, link}. """
        # A tree walker that operates Pre-order (mode=-1)
        self.preorderWalker = _TestItemTreeWalker()
        self.preorderWalker.reset()
        # A tree walker that applies Post-order scheme (mode=1)
        self.postorderWalker = _TestItemTreeWalker(mode=1)
        self.postorderWalker.reset()
        # A root for testing
        self.testRoot = ItemRoot("root")
        self.testRoot._fileStorer = SimpleMock(list())
        self.testRoot.itemFactory = SimpleMock(SimpleMock(list()))
        self.testRoot.path = ""
        # A collection for testing
        self.testNode = ItemCollection("collection")
        self.testNode._fileStorer = SimpleMock(list())
        self.testNode.itemFactory = SimpleMock(SimpleMock(list()))
        self.testNode.parent = self.testRoot
        # A leaf for testing
        self.testLeaf = ItemLeaf("leaf")
        self.testLeaf._fileStorer = SimpleMock(list())
        self.testLeaf.itemFactory = SimpleMock(SimpleMock(list()))
        self.testLeaf.parent = self.testNode
        # A base item for testing
        self.testBase = ItemBase("base")
        self.testBase._fileStorer = SimpleMock(list())
        self.testBase.itemFactory = SimpleMock(SimpleMock(list()))
        self.testBase.parent = self.testNode
        # A link for testing
        self.testLink = ItemLink("link")
        self.testLink._fileStorer = SimpleMock(list())
        self.testLink.itemFactory = SimpleMock(SimpleMock(list()))
        self.testLink.parent = self.testNode

    def _assertSequencesEqual(self, results, expected):
        """
        Assert two sequences equal itemwise.
        @param results: The sequence to be tested.
        @type results: Any class implementing iterator protocol.
        @param expected: The expected results.
        @type expected: Any class implementing iterator protocol.
        """
        # NOTE(review): zip stops at the shorter sequence, so differing
        # lengths would not be detected here.
        for result, expect in zip(results, expected):
            self.assertEqual(result, expect)

    def testAllFine(self):
        """
        Simply compares if the produced sequence is produced as expected.
        """
        self.preorderWalker.walk(self.testRoot)
        self._assertSequencesEqual(self.preorderWalker.sequence,
                                   ItemTreeWalkerTestCase.PREORDER_RESULT)
        self.postorderWalker.walk(self.testRoot)
        self._assertSequencesEqual(self.postorderWalker.sequence,
                                   ItemTreeWalkerTestCase.POSTORDER_RESULT)

    def testExceptions(self):
        """
        Check whether exceptions are raised just as expected.
        """
        # mode=0 is not a valid traversal order.
        self.assertRaises(ValueError, _EmptyItemTreeWalker, mode=0)
        walker = _EmptyItemTreeWalker()
        self.assertRaises(AttributeError, walker.walk, self.testRoot)  # No handler slot
        _EmptyItemTreeWalker.handle = VisitSlot(inherits="handle")
        self.assertRaises(AttributeError, walker.walk, self.testRoot)  # No slot for type

    def testNodes(self):
        """
        Check the performance of the tree walker when started on a collection.
        """
        self.preorderWalker.walk(self.testNode)
        self._assertSequencesEqual(self.preorderWalker.sequence,
                                   ItemTreeWalkerTestCase.NODEONLY_RESULT)

    def testLeafs(self):
        """
        Check the performance of the tree walker when started on a leaf or link.
        """
        self.preorderWalker.walk(self.testLeaf)
        self.assertEqual(self.preorderWalker.sequence[0], "leaf")
        self.preorderWalker.reset()
        self.preorderWalker.walk(self.testLink)
        self.assertEqual(self.preorderWalker.sequence[0], "*link")
|
'''
# fastly-blocklist #
Configure request blocking on a Fastly service.
'''
from pathlib import Path
from argparse import ArgumentParser, RawTextHelpFormatter
import lib
def main(args):
    '''
    run fastly-blocklist
    '''
    # Build the environment and an empty running-config state.
    environment = lib.Environment(args)
    state = lib.State()
    # Optionally pull the live service configuration into the running config.
    if args.sync:
        print('Syncing with live service.')
        state.sync(environment, lib.Remote(environment))
    # Apply list-level, then item-level operations from the CLI arguments.
    lib.Lists(args, environment)
    lib.Items(args, environment)
    # Optionally push the running config to the live service(s).
    if args.commit:
        print('Deploying to live service(s).')
        state.commit(environment, lib.Remote(environment))
    # Persist the running config, or warn that nothing was written.
    if args.save:
        print(f'Saving running config to file: {environment.config_file}')
        state.save(environment)
    else:
        print(f'Warning: This change has NOT been saved. Use --save to store '
              f'in config file: {environment.config_file}'
              )
if __name__ == '__main__':
    # NOTE(review): version string is hard-coded; presumably replaced at
    # release time — confirm with the build process.
    version = 'master'
    # RawTextHelpFormatter preserves the \n/\t layout used in the help text.
    PARSER = ArgumentParser(formatter_class=RawTextHelpFormatter, description=(
        f'\n# fastly-blocklist # version: {version}\n'
        f'Configure request blocking for a Fastly service.\n'
    ))
    # Enable verbose mode
    PARSER.add_argument('-v', '--verbose', required=False, action='store_true',
                        help=("Enable verbose mode.")
                        )
    # Manage blocklist environment
    ENVIRONMENT = PARSER.add_argument_group('ENVIRONMENT',
                                            'Manage blocklist environment'
                                            )
    ENVIRONMENT.add_argument('--init', required=False, action='store_true',
                             help=("Create a new fastly-blocklist config.")
                             )
    ENVIRONMENT.add_argument(
        '--force',
        required=False,
        action='store_true',
        help=(
            "Force config initialization, overwriting existing local config "
            "file."))
    ENVIRONMENT.add_argument(
        '--apikey', required=False, default='{}/.fastlyctl_token'.format(
            Path.home()), type=str, help=(
            "Location of a file containing Fastly API key/token.\n"
            "\tDefault: Read from ~/.fastlyctl_token"))
    ENVIRONMENT.add_argument('--config', required=False,
                             default='{}/config.blocklist'.format(Path.cwd()),
                             type=str,
                             help=("Location of a fastly-blocklist config file.\n"
                                   "\tDefault: ./config.blocklist"
                                   )
                             )
    # Comma-separated values are split into a list of service IDs.
    ENVIRONMENT.add_argument(
        '--service',
        required=False,
        default=[],
        type=lambda s: [
            str(item) for item in s.split(',')],
        help=(
            "Service(s) to target.\n"
            "\tThis is required on config --init.\n"
            "\tDefault: Read from the selected config file.\n"
            "\tExample: --service ABCDEF,DEFABC"))
    ENVIRONMENT.add_argument(
        '--log',
        required=False,
        default='',
        type=str,
        help=(
            "VCL to execute when a request is logged/blocked.\n"
            "\tDefault: none"))
    ENVIRONMENT.add_argument(
        '--block',
        required=False,
        default='error 403 "Forbidden";',
        type=str,
        help=(
            "VCL to execute when a request is blocked.\n"
            "\tDefault: error 403 \"Forbidden\""))
    # Manage configuration state
    STATE = PARSER.add_argument_group(
        'STATE', 'Modify live service and local config state')
    STATE.add_argument(
        '--sync',
        required=False,
        action='store_true',
        help=("Sync live service configuration to the running config."))
    STATE.add_argument('--commit', required=False, action='store_true',
                       help=("Deploy running config to the live service(s).")
                       )
    STATE.add_argument(
        '--save',
        required=False,
        action='store_true',
        help=("Save running configuration to a fastly-blocklist config file."))
    # Manage lists
    LISTS = PARSER.add_argument_group('LISTS',
                                      'Manage blocklist lists'
                                      )
    LISTS.add_argument('-n', '--new', required=False, action='store_true',
                       help=("Create a new list.")
                       )
    LISTS.add_argument('-d', '--delete', required=False, action='store_true',
                       help=("Delete an existing list.")
                       )
    LISTS.add_argument(
        '-l',
        '--list',
        required=False,
        default=[],
        type=lambda s: [
            str(item) for item in s.split(',')],
        help=(
            "List name(s) to create/update/delete.\n"
            "\tThis is required for all operations on lists & list items.\n"
            "\tExample: my-block-list"))
    LISTS.add_argument(
        '-t',
        '--type',
        required=False,
        choices=[
            'allow',
            'geo',
            'block',
            'temp',
            'var',
            'combo'],
        help=(
            "List type.\n"
            "\tThis is required when creating a new list.\n"
            "\tallow\t- Allow IP addresses. Disables processing for all "
            "other lists.\n"
            "\tgeo \t- Block geolocations (ISO alpha-2).\n"
            "\tblock\t- Block IP addresses permanently.\n"
            "\ttemp\t- Block IP addresses temporarily.\n"
            "\tvar\t- Block whenever a VCL variable matches an item.\n"
            "\tcombo\t- Block whenever any 2+ lists are matched."))
    LISTS.add_argument(
        '--action',
        required=False,
        default='none',
        choices=[
            'none',
            'log',
            'block'],
        help=(
            "Action to take when the list is matched.\n"
            "\tnone\t- No action is taken.\n"
            "\tlog\t- Log that a match occurred.\n"
            "\tblock\t- Block the request and log that a match occurred.\n"
            "\tDefault: none"))
    LISTS.add_argument(
        '--match',
        required=False,
        default='exact',
        choices=[
            'exact',
            'regexp'],
        help=(
            "Match type for var lists.\n"
            "\tThis is required when creating a new var list.\n"
            "\texact\t- Match only if variable value == list item.\n"
            "\tregexp\t- Match if variable value ~ list item.\n"
            "\tDefault: exact"))
    LISTS.add_argument(
        '--variable',
        '--var',
        required=False,
        type=str,
        help=(
            "VCL variable name to match against for var lists.\n"
            "\tThis is required when creating a new var list.\n"
            "\tExample: req.http.User-Agent"))
    LISTS.add_argument(
        '--block_length',
        '--len',
        required=False,
        default=600,
        type=int,
        help=(
            "Block length in seconds for temp lists.\n"
            "\tItems will be added with expiration time (now + len).\n"
            "\tDefault: 600"))
    # Manage list items
    ITEMS = PARSER.add_argument_group('ITEMS',
                                      'Manage list items'
                                      )
    ITEMS.add_argument('-a', '--add', required=False, action='store_true',
                       help=("Add an item or items to a list.")
                       )
    ITEMS.add_argument('-r', '--remove', required=False, action='store_true',
                       help=("Remove an item or items from a list.")
                       )
    ITEMS.add_argument(
        '-i',
        '--item',
        required=False,
        default=[],
        type=lambda s: [
            str(item) for item in s.split(',')],
        help=(
            "List item(s) to add/remove.\n"
            "\t--item or --file are required when operating on list items.\n"
            "\tExample: 1.2.3.4,4.3.2.1"))
    ITEMS.add_argument(
        '-f',
        '--file',
        required=False,
        type=str,
        help=(
            "File containing list items to add/remove.\n"
            "\t--item or --file are required when operating on list items."))
    ITEMS.add_argument(
        '--clean',
        required=False,
        action='store_true',
        help=(
            "Clean up expired entries from temp list(s) in the running "
            "config."))
    ITEMS.add_argument(
        '--removeall',
        required=False,
        action='store_true',
        help=(
            "Remove all items from a list or all lists in the running "
            "config."))
    # print the fastly-blocklist header
    print(PARSER.description)
    main(PARSER.parse_args())
|
from pathlib import Path
import ruamel.yaml
from ..model import AutojailArch, AutojailConfig, AutojailLogin
from .base import BaseCommand
class InitCommand(BaseCommand):
    """ Initializes an autojail project

    init
        {--f|force : if set overwrites existing autojail.yml}
        {--name= : name of the project}
        {--arch= : architecture of the project}
        {--cross-compile= : cross-compiler prefix eg. CROSS_COMPILE argument for jailhouse and kernel builds}
        {--jailhouse= : directory containing the jailhouse sources}
        {--kernel= : directory containing the kernel build}
        {--host= : hostname or ip address of target board}
        {--user= : username on target board}
        {--uart= : device node of local uart connected to target board}
        {--board= : if automate is installed the automate board id (if given selects automate backend)}
        {--a|automate : use automate as backend}
    """
    # NOTE: the class docstring above doubles as the CLI command signature
    # (parsed by the command framework) — do not reword it.

    def handle(self):
        """ Initialize the project """
        # Refuse to clobber an existing config unless --force was given.
        if self.config_path.exists():
            if not self.option("force"):
                self.line(
                    "<error>This directory already contains an <comment>autojail.yml</comment> use -f to overwrite</error>"
                )
                return -1
        self.line("")
        self.line(
            "This command will guide you through the initialization of an autojail project"
        )
        self.line("")
        # Pick the backend: explicit options win; otherwise ask the user,
        # offering automate only when an automate context is available.
        if self.option("board"):
            config = self._init_automate()
        elif self.option("host"):
            config = self._init_ssh()
        else:
            if self.automate_context:
                choices = ["ssh", "automate"]
                backend = self.choice(
                    "Which backend should be used to connect to the board",
                    choices,
                    attempts=3,
                    default=0,
                )
                # FIXME: should be fixed in updated clikit
                if isinstance(backend, int):
                    backend = choices[backend]
                if backend == "ssh":
                    config = self._init_ssh()
                else:
                    config = self._init_automate()
            else:
                config = self._init_ssh()
        # Serialize the config to the project file; AutojailArch and
        # AutojailLogin values are emitted as plain YAML strings.
        with self.config_path.open("w") as config_file:
            yaml = ruamel.yaml.YAML()

            def represent_str(representer, data: str):
                # Represent the custom string subclasses as ordinary YAML strings.
                return representer.represent_scalar(
                    "tag:yaml.org,2002:str", data
                )

            yaml.representer.add_representer(AutojailArch, represent_str)
            yaml.representer.add_representer(AutojailLogin, represent_str)
            config_dict = config.dict()
            yaml.dump(config_dict, config_file)

    def _init_automate(self) -> AutojailConfig:
        "Initialize the backend with test rack"
        assert self.automate_context is not None
        # Project name: from --name or prompted, defaulting to the cwd name.
        name = self.option("name")
        if not name:
            name = Path.cwd().name.lower()
            question = self.create_question(
                f"Project name [<comment>{name}</comment>]", default=name
            )
            name = self.ask(question)
        # Board id: from --board or an interactive choice over rack boards.
        board = self.option("board")
        if not board:
            choices = [b.name for b in self.automate_context.boards()]
            board = self.choice(
                "Which target board should be used:",
                choices,
                attempts=3,
                default=0,
            )
        automate_board = self.automate_context.board(board)
        os = automate_board.os
        # NOTE(review): assumes every non-aarch64 board is 32-bit ARM — confirm.
        arch = "ARM64" if os.triple.machine.name == "aarch64" else "ARM"
        compiler = automate_board.compiler(toolchain="gcc")
        cross_compile = str(compiler.bin_path / compiler.prefix)
        kernel_dir = "kernel"
        jailhouse_dir = "jailhouse"
        # No local UART when talking to the board through automate.
        uart = None
        config = AutojailConfig(
            name=name,
            board=board,
            login=f"automate:{board}",
            arch=arch,
            cross_compile=cross_compile,
            kernel_dir=kernel_dir,
            jailhouse_dir=jailhouse_dir,
            uart=uart,
        )
        return config

    def _init_ssh(self) -> AutojailConfig:
        "Initialize the project with direct ssh connection"
        # Project name: from --name or prompted, defaulting to the cwd name.
        name = self.option("name")
        if not name:
            name = Path.cwd().name.lower()
            question = self.create_question(
                f"Project name [<comment>{name}</comment>]", default=name
            )
            name = self.ask(question)
        # Architecture: from --arch or an interactive choice.
        arch = self.option("arch")
        if not arch:
            arch = "ARM64"
            choices = ["ARM64", "ARM"]
            arch = self.choice(
                "What is the base architecture of the board?",
                choices,
                attempts=3,
                default=0,
            )
            # FIXME: in non interactive mode clikit return default number instead of value
            if isinstance(arch, int):
                arch = choices[arch]
        arch = arch.upper()
        # Cross-compiler prefix: sensible default per architecture.
        cross_compile = self.option("cross-compile")
        if not cross_compile:
            defaults = {
                "ARM64": "aarch64-linux-gnu-",
                "ARM": "arm-linux-gnueabihf-",
            }
            cross_compile = defaults[arch]
            question = self.create_question(
                f"Cross compiler prefix [<comment>{cross_compile}</comment>]",
                default=cross_compile,
            )
            cross_compile = self.ask(question)
        host = self.option("host")
        if not host:
            host = "10.42.0.100"  # This is the default IP-Address when using Network Manager connection sharing
            question = self.create_question(
                f"Hostname or IP of target board [<comment>{host}</comment>]",
                default=host,
            )
            host = self.ask(question)
        user = self.option("user")
        if not user:
            user = "root"
            question = self.create_question(
                f"Username on the target board [<comment>{user}</comment>]",
                default=user,
            )
            user = self.ask(question)
        # UART: default to /dev/ttyUSB0 only if it actually exists locally.
        uart = self.option("uart")
        if not uart:
            uart = "/dev/ttyUSB0"
            if not Path(uart).exists():
                uart = ""
            question = self.create_question(
                f"Serial interface connected to board: [<comment>{uart}</comment>]",
                default=uart,
            )
            uart = self.ask(question)
        kernel_dir = self.option("kernel")
        if not kernel_dir:
            kernel_dir = "./kernel"
            question = self.create_question(
                f"Directory containing the kernel build [<comment>{kernel_dir}</comment>]",
                default=kernel_dir,
            )
            kernel_dir = self.ask(question)
        jailhouse_dir = self.option("jailhouse")
        if not jailhouse_dir:
            jailhouse_dir = "./jailhouse"
            question = self.create_question(
                f"Directory containing the jailhouse sources [<comment>{jailhouse_dir}</comment>]",
                default=jailhouse_dir,
            )
            jailhouse_dir = self.ask(question)
        # TODO: ask for baud rate and rest of uart config
        config = AutojailConfig(
            name=name,
            board=name,  # FIXME: for users
            login=f"ssh:{user}@{host}",
            arch=arch,
            cross_compile=cross_compile,
            kernel_dir=kernel_dir,
            jailhouse_dir=jailhouse_dir,
            uart=uart,
        )
        return config
|
# Submit a request via cURL:
# curl -X POST -F audio=@salli.wav 'http://localhost:5000/predict'
# import the necessary packages
# -*- coding: utf-8 -*-
import sugartensor as tf
import numpy as np
import librosa
from model import *
import data
import flask
import io
from datetime import datetime
from werkzeug import secure_filename
import os
# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
model = None
def init_model():
    """Build the inference graph: MFCC placeholder -> logits -> CTC decode.

    Populates the module-level globals ``x`` (input placeholder) and ``y``
    (decoded label tensor) used by ``predict``.
    """
    global x, y
    # set log level to debug
    tf.sg_verbosity(10)
    #
    # hyper parameters
    #
    batch_size = 1  # single-utterance inference
    #
    # inputs
    #
    # vocabulary size
    voca_size = data.voca_size
    # mfcc feature of audio: [batch, time (variable), 20 coefficients]
    x = tf.placeholder(dtype=tf.sg_floatx, shape=(batch_size, None, 20))
    # sequence length except zero-padding (frames whose features sum to 0)
    seq_len = tf.not_equal(x.sg_sum(axis=2), 0.).sg_int().sg_sum(axis=1)
    # encode audio feature
    logit = get_logit(x, voca_size=voca_size)
    # ctc decoding (expects time-major logits, hence the transpose)
    decoded, _ = tf.nn.ctc_beam_search_decoder(logit.sg_transpose(perm=[1, 0, 2]), seq_len, merge_repeated=False)
    # to dense tensor; +1 shifts indices — presumably so 0 can act as
    # padding/blank in data.index2str — TODO confirm against data module
    y = tf.sparse_to_dense(decoded[0].indices, decoded[0].dense_shape, decoded[0].values) + 1
def load_model():
    """Build the graph and restore the latest wavenet checkpoint into a session.

    Sets the module-level globals ``sess`` and ``model``. NOTE(review):
    despite the template's original comment, this is a TensorFlow /
    sugartensor model, not Keras.
    """
    global model, sess
    init_model()
    # run network
    sess = tf.Session()
    # init variables
    tf.sg_init(sess)
    # restore parameters from the 'wavenet_train' checkpoint directory
    saver = tf.train.Saver()
    model = saver.restore(sess, tf.train.latest_checkpoint('wavenet_train'))
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the
# view
result = {"success": False}
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST":
# initialize the data dictionary that will be returned from the
# view
# load wave file
f = flask.request.files["audio"]
#wav, _ = sf.read(io.BytesIO(file))
filename = datetime.now().strftime("%Y%m%d-%H%M%S") + ".wav"
# file = "./audioSamples/salli.wav"
f.save(secure_filename(filename))
wav, _ = librosa.load(filename, mono=True, sr=16000)
# get mfcc feature
mfcc = np.transpose(np.expand_dims(librosa.feature.mfcc(wav, 16000), axis=0), [0, 2, 1])
# run session
label = sess.run(y, feed_dict={x: mfcc})
result["predictions"] = []
# print label
r = data.print_index(label)
for index_list in label:
preds = data.index2str(index_list)
result["predictions"].append(preds)
# indicate that the request was a success
result["success"] = True
os.remove(filename)
# return the data dictionary as a JSON response
return flask.jsonify(result)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
    # Checkpoint restore can take a while; announce before loading.
    print(("* Loading wavenet model and Flask starting server..."
           "please wait until server has fully started"))
    load_model()
    # Listen on all interfaces. NOTE(review): the cURL example at the top
    # of the file says port 5000, but the server actually binds 7012.
    app.run(host='0.0.0.0', port=7012)
|
#!/usr/bin/env python3
from typing import Optional, Tuple

import rsa
from sympy import Symbol, solve

from ContinuedFraction import ContinuedFraction
from libs.RSAvulnerableKeyGenerator import generateKeys
class Wiener:
def __init__(self,
             n: Optional[int] = None,
             e: Optional[int] = None,
             nbits: int = 1024):
    """
    Initializes a class for demonstrating Wiener's Attack (small private
    exponent attack) on RSA.
    You can either supply the class with your own public key (N,e) to try
    it out in a real scenario, or leave them blank to let the program
    generate a keypair vulnerable to the attack.
    In the latter case, you can use the nbits parameter to state how many
    bits (how strong) you want the key size to be. The larger the key
    size, the more complicated the calculation (and thus longer time). If
    you leave it blank, a default key size value of 1024 bits will be
    used. However in reality, it is more common to use 2048 or 4096 bits.
    :param n: the modulus N of your given RSA key
    :param e: the public exponent e of your given RSA key
    :param nbits: size of generated vulnerable RSA key. Default to 1024.
    """
    # NOTE: the parameters were previously annotated ``int or None``,
    # which evaluates to just ``int`` at runtime — Optional[int] is the
    # correct spelling.
    print("=" * 20, "Initialization", "=" * 20)
    if n is None or e is None:
        print("[!] No public key given. Will generate a new keypair.")
        n, e = self._generateKeys(nbits)
    # the local variable n is in lowercase to comply with naming conventions
    self.N = n
    self.e = e
    # Will be used to store encrypted ciphertext
    self.enc_message: Optional[bytes] = None
    print("=" * 20, "Initialization", "=" * 20)
def _generateKeys(self, nbits: int) -> Tuple[int, int]:
"""
An internal method called to generate an RSA keypair vulnerable to
Wiener's Attack. The nbits parameter is passed down from the __init__
method.
:param nbits: size of generated vulnerable RSA key.
:return: the public key (N,e)
"""
# Note that during keypair generation, the records of p and q are
# destroyed as they are only intermediate variables and the leak of
# which will render the keypair vulnerable.
e, n, d = generateKeys(nbits)
print("[+] Generated an RSA keypair vulnerable to Wiener's attack.")
print("N:\t", n)
print("e:\t", e)
print("d:\t", d)
# We will not return the private exponent d, which is highly
# secretive and unknown to attacker.
return n, e
def encrypt(self, message: str or None = None) -> bytes:
"""
Encrypts a message with the public key stored in the class.
:param message: The message for encryption. Leave it blank to prompt
for user input.
:return: The encrypted message in binary bytes
"""
print("\n" + "=" * 20, "Encryption", "=" * 20)
print("Let's say Alex owns the private key and publishes the public "
"key so that others can send him messages only he can decrypt.")
print("Now, Ye uses the public key to send Alex an encrypted message.")
if message is None:
# No message entered, prompt for user input
message = input("[+] Message Content: ")
else:
print("[+] Message Content:", message)
# Here the message is first encoded from a string into byte using UTF-8
pubkey = rsa.PublicKey(self.N, self.e)
enc_message = rsa.encrypt(message.encode("utf8"), pubkey)
# The encrypted bytes is converted into hexadecimal digits to print out.
print("[+] Encrypted Message (hex):\t", enc_message.hex())
# Only the ciphertext is stored at the class level, since only Ye
# knows the original text message.
self.enc_message = enc_message
print("=" * 20, "Encryption", "=" * 20)
return enc_message
def crack(self) -> Tuple[int, int, int, int]:
"""
Cracks the RSA cipher using Wiener's Attack. It uses properties of
continued fractions to guess the value of k/d using the convergents
of k/N. Detailed proof is given in our presentation.
:return: Cracked RSA private key defined by (p, q, d, φ(N))
"""
print("\n" + "=" * 20, "CRACKING", "=" * 20)
print("As attacker, we intercepted the encrypted message.")
print("We also have knowledge of the public key (N,e).")
print("Let us now apply Wiener's Attack to the known public key.")
print("To do so, we are going to approximate d through the continued "
"fraction expansion of e/N")
input("Press Enter to start cracking...")
# the local variable n is in lowercase to comply with naming conventions
e, n = self.e, self.N
cf = ContinuedFraction(e, n)
expansions = cf.expansion()
print("\n[+] Found the continued fraction expansion of e/N")
print(expansions)
# See the slides for detailed proof
print("\nAs demonstrated in slides, we will use these coefficients to "
"recursively calculate the convergents of e/N, and use these "
"convergents to approximate k/d and guess their values.")
print("To verify that our guess is correct, we will go through the "
"following steps \n"
"1. Calculate φ(N) = (ed-1)/k\n"
"2. Solve the equation x^2 - (N-φ(N)+1)x + N = 0. "
"Ideally, p and q will be the two roots.\n"
"3. We will use the property N=pq to verify that.")
input("Press Enter to start iterating over convergents...")
convergents = cf.convergents_iter()
# a flag indicating whether the attack works
solved = False
# The convergent efficiently approximates e/N, which is then be used to
# approximate k/d. That is why we name them as k_guess and d_guess.
# See the slides for detailed proof.
for k_guess, d_guess in convergents:
print(f"[+] Trying k/d = {k_guess} / {d_guess}", end="\t")
if k_guess == 0:
# invalid
print("INCORRECT")
continue
# Recall that k * φ(N) = ed - 1 (because ed ≡ 1 (mod φ(N)))
# With (N,e) known and (k,d) approximated, we can deduce φ(N)
phi_guess = (e * d_guess - 1) // k_guess
# We use sympy to solve this equation.
# See the slides for why p and q are roots.
x = Symbol('x', integer=True)
roots = solve(x ** 2 + (phi_guess - n - 1) * x + n, x)
# There should be exactly two roots (p and q are distinct primes)
# if not, we proceed to the next attempted guess of k/d
if len(roots) != 2:
print("INCORRECT")
continue
# We verify if the guess works by multiplying the roots, which
# should give us N
p_guess, q_guess = roots
if p_guess * q_guess != n:
# This (p,q) pair is incorrect, proceed to next attempted guess
print("INCORRECT")
continue
print('\n\n[+] This guess worked! It gives us:')
print("p:\t", p_guess)
print("q:\t", q_guess)
print("N:\t", n)
print("e:\t", e)
print("d:\t", d_guess)
print('φ(N):', phi_guess)
# Cracked the private key! Breaking out of the iteration
solved = True
break
if not solved:
print("[-] Wiener's Attack failed")
raise Exception("Wiener's Attack failed")
print("=" * 20, "CRACKING", "=" * 20)
return p_guess, q_guess, d_guess, phi_guess
def decrypt(self, enc_message: bytes or None = None) -> str:
"""
Decrypt the message by cracking the cipher.
:param enc_message: The message to be decrypted. Leave it blank to use
the message we encrypted in self.encrypt()
:return: The decrypted message
"""
if enc_message is None:
if self.enc_message is None:
raise Exception("An encrypted message is needed for decryption")
enc_message = self.enc_message
# Call the method to crack the cipher
p_guess, q_guess, d_guess, _ = self.crack()
print("\n" + "=" * 20, "Decrypt the private message", "=" * 20)
print("Since we have now cracked the private key (N,d), let's use it "
"to decrypt the private message sent to Alex!")
# Technically, only (N,d) is needed to recover the message, but the
# library I use requires all variables to instantiate the
# rsa.PrivateKey class.
privkey = rsa.PrivateKey(self.N, self.e, d_guess, p_guess, q_guess)
dec_message = rsa.decrypt(enc_message, privkey).decode("utf8")
print("[+] Decrypted Message:\t", dec_message)
print("=" * 20, "Decrypt the private message", "=" * 20)
return dec_message
if __name__ == '__main__':
    # Demo run: generate a vulnerable keypair, encrypt an interactively
    # entered message, then recover it from the public key alone.
    w = Wiener()
    w.encrypt()
    w.decrypt()
    print("\nYay! 🎉")
|
#!/usr/bin/env python3
# TODO:
# - Apply bitmask on opcodes to zero out variant bits (e.g. relative and absolute addresses or nops)
# - Hash resulting instruction bytes instead of mnemonics
# - https://www.hex-rays.com/products/ida/tech/flirt/in_depth/#Variability
import filterdiff
import ratio
import r2pipe
import sys
def parse_functions(filename, needle=None):
    """Disassemble `filename` with radare2 and summarize its functions.

    :param filename: path of the binary to analyze.
    :param needle: optional function offset; when set, only the function at
        exactly that offset is kept.
    :return: list of dicts with keys name/offset/instructions/opcodes/hash,
        where `hash` fingerprints the mnemonic sequence.
        NOTE(review): `hash()` on str tuples is salted per process, so these
        fingerprints are only comparable within one run — confirm intended.
    """
    r2p = r2pipe.open(filename)
    r2p.cmd("aaa")  # radare2 full auto-analysis
    functions = r2p.cmdj("aflj")  # JSON list of discovered functions
    parsed_functions = []
    for f in functions:
        if f["name"].startswith("sym.imp."):
            # Skip imports
            continue
        if needle and f["offset"] != needle:
            # Skip unmatched offsets
            continue
        instructions = []
        opcodes = []
        # FIXME: Consider `pdrj` for non-linear obfuscated functions
        # - [radare2 disassembly commands doesn't work properly. · Issue #11325 · radareorg/radare2 · GitHub](https://github.com/radareorg/radare2/issues/11325)
        for ins in r2p.cmdj(f"pdfj @{f['offset']}")["ops"]:
            if 'disasm' not in ins or ins['type'] == 'invalid':
                print(f"Skipping invalid function at {hex(ins['offset'])} in file (unknown)", file=sys.stderr)
                continue
            instructions.append(f"{hex(ins['offset'])} {ins['disasm']}")
            # First whitespace-separated token of the disassembly is the mnemonic.
            opcodes.append(ins["disasm"].split()[0])
        parsed_functions.append(
            {
                "name": f["name"],
                "offset": f["offset"],
                "instructions": instructions,
                "opcodes": opcodes,
                "hash": hash(tuple(opcodes)),
            }
        )
    return parsed_functions
def matches_from_functions(functions, opcode_hashes, reverse=False):
    """For every function in functions[0], pick the single most similar
    function from functions[1], then rank and filter the resulting pairs.

    Pairs with ratio 1.0 (identical) are dropped, as are pairs whose second
    function's opcode hash is tracked with a net count of 0 (present in both
    listings). When `reverse` is set, the roles of first/second are swapped
    in the emitted records.
    """
    matches = []
    for candidate in functions[0]:
        top_ratio = 0
        top_match = None
        for other in functions[1]:
            if candidate == other:
                continue
            similarity = ratio.compute_similarity(candidate["opcodes"], other["opcodes"])
            if similarity > top_ratio:
                top_ratio = similarity
                top_match = other
        if not top_match:
            # Nothing comparable found: emit a placeholder counterpart.
            top_match = {
                "name": "[N/A]",
                "offset": 0,
                "instructions": [],
                "opcodes": [],
                "hash": hash(tuple([])),
            }
        first, second = (top_match, candidate) if reverse else (candidate, top_match)
        matches.append({"ratio": round(top_ratio, 4), "first": first, "second": second})
    matches.sort(key=lambda m: m["ratio"], reverse=True)
    return [
        m for m in matches
        if m["ratio"] < 1.0
        and (m["second"]["hash"] not in opcode_hashes
             or opcode_hashes[m["second"]["hash"]] != 0)
    ]
def matches_from_functions_cross(functions, opcode_hashes, reverse=False):
    """Pair every function in functions[0] with every function in functions[1]
    (full cross product), then rank and filter the pairs.

    Unlike matches_from_functions, this records one entry per (f1, f2) pair
    rather than only the best match per f1. It is used when a specific offset
    (needle) was requested, so the listings are expected to be small.

    :param functions: 2-tuple of parsed-function lists to compare.
    :param opcode_hashes: mapping opcode-hash -> relative occurrence count
        (see compute_best_matches); pairs whose second hash has count 0 are
        filtered out as functions common to both listings.
    :param reverse: if set, swap which listing appears as "first"/"second".
    :return: list of {"ratio", "first", "second"} dicts sorted by descending
        ratio, with exact (1.0) matches removed.
    """
    best_matches = []
    for f1 in functions[0]:
        pair_ratio = 0
        picked_f2 = None
        for f2 in functions[1]:
            if f1 == f2:
                continue
            pair_ratio = ratio.compute_similarity(f1["opcodes"], f2["opcodes"])
            picked_f2 = f2
            first, second = (f2, f1) if reverse else (f1, f2)
            best_matches.append(
                {"ratio": round(pair_ratio, 4), "first": first, "second": second}
            )
        # Bug fix: the original unconditionally re-appended the last pair after
        # the loop (a copy-paste of the fallback path), duplicating it. The
        # placeholder is only needed when f1 found no counterpart at all.
        if picked_f2 is None:
            placeholder = {
                "name": "[N/A]",
                "offset": 0,
                "instructions": [],
                "opcodes": [],
                "hash": hash(tuple([])),
            }
            first, second = (placeholder, f1) if reverse else (f1, placeholder)
            best_matches.append(
                {"ratio": round(pair_ratio, 4), "first": first, "second": second}
            )
    best_matches = sorted(best_matches, key=lambda x: x["ratio"], reverse=True)
    best_matches = [
        x for x in best_matches
        if x["ratio"] < 1.0
        and (x["second"]["hash"] not in opcode_hashes
             or opcode_hashes[x["second"]["hash"]] != 0)
    ]
    return best_matches
def compute_best_matches(filename1, filename2, needle1=None, needle2=None):
    """Analyze two binaries with radare2 and return ranked function matches.

    :param filename1: path of the first binary.
    :param filename2: path of the second binary.
    :param needle1: optional offset restricting analysis of the first binary.
    :param needle2: optional offset restricting analysis of the second binary.
    :return: {"matches": ranked match dicts, "width": longest first-function
        name} — suitable for ratio_summary().
    """
    # Bug fix: previously read sys.argv[1]/[2] directly, ignoring the
    # filename parameters and breaking every non-CLI caller.
    parsed_functions_1 = parse_functions(filename1, needle1)
    parsed_functions_2 = parse_functions(filename2, needle2)
    # To avoid false positives due to functions in the first listing
    # also existing in the second listing, track relative number of
    # occurrences. This way, only functions exclusive to the second listing
    # are persisted when filtering matches.
    opcode_hashes = {}
    for pf1 in parsed_functions_1:
        opcode_hashes[pf1["hash"]] = opcode_hashes.get(pf1["hash"], 0) + 1
    for pf2 in parsed_functions_2:
        opcode_hashes[pf2["hash"]] = opcode_hashes.get(pf2["hash"], 0) - 1
    if needle1 or needle2:
        # Needle mode: listings are tiny, so use the full cross comparison.
        best_matches = matches_from_functions_cross(
            (parsed_functions_1, parsed_functions_2), opcode_hashes
        )
    else:
        best_matches = matches_from_functions(
            (parsed_functions_1, parsed_functions_2), opcode_hashes
        )
    # To include new functions from the second listing, we do a second pass,
    # processing the unmatched functions of both listings from the first pass.
    best_opcode_hashes = set()
    for bm in best_matches:
        best_opcode_hashes.add(bm["first"]["hash"])
        best_opcode_hashes.add(bm["second"]["hash"])

    def _distinct(parsed_functions):
        # Functions not matched in the first pass and exclusive to one listing.
        return [
            f for f in parsed_functions
            if f["hash"] not in best_opcode_hashes and opcode_hashes[f["hash"]] != 0
        ]

    distinct_functions_1 = _distinct(parsed_functions_1)
    distinct_functions_2 = _distinct(parsed_functions_2)
    # The needle and non-needle branches of the second pass were identical in
    # the original, so a single call suffices.
    # NOTE(review): best_opcode_hashes is a *set* here; the filter inside
    # matches_from_functions only subscripts it when a hash is present, which
    # the _distinct filtering above normally prevents — confirm this edge case.
    best_matches += matches_from_functions(
        (distinct_functions_2, distinct_functions_1), best_opcode_hashes, True
    )
    max_width = max((len(bm["first"]["name"]) for bm in best_matches), default=0)
    return {"matches": best_matches, "width": max_width}
def compute_diff(bm):
    """Render a masked diff between the two matched functions' instruction
    listings. Hex and decimal literals are normalized via the rule regex so
    address/immediate churn does not show up as differences."""
    mask_rules = ["((0x[0-9a-f]+)|([0-9]+))"]
    left = "\n".join(bm["first"]["instructions"])
    right = "\n".join(bm["second"]["instructions"])
    diff_lines = filterdiff.compute_diffs(mask_rules, left, right)
    return "\n".join(diff_lines)
def ratio_summary(bm, max_width):
    """One-line report for a match: similarity ratio (6 columns) plus both
    function names, the first right-aligned to `max_width` columns."""
    first_name = bm['first']['name']
    second_name = bm['second']['name']
    return f"{bm['ratio']:6} | {first_name:>{max_width}} | {second_name}"
if __name__ == "__main__":
    # CLI entry point: compare the two binaries given as argv[1] and argv[2]
    # and print one ranked summary line per function match.
    bms = compute_best_matches(sys.argv[1], sys.argv[2])
    for bm in bms["matches"]:
        print(ratio_summary(bm, bms["width"]))
|
import concurrent.futures
import tempfile
import uuid
from data_deploy.thirdparty.sshconf import *
import logging
import remoto
from data_deploy.internal.util.printer import *
class RemotoSSHWrapper(object):
    '''Simple wrapper containing a remoto connection and the file it is using as ssh config.

    Supports use as a context manager; on exit both the connection and the
    temporary ssh config file (if any) are closed.'''

    def __init__(self, connection, ssh_config=None):
        # connection: an open `remoto.Connection`, or None if connecting failed.
        # ssh_config: optional NamedTemporaryFile holding the ssh config used.
        self._connection = connection
        self._ssh_config = ssh_config
        self._open = True

    def __enter__(self):
        return self

    @property
    def connection(self):
        '''The wrapped remoto connection (may be None on connect failure).'''
        return self._connection

    @property
    def ssh_config(self):
        '''The temporary ssh config file object, or None if none was used.'''
        return self._ssh_config

    @property
    def ssh_config_path(self):
        '''Path of the temporary ssh config file (AttributeError if none).'''
        return self._ssh_config.name

    @property
    def open(self):
        '''If set, connection is open. Otherwise, Connection is closed'''
        # Idiom fix: identity comparison with None instead of `!= None`.
        return self._open and self._connection is not None

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.exit()
        return False  # never suppress exceptions raised inside the with-block

    def exit(self):
        '''Closes the connection and removes the temporary ssh config file.'''
        if self._connection:
            self._connection.exit()
        if self._ssh_config:
            self._ssh_config.close()
        self._open = False
def _build_ssh_config(hostname, ssh_params):
    '''Writes a temporary ssh config with provided parameters.
    Warning: Returned value must be closed properly.
    Args:
        hostname (str): Hostname to register.
        ssh_params (dict): Parameters to set for hostname. A valid dict would be e.g: {"IdentityFile": "/some/key.rsa", "IdentitiesOnly": "yes", "Port": 22}
    Returns:
        TemporaryFile containing the ssh config.'''
    # Bug fix: the old callable-handling branch referenced an undefined name
    # (`node`) and therefore always raised NameError. Callers (get_wrapper)
    # resolve callables before invoking this helper, so a dict is required.
    if not isinstance(ssh_params, dict):
        raise ValueError('ssh_params must be a dict, mapping ssh options to values. E.g: {{"IdentityFile": "/some/key.rsa", "IdentitiesOnly": "yes", "Port": 22}}')
    conf = empty_ssh_config_file()
    conf.add(hostname, **ssh_params)
    tmpfile = tempfile.NamedTemporaryFile()
    conf.write(tmpfile.name)
    return tmpfile
def _build_conn(hostname, loggername, silent, ssh_configpath=None):
    '''Returns a remoto-wrapped execnet connection.
    Warning: The `remoto.Connection` objects created here must be properly closed.
    Args:
        hostname (str): Remote host (or ip) to connect to.
        loggername (str): Name for the logger attached to the connection. This is useful for when you want to change the log level later.
        silent (bool): If set, the connection is as silent as possible. Only stderr prints are logged (`logging.ERROR` level).
        ssh_configpath (optional str): If set, sets execnet ssh config parameters to given path. This way, we can change ssh-based connection behaviour.
    Returns:
        configured `remoto.Connection` object on success, `None` on failure.'''
    logging.basicConfig()
    logger = logging.getLogger(loggername)
    logger.setLevel(logging.ERROR if silent else logging.DEBUG)
    kwargs = {'logger': logger}
    if ssh_configpath:
        # Route execnet's ssh through our generated config file.
        kwargs['ssh_options'] = '-F {}'.format(ssh_configpath)
    try:
        return remoto.Connection(hostname, **kwargs)
    except Exception as e:
        # Bug fix: the caught exception was silently discarded; include it so
        # connection failures are actually diagnosable.
        printe('Could not connect to remote host {}: {}'.format(hostname, e))
        return None
def get_wrapper(node, hostname, ssh_params=None, loggername=None, silent=False):
    '''Gets a connection wrapper.
    Warning: The `RemotoSSHWrapper` objects created here must be properly closed. A "with" clause is supported to close all wrappers on function exit.
    Args:
        node (metareserve.Node): Node to build connection for.
        hostname (str, callable): Name to register connection to. Callables must take 1 node as argument, and output the hostname (`str`).
        ssh_params (optional dict, callable): If set, builds a temporary ssh config file with provided options to open connection with.
            Can be a callable (i.e. function/lambda), which takes 1 node as argument, and outputs the dict with ssh config options (or `None`) for that node.
        loggername (optional str, callable): Name for logger. Can be either a `str` or a callable. Callables must take 1 node as argument, and output the logger name (`str`) to use for that node. If not set, uses random logger name.
        silent (optional bool): If set, connection is silent (except when reporting errors).
    Returns:
        `RemotoSSHWrapper`. Note: a wrapper is always returned; on connection
        failure its `.connection` attribute is `None` (the previous docstring
        incorrectly claimed `None` was returned).'''
    if loggername is None:  # idiom fix: identity check instead of `== None`
        loggername = 'logger-' + str(uuid.uuid4())
    elif callable(loggername):
        loggername = loggername(node)
    if callable(hostname):
        hostname = hostname(node)
    if callable(ssh_params):
        # Resolve per-node ssh params here so _build_ssh_config gets a dict.
        ssh_params = ssh_params(node)
    ssh_config = _build_ssh_config(hostname, ssh_params) if ssh_params else None
    conn = _build_conn(hostname, loggername, silent, ssh_configpath=ssh_config.name if ssh_config else None)
    return RemotoSSHWrapper(conn, ssh_config=ssh_config)
def get_wrappers(nodes, hostnames, ssh_params=None, loggername=None, parallel=True, silent=False):
    '''Gets multiple wrappers at once.
    Warning: The `RemotoSSHWrapper` objects created here must be properly closed.
    Args:
        nodes (iterable of metareserve.Node): Nodes to build connection for.
        hostnames (dict(metareserve.Node, str), callable): Names to register connections to. Either a dict mapping nodes to hostnames, or a callable taking 1 node and returning its hostname.
        ssh_params (optional dict or callable): If set, builds a temporary ssh config file with provided options to open connection with. Callables take 1 node and return that node's ssh option dict (or `None`).
        loggername (optional callable): Callable taking 1 node and returning the logger name (`str`) for that node. If not set, random logger names are used.
        parallel (optional bool): If set, creates wrappers in parallel. Otherwise, creates sequentially.
        silent (optional bool): If set, connections are silent (except when reporting errors).
    Returns:
        `dict(metareserve.Node, RemotoSSHWrapper)`, mapping each node to its open connection wrapper. A wrapper can be `None`, indicating failure to connect to that node.'''
    if not isinstance(hostnames, dict):
        hostnames = {node: hostnames(node) for node in nodes}
    if not parallel:
        return {node: get_wrapper(node, hostnames[node], ssh_params=ssh_params, loggername=loggername, silent=silent) for node in nodes}
    with concurrent.futures.ThreadPoolExecutor(max_workers=len(nodes)) as executor:
        futures = {node: executor.submit(get_wrapper, node, hostnames[node], ssh_params=ssh_params, loggername=loggername, silent=silent) for node in nodes}
        return {node: future.result() for node, future in futures.items()}
def close_wrappers(wrappers, parallel=True):
    '''Closes an iterable of wrappers.
    Args:
        wrappers (RemotoSSHWrapper, list(RemotoSSHWrapper), dict(RemotoSSHWrapper)): Wrappers to close. Dicts may have wrappers as keys or as values.
        parallel (optional bool): If set, closes connections in parallel. Otherwise, closes connections sequentially.'''
    if isinstance(wrappers, RemotoSSHWrapper):
        closables = [wrappers]
    elif isinstance(wrappers, dict):
        # Robustness fix: an empty dict used to raise IndexError below.
        if not wrappers:
            return
        if isinstance(list(wrappers.keys())[0], RemotoSSHWrapper):
            closables = wrappers.keys()
        elif isinstance(wrappers[list(wrappers.keys())[0]], RemotoSSHWrapper):
            closables = wrappers.values()
        else:
            raise ValueError('Provided dict has no RemotoSSHWrapper keys(={}) or values(={})'.format(type(list(wrappers.keys())[0]), type(wrappers[list(wrappers.keys())[0]])))
    elif isinstance(wrappers, list):
        closables = wrappers
    else:
        raise ValueError('Cannot close given wrappers: No dict, list, or single wrapper passed: {}'.format(wrappers))
    # Robustness fix: ThreadPoolExecutor(max_workers=0) raises ValueError,
    # so bail out early when there is nothing to close.
    if not closables:
        return
    if parallel:
        with concurrent.futures.ThreadPoolExecutor(max_workers=len(closables)) as executor:
            futures_close = [executor.submit(x.exit) for x in closables]
            for x in futures_close:
                x.result()
    else:
        for x in closables:
            x.exit()
"""
Copyright [2017-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import requests
import datetime
from module import gff_file
def get_recent_genes_from_apollo(base_url, username, password, days=1):
    """Fetch annotations touched within the last `days` days from Apollo.

    Returns the decoded JSON payload on HTTP 200, otherwise False.
    """
    payload = {'username': username, 'password': password, 'days': days}
    response = requests.post(base_url + 'annotationEditor/getRecentAnnotations', json=payload)
    if response.status_code != requests.codes.ok:
        return False
    return response.json()
def get_gff(base_url, username, password, genes, out_dir):
    """Export the given gene uniquenames from Apollo as a GFF3 file.

    :param genes: iterable of gene uniquename identifiers.
    :param out_dir: directory prefix for the output file (must end with a
        path separator; the filename is date-stamped).
    :return: the written file path on HTTP 200, otherwise False.
    """
    features = [{'uniquename': key} for key in genes]
    webservice_data = {'username': username, 'password': password, 'features': features}
    url = base_url + 'annotationEditor/getGff3'
    response = requests.post(url, json=webservice_data)
    time_stamp = str(datetime.datetime.now().date())
    file_name = out_dir + 'apollo_' + time_stamp + '.gff'
    if response.status_code == requests.codes.ok:
        # Bug fix: the handle was previously opened and never closed (leak).
        with open(file_name, 'w') as file_handle:
            file_handle.write(response.text)
        return file_name
    else:
        return False
def download_gff(base_url, username, password, organism, out_dir):
    """Download the full GFF3 annotation dump for one organism from Apollo.

    :param organism: Apollo organism name to export.
    :param out_dir: directory prefix for the output file (must end with a
        path separator; the filename is date-stamped).
    :return: the written file path on HTTP 200, otherwise False (the failure
        response text is printed).
    """
    webservice_data = {'username': username,
                       'password': password,
                       'type': 'GFF3',
                       'seqType': 'genomic',
                       'organism': organism,
                       'output': 'text',
                       'exportAllSequences': 'true',
                       'exportGff3Fasta': 'false'}
    url = base_url + 'IOService/write'
    response = requests.post(url, json=webservice_data)
    time_stamp = str(datetime.datetime.now().date())
    file_name = out_dir + 'apollo_' + time_stamp + '.gff'
    if response.status_code == requests.codes.ok:
        # Bug fix: the handle was previously opened and never closed (leak).
        with open(file_name, 'w') as file_handle:
            file_handle.write(response.text)
        return file_name
    else:
        print(organism + ': ' + str(response.text) + '\n')
        return False
def validate_gff(base_url, gff_file_path, gene_organism, moderator):
    """Parse and error-scan a GFF file.

    Returns the populated HandleGFF object when its error dict is non-empty,
    otherwise None.
    """
    handler = gff_file.HandleGFF(gff_file_path, gene_organism, moderator)
    handler.read_gff_file()
    handler.scan_gff_for_errors()
    handler.scan_mrna_sequence(base_url=base_url)
    return handler if handler.errors != {} else None
def sort_and_write_errors(dict_of_list, order_of_lists, index, out_dir, file_handle=None):
    """Recursively group error objects and write them to per-annotator files.

    `dict_of_list` maps each grouping key name (e.g. 'owner', 'organism_name',
    'gene_id', 'mrna_id') to a working list of error objects; `order_of_lists`
    gives the nesting order of those keys. Starting at `index`, objects
    sharing the next key's value are moved one level down (copy_function), a
    report section is written for that level (write_function), and recursion
    descends; when a level is exhausted it is cleared and recursion climbs
    back up. Returns False when the traversal is finished.
    """
    file_handle = file_handle
    # print("length of list:", order_of_lists[index], len(dict_of_list[order_of_lists[index]]))
    if index < 0:
        # Climbed above the first grouping level: traversal complete.
        return False
    elif index == 0 and len(dict_of_list[order_of_lists[index]]) == 0:
        # Top-level list drained: nothing left to write.
        return False
    elif len(dict_of_list[order_of_lists[index]]) > 0 and index + 1 < len(order_of_lists):
        # Current level still has objects and there is a deeper level:
        # move one group down, write its section, then recurse into it.
        # print("copy to and writing out for", order_of_lists[index + 1])
        copy_function(dict_of_list, order_of_lists, index)
        file_handle = write_function(dict_of_list, order_of_lists, index + 1, out_dir, file_handle)
        sort_and_write_errors(dict_of_list, order_of_lists, index + 1, out_dir, file_handle)
    else:
        # Deepest (or empty) level handled: clear it and go up one level.
        # print("clear list", order_of_lists[index])
        # print("going up to", order_of_lists[index - 1])
        dict_of_list[order_of_lists[index]].clear()
        sort_and_write_errors(dict_of_list, order_of_lists, index - 1, out_dir, file_handle)
def copy_function(dict_of_list, order_of_lists, index):
    """Move error objects sharing the next grouping key's value one level down.

    The first object at the current level defines the group value for
    order_of_lists[index + 1]; every object with that same value is copied to
    the next level's list and then removed from the current level.
    NOTE(review): a falsy group value (e.g. empty string) would re-trigger the
    'first object' branch for later objects — confirm values are never falsy.
    """
    search_term = order_of_lists[index + 1]
    search_value = str()
    for error_object in dict_of_list[order_of_lists[index]]:
        # print("search value", search_value, "search term", error_object.__dict__[search_term])
        if not search_value:
            search_value = error_object.__dict__[search_term]
            # print("setting search value to", search_value)
            dict_of_list[order_of_lists[index + 1]].append(error_object)
            continue
        if error_object.__dict__[search_term] == search_value:
            # print("copy to", order_of_lists[index + 1])
            dict_of_list[order_of_lists[index + 1]].append(error_object)
    if dict_of_list[order_of_lists[index + 1]]:
        # Remove the copied objects from the current level so they are not
        # processed twice. (Safe: iterates the *next* level's list.)
        for delete_object in dict_of_list[order_of_lists[index + 1]]:
            # print("delete from", order_of_lists[index])
            dict_of_list[order_of_lists[index]].remove(delete_object)
def write_function(dict_of_list, order_of_list, index, out_dir, file_handle=None):
    """Write the report section for grouping level order_of_list[index].

    'owner' closes the previous annotator's file, opens a fresh date-stamped
    .error file (append mode) and writes the greeting; 'organism_name' and
    'gene_id' write their group headers; 'gene_id' additionally emits
    gene-level (no-mRNA) errors and 'mrna_id' emits per-mRNA GFF and sequence
    errors. Returns the (possibly newly opened) file handle.
    """
    file_handle = file_handle
    if order_of_list[index] == 'owner':
        if file_handle:
            # Done with the previous annotator's report file.
            file_handle.close()
        owner = dict_of_list['owner'][0].owner
        time_stamp = str(datetime.datetime.now().date())
        file_name = out_dir + owner + '_' + time_stamp + '.error'
        # Append mode: the same annotator may be visited multiple times.
        file_handle = open(file_name, 'a')
        file_handle.write(owner + "\n")
        file_handle.write("Dear Annotator (" + owner + ")," + "\n")
        file_handle.write("Your annotation in Apollo hosted at VEuPathDB.org contains errors." + "\n")
    elif order_of_list[index] == 'organism_name':
        organism_name = dict_of_list['organism_name'][0].organism_name
        organism_str = "Species: {}\n".format(organism_name)
        file_handle.write(organism_str)
    elif order_of_list[index] == 'gene_id':
        gene_name = dict_of_list['gene_id'][0].gene_name
        gene_id = dict_of_list['gene_id'][0].gene_id
        locus = dict_of_list['gene_id'][0].locus
        gene_str = "Gene: {} (ID:{})\nLocation: {}\n".format(gene_name, gene_id, locus)
        file_handle.write(gene_str)
        for error_object in dict_of_list['gene_id']:
            if error_object.mrna_id is None:
                # Gene-level errors (no associated mRNA) are reported here;
                # mRNA-level ones are handled by the 'mrna_id' branch.
                for string in error_object.gff_error_text():
                    file_handle.write(string)
    elif order_of_list[index] == 'mrna_id':
        for error_object in dict_of_list['mrna_id']:
            for string in error_object.gff_error_text():
                file_handle.write(string)
            for string in error_object.sequence_error_text():
                file_handle.write(string)
    return file_handle
def write_summary_text(annotator_summary, out_dir):
    """Write a per-annotator summary file and a list of unfinished genes.

    :param annotator_summary: object with attributes email, total_gene_count,
        finished_gene_count and unfinished_gene_list.
    :param out_dir: directory prefix for the output files (must end with a
        path separator).
    Writes `<email>.summary` and `<email>.gene_list` under out_dir.
    """
    file_name = out_dir + annotator_summary.email + '.summary'
    gene_list_name = out_dir + annotator_summary.email + '.gene_list'
    unfinished_genes = annotator_summary.total_gene_count - annotator_summary.finished_gene_count
    # Bug fix: both handles were previously opened and never closed (leak);
    # use context managers so the data is flushed to disk deterministically.
    with open(file_name, 'w') as file_handle:
        file_handle.write(annotator_summary.email + "\n")
        file_handle.write('Dear Annotator (' + annotator_summary.email + '),' + "\n")
        file_handle.write('Here is a summary of your annotation in Apollo hosted at VEuPathDB.org.' + "\n")
        file_handle.write('Finished Genes: ' + str(annotator_summary.finished_gene_count) + "\n")
        file_handle.write('Unfinished Genes: ' + str(unfinished_genes))
    # file_handle.write('The annotation contains the following errors:')
    with open(gene_list_name, 'w') as gene_list_handle:
        for gene_name in annotator_summary.unfinished_gene_list:
            gene_list_handle.write(gene_name + "\n")
def send_email_mailgun(url, api_key, from_address, email_address, subject, message, file_attached=None):
    """Send an email through the Mailgun HTTP API.

    :param file_attached: optional path of a file to attach (sent under the
        fixed name 'unfinished_genes.txt').
    :return: the `requests.Response` from the Mailgun endpoint.
    """
    data = {"from": from_address, "to": email_address, "subject": subject, "text": message}
    if file_attached:
        # Bug fix: the attachment was read via a bare open() whose handle was
        # never closed; use a context manager instead.
        with open(file_attached, "rb") as attachment:
            files = [("attachment", ("unfinished_genes.txt", attachment.read()))]
        return requests.post(url, auth=("api", api_key), files=files, data=data)
    else:
        return requests.post(url, auth=("api", api_key), data=data)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
import sys
from .. import utils
from .trace import EventTypes
logger = utils.get_logger()
def merge_ranges(src_ranges, is_sorted=False):
    """Coalesce overlapping/touching (begin, end) ranges into a minimal list.

    When is_sorted is False the input list is sorted in place by begin first
    (side effect preserved from the original implementation).
    """
    merged = []
    if src_ranges:
        if not is_sorted:
            src_ranges.sort(key=lambda r: r[0])
        merged.append((src_ranges[0][0], src_ranges[0][1]))
        for begin, end in src_ranges[1:]:
            last_begin, last_end = merged[-1]
            if end > last_end:
                if begin <= last_end:
                    # Overlaps or touches the last merged range: extend it.
                    merged[-1] = (last_begin, end)
                else:
                    merged.append((begin, end))
    return merged
def subtract_ranges_lists(range_list1, range_list2):
    """Return range_list1 minus range_list2 (interval set difference).

    Both inputs are lists of (begin, end) pairs, assumed sorted and
    non-overlapping (e.g. output of merge_ranges) — the asserts below rely on
    that. Inputs are not modified; pop_list only advances indices.
    """
    range_list_dst = []
    if len(range_list1) == 0:
        return range_list_dst
    if len(range_list2) == 0:
        # Nothing to subtract: return a shallow copy.
        range_list_dst = list(range_list1)
        return range_list_dst
    # Sweep both lists left to right; r1/r2 hold the current (possibly
    # partially consumed) ranges. pop_list returns (None, len(list)) at the
    # end, which terminates the loop via the i1 bound.
    r1 = range_list1[0]
    r2 = range_list2[0]
    i1 = i2 = 0
    while i1 < len(range_list1):
        if i2 == len(range_list2):
            # Subtrahend exhausted: keep the remainder of r1 unchanged.
            range_list_dst.append(r1)
            r1, i1 = pop_list(range_list1, i1)
        elif r2[1] <= r1[0]:
            # r2 lies entirely before r1: discard it.
            r2, i2 = pop_list(range_list2, i2)
        elif r2[0] <= r1[0] and r2[1] < r1[1]:
            # r2 covers the head of r1: trim r1's start.
            r1 = (r2[1], r1[1])
            r2, i2 = pop_list(range_list2, i2)
        elif r2[0] <= r1[0]:
            # r2 covers all of r1: drop r1, keep r2's tail for the next r1.
            assert (r2[1] >= r1[1])
            r2 = (r1[1], r2[1])
            r1, i1 = pop_list(range_list1, i1)
        elif r2[0] < r1[1]:
            # r2 starts inside r1: emit the uncovered prefix, keep the rest.
            assert (r2[0] > r1[0])
            range_list_dst.append((r1[0], r2[0]))
            r1 = (r2[0], r1[1])
        else:
            # r2 lies entirely after r1: keep r1 whole.
            assert (r2[0] >= r1[1])
            range_list_dst.append(r1)
            r1, i1 = pop_list(range_list1, i1)
    return range_list_dst
def intersection_ranges_lists(range_list1, range_list2):
    """Return the interval intersection of two (begin, end) range lists.

    Both inputs are assumed sorted and non-overlapping (e.g. output of
    merge_ranges) — the asserts below rely on that. Inputs are not modified;
    pop_list only advances indices.
    """
    range_list_dst = []
    if len(range_list1) == 0 or len(range_list2) == 0:
        return range_list_dst
    # Two-pointer sweep; r1/r2 hold the current (possibly partially consumed)
    # ranges from each list.
    r1 = range_list1[0]
    r2 = range_list2[0]
    i1 = i2 = 0
    while i1 < len(range_list1):
        if i2 == len(range_list2):
            # Second list exhausted: no further overlap possible.
            break
        elif r2[1] <= r1[0]:
            # r2 entirely before r1: skip it.
            r2, i2 = pop_list(range_list2, i2)
        elif r2[0] <= r1[0] and r2[1] < r1[1]:
            # r2 overlaps the head of r1: emit the overlap, trim r1.
            assert (r2[1] > r1[0])
            range_list_dst.append((r1[0], r2[1]))
            r1 = (r2[1], r1[1])
            r2, i2 = pop_list(range_list2, i2)
        elif r2[0] <= r1[0]:
            # r2 covers all of r1: emit r1, keep r2's tail.
            assert (r2[1] >= r1[1])
            range_list_dst.append(r1)
            r2 = (r1[1], r2[1])
            r1, i1 = pop_list(range_list1, i1)
        elif r2[1] < r1[1]:
            # r2 lies strictly inside r1: emit r2, trim r1.
            assert (r2[0] > r1[0])
            range_list_dst.append(r2)
            r1 = (r2[1], r1[1])
            r2, i2 = pop_list(range_list2, i2)
        elif r2[0] < r1[1]:
            # r2 overlaps the tail of r1: emit the overlap, keep r2's tail.
            assert (r2[1] >= r1[1])
            range_list_dst.append((r2[0], r1[1]))
            r2 = (r1[1], r2[1])
            r1, i1 = pop_list(range_list1, i1)
        else:
            # r2 entirely after r1: advance r1.
            assert (r2[0] >= r1[1])
            r1, i1 = pop_list(range_list1, i1)
    return range_list_dst
def get_ranges_sum(ranges):
    """Return the total length covered by a list of (begin, end) ranges.

    Note: overlapping ranges are counted multiple times; merge the list
    first (merge_ranges) if distinct coverage is needed.
    """
    # Idiom fix: the original shadowed the built-ins `sum` and `range`.
    return sum(end - begin for begin, end in ranges)
def pop_list(range_list, index):
    """Advance past position `index` in `range_list`.

    Returns (next_item, next_index); when the list is exhausted, returns
    (None, len(range_list)) so callers' loop bounds terminate.
    """
    advanced = index + 1
    if advanced < len(range_list):
        return range_list[advanced], advanced
    return None, len(range_list)
class OverallParser(object):
    class Costs:
        """Per-step time breakdown, one field per event category (values are
        durations in the trace's time unit)."""
        def __init__(self):
            self.step_total_cost = 0
            self.kernel_cost = 0
            self.memcpy_cost = 0
            self.memset_cost = 0
            self.runtime_cost = 0
            self.dataloader_cost = 0
            self.cpuop_cost = 0
            self.other_cost = 0

        def calculate_costs(self, statistics, step):
            """Fill all cost fields from an OverallParser.Statistics that has
            already been restricted to `step`, a (begin, end) time pair."""
            self.step_total_cost = step[1] - step[0]
            self.kernel_cost = get_ranges_sum(statistics.kernel_cost_ranges)
            self.memcpy_cost = get_ranges_sum(statistics.memcpy_cost_ranges)
            self.memset_cost = get_ranges_sum(statistics.memset_cost_ranges)
            self.runtime_cost = get_ranges_sum(statistics.runtime_cost_ranges)
            self.dataloader_cost = get_ranges_sum(statistics.dataloader_cost_ranges)
            self.cpuop_cost = get_ranges_sum(statistics.cpuop_cost_ranges)
            self.other_cost = get_ranges_sum(statistics.other_cost_ranges)
    class Statistics:
        """Per-category (begin, end) time-range lists, used to derive Costs."""
        def __init__(self):
            # Each member is a list of (begin, end) ranges for one category.
            self.kernel_cost_ranges = []
            self.memcpy_cost_ranges = []
            self.memset_cost_ranges = []
            self.runtime_cost_ranges = []
            self.dataloader_cost_ranges = []
            self.cpuop_cost_ranges = []
            self.other_cost_ranges = []

        def intersection_with_step(self, step):
            """Return a new Statistics with every category's ranges clipped to
            `step`, a single (begin, end) time pair."""
            result = OverallParser.Statistics()
            # intersection_ranges_lists expects a *list* of ranges.
            step = [step]
            result.kernel_cost_ranges = intersection_ranges_lists(step, self.kernel_cost_ranges)
            result.memcpy_cost_ranges = intersection_ranges_lists(step, self.memcpy_cost_ranges)
            result.memset_cost_ranges = intersection_ranges_lists(step, self.memset_cost_ranges)
            result.runtime_cost_ranges = intersection_ranges_lists(step, self.runtime_cost_ranges)
            result.dataloader_cost_ranges = intersection_ranges_lists(step, self.dataloader_cost_ranges)
            result.cpuop_cost_ranges = intersection_ranges_lists(step, self.cpuop_cost_ranges)
            result.other_cost_ranges = intersection_ranges_lists(step, self.other_cost_ranges)
            return result
    def __init__(self):
        # Raw (begin, end) time ranges collected per event category.
        self.kernel_ranges = []
        self.memcpy_ranges = []
        self.memset_ranges = []
        self.runtime_ranges = []
        self.dataloader_ranges = []
        self.cpuop_ranges = []
        # Profiler step ranges and their display names.
        self.steps = []
        self.steps_names = []
        self.is_gpu_used = False
        # Extreme event timestamps observed; used as the fallback single step
        # when no explicit steps are present (see parse_events).
        self.min_ts = sys.maxsize
        self.max_ts = -sys.maxsize - 1
        # Per-step Costs plus a running accumulation across all steps
        # (presumably divided by step count later to become averages —
        # confirm against the code past this chunk).
        self.steps_costs = []
        self.avg_costs = OverallParser.Costs()
def parse_events(self, events):
    """Aggregate all trace events into per-category costs, per step.

    Pipeline: collect raw ranges per event (parse_event), merge
    overlapping ranges within each category, then assign every instant
    of step time to exactly one category using the priority order
    kernel > memcpy > memset > runtime > dataloader > cpu-op > other.
    Finally compute one Costs record per step and their average.
    """
    logger.debug("Overall, parse events")
    for event in events:
        self.parse_event(event)
    # Coalesce overlapping/adjacent ranges inside each category.
    self.kernel_ranges = merge_ranges(self.kernel_ranges)
    self.memcpy_ranges = merge_ranges(self.memcpy_ranges)
    self.memset_ranges = merge_ranges(self.memset_ranges)
    self.runtime_ranges = merge_ranges(self.runtime_ranges)
    self.dataloader_ranges = merge_ranges(self.dataloader_ranges)
    self.cpuop_ranges = merge_ranges(self.cpuop_ranges)
    # Without ProfilerStep events, treat the whole trace as one step "0",
    # so valid_steps below is always >= 1 and the averages are defined.
    if len(self.steps) == 0:
        self.steps.append((self.min_ts, self.max_ts))
        self.steps_names.append("0")
    merged_steps = list(self.steps)
    merged_steps = merge_ranges(merged_steps)
    logger.debug("Overall, statistics")
    # Priority partitioning: each category claims whatever time is left
    # ("slots") after all higher-priority categories took theirs.
    global_stats = OverallParser.Statistics()
    global_stats.kernel_cost_ranges = self.kernel_ranges
    slots = subtract_ranges_lists(merged_steps, self.kernel_ranges)
    global_stats.memcpy_cost_ranges = intersection_ranges_lists(slots, self.memcpy_ranges)
    slots = subtract_ranges_lists(slots, global_stats.memcpy_cost_ranges)
    global_stats.memset_cost_ranges = intersection_ranges_lists(slots, self.memset_ranges)
    slots = subtract_ranges_lists(slots, global_stats.memset_cost_ranges)
    global_stats.runtime_cost_ranges = intersection_ranges_lists(slots, self.runtime_ranges)
    slots = subtract_ranges_lists(slots, global_stats.runtime_cost_ranges)
    global_stats.dataloader_cost_ranges = intersection_ranges_lists(slots, self.dataloader_ranges)
    slots = subtract_ranges_lists(slots, global_stats.dataloader_cost_ranges)
    global_stats.cpuop_cost_ranges = intersection_ranges_lists(slots, self.cpuop_ranges)
    slots = subtract_ranges_lists(slots, global_stats.cpuop_cost_ranges)
    # Whatever step time remains is uncategorized "other" time.
    global_stats.other_cost_ranges = slots
    logger.debug("Overall, aggregation")
    # Clip the global statistics to each step, sum into avg_costs ...
    valid_steps = len(self.steps)
    for i in range(valid_steps):
        steps_stat = global_stats.intersection_with_step(self.steps[i])
        self.steps_costs.append(OverallParser.Costs())
        self.steps_costs[i].calculate_costs(steps_stat, self.steps[i])
        self.avg_costs.step_total_cost += self.steps_costs[i].step_total_cost
        self.avg_costs.kernel_cost += self.steps_costs[i].kernel_cost
        self.avg_costs.memcpy_cost += self.steps_costs[i].memcpy_cost
        self.avg_costs.memset_cost += self.steps_costs[i].memset_cost
        self.avg_costs.runtime_cost += self.steps_costs[i].runtime_cost
        self.avg_costs.dataloader_cost += self.steps_costs[i].dataloader_cost
        self.avg_costs.cpuop_cost += self.steps_costs[i].cpuop_cost
        self.avg_costs.other_cost += self.steps_costs[i].other_cost
    # ... then divide to get the mean cost per step.
    self.avg_costs.step_total_cost /= valid_steps
    self.avg_costs.kernel_cost /= valid_steps
    self.avg_costs.memcpy_cost /= valid_steps
    self.avg_costs.memset_cost /= valid_steps
    self.avg_costs.runtime_cost /= valid_steps
    self.avg_costs.dataloader_cost /= valid_steps
    self.avg_costs.cpuop_cost /= valid_steps
    self.avg_costs.other_cost /= valid_steps
def parse_event(self, event):
    """Record a single trace event into the matching category range list.

    Also flags GPU usage on any runtime event and keeps the global
    min/max timestamps up to date.
    """
    start = event.ts
    end = event.ts + event.duration
    evt_type = event.type
    if evt_type == EventTypes.KERNEL:
        self.kernel_ranges.append((start, end))
    elif evt_type == EventTypes.MEMCPY:
        self.memcpy_ranges.append((start, end))
    elif evt_type == EventTypes.MEMSET:
        self.memset_ranges.append((start, end))
    elif evt_type == EventTypes.RUNTIME:
        self.runtime_ranges.append((start, end))
    elif (evt_type == EventTypes.OPERATOR
          and event.name.startswith("enumerate(DataLoader)#")
          and event.name.endswith(".__next__")):
        # DataLoader __next__ operators are tracked separately.
        self.dataloader_ranges.append((start, end))
    elif evt_type == EventTypes.PROFILER_STEP:
        self.steps.append((start, end))
        self.steps_names.append(str(event.step))
    elif evt_type in (EventTypes.PYTHON, EventTypes.OPERATOR):
        self.cpuop_ranges.append((start, end))
    # Any runtime-API activity implies a GPU was in use.
    if evt_type == EventTypes.RUNTIME:
        self.is_gpu_used = True
    self.min_ts = min(self.min_ts, start)
    self.max_ts = max(self.max_ts, end)
|
<reponame>tanaysh7/Youtube_Inspector
"""
This code parses flatfile data containing timestamps and youtube subtitles
and creates a JSON file from it.
"""
from os import path, listdir
import re
import json
# def remove_timestamps():
# import os
# dirPath = os.path.dirname(os.path.realpath(__file__))+"/data"
# print(dirPath)
# for f in listdir(dirPath):
# match = re.search('_(.*)', f)
# if match:
# fileName = match.groups()[0]
# os.rename(dirPath+"/"+f, dirPath+"/"+fileName)
# print(match)
def transfer_labels():
    """Merge labels from reviewed2.json into reviewed.json.

    Every truthy (non-empty) label in reviewed2.json overwrites or adds
    the entry with the same key in reviewed.json; empty labels are
    skipped. The merged mapping is written back to reviewed.json.
    """
    # Use context managers: the original opened three file handles and
    # never closed any of them.
    with open("reviewed.json", 'r') as handle:
        to_file = json.load(handle)
    with open("reviewed2.json", 'r') as handle:
        from_file = json.load(handle)
    for key, label in from_file.items():
        # `label and label != ""` collapsed: a truthy string is never "".
        if label:
            to_file[key] = label
    with open("reviewed.json", "w") as handle:
        json.dump(to_file, handle)
def count_final():
    """Print how many documents ended up in final.json's corpus."""
    with open("final.json", 'r') as handle:
        corpus = json.load(handle)['corpus']
    print(len(corpus))
class Review:
    """Tracks manual-review verdicts for subtitle files.

    Loads two JSON maps on construction: reviewed.json (file -> final
    label, or a falsy placeholder) and to_review.json (file -> details
    of why the file still needs a human look).
    """

    def __init__(self):
        with open('reviewed.json', 'r') as handle:
            self.reviewed_json = json.load(handle)
        with open('to_review.json', 'r') as handle:
            self.to_review = json.load(handle)

    def log_reviewed(self, file_name, finalized, details):
        """Record a verdict; non-final verdicts also queue their details."""
        self.reviewed_json[file_name] = finalized
        if finalized:
            return
        self.to_review[file_name] = details

    def write_to_file(self):
        """Persist both review maps back to their JSON files."""
        with open('reviewed.json', 'w') as handle:
            json.dump(self.reviewed_json, handle)
        with open('to_review.json', 'w') as handle:
            json.dump(self.to_review, handle)
def bad_word_parse():
    """Convert the comma-separated bad_words.txt into bad_word.json."""
    with open('bad_words.txt', 'r') as source:
        entries = source.read().split(", ")
    with open('bad_word.json', 'w') as sink:
        sink.write(json.dumps({"offensive_words": entries}))
def count_words(text):
    """Placeholder for a word-count helper; never implemented (returns None)."""
    pass
def main():
    """Build final.json from the subtitle flatfiles in ./data.

    Each subtitle file is flattened to plain text, labelled either from
    a previous manual review (reviewed.json) or automatically from its
    profane/offensive word counts, and appended to the output corpus.
    Files whose automatic label is ambiguous are queued for manual
    review ('#') and left out of the corpus.
    """
    import os
    review = Review()
    dirPath = os.path.dirname(os.path.realpath(__file__))+"/data"
    jsonFile = {}
    jsonFile["description"] = ["This project attempt to annotate and classify Youtube videos taking into account the content of the video and its composition. While youtube flags content inappropriate for young audiences by requiring viewers to sign in, a lot of youtube content is generally unaudited if the uploader of the video does not flag it so. Also there is no distinction between which content is appropriate for what age groups. We will classify content based the film rating system: G, PG, PG-13 and R. We will also apply a binary classification for classifying clickbait videos."]
    jsonFile["authors"] = {
        "author1": "<NAME>",
        "author2": "<NAME>",
        "author3": "<NAME>",
        "author4": "<NAME>"
    }
    jsonFile["emails"] = {
        "email1": "<EMAIL>",
        "email2": "<EMAIL>",
        "email3": "<EMAIL>",
        "email4": "<EMAIL>"
    }
    jsonFile["corpus"] = []
    empty_files = []
    # PERF FIX: load and index the bad-word lexicon ONCE; the original
    # re-opened and re-parsed bad_words.json for every subtitle file.
    offensiveWords, profaneWords = _load_bad_words('bad_words.json')
    for file_name in listdir(dirPath):
        text = _read_subtitle_text(path.join(dirPath, file_name))
        if len(text) == 0:
            empty_files.append(file_name)
            continue
        reviewed_label = review.reviewed_json.get(file_name)
        if reviewed_label:
            # Already manually reviewed: trust the stored label.
            jsonFile["corpus"].append({
                'data': text,
                'label': reviewed_label,
                'title': file_name
            })
            continue
        words = text.split()
        profane_words_list = _find_phrase_matches(words, profaneWords)
        offensive_words_list = _find_phrase_matches(words, offensiveWords)
        label = _classify(review, file_name,
                          offensive_words_list, profane_words_list)
        if label != "#":
            jsonFile["corpus"].append({
                'data': " ".join(words),
                'label': label,
                'title': file_name
            })
    print("empty_files", len(empty_files))
    review.write_to_file()
    with open('final.json', 'w') as file:
        json.dump(jsonFile, file)


def _read_subtitle_text(file_path):
    """Flatten one subtitle file (4-line records: number, timestamp,
    text, blank) into a single space-joined text string."""
    text = ''
    with open(file_path) as file:
        while True:
            lineNo = file.readline().strip()
            if not lineNo:
                # A missing/blank sequence-number line ends the file.
                break
            file.readline()  # timestamp line, unused
            lineText = file.readline().strip()
            file.readline()  # blank separator line
            if not text:
                text += lineText
            else:
                text += ' ' + lineText
    return text


def _load_bad_words(lexicon_path):
    """Parse bad_words.json once into (offensive, profane) phrase indexes."""
    with open(lexicon_path) as badwords:
        data = json.load(badwords)
    # Dedupe before indexing, as the original did.
    offensive = _phrase_index(set(data['offensive']))
    profane = _phrase_index(set(data['profane']))
    return offensive, profane


def _phrase_index(entries):
    """Map each phrase's first word to -1 (single word) or its word list.

    BUG FIX: the original tested `len(profaneW) > 1` (length of the raw
    string) instead of the length of the split phrase for the profane
    lexicon; both lexicons now use the phrase length.
    """
    index = {}
    for entry in entries:
        phrase = entry.split()
        if len(phrase) == 1:
            index[phrase[0]] = -1
        elif len(phrase) > 1:
            index[phrase[0]] = phrase
    return index


def _find_phrase_matches(words, index):
    """Return every bad word/phrase from *index* occurring in *words*.

    Multi-word phrases must appear as consecutive words starting at the
    current position. BUG FIX: the original located the position with
    list.index(word), which always returns the FIRST occurrence and so
    mis-matched phrases whose first word repeats; the enumerate position
    is used instead. Matched phrases are concatenated without spaces,
    as in the original.
    """
    matches = []
    for pos, word in enumerate(words):
        entry = index.get(word)
        if entry is None:
            continue
        if entry == -1:
            matches.append(word)
            continue
        total_word = word
        idx = pos + 1
        match = True
        for expected in entry[1:]:
            if idx == len(words) or words[idx] != expected:
                match = False
                break
            total_word += expected
            idx += 1
        if match:
            matches.append(total_word)
    return matches


def _classify(review, file_name, offensive_words_list, profane_words_list):
    """Map bad-word counts to a rating; '#' means 'needs manual review'."""
    offWordCount = len(offensive_words_list)
    proWordCount = len(profane_words_list)
    if offWordCount == 0:
        if proWordCount <= 2:
            return 'G'
        if proWordCount <= 4:
            # Ambiguous: 3-4 profane words, none offensive -> human review.
            review.log_reviewed(file_name, False, [{"offensive":
                offensive_words_list, "profane": profane_words_list}])
            return '#'
        return 'PG13'  # proWordCount > 4
    if offWordCount < 4:
        # Ambiguous: a few offensive words -> human review.
        review.log_reviewed(file_name, False, [{"offensive":
            offensive_words_list, "profane": profane_words_list}])
        return '#'
    return 'R'
# Build final.json when run as a script.
if __name__ == "__main__":
    main()
|
<reponame>allbuttonspressed/pyjs
#!/usr/bin/env python
# example progressbar.py
import pygtk
pygtk.require('2.0')
import gtk, gobject
# Update the value of the progress bar so that we get
# some movement
def progress_timeout(pbobj):
    """Timer callback: advance pbobj's progress bar by one tick.

    In activity mode the bar pulses; otherwise its fraction grows by
    0.01 and wraps back to 0.0 past 1.0. Always returns True so the
    timeout keeps firing.
    """
    if pbobj.activity_check.get_active():
        pbobj.pbar.pulse()
        return True
    fraction = pbobj.pbar.get_fraction() + 0.01
    if fraction > 1.0:
        fraction = 0.0
    pbobj.pbar.set_fraction(fraction)
    return True
class ProgressBar:
    """Demo window showing a gtk.ProgressBar with toggle controls.

    Builds a window with the bar, three check buttons (show text,
    activity mode, right-to-left) and a close button, and installs a
    100 ms timer that drives progress_timeout().
    """
    # Callback that toggles the text display within the progress
    # bar trough
    def toggle_show_text(self, widget, data=None):
        """Show or clear the overlay text in the bar trough."""
        if widget.get_active():
            self.pbar.set_text("some text")
        else:
            self.pbar.set_text("")
    # Callback that toggles the activity mode of the progress
    # bar
    def toggle_activity_mode(self, widget, data=None):
        """Switch between pulse (activity) mode and fraction mode."""
        if widget.get_active():
            self.pbar.pulse()
        else:
            self.pbar.set_fraction(0.0)
    # Callback that toggles the orientation of the progress bar
    def toggle_orientation(self, widget, data=None):
        """Flip the bar between left-to-right and right-to-left growth."""
        if self.pbar.get_orientation() == gtk.PROGRESS_LEFT_TO_RIGHT:
            self.pbar.set_orientation(gtk.PROGRESS_RIGHT_TO_LEFT)
        elif self.pbar.get_orientation() == gtk.PROGRESS_RIGHT_TO_LEFT:
            self.pbar.set_orientation(gtk.PROGRESS_LEFT_TO_RIGHT)
    # Clean up allocated memory and remove the timer
    def destroy_progress(self, widget, data=None):
        """Stop the update timer and quit the GTK main loop."""
        gobject.source_remove(self.timer)
        self.timer = 0
        gtk.main_quit()
    def __init__(self):
        """Build the window, widgets, and the 100 ms update timer."""
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_resizable(True)
        self.window.connect("destroy", self.destroy_progress)
        self.window.set_title("ProgressBar")
        self.window.set_border_width(0)
        vbox = gtk.VBox(False, 5)
        vbox.set_border_width(10)
        self.window.add(vbox)
        vbox.show()
        # Create a centering alignment object
        align = gtk.Alignment(0.5, 0.5, 0, 0)
        vbox.pack_start(align, False, False, 5)
        align.show()
        # Create the ProgressBar
        self.pbar = gtk.ProgressBar()
        align.add(self.pbar)
        self.pbar.show()
        # Add a timer callback to update the value of the progress bar
        self.timer = gobject.timeout_add (100, progress_timeout, self)
        separator = gtk.HSeparator()
        vbox.pack_start(separator, False, False, 0)
        separator.show()
        # rows, columns, homogeneous
        # NOTE(review): checks below attach at rows 0-3 but the table is
        # created with only 2 rows; presumably gtk.Table grows on
        # attach — confirm against the gtk.Table documentation.
        table = gtk.Table(2, 2, False)
        vbox.pack_start(table, False, True, 0)
        table.show()
        # Add a check button to select displaying of the trough text
        check = gtk.CheckButton("Show text")
        table.attach(check, 0, 1, 0, 1,
                     gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL,
                     5, 5)
        check.connect("clicked", self.toggle_show_text)
        check.show()
        # Add a check button to toggle activity mode
        self.activity_check = check = gtk.CheckButton("Activity mode")
        table.attach(check, 0, 1, 1, 2,
                     gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL,
                     5, 5)
        check.connect("clicked", self.toggle_activity_mode)
        check.show()
        # Add a check button to toggle orientation
        check = gtk.CheckButton("Right to Left")
        table.attach(check, 0, 1, 2, 3,
                     gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL,
                     5, 5)
        check.connect("clicked", self.toggle_orientation)
        check.show()
        # Add a button to exit the program
        button = gtk.Button("close")
        button.connect("clicked", self.destroy_progress)
        vbox.pack_start(button, False, False, 0)
        # This makes it so the button is the default.
        button.set_flags(gtk.CAN_DEFAULT)
        # This grabs this button to be the default button. Simply hitting
        # the "Enter" key will cause this button to activate.
        button.grab_default ()
        button.show()
        self.window.show()
def main():
    """Enter the GTK main loop; returns 0 once the loop quits."""
    gtk.main()
    return 0
if __name__ == "__main__":
    # Build the window, then hand control to the GTK event loop.
    ProgressBar()
    main()
|
"""
This module tests element_agent.py
"""
import numpy as np
import sys
# Check the version of Python
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
import threading
import unittest
# agent and stream and helper_control are in ../core
from IoTPy.core.agent import Agent
from IoTPy.core.stream import Stream, StreamArray, _no_value, _multivalue, run
from IoTPy.core.helper_control import _close
# recent_values are in ../helper_functions
from IoTPy.helper_functions.recent_values import recent_values
from IoTPy.helper_functions.print_stream import print_stream
# op is in ../agent_types
from IoTPy.agent_types.op import map_element, map_element_f
from IoTPy.agent_types.op import filter_element, filter_element_f
from IoTPy.agent_types.op import map_list, map_list_f
from IoTPy.agent_types.op import timed_window
from IoTPy.agent_types.basics import fmap_e, map_e, sink_e, merge_e, merge_asynch
from IoTPy.agent_types.basics import merge_sink_e
#------------------------------------------------------------------------------------------------
# A SIMPLE EXAMPLE TEST
#------------------------------------------------------------------------------------------------
# This example is to illustrate the steps in the test.
# The later examples test several agents whereas this simple
# test only tests a single agent.
# The seven steps in this test may occur in different orders
# in the later tests.
class test_element(unittest.TestCase):
def test_example_1(self):
    """Stateless map_element: y[i] = 2 * x[i], across three scheduler steps."""
    # Specify streams
    x = Stream('x')
    y = Stream('y')
    # Specify encapsulated functions (if any)
    def f(v): return 2*v
    # Specify agents.
    map_element(func=f, in_stream=x, out_stream=y)
    # Execute a step
    # Put test values in the input streams.
    x.extend(list(range(3)))
    # Execute a step
    run()
    # Look at recent values of output streams.
    assert recent_values(y) == [0, 2, 4]
    # Execute a step
    # Put test values in the input streams.
    x.extend([10, 20, 30])
    # Execute a step
    run()
    # Look at recent values of output streams.
    assert recent_values(y) == [0, 2, 4, 20, 40, 60]
    # Execute a step
    # Put test values in the input streams.
    x.extend([0, -10])
    # Execute a step
    run()
    # Look at recent values of output streams.
    assert recent_values(y) == [0, 2, 4, 20, 40, 60, 0, -20]
def test_example_2(self):
    """filter_element keeps only elements for which func returns True."""
    # Specify streams
    x = Stream('x')
    y = Stream('y')
    # Specify encapsulated functions (if any)
    def f(v): return v < 3
    # Specify agents.
    filter_element(func=f, in_stream=x, out_stream=y)
    # Execute a step
    # Put test values in the input streams.
    x.extend(list(range(5)))
    # Execute a step
    run()
    # Look at recent values of output streams.
    assert recent_values(y) == [0, 1, 2]
def test_example_3(self):
    """Two agents in a cycle (x -> y -> x) generate Fibonacci numbers."""
    # Specify streams
    x = Stream('x')
    y = Stream('y')
    # Specify encapsulated functions (if any)
    def f(v, state):
        final, prefinal = state
        next_output = final + prefinal
        # In the next state:
        # prefinal becomes final
        # final becomes next_output
        next_state = next_output, final
        return next_output, next_state
    def g(v, divisor):
        if v % divisor == 0:
            return _no_value
        else:
            return v
    # Specify agents.
    map_element(func=f, in_stream=y, out_stream=x, state=(0, 1))
    map_element(func=g, in_stream=x, out_stream=y, divisor=4)
    # Execute a step
    # Put test values in the input streams.
    y.append(0)
    # Execute a step
    run()
    # Look at recent values of output streams.
    assert recent_values(x) == [1, 1, 2, 3, 5, 8]
    # Execute a step
    # Put test values in the input streams.
    y.append(0)
    # Execute a step
    run()
    # Look at recent values of output streams.
    assert recent_values(x) == \
        [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
def test_example_4(self):
    """Agent cycle with a bound method; mutating the object mid-run changes behavior."""
    # Illustrates a cycle of agents and also shows use
    # of a class within a wrapper.
    # Specify network: streams, functions, agents
    # (a) Specify streams
    x = Stream('x')
    y = Stream('y')
    # (b) Specify encapsulated functions (if any)
    def f(v, state):
        final, prefinal = state
        next_output = final + prefinal
        # In the next state:
        # prefinal becomes final
        # final becomes next_output
        next_state = next_output, final
        return next_output, next_state
    class G(object):
        def __init__(self):
            self.divisor = 4
        def g(self, v):
            if v % self.divisor == 0:
                return _no_value
            else:
                return v
    # (c) Specify agents.
    encapsulator = G()
    map_element(func=f, in_stream=y, out_stream=x, state=(0, 1))
    map_element(func=encapsulator.g, in_stream=x, out_stream=y)
    # Drive the network in steps.
    # Execute a step
    # Put test values in the input streams.
    y.append(0)
    # Execute a step
    run()
    # Look at recent values of output streams.
    assert recent_values(x) == [1, 1, 2, 3, 5, 8]
    # Execute a step after changing agent parameters
    encapsulator.divisor = 2
    # Put test values in the input streams.
    y.append(0)
    # Execute a step
    run()
    # Look at recent values of output streams.
    assert recent_values(x) == \
        [1, 1, 2, 3, 5, 8, 13, 21, 34]
def test_example_5(self):
    """Fibonacci via a mutable dict shared with the agent as its state."""
    # Fibonacci
    # Illustrates use of a dict to save state.
    # Specify network: streams, functions, agents
    # (a) Specify streams
    x = Stream('x')
    y = Stream('y')
    s = {'a':0, 'b':1}
    # (b) Specify encapsulated functions (if any)
    def f(v, s):
        final, prefinal = s['a'], s['b']
        post_final = final + prefinal
        # In the next state:
        # prefinal becomes final
        # final becomes next_output
        s['a'], s['b'] = post_final, final
        return final
    map_element(f, x, y, s=s)
    x.extend(list(range(10)))
    run()
    assert recent_values(y) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
def test_example_6(self):
    """Fibonacci with dict state using named 'final'/'prefinal' keys.

    Same network as test_example_5 but the state dict uses descriptive
    keys. (Removed the unused function-local `import numpy as np`.)
    """
    # Fibonacci
    # Illustrates use of a dict to save state.
    # Specify network: streams, functions, agents
    # (a) Specify streams
    x = Stream('x')
    y = Stream('y')
    s = {'final':0, 'prefinal': 1}
    # (b) Specify encapsulated functions (if any)
    def f(v, s):
        post_final = s['final'] + s['prefinal']
        # In the next state:
        # prefinal becomes final
        # final becomes next_output
        s['prefinal'] = s['final']
        s['final'] = post_final
        return s['prefinal']
    map_element(f, x, y, s=s)
    x.extend(list(range(10)))
    run()
    assert recent_values(y) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
def test_1(self):
    """map_element doubles every input element."""
    # From map_element_examples
    x = Stream('x')
    y = Stream('y')
    def f(in_stream_element):
        out_stream_element = 2*in_stream_element
        return out_stream_element
    map_element(func=f, in_stream=x, out_stream=y)
    x.extend(list(range(5)))
    run()
    assert recent_values(y) == [0, 2, 4, 6, 8]
def test_2(self):
    """map_element forwards extra keyword arguments to func."""
    # From map_element_examples
    x = Stream('x')
    y = Stream('y')
    def multiply_and_add(
            in_stream_element, multiplicand, addend):
        out_stream_element = \
            multiplicand*in_stream_element + addend
        return out_stream_element
    map_element(func=multiply_and_add, in_stream=x, out_stream=y,
                multiplicand=2, addend=10)
    x.extend(list(range(5)))
    run()
    assert recent_values(y) == [10, 12, 14, 16, 18]
def test_3(self):
    """Returning _no_value drops elements at or above the threshold."""
    # From map_element_examples
    x = Stream('x')
    y = Stream('y')
    # In this example, the output stream is the same as the input stream
    # except that only values that are less than the threshold are passed
    # through to the output stream. Here threshold is a keyword argument
    def f(in_stream_element, threshold):
        if in_stream_element < threshold:
            out_stream_element = in_stream_element
        else:
            out_stream_element = _no_value
        return out_stream_element
    map_element(func=f, in_stream=x, out_stream=y, threshold=5)
    x.extend(list(range(20)))
    run()
    assert recent_values(y) == [0, 1, 2, 3, 4]
    # If x is [0, 1, 2, 3, 4,....20] then y is [0, 1, 2, 3, 4]
def test_4(self):
    """_multivalue emits several output elements for one input element."""
    # From map_element_examples
    x = Stream('x')
    y = Stream('y')
    def f(in_stream_element):
        x, y = in_stream_element
        if x > 5 and y > 5:
            out_stream_element = _multivalue((x,y))
        elif x > 5:
            out_stream_element = x
        elif y > 5:
            out_stream_element = y
        else:
            out_stream_element = _no_value
        return out_stream_element
    map_element(func=f, in_stream=x, out_stream=y)
    x.extend( [(10, 10), (2, 20), (30, 3), (4, 4), (1, 3), (60, 70)] )
    run()
    assert recent_values(y) == [10, 10, 20, 30, 60, 70]
#------------------------------------------------------------------------------------------------
# ELEMENT AGENT TESTS
#------------------------------------------------------------------------------------------------
def test_element_simple(self):
    """Batch test of map/filter element agents in many configurations.

    Covers stateless and stateful map_element, call_streams, positional
    and keyword extra arguments, _no_value and _multivalue outputs,
    filter_element (stateless and stateful), the *_f functional forms,
    a cyclic agent graph, and StreamArray input with numpy functions,
    asserting all output streams after each scheduler step.
    """
    # SPECIFY STREAMS
    m = Stream('m')
    n = Stream('n')
    o = Stream('o')
    q = Stream('q')
    r = Stream('r')
    s = Stream('s')
    t = Stream('t')
    u = Stream('u')
    v = Stream('v')
    w = Stream('w')
    x = Stream('x')
    y = Stream('y')
    z = Stream('z')
    #----------------------------------------------------------------
    # Test simple map using map_element
    # func operates on an element of the input stream and returns an element of
    # the output stream.
    # SPECIFY ENCAPSULATED FUNCTIONS (IF ANY)
    def double(v): return 2*v
    # SPECIFY AGENTS
    a = map_element(func=double, in_stream=x, out_stream=y, name='a')
    ymap = map_element_f(func=double, in_stream=x)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test filtering
    def filtering(v): return v <= 2
    # yfilter is a stream consisting of those elements in stream x with
    # values less than or equal to 2.
    # The elements of stream x that satisfy the boolean, filtering(), are
    # passed through.
    yfilter = filter_element_f(func=filtering, in_stream=x)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test map with state using map_element
    # func operates on an element of the input stream and state and returns an
    # element of the output stream and the new state.
    def f(x, state):
        return x+state, state+2
    b = map_element(func=f, in_stream=x, out_stream=z, state=0, name='b')
    bmap = map_element_f(func=f, in_stream=x, state=0)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test map with call streams
    # The agent executes a state transition when a value is added to call_streams.
    c = map_element(func=f, in_stream=x, out_stream=v, state=10,
                    call_streams=[w], name='c')
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test _no_value
    # func returns _no_value to indicate that no value
    # is placed on the output stream.
    def f_no_value(v):
        """ Filters out odd values
        """
        if v%2:
            # v is odd. So filter it out.
            return _no_value
        else:
            # v is even. So, keep it in the output stream.
            return v
    no_value_stream = Stream(name='no_value_stream')
    no_value_agent = map_element(
        func=f_no_value, in_stream=x, out_stream=no_value_stream,
        name='no_value_agent')
    no_value_map = map_element_f(func=f_no_value, in_stream=x)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test _multivalue
    # func returns _multivalue(output_list) to indicate that
    # the list of elements in output_list should be placed in the
    # output stream.
    def f_multivalue(v):
        if v%2:
            return _no_value
        else:
            return _multivalue([v, v*2])
    multivalue_stream = Stream('multivalue_stream')
    multivalue_agent = map_element(
        func=f_multivalue, in_stream=x, out_stream=multivalue_stream,
        name='multivalue_agent')
    multivalue_map = map_element_f(func=f_multivalue, in_stream=x)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test map_element with args
    def function_with_args(x, multiplicand, addition):
        return x*multiplicand+addition
    ## EXPLANATION FOR agent BELOW
    ## agent_test_args = map_element(
    ##    func=function_with_args, in_stream = x, out_stream=r,
    ##    state=None, call_streams=None, name='agent_test_args',
    ##    multiplicand=2, addition=10)
    agent_test_args = map_element(
        function_with_args, x, r,
        None, None, 'agent_test_args',
        2, 10)
    stream_test_args = map_element_f(function_with_args, x, None, 2, 10)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test map_element with kwargs
    agent_test_kwargs = map_element(
        func=function_with_args, in_stream = x, out_stream=u,
        state=None, call_streams=None, name='agent_test_kwargs',
        multiplicand=2, addition=10)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test map_element with state and kwargs
    # func operates on an element of the input stream and state and returns an
    # element of the output stream and the new state.
    def f_map_args_kwargs(u, state, multiplicand, addend):
        return u*multiplicand+addend+state, state+2
    agent_test_kwargs_and_state = map_element(
        func=f_map_args_kwargs, in_stream=x, out_stream=s,
        state=0, name='agent_test_kwargs_and_state',
        multiplicand=2, addend=10)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test map_element with state and args
    aa_map_args_agent = map_element(
        f_map_args_kwargs, x, t,
        0, None, 'aa_map_args_agent',
        2, 10)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test filter_element
    def is_even_number(v):
        return not v%2
    filter_element(func=is_even_number, in_stream=x, out_stream=q)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test filter_element with state
    def less_than_n(v, state):
        return v <= state, state+1
    x0 = Stream('x0')
    q0 = Stream('q0')
    # state[i] = i
    # Pass through elements in x0 where x0[i] <= state[i]
    filter_element(
        func=less_than_n, in_stream=x0, out_stream=q0, state=0)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test filter_element_stream
    # p is a stream consisting of odd-numbered elements of x
    # Even-numbered elements are filtered out.
    p = filter_element_f(is_even_number, x)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Test cycles in the module connection graph
    filter_element(func=lambda v: v <= 5, in_stream=o, out_stream=n)
    map_element(func=lambda v: v+2, in_stream=n, out_stream=o)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # PUT TEST VALUES INTO INPUT STREAMS
    #----------------------------------------------------------------
    # Put test values into streams x, x0 and n.
    x.extend(list(range(3)))
    x0.extend([0, 1, 3, 3, 6, 8])
    n.append(0)
    # STEP 6: EXECUTE A STEP OF THE SCHEDULER
    run()
    # STEP 7: LOOK AT OUTPUT STREAMS
    assert recent_values(x) == [0, 1, 2]
    assert recent_values(y) == [0, 2, 4]
    assert recent_values(q0) == [0, 1, 3]
    assert recent_values(ymap) == recent_values(y)
    assert recent_values(yfilter) == [0, 1, 2]
    assert recent_values(z) == [0, 3, 6]
    assert recent_values(bmap) == recent_values(z)
    assert recent_values(v) == []
    assert recent_values(no_value_stream) == [0, 2]
    assert recent_values(no_value_map) == recent_values(no_value_stream)
    assert recent_values(multivalue_stream) == [0, 0, 2, 4]
    assert recent_values(multivalue_map) == recent_values(multivalue_stream)
    assert recent_values(r) == [10, 12, 14]
    assert recent_values(stream_test_args) == recent_values(r)
    assert recent_values(u) == recent_values(r)
    assert recent_values(s) == [10, 14, 18]
    assert recent_values(s) == recent_values(t)
    assert recent_values(q) == [0, 2]
    assert recent_values(q) == recent_values(p)
    assert recent_values(n) == [0, 2, 4]
    assert recent_values(o) == [2, 4, 6]
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    x.extend(list(range(3, 5, 1)))
    run()
    assert recent_values(x) == [0, 1, 2, 3, 4]
    assert recent_values(y) == [0, 2, 4, 6, 8]
    assert recent_values(ymap) == recent_values(y)
    assert recent_values(yfilter) == [0, 1, 2]
    assert recent_values(z) == [0, 3, 6, 9, 12]
    assert recent_values(bmap) == recent_values(z)
    assert recent_values(no_value_stream) == [0, 2, 4]
    assert recent_values(no_value_map) == recent_values(no_value_stream)
    assert recent_values(multivalue_stream) == [0, 0, 2, 4, 4, 8]
    assert recent_values(multivalue_map) == recent_values(multivalue_stream)
    assert recent_values(r) == [10, 12, 14, 16, 18]
    assert recent_values(stream_test_args) == recent_values(r)
    assert recent_values(u) == recent_values(r)
    assert recent_values(s) == [10, 14, 18, 22, 26]
    assert recent_values(s) == recent_values(t)
    assert recent_values(q) == [0, 2, 4]
    assert recent_values(q) == recent_values(p)
    #----------------------------------------------------------------
    #----------------------------------------------------------------
    # Appending to call stream w triggers agent c's state transition.
    w.append(0)
    run()
    assert recent_values(x) == [0, 1, 2, 3, 4]
    assert recent_values(y) == [0, 2, 4, 6, 8]
    assert recent_values(ymap) == recent_values(y)
    assert recent_values(yfilter) == [0, 1, 2]
    assert recent_values(z) == [0, 3, 6, 9, 12]
    assert recent_values(bmap) == recent_values(z)
    assert recent_values(v) == [10, 13, 16, 19, 22]
    assert recent_values(no_value_stream) == [0, 2, 4]
    assert recent_values(no_value_map) == recent_values(no_value_stream)
    assert recent_values(multivalue_stream) == [0, 0, 2, 4, 4, 8]
    assert recent_values(multivalue_map) == recent_values(multivalue_stream)
    assert recent_values(r) == [10, 12, 14, 16, 18]
    assert recent_values(stream_test_args) == recent_values(r)
    assert recent_values(u) == recent_values(r)
    assert recent_values(s) == [10, 14, 18, 22, 26]
    assert recent_values(s) == recent_values(t)
    assert recent_values(q) == [0, 2, 4]
    assert recent_values(q) == recent_values(p)
    #----------------------------------------------------------------
    #------------------------------------------------------------------------------------------------
    # ELEMENT AGENT TESTS FOR STREAM ARRAY
    #------------------------------------------------------------------------------------------------
    import numpy as np
    m = StreamArray('m')
    n = StreamArray('n')
    o = StreamArray('o')
    map_element(func=np.sin, in_stream=m, out_stream=n)
    filter_element(func=lambda v: v <= 0.5, in_stream=n, out_stream=o)
    input_array = np.linspace(0.0, 2*np.pi, 20)
    m.extend(input_array)
    run()
    expected_output = np.sin(input_array)
    assert np.array_equal(recent_values(n), expected_output)
    expected_output = expected_output[expected_output <= 0.5]
    assert np.array_equal(recent_values(o), expected_output)
    return
def test_timed_window(self):
    """timed_window groups (time, value) items into 10-unit windows;
    empty windows produce no output."""
    x = Stream('x')
    y = Stream('y')
    def f(v): return v
    timed_window(
        func=f, in_stream=x, out_stream=y,
        window_duration=10, step_time=10)
    x.extend([(1, 'a'), (8, 'b'), (12, 'c')])
    run()
    assert(recent_values(y) == [(10, [(1, 'a'), (8, 'b')])])
    x.extend([(14, 'd'), (36, 'e'), (43, 'g'), (75, 'h')])
    run()
    assert(recent_values(y) == [(10, [(1, 'a'), (8, 'b')]),
                                (20, [(12, 'c'), (14, 'd')]),
                                (40, [(36, 'e')]), (50, [(43, 'g')])])
    x.extend([(79, 'i'), (101, 'j')])
    run()
    assert(recent_values(y) == [
        (10, [(1, 'a'), (8, 'b')]), (20, [(12, 'c'), (14, 'd')]),
        (40, [(36, 'e')]), (50, [(43, 'g')]), (80, [(75, 'h'), (79, 'i')])])
    return
def test_map_list(self):
    """map_list applies func to whole lists of new elements; the output
    list need not have the same length as the input list."""
    # Removed unused local `scheduler = Stream.scheduler` (never read).
    x = Stream('x')
    y = Stream('y')
    z = Stream('z')
    w = Stream('w')
    # Identity map: y mirrors x.
    map_list(func = lambda v: v, in_stream=x, out_stream=y)
    def f(lst):
        # Keep only odd values.
        return list(filter(lambda v: v%2, lst))
    def g(lst):
        # Double odd values, halve even ones.
        return [v*2 if v%2 else v/2 for v in lst]
    map_list(f, x, z)
    map_list(g, x, w)
    x_values = list(range(10))
    x.extend(x_values)
    run()
    assert recent_values(y) == recent_values(x)
    assert recent_values(z) == f(x_values)
    assert recent_values(w) == g(x_values)
def test_stream_arrays_2(self):
    """
    Example where the input stream of an agent is a stream array and
    its output stream is not a stream array.

    Each input element is a length-3 float vector; np.median reduces
    it to a scalar that is appended to the plain output stream.
    """
    x = StreamArray(name='x', dimension=3, dtype=float)
    y = Stream()
    map_element(func=np.median, in_stream=x, out_stream=y)
    x.append(np.array([1., 2., 3.]))
    run()
    assert y.recent[:y.stop] == [2.0]
    x.extend(np.array([[4., 5., 6.], [7., 8., 9.]]))
    run()
    assert y.recent[:y.stop] == [2.0, 5.0, 8.0]
def test_class_with_state(self):
    """map_element driven by a bound method whose object carries state.

    Renamed from test_class: a second method with that same name is
    defined later in this class, which silently shadowed this one so
    it never ran.  unittest discovers any method starting with 'test',
    so the rename is backward compatible.
    """
    class example(object):
        def __init__(self, multiplicand):
            self.multiplicand = multiplicand
            self.running_sum = 0
        def step(self, v):
            # v * multiplicand + sum of all previously seen v's.
            result = v * self.multiplicand + self.running_sum
            self.running_sum += v
            return result
    x = Stream()
    y = Stream()
    eg = example(multiplicand=2)
    map_element(func=eg.step, in_stream=x, out_stream=y)
    x.extend(list(range(5)))
    run()
    assert y.recent[:y.stop] == [0, 2, 5, 9, 14]
def test_halt_agent(self):
    """After a.halt() the agent stops processing: values appended to
    its input stream no longer appear on its output stream, while the
    input stream itself keeps growing."""
    def double(v): return v*2
    x = Stream('x')
    y = Stream('y')
    a = map_element(func=double, in_stream=x, out_stream=y)
    x.extend(list(range(5)))
    run()
    assert recent_values(y) == [0, 2, 4, 6, 8]
    a.halt()
    run()
    assert recent_values(y) == [0, 2, 4, 6, 8]
    x.extend(list(range(10,15)))
    run()
    # Halted agent ignores the new input; x still records it.
    assert recent_values(y) == [0, 2, 4, 6, 8]
    assert recent_values(x) == list(range(5)) + list(range(10,15))
    ##
    ## # What follows is nondeterministic and so may fail
    ## # the test.
    ## a.restart()
    ## run()
    ## assert recent_values(y) == [0, 2, 4, 6, 8]
    ## run()
    ## assert recent_values(y) == [0, 2, 4, 6, 8]
    ## x.extend(list(range(100,101)))
    ## run()
    ## assert recent_values(y) == [
    ##     0, 2, 4, 6, 8, 20, 22, 24, 26, 28, 200]
def test_initial_value(self):
    """An output stream created with initial_value starts with those
    elements; agent output is appended after them."""
    def twice(value):
        return value * 2
    source = Stream('x')
    target = Stream(name='y', initial_value=[0, 0, 0, 0, 0])
    agent = map_element(func=twice, in_stream=source, out_stream=target)
    source.extend([0, 1, 2, 3, 4])
    run()
    expected = [0, 0, 0, 0, 0] + [value * 2 for value in range(5)]
    assert recent_values(target) == expected
def test_multiple_relations(self):
    """Three agents write into the same output stream y; the order in
    which their outputs interleave is nondeterministic, so the value
    check is kept commented out."""
    def double(v): return v*2
    def add10(v): return v+10
    x = Stream('x')
    y = Stream('y')
    z = Stream('z')
    a = map_element(func=add10, in_stream=z, out_stream=y)
    b = map_element(func=double, in_stream=x, out_stream=y)
    c = map_element(func=double, in_stream=x, out_stream=y)
    x.extend(list(range(5)))
    z.extend(list(range(100, 106)))
    run()
    ## # Nondeterministic.
    ## assert recent_values(y) == [
    ##     0, 2, 4, 6, 8, 0, 2, 4, 6, 8,
    ##     110, 111, 112, 113, 114, 115]
def test_multiple_relations_2(self):
    """Two identical @map_e agents feed the same output stream; the
    interleaving of their outputs is nondeterministic, so the value
    check is kept commented out."""
    @map_e
    def double(v): return v*2
    x = Stream('x', [10, 11])
    y = Stream('y')
    double(x, y)
    double(x, y)
    x.extend(list(range(5)))
    run()
    ## # Nondeterministic.
    ## assert recent_values(y) == [
    ##     0, 2, 4, 6, 8, 0, 2, 4, 6, 8,
    ##     110, 111, 112, 113, 114, 115]
def test_multiple_functions(self):
    """Each call of an @fmap_e function builds an independent agent and
    returns a fresh output stream.

    The original code bound both returned streams to the same name y,
    so double's output was discarded untested; both streams are now
    kept and asserted.
    """
    @fmap_e
    def double(v): return v*2
    @fmap_e
    def add10(v): return v+10
    x = Stream('x')
    doubled = double(x)
    added = add10(x)
    x.extend(list(range(5)))
    run()
    assert recent_values(doubled) == [0, 2, 4, 6, 8]
    assert recent_values(added) == [10, 11, 12, 13, 14]
def test_class(self):
    """@map_e wrapping a bound method that dispatches on sign:
    positives are squared, non-positives are doubled."""
    class C(object):
        def __init__(self):
            return
        def f(self, value):
            # Dispatch on the sign of the value.
            if value > 0:
                return self.pos(value)
            else:
                return self.neg(value)
        def pos(self, value):
            return value * value
        def neg(self, value):
            return value + value
    s = Stream('s')
    t = Stream('t')
    c = C()
    @map_e
    def g(v): return c.f(v)
    g(in_stream=s, out_stream=t)
    s.extend(list(range(-4, 4)))
    run()
    assert (recent_values(t) == [
        -8, -6, -4, -2, 0, 1, 4, 9])
def test_None_in_stream(self):
    """Streams created with discard_None=False keep None elements; a
    default stream drops them.  _no_value never appears in output."""
    x = Stream('x', discard_None=False)
    y = Stream(name='y', discard_None=False)
    z = Stream(name='z')
    map_element(lambda v: v, x, y)
    map_element(lambda v: v, x, z)
    x.extend([0, None, 1, None, 2, _no_value, 3])
    run()
    assert (recent_values(y) == [0, None, 1, None, 2, 3])
    assert (recent_values(z) == [0, 1, 2, 3])
def test_pass_parameter(self):
    """Extra keyword arguments to map_element (here `result`) are
    forwarded to func on every call; `state` threads the running sum
    between calls."""
    result = []
    def f(v, state, result):
        state += v
        result.append(state)
        return v, state
    x = Stream('x')
    y = Stream('y')
    map_element(func=f, in_stream=x, out_stream=y, state=0, result=result)
    x.extend(list(range(5)))
    run()
    # Running sums of 0..4.
    assert result == [0, 1, 3, 6, 10]
def count_pos_and_non_pos(self, count, lst):
    """Tally the signs of the values in lst using a sink agent.

    Parameters
    ----------
    count : list
        List with at least 2 elements, mutated in place:
        count[0] is incremented for each non-positive value in lst,
        count[1] is incremented for each positive value in lst.
    lst : list
        The input list.

    Notes
    -----
    Nothing is returned; the tallies are delivered through `count`.
    """
    @sink_e
    def f(v, count):
        if v > 0: count[1] += 1
        else: count[0] += 1
    x = Stream('x')
    f(x, count=count)
    x.extend(lst)
    run()
def test_count_pos_and_non_pos(self):
    """count_pos_and_non_pos tallies [non-positives, positives]."""
    tallies = [0, 0]
    values = [-2, -1, 0, 1, 2]
    self.count_pos_and_non_pos(tallies, values)
    assert tallies == [3, 2]
def test_thread_1(self):
    """Drive a stream from a queue inside a worker thread; a sink
    agent doubles each value into an output queue.  The sentinel
    `finished` terminates the worker loop."""
    def thread_target(q_in, q_out, finished):
        # Build the agent network inside the thread that runs it.
        @sink_e
        def f(w): q_out.put(w*2)
        x = Stream('x')
        f(x)
        while True:
            v = q_in.get()
            if v == finished:
                break
            x.append(v)
            run()
        return
    q_in = queue.Queue()
    q_out = queue.Queue()
    finished = 'finished'
    thr = threading.Thread(target=thread_target, args=(q_in, q_out, finished))
    thr.start()
    # Put data into input queue
    N = 5
    for i in range(N): q_in.put(i)
    q_in.put(finished)
    thr.join()
    # Assert contents of the output queue.
    output_list = []
    while not q_out.empty():
        output_list.append(q_out.get())
    assert output_list == [i*2 for i in range(N)]
    return
def thread_target(self, q_in, q_out, streams, finished):
    """Worker loop: read (stream_name, value) pairs from q_in, append
    each value to the named stream and run the scheduler, until the
    `finished` sentinel arrives (which is echoed onto q_out)."""
    name_to_stream = {}
    for s in streams:
        name_to_stream[s.name] = s
    while True:
        v = q_in.get()
        if v == finished:
            q_out.put(finished)
            break
        s_name, value = v
        s = name_to_stream[s_name]
        s.append(value)
        run()
def test_thread_2(self):
    """Merge two streams inside a worker thread; element-wise sums
    arrive on an output queue, terminated by the `finished` sentinel
    that thread_target echoes back."""
    @merge_sink_e
    def f(list_of_elements, q_out):
        q_out.put(sum(list_of_elements))
    x = Stream('x')
    y = Stream('y')
    q_in = queue.Queue()
    q_out = queue.Queue()
    f([x, y], q_out=q_out)
    streams = [x, y]
    finished = _close
    thr = threading.Thread(target=self.thread_target,
                           args=(q_in, q_out, streams, finished))
    thr.start()
    # Put data into input queue
    q_in.put(('x', 1))
    q_in.put(('y', 100))
    q_in.put(finished)
    output = []
    while True:
        w = q_out.get()
        if w == finished:
            break
        else:
            output.append(w)
    thr.join()
    assert output == [101]
def square_and_count_pos_and_non_pos(self, count, input_list):
    """Square each element of input_list into a stream while tallying
    count[0] (non-positives) and count[1] (positives) in place.
    Returns the list of squared values."""
    @map_e
    def f(v, count):
        if v > 0: count[1] += 1
        else: count[0] += 1
        return v*v
    x = Stream('x')
    y = Stream('y')
    f(in_stream=x, out_stream=y, count=count)
    x.extend(input_list)
    run()
    return recent_values(y)
def test_square_and_count_pos_and_non_pos(self):
    """Check both the squared outputs and the sign tallies."""
    tallies = [0, 0]
    data = [1, -1, 2, 3, -4]
    squares = self.square_and_count_pos_and_non_pos(tallies, data)
    assert tallies == [2, 3]
    assert squares == [1, 1, 4, 9, 16]
def test_filter_number_1(self):
    """filter_element keeps exactly the elements for which func is true."""
    values = list(range(-5, 5))
    source = Stream('input')
    kept = Stream('output')
    def is_positive(v):
        return v > 0
    filter_element(is_positive, source, kept)
    source.extend(values)
    run()
    assert recent_values(kept) == [v for v in values if is_positive(v)]
def test_filter_number_2(self):
    """The keyword constant `threshold` given to filter_element is
    forwarded to the filter function on every call."""
    in_stream = Stream('input')
    out_stream = Stream('output')
    def f(v, threshold):
        # v is an element of the input stream
        # threshold is a constant
        return v > threshold
    # Create an agent by encapsulating f.
    filter_element(f, in_stream, out_stream, threshold=10)
    input_list = list(range(-20, 20))
    in_stream.extend(input_list)
    run()
    assert recent_values(out_stream) == list(range(11, 20))
def test_filter_number_3(self):
    """Stateful filter: keep elements smaller than their predecessor."""
    in_stream = Stream('input')
    out_stream = Stream('output')
    def f(v, w):
        # w is the current state.
        # v is the current element of the input stream.
        # Returns: (1) a boolean: v < w.
        #     If this boolean is true then v appears in the output.
        #     (The original comment said v <= w; the code tests v < w.)
        # (2) the next state, v.
        #     The next state is the current input, v.
        # Keeps those elements in the stream that are decreasing.
        return v < w, v
    # Create an agent with an initial state of 0
    filter_element(f, in_stream, out_stream, state=0)
    input_list = [10, 9, 5, 7, 8, 12, 11, 3, 6, 9]
    in_stream.extend(input_list)
    run()
    assert recent_values(out_stream) == [9, 5, 11, 3]
def test_filter_number_4(self):
    """Stateful filter with an extra constant parameter `increase`:
    keep elements below the previous element times `increase`."""
    in_stream = Stream('input')
    out_stream = Stream('output')
    def f(v, w, increase):
        # v is an element of the input stream
        # w is the current state
        # increase is a constant parameter specified in the
        # call that creates the agent
        # return (1) a Boolean and (2) the next state.
        # In this example, the next state is the current input v.
        return v < w*increase, v
    # Create an agent by encapsulating function f. This agent
    # has an initial state of 0, and a parameter, increase, with constant value 1.01
    filter_element(f, in_stream, out_stream, state=0, increase=1.01)
    input_list = [10, 10, 9, 200, 201, 200, 202, 203, 1, 0, 2]
    in_stream.extend(input_list)
    run()
    assert recent_values(out_stream) == [10, 9, 201, 200, 203, 1, 0]
# Run every TestCase in this module when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
# STRING DATA TYPE
# strings are arrays of bytes representing unicode characters
s1="text" # string is created with double quotation marks
s2='this is also a string' # single quotation marks are the same thing
s3='abc123!# "/(@£$.. ""abc123ABC' # strings can contain any characters
s4="""one liner""" # three " or '-characters indicates a multiline string
s5= """line1
line2
line3
the last line is here""" # a multiline string can cover multiple lines
# INDEXING
# individual characters are accessed by indexing with square brackets
a = "text" # string that contains 4 characters
a[0] # indexing starts from zero, similar to lists and tuples
print(a[0])
print(a[1])
print(a[2])
print(a[3])
print(len(a)) # len() built-in function returns the number of characters in a string
a[0] # the index 0 is always the first element of a string...
a[len(a)-1] # ...so the index len(a)-1 is always the last element
a[-1] # negative indexing means counting elements from the end
print(a[-1]) # negative indices start from -1
print(a[-2])
print(a[-3])
print(a[-4])
# notice that a[-1] is always the same element as a[len(a)-1]
# strings are immutable (unchangeable), so the elements cannot be changed
# a[0]=6 this would create an error, because strings are unchangeable
# SLICING
# substrings can be taken with square brackets [] and a start:stop range
b = "Hello, World!"
print(b[2:5]) # elements from index 2 to 4 (does not include 5)
print(b[:5]) # elements from index 0 to 4 (does not include 5)
print(b[2:]) # elements from index 2 to the last
print(b[:]) # all the elements
print(b[-5:-2]) # elements from index -5 to -3 (does not include -2)
print(b[:-1]) # elements from index 0 to -2 (does not include -1)
print(b[-4:]) # elements from index -4 to -1
# indices can also be applied straight into a string literal
print("Hello, World!"[2:5])
# STRING OPERATORS: + and *
a = "Hello"
b = "World"
c = a + b # plus a+b operator concatenates strings a and b
print(c)
c = a + " " + b + "!"
print(c)
c = 3*a # times n*a operator concatenates string a for n times
print(c)
# ESCAPE CHARACTER: \
# special characters can be inserted into strings with the \ operator
txt = "Hello \bWorld!" # backspace
print(txt)
txt = "Hello\tWorld!" # tab
print(txt)
txt = "Hello\rWorld!" # carriage return
print(txt)
txt = "Hello\nWorld!" # new line
print(txt)
txt = "This will insert one \\ (backslash)." # backslash
print(txt)
txt = "This will NOT insert new line \\n."
print(txt)
txt = 'It\'s alright.' # single quote
print(txt)
txt = 'It works for \" also.' # double quote
print(txt)
txt = "\x48\x65\x6c\x6c\x6f" # hexadecimal
print(txt)
txt = "\110\145\154\154\157" # octal
print(txt)
txt = "This is a full block character: \u2588" # unicode characters
print(txt)
# raw strings are not formatted in any way
txt = r"new line \n will not appear" # letter r before the string
print(txt)
# FORMATTING OPERATORS: %, {} and f-strings
# old % operator
txt = "String %s will be replaced." %"with this" # s is format character for strings
print(txt)
name = "John"
age = 23
txt = "%s is %d years old." % (name, age) # s is for strings, d for integers
print(txt)
# txt = "%s is %d years old." % (name, name) # this would create an error (%d needs a number)
txt = "Hey %(name)s, you are %(age).3f years old!" %{"age": 25,"name": name}
print(txt)
# Python has at least the following format characters:
# %s - Strings (or any object with a string representation, like numbers)
# %d - Integers
# %f - Floating point numbers
# %.<number of digits>f - Float with a given amount of digits
# new {} operator
quantity = 3
itemno = 567
price = 49.95
txt = "I want {} pieces of item {} for {:.1f} dollars."
print(txt.format(quantity, itemno, price))
txt = "I want to pay {2} dollars for {0} pieces of item {1}."
print(txt.format(quantity, itemno, price))
txt = "I have a {carname}, it is a {model}."
print(txt.format(carname = "Ford", model = "Mustang"))
# Formatted string literals = f-strings (from Python 3.6 forwards)
a = 5
b = 10
txt = f"Five plus ten is {a + b} and not {2 * (a + b)}."
print(txt) # f at the beginning does the same thing as the format()-function
name = "John"
age = 23
txt = f"Hey {name}, you are {age:.1f} years old!"
print(txt)
# STRING METHODS
# there are a lot of methods for strings, below is just a few of them
# notice that the methods do not change the original string, but return a new one
a = "Hello, World!"
b = a.upper() # every character into upper case
print(b)
c = a.lower() # every character into lower case
print(c)
print(a) # the string a itself is still the original
a = " Hello, World! "
d = a.strip() # strips spaces from the beginning and end
print(d) # prints "Hello, World!"
a = "Hello, World!"
e = a.replace("H", "J") # replaces all the H letters with J
print(e) # prints "Jello, World!"
f = a.split(",") # splits the string from the , characters
print(f) # prints ['Hello', ' World!']
f[0]="new string" # f is now a list, and we can change the elements
print(f)
# all the methods can also be applied straight into a string literal
print("word!".upper())
|
#!/usr/bin/python
# coding: utf-8
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, distribute with modifications, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
# THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name(s) of the above copyright
# holders shall not be used in advertising or otherwise to promote the
# sale, use or other dealings in this Software without prior written
# authorization.
# Imports
# -------
from __future__ import print_function
import pandas as pd
import numpy as np
import math
import sys
BASE_PHOTONS = 19100 # photoelectrons per mm^2 and second of a magnitude 0 G2 star
# Geometric Transformations
# -------------------------
def angles_to_vector(azimuth, altitude):
    """Convert azimuth/altitude angles (radians) to unit-length vectors.

    Accepts scalars or arrays; for 1-D inputs the x, y, z components
    end up on the last axis of the result.
    """
    cos_alt = np.cos(altitude)
    x = np.cos(azimuth) * cos_alt
    y = np.sin(azimuth) * cos_alt
    z = np.sin(altitude)
    return np.array([x, y, z]).transpose()
def vector_to_angles(vectors):
    """Convert unit-length vectors to [azimuth, altitude] in radians.

    `vectors` carries its x, y, z components on the last axis.
    """
    azimuth = np.arctan2(vectors[..., 1], vectors[..., 0])
    altitude = np.arcsin(vectors[..., 2])
    return np.array([azimuth, altitude])
def split_vectors(vectors):
    """Split `vectors` into its x, y and z components (last axis)."""
    x, y, z = (vectors[..., axis] for axis in range(3))
    return x, y, z
def randomVectors(num):
    """Generate `num` random three dimensional unit-length vectors.

    Draws z uniformly in [-1, 1) and an angle uniformly in [0, 2*pi),
    which gives a uniform distribution over the unit sphere.
    """
    samples = np.random.rand(num, 2)
    angle = 2. * np.pi * samples[:, 0]
    z = samples[:, 1] * 2 - 1
    radius = np.sqrt(1 - z * z)
    components = [radius * np.cos(angle), radius * np.sin(angle), z]
    return np.vstack(components).transpose()
def random_matrix():
    """Generate a uniformly random 3x3 orientation (rotation) matrix.

    Based on the random-rotation algorithm from Graphics Gems III,
    pages 117-120: a random rotation about the z axis composed with a
    random Householder reflection; the product is negated so the
    result is a proper rotation.
    """
    u = np.random.rand(3)
    cos_a = np.cos(2. * np.pi * u[0])
    sin_a = np.sin(2. * np.pi * u[0])
    # Rotation about the z axis.
    rot_z = ([[cos_a, sin_a, 0],
              [-sin_a, cos_a, 0],
              [0, 0, 1.]])
    cos_b = np.cos(2 * np.pi * u[1])
    sin_b = np.sin(2 * np.pi * u[1])
    sin_c = np.sqrt(u[2])
    cos_c = np.sqrt(1 - u[2])
    # Householder reflection across a random plane.
    v = np.array([[cos_b * sin_c, sin_b * sin_c, cos_c]])
    reflection = np.eye(3) - 2 * np.dot(v.transpose(), v)
    return -np.dot(reflection, rot_z)
def add_vector_noise(base_vectors, stddev):
    """Adds Gaussian angular noise to a list of vectors.

    For each vector a random perpendicular unit direction is built
    (cross product with a random vector, normalized), scaled by
    stddev * N(0, 1), added, and the sum re-normalized.
    NOTE(review): assumes base_vectors are (approximately) unit
    length; the result is unit length either way.
    """
    num = len(base_vectors)
    v_r = randomVectors(num)
    # Perpendicular noise directions, normalized to unit length.
    v_r = np.cross(base_vectors, v_r)
    v_r /= np.linalg.norm(v_r, axis=1).reshape((-1, 1))
    v_r = base_vectors + stddev * (np.random.randn(num, 1) * v_r)
    return v_r / np.linalg.norm(v_r, axis=1).reshape((-1, 1))
# Camera
# ------
class Camera(object):
    """Base camera model mapping sky angles to pixel coordinates.

    Subclasses provide project(theta) -> normalized radius and
    unproject(r) -> theta.  `resolution` is (res_x, res_y) in pixels;
    `principal_point` is in normalized image coordinates [0, 1].
    """
    def __init__(self, resolution, pixel_ar = 1, principal_point = (0.5, 0.5)):
        self.resolution = resolution
        self.pixel_ar = pixel_ar
        self.principal_point = principal_point
    def from_angles(self, azimuth, altitude):
        """Transforms camera-relative azimuth altitude information to pixel coordinates.

        Returns an array of (y, x) pixel pairs (row, column order).
        """
        res_x, res_y = self.resolution
        pp_x, pp_y = self.principal_point
        ar = self.pixel_ar * res_x / res_y
        # Zenith angle from the optical axis.
        theta = np.pi / 2 - altitude
        # x-axis flip: note that we look in the positive z-axis direction,
        # so in a right-handed coordinate system the x-axis goes to the left,
        # but we want it to go to the right, so we flip the x-axis
        # the y-axis still goes up
        # this equals transforming the azimuth as follows:
        alpha = np.pi - azimuth
        r = self.project(theta)
        x = ( np.cos(alpha) * r + pp_x) * res_x
        y = (ar * np.sin(alpha) * r + pp_y) * res_y
        return np.array((y, x)).transpose()
    def to_angles(self, pos):
        """Transforms pixel coordinates to camera-relative azimuth altitude information.

        Inverse of from_angles; `pos` holds (y, x) rows.
        """
        res_x, res_y = self.resolution
        pp_x, pp_y = self.principal_point
        ar = self.pixel_ar * res_x / res_y
        x = pos[:, 1]
        y = pos[:, 0]
        x = (x / res_x - pp_x)
        y = (y / res_y - pp_y) / ar
        az = np.pi - np.arctan2(y, x)
        r = np.sqrt(x ** 2 + y ** 2)
        theta = self.unproject(r)
        alt = np.pi / 2 - theta
        return az, alt
class EquidistantCamera(Camera):
    """Equidistant fisheye projection: r = f * theta."""
    def __init__(self, f, resolution, pixel_ar = 1., principal_point = (0.5, 0.5)):
        self.f = f
        super(EquidistantCamera, self).__init__(resolution, pixel_ar, principal_point)
    def project(self, theta):
        return theta * self.f
    def unproject(self, r):
        return r / self.f
class RectilinearCamera(Camera):
    """Rectilinear (pinhole/perspective) projection: r = f * tan(theta).

    Angles beyond 90 degrees cannot be projected; they are mapped to a
    huge radius (1e9) so they land far outside any sensor.
    """
    def __init__(self, f, resolution, pixel_ar = 1., principal_point = (0.5, 0.5)):
        self.f = f
        super(RectilinearCamera, self).__init__(resolution, pixel_ar, principal_point)
    def project(self, theta):
        radius = np.tan(theta) * self.f
        behind = theta > np.pi / 2.
        radius[behind] = 1.e9
        return radius
    def unproject(self, r):
        return np.arctan(r / self.f)
class OrthographicCamera(Camera):
    """Orthographic fisheye projection: r = f * sin(theta).

    Angles beyond 90 degrees are mapped to a huge radius (1e9) so they
    land far outside any sensor.
    """
    def __init__(self, f, resolution, pixel_ar = 1., principal_point = (0.5, 0.5)):
        self.f = f
        super(OrthographicCamera, self).__init__(resolution, pixel_ar, principal_point)
    def project(self, theta):
        radius = np.sin(theta) * self.f
        behind = theta > np.pi / 2.
        radius[behind] = 1.e9
        return radius
    def unproject(self, r):
        return np.arcsin(r / self.f)
class EquisolidAngleCamera(Camera):
    """Equisolid-angle fisheye projection: r = 2 * f * sin(theta / 2).

    Angles beyond 180 degrees are pushed to a huge radius (1e9).
    """
    def __init__(self, f, resolution, pixel_ar = 1., principal_point = (0.5, 0.5)):
        self.f = f
        super(EquisolidAngleCamera, self).__init__(resolution, pixel_ar, principal_point)
    def project(self, theta):
        result = np.sin(theta / 2.) * 2. * self.f
        result[theta > np.pi] = 1.e9
        return result
    def unproject(self, r):
        return 2. * np.arcsin(r / (2. * self.f))
class StereographicCamera(Camera):
    """Stereographic fisheye projection: r = 2 * f * tan(theta / 2).

    Angles beyond 180 degrees are pushed to a huge radius (1e9).
    """
    def __init__(self, f, resolution, pixel_ar = 1., principal_point = (0.5, 0.5)):
        self.f = f
        super(StereographicCamera, self).__init__(resolution, pixel_ar, principal_point)
    def project(self, theta):
        result = np.tan(theta / 2.) * 2. * self.f
        result[theta > np.pi] = 1.e9
        return result
    def unproject(self, r):
        return 2. * np.arctan(r / (2. * self.f))
class CubicCamera(Camera):
    """Polynomial distortion model: r = k1 * theta + k2 * theta ** 3."""
    def __init__(self, k1, k2, resolution, pixel_ar = 1., principal_point = (0.5, 0.5)):
        self.k1 = k1
        self.k2 = k2
        super(CubicCamera, self).__init__(resolution, pixel_ar, principal_point)
    def project(self, theta):
        return self.k1 * theta + self.k2 * theta ** 3
    def unproject(self, r):
        """Invert r = k1*theta + k2*theta**3 for theta.

        Uses the closed-form (Cardano-style) cubic root with the
        standard delta_0/delta_1 discriminants and keeps the real root.
        """
        # 0 = a * theta ^ 3 + c * theta + d
        a = self.k2
        c = self.k1
        d = -r
        if a == 0:
            # Linear case: r = k1 * theta.
            theta = r/c
            return theta
        delta_0 = -3. * a * c
        delta_1 = 27. * a * a * d
        if a < 0:
            # The discriminant can go negative here: evaluate in complex
            # arithmetic and take the real part of the root.
            C = ((delta_1 + np.sqrt(np.complex64(delta_1 ** 2 - 4. * delta_0 ** 3))) / 2.) ** (1. / 3.)
            theta = np.real(-1. / (3. * a) * (C + delta_0 / (C)))
        else:
            C = ((delta_1 + np.sqrt(delta_1 ** 2 - 4. * delta_0 ** 3)) / 2) ** (1. / 3.)
            theta = -1. / (3. * a) * (C + delta_0 / (C))
        return theta
# Star Catalog
# ------------
class StarCatalog:
    """The Hipparcos star catalog: unit direction vectors + magnitudes."""
    def __init__(self, filename='hip_main.dat'):
        # Pass filename=None to build an empty catalog and call read()
        # and preprocess() manually later.
        if filename is not None:
            self.read(filename)
            self.preprocess()
    def preprocess(self):
        """Drop rows lacking a position or magnitude and precompute
        unit vectors (from RA/DE degrees) and the magnitude array."""
        filter_index = np.logical_not(np.logical_or(np.isnan(self.catalog['RAdeg']), np.isnan(self.catalog['Vmag'])))
        self.catalog = self.catalog[filter_index]
        self.star_vectors = angles_to_vector(np.deg2rad(self.catalog['RAdeg']), np.deg2rad(self.catalog['DEdeg']))
        self.magnitudes = self.catalog['Vmag'].values
        # alternative magnitude
        #VT = self.catalog['VTmag']
        #BT = self.catalog['BTmag']
        ## http://www.aerith.net/astro/color_conversion.html
        ## http://ads.nao.ac.jp/cgi-bin/nph-bib_query?bibcode=2002AJ....124.1670M&db_key=AST&high=3d1846678a19297
        #self.magnitudes = VT + 0.00097 - 0.1334 * (BT - VT) + 0.05486 * (BT - VT) ** 2 - 0.01998 * (BT - VT) ** 3
        # randomly fill out missing magnitudes
        #magnitudes = self.catalog['Vmag'].values
        #nans = np.isnan(magnitudes)
        #magnitudes[nans] = np.random.choice(magnitudes[~nans], np.sum(nans))
        #self.catalog['Vmag'] = magnitudes
    def lookup_indices(self, indices):
        """Map catalog row indices to HIP identifiers; negative entries
        (false stars) are passed through unchanged."""
        result = indices.copy()
        result[indices >= 0] = self.catalog['HIP'].iloc[indices[indices >= 0]]
        return result
    def read(self, filename='hip_main.dat'):
        """Loads the Hipparchos star catalog (pipe-separated hip_main format)."""
        columns = [
            "Catalog",
            "HIP",
            "Proxy",
            "RAhms",
            "DEdms",
            "Vmag",
            "VarFlag",
            "r_Vmag",
            "RAdeg",
            "DEdeg",
            "AstroRef",
            "Plx",
            "pmRA",
            "pmDE",
            "e_RAdeg",
            "e_DEdeg",
            "e_Plx",
            "e_pmRA",
            "e_pmDE",
            "DE:RA",
            "Plx:RA",
            "Plx:DE",
            "pmRA:RA",
            "pmRA:DE",
            "pmRA:Plx",
            "pmDE:RA",
            "pmDE:DE",
            "pmDE:Plx",
            "pmDE:pmRA",
            "F1",
            "F2",
            "---",
            "BTmag",
            "e_BTmag",
            "VTmag",
            "e_VTmag",
            "m_BTmag",
            "B-V",
            "e_B-V",
            "r_B-V",
            "V-I",
            "e_V-I",
            "r_V-I",
            "CombMag",
            "Hpmag",
            "e_Hpmag",
            "Hpscat",
            "o_Hpmag",
            "m_Hpmag",
            "Hpmax",
            "HPmin",
            "Period",
            "HvarType",
            "moreVar",
            "morePhoto",
            "CCDM",
            "n_CCDM",
            "Nsys",
            "Ncomp",
            "MultFlag",
            "Source",
            "Qual",
            "m_HIP",
            "theta",
            "rho",
            "e_rho",
            "dHp",
            "e_dHp",
            "Survey",
            "Chart",
            "Notes",
            "HD",
            "BD",
            "CoD",
            "CPD",
            "(V-I)red",
            "SpType",
            "r_SpType",
        ]
        self.catalog = pd.read_csv(filename, sep='|', names=columns, skipinitialspace=True)
# Star Detector
# -----
class StarDetector:
    """Photometric model of the detector: magnitude <-> flux with noise.

    NOTE(review): compute_flux and compute_magnitude_threshold read the
    module-level globals IMAGE_VARIANCE and THRESH_FACTOR, which are
    defined by the calibration file exec'd in __main__ - confirm they
    exist before using this class elsewhere.
    """
    def __init__(self, sigma_psf, t_exp, aperture, base_flux):
        self.sigma_psf = sigma_psf    # PSF sigma, pixels
        self.t_exp = t_exp            # exposure time, seconds
        self.aperture = aperture      # aperture, mm
        self.base_flux = base_flux    # flux of a magnitude-0 star
    @staticmethod
    def norm_gaussian(sigma):
        # Fraction of a symmetric 2D Gaussian captured within a unit
        # half-width per axis - presumably the central-pixel fraction
        # of the PSF; TODO confirm the intended integration region.
        return math.erf((2*sigma)**-0.5)**2/4.
    # photon_floor is the cutoff at which we can round the number of
    # photons from a star down to zero
    def compute_flux(self, magnitude, add_noise=True, photon_floor=0.001):
        """Convert magnitude(s) to flux; optionally add sensor noise.

        NOTE(review): the source's indentation is ambiguous here - as
        written, photon shot noise is applied even when
        add_noise=False; confirm against the original file.
        """
        flux = self.base_flux * (10 ** (-magnitude / 2.5))
        if add_noise:
            flux = flux + np.random.normal(0, IMAGE_VARIANCE, len(flux))
        # Flux of a single photoelectron for this detector setup.
        flux_per_photon = self.base_flux * self.t_exp * self.aperture ** 2 * np.pi / BASE_PHOTONS
        # Photon (shot) noise scaled by the PSF's central fraction.
        flux += flux_per_photon * np.random.normal(0, self.norm_gaussian(self.sigma_psf) * np.sqrt(np.clip(flux / flux_per_photon, photon_floor, None)), len(flux))
        return np.clip(flux, photon_floor * flux_per_photon, None)
    def compute_magnitude(self, flux):
        """Inverse of the noiseless magnitude-to-flux conversion."""
        return -2.5 * np.log10(flux / self.base_flux)
    def compute_magnitude_threshold(self):
        """Faintest detectable magnitude given the detection threshold."""
        threshold = THRESH_FACTOR * IMAGE_VARIANCE
        return self.compute_magnitude(threshold)
    def add_noise(self, magnitude):
        """Round-trip magnitudes through the noisy flux model."""
        return self.compute_magnitude(self.compute_flux(magnitude))
# Scene
# -----
class Scene:
    """A simulated star-tracker image: star pixel positions, magnitudes
    and catalog ids for one camera orientation."""
    def __init__(self, catalog, camera, detector, gaussian_noise_sigma=None, quantization_noise=None, magnitude_gaussian=None):
        self.catalog = catalog
        self.camera = camera
        self.detector = detector
        self.gaussian_noise_sigma = gaussian_noise_sigma  # angular position noise (rad)
        self.quantization_noise = quantization_noise      # truncate positions to integer pixels
        self.magnitude_gaussian = magnitude_gaussian      # extra magnitude noise (mag)
        self.orientation = None
        self.pos = None
        self.ids = None
        self.magnitude_threshold = detector.compute_magnitude_threshold()
    def compute(self, orientation=None):
        """Generates a scene for the star tracker.
        If no orientation is given a random one is generated.
        Gaussian noise is applied to star positions if enabled."""
        res_x, res_y = self.camera.resolution
        if orientation is None:
            orientation = random_matrix()
        self.orientation = orientation
        star_ids = np.arange(len(self.catalog.star_vectors))
        # Rotate the catalog directions into the camera frame.
        pos = np.dot(self.catalog.star_vectors, orientation.transpose())
        # instead vector noise
        if self.gaussian_noise_sigma:
            pos = add_vector_noise(pos, self.gaussian_noise_sigma)
        az, alt = vector_to_angles(pos)
        scene = self.camera.from_angles(az, alt)
        if self.quantization_noise:
            scene = np.int32(scene)
        # Keep only stars that land on the sensor (rows are (y, x)).
        selection = np.logical_and(np.logical_and(scene[:, 0] >= 0, scene[:, 0] < res_y), np.logical_and(scene[:, 1] >= 0, scene[:, 1] < res_x))
        scene = scene[selection]
        scene_ids = star_ids[selection]
        self.pos = scene
        self.magnitudes = self.catalog.magnitudes[scene_ids]
        self.ids = self.catalog.lookup_indices(scene_ids)
    def add_false_stars(self, false_stars):
        """Adds randomly generated false stars to a scene.

        `false_stars` is either a count (positions drawn uniformly over
        the sensor) or an array of positions.  False stars get id -1
        and magnitudes resampled from the visible catalog range.
        """
        if isinstance(false_stars, int):
            res_x, res_y = self.camera.resolution
            false_star_pos = np.random.rand(false_stars, 2) * (res_y, res_x)
        else:
            false_star_pos = false_stars
            false_stars = len(false_stars)
        if self.quantization_noise:
            false_star_pos = np.int32(false_star_pos)
        self.pos = np.concatenate([self.pos, false_star_pos])
        self.magnitudes = np.concatenate([self.magnitudes, np.random.choice(self.catalog.magnitudes[self.catalog.magnitudes < self.magnitude_threshold], size=false_stars)])
        self.ids = np.concatenate([self.ids, -np.ones(false_stars, np.int32)])
    def copy(self, camera=None, orientation=None, copy_false_stars=True):
        """Re-render this scene, optionally under another camera or
        orientation; false stars are carried over (remapped through the
        new camera when one is given)."""
        false_stars = self.pos[self.ids == -1] if copy_false_stars else []
        if camera is None:
            camera = self.camera
        else:
            false_stars = camera.from_angles(*self.camera.to_angles(false_stars))
        if orientation is None:
            orientation = self.orientation
        scene = Scene(self.catalog, camera, self.detector, self.gaussian_noise_sigma, self.quantization_noise, self.magnitude_gaussian)
        scene.compute(orientation)
        scene.add_false_stars(false_stars)
        scene.scramble()
        scene.add_magnitude_noise(self.magnitude_gaussian)
        scene.filter_magnitudes()
        return scene
    def scramble(self):
        """Scrambles the order of stars in a scene."""
        scramble_index = np.random.permutation(range(len(self.ids))) # permutation returns ndarray
        if len(scramble_index) == 0:
            return None
        self.pos = self.pos[scramble_index, ...]
        self.magnitudes = self.magnitudes[scramble_index]
        self.ids = self.ids[scramble_index]
    def add_magnitude_noise(self, gaussian=None):
        """Apply the detector's photometric noise plus optional extra
        Gaussian magnitude noise."""
        #if catalog:
        #    self.magnitudes[self.scene_ids >= 0] += np.random.normal(0, self.catalog.catalog['e_VTmag'])
        self.magnitudes = self.detector.add_noise(self.magnitudes)
        if gaussian is not None:
            self.magnitudes += np.random.normal(0, gaussian, size=len(self.magnitudes))
    def filter_magnitudes(self):
        """Drop stars fainter than the detector's magnitude threshold."""
        filter_index = self.magnitudes <= self.magnitude_threshold
        self.pos = self.pos[filter_index]
        self.magnitudes = self.magnitudes[filter_index]
        self.ids = self.ids[filter_index]
    @staticmethod
    def random(catalog, camera, detector, min_true, max_true, min_false, max_false, min_stars=1, max_tries = 1000, gaussian_noise_sigma=None, quantization_noise=None, magnitude_gaussian=None):
        """Generate random scenes until one has an acceptable number of
        true (and total) stars; returns None if none is found within
        max_tries attempts."""
        scene = Scene(catalog, camera, detector, gaussian_noise_sigma, quantization_noise, magnitude_gaussian)
        num_stars = 0
        tries = 0
        ok = False
        while not ok and tries < max_tries:
            scene.compute()
            scene.add_false_stars(np.random.randint(min_false, max_false + 1))
            scene.scramble()
            scene.add_magnitude_noise(magnitude_gaussian)
            scene.filter_magnitudes()
            num_stars = np.sum(scene.ids >= 0)
            ok = num_stars >= min_true and num_stars <= max_true and len(scene.ids) > min_stars
            tries += 1
        # BUG FIX: the original tested `tries == max_tries`, which also
        # discarded a scene found successfully on the very last try.
        if not ok:
            return None
        return scene
# only do this part if we were run as a python script
if __name__ == '__main__':
    if len(sys.argv) != 4:
        sys.stdout.write("Usage: python simulator.py calibration.txt input.csv results.csv\n")
        exit()
    # SECURITY NOTE(review): this executes the calibration file as
    # arbitrary Python (it defines IMG_X, IMG_Y, PIXSCALE, etc.);
    # only run with trusted calibration files.
    exec(open(sys.argv[1]).read())
    res_x = float(IMG_X) # pixels
    res_y = float(IMG_Y) # pixels
    # normalized focal length
    f = 0.5 / np.tan(np.deg2rad((IMG_X*PIXSCALE/3600)) / 2.)
    # pixel aspect ratio
    pixel_ar = 1.
    # normalized principal point
    ppx = 0.5
    ppy = 0.5
    # angular position noise in radians, from per-pixel variance
    gaussian_noise_sigma = (np.pi*PIXSCALE)*np.sqrt(POS_VARIANCE)/(180*3600.0) # rad
    cam = 0  # index into `cameras` below (0 = RectilinearCamera)
    sigma_psf = 0.5*float(DOUBLE_STAR_PX) # pixel
    t_exp = float(EXPOSURE_TIME) # s
    aperture = float(APERTURE) # mm
    base_flux = float(BASE_FLUX)
    magnitude_gaussian = 0.02 # mag
    min_true = 0
    max_true = 1000
    min_false = 0
    max_false = MAX_FALSE_STARS
    catalog = StarCatalog()
    cameras = [
        RectilinearCamera, # no distortion
        EquidistantCamera, # the rest model barrel and pin-cushion distortion
        EquisolidAngleCamera,
        StereographicCamera,
        OrthographicCamera,
    ]
    camera = cameras[cam](f, (res_x, res_y), pixel_ar, (ppx, ppy))
    detector = StarDetector(sigma_psf, t_exp, aperture, base_flux)
    num_scenes = 100
    inputs = []
    outputs = []
    for i in range(num_scenes):
        scene = Scene.random(catalog, camera, detector, min_true, max_true, min_false, max_false, gaussian_noise_sigma=gaussian_noise_sigma, magnitude_gaussian=magnitude_gaussian)
        # One flat row per scene: x, y, magnitude triples (positions
        # flipped from (y, x) to (x, y) via [::, ::-1]).
        inputs.append(np.hstack((scene.pos[::, ::-1], scene.magnitudes.reshape(-1, 1))).flatten())
        outputs.append(scene.ids)
    def write_csv(filename, lines):
        # Write each row as one comma-separated line.
        with open(filename, 'w') as f:
            for line in lines:
                f.write(','.join(str(value) for value in line) + '\n')
    write_csv(sys.argv[2], inputs)
    write_csv(sys.argv[3], outputs)
|
<filename>matchstrings.py
import numpy as np
from weighted_levenshtein import lev
class MatchString:
    """OCR-tolerant string distance based on weighted Levenshtein.

    Builds per-character insert/delete/substitute cost tables (indexed
    by ASCII ordinal) that discount common OCR confusions, then scores
    string pairs with `lev`.
    """
    def __init__(self):
        # Insert costs: 1.0 everywhere except spaces and digits, which
        # are penalized more (1.5) when inserted.
        self.insert_costs = np.ones(128, dtype=np.float64)  # one entry per ASCII character
        self.insert_costs[ord(' ')] = 1.5
        for digit in '123456789':
            self.insert_costs[ord(digit)] = 1.5
        # Substitution costs default to 1.
        self.substitute_costs = np.ones((128, 128), dtype=np.float64)
        # Deleting a space is cheap (OCR often merges words).
        self.delete_costs = np.ones(128, dtype=np.float64)
        self.delete_costs[ord(' ')] = 0.25
        # Cheap substitutions for common OCR confusion groups, applied
        # in both directions within each group.
        # Group 1: o / d / q
        # BUG FIX: the original set substitute_costs[ord('o'), ord('o')]
        # (a no-op self-substitution) instead of the missing (o, d) pair.
        self.substitute_costs[ord('o'), ord('d')] = 0.25
        self.substitute_costs[ord('d'), ord('o')] = 0.25
        self.substitute_costs[ord('q'), ord('d')] = 0.25
        self.substitute_costs[ord('d'), ord('q')] = 0.25
        self.substitute_costs[ord('o'), ord('q')] = 0.25
        self.substitute_costs[ord('q'), ord('o')] = 0.25
        # Group 2: i / j / l / t (all 12 ordered pairs)
        for first in 'ijlt':
            for second in 'ijlt':
                if first != second:
                    self.substitute_costs[ord(first), ord(second)] = 0.25
        # Group 3: u / v
        self.substitute_costs[ord('u'), ord('v')] = 0.25
        self.substitute_costs[ord('v'), ord('u')] = 0.25
        # Group 4: f / p
        self.substitute_costs[ord('f'), ord('p')] = 0.25
        self.substitute_costs[ord('p'), ord('f')] = 0.25
        # Group 5: c / g
        self.substitute_costs[ord('c'), ord('g')] = 0.25
        self.substitute_costs[ord('g'), ord('c')] = 0.25
    def match(self, string1, string2):
        """Return the weighted Levenshtein distance between the two
        strings, compared case-insensitively."""
        return lev(string1.lower(), string2.lower(), substitute_costs=self.substitute_costs,
                   delete_costs=self.delete_costs, insert_costs=self.insert_costs)
<reponame>skelleher/subtitled
import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
    """Image encoder: frozen pretrained ResNet-50 backbone followed by a
    trainable linear layer projecting pooled features to `embed_size`."""

    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        backbone = models.resnet50(pretrained=True)
        # Freeze the pretrained backbone; only the embedding layer trains.
        for param in backbone.parameters():
            param.requires_grad_(False)
        # Drop the final classification (fc) layer, keep everything up to the pooled features.
        self.resnet = nn.Sequential(*list(backbone.children())[:-1])
        self.image_embed = nn.Linear(backbone.fc.in_features, embed_size)

    def forward(self, images):
        feats = self.resnet(images)
        feats = feats.view(feats.size(0), -1)  # flatten pooled features per image
        return self.image_embed(feats)
class DecoderRNN(nn.Module):
    """Caption decoder: embeds word tokens, runs them (prefixed by the image
    feature vector) through an LSTM, and predicts a distribution over the
    vocabulary at each step."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        super().__init__()
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.lstm_hidden = None  # lazily initialized per batch size in forward()
        # Word embedding: maps long word token(s) of any shape to embed_size vectors.
        self.word_embed = nn.Embedding(vocab_size, embed_size)
        # LSTM outputs hidden_size vectors; a fully-connected layer predicts tokens.
        # batch_first so inputs/outputs use the standard [batch, seq, embed] order.
        # NOTE: dropout only applies between stacked layers (no-op when num_layers=1).
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first = True, dropout = 0.4)
        # Probability distribution (logits) over the entire vocabulary.
        self.word_likelihood = nn.Linear(hidden_size, vocab_size)
        self.init_weights()

    def init_lstm_hidden(self, n_seqs):
        '''Return zeroed (hidden, cell) state tensors of shape
        [num_layers, n_seqs, hidden_size]; call before a training epoch or inference.'''
        weight = next(self.parameters()).data
        return (weight.new(self.num_layers, n_seqs, self.hidden_size).zero_(),
                weight.new(self.num_layers, n_seqs, self.hidden_size).zero_())

    def init_weights(self):
        '''Initialize the fully connected layer: zero bias, uniform weights.'''
        self.word_likelihood.bias.data.fill_(0)
        self.word_likelihood.weight.data.uniform_(-1, 1)

    def forward(self, image_features, captions):
        '''Predict vocabulary logits for each caption position.

        image_features: batch of already-embedded image vectors (batch, embed_size).
        captions: batch of word tokens (batch, T), embedded here.
        Returns logits of shape (batch, T, vocab_size).
        '''
        # Lazily create a hidden state sized to this batch.
        # NOTE: the stored state is intentionally NOT fed to the LSTM below —
        # passing a saved hidden state back in raises "Trying to backward
        # through the graph a second time" during training.
        if (not self.lstm_hidden):
            self.lstm_hidden = self.init_lstm_hidden(captions.shape[0])
            # (debug print removed — it fired on the first forward pass of every run)
        caption_embeddings = self.word_embed(captions)
        # Prepend the image feature as the first "token" of each sequence.
        image_features = image_features.unsqueeze(1)
        embeddings = torch.cat((image_features, caption_embeddings), 1)
        outputs, _ = self.lstm(embeddings)
        probabilities = self.word_likelihood(outputs)
        # Trim the LAST step so the output length equals the caption length
        # (grader asserts length == caption_length). .contiguous() copies,
        # which is wasteful during training but required for the layout.
        probabilities = probabilities[:, 0:-1, :].contiguous()
        return probabilities

    def sample(self, inputs, states=None, max_len=20):
        '''Greedy decode: accepts a pre-processed image tensor `inputs` of shape
        (1, 1, embed_size) and returns a list of up to `max_len` predicted token ids.'''
        hidden = states
        next_input = inputs
        tokens = []
        for _ in range(max_len):
            outputs, hidden = self.lstm(next_input, hidden)
            probabilities = self.word_likelihood(outputs).squeeze()
            _, token = probabilities.max(0)
            tokens.append(token.item())
            next_input = self.word_embed(token).unsqueeze(0).unsqueeze(0)
        return tokens
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# These xcode_settings affect stripping:
# "Deployment postprocessing involves stripping the binary, and setting
# its file mode, owner, and group."
#'DEPLOYMENT_POSTPROCESSING': 'YES',
# "Specifies whether to strip symbol information from the binary.
# Prerequisite: $DEPLOYMENT_POSTPROCESSING = YES" "Default Value: 'NO'"
#'STRIP_INSTALLED_PRODUCT': 'YES',
# "Values:
# * all: Strips the binary completely, removing the symbol table and
# relocation information
# * non-global: Strips nonglobal symbols but saves external symbols.
# * debugging: Strips debugging symbols but saves local and global
# symbols."
# (maps to no flag, -x, -S in that order)
#'STRIP_STYLE': 'non-global',
# "Additional strip flags"
#'STRIPFLAGS': '-c',
# "YES: Copied binaries are stripped of debugging symbols. This does
# not cause the binary produced by the linker to be stripped. Use
# 'STRIP_INSTALLED_PRODUCT (Strip Linked Product)' to have the linker
# strip the binary."
#'COPY_PHASE_STRIP': 'NO',
{
  # One shared_library target per combination of the stripping-related Xcode
  # settings described in the header comment above.
  'targets': [
    # Stripping requested but postprocessing disabled — per the header,
    # STRIP_INSTALLED_PRODUCT requires DEPLOYMENT_POSTPROCESSING = YES.
    {
      'target_name': 'no_postprocess',
      'type': 'shared_library',
      'sources': [ 'file.c', ],
      'xcode_settings': {
        'DEPLOYMENT_POSTPROCESSING': 'NO',
        'STRIP_INSTALLED_PRODUCT': 'YES',
      },
    },
    # Postprocessing enabled, stripping explicitly off.
    {
      'target_name': 'no_strip',
      'type': 'shared_library',
      'sources': [ 'file.c', ],
      'xcode_settings': {
        'DEPLOYMENT_POSTPROCESSING': 'YES',
        'STRIP_INSTALLED_PRODUCT': 'NO',
      },
    },
    # STRIP_STYLE 'all': remove symbol table and relocation info (no strip flag).
    {
      'target_name': 'strip_all',
      'type': 'shared_library',
      'sources': [ 'file.c', ],
      'xcode_settings': {
        'DEPLOYMENT_POSTPROCESSING': 'YES',
        'STRIP_INSTALLED_PRODUCT': 'YES',
        'STRIP_STYLE': 'all',
      },
    },
    # STRIP_STYLE 'non-global': strip nonglobal symbols, keep externals (-x).
    {
      'target_name': 'strip_nonglobal',
      'type': 'shared_library',
      'sources': [ 'file.c', ],
      'xcode_settings': {
        'DEPLOYMENT_POSTPROCESSING': 'YES',
        'STRIP_INSTALLED_PRODUCT': 'YES',
        'STRIP_STYLE': 'non-global',
      },
    },
    # STRIP_STYLE 'debugging': strip debugging symbols only (-S).
    {
      'target_name': 'strip_debugging',
      'type': 'shared_library',
      'sources': [ 'file.c', ],
      'xcode_settings': {
        'DEPLOYMENT_POSTPROCESSING': 'YES',
        'STRIP_INSTALLED_PRODUCT': 'YES',
        'STRIP_STYLE': 'debugging',
      },
    },
    # Additional custom strip flags on top of STRIP_STYLE 'all'.
    {
      'target_name': 'strip_all_custom_flags',
      'type': 'shared_library',
      'sources': [ 'file.c', ],
      'xcode_settings': {
        'DEPLOYMENT_POSTPROCESSING': 'YES',
        'STRIP_INSTALLED_PRODUCT': 'YES',
        'STRIP_STYLE': 'all',
        'STRIPFLAGS': '-c',
      },
    },
    # Same as strip_all, but built as a mac bundle.
    {
      'target_name': 'strip_all_bundle',
      'type': 'shared_library',
      'mac_bundle': '1',
      'sources': [ 'file.c', ],
      'xcode_settings': {
        'DEPLOYMENT_POSTPROCESSING': 'YES',
        'STRIP_INSTALLED_PRODUCT': 'YES',
        'STRIP_STYLE': 'all',
      },
    },
    # Strip with a symbol save-list file ($CHROMIUM_STRIP_SAVE_FILE);
    # exercises the nested targets in subdirectory.gyp as well.
    {
      'target_name': 'strip_save',
      'type': 'shared_library',
      'sources': [ 'file.c', ],
      'dependencies': [
        'subdirectory/subdirectory.gyp:nested_strip_save',
        'subdirectory/subdirectory.gyp:nested_strip_save_postbuild',
      ],
      'xcode_settings': {
        'DEPLOYMENT_POSTPROCESSING': 'YES',
        'STRIP_INSTALLED_PRODUCT': 'YES',
        'STRIPFLAGS': '-s $(CHROMIUM_STRIP_SAVE_FILE)',
        'CHROMIUM_STRIP_SAVE_FILE': 'strip.saves',
      },
    },
  ],
}
|
# Public API of this module, including the ten colored block_* variants.
__all__ = [
    'print_context', 'write', 'iwrite', 'details', 'plt2html', 'set_dir', 'textbox',
    'image', 'svg', 'format_html', 'format_css', 'alert', 'colored', 'keep_format',
    'source', 'raw', 'enable_zoom', 'html_node', 'sig', 'doc',
    'rows', 'block',
    'block_r', 'block_g', 'block_b', 'block_y', 'block_c',
    'block_m', 'block_k', 'block_o', 'block_w', 'block_p',
]
import sys, linecache, os, re
import textwrap
import inspect
from io import StringIO
from io import BytesIO # For PIL image
from contextlib import contextmanager
from IPython.core.getipython import get_ipython
from markdown import markdown
from IPython.display import HTML, display, Code, SVG
from IPython.utils.capture import capture_output
from IPython.core.display import Image, __all__ as __all
import ipywidgets as ipw
from .objs_formatter import format_object, syntax_css, _fix_code, fix_ipy_image
from .objs_formatter import plt2html # For backward cimpatibility and inside class
__reprs__ = [rep.replace('display_','') for rep in __all if rep.startswith('display_')] # IPython rich-repr names that `write` can render (html, svg, png, ...)
__md_extensions = ['fenced_code','tables','codehilite','footnotes'] # extensions enabled for the Markdown parser
class _HTML_Widget(ipw.HTML):
    "HTML widget (ipywidgets.HTML) that also renders via `_repr_html_` so `write` can display it."
    def _repr_html_(self):
        "Expose the widget's HTML value to IPython rich display / `write`."
        return self.value
# Do not use this in main work, just inside a function
class _Source_Widget(ipw.HTML):
    """Source-code widget for IPython; `value` holds already-highlighted HTML
    (one `<code>` element per source line, as produced by `_fix_code`)."""
    def __init__(self,*args,**kwargs):
        super().__init__(*args,**kwargs)
        self._code = self.value # Save the full original HTML so line filters can be reset
        self.raw = '' # Raw (plain-text) source code
    def _repr_html_(self):
        "Make it available in `write` command as well."
        return self.value
    def show_lines(self, lines):
        "Show only the given lines (list/tuple/range of 0-based indices); returns self."
        self.value = self._code # Reset to original code first
        if isinstance(lines,(list,tuple,range)):
            # Splitting on '<code>' with start=-1 makes element -1 the HTML prefix
            # and element i the i-th source line; `link` tracks the last shown line
            # so gaps can be summarized as "+ N more lines".
            _lines, link = [],0
            for i, line in enumerate(self._code.split('<code>'), start = -1):
                if i == -1:
                    _lines.append(line) # HTML prefix before the first line
                elif i in lines:
                    if link + 1 != i and i > 0:
                        _lines.append(f'<code class="code-no-focus"> + {i - link} more lines ... </code>')
                    _lines.append('<code>' + line) # Lines not in `lines` are skipped altogether
                    link = i
            # i ends at the last line index; summarize any trailing hidden lines
            if i > link:
                _last_line = self._code.split('</pre>')[-1] # Get closing characters to add
                _lines.append(f'<code class="code-no-focus"> + {i - link} more lines ... </code></pre>{_last_line}')
            self.value = ''.join(_lines) # update value
            return self
        else:
            raise TypeError(f'lines must be list, tuple or range, not {type(lines)}')
    def show_all(self):
        "Show all lines again (undo `show_lines`); returns self."
        self.value = self._code
        return self
    def focus_lines(self, lines):
        "Highlight the given lines (list/tuple/range of 0-based indices), dimming the rest; returns self."
        self.value = self._code # Reset to original code first
        if isinstance(lines,(list,tuple,range)):
            _lines = []
            for i, line in enumerate(self._code.split('<code>'), start = -1):
                if i == -1:
                    _lines.append(line) # HTML prefix before the first line
                elif i not in lines:
                    _lines.append('<code class="code-no-focus">' + line)
                else:
                    _lines.append('<code class="code-focus">' + line)
            self.value = ''.join(_lines) # update value
            return self
        else:
            raise TypeError(f'lines must be list, tuple or range, not {type(lines)}')
@contextmanager
def print_context():
    """Capture `print` (and any stdout-writing function) inside this context
    manager and display the output in order with the surrounding writes."""
    with capture_output() as cap:
        yield
    if cap.stderr:
        # Previously `return cap.stderr` inside this generator silently
        # discarded BOTH streams (a generator's return value is lost and
        # stdout was never displayed). Surface stderr instead.
        write(raw(cap.stderr))
    if cap.stdout:
        write(raw(cap.stdout)) # whitespace preserved by `raw`
@contextmanager
def set_dir(path):
    "Temporarily switch the working directory to `path` inside a `with` block."
    previous = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        # Always restore the original directory, even on error.
        os.chdir(previous)
def _fix_repr(obj):
    """Convert `obj` to an HTML string: markdown for strings, otherwise the
    best available rich representation, falling back to `repr`."""
    if isinstance(obj,str):
        # Markdown needs a trailing space before a newline to keep line breaks.
        return _fix_code(markdown(obj.strip().replace('\n',' \n'), extensions=__md_extensions))
    # Objects with custom formatters registered in objs_formatter come first.
    handled, html = format_object(obj)
    if handled:
        return html
    # Then IPython's rich-repr protocol: first method returning a truthy value wins.
    for repr_name in __reprs__:
        method = getattr(obj, f'_repr_{repr_name}_', None)
        if method:
            output = method()
            if output:
                return output
    # Last resort: plain repr inside a PyRepr container.
    return f"<div class='PyRepr'>{obj.__repr__()}</div>"
def _fmt_write(*columns,width_percents=None,className=None):
    """Build the HTML for `write`: one flex-div per column, each column holding
    the HTML of its rows. Returns a single HTML string."""
    # Equal widths unless explicit percentages were given.
    if not width_percents and len(columns) >= 1:
        widths = [f'{int(100/len(columns))}%' for _ in columns]
    else:
        widths = [f'{w}%' for w in width_percents]
    _class = className if isinstance(className,str) else ''
    # Wrap bare objects so every column is a list of rows.
    _cols = [_c if isinstance(_c,(list,tuple)) else [_c] for _c in columns]
    _cols = ''.join([f"""<div style='width:{w};overflow-x:auto;height:auto'>
        {''.join([_fix_repr(row) for row in _col])}
        </div>""" for _col,w in zip(_cols,widths)])
    # Inject highlighting CSS only when highlighted code is actually present.
    _cols = syntax_css() + _cols if 'codehilite' in _cols else _cols
    if len(columns) == 1:
        return _cols.replace('<div', f'<div class = "{_class}"',1) if _class else _cols
    return f'''<div class="columns {_class}">{_cols}</div>''' if _class else f'''<div class="columns">{_cols}</div>'''
def write(*columns,width_percents=None,className=None):
    '''Write markdown strings or rich objects, one argument per column of equal
    width (or widths given by `width_percents`). A list/tuple argument (or
    `rows(...)`) forms rows within that column.

    Accepted objects per column/row:
    - str (parsed as markdown), int/float/dict/function etc. (pretty repr).
    - Code objects from `ipyslides.get_cell_code()` (syntax highlighted).
    - Matplotlib figures/axes (or use `ipyslides.utils.plt2html()`).
    - Interactive plotly figures, pandas DataFrames (`df` or `df.to_html()`).
    - Anything with a `to_html` method, e.g. Altair charts (interactivity is
      lost — use `display(chart)` if you need brushing etc.).
    - IPython objects exposing `_repr_<format>_` for any of ('html','markdown',
      'svg','png','jpeg','javascript','pdf','pretty','json','latex').
    - Functions/classes/modules (uncalled) are shown as pretty-printed code.

    Anything else falls back to `obj.__repr__()`; use `display(obj)` outside
    `write` (or the library's own notebook API) to show something richer.
    If `className` is given, add its CSS via `format_css` and write that too.
    Note: `keep_format(...)` shields a ready-made HTML string from the markdown
    parser; `_repr_<format>_` takes precedence over `to_<format>` methods.
    '''
    html = _fmt_write(*columns,width_percents=width_percents,className=className)
    return display(HTML(html))
def _fmt_iwrite(*columns,width_percents=None):
    """Build the widget grid for `iwrite`: an HBox of VBox columns. Returns
    (grid_widget, nested tuple of the objects actually placed in the grid)."""
    if not width_percents:
        widths = [f'{int(100/len(columns))}%' for _ in columns]
    else:
        widths = [f'{w}%' for w in width_percents]
    _cols = [_c if isinstance(_c,(list,tuple)) else [_c] for _c in columns] # Make list if single element
    # Convert non-widget objects to HTML widgets
    fixed_cols = []
    for j, _rows in enumerate(_cols):
        row = []
        for i, item in enumerate(_rows):
            try:
                ipw.Box([item]) # Widget check: only real widgets can go in a Box
                item._grid_location = {'row':i,'column':j} # remember position for grid.update
                row.append(item)
            except:
                # Not a widget: render through _fix_repr into an HTML widget
                tmp = _HTML_Widget(value = _fix_repr(item))
                if '<script>' in tmp.value:
                    # Scripts won't execute inside an HTML widget; show an error block instead
                    tmp.value, = block_r('Error displaying object',f'Can not display object {item!r} as it needs Javascript. Use `write` or `display`.').values()
                tmp._grid_location = {'row':i,'column':j}
                row = [*row,tmp]
        fixed_cols.append(row)
    children = [ipw.VBox(children = _c, layout = ipw.Layout(width=f'{_w}')) for _c, _w in zip(fixed_cols,widths)]
    # Re-pack to mirror the caller's input shape (bare object vs tuple of rows)
    out_cols = tuple(tuple(row) if len(row) > 1 else row[0] for row in fixed_cols)
    out_cols = tuple(out_cols) if len(out_cols) > 1 else out_cols[0]
    return ipw.HBox(children = children).add_class('columns'), out_cols # display widget + objects for later use
def iwrite(*columns,width_percents=None,className=None):
    """Each obj in columns could be an IPython widget like `ipywidgets`,`bqplots` etc
    or list/tuple (or wrapped in `rows` function) of widgets to display as rows in a column.
    Other objects (those in `write` command) will be converted to HTML widgets if possible.
    Object containing javascript code may not work, use `write` command for that.
    If you give a className, add CSS of it using `format_css` function and provide it to `iwrite` function.
    **Returns**: grid,columns as reference to use later and update. rows are packed in columns.
    **Examples**:
        grid, x = iwrite('X')
        grid, (x,y) = iwrite('X','Y')
        grid, (x,y) = iwrite(['X','Y'])
        grid, [(x,y),z] = iwrite(['X','Y'],'Z')
        #We unpacked such a way that we can replace objects with new one using `grid.update`
        new_obj = grid.update(x, 'First column, first row with new data') #You can update same `new_obj` with it's own widget methods.
    """
    _grid, _objects = _fmt_iwrite(*columns,width_percents=width_percents)
    if isinstance(className,str):
        _grid.add_class(className)
    display(_grid) # Actually display the widget
    def update(self, old_obj, new_obj):
        "Updates `old_obj` with `new_obj`. Returns reference to created/given widget, which can be updated by its own methods."
        # old_obj carries its grid position from _fmt_iwrite
        row, col = old_obj._grid_location['row'], old_obj._grid_location['column']
        widgets_row = list(self.children[col].children)
        try:
            ipw.Box([new_obj]) # Widget check: only real widgets can go in a Box
            tmp = new_obj
        except:
            # Not a widget: render through _fix_repr into an HTML widget
            tmp = _HTML_Widget(value = _fix_repr(new_obj))
            if '<script>' in tmp.value:
                tmp.value, = block_r('Error displaying object',f'Can not update object {new_obj!r} as it needs Javascript. Use `write` or `display` commands').values()
                return # Don't update
        tmp._grid_location = old_obj._grid_location # Keep location
        widgets_row[row] = tmp
        self.children[col].children = widgets_row
        return tmp
    _grid.update = update.__get__(_grid,type(_grid)) # attach update as a bound method of this grid instance
    return _grid, _objects
def format_html(*columns,width_percents=None,className=None):
    "Like `write`, but returns a format-preserving dict (usable in `write`/`iwrite`) instead of displaying."
    html = _fmt_write(*columns,width_percents=width_percents,className=className)
    return keep_format(html)
def format_css(selector, **css_props):
    """Build a `<style>` block for `selector` (any valid tag/class/id).
    CSS property names use `_` in place of `-`, e.g. font_size -> font-size."""
    cleaned = {}
    for key, value in css_props.items():
        # Strip any user-supplied !important/; then force !important so these rules win.
        cleaned[key.replace('_','-')] = f"{value}".replace('!important','').replace(';','') + '!important;'
    props_str = '\n'.join([f"    {k}: {v}" for k,v in cleaned.items()])
    out_str = f"<style>\n{selector} {{\n{props_str}\n}}\n</style>"
    return keep_format(out_str)
def details(str_html,summary='Click to show content'):
    "Wrap `str_html` in a collapsible <details> element labelled by `summary`."
    return ("<details style='max-height:100%;overflow:auto;'>"
            f"<summary>{summary}</summary>{str_html}</details>")
def __check_pil_image(data):
    "Return raw image bytes when `data` is a PIL Image; otherwise pass `data` through unchanged."
    # Cheap duck check: PIL images repr as '<PIL....>' without importing PIL here.
    if not data.__repr__().startswith('<PIL'):
        return data
    # Serialize the PIL image in its own format at high quality.
    buffer = BytesIO()
    data.save(buffer, data.format, quality=95)
    return buffer.getvalue()
def image(data=None,width='80%',caption=None, zoomable=True,**kwargs):
    """Display PNG/JPEG files or raw image data; `kwargs` are passed to IPython.display.Image.
    `data` accepts any of:
    - An opened PIL image (handy for image operations before writing to slides).
    - A file path or a URL of an image.
    - A str/bytes object containing image data.
    """
    if isinstance(width,int):
        width = f'{width}px'
    payload = __check_pil_image(data)  # PIL image -> bytes; anything else untouched
    img = fix_ipy_image(Image(data = payload,**kwargs),width=width)
    if caption:
        img = img + textbox(caption)  # caption rendered as an inline text box
    if zoomable:
        return f'<div class="zoom-container">{img}</div>'
    return img
def svg(data=None,caption=None,zoomable=True,**kwargs):
    "Display an SVG (url/string/bytes/filepath) with optional caption and zoom; `kwargs` go to IPython.display.SVG."
    markup = SVG(data=data, **kwargs)._repr_svg_()
    if caption:
        markup = markup + textbox(caption)  # caption rendered as an inline text box
    if zoomable:
        markup = f'<div class="zoom-container">{markup}</div>'
    return markup
def enable_zoom(obj):
    "Add the `zoom-container` CSS class to `obj`, whether a widget or an html/IPython object."
    try:
        # Widgets get the class directly on a wrapping Box.
        return ipw.Box([obj]).add_class('zoom-container')
    except:
        # Not a widget: wrap its HTML and shield it from the markdown parser.
        return {'__keep_format__': f'<div class="zoom-container">{_fix_repr(obj)}</div>'}
def html_node(tag,children = [],className = None,**node_attrs):
    """Build an IPython HTML node `<tag ...>children</tag>`.
    `tag` is any valid html tag name. `children` may be:
    - str: used as the node's text content.
    - an html_node-like object (anything with `_repr_html_`): added as a child.
    - list/tuple mixing the two.
    Example:
        html_node('img',src='ir_uv.jpg') # HTML("<img src='ir_uv.jpg'></img>"), displays if last in cell
    """
    if isinstance(children,str):
        content = children
    elif isinstance(children,(list,tuple)):
        parts = [child if isinstance(child,str) else child._repr_html_() for child in children]
        content = ''.join(parts)
    else:
        try:
            content = children._repr_html_()  # accept a single HTML-capable object
        except:
            raise ValueError(f'Children should be a list/tuple of html_node or str, not {type(children)}')
    attrs = ' '.join(f'{k}="{v}"' for k,v in node_attrs.items())
    if className:
        attrs = f'class="{className}"' + ' ' + attrs  # class first; the space is required
    return HTML(f'<{tag} {attrs}>{content}</{tag}>')
def _file2code(filename,language='python',name=None):
    """Read plain text (file path or StringIO) and return a `_Source_Widget`
    (with `show_lines`/`focus_lines`). `name` overrides the displayed language title."""
    try:
        text = filename.read() # if StringIO / file-like
    except:
        with open(filename,'r') as f:
            text = f.read()
    if isinstance(name,str):
        _title = name
    else:
        _title = language[0].capitalize() + language[1:]
    # Title doubles as a CSS class name, so strip characters invalid there.
    _class = _title.replace('.','').replace('\s+','')
    if 'ython' in language:
        # Python/IPython: let the markdown codehilite pipeline highlight it.
        code = markdown(f'```{language}\n{text}\n```',extensions=__md_extensions)
    else:
        # Other languages: use IPython's Code lexer, then rebuild the HTML so
        # each source line sits in its own <code> element (needed by show/focus_lines).
        code = Code(data = text,language=language)._repr_html_()
        _arr = [_h.split('</pre>') for _h in code.split('<pre>')]
        start, middle, end = [v for vs in _arr for v in vs] # Flatten
        middle = ''.join(f'<code>{line}</code>' for line in middle.strip().splitlines())
        code = f'<div class="codehilite {_class}"> {start} <pre> {middle} </pre> {end} </div>'
    # Per-language header badge shown via the ::before pseudo-element.
    code = f'''<style> div.codehilite.{_class}::before {{
    content: '🔴 🟡 🟢 {_title}' !important;
    }}</style>''' + code
    out = _Source_Widget(value = _fix_code(code))
    out.raw = text
    return out
def _str2code(text,language='python',name=None):
    "Build a source object (with `show_lines`/`focus_lines`) from a plain-text code string."
    return _file2code(StringIO(text), language=language, name=name)
def textbox(text, **css_props):
    """Wrap `text` in an inline styled <span> (class `TextBox`), e.g. for inline references.
    `css_props` use `_` in place of `-` (font_size -> font-size). `text` is not parsed as
    general markdown here — convert it to HTML first if needed. Style all textboxes at
    once via the `TextBox` CSS class."""
    # inline-block + white-space:pre must come first so text is shown exactly as written.
    styles = {'display':'inline-block','white-space': 'pre', **css_props}
    _style = ' '.join([f"{key.replace('_','-')}:{value};" for key,value in styles.items()])
    return f"<span class='TextBox' style = {_style!r}> {text} </span>" # markdown="span" avoids inner parsing
def alert(text):
    "Render `text` in the alert (crimson) color."
    return f"<span style='color:#DC143C;'>{text}</span>"
def colored(text,fg='blue',bg=None):
    "Render `text` with the given CSS foreground (`fg`) and background (`bg`) colors."
    # NOTE: bg=None produces the literal CSS value 'None' (kept for compatibility).
    return f"<span style='background:{bg};color:{fg};'>{text}</span>"
def keep_format(plaintext_or_html):
    "Shield a string from the markdown parser, e.g. keep_format(obj.to_html()) preserves it verbatim."
    if isinstance(plaintext_or_html,str):
        return {'__keep_format__':plaintext_or_html}
    return plaintext_or_html  # non-strings pass through untouched
def raw(text):
    "Display `text` exactly as written, preserving whitespace (bypasses the markdown parser)."
    # Fix: the closing tag was '<div>' (a second opening div), leaving the
    # element unclosed and able to swallow the markup that follows it.
    return {'__keep_format__':f"<div class='PyRepr'>{text}</div>"}
def rows(*objs):
    "Group objects as rows within a single column of `write`/`iwrite` (improves readability)."
    return objs  # varargs already arrive as a tuple
def block(title,*objs,bg = 'olive'):
    """Format a titled block like a LaTeX beamer block; `bg` sets the accent color.
    `*objs` must be writable with the `write` command."""
    _title = f"""<center style='background:var(--secondary-bg);margin:0px -4px;'>
    <b>{title}</b></center>"""
    _out = _fmt_write(objs) # objs as one tuple => a single column of rows
    return keep_format(f"""<div style='padding:4px' class='block'>
    <div style='border-top:4px solid {bg};box-shadow: 0px 0px 4px {bg};border-radius:4px;padding:0 4px;'>
    {_title}
    {_out}
    </div></div>""")
def block_r(title, *objs):
    "Crimson-accented variant of `block`."
    return block(title, *objs, bg='crimson')
def block_b(title, *objs):
    "Navy-accented variant of `block`."
    return block(title, *objs, bg='navy')
def block_g(title, *objs):
    "Dark-green-accented variant of `block`."
    return block(title, *objs, bg='#006400')
def block_y(title, *objs):
    "Yellow-accented variant of `block`."
    return block(title, *objs, bg='#E4D00A')
def block_o(title, *objs):
    "Orange-accented variant of `block`."
    return block(title, *objs, bg='orange')
def block_p(title, *objs):
    "Purple-accented variant of `block`."
    return block(title, *objs, bg='purple')
def block_c(title, *objs):
    "Cyan-accented variant of `block`."
    return block(title, *objs, bg='#48d1cc')
def block_m(title, *objs):
    "Magenta-accented variant of `block`."
    return block(title, *objs, bg='magenta')
def block_w(title, *objs):
    "Whitesmoke-accented variant of `block`."
    return block(title, *objs, bg='whitesmoke')
def block_k(title, *objs):
    "Dark-gray-accented variant of `block`."
    return block(title, *objs, bg='#343434')
class source:
    """Accessor for source-code objects used by `write`/`iwrite`.

    Not meant to be instantiated; use the classmethods:
    - source.context(): context manager capturing the code written inside it.
    - source.current: the most recently created source object.
    - source.from_file / source.from_string / source.from_callable.
    """
    # Fix: this was `current: None` — a bare annotation that creates NO class
    # attribute, so `source.current` raised AttributeError before first use.
    current = None
    def __init__(self):
        raise Exception("""This class is not meant to be instantiated.
        Use source.context() to get a context manager for source.
        Use source.current to get the current source object.
        Use source.from_file(filename) to get a source object from a file.
        Use source.from_string(string) to get a source object from a string.
        Use source.from_callable(callable) to get a source object from a callable.
        """)
    @classmethod
    def from_string(cls,text,language='python',name=None):
        "Create a source object from a string. `name` overrides the displayed language title."
        cls.current = _str2code(text,language=language,name=name)
        return cls.current
    @classmethod
    def from_file(cls, filename,language='python',name=None):
        "Create a source object (with `show_lines`/`focus_lines`) from a file path or file-like object."
        cls.current = _file2code(filename,language=language,name=name)
        return cls.current
    @classmethod
    def from_callable(cls, callable):
        "Create a source object from a callable (class, function, module, method, etc.)."
        for _type in ['class','function','module','method','builtin','generator']:
            if getattr(inspect,f'is{_type}')(callable):
                source = inspect.getsource(callable)
                cls.current = _str2code(source,language='python',name=None)
                return cls.current
        # NOTE(review): falls through returning None for unsupported objects
        # (kept for backward compatibility; consider raising TypeError).
    @classmethod
    @contextmanager
    def context(cls, collapsed = False, focus_lines = None):
        """Execute and display the source code written inside this context manager.
        Set `collapsed = True` to show it inside a collapsible element.
        `focus_lines` is a list/tuple/range of line indices to highlight —
        useful since the source is written inside the context manager itself.
        **Usage**:
        ```python
        with source.context() as s:  # also stored as source.current
            do_something()
        write(s)  # s.raw and s.value are accessible attributes
        # s.focus_lines / s.show_lines return an object of the same type.
        # iwrite(s) will update the source even inside the context manager.
        ```
        """
        def frame():
            "Works the same for IPython and script.py, unlike traceback."
            # Three f_back hops: frame() -> context() body -> caller of the with-statement.
            return (sys._getframe().f_back.f_back.f_back.f_code.co_filename,
                sys._getframe().f_back.f_back.f_back.f_lineno)
        file, l1 = frame()
        _alert = alert('You can get code once you exit context manager for `write` command <center>OR</center>use it will auto update inside `iwrite` command')
        return_obj = _Source_Widget(value=_alert)
        return_obj.raw = ''
        cls.current = return_obj # same object, no copy — updates are visible via source.current
        try:
            yield return_obj
        finally:
            # Re-read the frame to learn where the with-block ended, then
            # pull exactly the enclosed lines from the file.
            file, l2 = frame()
            lines = linecache.getlines(file)[l1:l2]
            code = textwrap.dedent(''.join(lines))
            return_obj.raw = code
            out_code = _str2code(code).value # needs further processing
            if collapsed:
                return_obj._code = details(out_code,summary='Show Code')
            else:
                return_obj._code = out_code
            return_obj.value = return_obj._code # update the widget's displayed value
            if isinstance(focus_lines,(list,tuple,range)):
                _ = return_obj.focus_lines(focus_lines) # highlight in place; no need to return self
def sig(callable,prepend_str = None):
    "Return the pretty-printed signature of a callable, optionally prefixed by a class/module name."
    try:
        _sig = (f'<b>{callable.__name__}</b>'
                f'<span style="font-size:85%;color:var(--secondary-fg);">{str(inspect.signature(callable))}</span>')
        if prepend_str:
            _sig = alert(prepend_str + '.') + _sig  # e.g. 'Module.' before the name
        return {'__keep_format__':_sig}
    except:
        raise TypeError(f'Object {callable} is not a callable')
def doc(callable,prepend_str = None):
    "Return formatted documentation (signature + docstring) of a callable, optionally prefixed by a class/module name."
    try:
        docs = _fix_repr(inspect.getdoc(callable))
        signature = sig(callable,prepend_str)['__keep_format__']
        return {'__keep_format__':f"<div class='PyRepr'>{signature}<br>{docs}</div>"}
    except:
        raise TypeError(f'Object {callable} is not a callable')
|
from typing import Callable, Optional
import jax
import jax.numpy as np
import objax
from chex import Array, dataclass
from rbig_jax.transforms.block import InitRBIGBlock, RBIGBlockParams
from rbig_jax.utils import get_minimum_zeroth_element, reverse_dataclass_params
from rbig_jax.information.total_corr import init_information_reduction_loss
@dataclass
class InfoLossState:
    # Rolling state threaded through the fit_transform loop below.
    max_layers: int   # hard cap on the number of Gaussianization layers
    ilayer: int       # index of the layer currently being fitted
    info_loss: Array  # per-layer information-reduction values (initialized to ones(max_layers))
class IterativeGaussianization:
    """Iterative Gaussianization (RBIG) flow.

    Repeatedly fits a marginal-uniformization + rotation block until the
    information reduction between consecutive layers stays at ~zero over a
    rolling window (convergence), or until ``max_layers`` blocks are fitted.

    Parameters
    ----------
    uni_uniformize : Callable
        Marginal (per-feature) uniformization transform used inside each block.
    rot_transform : Callable
        Rotation transform (e.g. PCA/ICA) used inside each block.
    n_features : int
        Dimensionality of the data (needed for sampling).
    n_samples : int
        Nominal sample count used by the information-reduction loss.
    zero_tolerance : int
        Window length of the rolling average used to detect convergence.
    zero_tolerance_buffer : int
        Extra buffer on top of the window (kept for interface compatibility;
        not used by the current logic).
    eps : float
        Numerical tolerance forwarded to the RBIG block.
    max_layers : int
        Hard cap on the number of fitted layers.
    p : float
        Significance level for the information-reduction test.
    """

    def __init__(
        self,
        uni_uniformize: Callable,
        rot_transform: Callable,
        n_features: int,
        n_samples: int = 10_000,
        zero_tolerance: int = 50,
        zero_tolerance_buffer: int = 10,
        eps: float = 1e-5,
        max_layers: int = 10_000,
        p: float = 0.1,
    ):
        # create the Gaussianization block (fit/forward/gradient/inverse fns)
        fit_transform_f, forward_f, grad_f, inverse_f = InitRBIGBlock(
            uni_uniformize, rot_transform, eps
        )
        self.max_layers = max_layers
        self.zero_tolerance = zero_tolerance
        self.zero_tolerance_buffer = zero_tolerance_buffer
        self.n_features = n_features
        self.block_fit_transform = jax.jit(fit_transform_f)
        self.block_transform = forward_f
        self.block_inverse_transform = inverse_f
        self.block_gradient_transform = grad_f
        # BUGFIX: ``info_loss_f`` used to be assigned twice; the first
        # (non-jitted) assignment hard-coded p=0.1 and was immediately
        # shadowed.  Keep only the jitted version honouring the user's ``p``.
        self.info_loss_f = jax.jit(
            init_information_reduction_loss(n_samples=n_samples, base=2, p=p)
        )

    def fit(self, X):
        """Fit the flow to data ``X`` (see :meth:`fit_transform`)."""
        _ = self.fit_transform(X)
        return self

    def fit_transform(self, X: Array) -> Array:
        """Fit RBIG blocks layer-by-layer and return the Gaussianized data."""
        # rolling-average kernel used to detect a run of ~zero info losses
        window = np.ones(self.zero_tolerance) / self.zero_tolerance

        def condition(state):
            # rolling average of the absolute info-reduction history
            x_cumsum_window = np.convolve(np.abs(state.info_loss), window, "valid")
            n_zeros = int(np.sum(np.where(x_cumsum_window > 0.0, 0, 1)))
            # BUGFIX: the original condition was
            #   ``jax.lax.ne(n_zeros, 1) or state.ilayer > state.max_layers``
            # which keeps looping *forever* once the layer budget is
            # exceeded.  Continue only while not converged AND within budget
            # (``<`` also keeps the history index in range).
            return bool(jax.lax.ne(n_zeros, 1)) and state.ilayer < state.max_layers

        state = InfoLossState(
            max_layers=self.max_layers, ilayer=0, info_loss=np.ones(self.max_layers)
        )
        X_g = X
        params = []
        while condition(state):
            # close over the pre-transform data for the layer loss
            layer_loss = jax.partial(self.info_loss_f, X_before=X_g)
            # fit one block and push the data through it
            X_g, layer_params = self.block_fit_transform(X_g)
            # information reduction achieved by this layer
            layer_loss = layer_loss(X_after=X_g)
            # record the loss in the fixed-size history buffer
            # NOTE(review): ``jax.ops.index_update`` is removed in newer JAX
            # releases (use ``arr.at[i].set(v)``); kept for this codebase's
            # pinned JAX version.
            info_losses = jax.ops.index_update(
                state.info_loss, state.ilayer, layer_loss
            )
            state = InfoLossState(
                max_layers=self.max_layers,
                ilayer=state.ilayer + 1,
                info_loss=info_losses,
            )
            params.append(layer_params)
        # stack the per-layer parameters into a single dataclass of arrays
        params = RBIGBlockParams(
            support=np.stack([iparam.support for iparam in params]),
            quantiles=np.stack([iparam.quantiles for iparam in params]),
            empirical_pdf=np.stack([iparam.empirical_pdf for iparam in params]),
            support_pdf=np.stack([iparam.support_pdf for iparam in params]),
            rotation=np.stack([iparam.rotation for iparam in params]),
        )
        self.params = params
        # BUGFIX: read the history from ``state`` (always defined) instead of
        # the loop-local ``info_losses`` (NameError if the loop never ran).
        self.info_loss = state.info_loss[: state.ilayer]
        self.n_layers = state.ilayer
        return X_g

    def transform(self, X: Array) -> Array:
        """Forward-transform ``X`` through all fitted layers."""
        def f_apply(inputs, params):
            outputs = self.block_transform(params, inputs)
            return outputs, 0
        X, _ = jax.lax.scan(f_apply, X, self.params, None)
        return X

    def inverse_transform(self, X: Array) -> Array:
        """Map Gaussianized data back to the input domain."""
        def f_invapply(inputs, params):
            outputs = self.block_inverse_transform(params, inputs)
            return outputs, 0
        # reverse the parameters so layers are undone in reverse order
        params_reversed = reverse_dataclass_params(self.params)
        X, _ = jax.lax.scan(f_invapply, X, params_reversed, None)
        return X

    def log_det_jacobian(self, X: Array) -> Array:
        """Return the per-sample log-determinant of the Jacobian."""
        def fscan_gradient(inputs, params):
            return self.block_gradient_transform(params, inputs)
        # loop through params
        _, X_ldj_layers = jax.lax.scan(fscan_gradient, X, self.params, None)
        # summarize the layers (L, N, D) -> (N, D)
        X_ldj = np.sum(X_ldj_layers, axis=0)
        return X_ldj

    def score_samples(self, X: Array) -> Array:
        """Return the log-probability of each sample under the flow."""
        def fscan_gradient(inputs, params):
            return self.block_gradient_transform(params, inputs)
        # loop through params
        X, X_ldj_layers = jax.lax.scan(fscan_gradient, X, self.params, None)
        # summarize the layers (L, N, D) -> (N, D)
        X_ldj = np.sum(X_ldj_layers, axis=0)
        # log-probability of the latent (Gaussian) representation
        latent_prob = jax.scipy.stats.norm.logpdf(X)
        # change-of-variables: latent log-prob plus log|det J|
        log_prob = (latent_prob + X_ldj).sum(-1)
        return log_prob

    def score(self, X: Array) -> Array:
        """Return the mean negative log-likelihood of ``X``."""
        return -self.score_samples(X).mean()

    def sample(self, n_samples: int, generator=objax.random.Generator()) -> Array:
        """Draw samples by inverting i.i.d. Gaussian noise.

        NOTE(review): the default ``generator`` is created once at import
        time (shared across calls) -- confirm this is the intended objax
        idiom for reproducibility.
        """
        # generate independent Gaussian samples
        X_gauss = objax.random.normal((n_samples, self.n_features), generator=generator)
        # inverse transformation
        return self.inverse_transform(X_gauss)

    def total_correlation(self, base: int = 2) -> np.ndarray:
        """Total correlation = sum of per-layer information reductions.

        NOTE(review): ``base`` is currently ignored (the rescaling is
        commented out) -- confirm the intended units.
        """
        return np.sum(self.info_loss)  # * np.log(base)

    def entropy(self, X: np.ndarray, base: int = 2) -> np.ndarray:
        """Entropy estimate via marginal entropies minus total correlation.

        NOTE(review): ``self.uni_ent_est`` is never assigned anywhere in this
        class, so calling this raises AttributeError -- the marginal entropy
        estimator needs to be wired in.
        """
        return self.uni_ent_est(X).sum() * np.log(base) - self.total_correlation(base)
|
<reponame>gisce/esios
# -*- coding: utf-8 -*-
from datetime import datetime
from dateutil import relativedelta
from libsaas import http, parsers, port
from libsaas.services import base
from esios.utils import translate_param, serialize_param
# Liquicomun version prefixes ordered by preference (lower index == higher
# priority).  Presumably "C" are closed/definitive settlements and "A"
# provisional ones, newest settlement number first -- confirm with e.sios docs.
LIQUICOMUN_PRIORITY = [
    'C7', 'A7', 'C6', 'A6', 'C5', 'A5', 'C4', 'A4', 'C3', 'A3', 'C2', 'A2',
    'C1', 'A1'
]
def parser_none(body, code, headers):
    """Pass-through response parser: return the raw body, ignoring status/headers."""
    return body
class Archive(base.RESTResource):
    """Base resource for the e.sios ``archives`` endpoint.

    Subclasses override :meth:`get_filename`, :meth:`order_key_function`
    and :meth:`validate_range` to select and rank the archive versions
    they accept.
    """
    # REST path of the resource
    path = 'archives'
    def get_filename(self):
        # By default, the archive filename to match is the class name itself.
        return self.__class__.__name__
    def order_key_function(self, param):
        # Default sort key for the returned archive entries: their name.
        return param['name']
    def validate_range(self, start, end):
        # The base archive accepts any date range; subclasses restrict this.
        return True
    @base.apimethod
    def get(self, start_date, end_date, taxonomy_terms=None):
        """Build a GET request listing archives between the two datetimes.

        NOTE(review): ``base.get_params`` reads the named parameters out of
        ``locals()``, so the local variable names below are part of the
        query-string contract -- do not rename them.
        """
        assert isinstance(start_date, datetime)
        assert isinstance(end_date, datetime)
        if taxonomy_terms is None:
            taxonomy_terms = []
        assert isinstance(taxonomy_terms, (list, tuple))
        assert self.validate_range(start_date, end_date), "Dates are not in the expected range for the requested version"
        date_type = 'datos'
        start_date = start_date.isoformat()
        end_date = end_date.isoformat()
        locale = 'en'
        param_list = ('locale', 'start_date', 'end_date', 'date_type')
        if taxonomy_terms:
            param_list += ('taxonomy_terms',)
        params = base.get_params(
            param_list,
            locals(),
            translate_param=translate_param,
            serialize_param=serialize_param,
        )
        request = http.Request('GET', self.get_url(), params)
        return request, parsers.parse_json
    @base.apimethod
    def download(self, start_date, end_date, taxonomy_terms=None, next=0):
        """
        Download the best available version for a range of dates and the
        desired taxonomy terms.

        Optionally fetch the next (n) available version instead of the
        highest-priority one (next=1, 2, 3, ..., n).
        """
        assert isinstance(start_date, datetime)
        assert isinstance(end_date, datetime)
        if taxonomy_terms is None:
            taxonomy_terms = []
        assert isinstance(taxonomy_terms, (list, tuple))
        # gets filename from class name
        filename = self.get_filename()
        body = self.get(start_date, end_date, taxonomy_terms)
        regs = [a for a in body['archives'] if filename in a['name']]
        sorted_list = sorted(regs, key=self.order_key_function)
        # Assert that desired next exists, and fetch it
        assert type(next) == int and next >= 0, "Desired next value is not correct."
        assert (len(sorted_list) >= next + 1), "The desired version (next +{}) is not available. Available versions '{}'".format(next, ", ".join ([name['name'][:2] for name in sorted_list]))
        url = sorted_list[next]['download']['url']
        # The download URL is relative to the service root, not this resource.
        request = http.Request('GET', self.parent.get_url() + url)
        return request, parser_none
class Liquicomun(Archive):
    """``liquicomun`` archives (Settlements taxonomy)."""

    def get_filename(self):
        # Archive entries are named in lowercase ("liquicomun").
        return super(Liquicomun, self).get_filename().lower()

    def order_key_function(self, param):
        # Rank entries by the version priority table (best version first).
        return LIQUICOMUN_PRIORITY.index(param['name'][:2])

    def get(self, start_date, end_date, taxonomy_terms=None):
        """List settlement archives for the given date range.

        BUGFIX: copy the caller-supplied ``taxonomy_terms`` before appending
        'Settlements', so repeated calls no longer keep growing the caller's
        list (this also accepts tuples, which previously crashed on append).
        """
        taxonomy_terms = list(taxonomy_terms or [])
        taxonomy_terms.append('Settlements')
        return super(Liquicomun, self).get(start_date, end_date, taxonomy_terms)
class Generic_Liquicomun(Archive):
    """Generic liquicomun archive pinned to one specific version (A1, A2, C2, ...).

    Suitable to substitute :class:`Liquicomun`; subclasses set ``version``
    and the expected date range for that version.
    """

    # Liquicomun version tag this class accepts (e.g. "A1"); set by subclasses.
    version = None
    # Inclusive date window in which this version is expected to be valid.
    expected_range_start = None
    expected_range_end = None

    def order_key_function(self, param):
        """Sort key: position of the entry's version prefix in the priority table.

        Also asserts the entry actually matches the pinned ``version``.
        """
        # presumably some responses wrap the entry in a single-element list
        # -- unwrap it (kept from the original logic; confirm with the API).
        if isinstance(param, list):
            param = param[0]
        name = param['name']
        assert name[:2] == self.version, "Expected version must be '{}', current '{}' ['{}']".format(self.version, name[:2], name)
        return LIQUICOMUN_PRIORITY.index(name[:2])

    def validate_range(self, start, end):
        """ Validate range for generic period """
        # BUGFIX: the bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; catch only the failures that can occur here
        # (AssertionError for out-of-range, TypeError for unset None bounds).
        try:
            assert start >= self.expected_range_start
            assert end <= self.expected_range_end
        except (AssertionError, TypeError):
            return False
        return True
class A1_liquicomun(Generic_Liquicomun):
    """A1 version: the current month (and the near future)."""
    # TODO: bound the "future" end of the accepted range.
    version = "A1"
    # First day of the current month, at midnight.
    # NOTE(review): evaluated once at import time -- stale in long-running
    # processes that cross a month boundary; confirm this is acceptable.
    expected_range_start = datetime.today().replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    # Actually the last day of *next* month (start + 2 months - 1 day),
    # despite the original comment saying "current month".
    expected_range_end = expected_range_start + relativedelta.relativedelta(months=2) - relativedelta.relativedelta(days=1)
class A2_liquicomun(Generic_Liquicomun):
    """A2 version: the previous month."""
    version = "A2"
    # First day of the previous month, at midnight (evaluated at import time).
    expected_range_start = datetime.today().replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta.relativedelta(months=1)
    # Upper bound is "now", not the last day of the previous month.
    expected_range_end = datetime.today()
class C2_liquicomun(Generic_Liquicomun):
    """C2 version: ranges ending on/before the last day of two months ago."""

    version = "C2"
    # Last day of the month before last, at midnight (first day of the
    # previous month minus one day); evaluated once at import time.
    expected_range_start = datetime.today().replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta.relativedelta(months=1) - relativedelta.relativedelta(days=1)
    # Both bounds collapse onto that same day.
    expected_range_end = expected_range_start

    def validate_range(self, start, end):
        """ Validate C2 range start < 31/-2n and end <= 31/-2n """
        # BUGFIX: replaced the bare ``except:`` (which swallowed every
        # exception, including KeyboardInterrupt) with the failures that can
        # actually occur here.
        try:
            assert start <= self.expected_range_start
            assert end <= self.expected_range_end
        except (AssertionError, TypeError):
            return False
        return True
class P48Cierre(Archive):
    """``p48cierre`` archives (Schedules taxonomy)."""

    def get_filename(self):
        # Archive entries are named in lowercase ("p48cierre").
        return super(P48Cierre, self).get_filename().lower()

    def get(self, start_date, end_date, taxonomy_terms=None):
        """List schedule archives for the given date range.

        BUGFIX: copy the caller-supplied ``taxonomy_terms`` before appending
        'Schedules', so the caller's list is not mutated on every call.
        """
        taxonomy_terms = list(taxonomy_terms or [])
        taxonomy_terms.append('Schedules')
        return super(P48Cierre, self).get(start_date, end_date, taxonomy_terms)
|
<reponame>tenet-ac-za/NZ-ORCID-Hub<filename>orcid_hub/__init__.py<gh_stars>0
# -*- coding: utf-8 -*- # noqa
"""
ORCID-Hub
~~~~~~~~~
The New Zealand ORCID Hub allows all Consortium members to productively engage with ORCID
regardless of technical resources. The technology partner, with oversight from
the IT Advisory Group, lead agency and ORCID, will develop and maintain the Hub.
:copyright: (c) 2017, 2018, 2019 Royal Society of New Zealand.
:license: MIT, see LICENSE for more details.
$Format:%CI$ ($Format:%H$)
"""
import logging
import os
import pkg_resources
from datetime import date, datetime
from functools import wraps
import click
from flask import abort
from flask.json import JSONEncoder as _JSONEncoder
from flask_login import current_user, LoginManager, login_user
from flask import Flask, request
from flask_oauthlib.provider import OAuth2Provider
from flask_peewee.rest import Authentication, RestAPI
from flask_restful import Api
from playhouse import db_url
# disable Sentry if there is no SENTRY_DSN:
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.rq import RqIntegration
from . import config
from .failover import PgDbWithFailover
from flask_admin import Admin
from flask_limiter import Limiter
from flask_limiter.util import get_ipaddr
from flask_caching import Cache
# from werkzeug.contrib.cache import SimpleCache
# Commit identifier substituted by git at archive-export time ($Id$ keyword).
IDENT = "$Id: 6b4b430b27bbe857c58bdb8dc2dad8452fd0a02d $"
try:
    # Package version as installed (from the distribution metadata).
    dist = pkg_resources.get_distribution(__name__)
    __version__ = dist.version
except pkg_resources.DistributionNotFound:
    # Running from a source tree without installation.
    __version__ = None
# http://docs.peewee-orm.com/en/latest/peewee/database.html#automatic-reconnect
# class ReconnectablePostgresqlDatabase(RetryOperationalError, PostgresqlDatabase):
#     """Support for reconnecting closed DB connectios."""
#     pass
# instance_relative_config=True ## Instance directory relative to the app scrip or app module
# Flask "instance" directory is resolved relative to the current working dir.
instance_path = os.path.join(os.getcwd(), "instance")
settings_filename = os.path.join(instance_path, "settings.cfg")
app = Flask(__name__, instance_path=instance_path)
app.config.from_object(config)
cache = Cache(app)
# Optional local overrides from instance/settings.cfg; warn only in debug.
if not app.config.from_pyfile(settings_filename, silent=True) and app.debug:
    print(f"*** WARNING: Failed to load local application configuration from '{settings_filename}'")
# if "DATABASE_URL" in os.environ:
#     app.config["DATABASE_URL"] = os.getenv("DATABASE_URL")
# Treat "/path" and "/path/" as the same route.
app.url_map.strict_slashes = False
class OAuthProvider(OAuth2Provider):
    """Application OAuth2 provider that also logs in the token owner."""
    def require_oauth(self, *scopes):
        """Protect resource with specified scopes."""
        def wrapper(f):
            @wraps(f)
            def decorated(*args, **kwargs):
                # Run the registered before-request hooks first.
                for func in self._before_request_funcs:
                    func()
                # Token already verified earlier in this request -- skip.
                if hasattr(request, "oauth") and request.oauth:
                    return f(*args, **kwargs)
                valid, req = self.verify_request(scopes)
                # After-request hooks may veto or amend the verification result.
                for func in self._after_request_funcs:
                    valid, req = func(valid, req)
                if not valid:
                    if self._invalid_response:
                        return self._invalid_response(req)
                    return abort(401)
                # Cache the verified request for later checks in this request.
                request.oauth = req
                # login only if the user hasn't logged in yet
                if not current_user.is_authenticated:
                    login_user(req.user)
                return f(*args, **kwargs)
            return decorated
        return wrapper
oauth = OAuthProvider(app)
api = Api(app)
# Per-client-IP rate limiting (disabled under load testing below).
limiter = Limiter(
    app,
    key_func=get_ipaddr,
    headers_enabled=True,
    default_limits=[
        "40 per second", # burst: 40/sec
        "1440 per minute", # allowed max: 24/sec
    ])
if app.config.get("LOAD_TEST"):
    limiter.enabled = False
DATABASE_URL = app.config.get("DATABASE_URL")
# TODO: implement connection factory
# Register the failover-aware Postgres driver for pg+failover:// URLs.
db_url.register_database(PgDbWithFailover, "pg+failover", "postgres+failover")
# db_url.PostgresqlDatabase = ReconnectablePostgresqlDatabase
if DATABASE_URL.startswith("sqlite"):
    # SQLite driver does not accept connect_timeout.
    db = db_url.connect(DATABASE_URL, autorollback=True)
else:
    db = db_url.connect(DATABASE_URL, autorollback=True, connect_timeout=3)
class JSONEncoder(_JSONEncoder):
    """JSON encoder serialising date/datetime values as ISO-8601 strings."""

    def default(self, o):
        """Encode datetimes (second precision) and dates; defer otherwise."""
        if not isinstance(o, date):
            return super().default(o)
        # datetime must be handled before plain date (datetime subclasses date)
        if isinstance(o, datetime):
            return o.isoformat(timespec="seconds")
        return o.isoformat()
# Emit JSON as UTF-8 rather than ASCII-escaped sequences.
app.config["JSON_AS_ASCII"] = False
app.json_encoder = JSONEncoder
class UserAuthentication(Authentication):
    """Authorize REST access for any authenticated (logged-in) user."""
    def authorize(self):  # noqa: D102
        return current_user.is_authenticated
class AppAuthentication(Authentication):
    """Use Flask-OAuthlib authentication and application authentication."""
    def __init__(self, roles_required=None, app_auth=True, protected_methods=None):
        """Initialize the Authenticator for accessing DB via REST API using OAuth2."""
        super().__init__(protected_methods=protected_methods)
        self.roles_required = roles_required
        self.app_auth = app_auth
    def authorize(self):  # noqa: D102
        if self.app_auth:
            # Either user application authentication or Access Token
            if current_user and current_user.is_authenticated:
                # Authenticated user: allow only with the required role (if any).
                if not self.roles_required or current_user.has_role(self.roles_required):
                    return True
                return False
        # Fall back to the base authentication first.
        if not super().authorize():
            return False
        # Token already verified earlier in this request.
        if hasattr(request, "oauth") and request.oauth:
            return True
        valid, req = oauth.verify_request(())
        # verify if the token owner has any of the roles:
        # if self.roles_required and not current_user.has_role(self.roles_required):
        #     return False
        if not valid:
            return False
        request.oauth = req
        return True
class DataRestAPI(RestAPI):
    """Customized ORM model CRUD API."""
    def configure_routes(self):  # noqa: D102
        # Top-level API URLs (index of registered resources).
        for url, callback in self.get_urls():
            self.blueprint.route(url)(callback)
        # Per-model endpoints, each wrapped with the configured authenticator.
        for provider in self._registry.values():
            api_name = provider.get_api_name()
            for url, callback in provider.get_urls():
                full_url = '/%s%s' % (api_name, url)
                # strict_slashes=False so "/x" and "/x/" both match.
                self.blueprint.add_url_rule(
                    full_url,
                    '%s_%s' % (api_name, callback.__name__),
                    self.auth_wrapper(callback, provider),
                    methods=provider.allowed_methods,
                    strict_slashes=False,
                )
# REST CRUD access to the models, protected by application authentication.
default_auth = AppAuthentication(app_auth=True)
data_api = DataRestAPI(app, prefix="/data/api/v0.1", default_auth=default_auth, name="data_api")
admin = Admin(
    app, name="NZ ORCiD Hub", template_mode="bootstrap3", base_template="admin/master.html")
# Error reporting: only enabled when a Sentry DSN is configured.
SENTRY_DSN = app.config.get("SENTRY_DSN")
if SENTRY_DSN:
    sentry_sdk.init(
        SENTRY_DSN,
        integrations=[FlaskIntegration(), RqIntegration(), RedisIntegration()],
        debug=app.debug,
        environment=app.config.get("ENV"),
        attach_stacktrace=True,
        traces_sample_rate=1.0,
        with_locals=True,
        send_default_pii=True)
login_manager = LoginManager()
login_manager.login_view = "index"
login_manager.login_message_category = "info"
login_manager.init_app(app)
# NOTE: these imports are deliberately at the bottom -- the imported modules
# need the ``app``/``db`` objects defined above (circular-import avoidance).
from .queuing import __redis_available, rq  # noqa: F401,E402
from . import models  # noqa: F401,E402
from .apis import *  # noqa: F401,F403,E402
from .data_apis import *  # noqa: F401,F403,E402
from .authcontroller import *  # noqa: F401,F403,E402
from .views import *  # noqa: F401,F403,E402
from .oauth import *  # noqa: F401,F403,E402
from .reports import *  # noqa: F401,F403,E402
from .utils import process_records  # noqa: E402
if app.testing:
    # Register HTTP mocks of external services for the test run.
    from .mocks import mocks
    app.register_blueprint(mocks)
if __redis_available:
    # Set up the periodic background jobs only when Redis is reachable.
    from . import schedule  # noqa: E402
    schedule.setup()
@app.before_first_request
def setup_app():
    """Set up STDOUT logging (ends up in the container log), DB tables, and misc settings."""
    app.logger.addHandler(logging.StreamHandler())
    app.logger.setLevel(logging.DEBUG if app.debug else logging.WARNING)
    models.create_tables()
    # Auto-detect Shibboleth support on the first request (needs
    # request.environ, hence before_first_request), unless set explicitly.
    if app.config.get("SHIBBOLETH_DISABLED") is None:
        app.config["SHIBBOLETH_DISABLED"] = not (
            ("mod_wsgi.version" in request.environ and "SHIB_IDP_DOMAINNAME" in os.environ)
            or "EXTERNAL_SP" in app.config)
@app.after_request
def apply_x_frame(response):
    """Include the X-Frame-Options header in HTTP responses to protect against clickjacking."""
    response.headers["X-Frame-Options"] = "SAMEORIGIN"
    return response
@app.cli.command()
@click.option("-d", "--drop", is_flag=True, help="Drop tables before creating...")
@click.option("-f", "--force", is_flag=True, help="Enforce table creation.")
@click.option("-A", "--audit", is_flag=True, help="Create audit trail tables.")
@click.option(
    "-V",
    "--verbose",
    is_flag=True,
    help="Shows SQL statements that get sent to the server or DB.")
def initdb(create=False, drop=False, force=False, audit=True, verbose=False):
    """Initialize the database."""
    # BUGFIX: corrected user-visible help text typo ("adit" -> "audit").
    # NOTE(review): ``create`` is never passed by click (no matching option);
    # kept only for backward compatibility of the callback signature.
    if verbose:
        # Echo every SQL statement peewee issues.
        logger = logging.getLogger("peewee")
        if logger:
            logger.setLevel(logging.DEBUG)
            logger.addHandler(logging.StreamHandler())
    # Drop is destructive, so it additionally requires --force.
    if drop and force:
        models.drop_tables()
    try:
        models.create_tables()
    except Exception:
        app.logger.exception("Failed to create tables...")
    if audit:
        app.logger.info("Creating audit tables...")
        models.create_audit_tables()
@app.cli.command("cradmin")
@click.option("-f", "--force", is_flag=True, help="Enforce creation of the super-user.")
@click.option("-V", "--verbose", is_flag=True, help="Shows SQL statements.")
@click.option("-N", "--name", help="User full name.")
@click.option("-O", "--org-name", help="Organisation name.")
@click.option("--orcid", help="User's ORCID iD (for the users authenticated via ORCID).")
@click.option("-I", "--internal-org-name", help="Internal organisation name (e.g., used by IdPs).")
@click.argument("email", nargs=1)
def create_hub_administrator(email,
                             name=None,
                             force=False,
                             verbose=False,
                             org_name=None,
                             orcid=None,
                             internal_org_name=None):
    """Create a hub administrator, an organisation and link the user to the Organisation."""
    # NOTE(review): the ``force`` option is declared but never used in the
    # body -- confirm whether it should guard re-promotion of existing users.
    if verbose:
        # Echo every SQL statement peewee issues.
        logger = logging.getLogger("peewee")
        if logger:
            logger.setLevel(logging.DEBUG)
            logger.addHandler(logging.StreamHandler())
    # Ensure the schema exists before touching the tables.
    if not models.User.table_exists() or not models.Organisation.table_exists():
        models.create_tables()
    super_user, created = models.User.get_or_create(email=email)
    super_user.name = name or org_name or internal_org_name
    super_user.confirmed = True
    super_user.is_superuser = True
    super_user.orcid = orcid
    # Reuse the user's existing organisation name if none was given.
    if not org_name and super_user.organisation:
        org_name = super_user.organisation.name
    org, _ = models.Organisation.get_or_create(name=org_name or "ORCID Hub")
    if internal_org_name:
        # Name under which the IdP (e.g. Tuakiri) knows this organisation.
        org.tuakiri_name = internal_org_name
    org.confirmed = True
    org.save()
    # Link the user to the organisation (idempotent).
    models.UserOrg.get_or_create(user=super_user, org=org)
    if not super_user.organisation or super_user.organisation != org:
        super_user.organisation = org
    super_user.save()
@app.cli.group()
@click.option("-v", "--verbose", is_flag=True)
def load(verbose):
    """Load data from files."""
    # Stash the flag on the app so sub-commands can read it.
    app.verbose = verbose
@load.command()
@click.argument('input', type=click.File('r'), required=True)
def org_info(input):
    """Pre-loads organisation data."""
    # NOTE: the parameter name ``input`` shadows the builtin; it is part of
    # the CLI argument name, so it is kept as-is.
    row_count = models.OrgInfo.load_from_csv(input)
    click.echo(f"Loaded {row_count} records")
@app.cli.command()
@click.option("-n", default=20, help="Max number of rows to process.")
def process(n):
    """Process uploaded records."""
    process_records(n)
# Development-only conveniences for the "dev0" environment.
if os.environ.get("ENV") == "dev0":
    # This allows us to use a plain HTTP callback
    os.environ['DEBUG'] = "1"
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
    app.debug = True
if app.debug:
    # Enable the Flask debug toolbar if (and only if) it is installed.
    try:
        from flask_debugtoolbar import DebugToolbarExtension
        toolbar = DebugToolbarExtension(app)
        # logger = logging.getLogger('peewee')
        # logger.setLevel(logging.DEBUG)
        # logger.addHandler(logging.StreamHandler())
    except ModuleNotFoundError:
        pass
|
import numpy as np
import os
import string
import re
import tensorflow as tf
from Bio import SeqIO
from Bio.PDB.DSSP import DSSP
from Bio.PDB import PDBParser
# Run TF ops eagerly (this module mixes TF graph-style code with numpy).
tf.compat.v1.enable_eager_execution()
# Upper edges of the distance bins used by binDistMat (presumably Angstrom);
# the trailing .001 keeps exact bin-edge distances in the lower bin.
mapping = np.array([0, 4.001, 6.001, 8.001, 10.001, 12.001, 14.001, 16.001, 18.001, 20.001])
def probability(n):
    """Convert an HHsuite-style integer score to a probability: 2**(-n/1000).

    Non-numeric entries (e.g. '*' placeholders or None) map to 0.0.

    BUGFIX: replaced the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) with the two conversion errors ``int()``
    can actually raise here.
    """
    try:
        counts = int(n)
    except (TypeError, ValueError):
        return 0.
    return 2.**(-counts/1000.)
def find_rows(filename):
    """Return the number of match states parsed from the LENG line of an hhm file."""
    with open(filename) as handle:
        match = re.search('LENG (.*) match', handle.read())
    return int(match.group(1))
def find_hashtag(data):
    """Return the index of the first line that is exactly "#\\n" (None if absent)."""
    return next((idx for idx, line in enumerate(data) if line == "#\n"), None)
def parse_a3m(filename):
    """Read an a3m alignment file into an (nseq, ncol) uint8 MSA matrix.

    Lowercase letters (insertion states) and trailing whitespace are
    stripped, amino acids are mapped to 0..19 in "ARNDCQEGHILKMFPSTWYV"
    order, and '-' plus any unknown character becomes the gap index 20.
    Header ('>') and comment ('#') lines are skipped.
    """
    # translation table that deletes every lowercase letter
    drop_lowercase = str.maketrans(dict.fromkeys(string.ascii_lowercase))
    sequences = [
        line.rstrip().translate(drop_lowercase)
        for line in open(filename, "r")
        if line[0] not in ('>', '#')
    ]
    # view each character as its raw byte code
    msa = np.array([list(s) for s in sequences], dtype='|S1').view(np.uint8)
    alphabet = np.array(list("ARNDCQEGHILKMFPSTWYV-"), dtype='|S1').view(np.uint8)
    for index, code in enumerate(alphabet):
        msa[msa == code] = index
    # anything outside the alphabet is treated as a gap
    msa[msa > 20] = 20
    return msa
def fast_dca(msa1hot, weights, penalty = 4.5):
    """Mean-field direct-coupling analysis on a one-hot MSA.

    Takes a one-hot MSA tensor (nrow, ncol, nstates) and per-sequence
    weights; returns an (ncol, ncol, nstates*nstates + 1) tensor: the
    flattened inverse-covariance couplings plus one APC-corrected contact
    channel.
    """
    nr = tf.shape(msa1hot)[0]
    nc = tf.shape(msa1hot)[1]
    ns = tf.shape(msa1hot)[2]
    with tf.name_scope('covariance'):
        # flatten (column, state) into one axis and compute the weighted
        # covariance of the alignment
        x = tf.reshape(msa1hot, (nr, nc * ns))
        num_points = tf.reduce_sum(weights) - tf.sqrt(tf.reduce_mean(weights))
        mean = tf.reduce_sum(x * weights[:,None], axis=0, keepdims=True) / num_points
        x = (x - mean) * tf.sqrt(weights[:,None])
        cov = tf.matmul(tf.transpose(x), x)/num_points
    with tf.name_scope('inv_convariance'):
        # ridge-regularized inverse covariance -> direct couplings
        cov_reg = cov + tf.eye(nc * ns) * penalty / tf.sqrt(tf.reduce_sum(weights))
        inv_cov = tf.linalg.inv(cov_reg)
        x1 = tf.reshape(inv_cov,(nc, ns, nc, ns))
        x2 = tf.transpose(x1, [0,2,1,3])
        features = tf.reshape(x2, (nc, nc, ns * ns))
        # Frobenius norm of the couplings (gap state excluded), diagonal zeroed
        x3 = tf.sqrt(tf.reduce_sum(tf.square(x1[:,:-1,:,:-1]),(1,3))) * (1-tf.eye(nc))
        # average-product correction (APC) of the norm matrix
        apc = tf.reduce_sum(x3,0,keepdims=True) * tf.reduce_sum(x3,1,keepdims=True) / tf.reduce_sum(x3)
        contacts = (x3 - apc) * (1-tf.eye(nc))
    return tf.concat([features, contacts[:,:,None]], axis=2)
def reweight(msa1hot, cutoff):
    """Per-sequence MSA weights: 1 / (number of neighbours above the identity cutoff)."""
    with tf.name_scope('reweight'):
        # identity threshold, expressed in matching positions
        threshold = tf.cast(tf.shape(msa1hot)[1], tf.float32) * cutoff
        # pairwise number of identical positions between sequences
        similarity = tf.tensordot(msa1hot, msa1hot, [[1, 2], [1, 2]])
        neighbors = tf.reduce_sum(
            tf.cast(similarity > threshold, dtype=tf.float32), -1)
        return 1.0 / neighbors
def getDistMat(pdb_file):
    """Return the pairwise CB-CB distance matrix of a PDB structure.

    CA is used for glycine (which has no CB).  Rows/columns span the residue
    id range from the first to the last residue; residues with missing
    coordinates produce NaN rows/columns.

    BUGFIX: the bare ``except: pass`` hid *every* error; only a missing atom
    (KeyError from ``residue[...]``) is expected and silently skipped now.
    """
    parser = PDBParser(PERMISSIVE=1, QUIET=True)
    structure = parser.get_structure(pdb_file, pdb_file)
    residues = list(structure.get_residues())
    first = residues[0].id[1]
    last = residues[-1].id[1]
    coords = np.empty((last - first + 1, 3))
    coords[:] = np.nan
    for residue in residues:
        try:
            if residue.resname == 'GLY':
                coords[residue.id[1] - first] = residue["CA"].get_coord()
            else:
                coords[residue.id[1] - first] = residue["CB"].get_coord()
        except KeyError:
            # residue without the required atom: leave its row as NaN
            pass
    # pairwise Euclidean distances
    diff = coords[None, :, :] - coords[:, None, :]
    return np.sqrt(np.sum(diff**2, axis=2))
def binDistMat(dist_mat):
    """Digitize a distance matrix into the bins defined by ``mapping``.

    Bin 0 holds NaNs (missing coordinates) and exact-zero distances; bins
    1..8 are bounded above by successive ``mapping`` edges, and everything
    at or beyond the last edge falls into bin 9.  Returns int8.

    PERF: vectorized replacement for the original O(L^2) Python double loop;
    the results are identical.
    """
    # index of the first edge strictly greater than each value
    # (side='right' puts values equal to an edge into the next bin,
    # matching the original strict ``bound > d`` test)
    bins = np.searchsorted(mapping, dist_mat, side='right')
    # values beyond the last edge -- and NaNs, which sort past the end --
    # clamp to the last bin, as the original loop's fall-through did
    bins = np.minimum(bins, len(mapping) - 1)
    # NaN and zero distances map to bin 0
    bins = np.where(np.isnan(dist_mat) | (dist_mat == 0), 0, bins)
    return bins.astype(np.int8)
def binContacts(dist_mat):
    """Binary contact map: 1 where the distance is <= 8.0, 0 elsewhere or NaN.

    PERF: vectorized replacement for the original O(L^2) Python double loop;
    the results are identical (zero distances count as contacts, NaNs do not).
    """
    valid = ~np.isnan(dist_mat)
    return np.where(valid & (dist_mat <= 8.0), 1, 0).astype(np.int8)
'''
dssp tuple:
(dssp index, amino acid, secondary structure, relative ASA, phi, psi,
NH_O_1_relidx, NH_O_1_energy, O_NH_1_relidx, O_NH_1_energy,
NH_O_2_relidx, NH_O_2_energy, O_NH_2_relidx, O_NH_2_energy)
'''
def getDSSP(filename):
    """Run DSSP on a PDB file and return its list of per-residue property tuples.

    Requires the ``mkdssp`` executable to be available on PATH.
    """
    parser = PDBParser(PERMISSIVE=1, QUIET=True)
    structure = parser.get_structure(filename,filename)
    # DSSP runs on the first model of the structure
    dssp = DSSP(structure[0], filename, dssp='mkdssp')
    return dssp.property_list
def binDSSP(dssp, seq):
    """Discretize DSSP output into integer codes.

    Returns (ss, asa, psi, phi) as uint8 arrays of length ``len(seq)`` where
    0 means "no data": secondary structure maps to 1..8, relative ASA is
    binned into 1..10, and phi/psi angles into 1..36 (10-degree bins).
    DSSP marks undefined angles as 360.0; those stay 0.

    NOTE(review): assumes the DSSP records align 1:1 with ``seq`` (no gaps
    or extra residues) -- a longer ``dssp`` list would overrun the arrays;
    also assumes record[3] (relative ASA) is numeric, not 'NA'.  Confirm
    against the callers.
    """
    dssp = list(dssp)
    # Load data without gaps
    ss = np.zeros(len(seq), dtype=np.uint8)
    asa = np.zeros_like(ss)
    psi = np.zeros_like(ss)
    phi = np.zeros_like(ss)
    # 8-state secondary structure alphabet; index+1 so 0 stays "no data"
    ss_symbols = ['H','B','E','G','I','T','S','-']
    ss_mapping = {ss_symbols[i]:i+1 for i in range(8)}
    for i, record in enumerate(dssp):
        ss[i] = ss_mapping[record[2]]
        # relative ASA in [0, 1] -> bins 1..10 (9.999 keeps 1.0 in bin 10)
        asa[i] = 1 + int(record[3]*9.999)
        # 360.0 is DSSP's sentinel for an undefined angle
        if record[4] != 360.:
            phi[i] = 1 + int((180 + record[4])/10.001)
        if record[5] != 360.:
            psi[i] = 1 + int((180 + record[5])/10.001)
    return ss, asa, psi, phi
def getPSSM(filename):
    """Parse a PSI-BLAST PSSM file into an (L, 20) float matrix.

    Skips the 3 header lines and the 6 footer lines; whitespace-split
    columns 2..21 of each remaining row are the 20 per-residue scores.
    """
    with open(filename) as handle:
        rows = handle.readlines()[3:]
    scores = [row.split()[2:22] for row in rows[:len(rows) - 6]]
    # reshape keeps the (0, 20) shape even for an empty profile
    return np.asarray(scores, dtype=np.float64).reshape(-1, 20)
def getSeq(filename):
    """Return the second line of a (single-record FASTA) file, newline included."""
    with open(filename) as f:
        return f.readlines()[1]
def getHHM(filename):
    """Parse an HHsuite .hhm profile into an (L, 30) probability matrix.

    Integer scores are converted via 2**(-score/1000); '*' entries stay 0.
    The profile rows start 5 lines after the '#' marker line and the last
    two file lines are dropped.
    """
    with open(filename) as f:
        lines=f.readlines()
    for i,line in enumerate(lines):
        if line=="#\n":
            # keep only the profile body: 5 lines below the marker, minus footer
            lines = lines[i+5:-2]
            break
    NUM_COL = 30
    # each profile row spans 3 physical lines -- presumably 20 emission
    # columns on the first line and 10 transition columns on the second;
    # confirm against the hhm format spec
    NUM_ROW = int((len(lines)+1)/3)
    profile = np.zeros((NUM_ROW, NUM_COL))
    for i in range(NUM_ROW):
        # first line: drop the residue label/index and the trailing column
        row = lines[3*i].split()[2:-1] + lines[3*i+1].split()
        for j in range(NUM_COL):
            if row[j] != '*':
                profile[i,j] = 2.**(-float(row[j])/1000.)
    return profile
class Sequence(object):
    """Feature-extraction pipeline for a single a3m alignment.

    ``build()`` optionally subsamples the alignment, derives the query
    sequence, builds an hhm profile (via the external ``hhmake`` tool) and
    DCA features, and -- when ``include_labels`` is set -- extracts structural
    labels from a PDB file.
    """
    def __init__(self, a3m_file, include_labels=False, subsample_hmm_percent=1.0, **kwargs):
        # path of the alignment; may be replaced by a subsampled copy later
        self.a3m_file = a3m_file
        self.name = os.path.basename(a3m_file.split('.a3m')[0])
        self.subsample_hmm_percent = subsample_hmm_percent
        self.include_labels = include_labels
    def build(self, pdbfile=None):
        """Run the full pipeline; ``pdbfile`` is required when labels are on."""
        self.subsample_a3m()
        self.get_seq()
        self.make_hhm()
        self.fast_dca()
        # NOTE(review): shells out to remove the temp hhm file; os.remove()
        # would avoid the subshell -- confirm the shell call is intentional.
        os.system('rm '+self.hhm_file)
        if self.include_labels:
            self.get_label(pdbfile)
    def subsample_a3m(self):
        """Write a randomly subsampled copy of the a3m and switch to it."""
        if self.subsample_hmm_percent < 1.0:
            subsample_a3m = os.path.join(os.path.dirname(self.a3m_file), self.name + '_subsample.a3m')
            with open(self.a3m_file) as f:
                lns = f.readlines()
            n_msas = (len(lns) // 2) - 1 # 1st line is the sequence itself
            # NOTE(review): assumes strictly alternating header/sequence lines.
            selected_indices = np.random.choice(n_msas, size=int(n_msas*self.subsample_hmm_percent), replace=False)
            with open(subsample_a3m, 'w') as out_file:
                # Write the sequence
                out_file.write(''.join(lns[:2]))
                for i in selected_indices:
                    # NOTE(review): for i == 0 this re-writes the query lines
                    # (lns[0:2]) instead of the first alignment record --
                    # possible off-by-one (lns[2*(i+1):...]?); confirm.
                    out_file.write(''.join(lns[2*i:2*i+2]))
            self.a3m_file = subsample_a3m
    def get_seq(self):
        """Set ``self.seq`` to the first sequence found in the a3m file."""
        with open(self.a3m_file) as f:
            lns = f.readlines()
        #might not always be the second line in the file
        seq = ''
        l = 0
        while seq == '' and l < len(lns):
            if lns[l][0] == '>':
                # the sequence follows its '>' header line
                seq = lns[l+1].strip('\n')
                break
            else:
                l += 1
        if seq == '':
            print('ERROR! Unable to derive sequence from input a3m file')
            return
        self.seq = seq
    def make_hhm(self):
        """Build an hhm profile with ``hhmake`` and parse it into ``self.hhm``."""
        #create hhm
        self.hhm_file = 'temp.hhm'
        # NOTE(review): string-built shell command and a fixed temp filename;
        # paths with spaces or concurrent runs would break -- confirm usage.
        os.system('hhmake -i '+self.a3m_file+' -o '+self.hhm_file+' -v 0')
        try:
            with open(self.hhm_file) as f:
                data = f.readlines()
        except:
            # NOTE(review): bare except -- presumably guarding a missing file
            # when hhmake failed; consider narrowing to OSError.
            print('ERROR! Unable to process hhm converted from a3m')
            return
        NUM_COL = 30
        NUM_ROW = find_rows(self.hhm_file)
        pssm = np.zeros((NUM_ROW, NUM_COL))
        line_counter = 0
        # profile rows start 5 lines after the '#' marker
        start = find_hashtag(data)+5
        for x in range (0, NUM_ROW * 3):
            if x % 3 == 0:
                # emission-score line: drop label/index and trailing column
                line = data[x + start].split()[2:-1]
                for i, element in enumerate(line):
                    prop = probability(element)
                    pssm[line_counter,i] = prop
            elif x % 3 == 1:
                # transition-score line fills columns 20..29
                line = data[x+start].split()
                for i, element in enumerate(line):
                    prop = probability(element)
                    pssm[line_counter, i+20] = prop
                line_counter += 1
        self.hhm = pssm
    def fast_dca(self):
        """Compute DCA features for the alignment into ``self.dca`` (float16).

        NOTE: this *method* shadows the module-level ``fast_dca`` function;
        the unqualified ``fast_dca(...)`` call below resolves to the module
        function, not to this method.
        """
        ns = 21
        wmin = 0.8
        a3m = parse_a3m(self.a3m_file)
        ncol = a3m.shape[1]
        nrow = tf.Variable(a3m.shape[0])
        msa = tf.Variable(a3m)
        msa1hot = tf.one_hot(msa, ns, dtype=tf.float32)
        w = reweight(msa1hot, wmin)
        # single-sequence alignments get an all-zero feature tensor
        f2d_dca = tf.cond(nrow>1, lambda: fast_dca(msa1hot, w), lambda: tf.zeros([ncol,ncol,442], tf.float32))
        f2d_dca = tf.expand_dims(f2d_dca, axis=0).numpy()
        dimensions = f2d_dca.shape
        # drop the leading batch axis added above
        f2d_dca = f2d_dca.reshape(dimensions[1],dimensions[2],dimensions[3])
        self.dca = f2d_dca.astype('float16')
    def get_label(self, pdbfile, out_dir='./examples/'):
        """Extract structural labels (DSSP codes, distance/contact maps) from a PDB file."""
        # NOTE(review): ``id`` shadows the builtin and, like ``out_dir``, is
        # only used by the commented-out code below.
        id = os.path.basename(os.path.splitext(pdbfile)[0])
        # BASE = os.path.join(out_dir, id)
        data = dict()
        # PSSM = BASE + '.pssm'
        # HHM = BASE + '.hhm'
        # FASTA= BASE + '.fasta'
        # MAT = BASE + '.mat'
        # PKL = BASE + '.pkl'
        # data['seq'] = getSeq(FASTA)
        # data['PSSM'] = getPSSM(PSSM)
        # data['HHM'] = getHHM(HHM)
        # potts = scipy.io.loadmat(MAT)
        # data['J'] = potts['J'].astype(np.float16)
        # data['h'] = potts['h'].astype(np.float16)
        # data['frobenius_norm'] = potts['frobenius_norm'].astype(np.float16)
        # data['corrected_norm'] = potts['corrected_norm'].astype(np.float16)
        with open(pdbfile) as handle:
            sequence = next(SeqIO.parse(handle, "pdb-atom"))
        seq = str(sequence.seq)
        dssp = getDSSP(pdbfile)
        data['DSSP'] = dssp
        ss, asa, psi, phi = binDSSP(dssp, seq)
        data['ss'] = ss
        data['asa'] = asa
        data['phi'] = phi
        data['psi'] = psi
        dist_mat = getDistMat(pdbfile)
        data['dist_mat'] = dist_mat
        data['bin_mat'] = binDistMat(dist_mat)
        data['contact_mat'] = binContacts(dist_mat)
        self.label_data = data
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''oldpf.py - <NAME> (<EMAIL>) - Jan 2017
This contains deprecated and incomplete period-finding tools from periodbase.py:
- dworetsky period finder
- scipy LSP
- townsend LSP
Kept around just in case.
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None       # module logger; attached via set_logger_parent()
LOGMOD = __name__   # module name used when nesting under a parent logger
DEBUG = False       # when True, LOGDEBUG prints even without a LOGGER
def set_logger_parent(parent_name):
    """Attach this module's logger as a child of ``parent_name``'s logger."""
    global LOGGER
    LOGGER = logging.getLogger('%s.%s' % (parent_name, LOGMOD))
def LOGDEBUG(message):
    """Log ``message`` at DEBUG level; print fallback only when DEBUG is set."""
    if LOGGER:
        LOGGER.debug(message)
        return
    if DEBUG:
        print('[%s - DBUG] %s' % (
            datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
            message)
        )
def LOGINFO(message):
    """Log ``message`` at INFO level; print to stdout without a LOGGER."""
    if LOGGER:
        LOGGER.info(message)
        return
    print('[%s - INFO] %s' % (
        datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
        message)
    )
def LOGERROR(message):
    """Log ``message`` at ERROR level; print to stdout without a LOGGER."""
    if LOGGER:
        LOGGER.error(message)
        return
    print('[%s - ERR!] %s' % (
        datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
        message)
    )
def LOGWARNING(message):
    """Log ``message`` at WARNING level; print to stdout without a LOGGER."""
    if LOGGER:
        LOGGER.warning(message)
        return
    print('[%s - WRN!] %s' % (
        datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
        message)
    )
def LOGEXCEPTION(message):
    """Log ``message`` together with the current exception traceback."""
    if LOGGER:
        LOGGER.exception(message)
        return
    print(
        '[%s - EXC!] %s\nexception was: %s' % (
            datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
            message, format_exc()
        )
    )
#############
## IMPORTS ##
#############
from multiprocessing import Pool, cpu_count
import numpy as np
# import these to avoid lookup overhead
from numpy import nan as npnan, sum as npsum, abs as npabs, \
roll as nproll, isfinite as npisfinite, std as npstd, \
sign as npsign, sqrt as npsqrt, median as npmedian, \
array as nparray, percentile as nppercentile, \
polyfit as nppolyfit, var as npvar, max as npmax, min as npmin, \
log10 as nplog10, arange as nparange, pi as MPI, floor as npfloor, \
argsort as npargsort, cos as npcos, sin as npsin, tan as nptan, \
where as npwhere, linspace as nplinspace, \
zeros_like as npzeros_like, full_like as npfull_like, \
arctan as nparctan, nanargmax as npnanargmax, nanargmin as npnanargmin, \
empty as npempty, ceil as npceil, mean as npmean, \
digitize as npdigitize, unique as npunique, \
argmax as npargmax, argmin as npargmin
from scipy.signal import lombscargle, find_peaks_cwt
###################
## LOCAL IMPORTS ##
###################
from ..lcmath import phase_magseries, sigclip_magseries, time_bin_magseries, \
phase_bin_magseries
############
## CONFIG ##
############
NCPUS = cpu_count()  # default worker-pool size: one process per CPU core
#######################
## UTILITY FUNCTIONS ##
#######################
def get_frequency_grid(times,
                       samplesperpeak=5,
                       nyquistfactor=5,
                       minfreq=None,
                       maxfreq=None,
                       returnf0dfnf=False):
    '''Build a linear frequency grid for the period finders in this module.

    Modeled on astropy's LombScargle.autofrequency:
    http://docs.astropy.org/en/stable/_modules/astropy/stats/lombscargle/core.html#LombScargle.autofrequency

    Returns the grid as an ndarray, or the tuple (f0, df, Nf, grid) when
    returnf0dfnf is True.
    '''
    timebaseline = times.max() - times.min()
    npoints = times.size

    # frequency spacing: oversample each peak by `samplesperpeak`
    df = 1. / timebaseline / samplesperpeak

    # lowest frequency: either the user-requested one, or half a bin
    f0 = minfreq if minfreq is not None else 0.5 * df

    # grid size: up to `maxfreq`, or a Nyquist-factor heuristic otherwise
    if maxfreq is not None:
        Nf = int(npceil((maxfreq - f0) / df))
    else:
        Nf = int(0.5 * samplesperpeak * nyquistfactor * npoints)

    freqgrid = f0 + df * nparange(Nf)
    return (f0, df, Nf, freqgrid) if returnf0dfnf else freqgrid
###############################################
## DWORETSKY STRING LENGTH (Dworetsky+ 1983) ##
## (don't use this -- it's very slow) ##
###############################################
def dworetsky_period_find(time,
                          mag,
                          err,
                          init_p,
                          end_p,
                          f_step,
                          verbose=False):
    '''Naive string-length period search (Dworetsky 1983).

    This is the super-slow naive version; prefer pdw_period_find. Searches
    in linear frequency space (which obviously doesn't correspond to a
    linear period space).

    PARAMETERS:

    time: series of times at which mags were measured (usually some form of JD)
    mag: timeseries of magnitudes (np.array)
    err: associated errs per magnitude measurement (np.array)
    init_p, end_p: interval to search for periods between (both ends inclusive)
    f_step: step in frequency [days^-1] to use
    verbose: if True, print the search parameters

    RETURNS:

    tuple of the following form:

    (periods (np.array),
     string_lengths (np.array),
     good_period_mask (boolean array))
    '''
    # scale the mags into [-0.25, 0.25] as the string-length method requires
    mod_mag = (mag - npmin(mag))/(2.0*(npmax(mag) - npmin(mag))) - 0.25
    fold_time = npmin(time)  # fold at the first time element

    init_f = 1.0/end_p
    end_f = 1.0/init_p

    # BUGFIX: npceil returns a float; npempty needs an integer size
    # (newer numpy versions raise a TypeError for float sizes)
    n_freqs = int(npceil((end_f - init_f)/f_step))

    if verbose:
        print('searching %s frequencies between %s and %s days^-1...' %
              (n_freqs,init_f,end_f))

    out_periods = npempty(n_freqs,dtype=float)
    out_strlens = npempty(n_freqs,dtype=float)
    p_goodflags = npempty(n_freqs,dtype=bool)

    # the keep thresholds depend only on the errors and the number of
    # points, so hoist them out of the frequency loop (they were being
    # recomputed identically on every iteration)
    epsilon = 2.0 * npmean(err)
    delta_l = 0.34 * (epsilon - 0.5*(epsilon**2)) * (len(time) -
                                                     npsqrt(10.0/epsilon))
    keep_threshold_1 = 1.6 + 1.2*delta_l

    l = 0.212*len(time)
    sig_l = len(time)/37.5
    keep_threshold_2 = l + 4.0*sig_l

    j_range = len(mag)-1

    for i in range(n_freqs):

        period = 1.0/init_f

        # phase-fold at this period and sort by phase
        phase = (time - fold_time)/period - npfloor((time - fold_time)/period)
        phase_sort_ind = npargsort(phase)
        phase_sorted = phase[phase_sort_ind]
        mod_mag_sorted = mod_mag[phase_sort_ind]

        # sum the segment lengths between consecutive phase-sorted points
        strlen = 0.0
        for j in range(j_range):
            strlen += npsqrt( (mod_mag_sorted[j+1] - mod_mag_sorted[j])**2 +
                              (phase_sorted[j+1] - phase_sorted[j])**2 )
        # wrap-around segment from the last point back to the first
        strlen += npsqrt( (mod_mag_sorted[0] - mod_mag_sorted[-1])**2 +
                          (phase_sorted[0] - phase_sorted[-1] + 1)**2 )

        # BUGFIX: always assign the flag; npempty returns uninitialized
        # memory, so previously the False case held garbage values
        p_goodflags[i] = ((strlen < keep_threshold_1) or
                          (strlen < keep_threshold_2))
        out_periods[i] = period
        out_strlens[i] = strlen

        init_f += f_step

    return (out_periods,out_strlens,p_goodflags)
def pwd_phasebin(phases, mags, binsize=0.002, minbin=9):
    '''Bin a phased magnitude series in phase.

    Bins the phase axis [0, 1) into bins of width ``binsize`` and returns
    the per-bin median phase and median mag. Bins containing ``minbin`` or
    fewer points are dropped.

    Returns a tuple (binned_phases, binned_mags) of np.arrays.
    '''
    bins = np.arange(0.0, 1.0, binsize)
    binnedphaseinds = npdigitize(phases, bins)

    binnedphases, binnedmags = [], []

    for x in npunique(binnedphaseinds):

        thisbin_inds = binnedphaseinds == x
        thisbin_phases = phases[thisbin_inds]
        thisbin_mags = mags[thisbin_inds]

        # BUGFIX: compare the number of points in THIS bin; previously
        # this tested thisbin_inds.size, which is the length of the whole
        # series, so the minbin cut never excluded anything
        if thisbin_phases.size > minbin:
            binnedphases.append(npmedian(thisbin_phases))
            binnedmags.append(npmedian(thisbin_mags))

    return np.array(binnedphases), np.array(binnedmags)
def pdw_worker(task):
    '''Parallel worker for pdw_period_find.

    task layout:

    task[0] = frequency for this worker
    task[1] = times array
    task[2] = mags array (already normalized "mod mags")
    task[3] = fold_time
    task[4] = j_range (unused by this vectorized implementation)
    task[5] = keep_threshold_1
    task[6] = keep_threshold_2
    task[7] = phasebinsize

    we don't need errs for the worker.

    Returns (period, string_length, good_period_flag). On any failure,
    returns (period, npnan, False).
    '''
    frequency = task[0]
    times, modmags = task[1], task[2]
    fold_time = task[3]
    keep_threshold_1 = task[5]
    keep_threshold_2 = task[6]
    phasebinsize = task[7]

    # BUGFIX: initialize period before the try block; previously, if
    # 1.0/frequency (the first statement inside try) raised, the except
    # branch referenced an unbound `period` and died with a NameError
    period = npnan

    try:

        period = 1.0/frequency

        # use the common phaser to phase and sort the mags
        phased = phase_magseries(times,
                                 modmags,
                                 period,
                                 fold_time,
                                 wrap=False,
                                 sort=True)

        # bin in phase if requested; this turns this into a sort of PDM method
        if phasebinsize is not None and phasebinsize > 0:
            bphased = pwd_phasebin(phased['phase'],
                                   phased['mags'],
                                   binsize=phasebinsize)
            phase_sorted = bphased[0]
            mod_mag_sorted = bphased[1]
        else:
            phase_sorted = phased['phase']
            mod_mag_sorted = phased['mags']

        # string length via a roll: squared distances between consecutive
        # phase-sorted points; strings[0] is fixed up below to hold the
        # wrap-around segment from the last point back to the first
        rolledmags = nproll(mod_mag_sorted,1)
        rolledphases = nproll(phase_sorted,1)
        strings = (
            (rolledmags - mod_mag_sorted)*(rolledmags - mod_mag_sorted) +
            (rolledphases - phase_sorted)*(rolledphases - phase_sorted)
        )
        strings[0] = (
            ((mod_mag_sorted[0] - mod_mag_sorted[-1]) *
             (mod_mag_sorted[0] - mod_mag_sorted[-1])) +
            ((phase_sorted[0] - phase_sorted[-1] + 1) *
             (phase_sorted[0] - phase_sorted[-1] + 1))
        )
        strlen = npsum(npsqrt(strings))

        p_goodflag = (keep_threshold_1 < strlen < keep_threshold_2)

        return (period, strlen, p_goodflag)

    except Exception:
        LOGEXCEPTION('error in DWP')
        return (period, npnan, False)
def _pdw_nan_result():
    '''Result dict returned by pdw_period_find when no good period exists.'''
    return {'bestperiod':npnan,
            'beststrlen':npnan,
            'bestflag':npnan,
            'nbeststrlens':None,
            'nbestperiods':None,
            'nbestflags':None,
            'strlens':None,
            'periods':None,
            'goodflags':None}


def pdw_period_find(times,
                    mags,
                    errs,
                    autofreq=True,
                    init_p=None,
                    end_p=None,
                    f_step=1.0e-4,
                    phasebinsize=None,
                    sigclip=10.0,
                    nworkers=None,
                    verbose=False):
    '''Parallel Dworetsky (1983) string-length period search.

    This is the parallel version of dworetsky_period_find. Can optionally
    bin in phase (phasebinsize) to try to speed up the calculation.

    PARAMETERS:

    times: series of times at which mags were measured (usually some form of JD)
    mags: timeseries of magnitudes (np.array)
    errs: associated errs per magnitude measurement (np.array)
    autofreq: if True, use get_frequency_grid; otherwise a linear grid
              with step f_step between the period-derived frequency limits
    init_p, end_p: interval to search for periods between (both ends inclusive)
    f_step: step in frequency [days^-1] to use when autofreq is False
    phasebinsize: if set, bin the phased series with this binsize
    sigclip: sigma-clip level (in units of 1.483*MAD), or falsy to skip
    nworkers: worker processes to use (clamped to NCPUS)

    RETURNS:

    dict with the best period, its string length and good-flag, the top-5
    periods/string-lengths/flags, and the full arrays. On failure, the
    same keys filled with npnan/None.
    '''
    # keep only rows where all three series are finite
    find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)
    ftimes, fmags, ferrs = times[find], mags[find], errs[find]

    # scale the mags into [-0.25, 0.25] for the string-length method
    mod_mags = (fmags - npmin(fmags))/(2.0*(npmax(fmags) - npmin(fmags))) - 0.25

    if not (len(ftimes) > 9 and len(fmags) > 9 and len(ferrs) > 9):
        LOGERROR('no good detections for these times and mags, skipping...')
        return _pdw_nan_result()

    # get the median and stdev = 1.483 x MAD
    median_mag = np.median(fmags)
    stddev_mag = (np.median(np.abs(fmags - median_mag))) * 1.483

    # sigclip next
    if sigclip:
        sigind = (np.abs(fmags - median_mag)) < (sigclip * stddev_mag)
        stimes = ftimes[sigind]
        smags = fmags[sigind]
        serrs = ferrs[sigind]
        LOGINFO('sigclip = %s: before = %s observations, '
                'after = %s observations' %
                (sigclip, len(times), len(stimes)))
    else:
        stimes = ftimes
        smags = fmags
        serrs = ferrs

    # make sure there are enough points to calculate a spectrum
    if not (len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9):
        LOGERROR('no good detections for these times and mags, skipping...')
        return _pdw_nan_result()

    # frequency limits: default start period 0.1 day, default end period
    # is the length of the time series
    endf = 1.0/init_p if init_p else 1.0/0.1
    startf = 1.0/end_p if end_p else 1.0/(stimes.max() - stimes.min())

    if not autofreq:
        # BUGFIX: this used the undefined name `stepsize`; the frequency
        # step parameter of this function is f_step
        frequencies = np.arange(startf, endf, f_step)
        LOGINFO(
            'using %s frequency points, start P = %.3f, end P = %.3f' %
            (frequencies.size, 1.0/endf, 1.0/startf)
        )
    else:
        # this gets an automatic grid of frequencies to use
        frequencies = get_frequency_grid(stimes,
                                         minfreq=startf,
                                         maxfreq=endf)
        LOGINFO(
            'using autofreq with %s frequency points, '
            'start P = %.3f, end P = %.3f' %
            (frequencies.size,
             1.0/frequencies.max(),
             1.0/frequencies.min())
        )

    # set up the quantities shared by every worker
    fold_time = npmin(ftimes)  # fold at the first time element
    j_range = len(fmags)-1

    epsilon = 2.0 * npmean(ferrs)
    delta_l = 0.34 * (epsilon - 0.5*(epsilon**2)) * (len(ftimes) -
                                                     npsqrt(10.0/epsilon))
    keep_threshold_1 = 1.6 + 1.2*delta_l

    l = 0.212*len(ftimes)
    sig_l = len(ftimes)/37.5
    keep_threshold_2 = l + 4.0*sig_l

    # generate the tasks
    tasks = [(x,
              ftimes,
              mod_mags,
              fold_time,
              j_range,
              keep_threshold_1,
              keep_threshold_2,
              phasebinsize) for x in frequencies]

    # fire up the pool and farm out the tasks
    if (not nworkers) or (nworkers > NCPUS):
        nworkers = NCPUS
        LOGINFO('using %s workers...' % nworkers)

    pool = Pool(nworkers)
    strlen_results = pool.map(pdw_worker, tasks)
    pool.close()
    pool.join()
    del pool

    periods, strlens, goodflags = zip(*strlen_results)
    periods, strlens, goodflags = (np.array(periods),
                                   np.array(strlens),
                                   np.array(goodflags))

    # the best periods are those with the smallest string lengths
    strlensort = npargsort(strlens)
    nbeststrlens = strlens[strlensort[:5]]
    nbestperiods = periods[strlensort[:5]]
    nbestflags = goodflags[strlensort[:5]]
    bestperiod = nbestperiods[0]
    beststrlen = nbeststrlens[0]
    bestflag = nbestflags[0]

    return {'bestperiod':bestperiod,
            'beststrlen':beststrlen,
            'bestflag':bestflag,
            'nbeststrlens':nbeststrlens,
            'nbestperiods':nbestperiods,
            'nbestflags':nbestflags,
            'strlens':strlens,
            'periods':periods,
            'goodflags':goodflags}
##################################
## TOWNSEND LSP (Townsend 2010) ##
## don't use this! - incomplete ##
##################################
def townsend_lombscargle_value(times, mags, omega):
    '''Compute the Lomb-Scargle periodogram value at a single angular
    frequency omega (= 2*pi*f), following Townsend (2010).

    mags must already be normalized to zero mean with variance scaled to
    unity.
    '''
    coswt = npcos(omega*times)
    sinwt = npsin(omega*times)

    # cross-sums of the data against the trial sinusoid
    xcos = npsum(mags*coswt)
    xsin = npsum(mags*sinwt)
    coscos = npsum(coswt*coswt)
    sinsin = npsum(sinwt*sinwt)
    cossin = npsum(coswt*sinwt)

    # time offset that orthogonalizes the sine and cosine terms
    tau = nparctan(2*cossin/(coscos - sinsin))/(2*omega)
    ctau = npcos(omega*tau)
    stau = npsin(omega*tau)

    lnum = ctau*xcos + stau*xsin
    lden = ctau*ctau*coscos + 2.0*ctau*stau*cossin + stau*stau*sinsin

    rnum = ctau*xsin - stau*xcos
    rden = ctau*ctau*sinsin - 2.0*ctau*stau*cossin + stau*stau*coscos

    return 0.5*(lnum*lnum/lden + rnum*rnum/rden)
def townsend_lombscargle_wrapper(task):
    '''mp.Pool adapter for townsend_lombscargle_value.

    task is the tuple (times, mags, omega). Returns npnan if the LSP
    value can't be computed for this omega.
    '''
    try:
        times, mags, omega = task
        return townsend_lombscargle_value(times, mags, omega)
    except Exception:
        # failed omegas are marked with npnan so the caller can mask them
        return npnan
def parallel_townsend_lsp(times, mags, startp, endp,
                          stepsize=1.0e-4,
                          nworkers=4):
    '''Parallel Lomb-Scargle periodogram using the Townsend (2010)
    single-frequency algorithm.

    Evaluates the LSP over the frequency range corresponding to the
    period interval (startp, endp), stepping by stepsize cycles/day.
    Returns (omegas, lsp_values) as np.arrays.
    '''
    # keep only finite points
    goodmask = np.isfinite(times) & np.isfinite(mags)
    gtimes, gmags = times[goodmask], mags[goodmask]

    # renormalize: zero median, unit variance
    normmags = (gmags - np.median(gmags))/np.std(gmags)

    omegas = 2*np.pi*np.arange(1.0/endp, 1.0/startp, stepsize)

    # clamp the worker count to the machine's CPUs
    if (not nworkers) or (nworkers > NCPUS):
        nworkers = NCPUS
        LOGINFO('using %s workers...' % nworkers)

    pool = Pool(nworkers)
    lspvals = pool.map(townsend_lombscargle_wrapper,
                       [(gtimes, normmags, omega) for omega in omegas])
    pool.close()
    pool.join()

    return np.array(omegas), np.array(lspvals)
def parallel_townsend_lsp_sharedarray(times, mags, startp, endp,
                                      stepsize=1.0e-4,
                                      nworkers=16):
    '''UNIMPLEMENTED stub: does nothing and returns None.

    This is intended as a version of parallel_townsend_lsp which uses
    shared ctypes arrays for the times and mags arrays so as not to copy
    them to each worker process.

    TODO: we'll need to pass a single argument to the worker so make a 2D array
    and wrap the worker function with partial?

    FIXME: implement this later.
    '''
############################################################
## SCIPY LOMB-SCARGLE (basically Townsend 2010 in Cython) ##
## don't use this either - not fully implemented! ##
############################################################
def parallel_scipylsp_worker(task):
    '''mp.Pool worker wrapping scipy.signal.lombscargle.

    task is the lombscargle argument tuple (times, mags, omegas).
    Returns npnan if the calculation fails for this chunk.
    '''
    try:
        times, mags, omegas = task
        return lombscargle(times, mags, omegas)
    except Exception:
        # failed chunks are marked with npnan so the caller can mask them
        return npnan
def scipylsp_parallel(times,
                      mags,
                      errs, # ignored but for consistent API
                      startp,
                      endp,
                      nbestpeaks=5,
                      periodepsilon=0.1, # 0.1
                      stepsize=1.0e-4,
                      nworkers=4,
                      sigclip=None,
                      timebin=None):
    '''Parallel Lomb-Scargle periodogram via scipy.signal.lombscargle.

    The omegas array is partitioned into nworkers chunks, each evaluated
    by a worker process. Optionally sigma-clips (sigclip) and/or time-bins
    (timebin) the light curve first.

    Returns a dict with the best period/LSP value, the nbestpeaks distinct
    peaks (separated by at least periodepsilon in period), and the full
    lsp/omegas/periods arrays; 'method' is always 'sls'. On failure the
    same keys are filled with npnan/None.
    '''
    # BUGFIX: also require finite times; previously the mask checked only
    # mags and errs, so NaN/inf times could leak into the periodogram
    finiteind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
    ftimes, fmags, ferrs = times[finiteind], mags[finiteind], errs[finiteind]

    if len(ftimes) > 0 and len(fmags) > 0:

        # sigclip the lightcurve if asked to do so
        if sigclip:
            worktimes, workmags, _ = sigclip_magseries(ftimes,
                                                       fmags,
                                                       ferrs,
                                                       sigclip=sigclip)
            LOGINFO('ndet after sigclipping = %s' % len(worktimes))
        else:
            worktimes = ftimes
            workmags = fmags

        # bin the lightcurve if asked to do so
        if timebin:
            binned = time_bin_magseries(worktimes, workmags, binsize=timebin)
            worktimes = binned['binnedtimes']
            workmags = binned['binnedmags']

        # renormalize the working mags to zero and scale them so that the
        # variance = 1 for use with our LSP functions
        normmags = (workmags - np.median(workmags))/np.std(workmags)

        startf = 1.0/endp
        endf = 1.0/startp
        omegas = 2*np.pi*np.arange(startf, endf, stepsize)

        # partition the omegas array by nworkers
        chunksize = int(float(len(omegas))/nworkers) + 1
        tasks = [omegas[x*chunksize:x*chunksize+chunksize]
                 for x in range(nworkers)]

        # map to parallel workers
        if (not nworkers) or (nworkers > NCPUS):
            nworkers = NCPUS
            LOGINFO('using %s workers...' % nworkers)

        pool = Pool(nworkers)
        tasks = [(worktimes, normmags, x) for x in tasks]
        lsp = pool.map(parallel_scipylsp_worker, tasks)
        pool.close()
        pool.join()

        lsp = np.concatenate(lsp)
        periods = 2.0*np.pi/omegas

        # find the nbestpeaks for the periodogram: 1. sort the lsp array
        # by highest value first 2. go down the values until we find
        # nbestpeaks values that are separated by at least periodepsilon
        # in period

        # make sure we only get finite lsp values
        finitepeakind = npisfinite(lsp)
        finlsp = lsp[finitepeakind]
        finperiods = periods[finitepeakind]

        bestperiodind = npargmax(finlsp)

        sortedlspind = np.argsort(finlsp)[::-1]
        sortedlspperiods = finperiods[sortedlspind]
        sortedlspvals = finlsp[sortedlspind]

        # now get the nbestpeaks
        nbestperiods, nbestlspvals, peakcount = (
            [finperiods[bestperiodind]],
            [finlsp[bestperiodind]],
            1
        )
        prevperiod = sortedlspperiods[0]

        # find the best nbestpeaks in the lsp and their periods
        for period, lspval in zip(sortedlspperiods, sortedlspvals):

            if peakcount == nbestpeaks:
                break
            perioddiff = abs(period - prevperiod)
            bestperiodsdiff = [abs(period - x) for x in nbestperiods]

            # this ensures that this period is different from the last
            # period and from all the other existing best periods by
            # periodepsilon to make sure we jump to an entirely different
            # peak in the periodogram
            if (perioddiff > periodepsilon and
                all(x > periodepsilon for x in bestperiodsdiff)):
                nbestperiods.append(period)
                nbestlspvals.append(lspval)
                peakcount = peakcount + 1

            prevperiod = period

        return {'bestperiod':finperiods[bestperiodind],
                'bestlspval':finlsp[bestperiodind],
                'nbestpeaks':nbestpeaks,
                'nbestlspvals':nbestlspvals,
                'nbestperiods':nbestperiods,
                'lspvals':lsp,
                'omegas':omegas,
                'periods':periods,
                'method':'sls'}

    else:

        LOGERROR('no good detections for these times and mags, skipping...')
        return {'bestperiod':npnan,
                'bestlspval':npnan,
                'nbestpeaks':nbestpeaks,
                'nbestlspvals':None,
                'nbestperiods':None,
                'lspvals':None,
                'periods':None,
                'method':'sls'}
|
<filename>infra/ci/worker/run_job.py
#!/usr/bin/env python3
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Runs the given job in an isolated docker container.
Also streams stdout/err onto the firebase realtime DB.
'''
import fcntl
import logging
import json
import os
import queue
import signal
import socket
import shutil
import subprocess
import sys
import threading
import time
from datetime import datetime, timedelta
from oauth2client.client import GoogleCredentials
from config import DB, SANDBOX_IMG
from common_utils import init_logging, req, ConcurrentModificationError, SCOPES
# Directory of this script; used to locate the sibling artifacts_uploader.py.
CUR_DIR = os.path.dirname(__file__)
# Extra OAuth scopes: write access to the firebase realtime DB (for log
# streaming) and the service account's email identity.
SCOPES.append('https://www.googleapis.com/auth/firebase.database')
SCOPES.append('https://www.googleapis.com/auth/userinfo.email')
def read_nonblock(fd):
    '''Drain all currently-available bytes from *fd* without blocking.

    Switches the descriptor to O_NONBLOCK, then reads 1 KB chunks until
    either EOF or "would block" (surfaced as OSError). Returns whatever
    text was read so far, possibly the empty string.
    '''
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    chunks = []
    while True:
        try:
            data = os.read(fd.fileno(), 1024)
        except OSError:
            break  # nothing more to read right now
        if not data:
            break  # EOF
        chunks.append(data.decode())
    return ''.join(chunks)
def log_thread(job_id, queue):
    ''' Uploads stdout/stderr from the queue to the firebase DB.
    Each line is logged as an individual entry in the DB, as follows:
    MMMMMM-NNNN log line, where M: hex-encoded timestamp, N: monotonic counter.
    A None batch on the queue is the EOF marker.
    '''
    log_uri = '%s/logs/%s.json' % (DB, job_id)
    # drop logs left over from any previous run of this job
    req('DELETE', log_uri)
    # consume batches until the producer enqueues the None sentinel
    for batch in iter(queue.get, None):
        req('PATCH', log_uri, body=batch)
    logging.debug('Uploader thread terminated')
def main(argv):
    '''Runs one CI job (argv = [prog, job_id]) inside an isolated docker
    sandbox container.

    Streams the container's merged stdout/stderr to the firebase realtime
    DB via a background uploader thread, optionally mounts shared cache /
    artifacts volumes, and returns the container's exit code (42 if the
    wait loop was never entered, 1 from the usage check).
    '''
    init_logging()
    if len(argv) != 2:
        print('Usage: %s job_id' % argv[0])
        return 1
    job_id = argv[1]
    res = 42  # fallback exit code if the wait loop below never completes
    # The container name will be worker-N-sandbox.
    container = socket.gethostname() + '-sandbox'
    # Remove stale jobs, if any.
    subprocess.call(['sudo', 'docker', 'rm', '-f', container])
    q = queue.Queue()

    # Conversely to real programs, signal handlers in python aren't really async
    # but are queued on the main thread. Hence We need to keep the main thread
    # responsive to react to signals. This is to handle timeouts and graceful
    # termination of the worker container, which dispatches a SIGTERM on stop.
    def sig_handler(sig, _):
        logging.warning('Job runner got signal %s, terminating job %s', sig, job_id)
        subprocess.call(['sudo', 'docker', 'kill', container])
        os._exit(1)  # sys.exit throws a SystemExit exception, _exit really exits.

    signal.signal(signal.SIGTERM, sig_handler)

    # Uploader thread: drains q and PATCHes log batches into the DB.
    log_thd = threading.Thread(target=log_thread, args=(job_id, q))
    log_thd.start()

    # SYS_PTRACE is required for gtest death tests and LSan.
    cmd = [
        'sudo', 'docker', 'run', '--name', container, '--hostname', container,
        '--cap-add', 'SYS_PTRACE', '--rm', '--tmpfs', '/ci/ramdisk:exec',
        '--tmpfs', '/tmp:exec', '--env',
        'PERFETTO_TEST_JOB=%s' % job_id
    ]

    # Propagate environment variables coming from the job config.
    for kv in [kv for kv in os.environ.items() if kv[0].startswith('PERFETTO_')]:
        cmd += ['--env', '%s=%s' % kv]

    # Rationale for the conditional branches below: when running in the real GCE
    # environment, the gce-startup-script.sh mounts these directories in the right
    # locations, so that they are shared between all workers.
    # When running the worker container outside of GCE (i.e.for local testing) we
    # leave these empty. The VOLUME directive in the dockerfile will cause docker
    # to automatically mount a scratch volume for those.
    # This is so that the CI containers can be tested without having to do the
    # work that gce-startup-script.sh does.
    if os.getenv('SHARED_WORKER_CACHE'):
        cmd += ['--volume=%s:/ci/cache' % os.getenv('SHARED_WORKER_CACHE')]

    artifacts_dir = None
    if os.getenv('ARTIFACTS_DIR'):
        artifacts_dir = os.path.join(os.getenv('ARTIFACTS_DIR'), job_id)
        subprocess.call(['sudo', 'rm', '-rf', artifacts_dir])
        os.mkdir(artifacts_dir)
        cmd += ['--volume=%s:/ci/artifacts' % artifacts_dir]

    cmd += os.getenv('SANDBOX_NETWORK_ARGS', '').split()
    cmd += [SANDBOX_IMG]

    logging.info('Starting %s', ' '.join(cmd))
    # stderr is folded into stdout so both streams share one ordered log.
    proc = subprocess.Popen(
        cmd,
        stdin=open(os.devnull),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        bufsize=65536)
    stdout = ''
    tstart = time.time()
    while True:
        ms_elapsed = int((time.time() - tstart) * 1000)
        stdout += read_nonblock(proc.stdout)
        # stdout/err pipes are not atomic w.r.t. '\n'. Extract whole lines out into
        # |olines| and keep the last partial line (-1) in the |stdout| buffer.
        lines = stdout.split('\n')
        stdout = lines[-1]
        lines = lines[:-1]
        # Each line has a key of the form <time-from-start><out|err><counter>
        # |counter| is relative to the batch and is only used to disambiguate lines
        # fetched at the same time, preserving the ordering.
        batch = {}
        for counter, line in enumerate(lines):
            batch['%06x-%04x' % (ms_elapsed, counter)] = line
        if batch:
            q.put(batch)
        if proc.poll() is not None:
            res = proc.returncode
            logging.info('Job subprocess terminated with code %s', res)
            break
        # Large sleeps favour batching in the log uploader.
        # Small sleeps favour responsiveness of the signal handler.
        time.sleep(1)

    q.put(None)  # EOF maker
    log_thd.join()

    # Hand artifacts off to a detached uploader process (setsid) so it can
    # outlive this job runner.
    if artifacts_dir:
        artifacts_uploader = os.path.join(CUR_DIR, 'artifacts_uploader.py')
        cmd = ['setsid', artifacts_uploader, '--job-id=%s' % job_id, '--rm']
        subprocess.call(cmd)
    return res
if __name__ == '__main__':
    # Exit with the sandboxed job's return code so the caller sees failures.
    sys.exit(main(sys.argv))
|
<filename>app.py
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
from get_jav_girls import *
from return_one_question import *
from database_king import *
import os
import json
import random
app = Flask(__name__)
# NOTE(review): fill in real credentials before deploying; with these empty
# placeholders every webhook signature check will fail.
line_bot_api = LineBotApi('') #Your Channel Access Token
handler = WebhookHandler('') #Your Channel Secret
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
    '''Main dispatcher for LINE text messages sent to the quiz bot.

    Routing, in order:
    * status "$start"  -> the user is entering a nickname
    * alphabetic status + pure-english text -> the user is answering a
      question; status holds the correct choice (A/B/C)
    * otherwise -> menu commands (母湯 / 我要改名 / 我要換圖 / 更換圖片- /
      開始挑戰 / 測試 / 答題 / 目前成績 / 排行榜 / 風雲榜 / 掰掰精靈!),
      falling back to a sticker image.

    sexual is the stored image-theme preference: 0/1 正妹, 2 帥哥,
    3 可愛動物, 4 二次元.
    '''
    text = event.message.text  # message from user
    userId = json.loads(str(event.source))['userId']
    status = userStatus(userId)
    sexual = userSexual(userId)
    print(text)
    print(sexual)

    if status == "$start":
        # user is choosing a nickname (max 10 chars)
        if len(text) <= 10:
            set_userName(userId, text)
            text_message = FlexSendMessage(
                alt_text='開始挑戰',
                contents=BubbleContainer(
                    body=BoxComponent(
                        layout="vertical",
                        contents=[TextComponent(text="歡迎!" + text + '準備好挑戰了嗎?', wrap=True)]
                    ),
                    footer=BoxComponent(
                        layout="horizontal",
                        contents=[ButtonComponent(style="primary", action=MessageAction(label="我要改名", text="我要改名")),
                                  SeparatorComponent(margin='xl'),
                                  ButtonComponent(style="primary", action=MessageAction(label="開始挑戰", text="答題"))]
                    )
                )
            )
            line_bot_api.reply_message(
                event.reply_token,
                text_message)
            return 0
        else:
            text_message = TextSendMessage(text="暱稱太長喔,請重新輸入")
            line_bot_api.reply_message(
                event.reply_token,
                text_message)
            return 0

    elif status.isalpha() and judge_pure_english(text):
        # status holds the stored correct answer; normalize fullwidth
        # A/B/C (U+FF21..U+FF23) from some clients to ASCII first
        if ord(status) == 65313:
            status = 'A'
        elif ord(status) == 65314:
            status = 'B'
        elif ord(status) == 65315:
            # BUGFIX: this line was `status == 'C'` (a no-op comparison),
            # so a stored fullwidth 'C' answer could never be matched
            status = 'C'
        if status.upper() == text:
            # correct answer: record it and congratulate with a themed image
            after_answer(userId, 1)
            # NOTE(review): assumes sexual is always in 0..4 (set_userSexual
            # below only stores 1-4); any other value would leave
            # hero_component unbound -- confirm against database_king
            if sexual == 0 or sexual == 1:
                hero_component = ImageComponent(
                    url=read_src_kanna_jpg(),
                    size="full")
            elif sexual == 2:
                hero_component = ImageComponent(
                    url=read_src_ogisome_jpg(),
                    size="full")
            elif sexual == 3:
                hero_component = ImageComponent(
                    url=read_src_cute_jpg(),
                    size="full")
            elif sexual == 4:
                hero_component = ImageComponent(
                    url=read_src_takagi_jpg(),
                    size="full")
            text_message = FlexSendMessage(
                alt_text='恭喜答對!',
                contents=BubbleContainer(
                    header=BoxComponent(
                        layout="vertical",
                        contents=[TextComponent(text="恭喜答對,很聰明呢!", wrap=True)]
                    ),
                    hero=hero_component,
                    footer=BoxComponent(
                        layout="horizontal",
                        contents=[ButtonComponent(style="primary", action=MessageAction(label="目前成績", text="目前成績")),
                                  SeparatorComponent(margin='xl'),
                                  ButtonComponent(style="primary", action=MessageAction(label="繼續挑戰", text="答題"))]
                    )
                )
            )
            line_bot_api.reply_message(
                event.reply_token,
                text_message)
            return 0
        else:
            # wrong answer: record it and reveal the correct choice
            after_answer(userId, 0)
            if sexual == 0 or sexual == 1:
                hero_component = ImageComponent(
                    url=read_src_pola_jpg(),
                    size="full")
            elif sexual == 2:
                hero_component = ImageComponent(
                    url=read_src_stickers('mutom'),
                    size="full")
            elif sexual == 3:
                hero_component = ImageComponent(
                    url=read_src_beastears_jpg(),
                    size="full")
            elif sexual == 4:
                hero_component = ImageComponent(
                    url=read_src_kizuna_jpg(),
                    size="full")
            text_message = FlexSendMessage(
                alt_text='可惜答錯!',
                contents=BubbleContainer(
                    header=BoxComponent(
                        layout="vertical",
                        contents=[TextComponent(text="可惜答錯QQ,再努力吧!\n答案是:" + status, wrap=True)]
                    ),
                    hero=hero_component,
                    footer=BoxComponent(
                        layout="horizontal",
                        contents=[ButtonComponent(style="primary", action=MessageAction(label="目前成績", text="目前成績")),
                                  SeparatorComponent(margin='xl'),
                                  ButtonComponent(style="primary", action=MessageAction(label="繼續挑戰", text="答題"))]
                    )
                )
            )
            line_bot_api.reply_message(
                event.reply_token,
                text_message)
            return 0

    # menu-command dispatch (only reached when not in an answering state)
    if '母湯' in text:
        img_src = read_src_mutom_jpg()
        line_bot_api.reply_message(
            event.reply_token,
            ImageSendMessage(original_content_url=img_src,
                             preview_image_url=img_src))
        return 0
    elif "我要改名" == text:
        re_userName(userId)
        text_message = TextSendMessage(text="請輸入您的新暱稱(限制10字元以內)")
        line_bot_api.reply_message(
            event.reply_token,
            text_message)
        return 0
    elif "我要換圖" == text:
        text_message = FlexSendMessage(
            alt_text='更換圖片',
            contents=BubbleContainer(
                body=BoxComponent(
                    layout="vertical",
                    contents=[TextComponent(text="請問您想更換成什麼類型的圖片?", wrap=True)]
                ),
                footer=BoxComponent(
                    layout="horizontal",
                    contents=[BoxComponent(
                                  layout="vertical",
                                  contents=[ButtonComponent(style="primary", action=MessageAction(label="正妹", text="更換圖片-正妹")),
                                            SeparatorComponent(margin='xl'),
                                            ButtonComponent(style="primary", action=MessageAction(label="帥哥", text="更換圖片-帥哥"))]
                              ),
                              SeparatorComponent(margin='xl'),
                              BoxComponent(
                                  layout="vertical",
                                  contents=[ButtonComponent(style="primary", action=MessageAction(label="可愛動物", text="更換圖片-可愛動物")),
                                            SeparatorComponent(margin='xl'),
                                            ButtonComponent(style="primary", action=MessageAction(label="二次元", text="更換圖片-二次元"))]
                              )]
                )
            )
        )
        line_bot_api.reply_message(
            event.reply_token,
            text_message)
        return 0
    elif "更換圖片-" == text[:5]:
        # persist the chosen image theme
        if text[5:] == "正妹":
            set_userSexual(userId, 1)
        elif text[5:] == "帥哥":
            set_userSexual(userId, 2)
        elif text[5:] == "可愛動物":
            set_userSexual(userId, 3)
        elif text[5:] == "二次元":
            set_userSexual(userId, 4)
        else:
            return 0
        text_message = TextSendMessage(text="已成功更換圖片類型")
        line_bot_api.reply_message(
            event.reply_token,
            text_message)
        return 0
    elif "開始挑戰" == text:
        exist_or_not = initSeting(userId)
        if exist_or_not == False:
            text_message = TextSendMessage(text="歡迎挑戰台大知識王,請輸入您的暱稱(限制10字元以內)")
            line_bot_api.reply_message(
                event.reply_token,
                text_message)
            return 0
        else:
            text_message = TextSendMessage(text="您已開始進行挑戰囉!")
            line_bot_api.reply_message(
                event.reply_token,
                text_message)
            return 0
    elif "測試" == text:
        question, choice_1, choice_2, choice_3, answer = return_one()
        text_message = TextSendMessage(text="請問" + question + '\n' + '(A)' + choice_1 + '\n' + '(B)' + choice_2 + '\n' + '(C)' + choice_3)
        line_bot_api.reply_message(
            event.reply_token,
            text_message)
        return 0
    elif "答題" == text and status.isalpha():
        # an alphabetic status means a question is already outstanding
        text_message = TextSendMessage(text="目前正在答題中,請先答完!")
        line_bot_api.reply_message(
            event.reply_token,
            text_message)
        return 0
    elif "答題" == text and (not status.isalpha()):
        index, index2 = index_index2(userId)
        if index == "wrong":
            # no questions left for this user
            text_message = TextSendMessage(text="恭喜你,全部答完囉!")
            line_bot_api.reply_message(
                event.reply_token,
                text_message)
            return 0
        question, choice_1, choice_2, choice_3, answer = return_one(index, index2)
        # store the correct answer as the user's status until they reply
        change_status(userId, answer)
        if sexual == 0 or sexual == 1:
            hero_component = ImageComponent(
                url=read_src_liyin_jpg(),
                size="full")
        elif sexual == 2:
            hero_component = ImageComponent(
                url=read_src_lee_jpg(),
                size="full")
        elif sexual == 3:
            hero_component = ImageComponent(
                url=read_src_stickers('penguins'),
                size="full")
        elif sexual == 4:
            hero_component = ImageComponent(
                url=read_src_anime_boys_jpg(),
                size="full")
        buttons_template = FlexSendMessage(
            alt_text='一個問題',
            contents=BubbleContainer(
                hero=hero_component,
                body=BoxComponent(
                    layout="vertical",
                    contents=[TextComponent(text="請問" + question, wrap=True),
                              TextComponent(text='(A)' + choice_1, wrap=True),
                              TextComponent(text='(B)' + choice_2, wrap=True),
                              TextComponent(text='(C)' + choice_3, wrap=True),]
                ),
                footer=BoxComponent(
                    layout="horizontal",
                    contents=[ButtonComponent(style="primary", action=MessageAction(label="A", text="A")),
                              SeparatorComponent(margin='xl'),
                              ButtonComponent(style="primary", action=MessageAction(label="B", text="B")),
                              SeparatorComponent(margin='xl'),
                              ButtonComponent(style="primary", action=MessageAction(label="C", text="C"))]
                )
            )
        )
        line_bot_api.reply_message(event.reply_token, buttons_template)
        return 0
    elif "目前成績" == text:
        userPoint, userWrong, userRate = user_point(userId)
        if userPoint == "wrong":
            text_message = TextSendMessage(text="您尚未進行挑戰")
            line_bot_api.reply_message(
                event.reply_token,
                text_message)
            return 0
        else:
            text_message = FlexSendMessage(
                alt_text='目前成績',
                contents=BubbleContainer(
                    body=BoxComponent(
                        layout="vertical",
                        contents=[TextComponent(text="您目前答對" + str(userPoint) + '題\n答錯' + str(userWrong) + '題\n正確率為' + str(userRate/100) + '%', wrap=True)]
                    ),
                    footer=BoxComponent(
                        layout="horizontal",
                        contents=[ButtonComponent(style="primary", action=MessageAction(label="排行榜", text="排行榜")),
                                  SeparatorComponent(margin='xl'),
                                  ButtonComponent(style="primary", action=MessageAction(label="繼續挑戰", text="答題"))]
                    )
                )
            )
            line_bot_api.reply_message(
                event.reply_token,
                text_message)
            return 0
    elif "排行榜" == text:
        text_message = FlexSendMessage(
            alt_text='答題風雲榜',
            contents=BubbleContainer(
                footer=BoxComponent(
                    layout="horizontal",
                    contents=[ButtonComponent(style="primary", action=MessageAction(label="答題風雲榜", text="答題風雲榜")),
                              SeparatorComponent(margin='xl'),
                              ButtonComponent(style="primary", action=MessageAction(label="正確率風雲榜", text="正確率風雲榜"))]
                )
            )
        )
        line_bot_api.reply_message(
            event.reply_token,
            text_message)
        return 0
    elif "答題風雲榜" == text:
        # leaderboard by number of correct answers (top 10 + the user)
        max_list, max_list_point = find_max_point()
        userPoint, userWrong, userRate = user_point(userId)
        userName = user_name(userId)
        if userPoint == "wrong":
            return 0
        userPosition = max_list_point.index(userPoint)
        if len(max_list) > 10:
            max_list = max_list[:10]
            max_list_point = max_list_point[:10]
        body_contents = []
        for i in range(len(max_list)):
            text_one = TextComponent(text=str(i+1) + '.' + str(max_list[i]) + " 答對題數:" + str(max_list_point[i]), wrap=True)
            body_contents.append(text_one)
        text_user = TextComponent(text='\n' + '您的成績:\n' + str(userPosition+1) + '.' + userName + " 答對題數:" + str(userPoint), wrap=True)
        body_contents.append(text_user)
        if sexual == 0 or sexual == 1:
            hero_component = ImageComponent(
                url=read_src_gaki_jpg(),
                size="full")
        elif sexual == 2:
            hero_component = ImageComponent(
                url=read_src_fengden_jpg(),
                size="full")
        elif sexual == 3:
            hero_component = ImageComponent(
                url=read_src_cats_jpg(),
                size="full")
        elif sexual == 4:
            hero_component = ImageComponent(
                url=read_src_anime_jpg(),
                size="full")
        text_message = FlexSendMessage(
            alt_text='答題風雲榜',
            contents=BubbleContainer(
                header=BoxComponent(
                    layout="vertical",
                    contents=[TextComponent(text="答題風雲榜", size="3xl", align="center", wrap=True)]
                ),
                hero=hero_component,
                body=BoxComponent(
                    layout="vertical",
                    contents=body_contents
                ),
                footer=BoxComponent(
                    layout="horizontal",
                    contents=[ButtonComponent(style="primary", action=MessageAction(label="繼續挑戰", text="答題"))]
                )
            )
        )
        line_bot_api.reply_message(
            event.reply_token,
            text_message)
        return 0
    elif "正確率風雲榜" == text:
        # leaderboard by accuracy (needs >= 30 correct answers to rank)
        max_list, max_list_rate = find_max_rate()
        userPoint, userWrong, userRate = user_point(userId)
        userName = user_name(userId)
        if userRate == 0:
            return 0
        if userPoint >= 30:
            userPosition = max_list_rate.index(userRate)
        else:
            userPosition = "None"
        if len(max_list) > 10:
            max_list = max_list[:10]
            max_list_rate = max_list_rate[:10]
        body_contents = []
        for i in range(len(max_list)):
            text_one = TextComponent(text=str(i+1) + '.' + str(max_list[i]) + " 正確率:" + str(max_list_rate[i]/100) + '%', wrap=True)
            body_contents.append(text_one)
        if userPosition != "None":
            text_user = TextComponent(text='\n' + '您的成績:\n' + str(userPosition+1) + '.' + userName + " 正確率:" + str(userRate/100) + '%', wrap=True)
        else:
            text_user = TextComponent(text='\n' + '您的成績:\n' + "至少要答對30題才可入榜" + '.' + userName + " 正確率:" + str(userRate/100) + '%', wrap=True)
        body_contents.append(text_user)
        if sexual == 0 or sexual == 1:
            hero_component = ImageComponent(
                url=read_src_tori_jpg(),
                size="full")
        elif sexual == 2:
            hero_component = ImageComponent(
                url=read_src_gongyoo_jpg(),
                size="full")
        elif sexual == 3:
            hero_component = ImageComponent(
                url=read_src_squid_jpg(),
                size="full")
        elif sexual == 4:
            hero_component = ImageComponent(
                url=read_src_dragon_jpg(),
                size="full")
        text_message = FlexSendMessage(
            alt_text='正確率風雲榜',
            contents=BubbleContainer(
                header=BoxComponent(
                    layout="vertical",
                    contents=[TextComponent(text="正確率風雲榜\n(至少答對30題)", size="3xl", align="center", wrap=True)]
                ),
                hero=hero_component,
                body=BoxComponent(
                    layout="vertical",
                    contents=body_contents
                ),
                footer=BoxComponent(
                    layout="horizontal",
                    contents=[ButtonComponent(style="primary", action=MessageAction(label="繼續挑戰", text="答題"))]
                )
            )
        )
        line_bot_api.reply_message(
            event.reply_token,
            text_message)
        return 0
    elif '掰掰精靈!' == text:
        # leave the group/room this message came from
        print(event.source)
        d = json.loads(str(event.source))
        print(d['userId'])
        type_ = str(event.source)[2:6]
        if type_ == "grou":
            group_id = str(event.source)[13:46]
            line_bot_api.leave_group(group_id)
        elif type_ == "room":
            room_id = str(event.source)[12:45]
            line_bot_api.leave_room(room_id)
    else:
        # unrecognized text: reply with the default sticker image
        img_src = read_src_mutom_jpg()
        line_bot_api.reply_message(
            event.reply_token,
            ImageSendMessage(original_content_url=img_src,
                             preview_image_url=img_src))
        return 0
if __name__ == "__main__":
port = int(os.environ['PORT'])
app.run(host='0.0.0.0',port=port) |
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget, QLabel, QGraphicsPixmapItem, QGraphicsView, QGraphicsScene
import pyqtgraph as pg
from mGesf import workers
from utils.GUI_main_window import init_container
from utils.img_utils import array_to_colormap_qim
class XeThruX4Tab(QWidget):
    """Tab widget visualizing live frames from a XeThru X4 radar worker.

    Three stacked views, top to bottom:
      1. raw RF + baseband curves,
      2. clutter-removed RF + baseband curves,
      3. a baseband-history spectrogram rendered as a pixmap.
    Frames arrive as dicts through the worker's ``signal_data`` signal and are
    routed to :meth:`control_process_xethru_data`.
    """

    def __init__(self, Xe4Thru_worker: workers.Xe4ThruWorker, *args, **kwargs):
        super().__init__()
        self.Xe4Thru_worker = Xe4Thru_worker
        # Initial connection; set_fire_tab_signal() toggles it afterwards.
        self.Xe4Thru_worker.signal_data.connect(self.control_process_xethru_data)

        self.main_page = QtWidgets.QVBoxLayout(self)
        self.setLayout(self.main_page)

        # One labeled container per visualization.
        self.graph_container1 = init_container(parent=self.main_page,
                                               vertical=True,
                                               label="RF Frame and Baseband Frame",
                                               label_bold=True,
                                               label_position="lefttop")
        self.graph_container2 = init_container(parent=self.main_page,
                                               vertical=True,
                                               label="Clutter Removed frame",
                                               label_bold=True,
                                               label_position="lefttop")
        self.graph_container3 = init_container(parent=self.main_page,
                                               vertical=True,
                                               label="BASEBAND HISTORY",
                                               label_bold=True,
                                               label_position="lefttop")

        # Runtime curves (RF = blue thin pen, baseband = red thick pen).
        self.rf_curve, self.baseband_curve = self.init_xethrux4_runtime_view(
            parent=self.graph_container1, label="Clutter frame")
        self.clutter_free_rf_curve, self.clutter_free_baseband_curve = self.init_xethrux4_runtime_view(
            parent=self.graph_container2, label="Clutter free frame")

        # Spectrogram pixmap item hosted inside a QGraphicsScene.
        self.xethrux4_ir_spectrogram_display = QGraphicsPixmapItem()
        self.xethrux4_runtime_view = self.init_spec_view(parent=self.graph_container3, label="XeThruX4",
                                                         graph=self.xethrux4_ir_spectrogram_display)

    def init_xethrux4_runtime_view(self, parent, label):
        """Add a labeled pyqtgraph plot to ``parent``; return (rf_curve, baseband_curve)."""
        if label:
            ql = QLabel()
            # Fix: the original set AlignTop and immediately overwrote it with
            # AlignCenter; only the effective call is kept.
            ql.setAlignment(QtCore.Qt.AlignCenter)
            ql.setText(label)
            parent.addWidget(ql)
        rf_frame = pg.PlotWidget()
        parent.addWidget(rf_frame)
        pen = pg.mkPen(color=(0, 0, 255), width=1)
        rf_curve = rf_frame.plot([], [], pen=pen, name="rf_curve")
        pen = pg.mkPen(color=(255, 0, 0), width=2)
        baseband = rf_frame.plot([], [], pen=pen, name="baseband_curve")
        return rf_curve, baseband

    @QtCore.pyqtSlot(dict)
    def control_process_xethru_data(self, data_dict):
        """Slot: refresh every plot from one worker frame.

        Expects keys 'frame', 'baseband_frame', 'clutter_removal_frame' and
        'clutter_removal_baseband_frame' (assumed 1-D arrays of equal length —
        TODO confirm against Xe4ThruWorker) plus 'ir_spectrogram' (2-D image).
        A None 'frame' means "no data yet" and is ignored.
        """
        if data_dict['frame'] is not None:
            xsamples = list(range(data_dict['frame'].shape[0]))
            self.rf_curve.setData(xsamples, data_dict['frame'])
            self.baseband_curve.setData(xsamples, data_dict['baseband_frame'])
            self.clutter_free_rf_curve.setData(xsamples, data_dict['clutter_removal_frame'])
            self.clutter_free_baseband_curve.setData(xsamples, data_dict['clutter_removal_baseband_frame'])
            ir_heatmap_qim = array_to_colormap_qim(data_dict['ir_spectrogram'])
            ir_qpixmap = QPixmap(ir_heatmap_qim)
            # Resize for display; KeepAspectRatio makes 500x8000 an upper bound.
            ir_qpixmap = ir_qpixmap.scaled(500, 8000, pg.QtCore.Qt.KeepAspectRatio)
            self.xethrux4_ir_spectrogram_display.setPixmap(ir_qpixmap)

    def init_spec_view(self, parent, label, graph=None):
        """Add a labeled QGraphicsView to ``parent``; return its scene.

        If ``graph`` is given it is pre-added to the scene.
        """
        if label:
            ql = QLabel()
            ql.setAlignment(QtCore.Qt.AlignCenter)
            ql.setText(label)
            parent.addWidget(ql)
        spc_gv = QGraphicsView()
        parent.addWidget(spc_gv)
        scene = QGraphicsScene(self)
        spc_gv.setScene(scene)
        spc_gv.setAlignment(QtCore.Qt.AlignCenter)
        if graph:
            scene.addItem(graph)
        return scene

    def set_fire_tab_signal(self, is_fire_signal):
        """Enable/disable streaming of worker frames into this tab.

        Fix: __init__ already connects the slot, and Qt registers one call per
        connect() — repeated enables used to stack duplicate connections, so
        the slot ran multiple times per frame. We now always disconnect first.
        """
        try:
            self.Xe4Thru_worker.signal_data.disconnect(self.control_process_xethru_data)
        except TypeError:
            pass  # not currently connected
        if is_fire_signal:
            print('enabled xethrux4 signal')
            self.Xe4Thru_worker.signal_data.connect(self.control_process_xethru_data)
        else:
            print('disable xethrux4 signal')
|
from abc import ABC, abstractmethod
from collections import defaultdict
from functools import partial
from itertools import chain
from typing import List, Optional, Union, Callable
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils import check_random_state
from ..dataset import BaseDataset, TextDataset
from ..utils import array_to_marginals, calc_cmi_matrix, cluster_based_accuracy_variance
ABSTAIN = -1
class Expression(ABC):
    """Abstract predicate over a feature matrix.

    Subclasses implement ``apply`` (a boolean row mask) plus the
    ``include``/``exclude`` relations used to reason about redundancy between
    candidate labeling functions.
    """

    @abstractmethod
    def apply(self, x: np.ndarray):
        """Boolean mask selecting the rows of ``x`` that satisfy this predicate."""
        raise NotImplementedError

    @abstractmethod
    def include(self, other):
        """True when ``other`` holding implies that ``self`` holds."""
        raise NotImplementedError

    @abstractmethod
    def exclude(self, other):
        """True when ``self`` and ``other`` are treated as non-overlapping."""
        raise NotImplementedError

    def overlap(self, other):
        # Two expressions overlap when they are neither disjoint nor nested
        # (in either direction).
        return not (self.exclude(other) or self.include(other) or other.include(self))
class UnaryExpression(Expression):
    """Predicate over a single feature column, parameterized by a threshold.

    ``threshold`` is a scalar for Greater/Less/Equal subclasses and a
    (low, high) pair for the interval subclasses.
    """

    def __init__(self, idx, threshold):
        self.idx = idx              # column index the predicate reads
        self.threshold = threshold  # scalar or (low, high), subclass-dependent

    def apply(self, x: np.ndarray):
        assert x.ndim == 2, 'dimension of x should be 2!'
        return self.apply_(x[:, self.idx])

    @abstractmethod
    def apply_(self, x: np.ndarray):
        """Predicate over the 1-D column this expression watches."""
        raise NotImplementedError

    def include(self, other: Expression):
        """Whether ``other`` holding implies ``self`` holds.

        Expressions over different columns never include each other; a binary
        expression is included only when both of its operands are.
        """
        if isinstance(other, UnaryExpression):
            if self.idx == other.idx:
                return self.include_(other)
            return False
        if isinstance(other, BinaryExpression):
            return self.include(other.e1) and self.include(other.e2)
        # Fix: unknown expression kinds previously fell through and returned
        # None implicitly; return an explicit boolean.
        return False

    @abstractmethod
    def include_(self, other: Expression):
        """Same-column inclusion test; implemented per comparison kind."""
        raise NotImplementedError

    def exclude(self, other: Expression):
        """Whether ``self`` and ``other`` are treated as non-overlapping.

        NOTE(review): expressions on *different* columns report True here even
        though both can hold at once — confirm the intended semantics is
        "uses a different feature" for overlap pruning.
        """
        if isinstance(other, UnaryExpression):
            if self.idx == other.idx:
                return self.exclude_(other)
            return True
        if isinstance(other, BinaryExpression):
            return self.exclude(other.e1) and self.exclude(other.e2)
        # Fix: explicit False instead of an implicit None fall-through.
        return False

    @abstractmethod
    def exclude_(self, other: Expression):
        """Same-column disjointness test; implemented per comparison kind."""
        raise NotImplementedError

    def __str__(self):
        s = f'=====[{self.__class__}]=====\n'
        s += f'[idx] {self.idx}\n'
        s += f'[threshold] {self.threshold}\n'
        return s
class GreaterExpression(UnaryExpression):
    """Predicate: column value strictly greater than ``threshold``."""

    def apply_(self, x: np.ndarray):
        return x > self.threshold

    def include_(self, other: Expression):
        # (x > t') or (x == t') implies (x > t) when t' lies above t.
        # Boundaries are compared strictly, i.e. equal thresholds are
        # conservatively reported as not included.
        if isinstance(other, (GreaterExpression, EqualExpression)):
            return other.threshold > self.threshold
        if isinstance(other, InIntervalExpression):
            return other.threshold[0] > self.threshold
        return False

    def exclude_(self, other: Expression):
        # Disjoint when the other predicate lives entirely below our cutoff
        # (again with strict, conservative boundary handling).
        if isinstance(other, EqualExpression):
            return other.threshold < self.threshold
        if isinstance(other, InIntervalExpression):
            return other.threshold[1] < self.threshold
        if isinstance(other, LessExpression):
            return other.threshold < self.threshold
        return False
class LessExpression(UnaryExpression):
    """Predicate: column value strictly less than ``threshold``."""

    def apply_(self, x: np.ndarray):
        return x < self.threshold

    def include_(self, other: Expression):
        # (x < t') or (x == t') implies (x < t) when t' lies below t
        # (strict, conservative boundary comparison).
        if isinstance(other, (LessExpression, EqualExpression)):
            return other.threshold < self.threshold
        if isinstance(other, InIntervalExpression):
            return other.threshold[1] < self.threshold
        return False

    def exclude_(self, other: Expression):
        # Disjoint when the other predicate lives entirely above our cutoff.
        if isinstance(other, EqualExpression):
            return other.threshold > self.threshold
        if isinstance(other, InIntervalExpression):
            return other.threshold[0] > self.threshold
        if isinstance(other, GreaterExpression):
            return other.threshold > self.threshold
        return False
class EqualExpression(UnaryExpression):
    """Predicate: column value equal to ``threshold``."""

    def apply_(self, x: np.ndarray):
        return x == self.threshold

    def include_(self, other: Expression):
        # An equality only includes an identical equality.
        return isinstance(other, EqualExpression) and other.threshold == self.threshold

    def exclude_(self, other: Expression):
        if isinstance(other, EqualExpression):
            return other.threshold != self.threshold
        # Delegate: the other side knows how to compare against an equality.
        return other.exclude(self)
class InIntervalExpression(UnaryExpression):
    """Predicate: ``threshold[0] < x < threshold[1]`` (open interval)."""

    def apply_(self, x: np.ndarray):
        lo, hi = self.threshold
        return np.logical_and(lo < x, x < hi)

    def include_(self, other: Expression):
        lo, hi = self.threshold
        if isinstance(other, EqualExpression):
            return lo < other.threshold < hi
        if isinstance(other, InIntervalExpression):
            # Strict nesting: the other interval must sit inside ours.
            return lo < other.threshold[0] and other.threshold[1] < hi
        return False

    def exclude_(self, other: Expression):
        lo, hi = self.threshold
        if isinstance(other, EqualExpression):
            return other.threshold < lo or other.threshold > hi
        if isinstance(other, InIntervalExpression):
            # Disjoint when the intervals do not touch.
            return other.threshold[0] > hi or other.threshold[1] < lo
        return other.exclude(self)
class OutIntervalExpression(UnaryExpression):
    """Predicate: column value outside the band [threshold[0], threshold[1]]."""

    def apply_(self, x: np.ndarray):
        lo, hi = self.threshold
        return np.logical_or(lo > x, x > hi)

    def include_(self, other: Expression):
        lo, hi = self.threshold
        if isinstance(other, EqualExpression):
            return lo > other.threshold or other.threshold > hi
        if isinstance(other, GreaterExpression):
            return hi < other.threshold
        if isinstance(other, LessExpression):
            return lo > other.threshold
        if isinstance(other, InIntervalExpression):
            # The other interval must fall entirely on one side of the band.
            return lo > other.threshold[1] or other.threshold[0] > hi
        if isinstance(other, OutIntervalExpression):
            # The other band must strictly contain ours.
            return lo > other.threshold[0] and other.threshold[1] > hi
        return False

    def exclude_(self, other: Expression):
        lo, hi = self.threshold
        if isinstance(other, EqualExpression):
            return lo < other.threshold < hi
        if isinstance(other, InIntervalExpression):
            # Disjoint when the other interval sits inside our excluded band.
            return other.threshold[0] > lo and other.threshold[1] < hi
        return False
class BinaryExpression(Expression):
    """Combination of two sub-expressions via a numpy logical operator.

    Subclasses only set ``logic_op`` (e.g. ``np.logical_and``).
    """
    logic_op: Callable

    def __init__(self, e1: Expression, e2: Expression):
        self.e1 = e1
        self.e2 = e2

    def apply(self, x: np.ndarray):
        return self.logic_op(self.e1.apply(x), self.e2.apply(x))

    def include(self, other: Expression):
        # NOTE(review): inclusion/exclusion are checked operand-wise, which is
        # an approximation — it ignores whether logic_op is AND or OR.
        if isinstance(other, UnaryExpression):
            return self.e1.include(other) or self.e2.include(other)
        if isinstance(other, BinaryExpression):
            first_covered = self.e1.include(other.e1) or self.e2.include(other.e1)
            second_covered = self.e1.include(other.e2) or self.e2.include(other.e2)
            return first_covered and second_covered

    def exclude(self, other: Expression):
        if isinstance(other, UnaryExpression):
            return self.e1.exclude(other) and self.e2.exclude(other)
        if isinstance(other, BinaryExpression):
            first_apart = self.e1.exclude(other.e1) and self.e2.exclude(other.e1)
            second_apart = self.e1.exclude(other.e2) and self.e2.exclude(other.e2)
            return first_apart and second_apart
class AndExpression(BinaryExpression):
    """Conjunction: rows must satisfy both sub-expressions (element-wise AND)."""
    logic_op = staticmethod(np.logical_and)
class OrExpression(BinaryExpression):
    """Disjunction: rows may satisfy either sub-expression (element-wise OR)."""
    logic_op = staticmethod(np.logical_or)
class NGramExpression(Expression):
    """Predicate: count of ``ngram`` (column ``idx`` of a count matrix) exceeds ``threshold``."""

    def __init__(self, idx, threshold, ngram):
        self.idx = idx
        self.threshold = threshold
        self.ngram = ngram  # the n-gram text itself, kept for display

    def apply(self, x: np.ndarray):
        assert x.ndim == 2, 'dimension of x should be 2!'
        mask = x[:, self.idx] > self.threshold
        if isinstance(mask, csr_matrix):
            # Sparse slices stay sparse; flatten to a dense 1-D boolean mask.
            mask = mask.toarray().squeeze()
        return mask

    def include(self, other):
        raise NotImplementedError

    def exclude(self, other):
        raise NotImplementedError

    def __str__(self):
        return (f'=====[{self.__class__}]=====\n'
                f'[idx] {self.idx}\n'
                f'[threshold] {self.threshold}\n'
                f'[ngram] {self.ngram}\n')
class LF:
    """A labeling function: an Expression plus the label it votes for.

    ``acc`` and ``propensity`` default to -1.0, meaning "not yet measured".
    """

    def __init__(self, e: Expression, label: int, acc: float = -1.0, propensity: float = -1.0):
        self.e = e
        self.label = label
        self.acc = acc
        self.propensity = propensity

    def apply(self, x: np.ndarray):
        fired = self.e.apply(x)
        # Rows where the expression fires vote `label`; the rest abstain.
        return fired * self.label + (1 - fired) * ABSTAIN
class AbstractLFApplier:
    """Holds a list of LFs and applies them to a dataset, producing a vote matrix.

    Exposes the per-LF target labels and training accuracies for downstream use.
    """

    def __init__(self, lf_list: List[LF]):
        self.lfs = lf_list
        self.labels = [lf.label for lf in lf_list]
        self.accs = [lf.acc for lf in lf_list]

    @abstractmethod
    def apply(self, dataset):
        """Return an (n_data, n_lfs) matrix of LF votes."""
        raise NotImplementedError

    def __len__(self):
        return len(self.lfs)
class FeatureLFApplier(AbstractLFApplier):
    """Applies feature-threshold LFs to a dense feature matrix.

    An optional ``preprocessor`` maps the incoming dataset to the feature
    matrix; otherwise ``BaseDataset.features`` (or the raw array) is used.
    """

    def __init__(self, lf_list: List[LF], preprocessor: Optional[Callable] = None):
        super().__init__(lf_list)
        self.preprocessor = preprocessor

    def apply(self, dataset: Union[BaseDataset, np.ndarray]):
        if self.preprocessor is not None:
            features = self.preprocessor(dataset)
        elif isinstance(dataset, BaseDataset):
            features = np.array(dataset.features)
        else:
            features = dataset
        # One column per LF: stack each LF's row-wise votes, then transpose.
        return np.stack([lf.apply(features) for lf in self.lfs]).T
class NGramLFApplier(AbstractLFApplier):
    """Applies n-gram LFs; raw text is vectorized with the stored CountVectorizer."""

    def __init__(self, lf_list: List[LF], vectorizer: CountVectorizer):
        super().__init__(lf_list)
        self.vectorizer = vectorizer

    def apply(self, dataset: Union[TextDataset, csr_matrix]):
        if isinstance(dataset, TextDataset):
            corpus = [example['text'] for example in dataset.examples]
            counts = self.vectorizer.transform(corpus)
        else:
            # Already a count matrix.
            counts = dataset
        return np.stack([lf.apply(counts) for lf in self.lfs]).T
class NoEnoughLFError(Exception):
    """Raised when the candidate pool cannot supply the requested number of LFs."""

    def __init__(self, label=None):
        if label is None:
            msg = 'cannot find enough lfs, please lower the min support or the min acc gain!'
        else:
            msg = f'cannot find any lf for label {label}, please lower the min support or the min acc gain!'
        self.message = msg
        super().__init__(msg)
class AbstractLFGenerator(ABC):
    """Base class for generating candidate labeling functions (LFs) from a
    labeled dataset and selecting subsets of them by various strategies.

    Subclasses must populate ``X`` (feature matrix), ``label_to_candidate_lfs``
    and ``lf_applier_type``.
    """
    lf_applier_type: Callable
    X: Union[np.ndarray, csr_matrix]
    label_to_candidate_lfs: dict

    def __init__(self,
                 dataset: Union[BaseDataset, np.ndarray],
                 y: Optional[np.ndarray] = None,
                 min_acc_gain: float = 0.1,
                 min_support: float = 0.01,
                 random_state=None
                 ):
        """
        dataset: source dataset (labels are read from it) or a raw array, in
                 which case ``y`` must be provided.
        min_acc_gain: an LF must beat the class prior by at least this much.
        min_support: minimal covered fraction per LF (stored as a count).
        """
        if isinstance(dataset, BaseDataset):
            self.Y = np.array(dataset.labels)
        else:
            assert y is not None
            self.Y = y
        self.n_class = len(set(self.Y))
        assert self.n_class > 1
        self.dataset = dataset
        self.n_data = len(dataset)
        # min_support arrives as a fraction; keep it as an absolute count.
        self.min_support = int(min_support * self.n_data)
        self.min_acc_gain = min_acc_gain
        self.class_marginal = array_to_marginals(self.Y)
        self.generator = check_random_state(random_state)

    @staticmethod
    def calc_acc(y):
        """Fraction of positives in a binary 0/1 array; 0.0 for an empty array.

        (Fix: previously raised ZeroDivisionError when an LF covered nothing.)
        """
        if len(y) == 0:
            return 0.0
        return np.sum(y) / len(y)

    def check_candidate_lfs_enough_(self, n_lfs: Union[int, List[int]]):
        """Raise NoEnoughLFError when the candidate pool cannot supply n_lfs.

        (Fix: this used ``assert cond, NoEnoughLFError()``, which raised a bare
        AssertionError — and disappeared entirely under ``python -O``.)
        """
        if isinstance(n_lfs, int):
            if sum(map(len, self.label_to_candidate_lfs.values())) <= n_lfs:
                raise NoEnoughLFError()
        else:
            assert len(n_lfs) == self.n_class
            labels = list(range(self.n_class))
            for label, n_lfs_i in zip(labels, n_lfs):
                if len(self.label_to_candidate_lfs[label]) <= n_lfs_i:
                    raise NoEnoughLFError(label)

    def return_candidate_lfs(self):
        """All candidate LFs across classes, flattened into one list."""
        return list(chain.from_iterable(self.label_to_candidate_lfs.values()))

    def generate(self, mode: str, **kwargs):
        """Dispatch to one of the generation strategies by name."""
        if mode == 'exhaustive':
            return self.exhaustive_generate()
        if mode == 'random':
            return self.random_generate(**kwargs)
        if mode == 'accurate':
            return self.accurate_generate(**kwargs)
        if mode == 'correlated':
            return self.correlated_generate(**kwargs)
        if mode == 'cluster_dependent':
            return self.cluster_dependent_generate(**kwargs)
        raise NotImplementedError(f'generate mode {mode} is not implemented!')

    def exhaustive_generate(self) -> AbstractLFApplier:
        """Use every candidate LF."""
        return self.lf_applier_type(self.return_candidate_lfs())

    def random_generate(self, n_lfs: Union[int, List[int]] = 10, duplicated_lf=False) -> AbstractLFApplier:
        """Uniformly sample n_lfs candidates (optionally with replacement).

        ``n_lfs`` may be a per-class list, in which case sampling is stratified.
        """
        if not duplicated_lf:
            self.check_candidate_lfs_enough_(n_lfs)
        if isinstance(n_lfs, int):
            candidate_lfs = self.return_candidate_lfs()
            lfs = list(self.generator.choice(candidate_lfs, n_lfs, replace=duplicated_lf))
        else:
            labels = list(range(self.n_class))
            lfs = []
            for label, n_lfs_i in zip(labels, n_lfs):
                candidate_lfs = self.label_to_candidate_lfs[label]
                lfs += list(self.generator.choice(candidate_lfs, n_lfs_i, replace=duplicated_lf))
        return self.lf_applier_type(lfs)

    def accurate_generate(self, n_lfs: Union[int, List[int]] = 10) -> AbstractLFApplier:
        """Pick the candidates with the highest training accuracy."""
        self.check_candidate_lfs_enough_(n_lfs)
        if isinstance(n_lfs, int):
            lfs = sorted(self.return_candidate_lfs(), key=lambda lf: -lf.acc)[:n_lfs]
        else:
            labels = list(range(self.n_class))
            lfs = []
            for label, n_lfs_i in zip(labels, n_lfs):
                lfs += sorted(self.label_to_candidate_lfs[label], key=lambda lf: -lf.acc)[:n_lfs_i]
        return self.lf_applier_type(lfs)

    def correlated_generate(self, n_lfs: Union[int, List[int]] = 20) -> AbstractLFApplier:
        """Greedily grow a set of mutually correlated LFs.

        Starts from the pair with the highest conditional mutual information
        and repeatedly adds the candidate with the highest mean CMI against
        the already-chosen set.
        """
        self.check_candidate_lfs_enough_(n_lfs)
        if isinstance(n_lfs, int):
            candidate_lfs = self.return_candidate_lfs()
            lfs = [candidate_lfs[i] for i in self._greedy_cmi_selection(candidate_lfs, self.Y, n_lfs)]
        else:
            labels = list(range(self.n_class))
            lfs = []
            for label, n_lfs_i in zip(labels, n_lfs):
                candidate_lfs = self.label_to_candidate_lfs[label]
                binary_y = np.array(self.Y == label, dtype=int)
                lfs += [candidate_lfs[i] for i in self._greedy_cmi_selection(candidate_lfs, binary_y, n_lfs_i)]
        return self.lf_applier_type(lfs)

    def _greedy_cmi_selection(self, candidate_lfs, y, n_lfs):
        """Indices chosen by the greedy CMI procedure (helper for correlated_generate).

        NOTE(review): the greedy step can re-select an index already in
        lfs_idx, producing duplicate LFs — confirm whether that is intended.
        """
        L = np.stack([lf.apply(self.X) for lf in candidate_lfs]).T
        cmi_matrix = calc_cmi_matrix(y, L)
        row_max, col_max = np.unravel_index(cmi_matrix.argmax(), cmi_matrix.shape)
        lfs_idx = [row_max, col_max]
        while len(lfs_idx) < n_lfs:
            sub_cmi_matrix = cmi_matrix[lfs_idx, :]
            lfs_idx.append(sub_cmi_matrix.mean(0).argmax())
        return lfs_idx

    def cluster_dependent_generate(self, n_lfs: Union[int, List[int]] = 10, n_clusters=5) -> AbstractLFApplier:
        """Pick LFs whose accuracy varies most across KMeans clusters of X."""
        self.check_candidate_lfs_enough_(n_lfs)
        cluster_labels = KMeans(n_clusters=n_clusters, random_state=self.generator).fit(self.X).labels_

        def top_by_acc_variance(candidate_lfs, k):
            # Rank candidates by the variance of their per-cluster accuracy.
            L = np.stack([lf.apply(self.X) for lf in candidate_lfs]).T
            acc_var = np.array(
                [cluster_based_accuracy_variance(self.Y, L[:, i], cluster_labels) for i in range(L.shape[1])])
            return [candidate_lfs[i] for i in np.argsort(-acc_var)[:k]]

        if isinstance(n_lfs, int):
            lfs = top_by_acc_variance(self.return_candidate_lfs(), n_lfs)
        else:
            labels = list(range(self.n_class))
            lfs = []
            for label, n_lfs_i in zip(labels, n_lfs):
                lfs += top_by_acc_variance(self.label_to_candidate_lfs[label], n_lfs_i)
        return self.lf_applier_type(lfs)
class FeatureLFGenerator(AbstractLFGenerator):
    """Generate threshold-based candidate LFs over a dense feature matrix.

    For each feature, bin edges holding roughly ``min_support`` points per bin
    are computed; half-bounded (>, <) and interval predicates over those edges
    whose training accuracy beats the class prior by ``min_acc_gain`` become
    candidate LFs.
    """

    def __init__(self,
                 dataset: Union[BaseDataset, np.ndarray],
                 y: Optional[np.ndarray] = None,
                 min_acc_gain: float = 0.1,
                 min_support: float = 0.01,
                 random_state=None
                 ):
        super(FeatureLFGenerator, self).__init__(dataset, y, min_acc_gain, min_support, random_state)
        if isinstance(dataset, BaseDataset):
            self.X = np.array(dataset.features)
        else:
            assert y is not None
            self.X = dataset
        self.n_feature = self.X.shape[1]
        # NOTE: the method name keeps its historical typo ("egdes") because it
        # is part of the class's public surface.
        self.bin_list = self.get_bin_egdes(self.X, self.min_support)
        self.label_to_candidate_lfs, self.idx_to_lfs, self.label_to_idx_to_lfs = self.generate_label_to_lfs()
        self.lf_applier_type = FeatureLFApplier

    @staticmethod
    def get_bin_egdes(X: np.ndarray, bin_size: int):
        """Per-feature lists of bin edges with ~bin_size points per bin.

        Edges are midpoints between neighbouring sorted values; runs of equal
        values are skipped so an edge never splits ties. The last edge is
        forced to the feature's max (a too-small tail bin is merged).
        """
        n_data, n_features = X.shape
        bin_list = []
        for i in range(n_features):
            x = X[:, i]
            argsort_idx = np.argsort(x)
            min_x, max_x = np.min(x), np.max(x)
            bin_list_i = [min_x]
            interval = bin_size
            while interval < n_data:
                thres = x[argsort_idx[interval]]
                if thres == max_x:
                    break
                # Advance past duplicates of the current threshold value.
                while interval < n_data:
                    if x[argsort_idx[interval - 1]] == thres:
                        interval += 1
                        thres = x[argsort_idx[interval]]
                    else:
                        break
                if thres == max_x:
                    bin_list_i.append(max_x)
                    break
                else:
                    # NOTE(review): interval + 1 can reach n_data here, which
                    # would raise IndexError — confirm inputs guarantee slack.
                    bin_list_i.append((thres + x[argsort_idx[interval + 1]]) / 2)
                interval += bin_size
            last_thres = bin_list_i[-1]
            if last_thres != max_x:
                if last_thres > max_x:
                    bin_list_i[-1] = max_x
                else:
                    # Merge the tail bin when it would hold too few points.
                    left = np.sum(np.logical_and(last_thres < x, x < max_x))
                    if left > (bin_size / 2):
                        bin_list_i.append(max_x)
                    else:
                        bin_list_i[-1] = max_x
            bin_list.append(bin_list_i)
        return bin_list

    def generate_label_to_lfs(self):
        """Build candidate LFs; returns (label->lfs, idx->lfs, label->idx->lfs)."""
        label_to_lfs = {}
        label_to_idx_to_lfs = {}
        idx_to_lfs = defaultdict(list)
        for label in range(self.n_class):
            # Fix: np.int was removed in numpy 1.24; use the builtin int.
            y = np.array(self.Y == label, dtype=int)
            min_acc = self.class_marginal[label] + self.min_acc_gain
            idx_to_lfs_i = {}
            for idx in range(self.n_feature):
                bin_list_i = self.bin_list[idx]
                x = self.X[:, idx]
                idx_lfs = self.generate_half_bounded_lf(x, y, idx, label, bin_list_i, min_acc) \
                          + self.generate_interval_lf(x, y, idx, label, bin_list_i, min_acc)
                # NOTE(review): features contributing only one LF are dropped
                # (`> 1`); confirm this is intended rather than `>= 1`.
                if len(idx_lfs) > 1:
                    idx_to_lfs_i[idx] = idx_lfs
                    idx_to_lfs[idx] += idx_lfs
            lfs_for_label = list(chain.from_iterable(idx_to_lfs_i.values()))
            # Fix: was an `assert`, which raised a bare AssertionError and was
            # stripped entirely under `python -O`; raise the dedicated error.
            if len(lfs_for_label) <= 1:
                raise NoEnoughLFError(label)
            label_to_idx_to_lfs[label] = idx_to_lfs_i
            label_to_lfs[label] = lfs_for_label
        return label_to_lfs, idx_to_lfs, label_to_idx_to_lfs

    def generate_half_bounded_lf(self, x, y, idx, label, bin_list, min_acc):
        """Candidate > / < predicates at each interior bin edge.

        A GreaterExpression is tried first; only when it fails the
        accuracy/support bar is the LessExpression considered.
        """
        lfs = []
        n = len(x)
        for thres in bin_list[1:-1]:
            greater_then_idx = x > thres
            greater_acc = self.calc_acc(y[greater_then_idx])
            if greater_acc > min_acc and np.sum(greater_then_idx) > self.min_support:
                propensity = np.sum(greater_then_idx) / n
                lfs.append(LF(e=GreaterExpression(idx=idx, threshold=thres),
                              label=label, acc=greater_acc, propensity=propensity))
            else:
                less_then_idx = x < thres
                less_acc = self.calc_acc(y[less_then_idx])
                if less_acc > min_acc and np.sum(less_then_idx) > self.min_support:
                    propensity = np.sum(less_then_idx) / n
                    lfs.append(LF(e=LessExpression(idx=idx, threshold=thres),
                                  label=label, acc=less_acc, propensity=propensity))
        return lfs

    def generate_interval_lf(self, x, y, idx, label, bin_list, min_acc):
        """Candidate in-interval / out-of-interval predicates over adjacent edges."""
        lfs = []
        n = len(x)
        for i in range(1, len(bin_list) - 1):
            thres = (bin_list[i], bin_list[i + 1])
            in_interval_idx = np.logical_and(thres[0] < x, x < thres[1])
            in_interval_acc = self.calc_acc(y[in_interval_idx])
            if in_interval_acc > min_acc and np.sum(in_interval_idx) > self.min_support:
                propensity = np.sum(in_interval_idx) / n
                lfs.append(LF(e=InIntervalExpression(idx=idx, threshold=thres),
                              label=label, acc=in_interval_acc, propensity=propensity))
            else:
                out_interval_idx = np.logical_or(thres[0] > x, x > thres[1])
                out_interval_acc = self.calc_acc(y[out_interval_idx])
                # Fix: the support check previously summed the scalar accuracy
                # (np.sum(out_interval_acc)) instead of the coverage mask.
                if out_interval_acc > min_acc and np.sum(out_interval_idx) > self.min_support:
                    propensity = np.sum(out_interval_idx) / n
                    lfs.append(LF(e=OutIntervalExpression(idx=idx, threshold=thres),
                                  label=label, acc=out_interval_acc, propensity=propensity))
        return lfs

    def one_feature_one_lf_generate(self, n_lfs: Union[int, List[int]] = 10) -> FeatureLFApplier:
        """Sample n_lfs features, then one random LF per sampled feature."""
        if isinstance(n_lfs, int):
            try:
                sampled_idx = self.generator.choice(list(self.idx_to_lfs.keys()), size=n_lfs)
                lfs = [self.generator.choice(self.idx_to_lfs[idx]) for idx in sampled_idx]
            except ValueError:
                # choice() raises ValueError when there are too few features.
                raise NoEnoughLFError()
        else:
            assert len(n_lfs) == self.n_class
            labels = list(range(self.n_class))
            lfs = []
            for label, n_lfs_i in zip(labels, n_lfs):
                idx_to_lf = self.label_to_idx_to_lfs[label]
                try:
                    sampled_idx = self.generator.choice(list(idx_to_lf.keys()), size=n_lfs_i)
                    lfs_i = [self.generator.choice(idx_to_lf[idx]) for idx in sampled_idx]
                except ValueError:
                    raise NoEnoughLFError(label)
                lfs += lfs_i
        return FeatureLFApplier(lfs)
class NGramLFGenerator(AbstractLFGenerator):
    """Generate keyword (n-gram presence) LFs for text datasets.

    An n-gram becomes a candidate LF for a label when the examples containing
    it carry that label clearly more often than the class prior.
    """

    def __init__(self,
                 dataset: TextDataset,
                 y: Optional[np.ndarray] = None,
                 vectorizer: CountVectorizer = None,
                 ngram_range=(1, 1),
                 min_acc_gain: float = 0.1,
                 min_support: float = 0.01,
                 random_state=None
                 ):
        super(NGramLFGenerator, self).__init__(dataset, y, min_acc_gain, min_support, random_state)
        if vectorizer is None:
            vectorizer = CountVectorizer(strip_accents='ascii',
                                         ngram_range=ngram_range,
                                         analyzer='word',
                                         max_df=0.90,
                                         min_df=self.min_support / self.n_data,
                                         max_features=None,
                                         vocabulary=None,
                                         binary=False)
        corpus = [i['text'] for i in self.dataset.examples]
        self.X = vectorizer.fit_transform(corpus)
        self.vectorizer = vectorizer
        # Fix: get_feature_names() was removed in scikit-learn 1.2; prefer
        # get_feature_names_out() when it exists.
        if hasattr(vectorizer, 'get_feature_names_out'):
            self.idx_to_ngram = list(vectorizer.get_feature_names_out())
        else:
            self.idx_to_ngram = vectorizer.get_feature_names()
        self.n_feature = self.X.shape[1]
        self.label_to_candidate_lfs = self.generate_label_to_lfs()
        self.lf_applier_type = partial(NGramLFApplier, vectorizer=vectorizer)

    def generate_label_to_lfs(self):
        """Scan every n-gram column per label; keep accurate, well-covered ones."""
        label_to_lfs = {}
        for label in range(self.n_class):
            # Fix: np.int was removed in numpy 1.24; use the builtin int.
            y = np.array(self.Y == label, dtype=int)
            min_acc = self.class_marginal[label] + self.min_acc_gain
            lfs = []
            for idx in range(self.n_feature):
                x = self.X[:, idx].toarray().squeeze()
                exist_idx = x > 0
                exist_acc = self.calc_acc(y[exist_idx])
                if exist_acc > min_acc and np.sum(exist_idx) > self.min_support:
                    propensity = np.sum(exist_idx) / self.n_data
                    e = NGramExpression(idx=idx, threshold=0, ngram=self.idx_to_ngram[idx])
                    lfs.append(LF(e=e, label=label, acc=exist_acc, propensity=propensity))
            # Fix: was a stripped-under--O assert with the identical message.
            if len(lfs) <= 1:
                raise NoEnoughLFError(label)
            label_to_lfs[label] = lfs
        return label_to_lfs
|
<reponame>Jay-9912/ACSConv<filename>experiments/mylib/utils.py<gh_stars>0
import os
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch
import torch.nn as nn
import pandas as pd
import os
import time
import random
import torch.nn.functional as F
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import tarfile
import zipfile
plt.switch_backend('agg')
USE_GPU = True
# USE_GPU = False

import collections.abc
container_abcs = collections.abc
from itertools import repeat


def _ntuple(n):
    """Return a parser that repeats a scalar n times but passes iterables through.

    Note: strings are iterable, so they also pass through unchanged.
    """
    def parse(x):
        if isinstance(x, container_abcs.Iterable):
            return x
        return tuple(repeat(x, n))
    return parse


_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)

# Device helpers: the branch is resolved once at import time, so the per-call
# cost is only the (optional) .cuda() transfer.
if USE_GPU and torch.cuda.is_available():
    def to_var(x, requires_grad=False, gpu=None):
        """Move x to the given GPU and set its requires_grad flag."""
        return x.cuda(gpu).requires_grad_(requires_grad)

    def to_device(x, gpu=None):
        """Move x to the given GPU."""
        return x.cuda(gpu)
else:
    def to_var(x, requires_grad=False, gpu=None):
        """CPU fallback: only set the requires_grad flag."""
        return x.requires_grad_(requires_grad)

    def to_device(x, gpu=None):
        """CPU fallback: return x unchanged."""
        return x
class AverageMeter(object):
    """Tracks the latest value and the running (weighted) mean of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # weighted sum of values
        self.count = 0  # total weight seen so far

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` (e.g. a batch mean with batch size n)."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class MultiAverageMeter(object):
    """Vector version of AverageMeter: one sub-meter per tracked metric.

    Sub-meters are created lazily on the first update that needs them, so the
    tracked vector may grow between updates; val/avg/sum/count become lists
    after the first update.
    """

    def __init__(self):
        self.meters = []
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record one vector of values, each with weight ``n``."""
        # Grow the meter list to match the incoming vector length.
        while len(self.meters) < len(val):
            self.meters.append(AverageMeter())
        for i, meter in enumerate(self.meters):
            meter.update(val[i], n)
        self.val = [meter.val for meter in self.meters]
        self.avg = [meter.avg for meter in self.meters]
        self.sum = [meter.sum for meter in self.meters]
        self.count = [meter.count for meter in self.meters]
def log_results(save, epoch, log_dict, writer):
    """Append one epoch's metrics to <save>/logs.csv and mirror them to ``writer``.

    The CSV row is the zero-padded 1-based epoch number followed by every
    value in ``log_dict`` order (dicts preserve insertion order).
    """
    row = ['%03d,' % ((epoch + 1),)]
    row.extend('%0.6f,' % (value,) for value in log_dict.values())
    with open(os.path.join(save, 'logs.csv'), 'a') as f:
        f.write(''.join(row) + '\n')
    for key, value in log_dict.items():
        writer.add_scalar(key, value, epoch)
def one_hot_to_categorical(x, dim):
    """Invert a one-hot encoding: index of the maximal entry along ``dim``."""
    categorical = x.argmax(dim=dim)
    return categorical
def categorical_to_one_hot(x, dim=1, expand_dim=False, n_classes=None):
    """One-hot encode an integer-valued tensor along ``dim``.

    when dim = -1:
        b x 1 => b x n_classes
    when dim = 1:
        b x 1 x h x w => b x n_classes x h x w

    Accepts numpy arrays (converted to tensors) and float tensors holding
    integral values. ``n_classes`` defaults to max(x) + 1. With
    ``expand_dim`` a new class axis is inserted; otherwise the existing axis
    at ``dim`` must be a singleton. Returns a LongTensor on x's device.
    """
    if type(x) == np.ndarray:
        x = torch.Tensor(x)
    # Values must be integral (2.0 is fine, 2.5 is not).
    assert torch.allclose(x, x.long().to(x.dtype))
    x = x.long()
    if n_classes is None:
        n_classes = int(torch.max(x)) + 1
    if expand_dim:
        x = x.unsqueeze(dim)
    else:
        assert x.shape[dim] == 1
    target_shape = list(x.shape)
    target_shape[dim] = n_classes
    one_hot = torch.zeros(target_shape).to(x.device).scatter_(dim=dim, index=x, value=1.)
    return one_hot.long()
def plot_multi_voxels(*multi_voxels):
    """Render 3-D voxel grids in a rows x columns matplotlib figure.

    Each positional argument is one column; a bare 3-D array is treated as a
    single-row column. Accepts torch tensors (moved to CPU) or array-likes.
    """
    columns_data = [np.array(v.cpu()) if isinstance(v, torch.Tensor) else np.array(v)
                    for v in multi_voxels]
    columns_data = [np.expand_dims(v, 0) if v.ndim == 3 else v for v in columns_data]
    n_rows = len(columns_data[0])
    n_cols = len(columns_data)
    fig = plt.figure(figsize=[10 * n_cols, 8 * n_rows])
    for row in range(n_rows):
        for col in range(n_cols):
            # Columns may have fewer rows than the first one; skip the gaps.
            if row < len(columns_data[col]):
                ax = fig.add_subplot(n_rows, n_cols, row * n_cols + col + 1, projection='3d')
                ax.voxels(columns_data[col][row], edgecolor='k')
def plot_multi_shapes(*multi_shapes):
    """Render 2-D images in a rows x columns matplotlib figure.

    Each positional argument is one column; a bare 2-D array is treated as a
    single-row column. Accepts torch tensors (moved to CPU) or array-likes.
    """
    columns_data = [np.array(s.cpu()) if isinstance(s, torch.Tensor) else np.array(s)
                    for s in multi_shapes]
    columns_data = [np.expand_dims(s, 0) if s.ndim == 2 else s for s in columns_data]
    n_rows = len(columns_data[0])
    n_cols = len(columns_data)
    fig = plt.figure(figsize=[10 * n_cols, 8 * n_rows])
    for row in range(n_rows):
        for col in range(n_cols):
            if row < len(columns_data[col]):
                ax = fig.add_subplot(n_rows, n_cols, row * n_cols + col + 1)
                ax.imshow(columns_data[col][row])
def save_model(model, save, valid_error, best_error, save_all):
    """Checkpoint ``model`` to <save>/model.dat and track the best validation error.

    Weights are written when ``save_all`` is set or the error improved;
    returns the (possibly updated) best error.
    """
    improved = valid_error < best_error
    if save_all or improved:
        torch.save(model.state_dict(), os.path.join(save, 'model.dat'))
    if improved:
        best_error = valid_error
        print('New best error: %.4f' % best_error)
    return best_error
def set_seed(seed):
    """Seed every RNG in use: python's random, numpy, torch CPU and all CUDA devices.

    The CUDA calls are no-ops on machines without a GPU, so this is always
    safe to call.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def initialize(modules):
    """Kaiming-uniform (fan_in) initialization for conv / deconv / linear layers.

    Conv/deconv biases are left at their default initialization (matching the
    original behavior); linear biases are zeroed when present.
    """
    for m in modules:
        if isinstance(m, (nn.Conv3d, nn.ConvTranspose3d,
                          nn.Conv2d, nn.ConvTranspose2d)):
            nn.init.kaiming_uniform_(m.weight, mode='fan_in')
        elif isinstance(m, nn.Linear):
            nn.init.kaiming_uniform_(m.weight, mode='fan_in')
            # Fix: layers built with bias=False have m.bias = None, which
            # previously crashed on m.bias.data.zero_().
            if m.bias is not None:
                m.bias.data.zero_()
import sys, time
class Logger(object):
    """Tee-like stream wrapper: every write goes to both the wrapped stream
    (stdout by default) and an append-mode log file.

    A timestamp is written when the logger is created and again when it is
    garbage-collected.
    """

    def __init__(self, filename='terminal log.txt', stream=sys.stdout):
        self.terminal = stream
        self.log = open(filename, 'a')
        stamp = time.strftime("%y-%m-%d %H:%M:%S", time.localtime(time.time()))
        self.log.write(stamp + '\n\n')

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # NOTE(review): deliberately a no-op here; presumably the file is
        # meant to be flushed only on close — confirm before relying on it.
        pass

    def __del__(self):
        stamp = time.strftime("%y-%m-%d %H:%M:%S", time.localtime(time.time()))
        self.log.write('\n' + stamp)
        self.log.close()
def redirect_stdout(save_path):
    """Tee stdout and stderr into files under ``save_path`` (see Logger)."""
    sys.stdout = Logger(os.path.join(save_path, 'stdout.txt'), sys.stdout)
    # Redirect stderr too, so tracebacks end up in the run directory.
    sys.stderr = Logger(os.path.join(save_path, 'stderr.txt'), sys.stderr)
def copy_file_backup(save):
    """Snapshot the running experiment into <save>/backup_code.

    Records host/account/CLI provenance, then copies the entry script, the
    mylib and acsconv packages, and every file next to the entry script
    (excluding logs/tmp/data/caches).
    """
    import shutil, sys, getpass, socket
    backup_dir = os.path.join(save, 'backup_code')
    os.makedirs(backup_dir)
    # Provenance: where / who / how this run was started.
    with open(os.path.join(backup_dir, 'CLI argument.txt'), 'w') as f:
        f.write(f'hostName: {socket.gethostname()}\n'
                f'account: {getpass.getuser()}\n'
                f'save_path: {os.path.realpath(save)}\n'
                f'CUDA_VISIBLE_DEVICES: {os.environ.get("CUDA_VISIBLE_DEVICES")}\n')
        for arg in sys.argv:
            f.write(arg + '\n')
    shutil.copy(sys.argv[0], backup_dir)
    # Whole-package copies so the run can be reproduced from the backup alone.
    shutil.copytree(os.path.join(sys.path[0], '../', 'mylib'), os.path.join(backup_dir, 'mylib'))
    shutil.copytree(os.path.join(sys.path[0], '../../', 'acsconv'), os.path.join(backup_dir, 'acsconv'))
    os.makedirs(os.path.join(backup_dir, 'current_experiment'))
    skipped = ['logs', 'tmp', 'data', '__pycache__']
    for file_path in os.listdir(sys.path[0]):
        if file_path not in skipped:
            shutil.copy(os.path.join(sys.path[0], file_path), os.path.join(backup_dir, 'current_experiment'))
from .sync_batchnorm import SynchronizedBatchNorm3d, SynchronizedBatchNorm2d
def model_to_syncbn(model):
    """Swap every BatchNorm layer in ``model`` for its Synchronized equivalent.

    Learned parameters and buffers are preserved via a state-dict round trip.
    """
    state = model.state_dict()
    _convert_module_from_bn_to_syncbn(model)
    model.load_state_dict(state)
    return model
def _convert_module_from_bn_to_syncbn(module):
    # Recursively replace children in place. A child is swapped when it is a
    # torch.nn batchnorm class (detected by name) for which a SynchronizedXxx
    # counterpart exists in this module's globals().
    for child_name, child in module.named_children():
        cls_name = child.__class__.__name__
        if hasattr(nn, cls_name) and 'batchnorm' in cls_name.lower():
            replacement_cls = globals()['Synchronized' + cls_name]
            # Re-create the layer with the same constructor arguments, read
            # off the existing instance's attributes of the same names.
            ctor_args = replacement_cls.__init__.__code__.co_varnames[1:]
            kwargs = {k: getattr(child, k) for k in ctor_args}
            setattr(module, child_name, replacement_cls(**kwargs))
        else:
            _convert_module_from_bn_to_syncbn(child)
<reponame>datalogics-kam/conan
import unittest
import os
from conans.test.utils.test_files import temp_folder
from conans.client.remote_registry import RemoteRegistry, migrate_registry_file, dump_registry, \
default_remotes
from conans.model.ref import ConanFileReference, PackageReference
from conans.errors import ConanException
from conans.test.utils.tools import TestBufferConanOutput, TestClient
from conans.util.files import save
class RegistryTest(unittest.TestCase):
    """Tests for RemoteRegistry: legacy-file migration, remote
    add/update/remove, recipe-reference tracking and package-reference
    bulk removal.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced with ``assertEqual`` throughout.
    """

    def retro_compatibility_test(self):
        """A legacy text registry without the SSL column migrates to JSON
        with verify_ssl defaulting to True."""
        f = os.path.join(temp_folder(), "aux_file")
        save(f, """conan.io https://server.conan.io
""")  # Without SSL parameter
        new_path = os.path.join(temp_folder(), "aux_file.json")
        migrate_registry_file(f, new_path)
        registry = RemoteRegistry(new_path, TestBufferConanOutput())
        self.assertEqual(registry.remotes.list, [("conan.io", "https://server.conan.io", True)])

    def to_json_migration_test(self):
        """A registry.txt inside the conan config dir is migrated (both
        remotes and recipe references) when a client is created over it."""
        tmp = temp_folder()
        conf_dir = os.path.join(tmp, ".conan")
        f = os.path.join(conf_dir, "registry.txt")
        save(f, """conan.io https://server.conan.io True
lib/1.0@conan/stable conan.io
other/1.0@lasote/testing conan.io
""")
        client = TestClient(base_folder=tmp, servers=False)
        new_path = client.client_cache.registry
        registry = RemoteRegistry(new_path, TestBufferConanOutput())
        self.assertEqual(registry.remotes.list, [("conan.io", "https://server.conan.io", True)])
        self.assertEqual(registry.refs.list, {'lib/1.0@conan/stable': 'conan.io',
                                             'other/1.0@lasote/testing': 'conan.io'})

    def add_remove_update_test(self):
        """Remotes can be added (no duplicate names), updated (must exist)
        and removed (must exist)."""
        f = os.path.join(temp_folder(), "aux_file")
        save(f, dump_registry(default_remotes, {}, {}))
        registry = RemoteRegistry(f, TestBufferConanOutput())
        # Add (verify_ssl defaults to True)
        registry.remotes.add("local", "http://localhost:9300")
        self.assertEqual(registry.remotes.list, [("conan-center", "https://conan.bintray.com", True),
                                                 ("local", "http://localhost:9300", True)])
        # Add with explicit verify_ssl=False
        registry.remotes.add("new", "new_url", False)
        self.assertEqual(registry.remotes.list, [("conan-center", "https://conan.bintray.com", True),
                                                 ("local", "http://localhost:9300", True),
                                                 ("new", "new_url", False)])
        # Duplicate names are rejected
        with self.assertRaises(ConanException):
            registry.remotes.add("new", "new_url")
        # Update (verify_ssl resets to default True unless given)
        registry.remotes.update("new", "other_url")
        self.assertEqual(registry.remotes.list, [("conan-center", "https://conan.bintray.com", True),
                                                 ("local", "http://localhost:9300", True),
                                                 ("new", "other_url", True)])
        # Updating an unknown remote fails
        with self.assertRaises(ConanException):
            registry.remotes.update("new2", "new_url")
        registry.remotes.update("new", "other_url", False)
        self.assertEqual(registry.remotes.list, [("conan-center", "https://conan.bintray.com", True),
                                                 ("local", "http://localhost:9300", True),
                                                 ("new", "other_url", False)])
        # Remove
        registry.remotes.remove("local")
        self.assertEqual(registry.remotes.list, [("conan-center", "https://conan.bintray.com", True),
                                                 ("new", "other_url", False)])
        # Removing an unknown remote fails
        with self.assertRaises(ConanException):
            registry.remotes.remove("new2")

    def refs_test(self):
        """Associating a recipe reference with a remote is idempotent."""
        f = os.path.join(temp_folder(), "aux_file")
        save(f, dump_registry(default_remotes, {}, {}))
        registry = RemoteRegistry(f, TestBufferConanOutput())
        ref = ConanFileReference.loads("MyLib/0.1@lasote/stable")
        remotes = registry.remotes.list
        registry.refs.set(ref, remotes[0].name)
        remote = registry.refs.get(ref)
        self.assertEqual(remote, remotes[0])
        # Setting the same association again must not change the result.
        registry.refs.set(ref, remotes[0].name)
        remote = registry.refs.get(ref)
        self.assertEqual(remote, remotes[0])

    def insert_test(self):
        """The insert parameter controls the position of a new remote;
        an out-of-range index simply appends at the end."""
        f = os.path.join(temp_folder(), "aux_file")
        save(f, """
{
    "remotes": [
        {
            "url": "https://server.conan.io",
            "verify_ssl": true,
            "name": "conan.io"
        }
    ],
    "references": {}
}
""")
        registry = RemoteRegistry(f, TestBufferConanOutput())
        registry.remotes.add("repo1", "url1", True, insert=0)
        self.assertEqual(registry.remotes.list, [("repo1", "url1", True),
                                                 ("conan.io", "https://server.conan.io", True)])
        registry.remotes.add("repo2", "url2", True, insert=1)
        self.assertEqual(registry.remotes.list, [("repo1", "url1", True),
                                                 ("repo2", "url2", True),
                                                 ("conan.io", "https://server.conan.io", True)])
        registry.remotes.add("repo3", "url3", True, insert=5)
        self.assertEqual(registry.remotes.list, [("repo1", "url1", True),
                                                 ("repo2", "url2", True),
                                                 ("conan.io", "https://server.conan.io", True),
                                                 ("repo3", "url3", True)])

    def remove_all_package_test(self):
        """prefs.remove_all drops every package reference of a recipe,
        optionally restricted to a single remote."""
        f = os.path.join(temp_folder(), "aux_file")
        save(f, dump_registry(default_remotes, {}, {}))
        registry = RemoteRegistry(f, TestBufferConanOutput())
        registry.remotes.add("r1", "url1", True, insert=0)
        registry.remotes.add("r2", "url2", True, insert=0)
        ref = ConanFileReference.loads("MyLib/0.1@lasote/stable")
        ref2 = ConanFileReference.loads("MyLib2/0.1@lasote/stable")
        registry.prefs.set(PackageReference(ref, "1"), "r1")
        registry.prefs.set(PackageReference(ref, "2"), "r1")
        registry.prefs.set(PackageReference(ref, "3"), "r1")
        registry.prefs.set(PackageReference(ref, "4"), "r2")
        registry.prefs.set(PackageReference(ref2, "1"), "r1")
        # No remote given: every package of `ref` disappears, `ref2` survives.
        registry.prefs.remove_all(ref)
        self.assertIsNone(registry.prefs.get(PackageReference(ref, "1")))
        self.assertIsNone(registry.prefs.get(PackageReference(ref, "2")))
        self.assertIsNone(registry.prefs.get(PackageReference(ref, "3")))
        self.assertIsNone(registry.prefs.get(PackageReference(ref, "4")))
        self.assertEqual(registry.prefs.get(PackageReference(ref2, "1")).name, "r1")
        registry.prefs.set(PackageReference(ref, "1"), "r1")
        registry.prefs.set(PackageReference(ref, "2"), "r1")
        registry.prefs.set(PackageReference(ref, "3"), "r1")
        registry.prefs.set(PackageReference(ref, "4"), "r2")
        registry.prefs.set(PackageReference(ref2, "1"), "r1")
        # Restricted to "r1": the package tracked on "r2" must survive.
        registry.prefs.remove_all(ref, "r1")
        self.assertIsNone(registry.prefs.get(PackageReference(ref, "1")))
        self.assertIsNone(registry.prefs.get(PackageReference(ref, "2")))
        self.assertIsNone(registry.prefs.get(PackageReference(ref, "3")))
        self.assertEqual(registry.prefs.get(PackageReference(ref, "4")).name, "r2")
        self.assertEqual(registry.prefs.get(PackageReference(ref2, "1")).name, "r1")
|
<gh_stars>0
from sublime_db.core.typecheck import (
Any,
Callable,
Optional
)
import sublime
import sublime_plugin
from . import view_drag_select
command_id = 0
command_data = {}
sublime_command_visible = False
is_running_input = False
class SublimeDebugInputCommand(sublime_plugin.WindowCommand):
    """Hidden window command that funnels command-palette input back to
    the callback registered in ``command_data`` by run_input_command."""

    def run(self, command_id, **args):
        # Palette finished: input is no longer active; hand the collected
        # arguments to the registered run callback.
        global is_running_input
        is_running_input = False
        input_handler, run_callback = command_data[command_id]
        run_callback(**args)

    def input(self, args):
        # Sublime asks which input handler drives this invocation.
        input_handler, run_callback = command_data[args["command_id"]]
        return input_handler

    def is_visible(self):
        # Only visible while run_input_command is presenting the palette.
        return sublime_command_visible
def on_view_drag_select(event):
    """Dismiss the command palette when the user drag-selects in a view
    while one of our input commands is active."""
    if not is_running_input:
        return
    sublime.active_window().run_command("hide_overlay", {"overlay": "command_palette"})
# Close the palette if the user starts interacting with a view mid-input.
view_drag_select.add(on_view_drag_select)
def run_input_command(input, run, on_cancel = None):
    """Show the command palette driven by *input*.

    input : TextInput/ListInput instance collecting the user's answer.
    run : callable invoked with the collected arguments on confirm.
    on_cancel : optional callable invoked if the user dismisses the input.
    """
    global command_id
    # Each invocation gets a fresh id so stale async callbacks can detect
    # that a newer command has superseded them.
    command_id += 1
    current_command = command_id
    command_data[current_command] = [input, run]
    window = sublime.active_window()
    def on_cancel_internal():
        def cb():
            # since we are async here we don't want to hide the panel if a new one was presented
            if current_command == command_id:
                window.run_command("hide_overlay", {
                    "overlay": "command_palette",
                })
        #when we do this while a command is closing it crashes sublime
        sublime.set_timeout(cb, 0)
        global is_running_input
        is_running_input = False
    # Wire the cancellation hooks directly onto the input handler object.
    input._on_cancel_internal = on_cancel_internal
    if on_cancel:
        input._on_cancel = on_cancel
    def cb():
        global sublime_command_visible
        # Temporarily mark our hidden command visible so the palette will
        # accept it, then restore invisibility at the end of this callback.
        sublime_command_visible = True
        # Close any palette that is already open before showing ours.
        window.run_command("hide_overlay", {
            "overlay": "command_palette",
        }
        )
        global is_running_input
        is_running_input = True
        window.run_command("show_overlay", {
            "overlay": "command_palette",
            "command": "sublime_debug_input",
            "args": {
                "command_id" : command_id
            }
        }
        )
        print('run command')
        sublime_command_visible = False
    # Deferred to the next tick -- see the crash note above.
    sublime.set_timeout(cb, 0)
class TextInput(sublime_plugin.TextInputHandler):
    """Free-form text input handler for run_input_command.

    placeholder : ghost text shown while the field is empty.
    initial : text pre-filled into the field.
    on_cancel : callable invoked when the user dismisses the input.
    arg_name : key under which the text is passed to the run callback.

    Fix: removed the leftover debug ``print('canceld')`` from cancel().
    """
    def __init__(self, placeholder=None, initial=None, on_cancel=None, arg_name="text"):
        super().__init__()
        self._placeholder = placeholder
        self._initial = initial
        self.arg_name = arg_name
        self._on_cancel = on_cancel
        # Set by run_input_command so cancellation can close the palette.
        self._on_cancel_internal = None

    def placeholder(self):
        return self._placeholder

    def initial_text(self):
        return self._initial

    def next_input(self, args):
        # A text input is always the final step of the chain.
        return None

    def name(self):
        return self.arg_name

    def cancel(self):
        # Internal bookkeeping first, then the user-supplied callback.
        if self._on_cancel_internal:
            self._on_cancel_internal()
        if self._on_cancel:
            self._on_cancel()
class ListInputItem:
    """One selectable row for ListInput.

    text : label shown in the palette.
    name : value reported by ListInput.description (falls back to text).
    next_input : optional input handler chained after this item is chosen.
    """

    def __init__(self, text, name=None, next_input=None):
        self.next_input = next_input
        self.name = name
        self.text = text
class ListInput(sublime_plugin.ListInputHandler):
    """Selectable-list input handler for run_input_command; items may
    chain to a follow-up handler via ListInputItem.next_input."""

    def __init__(self, values, placeholder=None, index=0, on_cancel=None, arg_name="list"):
        super().__init__()
        # Chained handler remembered by confirm() for next_input().
        self._next_input = None
        self.values = values
        self._placeholder = placeholder
        self.index = index
        self._on_cancel = on_cancel
        self.arg_name = arg_name
        # Set by run_input_command so cancellation can close the palette.
        self._on_cancel_internal = None

    def name(self):
        return self.arg_name

    def placeholder(self):
        return self._placeholder

    def list_items(self):
        # Pair each label with its index; the index is what confirm() gets.
        labels = [[item.text, position] for position, item in enumerate(self.values)]
        return (labels, self.index)

    def confirm(self, value):
        self._next_input = self.values[value].next_input
        return value

    def validate(self, value):
        return True

    def next_input(self, args):
        return self._next_input

    def cancel(self):
        if self._on_cancel_internal:
            self._on_cancel_internal()
        if self._on_cancel:
            self._on_cancel()

    def description(self, value, text):
        chosen = self.values[value]
        return chosen.name or chosen.text
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urlparse, urllib2, urllib
import string
from BeautifulSoup import NavigableString, BeautifulSoup as bs
import re
import socket
from werkzeug import url_fix
import json
from random import choice
# Hard 5-second cap on every socket operation so a slow site cannot hang a scrape.
socket.setdefaulttimeout(5)
class album_metadata:
    """Scrapes album ratings and reviews from several music sites by
    Google "I'm Feeling Lucky" searches (Python 2 / BeautifulSoup 3).

    NOTE(review): all the attributes below are *class* attributes shared
    by every instance; parse methods overwrite them on self, but fields a
    given run never touches leak between instances -- consider moving
    them into an __init__.
    """
    content = bs()           # soup of the most recently fetched page
    allmusicMetadata = {}    # per-site {'rating': ..., 'review': [...]} dicts
    rymMetadata = {}
    discogsMetadata = {}
    itunesMetadata = {}
    pitchforkMetadata = {}
    sputnikmusicMetadata = {}
    rsMetadata = {}
    metacriticMetadata = {}
    songList = []            # track titles (Allmusic)
    genre = []
    styles = ""
    pageUrl = ""             # final URL the content was scraped from
    albumart = ""
    searchUrl = ""           # last Google search URL built by pick_url
    #albumartFile = ""
    def search(self, searchString, contentSite):
        ''' Google I'm Feeling Lucky Search for searchString in contentSite. '''
        if contentSite.lower() == "rollingstone":
            searchString = searchString + " album review"
        ## Url spoofing to get past Google's bot-blocking mechanism.
        searchString = searchString.replace("(", " ").replace(")", " ").replace("-", " ").replace("[", "").replace("]", "")
        user_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:14.0) Gecko/20100101 Firefox/14.0.1'
        headers = {'User-Agent':user_agent,}
        url = self.pick_url(searchString, contentSite, True)
        response = self.open_url(url, headers)
        try:
            isValidUrl = response.geturl()
        except:
            # open_url returned its error string instead of a response object.
            return ""
        for results in range(0, 10):
            # Accept the landed URL only if it belongs to the target site and
            # looks like an album/review page.
            if (isValidUrl.find(contentSite.lower()) != -1) and (isValidUrl.find("/release") != -1 or isValidUrl.find("/album") != -1 or isValidUrl.find("/master") != -1 or isValidUrl.find("/review") != -1 or isValidUrl.find("/albumreviews") != -1 or isValidUrl.find("metacritic.com/music/") != -1):
                if contentSite.lower() == 'rateyourmusic':
                    # Strip /buy, /reviews, /ratings suffixes to reach the
                    # main album page.
                    if (isValidUrl.find('/buy') != -1 or isValidUrl.find('/reviews') != -1 or isValidUrl.find('/ratings') != -1):
                        rc = re.compile("(\\/)(buy|reviews|ratings)", re.IGNORECASE|re.DOTALL)
                        isValidUrl = re.sub(rc, "", isValidUrl)
                        response = self.open_url(isValidUrl, headers)
                break
            else:
                # Lucky search missed: fall back to scanning a normal
                # results page for the first matching link.
                response = self.urlhelper(searchString, contentSite, headers)
                try:
                    isValidUrl = response.geturl()
                except AttributeError:
                    return ""
        self.pageUrl = isValidUrl
        data = response.read()
        # BeautifulSouping the webpage.
        self.content = bs(data)
        return self.content
    def urlhelper(self, searchString, contentSite, headers):
        ''' Do a fallback search on invalid URL. '''
        url = self.pick_url(searchString, contentSite, False)
        response = self.open_url(url, headers)
        firstMatchingUrl = self.fallback_search(bs(response.read()), contentSite)
        response = self.open_url(firstMatchingUrl, headers)
        return response
    def fallback_search(self, searchResult, contentSite):
        ''' For cases where the I'm Feeling Lucky search fails. (like The Doors by The Doors) '''
        # First <a href> on the results page that matches the same
        # album/review URL shapes accepted in search().
        rs = re.compile("(.*)(" + contentSite.lower() + ")(.*)(\\/release|\\/album|\\/master|\\/review|\\/albumreviews|(metacritic)(\\.)(com)(\\/music\\/))(.*)")
        try:
            url = searchResult.findAll("a", {"href" :rs}, limit = 1)[0].get("href")
        except:
            url = ""
        return url
    def pick_url(self, searchString, contentSite, imFeelingLucky):
        ''' Pick between advanced search and normal search. '''
        # Toggle the I'm Feeling Lucky search option.
        if imFeelingLucky:
            # Choosing between hyperlink search (site:foo.com) and general search.
            if (contentSite.find(".") != -1):
                url = "http://www.google.com/search?hl=en&safe=off&btnI&sourceid=navclient&q=" + urllib.quote_plus(searchString.encode('utf-8')) + "+site:" + urllib.quote_plus(contentSite.encode('utf-8'))
            else:
                url = "http://www.google.com/search?hl=en&safe=off&btnI&sourceid=navclient&q=" + urllib.quote_plus(searchString.encode('utf-8')) + "+" + urllib.quote_plus(contentSite.encode('utf-8'))
        else:
            if (contentSite.find(".") != -1):
                url = "http://www.google.com/search?hl=en&safe=off&sourceid=navclient&q=" + urllib.quote_plus(searchString.encode('utf-8')) + "+site:" + urllib.quote_plus(contentSite.encode('utf-8'))
            else:
                url = "http://www.google.com/search?hl=en&safe=off&sourceid=navclient&q=" + urllib.quote_plus(searchString.encode('utf-8')) + "+" + urllib.quote_plus(contentSite.encode('utf-8'))
        self.searchUrl = url
        return url
    def open_url(self, urlS, headers):
        ''' Return contents of url. '''
        # Properly encode special characters in url.
        url = url_fix(urlS)
        # Make request and fetch the webpage..
        request = urllib2.Request(url, None, headers)
        try:
            response = urllib2.urlopen(request, timeout = 5)
        except:
            # NOTE(review): callers detect failure by the absence of
            # .geturl() on this string, not by its text.
            return 'Oops, something went wrong.'
        return response
    def strip_tags(self, html):
        ''' Strips tags out of the html. '''
        html = re.sub('<[^<]+?>', '', html)
        return html
    def allmusic_parse(self, allmusicSoup, getSongList = True, getAlbumArt = True, getGenre = True, getStyles = True):
        ''' Parse the scraped Allmusic data.

        NOTE(review): works on self.content set by search(); the
        allmusicSoup argument is unused (same for the other *_parse methods).
        '''
        try:
            # Parse the rating out of its <span> tag.
            rating = self.content.findAll("span", {"itemprop" :"rating"})
            rating = rating[0].findAll(text = True) # Remove tags
            rating = "<b>" + rating[0] + "/5" + "</b>"
            if not rating:
                raise IndexError
        except IndexError:
            rating = ""
        try:
            # Parse the review out of its <span> tag.
            review = self.content.findAll("div", {"class" :"text"})
            review = [self.strip_tags(str(eachReview)).strip() for eachReview in review] # Remove tags
            if not review:
                raise IndexError
        except IndexError:
            review = [""]
        if getSongList:
            try:
                # List of songs in the album
                self.songList = self.content.findAll("div", {"class" :"title"})
                tempList = []
                for song in self.songList:
                    tempList += song.findAll(text = True)
                self.songList = [a.encode('utf-8') for a in tempList if a != '\n']
            except IndexError:
                self.songList = []
        if getGenre:
            # Genre/styles/art are only attempted once a track list was found.
            if self.songList:
                try:
                    self.genre = self.content.findAll("div", {"class" :"genre"}, limit = 1)[0].findAll(text = True)[4]
                except:
                    self.genre = ""
        if getStyles:
            if self.genre:
                try:
                    self.styles = self.content.findAll("div", {"class" :"styles"})
                    #self.styles = [self.strip_tags(str(style)).strip() for style in self.styles]
                    #self.styles[0] = re.sub(' +', ' - ', self.styles[0])
                    self.styles = self.styles[0].findAll(text = True)
                    self.styles = ' - '.join([x for x in self.styles if x not in [' ', '\n', 'Styles']])
                except:
                    self.styles = ""
        if getAlbumArt:
            if self.songList:
                try:
                    self.albumart = self.content.findAll("img", {"width" :"303"}, limit = 1)[0].get("src")
                    #self.albumart = json.loads(self.albumart)["url"]
                    #self.albumartFile = ''.join(choice(string.ascii_uppercase + string.digits) for x in range(8))
                    #self.albumartFile = "./static/" + self.albumartFile + ".jpg"
                    #urllib.urlretrieve(str(self.albumart), self.albumartFile)
                except:
                    self.albumart = ""
        # Populate the metadata dictionary.
        self.allmusicMetadata = {'rating': rating, 'review': review}
        return self.allmusicMetadata
    def rym_parse(self, rymSoup):
        ''' Parse the scraped RateYourMusic data. '''
        try:
            rating = self.content.findAll("span", {"class" :"rating", "style" :"display:none;"})
            rating = rating[0].findAll(text = True)
            ratingCount = self.content.findAll("a", {"href" :"#ratings"})
            ratingCount = ratingCount[0].findAll(text = True)
            rating = "<b>" + rating[0].strip('" ') + "/5" + "</b>" + " from " + "<b>" + ratingCount[0] + " ratings" + "</b>" + "."
            if not rating:
                raise IndexError
        except IndexError:
            rating = ""
        try:
            review = self.content.findAll("td", {"style" :"padding:25px 50px 50px 50px;"}, limit = 2)
            review = [self.strip_tags(str(eachReview)).strip() for eachReview in review]
            if not review:
                raise IndexError
        except IndexError:
            review = ["", ""]
        self.rymMetadata = {'rating': rating, 'review': review}
        return self.rymMetadata
    def discogs_parse(self, discogSoup):
        ''' Parse the scraped Discogs data. '''
        # Hacking around the varying span class attributes for every document with regex.
        try:
            rg = re.compile("(rating_value)(\\s+)(rating)(_)(value)(_)(r)(\\d+)", re.IGNORECASE|re.DOTALL)
            rating = self.content.findAll("span", {"class" :rg})
            rating = rating[0].findAll(text = True)
            rc = re.compile("(rating)(_)(count)(_)(r)(\\d+)", re.IGNORECASE|re.DOTALL)
            ratingCount = self.content.findAll("span", {"class" :rc})
            ratingCount = ratingCount[0].findAll(text = True)
            rating = "<b>" + rating[0] + "/5" + "</b>" + " from " + "<b>" + ratingCount[0] + " ratings" + "</b>" + "."
            if not rating:
                raise IndexError
        except IndexError:
            rating = ""
        try:
            review = self.content.findAll("div", {"class": "review_comment"})
            review = [self.strip_tags(str(eachReview)).strip() for eachReview in review]
            if not review:
                raise IndexError
        except IndexError:
            review = [""]
        self.discogsMetadata = {'rating': rating, 'review': review}
        return self.discogsMetadata
    def itunes_parse(self, itunesSoup):
        ''' Parse the scraped iTunes Store data. '''
        try:
            rg = re.compile("(.+)(\\s+)(stars)(,)(\\s+)(.+)(\\s+)(Ratings)")
            rating = self.content.findAll("div", {"aria-label" :rg})[0].get("aria-label")
            rating = "<b>" + rating + "</b>"
            if not rating:
                raise IndexError
        except IndexError:
            rating = ""
        try:
            review = self.content.findAll("div", {"class" :"product-review"})
            if not review:
                review = self.content.findAll("div", {"class" :"customer-review"}, limit = 1)
            review = [self.strip_tags(str(eachReview)).strip() for eachReview in review]
            # Drop the first 13 characters -- presumably a fixed leading
            # label in the markup; verify against a live page.
            review[0] = review[0][13:]
            if not review:
                raise IndexError
        except IndexError:
            review = [""]
        self.itunesMetadata = {'rating': rating, 'review': review}
        return self.itunesMetadata
    def pitchfork_parse(self, pitchforkSoup):
        ''' Parse the scraped Pitchfork data. '''
        try:
            rg = re.compile("(score)(.*)(score)(-)(\\d+)(-)(\\d+)")
            rating = self.content.findAll("span", {"class" :rg}, limit = 1)
            rating = rating[0].findAll(text = True)
            rating = "<b>" + rating[0].strip() + "/10" + "</b>"
            if not rating:
                raise IndexError
        except IndexError:
            rating = ""
        try:
            review = self.content.findAll("div", {"class" :"editorial"}, limit = 1)
            review = [self.strip_tags(str(eachReview)).strip() for eachReview in review]
            if not review:
                raise IndexError
        except IndexError:
            review = [""]
        self.pitchforkMetadata = {'rating': rating, 'review': review}
        return self.pitchforkMetadata
    def sputnikmusic_parse(self, sputnikmusicSoup):
        ''' Parse the scraped Sputnikmusic data. '''
        try:
            rating = self.content.findAll("font", {"size" :"4", "color" :"#888888"})
            rating = rating[0].findAll(text = True)
            rating = "<b>" + rating[0].strip() + "/5" + "</b>"
            if not rating:
                raise IndexError
        except IndexError:
            rating = ""
        try:
            review = self.content.findAll("div", {"id" :"leftColumn"}, limit = 1)
            review = [self.strip_tags(str(eachReview)).strip() for eachReview in review]
            # Cut the voting footer and the social "Share:" block.
            rg = re.compile("(\\d+)( of )(\\d+)( thought this review was well written)")
            rc = re.compile("(Share:)(.*)")
            review[0] = re.sub(rg, '<br />', review[0])
            review[0] = review[0].replace("\n", "")
            review[0] = re.sub(rc, '', review[0])#.decode('ISO-8859-1').encode('utf-8')
            # Bad Hack to get around ISO-8859-1 to UTF-8 conversion issues.
            rc = re.compile('(&)((?:[a-z][a-z]+))(;)(&)((?:[a-z][a-z]+))(;)')
            review[0] = re.sub(rc, '', review[0])
            rc = re.compile('(â|€)')
            review[0] = re.sub(rc, '', review[0]).split("(function(")[0]
            #review[0] = review[0].replace('€', '')
            #review[0] = review[0].replace('â', '\'')
            if not review:
                raise IndexError
        except IndexError:
            review = [""]
        self.sputnikmusicMetadata = {'rating': rating, 'review': review}
        return self.sputnikmusicMetadata
    def rs_parse(self, rsSoup):
        ''' Parse the scraped Rolling Stone data. '''
        try:
            rating = self.content.findAll("span", {"itemprop" :"ratingValue"}, limit = 1)
            rating = rating[0].findAll(text = True)
            if rating[0] == '0':
                rating = "<i>Not rated</i>"
            else:
                rating = "<b>" + rating[0].strip() + "/5" + "</b>"
            if not rating:
                raise IndexError
        except IndexError:
            rating = ""
        try:
            review = self.content.findAll("div", {"itemprop" :"reviewBody"}, limit = 1)
            review = [self.strip_tags(str(eachReview)).strip() for eachReview in review]
            if not review:
                raise IndexError
        except IndexError:
            review = [""]
        self.rsMetadata = {'rating': rating, 'review': review}
        return self.rsMetadata
    def metacritic_parse(self, metacriticSoup):
        ''' Parse the scraped metacritic data. '''
        try:
            rating = self.content.findAll("span", {"property" :"v:average"}, limit = 1)
            rating = rating[0].findAll(text = True)
            rating = "<b>" + rating[0].strip() + "/100" + "</b>"
            if not rating:
                raise IndexError
        except IndexError:
            rating = ""
        try:
            review = self.content.findAll("span", {"class" :"inline_expand_collapse inline_collapsed"}, limit = 1)
            review = [self.strip_tags(str(eachReview)).strip() for eachReview in review]
            review[0] = review[0].replace("… Expand", "")
            if not review:
                raise IndexError
        except IndexError:
            review = [""]
        self.metacriticMetadata = {'rating': rating, 'review': review}
        return self.metacriticMetadata
if __name__ == "__main__":
    # Manual smoke test (Python 2): scrape the Rolling Stone review page for
    # "kid a" and print the Allmusic-style metadata.  The commented lines
    # below exercise the other site parsers.
    a = album_metadata()
    stringo = "kid a"
    b = a.search(stringo, "rollingstone")
    #a.metacritic_parse(b)
    #print a.metacriticMetadata
    #print b
    #a.sputnikmusic_parse(b)
    #print a.sputnikmusicMetadata
    #a.pitchfork_parse(b)
    #print a.pitchforkMetadata
    a.allmusic_parse(b)
    #a.rs_parse(b)
    #print a.rsMetadata
    #print a.styles
    #b = a.search('abbey road the beatles', 'rateyourmusic')
    #print a.pageUrl
    #a.rym_parse(b)
    #b = a.search('abbey road the beatles', 'discogs')
    #a.discogs_parse(b)
    print a.allmusicMetadata
    #print a.songList
    #print a.albumart
    #print
    #print a.rymMetadata
    #print a.pageUrl
    #print
    #print a.discogsMetadata
    #a.itunes_parse(b)
    #print a.itunesMetadata
|
<reponame>vishalbelsare/pycobra<filename>pycobra/visualisation.py
# Licensed under the MIT License - https://opensource.org/licenses/MIT
import math
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import random
from scipy.spatial import Voronoi, voronoi_plot_2d
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.utils import shuffle
from pycobra.diagnostics import Diagnostics
from pycobra.cobra import Cobra
from pycobra.kernelcobra import KernelCobra
from pycobra.ewa import Ewa
from pycobra.classifiercobra import ClassifierCobra
from collections import OrderedDict
import logging
logger = logging.getLogger('pycobra.visualisation')
def create_labels(indice_info):
    """
    Helper method to create labels for plotting.

    Parameters
    ----------
    indice_info: list of strings
        List of machine names

    Return
    ------
    label: string
        Serves as a label during plotting.
    """
    # Machines are joined in reverse order, reproducing the original
    # prepend-and-trim construction ("c + b + a" for ["a", "b", "c"]).
    return " + ".join(reversed(list(indice_info)))
def gen_machine_colors(only_colors=False, num_colors=None, indice_info=None, rgb=False, plot_machines=None, colors=None):
    """
    Helper method to create a machine combinations to color dictionary, or a list of colors.
    Parameters
    ----------
    indice_info: dictionary, optional
        Dictionary which is a result of running pycobra.visualisation.indice_info. Maps indices to combinations of machines.
    only_colors: bool, optional
        Option to return only a list of colors
    num_colors: int, optional
        Number of colors to be returned if using only_colors
    rgb : bool, optional
        Creates dictionary based on machine used and r, g, b, a scheme.
    plot_machines: list of strings, optional
        List of machines to use in rgb coloring.
    colors: list of strings, optional
        List of colors to be used for pairing with machine_combinations
    Return
    ------
    machine_colors: dictionary
        Dictionary mapping machine combinations and color.
    """
    # note: for default colors to be assigned, the latest version of matplotlib is needed.
    # the code below is taken from the colors example:
    from matplotlib import colors as mcolors
    # BUG FIX: this master color table used to be bound to the local name
    # `colors`, silently clobbering the caller-supplied `colors` argument,
    # so the explicit-pairing branch below could never see it.
    named_colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)

    # Sort colors by hue, saturation, value and name.
    by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
                    for name, color in named_colors.items())
    sorted_names = [name for hsv, name in by_hsv]

    # if we need only as many colors as individual machines
    if only_colors:
        # make the sorted names list equal in size to the machine combinations:
        # first pick equally spaced out colors, then randomly pick the right amount
        sorted_names = sorted_names[0::int(len(sorted_names) / num_colors)]
        return random.sample(sorted_names, num_colors)

    machine_combinations = list(set(indice_info.values()))
    machine_colors = {}
    if rgb:
        # Encode membership of the first four plot_machines as r/g/b/alpha.
        for indice in indice_info:
            r, g, b, a = 0, 0, 0, 0.4
            if plot_machines[0] in indice_info[indice]:
                r = 1
            if plot_machines[1] in indice_info[indice]:
                g = 1
            if plot_machines[2] in indice_info[indice]:
                b = 1
            if plot_machines[3] in indice_info[indice]:
                a = 1
            # Pure white would be invisible on a white background; use black.
            if (r, g, b) == (1, 1, 1):
                r, g, b = 0, 0, 0
            machine_colors[indice_info[indice]] = (r, g, b, a)
        return machine_colors

    # if it isn't rgb, pair each unique machine combination with a provided color
    if colors is not None and len(machine_combinations) == len(colors):
        for machine, color in zip(machine_combinations, colors):
            machine_colors[machine] = color
        return machine_colors

    # if it's none of the above options, we create colors similar to the only_colors option.
    sorted_names = sorted_names[0::int(len(sorted_names) / len(machine_combinations))]
    colors = random.sample(sorted_names, len(machine_combinations))
    for machine, color in zip(machine_combinations, colors):
        machine_colors[machine] = color
    return machine_colors
def voronoi_finite_polygons_2d(vor, radius=None):
    """
    Code originally written by pv: https://gist.github.com/pv/8036995.
    Helper method for voronoi.
    Reconstruct infinite voronoi regions in a 2D diagram to finite
    regions.
    Parameters
    ----------
    vor : Voronoi
        Input diagram
    radius : float, optional
        Distance to 'points at infinity'.
    Returns
    -------
    regions : list of tuples
        Indices of vertices in each revised Voronoi regions.
    vertices : list of tuples
        Coordinates for revised Voronoi vertices. Same as coordinates
        of input vertices, with 'points at infinity' appended to the
        end.
    """
    if vor.points.shape[1] != 2:
        raise ValueError("Requires 2D input")
    new_regions = []
    new_vertices = vor.vertices.tolist()
    # Mean of the input points; used to orient infinite ridges outwards.
    center = vor.points.mean(axis=0)
    if radius is None:
        # Default 'infinity': twice the overall coordinate spread.
        radius = vor.points.ptp().max()*2
    # Construct a map containing all ridges for a given point
    all_ridges = {}
    for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
        all_ridges.setdefault(p1, []).append((p2, v1, v2))
        all_ridges.setdefault(p2, []).append((p1, v1, v2))
    # Reconstruct infinite regions
    for p1, region in enumerate(vor.point_region):
        vertices = vor.regions[region]
        if all([v >= 0 for v in vertices]):
            # finite region
            new_regions.append(vertices)
            continue
        # reconstruct a non-finite region (negative index = vertex at infinity)
        ridges = all_ridges[p1]
        new_region = [v for v in vertices if v >= 0]
        for p2, v1, v2 in ridges:
            if v2 < 0:
                v1, v2 = v2, v1
            if v1 >= 0:
                # finite ridge: already in the region
                continue
            # Compute the missing endpoint of an infinite ridge
            t = vor.points[p2] - vor.points[p1] # tangent
            t /= np.linalg.norm(t)
            n = np.array([-t[1], t[0]]) # normal
            midpoint = vor.points[[p1, p2]].mean(axis=0)
            # Point the ridge away from the diagram's center, out to `radius`.
            direction = np.sign(np.dot(midpoint - center, n)) * n
            far_point = vor.vertices[v2] + direction * radius
            new_region.append(len(new_vertices))
            new_vertices.append(far_point.tolist())
        # sort region counterclockwise (by angle around the region centroid)
        vs = np.asarray([new_vertices[v] for v in new_region])
        c = vs.mean(axis=0)
        angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])
        new_region = np.array(new_region)[np.argsort(angles)]
        # finish
        new_regions.append(new_region.tolist())
    return new_regions, np.asarray(new_vertices)
class Visualisation():
"""
Plots and visualisations of COBRA aggregates.
If X_test and y_test is loaded, you can run the plotting functions with no parameters.
"""
def __init__(self, aggregate, X_test, y_test, plot_size=8, estimators={}, random_state=None, **kwargs):
"""
Parameters
----------
aggregate: pycobra.cobra.Cobra or pycobra.cobra.Ewa object
aggregate on which we want to run our analysis on.
X_test : array-like, shape = [n_samples, n_features].
Testing data.
y_test : array-like, shape = [n_samples].
Test data target values.
plot_size: int, optional
Size of matplotlib plots.
estimators: list, optional
List of machine objects to visualise. Default is machines used in aggregate.
"""
self.aggregate = aggregate
self.X_test = X_test
self.y_test = y_test
self.plot_size = plot_size
self.estimators = estimators
# load results so plotting doesn't need parameters
self.kwargs = kwargs
self.machine_test_results = {}
self.machine_MSE = {}
if len(self.estimators) == 0:
self.estimators = self.aggregate.estimators_
# if we are visualising ClassifierCobra then we must use accuracy score instead of MSE
if type(aggregate) is ClassifierCobra:
self.machine_test_results["ClassifierCobra"] = self.aggregate.predict(self.X_test)
self.machine_error["ClassifierCobra"] = 1 - accuracy_score(self.y_test, self.machine_test_results["ClassifierCobra"])
for machine in self.estimators_:
self.machine_test_results[machine] = self.estimators_[machine].predict(self.X_test)
# add MSE
self.machine_error[machine] = 1 - accuracy_score(self.y_test, self.machine_test_results[machine])
names_dict = {Cobra: "Cobra", Ewa: "EWA", KernelCobra: "KernelCobra"}
for name in names_dict:
if type(aggregate) == name:
if type(aggregate) == KernelCobra:
self.machine_test_results[names_dict[name]] = self.aggregate.predict(self.X_test, bandwidth=kwargs["bandwidth_kernel"])
else:
self.machine_test_results[names_dict[name]] = self.aggregate.predict(self.X_test)
self.machine_MSE[names_dict[name]] = mean_squared_error(self.y_test, self.machine_test_results[names_dict[name]])
for machine in self.estimators:
if type(self.estimators[machine]) == KernelCobra:
self.machine_test_results[machine] = self.estimators[machine].predict(self.X_test, bandwidth=kwargs["bandwidth_kernel"])
else:
self.machine_test_results[machine] = self.estimators[machine].predict(self.X_test)
self.machine_MSE[machine] = mean_squared_error(self.y_test, self.machine_test_results[machine])
self.random_state = random_state
if self.random_state is None:
self.random_state = self.aggregate.random_state
def plot_machines(self, machines=None, colors=None, plot_indices=False):
"""
Plot the results of the machines versus the actual answers (testing space).
Parameters
----------
machines: list, optional
List of machines to plot.
colors: list, optional
Colors of machines.
plot_indices: boolean, optional.
Plots truth values against indices.
"""
if machines is None:
machines = self.estimators
plt.figure(figsize=(self.plot_size, self.plot_size))
if plot_indices or self.X_test.size != self.y_test.size:
linspace = np.linspace(0, len(self.y_test), len(self.y_test))
if colors is None:
colors = gen_machine_colors(only_colors=True, num_colors=len(machines) + 1)
if plot_indices or self.X_test.size != self.y_test.size:
plt.scatter(linspace, self.y_test, color=colors[0], label="Truth")
else:
plt.scatter(self.X_test, self.y_test, color=colors[0], label="Truth")
for machine, color in zip(machines, colors[1:]):
if plot_indices or self.X_test.size != self.y_test.size:
plt.scatter(linspace, self.machine_test_results[machine], color=color, label=machine)
else:
plt.scatter(self.X_test, self.machine_test_results[machine], color=color, label=machine)
if plot_indices:
plt.xlabel("Point Indice")
plt.legend()
plt.show()
return plt
def QQ(self, machine="Cobra"):
"""
Plots the machine results vs the actual results in the form of a QQ-plot.
Parameters
----------
machine: string, optional
Name of machine to perform QQ-plot.
"""
plt.figure(figsize=(self.plot_size, self.plot_size))
axes = plt.gca()
pred = self.machine_test_results[machine]
# this is to make the plot look neater
min_limits = math.fabs(min(min(pred), min(self.y_test)))
max_limits = max(max(pred), max(self.y_test))
axes.set_xlim([min(min(pred), min(self.y_test)) - min_limits, max(max(pred), max(self.y_test)) + max_limits])
axes.set_ylim([min(min(pred), min(self.y_test)) - min_limits, max(max(pred), max(self.y_test)) + max_limits])
# scatter the machine responses versus the actual y_test
plt.scatter(self.y_test, pred, label=machine)
axes.plot(axes.get_xlim(), axes.get_ylim(), ls="--", c=".3")
# labels
plt.xlabel('RESPONSES')
plt.ylabel('PREDICTED')
plt.legend()
plt.show()
return plt
def boxplot(self, reps=100, info=False, dataframe=None, kind="normal"):
"""
Plots boxplots of machines.
Parameters
----------
reps: int, optional
Number of times to repeat experiments for boxplot.
info: boolean, optional
Returns data
"""
kwargs = self.kwargs
if dataframe is None:
if type(self.aggregate) is Cobra:
MSE = {k: [] for k, v in self.estimators.items()}
MSE["Cobra"] = []
for i in range(0, reps):
cobra = Cobra(epsilon=self.aggregate.epsilon)
X, y = shuffle(self.aggregate.X_, self.aggregate.y_)
cobra.fit(X, y, default=False)
cobra.split_data(shuffle_data=True)
for machine in self.aggregate.estimators_:
self.aggregate.estimators_[machine].fit(cobra.X_k_, cobra.y_k_)
cobra.load_machine(machine, self.aggregate.estimators_[machine])
cobra.load_machine_predictions()
for machine in self.estimators:
if "Cobra" in machine:
self.estimators[machine].fit(X, y)
else:
self.estimators[machine].fit(cobra.X_k_, cobra.y_k_)
try:
if type(self.estimators[machine]) == KernelCobra:
preds = self.estimators[machine].predict(self.X_test, bandwidth=kwargs["bandwidth_kernel"])
else:
preds = self.estimators[machine].predict(self.X_test)
except KeyError:
preds = self.estimators[machine].predict(self.X_test)
MSE[machine].append(mean_squared_error(self.y_test, preds))
MSE["Cobra"].append(mean_squared_error(self.y_test, cobra.predict(self.X_test)))
try:
dataframe = pd.DataFrame(data=MSE)
except ValueError:
return MSE
if type(self.aggregate) is KernelCobra:
MSE = {k: [] for k, v in self.estimators.items()}
MSE["KernalCobra"] = []
for i in range(0, reps):
kernel = KernelCobra()
X, y = shuffle(self.aggregate.X_, self.aggregate.y_)
kernel.fit(X, y, default=False)
kernel.split_data(shuffle_data=True)
for machine in self.aggregate.estimators_:
self.aggregate.estimators_[machine].fit(kernel.X_k_, kernel.y_k_)
kernel.load_machine(machine, self.aggregate.estimators_[machine])
kernel.load_machine_predictions()
for machine in self.estimators:
if "Cobra" in machine:
self.estimators[machine].fit(X, y)
else:
self.estimators[machine].fit(cobra.X_k_, cobra.y_k_)
try:
if type(self.estimators[machine]) == KernelCobra:
preds = self.estimators[machine].predict(self.X_test, bandwidth=kwargs["bandwidth_kernel"])
else:
preds = self.estimators[machine].predict(self.X_test)
except KeyError:
preds = self.estimators[machine].predict(self.X_test)
MSE[machine].append(mean_squared_error(self.y_test, preds))
MSE["KernelCobra"].append(mean_squared_error(self.y_test, kernel.predict(self.X_test, bandwidth=kwargs[bandwidth_kernel])))
try:
dataframe = pd.DataFrame(data=MSE)
except ValueError:
return MSE
if type(self.aggregate) is Ewa:
MSE = {k: [] for k, v in self.aggregate.estimators_.items()}
MSE["EWA"] = []
for i in range(0, reps):
ewa = Ewa(random_state=self.random_state, beta=self.aggregate.beta)
X, y = shuffle(self.aggregate.X_, self.aggregate.y_, random_state=self.aggregate.random_state)
ewa.fit(X, y, default=False)
ewa.split_data(shuffle_data=True)
for machine in self.estimators:
self.aggregate.estimators_[machine].fit(ewa.X_k_, ewa.y_k_)
ewa.load_machine(machine, self.aggregate.estimators_[machine])
ewa.load_machine_weights(self.aggregate.beta)
X_test, y_test = shuffle(self.X_test, self.y_test, random_state=self.aggregate.random_state)
for machine in self.estimators:
if "EWA" in machine:
self.estimators[machine].fit(X, y)
else:
self.estimators[machine].fit(ewa.X_k_, ewa.y_k_)
try:
if type(self.estimators[machine]) == KernelCobra:
preds = self.estimators[machine].predict(self.X_test, bandwidth=kwargs["bandwidth_kernel"])
else:
preds = self.estimators[machine].predict(self.X_test)
except KeyError:
preds = self.estimators[machine].predict(self.X_test)
MSE[machine].append(mean_squared_error(y_test, preds))
MSE["EWA"].append(mean_squared_error(y_test, ewa.predict(X_test)))
try:
dataframe = pd.DataFrame(data=MSE)
except ValueError:
return MSE
if type(self.aggregate) is ClassifierCobra:
errors = {k: [] for k, v in self.aggregate.estimators_.items()}
errors["ClassifierCobra"] = []
for i in range(0, reps):
cc = ClassifierCobra(random_state=self.random_state)
X, y = shuffle(self.aggregate.X_, self.aggregate.y_, random_state=self.aggregate.random_state)
cc.fit(X, y, default=False)
cc.split_data(shuffle_data=True)
for machine in self.aggregate.estimators_:
self.aggregate.estimators_[machine].fit(cc.X_k_, cc.y_k_)
cc.load_machine(machine, self.aggregate.estimators_[machine])
cc.load_machine_predictions()
X_test, y_test = shuffle(self.X_test, self.y_test, random_state=self.aggregate.random_state)
for machine in self.estimators:
errors[machine].append(1 - accuracy_score(y_test, self.estimators[machine].predict(X_test)))
errors["ClassifierCobra"].append(1 - accuracy_score(y_test, cc.predict(X_test)))
try:
dataframe = pd.DataFrame(data=errors)
except ValueError:
return errors
# code for different boxplot styles using the python graph gallery tutorial:
# https://python-graph-gallery.com/39-hidden-data-under-boxplot/
sns.set(style="whitegrid")
if kind == "normal":
sns.boxplot(data=dataframe)
plt.title("Boxplot")
if kind == "violin":
sns.violinplot(data=dataframe)
plt.title("Violin Plot")
if kind == "jitterplot":
ax = sns.boxplot(data=dataframe)
ax = sns.stripplot(data=dataframe, color="orange", jitter=0.2, size=2.5)
plt.title("Boxplot with jitter", loc="left")
plt.ylabel("Mean Squared Errors")
plt.xlabel("Estimators")
plt.figure(figsize=(self.plot_size, self.plot_size))
plt.show()
if info:
return dataframe
def indice_info(self, X_test=None, y_test=None, epsilon=None, line_points=200):
"""
Method to return information about each indices (query) optimal machines for testing data.
Parameters
----------
epsilon: float, optional
Epsilon value to use for diagnostics
line_points: int, optional
if epsilon is not passed, optimal epsilon is found per point.
Returns
-------
indice_info: dicitonary mapping indice to optimal machines.
MSE: dictionary mapping indice to mean squared error for optimal machines for that point.
"""
if X_test is None:
X_test = self.X_test
if y_test is None:
y_test = self.y_test
indice = 0
indice_info = {}
MSE = {}
cobra_diagnostics = Diagnostics(aggregate=self.aggregate, random_state=self.random_state)
if epsilon is None:
for data_point, response in zip(X_test, y_test):
info = cobra_diagnostics.optimal_machines_grid(data_point, response, line_points=line_points)
indice_info[indice], MSE[indice] = info[0][0], info[1]
indice += 1
else:
for data_point, response in zip(X_test, y_test):
info = cobra_diagnostics.optimal_machines(data_point, response, single=True, epsilon=epsilon)
indice_info[indice], MSE[indice] = info[0], info[1]
indice += 1
return indice_info, MSE
def color_cobra(self, X_test=None, y_test=None, line_points=200, epsilon=None, indice_info=None, plot_machines=["ridge", "lasso", "random_forest", "tree"], single=False, machine_colors=None):
"""
Plot the input space and color query points based on the optimal machine used for that point.
Parameters
----------
epsilon: float, optional
Epsilon value to use for diagnostics. Used to find indice_info if it isn't passed.
line_points: int, optional
if epsilon is not passed, optimal epsilon is found per point. Used to find indice_info if it isn't passed.
indice_info: dicitonary, optional
dictionary mapping indice to optimal machines.
plot_machines: list, optional
list of machines to be plotted.
single: bool, optional
plots a single plot with each machine combination.
machine_colors: dictionary, optional
Depending on the kind of coloring, a dictionary mapping machines to colors.
"""
if indice_info is None:
indice_info = self.indice_info(line_points, epsilon)
if X_test is None:
X_test = self.X_test
if y_test is None:
y_test = self.y_test
# we want to plot only two columns
data_1 = X_test[:, 0]
data_2 = X_test[:, 1]
if single:
if machine_colors is None:
machine_colors = gen_machine_colors(indice_info=indice_info)
plt.ion()
fig, ax = plt.subplots()
plot = ax.scatter([], [])
for indice in indice_info:
ax.set_title("All Machines")
ax.scatter(data_1[indice], data_2[indice], color=machine_colors[indice_info[indice]], label=create_labels(indice_info[indice]))
try:
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
except ValueError:
return ax
if not single:
if machine_colors is None:
machine_colors = {}
colors = gen_machine_colors(only_colors=True, num_colors=len(plot_machines))
for machine, color in zip(plot_machines, colors):
machine_colors[machine] = color
for machine in plot_machines:
plt.ion()
fig, ax = plt.subplots()
plot = ax.scatter([], [])
# set boundaries based on the data
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_title(machine)
for indice in indice_info:
if machine in indice_info[indice]:
ax.scatter(data_1[indice], data_2[indice], color=machine_colors[machine])
return ax
def voronoi(self, X_test=None, y_test=None, line_points=200, epsilon=None, indice_info=None, MSE=None, plot_machines=["ridge", "lasso", "random_forest", "tree"], machine_colors=None, gradient=False, single=False):
"""
Plot the input space and color query points as a Voronoi Tesselation based on the optimal machine used for that point.
Parameters
----------
epsilon: float, optional
Epsilon value to use for diagnostics. Used to find indice_info if it isn't passed.
line_points: int, optional
if epsilon is not passed, optimal epsilon is found per point. Used to find indice_info if it isn't passed.
indice_info: dicitonary, optional
dictionary mapping indice to optimal machines.
MSE: dictionary, optional
dictionary mapping indice to mean-squared error for optimal machines
plot_machines: list, optional
list of machines to be plotted.
single: bool, optional
plots a single plot with each machine combination.
gradient: bool, optional
instead of aggregating optimal machines, plots a colored plot for each machine,
shaded according to the mean-squared error of that "region"
machine_colors: dictionary, optional
Depending on the kind of coloring, a dictionary mapping machines to colors.
"""
if X_test is None:
X_test = self.X_test
if y_test is None:
y_test = self.y_test
if indice_info is None:
indice_info, MSE = self.indice_info(line_points, epsilon)
# passing input space to set up voronoi regions.
points = np.hstack((np.reshape(X_test[:, 0], (len(X_test[:, 0]), 1)), np.reshape(X_test[:, 1], (len(X_test[:, 1]), 1))))
vor = Voronoi(points)
# use helper Voronoi
regions, vertices = voronoi_finite_polygons_2d(vor)
# # colorize
if not single:
for machine in plot_machines:
fig, ax = plt.subplots()
plot = ax.scatter([], [])
ax.set_title(machine)
indice = 0
for region in regions:
ax.plot(X_test[:, 0][indice], X_test[:, 1][indice], 'ko')
polygon = vertices[region]
if gradient is True and MSE is not None:
# we find closest index from range to give gradient value
mse_range = np.linspace(min(MSE.values()), max(MSE.values()), 10)
num = min(mse_range, key=lambda x: abs(x - MSE[indice]))
index = np.where(mse_range == num)
alpha = index[0][0] / 10.0 + 0.2
if alpha > 1.0:
alpha = 1.0
# we fill the polygon with the appropriate gradient
if machine in indice_info[indice]:
ax.fill(*zip(*polygon), alpha=alpha, color='r')
else:
# if it isn't gradient based we just color red or blue depending on whether that point uses the machine in question
if machine in indice_info[indice]:
ax.fill(*zip(*polygon), alpha=0.4, color='r', label="")
else:
ax.fill(*zip(*polygon), alpha=0.4, color='b', label="")
indice += 1
ax.axis('equal')
plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
return vor
if single:
if machine_colors is None:
machine_colors = gen_machine_colors(indice_info=indice_info)
fig, ax = plt.subplots()
plot = ax.scatter([], [])
ax.set_title("All Machines")
indice = 0
for region in regions:
ax.plot(X_test[:, 0][indice], X_test[:, 1][indice], 'ko')
polygon = vertices[region]
ax.fill(*zip(*polygon), alpha=0.2, color=machine_colors[indice_info[indice]], label=create_labels(indice_info[indice]))
indice += 1
ax.axis('equal')
plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
try:
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
except ValueError:
return vor
return vor
|
<reponame>enwawerueli/footprints
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'forms/category_details.ui',
# licensing of 'forms/category_details.ui' applies.
#
# Created: Fri Feb 8 19:39:07 2019
# by: pyside2-uic running on PySide2 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_CategoryDetails(object):
    """Auto-generated (pyside2-uic) UI class for the CategoryDetails dialog.

    NOTE: this file is generated from forms/category_details.ui — edit the
    .ui form and regenerate rather than changing this file; manual edits are
    lost on regeneration.
    """
    def setupUi(self, CategoryDetails):
        """Build the dialog's widget tree and wire the button-box signals."""
        CategoryDetails.setObjectName("CategoryDetails")
        CategoryDetails.resize(387, 288)
        self.verticalLayout = QtWidgets.QVBoxLayout(CategoryDetails)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label_2 = QtWidgets.QLabel(CategoryDetails)
        self.label_2.setObjectName("label_2")
        self.verticalLayout.addWidget(self.label_2)
        self.details_tbw = QtWidgets.QTabWidget(CategoryDetails)
        self.details_tbw.setObjectName("details_tbw")
        # "Basic" tab: name / created / modified labels laid out in a grid
        self.basic_info_tab = QtWidgets.QWidget()
        self.basic_info_tab.setObjectName("basic_info_tab")
        self.gridLayout = QtWidgets.QGridLayout(self.basic_info_tab)
        self.gridLayout.setObjectName("gridLayout")
        self.label = QtWidgets.QLabel(self.basic_info_tab)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.label_5 = QtWidgets.QLabel(self.basic_info_tab)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 1, 0, 1, 1)
        self.modified_at_lb = QtWidgets.QLabel(self.basic_info_tab)
        self.modified_at_lb.setObjectName("modified_at_lb")
        self.gridLayout.addWidget(self.modified_at_lb, 2, 1, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.basic_info_tab)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 2, 0, 1, 1)
        self.name_lb = QtWidgets.QLabel(self.basic_info_tab)
        self.name_lb.setObjectName("name_lb")
        self.gridLayout.addWidget(self.name_lb, 0, 1, 1, 1)
        self.created_at_lb = QtWidgets.QLabel(self.basic_info_tab)
        self.created_at_lb.setObjectName("created_at_lb")
        self.gridLayout.addWidget(self.created_at_lb, 1, 1, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 3, 0, 1, 2)
        self.details_tbw.addTab(self.basic_info_tab, "")
        # "Description" tab: read-only rich-text area
        self.description_tab = QtWidgets.QWidget()
        self.description_tab.setObjectName("description_tab")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.description_tab)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.description_te = QtWidgets.QTextEdit(self.description_tab)
        self.description_te.setReadOnly(True)
        self.description_te.setObjectName("description_te")
        self.verticalLayout_2.addWidget(self.description_te)
        self.details_tbw.addTab(self.description_tab, "")
        self.verticalLayout.addWidget(self.details_tbw)
        self.buttonBox = QtWidgets.QDialogButtonBox(CategoryDetails)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout.addWidget(self.buttonBox)
        self.retranslateUi(CategoryDetails)
        self.details_tbw.setCurrentIndex(0)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), CategoryDetails.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), CategoryDetails.reject)
        QtCore.QMetaObject.connectSlotsByName(CategoryDetails)
    def retranslateUi(self, CategoryDetails):
        """Set all translatable UI strings (placeholder text included)."""
        CategoryDetails.setWindowTitle(QtWidgets.QApplication.translate("CategoryDetails", "Dialog", None, -1))
        self.label_2.setText(QtWidgets.QApplication.translate("CategoryDetails", "<h4>Category Details</h4>", None, -1))
        self.label.setText(QtWidgets.QApplication.translate("CategoryDetails", "Name :", None, -1))
        self.label_5.setText(QtWidgets.QApplication.translate("CategoryDetails", "Created :", None, -1))
        self.modified_at_lb.setText(QtWidgets.QApplication.translate("CategoryDetails", "modification date", None, -1))
        self.label_7.setText(QtWidgets.QApplication.translate("CategoryDetails", "Last modified :", None, -1))
        self.name_lb.setText(QtWidgets.QApplication.translate("CategoryDetails", "name", None, -1))
        self.created_at_lb.setText(QtWidgets.QApplication.translate("CategoryDetails", "creation date", None, -1))
        self.details_tbw.setTabText(self.details_tbw.indexOf(self.basic_info_tab), QtWidgets.QApplication.translate("CategoryDetails", "Basic", None, -1))
        self.description_te.setHtml(QtWidgets.QApplication.translate("CategoryDetails", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Cantarell\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">No description.</p></body></html>", None, -1))
        self.details_tbw.setTabText(self.details_tbw.indexOf(self.description_tab), QtWidgets.QApplication.translate("CategoryDetails", "Description", None, -1))
|
#!/usr/bin/env python3
import argparse
import errno
import select
import signal
import socket
import sys
import threading
class Server:
    '''
    Server is the passive side of client-server architecture model. It binds
    to one or more local interface and listens for incoming connection on some
    port.

    Args:
        payload_size (int): Max size of an incoming message. Defaults to 1024.
    '''
    def __init__(self, payload_size = 1024):
        self.payload_size = payload_size
        self.lock = threading.Lock()  # guards stopped / socket / threads
        self.threads = []             # one handler thread per accepted peer
        self.stopped = True
        self.socket = None
    def start(self, host, port):
        '''
        Starts the server. Blocks until the server is stopped.

        Args:
            host (str): Interfaces address where the server should listen on.
            port (int): Port number where the server should listen on.
        '''
        address = (host, port)
        print(f'starting the server at {address[0]}:{address[1]}...')
        with self.lock:
            if not self.stopped:
                raise Exception('server is already running')
            # BUG FIX: mark the server as running. Previously `stopped` was
            # never cleared, so the "already running" guard above could never
            # fire and a second start() could run concurrently.
            self.stopped = False
            self.socket = self.create_server(address)
        self.listen_connections()
        self.socket.close()
        with self.lock:
            self.socket = None
    def stop(self):
        '''
        Stops the server: closes the listening socket (which unblocks the
        accept loop) and joins all peer-handler threads.
        '''
        with self.lock:
            print('server has received a signal to stop...')
            self.stopped = True
            if self.socket:
                # closing the socket makes select()/recv() fail with EBADF,
                # which the loops treat as the shutdown signal
                self.socket.close()
            print(self.threads)
            for t in self.threads:
                print(t)
                t.join()
            self.threads = []
    def create_server(self, address):
        '''
        Creates a socket to accept connection on server side.

        Args:
            address (tuple): the address and port.
        '''
        ss = socket.create_server(address, reuse_port = True)
        ss.setblocking(False) # set the socket as asynchronous
        return ss
    def listen_connections(self):
        '''
        Accepts new connections from remote peers, spawning one handler
        thread per peer, until the listening socket is closed.
        '''
        while True:
            reading_list = []
            try:
                reading_list, _, _ = select.select([self.socket], [], [])
            except IOError as e:
                if e.errno == errno.EBADF: # server has been stopped
                    break
            except Exception as e:
                print(f'an exception occurred while listening for connections: Exception = {e}', file = sys.stderr)
                break
            for r in reading_list:
                peer_connection, peer_address = self.socket.accept()
                thread = threading.Thread(target = self.handle_connection, args = (peer_connection, peer_address))
                thread.start()
                self.threads.append(thread)
    def handle_connection(self, peer_conn, peer_address):
        '''
        Handles the connection from a remote peer, extracts the payload and
        passes it to request_handler.

        Args:
            peer_conn (obj): Inherited socket object.
            peer_address (tuple): A tuple with the address and port from the remote peer.
        '''
        peer_id = f'{peer_address[0]}:{peer_address[1]}'
        print(f'handling connection from peer {peer_id}...')
        messages_count = 0
        while True:
            try:
                messages_count += 1
                data = peer_conn.recv(self.payload_size)
                if not data:
                    print(f' > message #{messages_count}: no content received, closing the connection with {peer_id}...')
                    break
                self.request_handler(peer_conn, peer_address, str(data, encoding='utf-8'))
            except IOError as e:
                # BUG FIX: "Resource temporarily unavailable" on a non-blocking
                # socket is EAGAIN/EWOULDBLOCK, not EDEADLK (deadlock); EDEADLK
                # only matched by accident on platforms where the numbers collide.
                if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK): # no data available yet on the asynchronous socket
                    continue
                if e.errno == errno.EBADF: # the file descriptor was closed elsewhere; abort the loop to end the thread
                    break
            except Exception as e:
                print(f'an exception occurred while handling a connection from peer {peer_id}: Exception = {e}', file = sys.stderr)
                break
        peer_conn.close()
    def request_handler(self, peer_conn, peer_address, data = ''):
        # Subclasses implement the per-message application logic here.
        raise NotImplementedError("please implement this method")
|
import csv
import sys
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
# Dataset split to convert (e.g. "train", "dev", "test"); selects ./<split>.csv.
data_type = sys.argv[1]
def read_csv(input_file):
    """Read a csv file into a list of per-row dictionaries keyed by the header.

    Args:
        input_file (str): path to the csv file.

    Returns:
        list: one dict per data row.
    """
    lines = []
    # newline='' is required by the csv module so quoted fields containing
    # newlines are parsed correctly (see csv module docs).
    with open(input_file, 'r', newline='') as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            lines.append(row)
    return lines
def filter_answer(answer):
    """Return True when *answer* is usable: non-empty and not a
    'none of the above' placeholder."""
    return bool(answer) and not answer.lower().startswith('none of the above')
def convert():
    """Convert ./<data_type>.csv records into tab-separated multiple-choice
    examples: context, question, four answer options and the gold label.

    Questions are rewritten into declarative prefixes via convert_question
    when possible; unusable answer options ("none of the above" / empty) are
    replaced by copying another usable non-gold option. Examples whose gold
    answer is itself unusable are dropped.
    """
    records = read_csv('./%s.csv' % (data_type))
    examples = []
    for (i, record) in enumerate(records):
        guid = record["id"]
        context = record["context"]
        question = record["question"]
        label = int(record["label"])
        new_question = convert_question(question)
        if new_question is not None:
            question = new_question
        # skip the example entirely when the gold answer is unusable
        if not filter_answer(record["answer%d" % (label)]):
            continue
        for j in range(4):
            if not filter_answer(record["answer%d" % (j)]):
                # replace a bad distractor with any usable non-gold option
                for k in range(4):
                    if k == label or k == j:
                        continue
                    if filter_answer(record["answer%d" % (k)]):
                        record["answer%d" % (j)] = record["answer%d" % (k)]
                        break
        examples.append("%s\t%s\t%s\t%s\t%s\t%s\t%d" % (context, question,
            record["answer0"], record["answer1"], record["answer2"], record["answer3"],
            label))
    print(len(examples))
    return examples
def lemma(sentence):
    """Lemmatize a whitespace-tokenised sentence using WordNet.

    Args:
        sentence (str): space-separated tokens.

    Returns:
        tuple: (lemmatized_sentence, wordnet_pos_tags), or (None, None) when
        POS tagging fails (e.g. missing NLTK data).
    """
    def get_wordnet_pos(tag):
        # Map a Penn Treebank tag prefix to the corresponding WordNet POS.
        if tag.startswith('J'):
            return wordnet.ADJ
        elif tag.startswith('V'):
            return wordnet.VERB
        elif tag.startswith('N'):
            return wordnet.NOUN
        elif tag.startswith('R'):
            return wordnet.ADV
        else:
            return None
    tokens = sentence.split(' ')
    try:
        tagged_sent = pos_tag(tokens)
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; keep the (None, None) fallback but only for real errors.
    except Exception:
        return None, None
    wnl = WordNetLemmatizer()
    lemmas_sent, tags = [], []
    for tag in tagged_sent:
        # unknown tags default to NOUN, matching WordNetLemmatizer's default
        wordnet_pos = get_wordnet_pos(tag[1]) or wordnet.NOUN
        tags.append(wordnet_pos)
        lemmas_sent.append(wnl.lemmatize(tag[0], pos=wordnet_pos))
    return ' '.join(lemmas_sent), tags
def convert_question(question):
    """Rewrite a "what ..." / "why ..." question into a declarative prefix.

    Returns the rewritten string, or None when the question shape is not
    handled (non what/why questions, or when POS tagging fails).
    """
    # subordinating words: a trailing clause starting with one of these is
    # split off and re-attached in front of the rewritten question
    conj_words = ['if', 'during', 'because', 'despite', 'when', 'while', 'after', 'once']
    pre_question = ''
    question = question.strip()
    question = question[0].lower() + question[1:]
    # drop the final token -- presumably the question mark; TODO confirm
    question_words = question.split(' ')[:-1]
    for word in conj_words:
        if word in question_words[2:]:
            word_index = question_words.index(word)
            pre_question = ' '.join(question_words[word_index:] + [','])
            question_words = question_words[:word_index]
            break
    question = ' '.join(question_words)
    lemma_question, tags = lemma(question)
    if lemma_question is None:
        return None
    if question_words[0] == 'what':
        # "what happens/happened ..." -> "it ... that"
        if len(lemma_question.split(' ')) <= 4 and 'happen' in lemma_question:
            # new_question = ''
            new_question = ' '.join(['it'] + question_words[1:] + ['that'])
        # pattern "what do <subject> do": keep only the subject
        elif len(lemma_question.split(' ')) == 4 and ' '.join(lemma_question.split(' ')[:2] + [lemma_question.split(' ')[3]]) == 'what do do':
            new_question = question_words[2]
            if new_question == 'you':
                new_question = 'I'
        elif 'a possible reason' in ' '.join(question_words[:6]):
            new_question = ' '.join(question_words[question_words.index('reason') + 1:] + ['because'])
        # "what <noun> ..." -> "the <noun> ... is that" (wordnet.NOUN == 'n')
        elif tags[1] == 'n' and lemma_question.split(' ')[1] not in ['may', 'have', 'do', 'be', 'will', 'can']:
            new_question = ' '.join(['the'] + question_words[1:] + ['is', 'that'])
        else:
            new_question = ' '.join(['it'] + question_words[1:] + ['that'])
    elif question_words[0] == 'why':
        new_question = ' '.join(question_words[2:] + ['because'])
    else:
        return None
    if new_question == '':
        new_question = pre_question
    elif pre_question != '':
        new_question = pre_question + ' ' + new_question
    new_question = new_question[0].upper() + new_question[1:]
    return new_question
# Write one tab-separated example per line to CosmosQA-PQA.<split>.
with open('CosmosQA-PQA.%s' % (data_type), 'w') as w:
    w.write('\n'.join(convert()))
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import itertools
import math
from collections import defaultdict
import numpy as np
import torch
from PIL import Image
from toolz import compose, curry
from toolz import partition_all
from torch.utils.data import Dataset
from torchvision.datasets.utils import iterable_to_str, verify_str_arg
_open_to_array = compose(np.array, Image.open)
class DataNotSplitException(Exception):
    """Raised when an operation requires the dataset to have been split first."""
    pass
def _get_classes_and_counts(mask_list):
class_counts_dict = defaultdict(int)
for mask in mask_list:
for class_label, class_count in zip(*np.unique(mask, return_counts=True)):
class_counts_dict[class_label] += class_count
return list(class_counts_dict.keys()), list(class_counts_dict.values())
def _combine(mask_array):
"""Combine classes 2 and 3. Reduce all classes above 3 by one
"""
mask_array[np.logical_or(mask_array == 2, mask_array == 3)] = 2
for i in filter(lambda x: x > 3, np.unique(mask_array)):
mask_array[mask_array == i] = i - 1
return mask_array
def _combine_classes(mask_array_list):
    """Combine classes

    Segmentation implementations using this dataset seem to combine
    classes 2 and 3 so we do the same here and then relabel the rest.
    Each input mask is copied before being modified.

    Args:
        mask_array_list (list): list of mask (numpy.Array)
    """
    combined = []
    for mask_array in mask_array_list:
        combined.append(_combine(mask_array.copy()))
    return combined
def _replicate_channels(image_array, n_channels):
new_image_array = np.zeros((n_channels, image_array.shape[0], image_array.shape[1]))
for i in range(n_channels):
new_image_array[i] = image_array
return new_image_array
def _number_patches_in(height_or_width, patch_size, stride, complete_patches_only=True):
strides_in_hw = (height_or_width - patch_size) / stride
if complete_patches_only:
return int(np.floor(strides_in_hw))
else:
return int(np.ceil(strides_in_hw))
def _is_2D(numpy_array):
return len(numpy_array.shape) == 2
def _is_3D(numpy_array):
return len(numpy_array.shape) == 3
@curry
def _extract_patches(patch_size, stride, complete_patches_only, img_array, mask_array):
    """Build aligned image/mask patch generators for one inline.

    Curried (toolz): partially applying (patch_size, stride,
    complete_patches_only) yields an extractor taking (img_array, mask_array).

    Returns:
        tuple: (image_patch_generator, mask_patch_generator, patch_locations),
        where patch_locations lists the (h, w) top-left corner of each patch.
    """
    height, width = img_array.shape[-2], img_array.shape[-1]
    num_h_patches = _number_patches_in(height, patch_size, stride, complete_patches_only=complete_patches_only)
    num_w_patches = _number_patches_in(width, patch_size, stride, complete_patches_only=complete_patches_only)
    height_iter = range(0, stride * (num_h_patches + 1), stride)
    width_iter = range(0, stride * (num_w_patches + 1), stride)
    # every (h, w) combination of the stride offsets
    patch_locations = list(itertools.product(height_iter, width_iter))
    image_patch_generator = _generate_patches_for(img_array, patch_locations, patch_size)
    mask_patch_generator = _generate_patches_for(mask_array, patch_locations, patch_size)
    return image_patch_generator, mask_patch_generator, patch_locations
def _generate_patches_for(numpy_array, patch_locations, patch_size):
    """Dispatch patch generation to the 2D or 3D implementation based on
    the array's rank; raise ValueError for any other rank."""
    if _is_2D(numpy_array):
        return _generate_patches_from_2D(numpy_array, patch_locations, patch_size)
    if _is_3D(numpy_array):
        return _generate_patches_from_3D(numpy_array, patch_locations, patch_size)
    raise ValueError("Array is not 2D or 3D")
def _generate_patches_from_2D(numpy_array, patch_locations, patch_size):
return (numpy_array[h : h + patch_size, w : w + patch_size].copy() for h, w in patch_locations)
def _generate_patches_from_3D(numpy_array, patch_locations, patch_size):
return (numpy_array[:, h : h + patch_size, w : w + patch_size].copy() for h, w in patch_locations)
# Named reduction functions used by InlinePatchDataset.statistics.
_STATS_FUNCS = {"mean": np.mean, "std": np.std, "max": np.max}
def _transform_CHW_to_HWC(numpy_array):
return np.moveaxis(numpy_array, 0, -1)
def _transform_HWC_to_CHW(numpy_array):
return np.moveaxis(numpy_array, -1, 0)
def _rescale(numpy_array):
    """Rescale the array by a factor of 10000.

    NOTE(review): the original comment claimed a maximum of 32737 — the int16
    maximum is 32767; dividing by 10000 brings int16-range amplitudes roughly
    into [-3.3, 3.3].
    """
    return numpy_array / 10000
def _split_train_val_test(partition, val_ratio, test_ratio):
total_samples = len(partition)
val_samples = math.floor(val_ratio * total_samples)
test_samples = math.floor(test_ratio * total_samples)
train_samples = total_samples - (val_samples + test_samples)
train_list = partition[:train_samples]
val_list = partition[train_samples : train_samples + val_samples]
test_list = partition[train_samples + val_samples : train_samples + val_samples + test_samples]
return train_list, val_list, test_list
class InlinePatchDataset(Dataset):
"""Dataset that returns patches from the numpy dataset
Notes:
Loads inlines only and splits into patches
"""
_repr_indent = 4
    def __init__(
        self,
        data_array,
        mask_array,
        patch_size,
        stride,
        split="train",
        transforms=None,
        max_inlines=None,
        n_channels=1,
        complete_patches_only=True,
        val_ratio=0.1,
        test_ratio=0.2,
    ):
        """Initialise Numpy Dataset

        Args:
            data_array (numpy.Array): a 3D numpy array that contain the seismic info
            mask_array (numpy.Array): a 3D numpy array that contains the labels
            patch_size (int): the size of the patch in pixels
            stride (int): the stride applied when extracting patches
            split (str, optional): what split to load, (train, val, test). Defaults to `train`
            transforms (albumentations.augmentations.transforms, optional): albumentation transforms to apply to patches. Defaults to None
            max_inlines (int, optional): maximum number of inlines to load. Defaults to None
            n_channels (int, optional): number of channels that the output should contain. Defaults to 1
            complete_patches_only (bool, optional): whether to load incomplete patches that are padded to patch_size. Defaults to True
            val_ratio (float): ratio to use for validation. Defaults to 0.1
            test_ratio (float): ratio to use for test. Defaults to 0.2
        """
        super(InlinePatchDataset, self).__init__()
        self._data_array = data_array
        self._slice_mask_array = mask_array
        self._split = split
        self._max_inlines = max_inlines
        self._n_channels = n_channels
        self._complete_patches_only = complete_patches_only
        self._patch_size = patch_size
        self._stride = stride
        # populated below while extracting patches
        self._image_array = []
        self._mask_array = []
        self._ids = []
        self._patch_locations = []
        self.transforms = transforms
        valid_modes = ("train", "test", "val")
        msg = "Unknown value '{}' for argument split. " "Valid values are {{{}}}."
        msg = msg.format(split, iterable_to_str(valid_modes))
        verify_str_arg(split, "split", valid_modes, msg)
        # Set the patch and stride for the patch extractor (curried helper)
        _extract_patches_from = _extract_patches(patch_size, stride, self._complete_patches_only)
        # Split inline indexes into train/val/test within each of N contiguous
        # partitions so every split samples across the whole volume.
        num_partitions = 5
        indexes = self._data_array.shape[0]
        num_elements = math.ceil(indexes / num_partitions)
        train_indexes_list = []
        test_indexes_list = []
        val_indexes_list = []
        for partition in partition_all(num_elements, range(indexes)): # Partition files into N partitions
            train_indexes, val_indexes, test_indexes = _split_train_val_test(partition, val_ratio, test_ratio)
            train_indexes_list.extend(train_indexes)
            test_indexes_list.extend(test_indexes)
            val_indexes_list.extend(val_indexes)
        if split == "train":
            indexes = train_indexes_list
        elif split == "val":
            indexes = val_indexes_list
        elif split == "test":
            indexes = test_indexes_list
        # Extract patches
        for index in indexes:
            img_array = self._data_array[index]
            mask_array = self._slice_mask_array[index]
            self._ids.append(index)
            image_generator, mask_generator, patch_locations = _extract_patches_from(img_array, mask_array)
            self._patch_locations.extend(patch_locations)
            self._image_array.extend(image_generator)
            self._mask_array.extend(mask_generator)
        assert len(self._image_array) == len(self._patch_locations), "The shape is not the same"
        assert len(self._patch_locations) % len(self._ids) == 0, "Something is wrong with the patches"
        # every inline yields the same number of patches
        self._patches_per_image = int(len(self._patch_locations) / len(self._ids))
        self._classes, self._class_counts = _get_classes_and_counts(self._mask_array)
def __len__(self):
    """Total number of patches extracted for this split."""
    return len(self._image_array)
@property
def n_classes(self):
    """Number of distinct mask classes present in the dataset."""
    return len(self._classes)
@property
def class_proportions(self):
    """Per-class pixel fractions as a list of (class, fraction) pairs."""
    total_count = np.sum(self._class_counts)
    return [
        (cls, count / total_count)
        for cls, count in zip(self._classes, self._class_counts)
    ]
def _add_extra_channels(self, image):
    """Replicate the image across channels when more than one is requested."""
    if self._n_channels <= 1:
        return image
    return _replicate_channels(image, self._n_channels)
def __getitem__(self, index):
    """Return one patch as (image, mask, source-section id, patch location).

    The image tensor is float, the mask tensor is long with a leading
    singleton channel dimension; patch location is returned as an ndarray.
    """
    image = self._image_array[index]
    target = self._mask_array[index]
    # Patches are stored in section order, a fixed number per section.
    ids = self._ids[index // self._patches_per_image]
    patch_locations = self._patch_locations[index]

    image = self._add_extra_channels(image)
    if _is_2D(image):
        image = np.expand_dims(image, 0)

    if self.transforms is not None:
        # The transform pipeline expects channels-last input.
        image = _transform_CHW_to_HWC(image)
        augmented = self.transforms(image=image, mask=target)
        image, target = augmented["image"], augmented["mask"]
        image = _transform_HWC_to_CHW(image)

    target = np.expand_dims(target, 0)
    return (
        torch.from_numpy(image).float(),
        torch.from_numpy(target).long(),
        ids,
        np.array(patch_locations),
    )
@property
def statistics(self):
    """Summary statistics (mean/std/max) over every pixel of every patch."""
    flattened = np.concatenate([patch.flatten() for patch in self._image_array])
    computed = {name: fn(flattened) for name, fn in _STATS_FUNCS.items()}
    return "Mean: {mean} Std: {std} Max: {max}".format(**computed)
def __repr__(self):
    """Multi-line summary: class name, size, extra_repr lines, transforms."""
    header = "Dataset " + self.__class__.__name__
    rows = ["Number of datapoints: {}".format(self.__len__())]
    rows.extend(self.extra_repr().splitlines())
    if hasattr(self, "transforms") and self.transforms is not None:
        rows.append(repr(self.transforms))
    indent = " " * self._repr_indent
    return "\n".join([header] + [indent + row for row in rows])
def _format_transform_repr(self, transform, head):
    """Prefix a transform's repr with `head`, aligning continuation lines."""
    body = transform.__repr__().splitlines()
    first = "{}{}".format(head, body[0])
    pad = " " * len(head)
    return [first] + [pad + line for line in body[1:]]
def extra_repr(self):
    """Dataset-specific description lines appended by __repr__."""
    parts = [
        "Split: {}".format(self._split),
        "Patch size: {}".format(self._patch_size),
        "Stride: {}".format(self._stride),
        "Max inlines: {}".format(self._max_inlines),
        "Num channels: {}".format(self._n_channels),
        "Num classes: {}".format(self.n_classes),
        "Class proportions: {}".format(self.class_proportions),
        "Complete patches only: {}".format(self._complete_patches_only),
        "Dataset statistics: {}".format(self.statistics),
    ]
    return "\n".join(parts)
_TRAIN_PATCH_DATASETS = {"none": InlinePatchDataset}
def get_patch_dataset(cfg):
    """ Return the Dataset class for Numpy Array

        Args:
            cfg: yacs config

        Returns:
            InlinePatchDataset

        Raises:
            AssertionError: if cfg.TRAIN.DEPTH is not a supported depth mode.
    """
    # Normalize once and use the normalized key for both the validation and
    # the lookup. Previously the assert lower-cased the value but the lookup
    # used the raw cfg value, so e.g. "None" only matched via the .get()
    # fallback default rather than the table itself.
    depth = str(cfg.TRAIN.DEPTH).lower()
    assert depth in _TRAIN_PATCH_DATASETS, (
        f"Depth {cfg.TRAIN.DEPTH} not supported for patch data. "
        f"Valid values: {', '.join(_TRAIN_PATCH_DATASETS)}."
    )
    return _TRAIN_PATCH_DATASETS[depth]
|
<gh_stars>10-100
import time, math, re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import torch
def savePlot(points, outpath):
    """Plot `points` as a simple line chart and save the figure to `outpath`.

    Fix: the original called plt.figure() and then plt.subplots(), creating
    an extra figure per call that was never drawn on.
    """
    fig, ax = plt.subplots()
    # this locator puts ticks at regular intervals
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    ax.plot(points)
    fig.savefig(outpath)
    plt.close('all')
import time, math
def asHMS(s):
    """Format a duration in seconds as 'DDdHHhMMmSSs'.

    Fix: the original never reduced hours modulo 24 or minutes modulo 60,
    so e.g. 90061s rendered as '01d25h1501m01s' instead of '01d01h01m01s'.
    Fractional seconds are truncated, matching the original int() behavior.
    """
    total = int(s)
    d, rem = divmod(total, 24 * 60 * 60)
    h, rem = divmod(rem, 60 * 60)
    m, sec = divmod(rem, 60)
    return '{:02d}d{:02d}h{:02d}m{:02d}s'.format(d, h, m, sec)
def create_mask(lengths):
    """Build a float padding mask of shape (N, max_len).

    Row i holds 1.0 in its first lengths[i] positions and 0.0 elsewhere.
    """
    batch = lengths.shape[0]
    max_len = lengths.max()
    mask = torch.zeros(batch, max_len)
    for row, valid in enumerate(lengths):
        mask[row, :valid] = 1.0
    return mask
def timeSince(since, percent):
    """Elapsed time since `since` plus the estimated remaining time, given
    the fraction of work completed (`percent` in (0, 1]).

    Fix: the original called asMinutes(), which is not defined anywhere in
    this module (NameError at runtime); asHMS() is the formatter this module
    provides. Raises ZeroDivisionError when percent == 0, as before.
    """
    now = time.time()
    s = now - since
    es = s / (percent)  # projected total time
    rs = es - s         # projected remaining time
    return '%s (- %s)' % (asHMS(s), asHMS(rs))
def decompose_hangul(text):
    """Decompose precomposed Hangul syllables into compatibility jamo.

    Each syllable in U+AC00..U+D7A3 is replaced by its initial, medial and
    (possibly empty) final jamo; all other characters pass through unchanged.
    Leading/trailing whitespace is stripped, as in the original.
    """
    # Unicode Hangul syllable block arithmetic: 588 syllables per initial,
    # 28 per medial.
    Start_Code, ChoSung, JungSung = 44032, 588, 28
    ChoSung_LIST = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
    JungSung_LIST = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ',
                    'ㅣ']
    JongSung_LIST = ['', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ',
                    'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
    pieces = []
    for ch in text.strip():
        if re.match('.*[ㄱ-ㅎㅏ-ㅣ가-힣]+.*', ch) is not None:
            offset = ord(ch) - Start_Code
            cho, rem = divmod(offset, ChoSung)
            jung, jong = divmod(rem, JungSung)
            pieces.append(ChoSung_LIST[cho] + JungSung_LIST[jung] + JongSung_LIST[jong])
        else:
            pieces.append(ch)
    return ''.join(pieces)
def stat(datapath):
    """Histogram the mel-spectrogram lengths of every *.mel file under datapath.

    Saves the histogram to 'stat_hist.png' and prints the names of files
    whose mel sequence exceeds 1000 frames.

    NOTE(review): `glob`, `pickle` and `np` are not imported in this module,
    so this helper raises NameError as-is — confirm whether it is still used.
    `lin` is loaded but never used.
    """
    flist = glob.glob(datapath+'/*.mel')
    wavlen = []
    for fname in flist:
        mellin = pickle.load(open(fname, 'rb'))
        mel = mellin['mel']
        lin = mellin['lin']
        wavlen.append(len(mel))
        # flag suspiciously long utterances
        if len(mel) > 1000:
            print(fname)
    plt.hist(wavlen)
    plt.title('mean: {:.3f}, stdev: {:.3f}'.format(np.mean(wavlen), np.std(wavlen)))
    plt.savefig('stat_hist.png')
def decay_learning_rate(init_lr, it, iter_per_epoch, start_epoch=0):
    """Noam-style learning-rate schedule with a 4000-step warmup.

    The rate grows linearly for the first 4000 global steps and then decays
    proportionally to the inverse square root of the step number.
    """
    warmup = 4000
    global_step = start_epoch * iter_per_epoch + it + 1
    scale = min(global_step * warmup ** -1.5, global_step ** -0.5)
    return init_lr * warmup ** 0.5 * scale
def saveAttention(input_sentence, attentions, outpath):
    """Render an attention matrix as a grayscale image and save it to outpath.

    `attentions` is expected to be a torch tensor (``.cpu().numpy()`` is
    called on it) with encoder steps on rows and decoder steps on columns.
    """
    # Set up figure with colorbar
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.ticker as ticker
    fig, ax = plt.subplots()
    cax = ax.matshow(attentions.cpu().numpy(), aspect='auto', origin='upper',cmap='gray')
    # fig.colorbar(cax)
    plt.ylabel('Encoder timestep', fontsize=18)
    plt.xlabel('Decoder timestep', fontsize=18)
    if input_sentence:
        # NOTE(review): this branch only re-sets the same y-label;
        # input_sentence was presumably meant to become axis tick labels —
        # confirm the original intent.
        plt.ylabel('Encoder timestep', fontsize=18)
    plt.tight_layout()
    plt.savefig(outpath)
    plt.close('all')
def spectrogram2wav(spectrogram, n_fft, win_length, hop_length, num_iters):
    '''
    spectrogram: [t, f], i.e. [t, nfft // 2 + 1]

    Invert a normalized dB magnitude spectrogram to a waveform: undo the
    [0, 1] normalization back to dB, add the reference level, convert dB to
    linear amplitude, and reconstruct phase with Griffin-Lim.
    '''
    min_level_db = -100
    ref_level_db = 20
    mag_db = spectrogram.T
    # denormalize: map [0, 1] back onto [min_level_db, 0] dB
    mag_db = np.clip(mag_db, 0, 1) * -min_level_db + min_level_db
    mag_db = mag_db + ref_level_db
    # Convert back to linear
    magnitude = np.power(10.0, mag_db * 0.05)
    # Reconstruct phase (the ** 1.5 sharpens the magnitude before inversion)
    return _griffin_lim(magnitude ** 1.5, n_fft, win_length, hop_length, num_iters)
def _griffin_lim(S, n_fft, win_length, hop_length, num_iters):
    """Griffin-Lim phase reconstruction: iteratively re-estimate phase from
    the fixed magnitude spectrogram S and return the final waveform.

    Fix: `np.complex` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin `complex` is the documented replacement and is equivalent here.

    NOTE(review): with num_iters == 0 the loop never assigns `y` and this
    raises UnboundLocalError, as in the original — callers pass >= 1.
    """
    # Start from random phase; the magnitude is kept fixed each iteration.
    angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    S_complex = np.abs(S).astype(complex)
    for i in range(num_iters):
        if i > 0:
            # Re-estimate phase from the previous waveform's STFT.
            angles = np.exp(1j * np.angle(librosa.stft(y=y, n_fft=n_fft, hop_length=hop_length, win_length=win_length)))
        y = librosa.istft(S_complex * angles, hop_length=hop_length, win_length=win_length)
    return y
def preprocess_text(dataset, text):
    """Dataset-specific text normalization.

    'emotion' decomposes Hangul into jamo, 'librispeech' lower-cases, and
    any other dataset name passes the text through unchanged.
    """
    if dataset == 'librispeech':
        return text.lower()
    if dataset == 'emotion':
        return decompose_hangul(text)
    return text
|
<gh_stars>10-100
# global
import numpy as np
# local
from ivy_vision_tests.data import TestData
from ivy_vision import single_view_geometry as ivy_svg
class SingleViewGeometryTestData(TestData):
    """Extends the shared vision TestData with bilinear-sampling fixtures:
    a tiled 3x3 ramp image, a warp field, and the expected warped result."""

    def __init__(self):
        super().__init__()

        # bilinear sampling
        # Fix: np.float was removed in NumPy 1.24; the builtin float is the
        # documented equivalent (float64).
        self.simple_image = np.tile(np.arange(9).astype(float).reshape((1, 1, 3, 3, 1)),
                                    (self.batch_size, 1, 1, 1, 1))
        self.warp = np.tile(np.array([[[[[0.0, 0.0], [0.5, 0.5], [2.0, 0.0]],
                                        [[1.5, 0.5], [1.0, 1.0], [0.5, 1.5]],
                                        [[0.0, 2.0], [1.5, 1.5], [2.0, 2.0]]]]]), (self.batch_size, 1, 1, 1, 1))
        self.warped_simple_image = np.tile(np.array([[[[[0.], [2.], [2.]],
                                                       [[3.], [4.], [5.]],
                                                       [[6.], [6.], [8.]]]]]), (self.batch_size, 1, 1, 1, 1))
td = SingleViewGeometryTestData()
# Each test below exercises an ivy_vision.single_view_geometry function on
# the shared fixture `td`, checking both the batched call and the unbatched
# (leading-dimension-stripped) call against precomputed expected arrays.
def test_create_uniform_pixel_coords_image(dev_str, call):
    assert np.array_equal(
        call(ivy_svg.create_uniform_pixel_coords_image, td.image_dims, (td.batch_size, td.num_cameras)),
        td.uniform_pixel_coords)
    assert np.array_equal(call(ivy_svg.create_uniform_pixel_coords_image, td.image_dims, (td.num_cameras,)),
                          td.uniform_pixel_coords[0])
    call(ivy_svg.create_uniform_pixel_coords_image, td.image_dims, (td.num_cameras,), True)


def test_persp_angles_to_focal_lengths(dev_str, call):
    assert np.allclose(call(ivy_svg.persp_angles_to_focal_lengths, td.persp_angles, td.image_dims, dev_str=dev_str),
                       td.focal_lengths, atol=1e-6)
    assert np.allclose(call(ivy_svg.persp_angles_to_focal_lengths, td.persp_angles[0], td.image_dims, dev_str=dev_str),
                       td.focal_lengths[0], atol=1e-6)


def test_focal_lengths_to_persp_angles(dev_str, call):
    assert np.allclose(call(ivy_svg.focal_lengths_to_persp_angles, td.focal_lengths, td.image_dims, dev_str=dev_str),
                       td.persp_angles, atol=1e-6)
    assert np.allclose(call(ivy_svg.focal_lengths_to_persp_angles, td.focal_lengths[0], td.image_dims, dev_str=dev_str),
                       td.persp_angles[0], atol=1e-6)


def test_focal_lengths_and_pp_offsets_to_calib_mats(dev_str, call):
    assert np.allclose(call(ivy_svg.focal_lengths_and_pp_offsets_to_calib_mat, td.focal_lengths,
                            td.pp_offsets, dev_str=dev_str), td.calib_mats, atol=1e-6)
    assert np.allclose(call(ivy_svg.focal_lengths_and_pp_offsets_to_calib_mat, td.focal_lengths[0],
                            td.pp_offsets[0], dev_str=dev_str), td.calib_mats[0], atol=1e-6)


def test_rot_mats_and_cam_centers_to_ext_mats(dev_str, call):
    assert np.allclose(call(ivy_svg.rot_mat_and_cam_center_to_ext_mat, td.Rs, td.C_hats), td.ext_mats, atol=1e-6)
    assert np.allclose(call(ivy_svg.rot_mat_and_cam_center_to_ext_mat, td.Rs[0], td.C_hats[0]),
                       td.ext_mats[0], atol=1e-6)


def test_depth_to_ds_pixel_coords(dev_str, call):
    assert (np.allclose(call(ivy_svg.depth_to_ds_pixel_coords, td.depth_maps, td.uniform_pixel_coords),
                        td.pixel_coords_to_scatter, atol=1e-4))
    # pixel coords argument is optional; omitting it must give the same result
    assert (np.allclose(call(ivy_svg.depth_to_ds_pixel_coords, td.depth_maps), td.pixel_coords_to_scatter, atol=1e-4))
    assert (np.allclose(call(ivy_svg.depth_to_ds_pixel_coords, td.depth_maps[0], td.uniform_pixel_coords[0]),
                        td.pixel_coords_to_scatter[0], atol=1e-4))


def test_depth_to_radial_depth(dev_str, call):
    assert (np.allclose(call(ivy_svg.depth_to_radial_depth, td.depth_maps, td.inv_calib_mats),
                        td.radial_depth_maps, atol=1e-4))
    # NOTE(review): this assert duplicates the one above verbatim.
    assert (np.allclose(call(ivy_svg.depth_to_radial_depth, td.depth_maps, td.inv_calib_mats),
                        td.radial_depth_maps, atol=1e-4))
    assert (np.allclose(call(ivy_svg.depth_to_radial_depth, td.depth_maps[0],
                             td.inv_calib_mats[0]), td.radial_depth_maps[0], atol=1e-4))


def test_ds_pixel_coords_to_radial_depth(dev_str, call):
    assert (np.allclose(call(ivy_svg.ds_pixel_coords_to_radial_depth, td.pixel_coords_to_scatter, td.inv_calib_mats),
                        td.radial_depth_maps, atol=1e-4))
    # NOTE(review): this assert duplicates the one above verbatim.
    assert (np.allclose(call(ivy_svg.ds_pixel_coords_to_radial_depth, td.pixel_coords_to_scatter, td.inv_calib_mats),
                        td.radial_depth_maps, atol=1e-4))
    assert (np.allclose(call(ivy_svg.ds_pixel_coords_to_radial_depth, td.pixel_coords_to_scatter[0],
                             td.inv_calib_mats[0]), td.radial_depth_maps[0], atol=1e-4))


def test_cam_to_ds_pixel_coords(dev_str, call):
    assert (
        np.allclose(call(ivy_svg.cam_to_ds_pixel_coords, td.cam_coords, td.calib_mats), td.pixel_coords_to_scatter, atol=1e-4))
    assert (np.allclose(call(ivy_svg.cam_to_ds_pixel_coords, td.cam_coords[0], td.calib_mats[0]),
                        td.pixel_coords_to_scatter[0], atol=1e-4))


def test_cam_coords_to_depth(dev_str, call):
    assert (
        np.allclose(call(ivy_svg.cam_coords_to_depth, td.cam_coords, td.calib_mats), td.depth_maps, atol=1e-4))
    assert (np.allclose(call(ivy_svg.cam_coords_to_depth, td.cam_coords[0], td.calib_mats[0]),
                        td.depth_maps[0], atol=1e-4))
# Coordinate-frame conversion tests: depth-scaled pixel <-> camera <-> world
# frames, plus world-ray construction, all against precomputed fixtures.
def test_ds_pixel_to_cam_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.ds_pixel_to_cam_coords, td.pixel_coords_to_scatter, td.inv_calib_mats, dev_str=dev_str),
                       td.cam_coords, atol=1e-6)
    assert np.allclose(call(ivy_svg.ds_pixel_to_cam_coords, td.pixel_coords_to_scatter[0], td.inv_calib_mats[0], dev_str=dev_str),
                       td.cam_coords[0], atol=1e-6)


def test_depth_to_cam_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.depth_to_cam_coords, td.depth_maps, td.inv_calib_mats, dev_str=dev_str),
                       td.cam_coords, atol=1e-6)
    assert np.allclose(call(ivy_svg.depth_to_cam_coords, td.depth_maps[0], td.inv_calib_mats[0], dev_str=dev_str),
                       td.cam_coords[0], atol=1e-6)


def test_world_to_cam_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.world_to_cam_coords, td.world_coords, td.ext_mats, dev_str=dev_str),
                       td.cam_coords, atol=1e-6)
    assert np.allclose(call(ivy_svg.world_to_cam_coords, td.world_coords[0], td.ext_mats[0], dev_str=dev_str),
                       td.cam_coords[0], atol=1e-6)


def test_cam_to_world_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.cam_to_world_coords, td.cam_coords, td.inv_ext_mats, dev_str=dev_str),
                       td.world_coords, atol=1e-6)
    assert np.allclose(call(ivy_svg.cam_to_world_coords, td.cam_coords[0], td.inv_ext_mats[0], dev_str=dev_str),
                       td.world_coords[0], atol=1e-6)


def test_world_to_ds_pixel_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.world_to_ds_pixel_coords, td.world_coords, td.full_mats), td.pixel_coords_to_scatter,
                       atol=1e-4)
    assert np.allclose(call(ivy_svg.world_to_ds_pixel_coords, td.world_coords[0], td.full_mats[0]),
                       td.pixel_coords_to_scatter[0], atol=1e-4)


def test_world_coords_to_depth(dev_str, call):
    assert np.allclose(call(ivy_svg.world_coords_to_depth, td.world_coords, td.full_mats), td.depth_maps,
                       atol=1e-4)
    assert np.allclose(call(ivy_svg.world_coords_to_depth, td.world_coords[0], td.full_mats[0]),
                       td.depth_maps[0], atol=1e-4)


def test_ds_pixel_to_world_coords(dev_str, call):
    # with 2D image dimensions
    assert np.allclose(call(ivy_svg.ds_pixel_to_world_coords, td.pixel_coords_to_scatter, td.inv_full_mats),
                       td.world_coords, atol=1e-6)
    assert np.allclose(call(ivy_svg.ds_pixel_to_world_coords, td.pixel_coords_to_scatter[0], td.inv_full_mats[0]),
                       td.world_coords[0], atol=1e-6)
    # with flat image dimensions
    batch_shape = list(td.inv_full_mats.shape[:-2])
    assert np.allclose(call(ivy_svg.ds_pixel_to_world_coords,
                            np.reshape(td.pixel_coords_to_scatter, batch_shape + [-1, 3]), td.inv_full_mats),
                       np.reshape(td.world_coords, batch_shape + [-1, 4]), atol=1e-6)


def test_depth_to_world_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.depth_to_world_coords, td.depth_maps, td.inv_full_mats),
                       td.world_coords, atol=1e-6)
    assert np.allclose(call(ivy_svg.depth_to_world_coords, td.depth_maps[0], td.inv_full_mats[0]),
                       td.world_coords[0], atol=1e-6)


def test_pixel_coords_to_world_rays(dev_str, call):
    assert np.allclose(
        call(ivy_svg.pixel_coords_to_world_ray_vectors, td.inv_full_mats, td.pixel_coords_to_scatter),
        td.world_rays, atol=1e-6)
    assert np.allclose(
        call(ivy_svg.pixel_coords_to_world_ray_vectors, td.inv_full_mats[0], td.pixel_coords_to_scatter[0]),
        td.world_rays[0], atol=1e-6)


def test_sphere_coords_to_world_ray_vectors(dev_str, call):
    assert np.allclose(
        call(ivy_svg.sphere_coords_to_world_ray_vectors, td.sphere_coords, td.inv_Rs),
        td.world_rays, atol=1e-6)
    assert np.allclose(
        call(ivy_svg.sphere_coords_to_world_ray_vectors, td.sphere_coords[0], td.inv_Rs[0]),
        td.world_rays[0], atol=1e-6)
# Sampling, matrix-composition and spherical-coordinate conversion tests.
def test_bilinearly_interpolate_image(dev_str, call):
    # Sampling an image at its own pixel grid must reproduce the image;
    # the warped simple-image fixture checks true interpolation.
    assert np.allclose(call(ivy_svg.bilinearly_interpolate_image, td.world_coords,
                            td.uniform_pixel_coords[:, :, :, :, 0:2]), td.world_coords, atol=1e-5)
    assert np.allclose(call(ivy_svg.bilinearly_interpolate_image, td.world_coords[0],
                            td.uniform_pixel_coords[0, :, :, :, 0:2]), td.world_coords[0], atol=1e-5)
    assert np.allclose(call(ivy_svg.bilinearly_interpolate_image, td.simple_image, td.warp),
                       td.warped_simple_image, atol=1e-5)


def test_inv_ext_mat_to_camera_center(dev_str, call):
    assert np.allclose(call(ivy_svg.inv_ext_mat_to_camera_center, td.inv_ext_mats), td.C_hats, atol=1e-6)
    assert np.allclose(call(ivy_svg.inv_ext_mat_to_camera_center, td.inv_ext_mats[0]), td.C_hats[0], atol=1e-6)


def test_calib_and_ext_to_full_mat(dev_str, call):
    assert np.allclose(call(ivy_svg.calib_and_ext_to_full_mat, td.calib_mats, td.ext_mats), td.full_mats, atol=1e-6)
    assert np.allclose(call(ivy_svg.calib_and_ext_to_full_mat, td.calib_mats[0], td.ext_mats[0]), td.full_mats[0],
                       atol=1e-6)


def test_cam_to_sphere_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.cam_to_sphere_coords, td.cam_coords), td.sphere_coords, atol=1e-4)
    assert np.allclose(call(ivy_svg.cam_to_sphere_coords, td.cam_coords[0]), td.sphere_coords[0], atol=1e-4)


def test_ds_pixel_to_sphere_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.ds_pixel_to_sphere_coords, td.pixel_coords_to_scatter, td.inv_calib_mats),
                       td.sphere_coords, atol=1e-4)
    assert np.allclose(call(ivy_svg.ds_pixel_to_sphere_coords, td.pixel_coords_to_scatter[0], td.inv_calib_mats[0]),
                       td.sphere_coords[0], atol=1e-4)


def test_angular_pixel_to_sphere_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.angular_pixel_to_sphere_coords, td.angular_pixel_coords,
                            td.pixels_per_degree), td.sphere_coords, atol=1e-3)
    assert np.allclose(call(ivy_svg.angular_pixel_to_sphere_coords, td.angular_pixel_coords[0],
                            td.pixels_per_degree), td.sphere_coords[0], atol=1e-3)


def test_sphere_to_cam_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.sphere_to_cam_coords, td.sphere_coords, dev_str=dev_str), td.cam_coords, atol=1e-3)
    assert np.allclose(call(ivy_svg.sphere_to_cam_coords, td.sphere_coords[0], dev_str=dev_str),
                       td.cam_coords[0], atol=1e-3)


def test_sphere_to_ds_pixel_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.sphere_to_ds_pixel_coords, td.sphere_coords, td.calib_mats),
                       td.pixel_coords_to_scatter, atol=1e-3)
    assert np.allclose(call(ivy_svg.sphere_to_ds_pixel_coords, td.sphere_coords[0], td.calib_mats[0]),
                       td.pixel_coords_to_scatter[0], atol=1e-3)


def test_sphere_to_angular_pixel_coords(dev_str, call):
    assert np.allclose(call(ivy_svg.sphere_to_angular_pixel_coords, td.sphere_coords,
                            td.pixels_per_degree), td.angular_pixel_coords, atol=1e-3)
    assert np.allclose(call(ivy_svg.sphere_to_angular_pixel_coords, td.sphere_coords[0],
                            td.pixels_per_degree), td.angular_pixel_coords[0], atol=1e-3)
|
<filename>polya/interface/example.py<gh_stars>10-100
####################################################################################################
#
# example.py
#
# Authors:
# <NAME>
# <NAME>
# <NAME>
#
# Class to easily construct examples.
#
#
####################################################################################################
import polya.interface.solve_util as solve_util
import timeit
import polya.main.messages as messages
import polya.util.timer as timer
import polya.main.formulas as formulas
class Example:
    """A single Polya test problem: hypotheses, an optional conclusion, and
    solver configuration. NOTE: this module is Python 2 (print statements)."""

    def __init__(self, hyps, terms, conc, axioms, modules, omit, comment,
                 split_depth, split_breadth, solver):
        """
        Instantiates an Example object. Used to create lists of test problems.
        Arguments:
         -- hyps: a list of TermComparisons, the hypotheses
         -- conclusion: a TermComparison, to try to derive. Defaults to False, ie, show hyps
               is contradictory.
         -- axioms: a list of extra axioms to use.
         -- modules: a list of modules to use. Defaults to all available modules.
         -- omit: the example will not run if omit=True. Defaults to False.
         -- comment: prints comment when the example is run.
         -- split_depth, split_breadth: as in Solver.
         -- solver: 'fm' or 'poly' arithmetic
        """
        self.hyps = hyps if hyps else list()
        self.terms = terms if terms else list()
        self.conc = conc
        self.axioms = axioms if axioms else list()
        self.modules = modules if modules else list()
        self.comment=comment
        self.omit = omit  # flag to omit from 'test_all'
        self.split_depth = split_depth
        self.split_breadth = split_breadth
        self.solver = solver
        self.clauses = []

    def show(self):
        """
        Prints the example.
        """
        for a in self.axioms:
            print 'Axiom: {0!s}'.format(a)
        for h in self.hyps:
            print 'Hypothesis: {0!s}'.format(h)
        for t in self.terms:
            print 'Term of interest: {0!s}'.format(t)
        for c in self.clauses:
            print 'Clause: {0!s}'.format(formulas.Or(*c))
        if self.conc:
            print 'Conclusion: {0!s}'.format(self.conc)
        else:
            print 'Conclusion: False'
        if self.comment:
            print 'Comment: {0}'.format(self.comment)
        # omit may be True (skip everywhere) or the string 'fm' (skip FM only)
        if self.omit == True:
            print "(Omitted from 'test_all')"
        elif self.omit == 'fm':
            print "(Omitted from FM)"
        print

    def set_solver_type(self, s):
        # s is 'fm' or 'poly', as in __init__
        self.solver = s

    def set_split(self, depth, breadth):
        self.split_depth, self.split_breadth = depth, breadth

    def test(self):
        """
        Creates a Solver object with the stored values, and runs check().
        Returns True when the conclusion was proved (or the hypotheses
        refuted), False otherwise; prints progress and timing either way.
        """
        self.show()
        S = solve_util.Solver(self.split_depth, self.split_breadth, self.hyps, self.terms,
                              self.axioms, self.modules, self.solver)
        for c in self.clauses:
            S.add_clause(c)
        t = timeit.default_timer()
        r = False
        if self.conc:
            if S.prove(self.conc):
                print 'Conclusion is valid.'
                r = True
            else:
                print 'Failed.'
        else:
            # No conclusion given: try to refute the hypotheses instead.
            if S.check():
                print 'Refuted hypotheses.'
                r = True
            else:
                print 'Failed.'
        print 'Ran in', round(timeit.default_timer()-t, 3), 'seconds'
        print
        return r
def run_examples(examples, args):
    """
    Takes a list of Example objects, tests each one in succession, and prints data.
    Used from the command line, as in sample_problems.py

    Recognized switches: -v (verbose), -fm (force Fourier-Motzkin solver),
    -h (help). Commands: 'list', 'test_all', 'test_all_comp', or example
    indices. NOTE: this module is Python 2 (print statements).
    """
    # handle switches
    if '-v' in args:
        messages.set_verbosity(messages.normal)
        args.remove('-v')
    else:
        messages.set_verbosity(messages.quiet)
    if '-fm' in args:
        for e in examples:
            e.set_solver_type('fm')
        args.remove('-fm')

    # perform command
    if len(args) == 1 or '-h' in args:
        script_name = args[0]
        print "Use 'python {0} list' to list the examples.".format(script_name)
        print "Use 'python {0} 6 9 10' to run those examples.".format(script_name)
        print "Use 'python {0} test_all' to run them all.".format(script_name)
        print "Use switch -v to produce verbose output."
        print "Use switch -fm to use Fourier Motzkin"
    else:
        #show_configuration()
        if args[1] == 'list':
            for i in range(len(examples)):
                print '*** Example {0!s} ***'.format(i)
                examples[i].show()
        elif args[1] == 'test_all':
            t = timeit.default_timer()
            for i in range(len(examples)):
                # skip examples flagged omit=True, and omit='fm' when the
                # FM solver is selected
                if not (examples[i].omit == True or (examples[i].omit == 'fm'
                                                     and examples[i].solver == 'fm')):
                    print '*** Example {0!s} ***'.format(i)
                    examples[i].test()
            print 'Total:', round(timeit.default_timer()-t, 3), 'seconds'
        # for a comparison of Fourier-Motzkin and polytope methods
        elif args[1] == 'test_all_comp':
            t = timeit.default_timer()
            for i in range(len(examples)):
                if not (examples[i].omit == True):
                    print '*** Example {0!s} ***'.format(i)
                    if examples[i].omit == 'fm':
                        print '[Fourier Motzkin run omitted]'
                    else:
                        examples[i].set_solver_type('fm')
                        print '[Fourier-Motzkin]'
                        examples[i].test()
                    examples[i].set_solver_type('poly')
                    print '[Poly]'
                    examples[i].test()
            print 'Total:', round(timeit.default_timer()-t, 3), 'seconds'
        else:
            for i in range(1, len(args)):
                try:
                    examples[int(args[i])].test()
                except ValueError:
                    print 'No example {0}.'.format(args[i])
    messages.set_verbosity(messages.debug)
    # NOTE(review): when len(args) == 1 (help path), args[1] below raises
    # IndexError — confirm whether the guard should include len(args) > 1.
    if args[1] != 'list':
        timer.announce_times()
<reponame>liangyongxiang/vsf-all-in-one<filename>source/component/3rd-party/btstack/raw/port/mtk/docs/scripts/plot_scan.py
#!/usr/bin/env python3
import matplotlib.pyplot as plt
#from pylab import *
import pickle
import pylab as P
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.patches import Polygon
import itertools
import os
def histplot(data, labels, colors, x_label, y_label, title, fig_name, cdf):
    """Draw a histogram of each series in `data` and save it as a PDF.

    When `cdf` is truthy, draws a normalized cumulative step plot (empirical
    CDF) instead of bars. Returns the [n, bins, patches] triple from hist().

    Fix: the `normed=` keyword was deprecated and removed in Matplotlib 3.x;
    `density=` is its documented replacement with identical semantics here.
    """
    fig, ax = plt.subplots()
    if cdf:
        n, bins, patches = ax.hist(data, 20, weights=None, histtype='step', density=True, cumulative=True, label= labels, color = colors)
        legend = ax.legend(loc='lower left', shadow=False)
        ax.grid(True)
    else:
        n, bins, patches = ax.hist( data, 20, weights=None, histtype='bar', label= labels, color = colors)
        legend = ax.legend(loc='upper right', shadow=False)
    for line in ax.get_lines():
        line.set_linewidth(1.5)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    for label in legend.get_texts():
        label.set_fontsize('small')
    for label in legend.get_lines():
        label.set_linewidth(1.5)  # the legend line width
    fig.suptitle(title, fontsize=12)
    pp = PdfPages(fig_name)
    pp.savefig(fig)
    pp.close()
    return [n, bins, patches]
def accplot(data, labels, colors, x_label, y_label, title, fig_name, annotation):
    """Plot cumulative advertisement counts over time for each capture series
    and save the figure as a PDF.

    Each series in `data` is a list of arrival timestamps; the curve plots
    arrival time (x) against running count (y), with the mean rate in the
    legend label.
    """
    mean = np.zeros(len(data))
    for i in range(len(data)):
        if len(data[i]) > 0:
            mean[i] = len(data[i]) /(1.0*max(data[i]))
    # `round` is this module's helper returning '%.4f'-formatted strings.
    # Fix: under Python 3 it yields a lazy map object, which is neither
    # subscriptable nor concatenable below — materialize it to a list.
    mean = list(round(mean))
    fig, ax = plt.subplots()
    for i in range(len(data)):
        if len(data[i]) > 0:
            ax.plot(data[i], range(len(data[i])), colors[i], label= labels[i]+', '+mean[i]+' adv/s, total nr. '+str(len(data[i])))
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    for tl in ax.get_yticklabels():
        tl.set_color('k')
    legend = ax.legend(loc='upper left', shadow=False)
    for label in legend.get_texts():
        label.set_fontsize('small')
    for label in legend.get_lines():
        label.set_linewidth(1.5)  # the legend line width
    for line in ax.get_lines():
        line.set_linewidth(1.5)
    fig.suptitle(title, fontsize=12)
    ax.text(400, 5000, annotation , style='italic',
            bbox={'facecolor':'gray', 'alpha':0.5, 'pad':10})
    pp = PdfPages(fig_name)
    pp.savefig(fig)
    pp.close()
    return fig
def mean_common_len(data):
    """Smallest length among the non-empty series, considering data[:-1].

    Returns 0 when no non-empty series is found.
    NOTE(review): the final element of `data` is excluded (range(len-1) in
    the original) — confirm against callers whether that is intended.
    """
    mcl = 0
    for series in data[:-1]:
        if len(series) > 0:
            mcl = len(series) if mcl == 0 else min(mcl, len(series))
    return mcl
def mean_common_time(data):
    """Smallest maximum timestamp among the non-empty series in data[:-1].

    Returns 0 when no non-empty series is found.
    NOTE(review): mirrors mean_common_len — the final element of `data` is
    excluded; confirm against callers whether that is intended.
    """
    mct = 0
    for series in data[:-1]:
        if len(series) > 0:
            peak = max(series)
            mct = peak if mct == 0 else min(mct, peak)
    return mct
def normalize(s):
    """Shift the series so its first element becomes zero.

    Fix: the original returned map(...), which under Python 3 is a lazy,
    non-subscriptable iterator that broke the len()/indexing performed on
    the result in prepare_data(); return a list instead (the Python 2
    behavior). An empty input yields an empty list.
    """
    return [x - s[0] for x in s]
def delta(s):
    """Consecutive differences: result[i] == s[i+1] - s[i]."""
    diffs = list()
    for i in range(1, len(s)):
        diffs.append(s[i] - s[i - 1])
    return diffs
def round(s):
    """Format each value of `s` as a 4-decimal-place string.

    NOTE: deliberately shadows the builtin round() — kept for compatibility
    with existing callers in this module.

    Fix: the original returned map(...), which under Python 3 is lazy and
    not subscriptable, breaking the indexing done in accplot(); return a
    list instead (the Python 2 behavior).
    """
    return ["{0:.4f}".format(x) for x in s]
def cut(s, V):
    """Keep, in order, the values of `s` that are <= V."""
    return [value for value in s if value <= V]
def prepare_data(exp_name, sensor_name):
    """Load pickled advertisement-timestamp series for one experiment/sensor
    pair and derive the plotting and histogram datasets.

    Returns [plot_data, hist_data, zoomed_hist_data, flattened_rn_delays,
    plot_data_normal]. Variable naming: first letter = scanner (m=mac,
    r=ruggear), second letter = advertiser (m=mac, n=the named sensor).
    """
    prefix = '../data/processed/'
    scanning_type = exp_name+'_continuous'
    mn = pickle.load(open(prefix+scanning_type+'_mac_'+sensor_name+'.data', 'rb')) # mac nio,
    mm = pickle.load(open(prefix+scanning_type+'_mac_mac.data', 'rb')) # mac mac,
    rn = pickle.load(open(prefix+scanning_type+'_rug_'+sensor_name+'.data', 'rb')) # ruggear nio,
    rm = pickle.load(open(prefix+scanning_type+'_rug_mac.data', 'rb')) # ruggear mac,
    scanning_type = exp_name+'_normal'
    # Normal-scan captures are optional: missing files fall back to empty
    # lists. NOTE(review): the bare excepts also swallow non-I/O errors —
    # consider narrowing to (IOError, OSError, pickle.UnpicklingError).
    try:
        normal_rn = pickle.load(open(prefix + scanning_type+'_rug_'+sensor_name+'.data', 'rb')) # ruggear mac, normal
    except:
        normal_rn = list()
    try:
        normal_mn = pickle.load(open(prefix + scanning_type+'_mac_'+sensor_name+'.data', 'rb')) # ruggear mac, normal
    except:
        normal_mn = list()
    try:
        normal_rm = pickle.load(open(prefix + scanning_type+'_rug_mac.data', 'rb')) # ruggear mac, normal
    except:
        normal_rm = list()
    try:
        normal_mm = pickle.load(open(prefix + scanning_type+'_mac_mac.data', 'rb')) # ruggear mac, normal
    except:
        normal_mm = list()
    # Common time span (T) and common series length (L) across all captures;
    # Z is the zoom window in seconds for the zoomed histogram.
    T = mean_common_time([mm, mn, rm, rn, normal_rm, normal_rn, normal_mm, normal_mn])
    L = mean_common_len([mm, mn, rm, rn, normal_rm, normal_rn, normal_mm, normal_mn])
    Z = 15
    print("mct %d, mcl %d" % (T,L))
    mac_mac = normalize(mm)
    mac_nio = normalize(mn)
    ruggeer_mac = normalize(rm)
    ruggeer_nio = normalize(rn)
    ruggeer_nio_normal = normalize(normal_rn)
    ruggeer_mac_normal = normalize(normal_rm)
    mac_mac_normal = normalize(normal_mm)
    mac_nio_normal = normalize(normal_mn)
    delta_mn = delta(mac_nio)
    delta_mm = delta(mac_mac)
    delta_rn = delta(ruggeer_nio)
    delta_rm = delta(ruggeer_mac)
    rn_delays = list()
    for i in range(len(delta_rn)):
        # NOTE(review): delta values are (float) second offsets; range()
        # requires an int under Python 3 — confirm whether int(delta_rn[i])
        # was intended (this looks like an unported Python 2 idiom).
        rn_delays.append(range(delta_rn[i]))
    flattened_rn_delays = list(itertools.chain.from_iterable(rn_delays))
    plot_data = [cut(mac_mac,T), cut(mac_nio,T), cut(ruggeer_mac,T), cut(ruggeer_nio,T)]
    plot_data_normal = [cut(mac_mac_normal,T), cut(mac_nio_normal,T), cut(ruggeer_mac_normal,T), cut(ruggeer_nio_normal,T)]
    hist_data = [delta_mm[0:L], delta_mn[0:L], delta_rm[0:L], delta_rn[0:L]]
    zoomed_hist_data = list()
    # Only build the zoomed histogram when every series has at least Z samples.
    if len(hist_data[0]) >= Z and len(hist_data[1]) >= Z and len(hist_data[2]) >= Z and len(hist_data[3]) >= Z :
        zoomed_hist_data = [cut(hist_data[0],Z), cut(hist_data[1],Z), cut(hist_data[2],Z), cut(hist_data[3],Z)]
    return [plot_data, hist_data, zoomed_hist_data, flattened_rn_delays, plot_data_normal]
def plot(exp_name, sensor_name, sensor_title, prefix):
    """Generate the full set of PDF figures for one experiment/sensor pair.

    Produces cumulative-count plots (continuous and normal scanning), the
    inter-advertisement interval histogram, and the RugGear expected-waiting
    time histogram/CDF, all written under `prefix`.
    """
    [plot_data, hist_data, zoomed_hist_data, rn_delays, plot_data_normal] = prepare_data(exp_name, sensor_name)
    labels = ['Scan. BCM, Adv. BCM', 'Scan. BCM, Adv. '+ sensor_title, 'Scan. RugGear, Adv. BCM', 'Scan. RugGear, Adv. '+sensor_title]
    plot_colors = ['r-','k-','b-','g-']
    hist_colors = ['red','black','blue','green']
    title = 'Continuous scanning over time'
    annotation = 'scan window 30ms, scan interval 30ms'
    x_label = 'Time [s]'
    y_label = 'Number of advertisements'
    accplot(plot_data, labels, plot_colors, x_label, y_label, title, prefix+sensor_name+'_acc_number_of_advertisements_continuous_scanning.pdf', annotation)
    x_label = 'Time interval between two advertisements [s]'
    title = 'Continuous scanning - interval distribution'
    histplot(hist_data, labels, hist_colors, x_label, y_label, title, prefix+sensor_name+'_histogram_advertisements_time_delay.pdf', 0)
    #if len(zoomed_hist_data) > 0:
    #    title = 'Continuous scanning - interval distribution [0-15s]'
    #    histplot(zoomed_hist_data, labels, hist_colors, x_label, y_label, title, prefix+sensor_name+'_histogram_advertisements_time_delay_zoomed.pdf', 0)
    title = 'Continuous scanning - expected waiting time'
    x_label = 'Expected waiting time until first scan [s]'
    # Only the RugGear/sensor series (index 3) is relevant for waiting time.
    [n, bins, patches] = histplot([rn_delays], [labels[3]], [hist_colors[3]], x_label, y_label, title, prefix+sensor_name+'_ruggear_expected_scan_response.pdf', 0)
    title = 'Continuous scanning - expected waiting time probability distribution'
    y_label = 'Advertisement probability'
    x_label = 'Time until first scan [s]'
    [n, bins, patches] = histplot([rn_delays], [labels[3]], [hist_colors[3]], x_label, y_label, title, prefix+sensor_name+'_ruggear_cdf.pdf', 1)
    title = 'Normal scanning over time'
    annotation = 'scan window 30ms, scan interval 300ms'
    x_label = 'Time [s]'
    y_label = 'Number of advertisements'
    accplot(plot_data_normal, labels, plot_colors, x_label, y_label, title, prefix+sensor_name+'_acc_number_of_advertisements_normal_scanning.pdf', annotation)
# Script entry: ensure the output directory exists, then render the figure
# set for each experiment/sensor pair.
picts_folder = "../picts_experiments/"
if not os.access(picts_folder, os.F_OK):
    os.mkdir(picts_folder)
plot('exp1','nio', 'Nio', picts_folder)
plot('exp2','xg2', 'XG', picts_folder)
|
<reponame>robertwb/collapsing-thread-pool-executor
import atexit
import sys
import threading
import weakref
from concurrent.futures import _base
from logging import getLogger
from uuid import uuid4
try: # Python3
import queue
except Exception: # Python2
import Queue as queue
try: # Python2
from concurrent.futures.thread import cpu_count
except BaseException: # Python3
from multiprocessing import cpu_count
# for the clean shutdown piece
# Module-level registries so the atexit hook can reach every live worker
# thread and every pool; WeakSets avoid keeping them alive artificially.
_workers = weakref.WeakSet()
_shutdown = False
_thread_pools = weakref.WeakSet()


# for the clean shutdown piece
def _python_exit():
    """atexit hook: flag interpreter shutdown, wake every idle worker with a
    None work item (their shutdown signal), and shut down all live pools."""
    global _shutdown
    _shutdown = True
    for w in _workers:
        # A set event with no work item tells the worker loop to exit.
        w.work_item_manager.work_item = None
        w.work_item_available_event.set()
    for tp in _thread_pools:
        tp.shutdown()


atexit.register(_python_exit)  # for the clean shutdown piece
class _WorkItem(object):
    """A unit of work: a future paired with the callable that resolves it."""

    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        """Execute fn and resolve the future with its result or exception."""
        # Respect a cancellation requested before a worker picked this up.
        if not self.future.set_running_or_notify_cancel():
            return

        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException:
            e, tb = sys.exc_info()[1:]
            # Fix: set_exception_info() exists only on the Python 2 `futures`
            # backport; the Python 3 stdlib Future raised AttributeError
            # here. Fall back to the standard set_exception().
            if hasattr(self.future, 'set_exception_info'):
                self.future.set_exception_info(e, tb)
            else:
                self.future.set_exception(e)
        else:
            self.future.set_result(result)
class _WorkItemManager(object):
    """Lock-protected holder for the single pending work item handed from
    the executor to an idle worker thread (None means no pending work)."""

    def __init__(self):
        self._lock = threading.Lock()
        self._work_item = None

    @property
    def work_item(self):
        """Read the pending work item under the lock."""
        with self._lock:
            return self._work_item

    @work_item.setter
    def work_item(self, work_item):
        """Replace the pending work item under the lock."""
        with self._lock:
            self._work_item = work_item
class _Worker(threading.Thread):
    """Pool worker thread: repeatedly announces itself as available, waits
    (with a timeout) for a work item, runs it, and exits on timeout or on a
    shutdown signal (an event set with no work item)."""

    def __init__(self, executor_reference, work_item_manager, work_item_available_event, worker_available_callback,
                 timeout, name):
        super(_Worker, self).__init__(
            name=name
        )
        # Weak reference back to the owning executor (breaks reference cycle).
        self._executor_reference = executor_reference
        # Per-worker mailbox holding at most one pending work item.
        self._work_item_manager = work_item_manager
        # Signaled when a work item (or a shutdown) is handed to this worker.
        self._work_item_available_event = work_item_available_event
        # Called to re-register this worker as idle/available.
        self._worker_available_callback = worker_available_callback
        # Idle timeout in seconds; the thread collapses (exits) when it fires.
        self._timeout = timeout

    @property
    def work_item_manager(self):
        return self._work_item_manager

    @property
    def work_item_available_event(self):
        return self._work_item_available_event

    def run(self):
        try:
            while True:
                # declare this thread as available
                self._worker_available_callback(self)

                # wait until task or shutdown on timeout
                work_available = self._work_item_available_event.wait(timeout=self._timeout)
                self._work_item_available_event.clear()

                if work_available:
                    # Take the item and clear the mailbox for the next round.
                    work_item = self._work_item_manager.work_item
                    if work_item is not None:
                        self._work_item_manager.work_item = None
                else:  # shutdown this thread if there no was no work given
                    return

                if work_item is not None:  # do the work
                    work_item.run()
                    del work_item  # Delete references to object. See issue16284
                    continue

                # this path only executes if the work_item was None (pool shutdown commanded)
                executor = self._executor_reference()

                # Exit if:
                #   - The interpreter is shutting down OR
                #   - The executor that owns the worker has been collected OR
                #   - The executor that owns the worker has been shutdown.
                if _shutdown or executor is None or executor._shutdown:
                    return

                del executor
        except BaseException:
            # Never let a worker die silently — log and let the thread end.
            _base.LOGGER.critical('Exception in worker', exc_info=True)
# based on concurrent.futures.thread.ThreadPoolExecutor
class CollapsingThreadPoolExecutor(_base.Executor):
    """Executor whose worker threads may die off ("collapse") after
    ``permitted_thread_age_in_seconds`` of inactivity; new workers are
    spawned on demand up to ``max_workers``.
    """

    def __init__(self, max_workers=None, thread_name_prefix=None,
                 permitted_thread_age_in_seconds=30, logger=None):
        """
        max_workers: upper bound on worker threads (default 5 * CPUs)
        thread_name_prefix: prefix for worker thread names (default: object id)
        permitted_thread_age_in_seconds: worker idle timeout; also the
            cleanup thread's sweep interval
        logger: optional logger; defaults to one named after this class
        """
        if max_workers is None:
            # Use this number because ThreadPoolExecutor is often
            # used to overlap I/O instead of CPU work.
            max_workers = (cpu_count() or 1) * 5
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")
        self._max_workers = max_workers
        self._thread_name_prefix = thread_name_prefix or '{0}'.format(hex(id(self))[2:])
        self._permitted_thread_age_in_seconds = permitted_thread_age_in_seconds
        self._logger = logger if logger is not None else getLogger(self.__class__.__name__)
        self._work_queue = queue.Queue()
        self._workers = set()
        self._workers_lock = threading.Lock()
        self._available_workers_queue = queue.LifoQueue()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        # background thread that reaps dead workers
        self._cleanup_thread_shutdown_queue = queue.Queue()
        self._cleanup_threads_lock = threading.Lock()
        self._cleanup_thread = threading.Thread(
            target=self._cleanup_threads
        )
        self._cleanup_thread.daemon = True
        self._cleanup_thread.start()
        # background thread that dispatches submitted work to workers
        self._work_queue_thread = threading.Thread(
            target=self._handle_work_queue,
        )
        self._work_queue_thread.daemon = True
        self._work_queue_thread.start()
        self._work_queue_finished = False
        _thread_pools.add(self)

    def _worker_available(self, worker):
        """Callback from a worker announcing it is idle and ready for work."""
        if self._work_queue_finished:
            # wake the worker to exit right away
            worker.work_item_available_event.set()
        else:
            self._available_workers_queue.put(worker)

    def _cleanup_threads(self):
        """Background loop: periodically remove and join dead worker threads."""
        last_num_workers = -1
        while True:
            with self._shutdown_lock:
                if self._shutdown:
                    return
            dead_workers = []
            with self._workers_lock:
                for w in self._workers:
                    # Fixed: Thread.isAlive() was removed in Python 3.9;
                    # is_alive() has existed since Python 2.6, so this stays
                    # compatible with both interpreter lines.
                    if w.ident and not w.is_alive():
                        dead_workers += [w]
                for w in dead_workers:
                    self._workers.remove(w)
                    self._logger.debug('removed {0}'.format(w))
                num_workers = len(self._workers)
            for w in dead_workers:
                self._logger.debug('joining {0}'.format(w))
                w.join()
                self._logger.debug('joined {0}'.format(w))
            if num_workers != last_num_workers:
                last_num_workers = num_workers
                self._logger.debug('{0} workers running'.format(
                    num_workers
                ))
            # makes for an interruptable sleep
            try:
                self._cleanup_thread_shutdown_queue.get(
                    timeout=self._permitted_thread_age_in_seconds)
                return
            except queue.Empty:
                pass

    def _handle_work_queue(self):
        """Background loop: pull submitted work items and hand each to a
        live, available worker, spawning new workers as needed."""
        while True:
            with self._shutdown_lock:
                if self._shutdown:
                    return
            # wait for some work
            try:
                work_item = self._work_queue.get(timeout=5)
                if work_item is None:  # shutdown commanded
                    # wake all the workers so they exit quickly
                    self._work_queue_finished = True
                    try:
                        while True:
                            w = self._available_workers_queue.get_nowait()
                            if w:
                                w.work_item_available_event.set()
                    except queue.Empty:
                        pass
                    return
            except queue.Empty:
                continue
            # wait for a worker
            wait = False
            worker = None
            while worker is None:
                try:
                    w = self._available_workers_queue.get_nowait() if not wait else self._available_workers_queue.get(
                        timeout=5
                    )
                except queue.Empty:
                    wait = self._adjust_thread_count()
                    continue
                if w is None:  # shutdown commanded
                    return
                elif w.ident and not w.is_alive():  # dead worker, skip it
                    continue
                worker = w
                break
            # give the work_item to the worker
            worker.work_item_manager.work_item = work_item
            # notify it of work to be done
            worker.work_item_available_event.set()

    def _adjust_thread_count(self):
        """Spawn a new worker if below max_workers.

        Returns True when the pool is already at max_workers (the caller
        should block waiting for a worker to free up) and False when a new
        worker was just started (the caller can retry immediately).
        """
        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)
        with self._workers_lock:
            num_workers = len(self._workers)
            if num_workers == self._max_workers:
                # Fixed: this used to return False, which made the dispatch
                # loop spin on get_nowait() (busy-wait) whenever every worker
                # was busy; True tells it to do a blocking get instead.
                return True
        thread_name = '{0}_{1}'.format(self._thread_name_prefix, uuid4())
        work_item_manager = _WorkItemManager()
        work_item_available_event = threading.Event()
        work_item_available_event.clear()
        w = _Worker(
            weakref.ref(self, weakref_cb),
            work_item_manager,
            work_item_available_event,
            self._worker_available,
            self._permitted_thread_age_in_seconds,
            name=thread_name,
        )
        w.daemon = True
        w.start()
        self._logger.debug('added {0}'.format(w))
        with self._workers_lock:
            self._workers.add(w)
        _workers.add(w)
        return False

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)
            self._work_queue.put(w)
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        """Stop accepting work, stop the helper threads, and optionally join
        all worker threads before returning."""
        self._logger.debug('setting shutdown flag')
        with self._shutdown_lock:
            self._shutdown = True
        self._logger.debug('shutting down work queue')
        self._work_queue.put(None)
        self._logger.debug('shutting down work queue thread')
        self._available_workers_queue.put(None)
        self._logger.debug('shutting down cleanup thread')
        self._cleanup_thread_shutdown_queue.put(1)
        self._logger.debug('joining cleanup thread')
        self._cleanup_thread.join()
        self._logger.debug('joined cleanup thread')
        if wait:
            with self._workers_lock:
                for w in self._workers:
                    self._logger.debug('joining {0}'.format(w))
                    w.join()
                    self._logger.debug('joined {0}'.format(w))
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
|
<reponame>charlesblakemore/opt_lev_analysis<gh_stars>0
import os, sys, time, h5py
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
import scipy.optimize as opti
import scipy.constants as constants
from obspy.signal.detrend import polynomial
import bead_util as bu
import dill as pickle
plt.rcParams.update({'font.size': 14})

# Location of the Monte-Carlo simulation output to analyze.
base = '/data/spin_sim_data/libration_tests/'
dirname = os.path.join(base, 'sdeint_amp-sweep')
n_mc = bu.count_subdirectories(dirname)
hdf5 = True  # simulation data saved as .h5 (True) or .npy (False)

### Constants
dipole_units = constants.e * (1e-6) # to convert e um -> C m
T = 297  # temperature [K] -- NOTE(review): defined but unused below
m0 = 18.0 * constants.atomic_mass # residual gas particle mass, in kg

### Bead-specific constants
p0 = 100.0 * dipole_units # C * m  (overwritten per-MC from params.p below)
# rhobead = {'val': 1850.0, 'sterr': 1.0, 'syserr': 1.0}
mbead_dic = {'val': 84.3e-15, 'sterr': 1.0e-15, 'syserr': 1.5e-15}
mbead = mbead_dic['val']
Ibead = bu.get_Ibead(mbead=mbead_dic)['val']   # moment of inertia
kappa = bu.get_kappa(mbead=mbead_dic)['val']   # drag coefficient factor
############################################################################
############################################################################
############################################################################
############################################################################
# Loop over Monte-Carlo runs: demodulate the cross-polarized signal of each
# file and fit a damped-oscillator model to extract the libration frequency
# and damping rate.
amps = []
pressures = []
lib_freqs = []
for i in range(n_mc):
    cdir = os.path.join(dirname, 'mc_{:d}'.format(i))
    param_path = os.path.join(cdir, 'params.p')
    params = pickle.load( open(param_path, 'rb') )
    pressure = params['pressure'] # convert back to mbar
    drive_amp = params['drive_amp']
    fsig = params['drive_freq']
    p0 = params['p0']
    fsamp = params['fsamp']
    # Expected damping from kinetic gas theory for this pressure.
    beta_rot = pressure * np.sqrt(m0) / kappa
    gamma = beta_rot / Ibead
    pressures.append(pressure)
    amps.append(drive_amp)
    if hdf5:
        ext = '.h5'
    else:
        ext = '.npy'
    datfiles, lengths = bu.find_all_fnames(cdir, ext=ext)
    nfiles = lengths[0]
    gammas = []
    longdat = []
    nsamp = 0
    lib_freqs.append([])
    # Analytic libration frequency sqrt(E*d/I)/(2*pi) used to bound the fit.
    lib_calc = np.sqrt(drive_amp * p0 / Ibead) / (2.0 * np.pi)
    # Only the last 6 files of each run are processed (reversed order).
    for fileind, file in enumerate(datfiles[::-1]):
        if fileind > 5:
            break
        bu.progress_bar(fileind, nfiles, suffix='{:d}/{:d}'.format(i+1, n_mc))
        if hdf5:
            fobj = h5py.File(file, 'r')
            dat = np.copy(fobj['sim_data'])
            fobj.close()
        else:
            dat = np.load(file)
        tvec = dat[0]
        theta = dat[1]
        phi = dat[2]
        # Projection of the dipole on the drive axis; its magnitude carries
        # the libration signal at twice the drive frequency.
        px = p0 * np.cos(phi) * np.sin(theta)
        crossp = np.abs(px)
        carrier_amp, carrier_phase \
                = bu.demod(crossp, fsig, fsamp, harmind=2.0, filt=True, \
                           bandwidth=5000.0, plot=False)
        # NOTE(review): `params` here shadows the pickled params dict loaded
        # above; rename one of them if the dict is needed later in this loop.
        params, cov = bu.fit_damped_osc_amp(carrier_phase, fsamp, plot=False, \
                                            fit_band=[50.0, lib_calc*2.0])
        lib_freqs[-1].append(params[1])
        gammas.append(np.abs(params[2]))
        # if not len(longdat):
        #     longdat = crossp
        # else:
        #     longdat = np.concatenate( (longdat, crossp) )
    print('Expected gamma = {:0.2g}'.format(gamma))
    print(gammas)
    # carrier_amp, carrier_phase \
    #         = bu.demod(longdat, fsig, fsamp, harmind=2.0, filt=True, \
    #                    bandwidth=5000.0, plot=False)
    # params, cov = bu.fit_damped_osc_amp(carrier_phase, fsamp, plot=False, \
    #                                     fit_band=200)
    # label1 = '$\\gamma = {:0.2f}$ Hz'.format(params[2])
    # label2 = 'Expected: $\\gamma = {:0.2f}$ Hz'.format(gamma / (2.0 * np.pi))
    # freqs = np.fft.rfftfreq(nsamp, d=1.0/fsamp)
    # asd = np.abs(np.fft.rfft(carrier_phase)) * bu.fft_norm(nsamp, fsamp)
    # plt.loglog(freqs, asd)
    # plt.loglog(freqs, bu.damped_osc_amp(freqs, *params), \
    #            label=label1)
    # plt.loglog([], color='w', label=label2)
    # plt.legend()
    # plt.xlim(20, 2000)
    # plt.ylim(1e-6, 0.12)
    # plt.xlabel('Frequency [Hz]')
    # plt.ylabel('Phase ASD [rad/$\\sqrt{\mathrm{Hz}}$]')
    # plt.tight_layout()
    # plt.show()
    # input()
## Routine to make the libfreq vs efield plot (need to move somewhere else)
# Average the per-file libration frequencies within each MC run.
amps = np.array(amps)
lib_freqs = np.mean(lib_freqs, axis=-1)
def fit_fun(x, A):
    """Square-root model ``A * sqrt(x)`` relating libration frequency to drive amplitude."""
    return A * np.power(x, 0.5)
# NOTE(review): popt/pcov are computed but never used below -- the dashed
# curve plots the analytic expectation, not the fit.  Either plot the fit or
# drop this call.
popt, pcov = opti.curve_fit(fit_fun, amps, lib_freqs)
plot_x = np.linspace(0, np.max(amps), 200)
# Analytic libration frequency sqrt(E*d/I)/(2*pi) for comparison.
expected_libration = np.sqrt(plot_x * p0 / Ibead) / (2.0 * np.pi)
plt.plot(amps * 1e-3, lib_freqs, 'o', ms=8, label='$\\omega_0 / 2 \\pi$ from simulation output')
plt.plot(plot_x * 1e-3, expected_libration, ls='--', lw=2, color='r', \
         label='Expected value: $( \\sqrt{E d / \\, I} \\,) / \\, 2 \\pi$')
plt.xlabel('Efield Amplitude [kV/m]')
plt.ylabel('Libration Frequency [Hz]')
plt.legend()
plt.tight_layout()
plt.show()
# print()
# print('Pressure [mbar] : {:0.3g}'.format(pressure))
# print('   Damping [Hz] : {:0.3g}'.format(np.mean(gammas_hz)))
# sys.stdout.flush()
<filename>examples/hand_pose_estimation/unit_tests.py
from backend_SE3 import build_rotation_matrix_x, build_rotation_matrix_y
from backend_SE3 import build_rotation_matrix_z, build_affine_matrix
from backend_SE3 import rotation_from_axis_angles
from backend_SE3 import to_homogeneous_coordinates, build_translation_matrix_SE3
from backend_keypoints import canonical_transformations_on_keypoints
from backend_keypoints import get_hand_side_and_keypooints
from backend_keypoints import keypoints_to_palm_coordinates
from backend_keypoints import normalize_keypoints, extract_hand_side_keypoints
from RHDv2 import LEFT_WRIST
from RHDv2 import RIGHT_WRIST
from hand_keypoints_loader import RenderedHandLoader
from paz.backend.boxes import to_one_hot
from processors_standard import TransposeOfArray, ListToArray
import paz.processors as pr
from paz.processors import SequentialProcessor
# NOTE(review): hard-coded absolute path makes this test module runnable on
# one machine only; move to a fixture/env var.  Also note this statement runs
# in the middle of the import block, at import time.
data_loader = RenderedHandLoader(
    '/media/jarvis/CommonFiles/5th_Semester/DFKI_Work/RHD_published_v2/')
from HandPoseEstimation import HandSegmentationNet, PosePriorNet, PoseNet
from HandPoseEstimation import ViewPointNet
import numpy as np
from pipelines import PostProcessSegmentation, \
    Process2DKeypoints
from paz.backend.image.opencv_image import load_image
from backend_keypoints import create_multiple_gaussian_map
from processors_keypoints import ExtractKeypoints
# Fix the RNG so the random-keypoint tests below are reproducible.
np.random.seed(0)
use_pretrained = True  # NOTE(review): flag appears unused in this module
# Networks are built once at import time and shared by the tests below.
HandSegNet = HandSegmentationNet()
HandPoseNet = PoseNet()
HandPosePriorNet = PosePriorNet()
HandViewPointNet = ViewPointNet()
def test_keypoints_to_palm_coordinates():
    """Wrist rows should be re-referenced to palm coordinates."""
    keypoints = np.arange(0, 123).reshape((41, 3))
    keypoint_palm = keypoints_to_palm_coordinates(keypoints)
    # NOTE(review): `a.all() == b.all()` compares two truthiness scalars, not
    # the arrays -- these asserts are vacuous and can never fail.  Use
    # np.allclose(keypoint_palm[LEFT_WRIST], [18., 19., 20.]) etc. instead.
    assert keypoint_palm[LEFT_WRIST, :].all() == np.array([
        [18., 19., 20.]]).all()
    assert keypoint_palm[RIGHT_WRIST, :].all() == np.array([
        [81., 82., 83.]]).all()
def test_one_hot_encode():
    """to_one_hot should return a numpy one-hot vector for the class index."""
    one_hot_vector = to_one_hot([1], 2)
    assert type(one_hot_vector).__module__ == np.__name__
    # Fixed: the original `a.all() == b.all()` pattern compared two
    # truthiness scalars and could never fail; compare the arrays instead.
    assert np.array_equal(np.ravel(one_hot_vector), np.array([0, 1]))
    assert np.array_equal(np.ravel(to_one_hot([0], 2)), np.array([1, 0]))
def test_normalize_keypoints():
    """normalize_keypoints should return (scale, normalized_keypoints)."""
    # NOTE(review): this expected array is only used in the vacuous assert at
    # the bottom; with the final `.round().all() == .all()` comparison it has
    # no effect on the test outcome.
    test_array = np.array([[0., 0., 0.], [1., 1., 1.], [1., 1., 1.],
                           [2., 2., 2.], [2., 2., 2.], [3., 3., 3.],
                           [3., 3., 3.], [4., 4., 4.], [5., 5., 5.],
                           [5., 5., 5.], [6., 6., 6.], [6., 6., 6.],
                           [7., 7., 7.], [8., 8., 8.], [8., 8., 8.],
                           [9., 9., 9.], [9., 9., 9.], [10., 10., 10.],
                           [10., 10., 10.], [11., 11., 11.], [12., 12., 12.]])
    # Depends on np.random.seed(0) set at module import for the 0.68 value.
    keypoints3D = np.random.rand(21, 3)
    keypoint_scale, keypoint_normalized = normalize_keypoints(keypoints3D)
    assert round(keypoint_scale, 2) == 0.68
    assert keypoints3D.shape == keypoint_normalized.shape
    # NOTE(review): vacuous -- `.all() == .all()` compares scalars; use
    # np.allclose(...) if an element-wise check is intended.
    assert keypoint_normalized.round().all() == test_array.all()
def test_extracting_handside():
    """Each hand side extracted from 42 stacked keypoints has 21 3-D points."""
    keypoints3D = np.random.rand(42, 3)
    for hand_side_index in (0, 1):
        extracted = extract_hand_side_keypoints(keypoints3D, hand_side_index)
        assert extracted.shape == (21, 3)
def test_to_homogeneous():
    """Appending the homogeneous coordinate yields a flat [..., 1] of length 4."""
    num_axes = 3
    keypoint = np.zeros((1, num_axes))
    homogeneous_keypoint = to_homogeneous_coordinates(keypoint)
    assert homogeneous_keypoint[-1] == 1
    assert homogeneous_keypoint.shape == (num_axes + 1,)
def test_to_translation_1D():
    """A 1-element translation should still produce a batched 4x4 SE3 matrix."""
    translation_matrix = build_translation_matrix_SE3([1])
    assert translation_matrix.shape == (1, 4, 4)
    # NOTE(review): vacuous -- `.all() == .all()` compares truthiness scalars;
    # use np.array_equal(...) for a real element-wise check.
    assert translation_matrix[-1].all() == np.array([0, 0, 0, 1]).all()
def test_to_translation_3D():
    """A 3-element translation should fill the last column of the SE3 matrix."""
    translation_matrix = build_translation_matrix_SE3([1, 2, 3])
    # NOTE(review): both `.all() == .all()` asserts below are vacuous -- they
    # compare truthiness scalars, not arrays; use np.array_equal(...) instead.
    assert translation_matrix[:, :, -1].all() == np.array([[1, 2, 3, 1]]).all()
    assert translation_matrix.shape == (1, 4, 4)
    assert translation_matrix[-1].all() == np.array([0, 0, 0, 1]).all()
def test_to_affine_matrix():
    """build_affine_matrix should lift a 3x3 matrix to a 4x4 affine matrix."""
    linear_part = np.arange(9).reshape((3, 3))
    affine = build_affine_matrix(linear_part)
    assert linear_part.shape == (3, 3)
    assert affine.shape == (4, 4)
def test_rotation_matrix_x():
    """Rx(30 deg) should be orthonormal with determinant 1."""
    # NOTE(review): 0.8668 looks like a typo for cos(30deg)=0.8660254 (cf. the
    # y/z tests below); also the sign placement differs from the y/z fixtures
    # (transposed convention) -- confirm against build_rotation_matrix_x.
    rotation_matrix_test = np.array([[1.0000000, 0.0000000, 0.0000000],
                                     [0.0000000, 0.8668, 0.5],
                                     [0.0000000, -0.5, 0.8668]])
    rotation_matrix = build_rotation_matrix_x(np.deg2rad(30))
    assert rotation_matrix.shape == rotation_matrix_test.shape
    assert np.round(np.linalg.det(rotation_matrix)) == 1.0
    # NOTE(review): the two `.all() == .all()` asserts below are vacuous
    # scalar comparisons; use np.allclose(...) for real checks.
    assert np.round(np.linalg.inv(rotation_matrix)).all() == \
           np.round(np.transpose(rotation_matrix)).all()
    assert rotation_matrix_test.round().all() == \
           rotation_matrix.round().all()
def test_rotation_matrix_y():
    """Ry(30 deg) should be orthonormal with determinant 1."""
    rotation_matrix_test = np.array([[0.8660254, 0.0000000, 0.5000000],
                                     [0.0000000, 1.0000000, 0.0000000],
                                     [-0.5000000, 0.0000000, 0.8660254]])
    rotation_matrix = build_rotation_matrix_y(np.deg2rad(30))
    assert rotation_matrix.shape == rotation_matrix_test.shape
    assert np.round(np.linalg.det(rotation_matrix)) == 1.0
    # NOTE(review): the two `.all() == .all()` asserts below are vacuous
    # scalar comparisons; use np.allclose(...) for real checks.
    assert np.round(np.linalg.inv(rotation_matrix)).all() == \
           np.round(np.transpose(rotation_matrix)).all()
    assert rotation_matrix_test.round().all() == \
           rotation_matrix.round().all()
def test_rotation_matrix_z():
    """Rz(30 deg) should be orthonormal with determinant 1."""
    rotation_matrix_test = np.array([[0.8660254, -0.5000000, 0.0000000],
                                     [0.5000000, 0.8660254, 0.0000000],
                                     [0.0000000, 0.0000000, 1.0000000]])
    rotation_matrix = build_rotation_matrix_z(np.deg2rad(30))
    assert rotation_matrix.shape == rotation_matrix_test.shape
    assert np.round(np.linalg.det(rotation_matrix)) == 1.0
    # NOTE(review): the two `.all() == .all()` asserts below are vacuous
    # scalar comparisons; use np.allclose(...) for real checks.
    assert np.round(np.linalg.inv(rotation_matrix)).all() == \
           np.round(np.transpose(rotation_matrix)).all()
    assert rotation_matrix_test.round().all() == \
           rotation_matrix.round().all()
def test_rotation_matrix_axis_angles():
    """Axis-angle rotation for (15, 30, 30) deg should be a proper rotation."""
    rotation_matrix_test = np.array([[0.739, -0.406, 0.536],
                                     [0.536, 0.837, -0.1],
                                     [-0.4, 0.36, 0.837]])
    rotation_matrix = rotation_from_axis_angles(np.deg2rad([15, 30, 30]))
    # NOTE(review): leftover debug print -- remove.
    print(rotation_matrix)
    assert rotation_matrix.shape == rotation_matrix_test.shape
    assert np.round(np.linalg.det(rotation_matrix)) == 1.0
    # NOTE(review): the two `.all() == .all()` asserts below are vacuous
    # scalar comparisons; use np.allclose(..., atol=1e-3) for real checks.
    assert np.round(np.linalg.inv(rotation_matrix)).all() == \
           np.round(np.transpose(rotation_matrix)).all()
    assert rotation_matrix_test.round().all() == \
           rotation_matrix.round().all()
def test_get_affine_matrix():
    """A rotation lifted to affine form should be 4x4 with [0,0,0,1] bottom row."""
    rotation_matrix = build_rotation_matrix_x(np.deg2rad(30))
    affine_rotation_matrix = build_affine_matrix(rotation_matrix)
    assert affine_rotation_matrix.shape == (4, 4)
    # NOTE(review): vacuous -- `.all() == .all()` compares truthiness scalars;
    # use np.array_equal(affine_rotation_matrix[-1], [0, 0, 0, 1]) instead.
    assert affine_rotation_matrix[-1].all() == np.array([0, 0, 0, 1]).all()
def test_hand_side_extraction(segmentation_path, label_path):
    """Hand-side detection should return the side flag plus 21x3 keypoint sets.

    NOTE(review): pytest will treat `segmentation_path`/`label_path` as
    fixtures; none are defined in this module, so this test errors when
    collected.  Provide fixtures or hard-code sample paths.
    """
    segmentation_mask = data_loader.load_images(segmentation_path)
    annotations_all = data_loader._load_annotation(label_path)
    keypoints3D = data_loader.process_keypoints_3D(annotations_all[11]['xyz'])
    hand_side, hand_side_keypoints, dominant_hand_keypoints = \
        get_hand_side_and_keypooints(segmentation_mask, keypoints3D)
    assert type(hand_side).__module__ == np.__name__
    assert hand_side == np.array([0])
    assert hand_side_keypoints.shape == (21, 3)
    assert dominant_hand_keypoints.shape == (21, 3)
def test_canonical_transformations(label_path):
    """Canonical transformation should return (42, 3) keypoints and a 3x3 rotation.

    NOTE(review): `label_path` is treated as a pytest fixture and is not
    defined in this module -- the test errors at collection time.
    """
    annotations_all = data_loader._load_annotation(label_path)
    keypoints3D = data_loader.process_keypoints_3D(annotations_all[11]['xyz'])
    transformed_keypoints, rotation_matrix = canonical_transformations_on_keypoints(
        keypoints3D.T)
    assert transformed_keypoints.shape == (42, 3)
    assert rotation_matrix.shape == (3, 3)
def test_preprocess_image():
    """The preprocessing pipeline yields one normalized 320x320 RGB batch."""
    pipeline = SequentialProcessor([
        pr.NormalizeImage(),
        pr.ResizeImage((320, 320)),
        pr.ExpandDims(0),
    ])
    processed_image = pipeline(load_image('./sample.jpg'))
    assert len(processed_image.shape) == 4
    assert processed_image.shape == (1, 320, 320, 3)
def test_image_cropping():
    """Localizing the hand in sample.jpg yields a 256x256 crop plus box metadata."""
    handsegnet = HandSegmentationNet()
    preprocess_image = SequentialProcessor(
        [pr.NormalizeImage(), pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])
    postprocess_segmentation = PostProcessSegmentation(
        320, 320)
    localize_hand = pr.Predict(handsegnet, preprocess_image,
                               postprocess_segmentation)
    image = load_image('./sample.jpg')
    hand_crop, segmentation_map, center, boxes, crop_sizes = localize_hand(
        image)
    box = boxes[0]
    xmin, ymin, xmax, ymax = box
    crop_size = crop_sizes[0]
    assert len(hand_crop.shape) == 4
    assert hand_crop.shape == (1, 256, 256, 3)
    assert len(segmentation_map.shape) == 4
    assert segmentation_map.shape == (1, 320, 320, 1)
    assert center == [[191.5, 194.5]]
    assert len(box) == 4
    assert box == [114, 153, 269, 236]
    # Fixed: the original asserted `ymin > ymax`, which contradicts the
    # [xmin, ymin, xmax, ymax] box asserted just above (153 < 236) and would
    # always fail whenever that box assert passed.
    assert xmax > xmin and ymax > ymin
    assert round(crop_size[0], 2) == 1.32
def test_segmentation_postprocess():
    """Segmentation postprocess should return crop, map, center, boxes, sizes."""
    preprocess_pipeline = SequentialProcessor(
        [pr.NormalizeImage(), pr.ResizeImage((320, 320)), pr.ExpandDims(0)])
    image = load_image('./sample.jpg')
    processed_image = preprocess_pipeline(image)
    # NOTE(review): PostProcessSegmentation is constructed with the network
    # here but with (320, 320) in test_image_cropping -- confirm which
    # constructor signature is correct.
    localization_pipeline = PostProcessSegmentation(HandSegNet)
    localization_output = localization_pipeline(processed_image)
    assert len(localization_output) == 5
    assert localization_output[0].shape == (1, 256, 256, 3)
    assert localization_output[1].shape == (1, 320, 320, 1)
    assert localization_output[2].shape == (1, 2)
    assert localization_output[3].shape == (1, 2, 2)
    assert localization_output[4].shape == (1, 1)
def test_keypoints2D_process():
    """The 2D-keypoint network should emit one 32x32x21 score-map batch."""
    preprocess_pipeline = SequentialProcessor(
        [pr.NormalizeImage(), pr.ResizeImage((320, 320)), pr.ExpandDims(0)])
    image = load_image('./sample.jpg')
    processed_image = preprocess_pipeline(image)
    localization_pipeline = PostProcessSegmentation(HandSegNet)
    localization_output = localization_pipeline(processed_image)
    keypoints_pipeline = Process2DKeypoints(HandPoseNet)
    # Drop the batch axis of the hand crop before handing it to the pipeline.
    score_maps_dict = keypoints_pipeline(np.squeeze(localization_output[0],
                                                    axis=0))
    score_maps = score_maps_dict['score_maps']
    assert score_maps.shape == (1, 32, 32, 21)
    assert len(score_maps) == 1
def test_extract_keypoints2D():
    """ExtractKeypoints should recover the location of a gaussian map's peak."""
    uv_coordinates = np.expand_dims(np.array([[0, 0], [1, 1]]), axis=0)
    gaussian_maps = create_multiple_gaussian_map(
        uv_coordinates, (256, 256), sigma=0.1, validity_mask=None)
    batched_maps = np.expand_dims(gaussian_maps, axis=0)
    extracted = ExtractKeypoints()(batched_maps)
    assert extracted[0] == [0, 0]
|
"""
Entry point for Libretto runtime mode
Runtime mode is unattended mode for "one-click" model deployment
"""
from __future__ import annotations
#
# 220221 early venv detection
#
if __name__ == "__main__":
    # Bootstrap the virtual environment before the heavy imports below run.
    # NOTE: `config` deliberately becomes a module-level global here; __main()
    # reads it later (both only execute when the script is run directly).
    from configparser import ConfigParser
    from libretto.venv import Venv
    config = ConfigParser()
    config.read("config.ini")
    Venv(__file__, config)
from typing import Union
from dataclasses import dataclass
from datetime import timedelta
import logging
import json
import tornado.ioloop
import tornado.web
import tornado.websocket
import pandas as pd
from libretto import plugin
from libretto.inout import Output
from libretto.baseblock import Block, Parent, RunSpec
from libretto.jsoncodec import Encoder, json_decode
from libretto.venv import Venv
from libretto.tpe import TPE
ioloop:tornado.ioloop.IOLoop
@dataclass
class MaskedOutput(Output):
    """
    A masked output class to disable log-to-client output

    In runtime mode everything is unattended and the only output should be
    the final result of the receipe (unless error)
    """
    # Fixed: the string above previously sat *after* the field, where it was a
    # dead expression instead of the class docstring.

    # protocol handler (WebSocket or REST) that delivers the reply
    sender:Union[WsHandler, RestHandler]

    def msg(self, code: int = -1000, msg: str = None, param: dict = None) -> None:
        """Serialize and send a message to the client.

        Positive codes (progress/log traffic) are suppressed so the client
        only ever sees the final result or an error.
        """
        if code > 0:
            return
        if param is None:
            param = {}
        param["result"] = code
        param["message"] = msg
        logging.info(f'({self.sender.hostname()}) << {code}: {msg}')
        payload = json.dumps(param, cls=Encoder)
        self.sender.msg(payload)
class Runtime:
    """
    Singleton class holding the receipe and abstracting I/O from protocols
    """
    instance = None

    def __new__(cls):
        # Classic singleton: always hand back the one shared instance.
        if Runtime.instance is not None:
            return Runtime.instance
        r = object.__new__(cls)
        r.rootblock = None
        Runtime.instance = r
        return r

    async def handle(self, body:str, sender:Union[WsHandler, RestHandler]):
        """
        Handle request from client, only "ping" and "run" is supported
        """
        # Created before the try so the except handler can always report
        # (previously `output` could be unbound there).
        output = MaskedOutput(sender)
        try:
            msg = json.loads(body, object_hook=json_decode)
            if not "action" in msg:
                output.error("Invalid input")
                return
            action = msg["action"]
            logging.info(f'({sender.hostname()}) >> {action}')
            if action=="ping":
                output.finished("OK")
            elif action=="run":
                # run() is blocking (pandas + model); push it onto the thread
                # pool so the ioloop stays responsive.
                await ioloop.run_in_executor(TPE().executor, self.run, msg["input"], output)
            else:
                # Fixed: Output.error takes only the message (see run() below);
                # the sender object was previously passed as the message.
                output.error("no action performed")
        except Exception as e:
            logging.error(repr(e))
            output.error('Server internal error')

    def run(self, x, output:Output)->None:
        """
        Cook the receipe on input `x` and respond to the client
        """
        try:
            if Runtime().rootblock is None:
                logging.error("No receipe")
                output.error("Server internal error")
                return
            try:
                x = pd.DataFrame(x)
            except ValueError:
                output.error("Invalid input")
                # Fixed: previously fell through and kept processing the
                # unconverted input after reporting the error.
                return
            if x is None or len(x) < 1:
                output.error("No input")
                return
            runspec = RunSpec(mode=RunSpec.RunMode.RUN, out=output)
            _result = Runtime().rootblock(runspec, x)
            result = _result[0]
            # Re-attach row identifiers when the receipe provides them.
            if _result[2] is not None:
                result["__ID__"] = _result[2].values
            # NaN -> None so the JSON payload is valid.
            output.finished("OK", param={"data": result.where(pd.notnull(result), None).to_dict(orient="records")})
        except Exception as e:
            logging.error(repr(e))
            output.error('Server internal error')
class WsHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint: each text message is handed to the Runtime singleton."""

    def msg(self, msg:str):
        """Send a reply frame back to the client."""
        # Fixed: RequestHandler.write() is not valid on an upgraded WebSocket
        # connection; WebSocketHandler sends frames via write_message().
        self.write_message(msg)

    def hostname(self)->str:
        """Client host name, used for logging."""
        return self.request.host_name

    def open(self, *args: str, **kwargs: str):
        r = super().open(*args, **kwargs)
        logging.info(f'({self.request.host_name}) -- WebSocket opened')
        return r

    def on_close(self) -> None:
        logging.info(f'({self.request.host_name}) -- WebSocket closed')
        return super().on_close()

    async def on_message(self, message):
        # Fixed: Runtime.handle is a coroutine; calling it without awaiting
        # only created a coroutine object, so incoming messages were never
        # processed.  Tornado awaits a coroutine returned by on_message.
        await Runtime().handle(message, self)
class RestHandler(tornado.web.RequestHandler):
    """REST endpoint: each POST body is handed to the Runtime singleton."""

    def msg(self, msg:str):
        """Send a reply chunk back to the client."""
        self.write(msg)

    def hostname(self)->str:
        """Client host name, used for logging."""
        return self.request.host_name

    async def post(self):
        host = self.request.host_name
        logging.info(f'({host}) -- REST POST')
        await Runtime().handle(self.request.body, self)
def set_ping(ioloop, timeout):
    """
    Regular interval to unblock the ioloop (for external interrupts)
    """
    def reschedule():
        set_ping(ioloop, timeout)
    ioloop.add_timeout(timeout, reschedule)
def __main():
    """
    main entry point (obviously)

    Parses arguments, loads the trained receipe model, then serves it over
    REST and/or WebSocket until interrupted.
    """
    # Fixed: Runtime.handle() resolves `ioloop` as a module global, so it must
    # be assigned here as a global; it was previously a plain local, leaving
    # the module-level name unbound and breaking every "run" request.
    global ioloop
    #
    # regular argparse
    #
    import argparse
    parser = argparse.ArgumentParser(description="Libretto Runtime")
    parser.add_argument("-name", help="Instance name")
    args = parser.parse_args()
    instance_name = args.name if "name" in args else None
    instance_name = f'Runtime.{instance_name}' if instance_name else 'Runtime'
    # NOTE(review): the flag semantics here look inverted -- a truthy "rest"
    # option ends up *disabling* REST below; confirm the intended config keys.
    norest = config.getboolean(instance_name, "rest", fallback=False)
    nows = config.getboolean(instance_name, "websocket", fallback=False)
    # Fixed: ConfigParser has getint(), not getInt(); the old spelling raised
    # AttributeError as soon as the port was read.
    port = config.getint(instance_name, "port", fallback=9876)
    model = config.get(instance_name, "model", fallback=None)
    if not model:
        logging.error("No model specified")
        exit()
    if norest and nows:
        logging.error("You cannot disable both REST and WebSocket")
        exit()
    #
    # discover and initialize plugins
    # also announce the only session "__runtime__" is created
    #
    plugin.init(config)
    plugin.dispatch(lambda _, plugin: getattr(plugin, "__new_session")("__runtime__") if hasattr(plugin, "__new_session") else None)
    #
    # Load the receipe with trained parameters
    #
    try:
        import joblib
        with open(model, "rb") as f:
            rootblock = joblib.load(f)
        if not isinstance(rootblock, Parent):
            logging.error(f'{model} is not a valid model')
            exit()
        Runtime().rootblock = rootblock
    except Exception as e:
        logging.error(repr(e))
        exit()
    logging.info("Libretto runtime started.")
    #
    # Create tornado webapp and start
    #
    handlers = []
    if not norest:
        handlers.append((rf"/rest/{instance_name}", RestHandler))
    if not nows:
        handlers.append((rf"/ws/{instance_name}", WsHandler))
    app = tornado.web.Application(handlers)
    app.listen(port)
    ioloop = tornado.ioloop.IOLoop.instance()
    set_ping(ioloop, timedelta(seconds=1))
    ioloop.start()
if __name__ == "__main__":
__main() |
<filename>gnd-sys/app/cfsinterface/telecommand.py
"""
Copyright 2022 Open STEMware Foundation
All Rights Reserved.
This program is free software; you can modify and/or redistribute it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation; version 3 with attribution addendums as found in the
LICENSE.txt
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.
This program may also be used under the terms of a commercial or enterprise
edition license of cFSAT if purchased from the copyright holder.
Purpose:
Define a Telecommand interface with the main function serving as a
command line utility.
"""
import configparser
import socket
import time
import sys
import os
import logging
logger = logging.getLogger(__name__)
if __name__ == '__main__' or 'cfsinterface' in os.getcwd():
sys.path.append('..')
from edsmission import EdsMission
from edsmission import CfeEdsTarget
from cmdtlmrouter import CmdTlmRouter
else:
from .edsmission import EdsMission
from .edsmission import CfeEdsTarget
from .cmdtlmrouter import CmdTlmRouter
from tools import hex_string
###############################################################################
class TelecommandInterface(CfeEdsTarget):
"""
Manage an EDS-defined telecommand interface. It uses the EdsMission database for Telecommand
message definitions and provides methods for loadng payload values and sending messages on
on a UDP socket. If needed, the communication design can be generalized and separated from
this class.
This class does not have any user interface dependencies and should be usable in GUI,
command line, and scripted scenarios.
"""
@staticmethod
def valid_payload(payload_entry, payload_value):
"""
Checks to see if a given payload value is valid based on the payload_entry function
Inputs:
payload_entry - EDS function to create the object that is filled by payload_value
payload_value - The user input value to be checked if an EDS object can be created
Outputs:
Boolean value of the payload_value validity
"""
try:
#todo: object_test = payload_entry[1](payload_value)
object_test = payload_entry(payload_value)
return True
except TypeError:
return False
    def __init__(self, mission, target, cmd_router_queue):
        """Create a telecommand interface for `mission`/`target`; outgoing
        commands are placed on `cmd_router_queue`."""
        super().__init__(mission, target, EdsMission.TELECOMMAND_IF)
        self.cmd_router_queue = cmd_router_queue
        # command_topic contains the topic name used to generate the current command_dict
        self.command_topic = EdsMission.TOPIC_CMD_TITLE_KEY
        # command_dict/command_list start with only the placeholder title entry
        self.command_dict = {EdsMission.COMMAND_TITLE_KEY: EdsMission.NULL_ID}
        self.command_list = [EdsMission.COMMAND_TITLE_KEY]
        # cmd_entry/cmd_obj cache the most recently resolved command type/object
        self.cmd_entry = None
        self.cmd_obj = None
def get_topic_id(self, topic_name):
topic_id = EdsMission.NULL_ID
topic_text = ""
try:
topic_id = self.topic_dict[topic_name]
except KeyError:
topic_text = "Error retrieving topic %s from current target %s" % (topic_name, self.target_name)
return topic_id, topic_text
    def get_topic_commands(self, topic_name):
        """
        Return a dictionary of commands based on a given telecommand topic

        NOTE(review): unlike get_topic_id(), an unknown topic_name here
        raises KeyError to the caller; a RuntimeError from the EDS interface
        is silently swallowed, returning a dict with only the placeholder
        entry and leaving command_topic unchanged -- confirm both are intended.
        """
        logger.debug("self.topic_dict = " + str(self.topic_dict))
        topic_id = self.topic_dict[topic_name]
        # Reset to just the placeholder title entry before repopulating.
        self.command_dict = {EdsMission.COMMAND_TITLE_KEY: EdsMission.NULL_ID}
        try:
            topic_obj = self.eds_mission.interface.Topic(topic_id)
            for command in topic_obj:
                self.command_dict[command[0]] = command[1]
            self.command_topic = topic_name
        except RuntimeError:
            pass
        return self.command_dict
def get_cmd_id(self, command_name):
command_id = EdsMission.NULL_ID
command_text = ""
try:
command_id = self.command_dict[command_name]
except KeyError:
command_text = "Error retrieving command %s from current topic %s" % (command_name, self.command_topic)
return command_id, command_text
    def get_cmd_entry(self, topic_name, command_name):
        """Resolve (and cache) the EDS entry/object for a command.

        Returns (cmd_valid, cmd_entry, cmd_obj).  When the topic exposes no
        named commands (only the placeholder entry), the entry is resolved
        from the topic itself and cmd_valid stays True.
        """
        cmd_valid = True
        #todo: Decide how class variables are used. Could use self.command_dict if assume its been loaded with current topic
        command_dict = self.get_topic_commands(topic_name)
        if len(command_dict) > 1:
            try:
                command_id = command_dict[command_name]
                self.cmd_entry = self.eds_mission.get_database_entry(command_id)
                self.cmd_obj = self.cmd_entry()
            except KeyError:
                # Unknown command name for this topic.
                cmd_valid = False
                self.cmd_entry = None
                self.cmd_obj = None
        else:
            # Topic without named commands: resolve directly from the topic.
            eds_id = self.eds_mission.get_eds_id_from_topic(topic_name)
            self.cmd_entry = self.eds_mission.get_database_entry(eds_id)
            self.cmd_obj = self.cmd_entry()
        return (cmd_valid, self.cmd_entry, self.cmd_obj)
    def set_cmd_hdr(self, topic_id, cmd_obj):
        """Fill the command header: set pub/sub routing for this interface
        and topic, then mark the CCSDS sequence flags."""
        self.eds_mission.cfe_db.SetPubSub(self.id, topic_id, cmd_obj)
        # 3 = CCSDS sequence flags for an unsegmented (complete) packet --
        # TODO confirm against the mission's CCSDS header definition.
        cmd_obj.CCSDS.SeqFlag = 3
def get_cmd_entry_payload(self,cmd_entry):
logger.debug("has_payload() - cmd_entry = " + str(cmd_entry))
#todo: Remove loop if possible
has_payload = False
payload_item = None
for item in cmd_entry:
if item[0] == 'Payload':
payload_item = item
has_payload = True
return has_payload, payload_item
    def get_payload_struct(self, base_entry, base_object, base_name):
        """
        Recursive function that goes through an EDS object structure (arrays and structs)
        to get down to the fundamental objects (ints, strings, enumerations).
        Inputs:
        base_entry - EDS function to create the base_object
        base_object - EDS Object that is iterated over to find the structure
        base_name - Name used in the recursion to get the full name of a fundamental object
        Outputs:
        EDS Object data structure: a list for arrays, a dict for containers,
        and a (name, entry, kind, enum_dict_or_None) tuple for leaf fields
        """
        struct = {}

        # Arrays
        if (self.eds_mission.lib_db.IsArray(base_object)):
            # Get the type of an array element
            array_type_split = str(type(base_object[0])).split("'")
            logger.debug("array_type_split[1] = " + str(array_type_split[1]))
            logger.debug("array_type_split[3] = " + str(array_type_split[3]))
            array_entry = self.eds_mission.get_database_named_entry(array_type_split[3])
            #todo: array_entry = self.eds_mission.lib_db.DatabaseEntry(array_type_split[1], array_type_split[3])
            array_object = array_entry()
            # Loop over all the array elements, recursing with "name[i]" labels
            struct = []
            struct_name = base_name + array_entry.Name
            for i in range(len(base_object)):
                struct_name = f"{base_name}[{i}]"
                array_struct = self.get_payload_struct(array_entry, array_object, struct_name)
                struct.append(array_struct)

        # Containers
        elif (self.eds_mission.lib_db.IsContainer(base_object)):
            # Iterate over the subobjects within the container, matching each
            # to its entry by name and recursing with "name.field" labels
            for subobj in base_object:
                for subentry in base_entry:
                    if subobj[0] == subentry[0]:
                        logger.debug("subentry[1] = " + str(subentry[1]))
                        logger.debug("subentry[2] = " + str(subentry[2]))
                        entry_eds = self.eds_mission.get_database_named_entry(subentry[2])
                        #todo: entry_eds = self.eds_mission.lib_db.DatabaseEntry(subentry[1], subentry[2])
                        struct_name = f"{base_name}.{subobj[0]}"
                        struct[subobj[0]] = self.get_payload_struct(entry_eds, subobj[1], struct_name)

        # Enumeration
        elif (self.eds_mission.lib_db.IsEnum(base_entry)):
            struct = ()
            enum_dict = {}
            # Iterate over the Enumeration labels
            for enum in base_entry:
                enum_dict[enum[0]] = enum[1]
            struct = (base_name, base_entry, 'enum', enum_dict)

        # Anything left over uses an entry field
        else:
            struct = (base_name, base_entry, 'entry', None)

        return struct
def set_payload_values(self, structure):
"""
Iterating over the payload structure from get_payload_structure function,
this create a payload object that fills in the payload of the cmd object.
Input:
structure - the result structure from get_payload_structure
Output:
result - payload structure to fill in the cmd object
"""
if isinstance(structure, dict):
logger.debug("Dictionary struct = " + str(structure))
result = {}
for item in list(structure.keys()):
result[item] = self.set_payload_values(structure[item])
elif isinstance(structure, list):
logger.debug("List struct = " + str(structure))
result = []
for item in structure:
result.append(self.set_payload_values(item))
elif isinstance(structure, tuple):
#structure = [payload_name, payload_eds_entry, payload_type, payload_list]
logger.debug("Tuple struct = " + str(structure))
result = self.load_payload_entry_value(structure[0],structure[1],structure[2],structure[3])
logger.debug("@@@result = " + str(result))
else:
#todo: Return errors and strings to keep this independent of the user interface
logger.debug("Something went wrong in the Set Payload Values function")
result = None
return result
def remove_eds_payload_name_prefix(self, eds_name):
"""
Strip the 'Payload' prefix from an EDS payload name so only the payload
name is used for the GUI
"""
return eds_name[eds_name.find('.')+1:]
    def load_payload_entry_value(self, payload_eds_name, payload_eds_entry, payload_type, payload_list):
        # Abstract hook: subclasses supply the value for a single payload entry
        # (TelecommandScript reads it from a dict, TelecommandCmdLine prompts the user).
        raise NotImplementedError
def send_command(self, cmd_obj):
"""
"""
cmd_packed = self.eds_mission.get_packed_obj(cmd_obj)
cmd_sent = True
cmd_text = cmd_packed.hex()
cmd_status = "Sent command " + self.cmd_entry.Name
self.cmd_router_queue.put(bytes(cmd_packed))
return (cmd_sent, cmd_text, cmd_status)
"""
try:
self.cmd_router_queue.put(bytes(cmd_packed))
self.socket.sendto(bytes(cmd_packed), self.cmd_ip_address)
except:
cmd_sent = False
cmd_status = "Failed to send command on socket to %s:%d" % self.cmd_ip_address
return (cmd_sent, cmd_text, cmd_status)
"""
###############################################################################
class TelecommandScript(TelecommandInterface):
    """
    Target designed to support scripts.

    Payload values are supplied programmatically through a dictionary passed
    to send_cfs_cmd() rather than prompted from a user.
    """
    def __init__(self, mission, target, cmd_router_queue):
        super().__init__(mission, target, cmd_router_queue)
        # Payload values for the command currently being sent, keyed by the
        # payload field name (without the leading 'Payload.' prefix).
        self.cmd_payload = {}

    def load_payload_entry_value(self, payload_eds_name, payload_eds_entry, payload_type, payload_list):
        # Look up the value for one payload field in the dict supplied by the
        # caller of send_cfs_cmd(); default to an empty string when absent.
        logger.debug("payload_eds_name = " + payload_eds_name)
        logger.debug("self.payload.keys() = " + str(self.cmd_payload.keys()))
        payload_name = self.remove_eds_payload_name_prefix(payload_eds_name)
        if payload_name in self.cmd_payload:
            result = self.cmd_payload[payload_name]
        else:
            result = ""
        return result

    def send_cfs_cmd(self, app_name, cmd_name, cmd_payload):
        """
        Send a command to a cFS application.

        app_name    - application name, e.g. 'TO_LAB' (topic becomes
                      '<APP>/Application/CMD')
        cmd_name    - command name within the topic
        cmd_payload - dict of payload field name -> value
        Returns (cmd_sent, cmd_text, cmd_status) from send_command().
        """
        self.cmd_payload = cmd_payload
        topic_name = app_name.upper() + '/Application/CMD'
        topic_id, topic_text = self.get_topic_id(topic_name)
        cmd_valid, cmd_entry, cmd_obj = self.get_cmd_entry(topic_name, cmd_name)
        self.set_cmd_hdr(topic_id, cmd_obj)
        cmd_has_payload, cmd_payload_item = self.get_cmd_entry_payload(cmd_entry)
        if cmd_has_payload:
            # Build the payload structure, fill it from cmd_payload, and
            # attach it to the command object before packing.
            payload_entry = self.eds_mission.get_database_named_entry(cmd_payload_item[2])
            payload = payload_entry()
            payload_struct = self.get_payload_struct(payload_entry, payload, 'Payload')
            eds_payload = self.set_payload_values(payload_struct)
            payload = payload_entry(eds_payload)
            cmd_obj['Payload'] = payload
        (cmd_sent, cmd_text, cmd_status) = self.send_command(cmd_obj)
        if cmd_sent == True:
            cmd_status = "%s %s command sent" % (app_name, cmd_name)
            logger.debug(hex_string(cmd_text, 8))
        else:
            logger.info(cmd_status)
        return (cmd_sent, cmd_text, cmd_status)
###############################################################################
class TelecommandCmdLine(TelecommandInterface):
    """
    Command line tool to interact with a user to manually send commands to a cFS target. Helpful
    for informal verification of a system configuration.
    """
    def __init__(self, mission, target, cmd_router_queue):
        super().__init__(mission, target, cmd_router_queue)

    def load_payload_entry_value(self, payload_eds_name, payload_eds_entry, payload_type, payload_list):
        # Prompt the user for one payload field. For enumerations, list the
        # valid labels first; keep prompting until the EDS entry accepts the value.
        if payload_type == 'enum':
            print()
            for key in list(payload_list.keys()):
                print(key)
        while True:
            result = None
            value = input("\nFor {} ({}) Enter Value > ".format(payload_eds_name, payload_eds_entry))
            try:
                result = payload_eds_entry(value)
                break
            except TypeError:
                print("Invalid value for {}".format(payload_eds_name))
                continue
        return result

    def send_user_command(self):
        """
        Interactively select a topic and a command, collect payload values
        from the user, and send the command.
        """
        # --- Topic selection ---
        topic_dict = self.get_topics()
        logger.debug("topics = " + str(topic_dict))
        print("Topic List:")
        topic_list = []
        user_topic_id = 0
        for topic in topic_dict.keys():
            topic_list.append(topic)
            print("%2d: %s" % (user_topic_id,topic))
            user_topic_id += 1
        eds_topic_id = EdsMission.NULL_ID
        while True:
            user_id = int(input("\nInput numeric topic ID> "))
            # NOTE(review): entering 0 takes the abort branch, so the entry
            # listed as 0 cannot be selected — confirm intended.
            if user_id > 0 and user_id < user_topic_id:
                topic_name = topic_list[user_id]
                topic_id, topic_text = self.get_topic_id(topic_name)
                print("Selected topic %s with EDS ID %d" % (topic_name, topic_id))
                break
            else:
                # NOTE(review): topic_name stays unbound after an abort, so the
                # code below would raise NameError — confirm intended flow.
                print("Aborted topic selection")
                break
        # --- Command selection ---
        command_dict = self.get_topic_commands(topic_name)
        logger.debug("commands = " + str(command_dict))
        if len(command_dict) > 1:
            print("Command List:")
            command_list = []
            user_command_id = 0
            for command in command_dict.keys():
                command_list.append(command)
                print("%2d: %s" % (user_command_id,command))
                user_command_id += 1
            while True:
                user_id = int(input("\nInput numeric command ID> "))
                if user_id > 0 and user_id < user_command_id:
                    command_name = command_list[user_id]
                    command_id, command_text = self.get_cmd_id(command_name)
                    print("Selected command %s with EDS ID %d" % (command_name, command_id))
                    break
                else:
                    print("Aborted command selection")
                    break
        # NOTE(review): when the topic exposes a single command, command_name
        # is never assigned before this call — verify against get_cmd_entry usage.
        (cmd_valid, cmd_entry, cmd_obj) = self.get_cmd_entry(topic_name, command_name)
        if cmd_valid == True:
            logger.debug("self.cmd_entry = " + str(cmd_entry))
            logger.debug("self.cmd_obj = " + str(cmd_obj))
            self.set_cmd_hdr(topic_id, cmd_obj)
            cmd_has_payload, cmd_payload_item = self.get_cmd_entry_payload(cmd_entry)
            if cmd_has_payload:
                # Use the information from the database entry iterator to get a payload Entry and object
                logger.debug("cmd_payload_item[1] = " + str(cmd_payload_item[1]))
                logger.debug("cmd_payload_item[2] = " + str(cmd_payload_item[2]))
                #todo: payload_entry = self.eds_mission.lib_db.DatabaseEntry(cmd_payload_item[1], cmd_payload_item[2])
                payload_entry = self.eds_mission.get_database_named_entry(cmd_payload_item[2])
                payload = payload_entry()
                payload_struct = self.get_payload_struct(payload_entry, payload, 'Payload')
                eds_payload = self.set_payload_values(payload_struct)
                payload = payload_entry(eds_payload)
                cmd_obj['Payload'] = payload
            (cmd_sent, cmd_text, cmd_status) = self.send_command(cmd_obj)
            if cmd_sent == True:
                print(hex_string(cmd_text, 8))
            else:
                print(cmd_text)
        else:
            print("Error retrieving command %s using topic ID %d" % (command_name, topic_id))

    def execute(self):
        # Loop sending commands until the user enters any non-empty string.
        while True:
            self.send_user_command()
            input_str = input("\nPress <Enter> to send another command. Enter any character to exit> ")
            if len(input_str) > 0:
                break
###############################################################################
def main():
    """
    Read mission/network settings from ../cfsat.ini, create the command/telemetry
    router and telecommand objects, send a TO_LAB EnableOutputCmd, then run the
    interactive command-line tool until the user exits.
    """
    config = configparser.ConfigParser()
    config.read('../cfsat.ini')
    MISSION = config.get('CFS_TARGET', 'MISSION_EDS_NAME')
    CFS_TARGET = config.get('CFS_TARGET', 'CPU_EDS_NAME')
    HOST_ADDR = config.get('NETWORK', 'CFS_HOST_ADDR')
    CMD_PORT = config.getint('NETWORK', 'CFS_SEND_CMD_PORT')
    TLM_PORT = config.getint('NETWORK', 'CFS_RECV_TLM_PORT')
    # Timeout is stored in milliseconds in the ini file; convert to seconds.
    TLM_TIMEOUT = float(config.getint('CFS_TARGET', 'RECV_TLM_TIMEOUT'))/1000.0
    system_string = "Mission: %s, Target: %s, Host: %s, Command Port %d" % (MISSION, CFS_TARGET, HOST_ADDR, CMD_PORT)
    print("Creating telecommand objects for " + system_string)
    try:
        cmd_tlm_router = CmdTlmRouter(HOST_ADDR, CMD_PORT, HOST_ADDR, TLM_PORT, TLM_TIMEOUT)
        telecommand_script = TelecommandScript(MISSION, CFS_TARGET, cmd_tlm_router.get_cfs_cmd_queue())
        telecommand_cmd_line = TelecommandCmdLine(MISSION, CFS_TARGET, cmd_tlm_router.get_cfs_cmd_queue())
        logger.info("Telecommand object created for " + system_string)
    except RuntimeError:
        print("Error creating telecommand object for " + system_string)
        sys.exit(2)
    cmd_tlm_router.start()
    # Ask TO_LAB to enable telemetry output to the local host before going interactive.
    telecommand_script.send_cfs_cmd('TO_LAB','EnableOutputCmd',{'dest_IP':'127.0.0.1'})
    telecommand_cmd_line.execute()
    cmd_tlm_router.shutdown()
# Script entry point: run the interactive telecommand tool.
if __name__ == "__main__":
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file belong to https://github.com/snolfi/evorobotpy
and has been written by <NAME>, <EMAIL>
coevo2.py include an implementation of an competitive co-evolutionary algorithm analogous
to that described in:
<NAME> and <NAME>. (2019). Long-Term Progress and Behavior Complexification in Competitive Co-Evolution, arXiv:1909.08303.
Requires es.py policy.py and evoalgo.py
Also requires the net.so library that can be obtained by compiling with cython the following files contained in the ./lib directory:
evonet.cpp, evonet.h, utilities.cpp, utilities.h, net.pxd, net.pyx and setupevonet.py
with the commands: cd ./evorobotpy/lib; python3 setupevonet.py build_ext --inplace; cp net*.so ../bin
"""
import numpy as np
from numpy import zeros, dot, sqrt
import math
import time
from evoalgo import EvoAlgo
from utils import ascendent_sort
import random
import os
import sys
import configparser
# competitive coevolutionary algorithm operating on two populations
class Algo(EvoAlgo):
    """
    Competitive coevolutionary algorithm operating on two populations
    (the second population's individuals are stored after the first in the
    shared parameter arrays). See Simione & Nolfi (2019), arXiv:1909.08303.
    """
    def __init__(self, env, policy, seed, fileini, filedir):
        EvoAlgo.__init__(self, env, policy, seed, fileini, filedir)
def loadhyperparameters(self):
if os.path.isfile(self.fileini):
config = configparser.ConfigParser()
config.read(self.fileini)
self.popsize = 80
self.selsize = 10
self.ngenerations = 1000
self.stepsize = 0.01
self.batchSize = 20
self.noiseStdDev = 0.02
self.wdecay = 0
self.saveeach = 100
options = config.options("ALGO")
for o in options:
found = 0
if o == "ngenerations":
self.ngenerations = config.getint("ALGO","ngenerations")
found = 1
if o == "selsize":
self.selsize = config.getint("ALGO","selsize")
found = 1
if o == "popsize":
self.popsize = config.getint("ALGO","popsize")
found = 1
if o == "stepsize":
self.stepsize = config.getfloat("ALGO","stepsize")
found = 1
if o == "noisestddev":
self.noiseStdDev = config.getfloat("ALGO","noiseStdDev")
found = 1
if o == "samplesize":
self.batchSize = config.getint("ALGO","sampleSize")
found = 1
if o == "wdecay":
self.wdecay = config.getint("ALGO","wdecay")
found = 1
if o == "saveeach":
self.saveeach = config.getint("ALGO","saveeach")
found = 1
if found == 0:
print("\033[1mOption %s in section [ALGO] of %s file is unknown\033[0m" % (o, filename))
print("available hyperparameters are: ")
print("ngenerations [integer] : max number of generations (default 200)")
print("popsize [integer] : popsize (default 40)")
print("selsize [integer] : number selected agents (default 10)")
print("stepsize [float] : learning stepsize (default 0.01)")
print("samplesize [int] : samplesize/2 (default 20)")
print("noiseStdDev [float] : samples noise (default 0.02)")
print("wdecay [0/2] : weight decay (default 0), 1 = L1, 2 = L2")
print("saveeach [integer] : save file every N generations (default 100)")
sys.exit()
else:
print("\033[1mERROR: configuration file %s does not exist\033[0m" % (self.fileini))
    def run(self):
        """
        Main coevolutionary loop.

        Alternately evolves the two populations (self.evopop toggles 0/1):
        each generation selects differentiated competitors and random
        individuals, evolves the selected individuals with runphase(),
        post-evaluates them against the whole opponent population, and
        replaces worse-ranked population members with evolved individuals
        that outrank them. Populations and the fitness matrix are saved to
        .npy files every self.saveeach generations.
        """
        self.loadhyperparameters() # load hyperparameters
        seed = self.seed
        self.rs = np.random.RandomState(self.seed)
        # Extract the number of parameters
        nparams = int(self.policy.nparams / 2) # parameters required for a single individul
        # allocate and vectors
        pop = [] # the populations (the individuals of the second pop follow)
        popm = [] # the momentum of the populations
        popv = [] # the squared momentum of the populations
        self.candidate = np.arange(nparams, dtype=np.float64) # the vector containing varied parameters
        # NOTE(review): 'popsize++selsize' parses as popsize + (+selsize), i.e. plain addition
        self.fmatrix = np.zeros((self.popsize+self.selsize, self.popsize++self.selsize), dtype=np.float64) # the fitness of each individual against each competitor
        # the additional lines and columns contain data of evolving individuals
        self.selp = np.arange(nparams*self.selsize, dtype=np.float64) # the parameters of the selected individuals
        self.selm = np.arange(nparams*self.selsize, dtype=np.float64) # the momentum of the selected individuals
        self.selv = np.arange(nparams*self.selsize, dtype=np.float64) # the squared-momentum of the selected individuals
        self.selcomp = np.arange(nparams*self.selsize, dtype=np.float64) # the parameters of the selected competitors
        self.selp = np.resize(self.selp, (self.selsize, nparams))
        self.selm = np.resize(self.selm, (self.selsize, nparams))
        self.selv = np.resize(self.selv, (self.selsize, nparams))
        self.selcomp = np.resize(self.selcomp, (self.selsize, nparams))
        # initialize population vectors
        for i in range(self.popsize*2):
            self.policy.nn.initWeights()
            randomparams = np.copy(self.policy.get_trainable_flat())
            pop.append(randomparams[:nparams])
            popm.append(zeros(nparams))
            popv.append(zeros(nparams))
        pop = np.asarray(pop)
        popm = np.asarray(popm)
        popv = np.asarray(popv)
        print("Coevo2 seed %d Popsize %d %d batchSize %d stepsize %lf noiseStdDev %lf wdecay %d nparams %d" % (self.seed, self.popsize, self.selsize, self.batchSize, self.stepsize, self.noiseStdDev, self.wdecay, nparams))
        # evaluate pop1 against pop2
        print("gen %d eval pop1 against pop2" % (0))
        for i1 in range(self.popsize):
            for i2 in range(self.popsize):
                self.policy.set_trainable_flat(np.concatenate((pop[i1], pop[self.popsize+i2])))
                eval_rews, eval_length = self.policy.rollout(1)
                self.fmatrix[i1][i2] = eval_rews
                #print("%.2f " % (eval_rews), end = '')
            #print("")
        filename = "S%dG0.npy" % (seed)
        np.save(filename, pop)
        filename = "S%dFitG0.npy" % (seed)
        np.save(filename, self.fmatrix)
        # main loop
        self.evopop = 0 # whether the first or the second pop evolve
        for gen in range(self.ngenerations):
            # chooses the selected competitors
            #self.selc = random.sample(range(self.popsize), self.selsize)
            self.selc = self.seldiffcomp()
            print("gen %d competitors: " % (gen), end = '')
            print(self.selc)
            # chooses the selected individuals
            self.seli = random.sample(range(self.popsize), self.selsize)
            # update the matrix of selected individuals (with associated momentum vectors) and the matrix of selected competitors
            for sind in range(self.selsize):
                if (self.evopop == 0):
                    for p in range(nparams):
                        self.selp[sind][p] = pop[self.seli[sind]][p]
                        self.selm[sind][p] = popm[self.seli[sind]][p]
                        self.selv[sind][p] = popv[self.seli[sind]][p]
                        self.selcomp[sind][p] = pop[self.popsize+self.selc[sind]][p]
                else:
                    for p in range(nparams):
                        self.selp[sind][p] = pop[self.popsize+self.seli[sind]][p]
                        self.selm[sind][p] = popm[self.popsize+self.seli[sind]][p]
                        self.selv[sind][p] = popv[self.popsize+self.seli[sind]][p]
                        self.selcomp[sind][p] = pop[self.selc[sind]][p]
            # evolve individuals
            for sind in range(self.selsize):
                self.runphase(sind, nparams)
            # test evolving individual agaist all competitors
            print("gen %d postevaluate against all competitors" % (gen))
            for i1 in range(self.selsize):
                for i2 in range(self.popsize):
                    if (self.evopop == 0):
                        self.policy.set_trainable_flat(np.concatenate((self.selp[i1], pop[self.popsize+i2])))
                    else:
                        self.policy.set_trainable_flat(np.concatenate((pop[i2], self.selp[i1])))
                    eval_rews, eval_length = self.policy.rollout(1)
                    if (self.evopop == 0):
                        self.fmatrix[self.popsize+i1][i2] = eval_rews # additional rows of the popsize*popsize matrix
                    else:
                        self.fmatrix[i2][self.popsize+i1] = eval_rews # additional columns of the popsize*popsize matrix
            if (self.evopop == 0):
                # average lines (pop 1), submatrix of popsize+selsize raws and popsize columns
                fm = self.fmatrix[0:self.popsize+self.selsize,0:self.popsize].mean(axis=1, dtype='float')
                orderfm = fm.argsort() # sort ascending order
            else:
                # average columns (pop 2), submatrix of popsize raws and popsize+selsize columns
                fm = self.fmatrix[0:self.popsize,0:self.popsize+self.selsize].mean(axis=0, dtype='float')
                fm = 1.0 - fm # transfor fitness from pop1 to pop2 point of view
                orderfm = fm.argsort() # sort ascending order
            # replace the worst population individuals with the evolving individuals that ootperform them in postevaluation
            replaced = 0
            # skip ranks occupied by evolving individuals (index >= popsize)
            while (orderfm[replaced] >= self.popsize and replaced < (self.popsize - 1)):
                replaced += 1
            localprog = 0
            for i in range(self.popsize):
                if (orderfm[i+self.selsize] >= self.popsize): # evolving individual ranked among the best
                    evoi = orderfm[i+self.selsize] - self.popsize
                    worsei = orderfm[replaced]
                    print("%d->%d %.2f " % (evoi,worsei, fm[self.popsize + evoi] - fm[worsei]), end ='')
                    if (self.evopop == 0):
                        for p in range(nparams):
                            pop[worsei][p] = self.selp[evoi][p]
                            popm[worsei][p] = self.selm[evoi][p]
                            popv[worsei][p] = self.selv[evoi][p]
                        localprog += fm[self.popsize + evoi] - fm[worsei]
                        # copy the evolved individual's fitness row into the replaced slot
                        for c in range(self.popsize):
                            self.fmatrix[worsei][c] = self.fmatrix[evoi+self.popsize][c]
                    else:
                        for p in range(nparams):
                            pop[worsei+self.popsize][p] = self.selp[evoi][p]
                            popm[worsei+self.popsize][p] = self.selm[evoi][p]
                            popv[worsei+self.popsize][p] = self.selv[evoi][p]
                        localprog += fm[self.popsize + evoi] - fm[worsei]
                        # copy the evolved individual's fitness column into the replaced slot
                        for c in range(self.popsize):
                            self.fmatrix[c][worsei] = self.fmatrix[c][evoi+self.popsize]
                    replaced += 1
                    while (orderfm[replaced] >= self.popsize and replaced < (self.popsize - 1)):
                        replaced += 1
            print("local progress %.2f " % (localprog / self.selsize))
            # save evolving populations and fitness matrix
            if (((gen + 1) % self.saveeach) == 0):
                filename = "S%dG%d.npy" % (seed, gen + 1)
                np.save(filename, pop)
                filename = "S%dFitG%d.npy" % (seed, gen + 1)
                np.save(filename, self.fmatrix)
            # momentum vectors are saved less frequently
            if (((gen + 1) % (self.saveeach * 10)) == 0):
                filename = "S%dG%dm.npy" % (seed, gen + 1)
                np.save(filename, popm)
                filename = "S%dG%dv.npy" % (seed, gen + 1)
                np.save(filename, popv)
            fm = self.fmatrix[0:self.popsize,0:self.popsize].mean(dtype='float')
            print("seed %d gen %d popfit %.2f %.2f weights %.2f" % (seed, gen, fm, 1.0 - fm, np.average(np.absolute(pop))))
            # changes the evolving population
            self.evopop += 1
            if (self.evopop > 1):
                self.evopop = 0
# select differentiated competitors
# the first is chosen randomly, the next are those that achieved the maximum different performance
def seldiffcomp(self):
comp = np.zeros(self.selsize)
unselected = np.arange(self.popsize)
selected = []
# first competitor is selected randomly
selind = random.randint(0, self.popsize-1)
selected.append(selind)
unselected = np.delete(unselected, selind)
# select the competitor that differ more with respect to already selected competitors
while (len(selected) < self.selsize):
selind = 0
maxdiff = 0
for i1 in range(len(unselected)):
diff = 0
for i2 in range(len(selected)):
for i3 in range(self.popsize):
if (self.evopop == 0):
diff += abs(self.fmatrix[i3][unselected[i1]] - self.fmatrix[i3][selected[i2]])
else:
diff += abs(self.fmatrix[unselected[i1]][i3] - self.fmatrix[selected[i2]][i3])
if (diff > maxdiff):
selind = i1
maxdiff = diff
selected.append(unselected[selind])
unselected = np.delete(unselected, selind)
return(selected)
    # evolve selected individuls against selected competitors
    def runphase(self, sind, nparams):
        """
        Evolve one selected individual (index sind into self.selp/selm/selv)
        against the selected competitors for 20 evolution-strategy iterations.

        Each iteration evaluates symmetric Gaussian perturbations of the
        centroid, converts ranks to utilities in [-0.5, 0.5], estimates a
        gradient, and applies an Adam-style update (without bias correction)
        to the centroid self.selp[sind].
        """
        epsilon = 1e-08
        beta1 = 0.9
        beta2 = 0.999
        weights = zeros(self.batchSize)
        for it in range (20):
            ave_rews = 0
            # evaluate the centroid
            for i in range(self.selsize):
                if (self.evopop == 0):
                    self.policy.set_trainable_flat(np.concatenate((self.selp[sind], self.selcomp[i])))
                    eval_rews, eval_length = self.policy.rollout(1)
                    # sanity check
                    if (it == 0 and eval_rews != self.fmatrix[self.seli[sind],self.selc[i]]):
                        print("warning: sanity check failed")
                    ave_rews += eval_rews
                else:
                    self.policy.set_trainable_flat(np.concatenate((self.selcomp[i], self.selp[sind])))
                    eval_rews, eval_length = self.policy.rollout(1)
                    # sanity check
                    if (it == 0 and eval_rews != self.fmatrix[self.selc[i],self.seli[sind]]):
                        print("warning: sanity check failed")
                    # pop2 maximizes the complement of pop1's reward
                    ave_rews += (1.0 - eval_rews)
            ave_rews /= float(self.selsize)
            #print("centroid ", end ='')
            #for g in range(10):
                #print("%.4f " % (self.selp[sind][g+20]), end='')
            #print("");
            if (it == 0):
                print("evopop %d ind %2d : " % (self.evopop, self.seli[sind]), end = '')
                print("%.2f " % (ave_rews), end='')
            # Extract half samples from Gaussian distribution with mean 0.0 and standard deviation 1.0
            samples = self.rs.randn(self.batchSize, nparams)
            fitness = zeros(self.batchSize * 2)
            # Evaluate offspring
            for b in range(self.batchSize):
                for bb in range(2):
                    # bb == 0 adds the perturbation, bb == 1 subtracts it (mirrored sampling)
                    if (bb == 0):
                        for g in range(nparams):
                            self.candidate[g] = self.selp[sind][g] + samples[b,g] * self.noiseStdDev
                    else:
                        for g in range(nparams):
                            self.candidate[g] = self.selp[sind][g] - samples[b,g] * self.noiseStdDev
                    #print("candidad ", end ='')
                    #for g in range(10):
                        #print("%.4f " % (self.candidate[g+20]), end='')
                    #print("");
                    # evaluate offspring
                    ave_rews = 0
                    for c in range(self.selsize):
                        if (self.evopop == 0):
                            self.policy.set_trainable_flat(np.concatenate((self.candidate, self.selcomp[c])))
                            eval_rews, eval_length = self.policy.rollout(1)
                            ave_rews += eval_rews
                        else:
                            self.policy.set_trainable_flat(np.concatenate((self.selcomp[c], self.candidate)))
                            eval_rews, eval_length = self.policy.rollout(1)
                            ave_rews += (1.0 - eval_rews)
                        #print("f %.2f" % eval_rews)
                    fitness[b*2+bb] = ave_rews / float(self.selsize)
                    #print("%.2f " % (ave_rews / float(self.selsize)), end = '')
            # Sort by fitness and compute weighted mean into center
            fitness, index = ascendent_sort(fitness)
            # Now me must compute the symmetric weights in the range [-0.5,0.5]
            utilities = zeros(self.batchSize * 2)
            for i in range(self.batchSize * 2):
                utilities[index[i]] = i
            utilities /= (self.batchSize * 2 - 1)
            utilities -= 0.5
            # Now we assign the weights to the samples
            for i in range(self.batchSize):
                idx = 2 * i
                weights[i] = (utilities[idx] - utilities[idx + 1]) # pos - neg
            # Compute the gradient in chunks of at most 500 samples
            g = 0.0
            i = 0
            while i < self.batchSize:
                gsize = -1
                if self.batchSize - i < 500:
                    gsize = self.batchSize - i
                else:
                    gsize = 500
                g += dot(weights[i:i + gsize], samples[i:i + gsize,:]) # weights * samples
                i += gsize
            # Normalization over the number of samples
            g /= (self.batchSize * 2)
            # Weight decay
            if (self.wdecay == 1):
                globalg = -g + 0.005 * self.selp[sind]
            else:
                globalg = -g
            # ADAM stochastic optimizer
            # a = self.stepsize * sqrt(1.0 - beta2 ** cgen) / (1.0 - beta1 ** cgen)
            a = self.stepsize # bias correction is not implemented
            self.selm[sind] = beta1 * self.selm[sind] + (1.0 - beta1) * globalg
            self.selv[sind] = beta2 * self.selv[sind] + (1.0 - beta2) * (globalg * globalg)
            dCenter = -a * self.selm[sind] / (sqrt(self.selv[sind]) + epsilon)
            # update center
            self.selp[sind] += dCenter
            #for g in range(10):
                #print("%.4f " % (self.selp[sind][g+20]), end='')
            #print("");
        # evaluate the evolving individual at the end of the evolution phase
        ave_rews = 0
        for i in range(self.selsize):
            if (self.evopop == 0):
                self.policy.set_trainable_flat(np.concatenate((self.selp[sind], self.selcomp[i])))
                eval_rews, eval_length = self.policy.rollout(1)
                ave_rews += eval_rews
            else:
                self.policy.set_trainable_flat(np.concatenate((self.selcomp[i], self.selp[sind])))
                eval_rews, eval_length = self.policy.rollout(1)
                ave_rews += (1.0 - eval_rews)
        ave_rews /= float(self.selsize)
        print("%.2f" % (ave_rews))
def testusage(self):
print("ERROR: To post-evaluate with the coevo algorithm you should specify with the -g parameter a string containing:")
print("P-ng-ni (postevaluate a population) where ng is generation number and ni the number of best agents to be posteveluated")
print("p-ng-ni (postevaluate a population without displaying the behavior)")
print("m-ng-ngg (master tournament) where ng is the last generation number and ngg is the generation interval ")
print("c-pop1-pop2 (population cross test) pop1 and pop2 are the name of the files containing the populations to be cross-tested ")
sys.exit()
    def test(self, testparam):
        """
        Post-evaluation entry point. testparam is a 3-field '-'-separated string:
        P/p-ng-ni : postevaluate generation ng (best ni agents); 'P' renders behavior
        m-ng-ngg  : master tournament of generation ng against previous generations
        M-n1-n2   : master tournament of all generations up to n1, every n2
        c/C-f1-f2 : cross test between the populations stored in files f1 and f2
        """
        if testparam is None:
            self.testusage()
        if "-" not in testparam:
            self.testusage()
        seed = self.seed
        parsen = testparam.split("-")
        if (len(parsen) != 3 or not parsen[0] in ["P", "p", "m", "M","c", "C"]):
            self.testusage()
        # P-g-max: Test generation g (only the best max individuals)
        # P renders behavior, "p" only print fitness
        if (parsen[0] == "p" or parsen[0] == "P"):
            if (parsen[0] == "P"):
                self.policy.test = 1
                rendt = True
            else:
                self.policy.test = 0
                rendt = False
            popfile = "S%dG%d.npy" % (seed,int(parsen[1]))
            print("load %s" % (popfile))
            pop = np.load(popfile)
            popshape = pop.shape
            # first half of the rows is pop1, second half is pop2
            popsize = int(popshape[0] / 2)
            if (len(parsen) >= 3):
                maxi = int(parsen[2])
            else:
                maxi = popsize
            fmatrixfile = "S%dFitG%d.npy" % (seed, int(parsen[1]))
            fmatrix = np.load(fmatrixfile)
            # row means rank pop1, column means rank pop2
            fit1 = fmatrix[0:popsize,0:popsize].mean(axis=1, dtype='float')
            rank1 = fit1.argsort() # sort ascending order
            fit2 = fmatrix[0:popsize,0:popsize].mean(axis=0, dtype='float')
            rank2 = fit2.argsort() # sort ascending order
            # print the matrix loaded from file
            print("    ", end = '')
            for i1 in range(popsize):
                print("\033[1m%4d \033[0m" % (i1), end = '')
            print("")
            for i1 in range(popsize):
                print("\033[1m%3d \033[0m" % (i1), end = '')
                for i2 in range(popsize):
                    print("%.2f " % (fmatrix[i1,i2]), end = '')
                print("\033[1m%.2f\033[0m" % (fit1[i1]))
            print("    ", end = '')
            for i1 in range(popsize):
                print("\033[1m%.2f \033[0m" % (fit2[i1]), end = '')
            print("")
            # test in order of performance
            print("")
            if (not rendt):
                print("    ", end = '')
                for i2 in range(maxi):
                    print("\033[1m%4d \033[0m" % (rank2[i2]), end = '')
                print("")
            fitcol = np.zeros(maxi)
            # walk pop1 from best (end of the ascending ranking) downwards
            i1 = popsize - 1
            ii1 = 0
            while (ii1 < maxi):
                if (not rendt):
                    print("\033[1m%3d \033[0m" % (rank1[i1]), end = '')
                tot_rew = 0
                for i2 in range(maxi):
                    if (rendt):
                        print("pred %d prey %d " % (rank1[i1], rank2[i2]), end = '')
                    self.policy.set_trainable_flat(np.concatenate((pop[rank1[i1]], pop[popsize+rank2[i2]])))
                    eval_rews, eval_length = self.policy.rollout(1)
                    tot_rew += eval_rews
                    fitcol[i2] += eval_rews
                    if (not rendt):
                        print("%.2f " % eval_rews, end = '')
                        # warn when the re-evaluation disagrees with the stored matrix
                        if (fmatrix[rank1[i1]][rank2[i2]] != eval_rews):
                            print("warning [%.2f %.2f]" % (eval_rews, fmatrix[rank1[i1]][rank2[i2]]) , end = '')
                if (not rendt):
                    print("\033[1m%.2f\033[0m" % (tot_rew / float(maxi)))
                i1 -= 1
                ii1 += 1
            if (not rendt):
                print("    ", end = '')
                for i2 in range(maxi):
                    print("\033[1m%.2f \033[0m" % (fitcol[i2] / float(maxi)), end = '')
                print("")
        # "m-n1-n2, Master tournament (only last gen), test pop of generation n1 against competitors of previous generations every n2 generations
        if (parsen[0] == "m"):
            popfile = "S%dG%d.npy" % (seed,int(parsen[1]))
            pop = np.load(popfile)
            popshape = pop.shape
            popsize = int(popshape[0] / 2)
            self.policy.test = 0
            bestrew1 = ""
            bestrew2 = ""
            print("seed %d: postevaluation gen %d against contemporary and ancient competitors every %d generaions" % (seed, int(parsen[1]), int(parsen[2])))
            # pp == 0 evaluates the predators, pp == 1 the preys
            for pp in range(2):
                if (pp == 0):
                    print("pred: ", end ='', flush=True)
                else:
                    print("prey: ", end ='', flush=True)
                cgen = int(parsen[1])
                while (cgen >= 0):
                    pop2file = "S%dG%d.npy" % (seed,cgen)
                    pop2 = np.load(pop2file)
                    tot_rew = 0
                    max_ind_rew = 0
                    for i1 in range(popsize):
                        ind_rew = 0
                        for i2 in range(popsize):
                            if (pp == 0):
                                self.policy.set_trainable_flat(np.concatenate((pop[i1], pop2[popsize+i2])))
                            else:
                                self.policy.set_trainable_flat(np.concatenate((pop2[i1], pop[popsize+i2])))
                            rew, eval_length = self.policy.rollout(1)
                            tot_rew += rew
                            ind_rew += rew
                        ind_rew = ind_rew / popsize
                        if (ind_rew > max_ind_rew):
                            max_ind_rew = ind_rew
                    if (pp == 0):
                        print("%.2f " % (tot_rew / (popsize*popsize)), end = '', flush=True)
                    else:
                        print("%.2f " % (1.0 - (tot_rew / (popsize*popsize))), end = '', flush=True)
                    if (pp == 0):
                        bestrew1 += "%.2f " % (max_ind_rew)
                        #bestrew1.append(st)
                    else:
                        bestrew2 += "%.2f " % (1.0 - max_ind_rew)
                        #bestrew2.append(st)
                    cgen -= int(parsen[2])
                print("")
            print("pred-max: ", end = '')
            print(bestrew1)
            print("prey-max: ", end = '')
            print(bestrew2)
        # "M-n1-n2, Master tournament, test pop of all generations up to generation n1 against opponent of all generations, every n2 generations
        if (parsen[0] == "M"):
            uptogen = int(parsen[1])
            everygen = int(parsen[2])
            ntests = int(uptogen / everygen)
            popfile = "S%dG%d.npy" % (seed,0)
            pop = np.load(popfile)
            popshape = pop.shape
            popsize = int(popshape[0] / 2)
            self.policy.test = 0
            master = np.zeros((ntests, ntests), dtype=np.float64) # matrix with the average performance of every generation against every other generation
            print("seed %d: postevaluation all generations up to %d against all competitors, every %d generations" % (seed, uptogen, everygen))
            for p in range(ntests):
                for pp in range(ntests):
                    popfile = "S%dG%d.npy" % (seed,p * everygen)
                    pop = np.load(popfile)
                    pop2file = "S%dG%d.npy" % (seed,pp * everygen)
                    pop2 = np.load(pop2file)
                    tot_rew = 0
                    max_ind_rew = 0
                    for i1 in range(popsize):
                        ind_rew = 0
                        for i2 in range(popsize):
                            # NOTE(review): here pp is the opponent generation
                            # index, not a pred/prey flag as in the 'm' mode —
                            # this branch looks copied from there; confirm.
                            if (pp == 0):
                                self.policy.set_trainable_flat(np.concatenate((pop[i1], pop2[popsize+i2])))
                            else:
                                self.policy.set_trainable_flat(np.concatenate((pop2[i1], pop[popsize+i2])))
                            rew, eval_length = self.policy.rollout(1)
                            tot_rew += rew
                            ind_rew += rew
                        ind_rew = ind_rew / popsize
                        if (ind_rew > max_ind_rew):
                            max_ind_rew = ind_rew
                    # NOTE(review): normalization uses ntests*ntests although
                    # tot_rew accumulates popsize*popsize rollouts — confirm.
                    master[p][pp] = tot_rew / float(ntests * ntests)
            mfile = "masterS%d.npy" % (seed)
            np.save(mfile, master)
        # "C-file1-file2, cross-experiment (pred and prey of file1 against themselves and against prey and pred of file2
        if (parsen[0] == "c" or parsen[0] == "C"):
            print("crosstest of %s against %s " % (parsen[1], parsen[2]))
            self.policy.test = 0
            pop1 = np.load(parsen[1])
            popshape1 = pop1.shape
            popsize1 = int(popshape1[0] / 2)
            pop2 = np.load(parsen[2])
            popshape2 = pop2.shape
            popsize2 = int(popshape2[0] / 2)
            assert popshape1[1] == popshape2[1], "the number of parameters in the two file is inconsistent"
            # 4 cases, pred1-prey1, pred1-prey2, pred2-prey1, pred2-prey2
            tot_rew = [0,0,0,0]
            for pp in range(4):
                if (pp == 0):
                    print("pred1-prey1: ", end ='', flush=True)
                    psizea = popsize1
                    psizeb = popsize1
                if (pp == 1):
                    print("pred1-prey2: ", end ='', flush=True)
                    psizea = popsize1
                    psizeb = popsize2
                if (pp == 2):
                    print("pred2-prey1: ", end ='', flush=True)
                    psizea = popsize2
                    psizeb = popsize1
                if (pp == 3):
                    print("pred2-prey2: ", end ='', flush=True)
                    psizea = popsize2
                    psizeb = popsize2
                for i1 in range(psizea):
                    for i2 in range(psizeb):
                        if (pp == 0):
                            self.policy.set_trainable_flat(np.concatenate((pop1[i1], pop1[popsize1+i2])))
                        if (pp == 1):
                            self.policy.set_trainable_flat(np.concatenate((pop1[i1], pop2[popsize2+i2])))
                        if (pp == 2):
                            self.policy.set_trainable_flat(np.concatenate((pop2[i1], pop1[popsize1+i2])))
                        if (pp == 3):
                            self.policy.set_trainable_flat(np.concatenate((pop2[i1], pop2[popsize2+i2])))
                        rew, eval_length = self.policy.rollout(1)
                        tot_rew[pp] += rew
                tot_rew[pp] /= (psizea*psizeb)
                print("%.2f " % (tot_rew[pp]), flush=True)
            print("pred diff: %.2f" % (tot_rew[1] - tot_rew[0]))
            print("prey diff: %.2f" % ((1.0 - tot_rew[2]) - (1.0 - tot_rew[3])))
            print("tot diff: %.2f" % (tot_rew[1] - tot_rew[0] + (1.0 - tot_rew[2]) - (1.0 - tot_rew[3])))
|
<gh_stars>1-10
# Copyright 2019 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from pathlib import Path
import math
import numpy as np
import torch
from mtap.io.brat import read_brat_document
from torch.nn.utils.rnn import pad_sequence
from biomedicus.sentences.vocabulary import Vocabulary, get_char
# Token pattern: a run of word characters, periods, or apostrophes (the
# lookahead excludes '_'), OR a bracketed placeholder like [**...**]
# (presumably de-identification markers in clinical text — TODO confirm).
_whitespace_pattern = re.compile(r'((?!_)[\w.\'])+|\[\*\*.*?\*\*\]')
# Used by lookup_word_id to normalize digits to '#' before vocabulary lookup.
_digit = re.compile(r'[0-9]')
# Used by lookup_word_id to strip periods/apostrophes before vocabulary lookup.
_punct = re.compile(r'[.\']')
# Matches an entire [**...**] placeholder token (greedy variant of the
# alternative in _whitespace_pattern).
_identifier = re.compile(r'\[\*\*.*\*\*\]')
class Dataset:
    """Accumulates per-example token sequences and serves them as padded batches.

    Usage: call :meth:`append` once per example, then :meth:`build` exactly once
    to convert the accumulated lists to tensors, then iterate :meth:`batches`.
    """

    def __init__(self, batch_size):
        """Create an empty dataset that will yield ``batch_size`` examples per batch."""
        self.batch_size = batch_size
        self.char_ids = []   # one list of per-token char-id lists per example
        self.word_ids = []   # one list of per-token word ids per example
        self.labels = []     # one list of per-token labels per example
        self.lengths = []    # token count of each example

    @property
    def n_batches(self):
        """Number of full batches; trailing examples that don't fill a batch are dropped."""
        return len(self.lengths) // self.batch_size

    def append(self, char_ids, word_ids, labels):
        """Add one example made of three parallel per-token sequences."""
        self.char_ids.append(char_ids)
        self.word_ids.append(word_ids)
        self.labels.append(labels)
        self.lengths.append(len(labels))

    def build(self):
        """Convert the accumulated Python lists to tensors. Call once, after all appends."""
        self.char_ids = [torch.tensor(example) for example in self.char_ids]
        self.word_ids = [torch.tensor(example) for example in self.word_ids]
        self.labels = [torch.tensor(example) for example in self.labels]
        self.lengths = torch.tensor(self.lengths)

    def batches(self, shuffle=True):
        """Yield ``((char_ids, word_ids), labels, lengths)`` batches.

        Each batch is zero-padded to its own longest example. When ``shuffle``
        is true the example order is randomized each pass.
        """
        order = np.arange(len(self.lengths))
        if shuffle:
            np.random.shuffle(order)
        for batch_num in range(self.n_batches):
            lo = batch_num * self.batch_size
            chosen = order[lo:lo + self.batch_size]
            chars = pad_sequence([self.char_ids[j] for j in chosen], batch_first=True)
            words = pad_sequence([self.word_ids[j] for j in chosen], batch_first=True)
            tags = pad_sequence([self.labels[j] for j in chosen], batch_first=True)
            yield (chars, words), tags, self.lengths[chosen]
class InputMapping:
    """Maps text to the character-id / word-id inputs of the sentence model,
    and builds train/validation ``Dataset`` objects from brat-annotated docs.
    """
    def __init__(self, char_mapping, words, word_length, device=None):
        # char_mapping: character -> id mapping consumed via get_char.
        # words: vocabulary; a word's position in the iterable becomes its id.
        # word_length: fixed length each per-token char-id window is
        #     truncated/padded to (see lookup_char_ids).
        # device: torch device string/object for transform_text tensors.
        self.char_mapping = char_mapping
        self.word_mapping = {word: i for i, word in enumerate(words)}
        self.word_length = word_length
        self.device = device or 'cpu'
    def load_dataset(self, input_directory, validation_split, batch_size, sequence_length):
        """Read ``*/*.txt`` brat documents under ``input_directory`` and split
        them into train/validation sets.

        Returns ``(train, validation, pos_weight)`` where ``pos_weight`` is the
        negative/positive label ratio for weighting the training loss.
        """
        # class_counts[0] = non-boundary tokens, class_counts[1] = sentence-start tokens.
        class_counts = [0, 0]
        docs = list(map(str, Path(input_directory).glob('*/*.txt')))
        np.random.shuffle(docs)
        # First `split` shuffled docs become validation, the rest training.
        split = math.ceil(len(docs) * validation_split)
        train_docs = docs[split:]
        validation_docs = docs[:split]
        train = Dataset(batch_size)
        for char_ids, word_ids, labels in self.examples_generator(train_docs, sequence_length, True,
                                                                  class_counts):
            train.append(char_ids, word_ids, labels)
        # Validation uses batch size 1 and unwindowed (training=False) examples.
        validation = Dataset(1)
        # NOTE(review): class_counts keeps accumulating over the validation
        # docs too, so pos_weight below reflects the combined corpus rather
        # than the training split alone — confirm this is intended.
        for char_ids, word_ids, labels in self.examples_generator(validation_docs, sequence_length,
                                                                  False, class_counts):
            validation.append(char_ids, word_ids, labels)
        # there are many more negative examples (label == 0) than positive examples, so we weight
        # positive values according to the ratios, so that precision and recall end up being equally
        # important during training
        # NOTE(review): raises ZeroDivisionError if no positive labels were seen.
        pos_weight = class_counts[0] / class_counts[1]
        train.build()
        validation.build()
        return train, validation, pos_weight
    def transform_text(self, text):
        """Tokenize ``text`` for inference.

        Returns ``(token_spans, char_ids, word_ids)`` where the tensors have a
        leading batch dimension of 1 and live on ``self.device``.
        """
        char_ids = []
        word_ids = []
        actual_tokens = [(m.start(), m.end()) for m in _whitespace_pattern.finditer(text)]
        # Sentinel (0,0)/(len,len) spans give every real token a prev/next
        # neighbor so transform_word can slice inter-token text.
        tokens = [(0, 0)] + actual_tokens + [(len(text), len(text))]
        start_of_sequence = True
        for i in range(1, len(tokens) - 1):
            local_char_ids, local_word_id = self.transform_word(i, start_of_sequence,
                                                                text, tokens)
            char_ids.append(local_char_ids)
            word_ids.append(local_word_id)
            start_of_sequence = False
        return (
            actual_tokens,
            torch.tensor([char_ids], device=self.device),
            torch.tensor([word_ids], device=self.device)
        )
    def examples_generator(self, docs, sequence_length, training, class_counts):
        """Yield ``(char_ids, word_ids, labels)`` examples from brat docs.

        A token is labeled 1 when it starts a sentence, else 0; counts are
        accumulated into the caller's ``class_counts`` list. A segment is
        flushed whenever tokens fall between annotated sentences, and at end
        of document. When ``training`` is true, segments are windowed via
        step_sequence; otherwise each segment is yielded whole.
        """
        for doc in docs:
            char_ids = []
            word_ids = []
            labels = []
            start_of_sequence = True
            with read_brat_document(doc) as event:
                document = event.documents['plaintext']
                text = document.text
                try:
                    sentences = document.get_label_index('Sentence')
                except KeyError:
                    # Document has no sentence annotations; skip it.
                    continue
                # Same sentinel-span scheme as transform_text.
                tokens = [(0, 0)] + [(m.start(), m.end()) for m in
                                     _whitespace_pattern.finditer(text)] + [(len(text), len(text))]
                i = 1
                for sentence in sentences:
                    # Skip tokens that precede this sentence; the first such
                    # gap token flushes the segment built so far (later gap
                    # tokens see empty labels and do nothing).
                    while i < len(tokens) - 1 and tokens[i][0] < sentence.start_index:
                        i += 1
                        if len(labels) > 0:
                            if training:
                                yield from step_sequence(char_ids, word_ids, labels,
                                                         sequence_length)
                            else:
                                yield char_ids, word_ids, labels
                            char_ids = []
                            word_ids = []
                            labels = []
                            start_of_sequence = True
                    start_of_sentence = True
                    # Consume every token whose start offset lies inside the sentence.
                    while i < len(tokens) - 1 and tokens[i][0] in range(sentence.start_index,
                                                                       sentence.end_index):
                        local_char_ids, local_word_id = self.transform_word(i, start_of_sequence,
                                                                            text, tokens)
                        char_ids.append(local_char_ids)
                        word_ids.append(local_word_id)
                        # Only the first token of a sentence is a positive example.
                        label = 1 if start_of_sentence else 0
                        class_counts[label] += 1
                        labels.append(label)
                        start_of_sentence = False
                        start_of_sequence = False
                        i += 1
                        if i == len(tokens) - 1:
                            break
            # Flush whatever remains at end of document.
            if len(labels) > 0:
                if training:
                    yield from step_sequence(char_ids, word_ids, labels, sequence_length)
                else:
                    yield char_ids, word_ids, labels
    def transform_word(self, i, start_of_sequence, text, tokens):
        """Return ``(char_ids, word_id)`` for token ``i`` of ``tokens``,
        including the inter-token text before and after it."""
        _, prev_end = tokens[i - 1]
        start, end = tokens[i]
        next_start, _ = tokens[i + 1]
        prior = text[prev_end:start]   # text between previous token and this one
        word = text[start:end]
        post = text[end:next_start]    # text between this token and the next
        local_char_ids = self.lookup_char_ids(prior, word, post, start_of_sequence)
        local_word_id = self.lookup_word_id(word)
        return local_char_ids, local_word_id
    def lookup_char_ids(self, prior, word, post, start_of_sequence):
        """Build a fixed-length (``self.word_length``) char-id window:
        marker, prior chars, TOKEN_BEGIN, word chars, TOKEN_END, post chars,
        NEXT_TOKEN — truncated or padded with Vocabulary.PADDING as needed."""
        char_ids = ([Vocabulary.BEGIN_SEQUENCE if start_of_sequence else Vocabulary.PREV_TOKEN]
                    + [get_char(self.char_mapping, c) for c in prior]
                    + [Vocabulary.TOKEN_BEGIN]
                    + [get_char(self.char_mapping, c) for c in word]
                    + [Vocabulary.TOKEN_END]
                    + [get_char(self.char_mapping, c) for c in post]
                    + [Vocabulary.NEXT_TOKEN])
        if len(char_ids) > self.word_length:
            return char_ids[:self.word_length]
        elif len(char_ids) < self.word_length:
            padded = [Vocabulary.PADDING for _ in range(self.word_length)]
            padded[:len(char_ids)] = char_ids
            return padded
        else:
            return char_ids
    def lookup_word_id(self, word):
        """Normalize ``word`` and look up its vocabulary id.

        [**...**] placeholders map to the 'IDENTIFIER' entry; otherwise the
        word is lowercased, punctuation-stripped, and digit-masked.
        """
        if _identifier.match(word):
            word = 'IDENTIFIER'
        else:
            word = word.lower()
            word = _punct.sub('', word)
            word = _digit.sub('#', word)
        # Out-of-vocabulary words get id len(word_mapping) — presumably a
        # reserved unknown-word embedding row; verify against the model.
        local_word_id = self.word_mapping.get(word, len(self.word_mapping))
        return local_word_id
def step_sequence(char_ids, word_ids, labels, sequence_length):
    """Yield training windows of at most ``sequence_length`` tokens.

    An example shorter than ``sequence_length`` is yielded whole; otherwise a
    stride-1 sliding window of exactly ``sequence_length`` tokens is yielded
    over the three parallel sequences.

    Args:
        char_ids: per-token character-id sequences.
        word_ids: per-token word ids, parallel to ``char_ids``.
        labels: per-token labels, parallel to ``char_ids``.
        sequence_length: maximum window length.

    Yields:
        ``(char_ids, word_ids, labels)`` triples of equal length.
    """
    length = len(labels)
    if length < sequence_length:
        # Too short to window: emit the whole (unpadded) example.
        yield char_ids, word_ids, labels
    else:
        # Inclusive upper bound: the original `range(0, length - sequence_length)`
        # yielded nothing for an example of exactly sequence_length tokens and
        # always omitted the final window of longer examples.
        for start in range(length - sequence_length + 1):
            stop = start + sequence_length
            yield char_ids[start:stop], word_ids[start:stop], labels[start:stop]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.