input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
in the new mapping
if nffg is not None and nf_id in nffg:
new_running_nfs = [n.id for n in nffg.running_nfs(infra_id)]
# And connected to the same infra
if nf_id in new_running_nfs:
# NF was not moved, Skip deletion
self.log.debug('Unchanged NF: %s' % nf_id)
continue
# If the NF exists in the new mapping, but moved to another infra
else:
self.log.info("Found moved NF: %s")
self.log.debug(
"NF migration is not supported! Stop and remove already "
"deployed NF and reinitialize later...")
else:
self.log.debug("Found removable NF: %s" % nf_id)
# Create connection Adapter to EE agent
connection_params = self.topoAdapter.get_agent_connection_params(
infra_id)
if connection_params is None:
self.log.error("Missing connection params for communication with the "
"agent of Node: %s" % infra_id)
result = False
continue
updated = self.remoteAdapter.update_connection_params(
**connection_params)
if updated:
self.log.debug("Update connection params in %s: %s" % (
self.remoteAdapter.__class__.__name__, updated))
self.log.debug("Stop deployed NF: %s" % nf_id)
try:
vnf_id = self.deployed_vnfs[(infra_id, nf_id)]['vnf_id']
reply = self.remoteAdapter.removeNF(vnf_id=vnf_id)
self.log.log(VERBOSE,
"Removed NF status:\n%s" % pprint.pformat(reply))
# Remove NF from deployed cache
del self.deployed_vnfs[(infra_id, nf_id)]
# Delete infra ports connected to the deletable NF
for u, v, link in topo.network.out_edges([nf_id], data=True):
topo[v].del_port(id=link.dst.id)
# Delete NF
topo.del_node(nf_id)
except KeyError:
self.log.error("Deployed VNF data for NF: %s is not found! "
"Skip deletion..." % nf_id)
result = False
continue
except NCClientError as e:
self.log.error("Got NETCONF RPC communication error during NF: %s "
"deletion! Skip deletion..." % nf_id)
self.log.error(VERBOSE, "Exception: %s" % e)
result = False
continue
self.log.debug("NF deletion result: %s" %
("SUCCESS" if result else "FAILURE"))
return result
def _deploy_new_nfs (self, nffg):
  """
  Install the NFs mapped in the given NFFG.

  If an NF is already defined in the topology and its state is up and
  running then the actual NF's initiation will be skipped!

  :param nffg: container NF-FG part need to be deployed
  :type nffg: :class:`NFFG`
  :return: deploy was successful or not
  :rtype: bool
  """
  self.log.info("Deploy mapped NFs into the domain: %s..." % self.domain_name)
  result = True
  self.portmap.clear()
  # Remove unnecessary SG and Requirement links to avoid mess up port
  # definition of NFs
  nffg.clear_links(NFFG.TYPE_LINK_SG)
  nffg.clear_links(NFFG.TYPE_LINK_REQUIREMENT)
  # Get physical topology description from Mininet
  mn_topo = self.topoAdapter.get_topology_resource()
  if mn_topo is None:
    self.log.warning("Missing topology description from %s domain! "
                     "Skip deploying NFs..." % self.domain_name)
    return False
  # Iter through the container INFRAs in the given mapped NFFG part
  for infra in nffg.infras:
    if infra.infra_type not in (
       NFFG.TYPE_INFRA_EE, NFFG.TYPE_INFRA_STATIC_EE):
      self.log.debug(
        "Infrastructure Node: %s (type: %s) is not Container type! "
        "Continue to next Node..." % (infra.id, infra.infra_type))
      continue
    else:
      self.log.debug("Check NFs mapped on Node: %s" % infra.id)
    # If the actual INFRA isn't in the topology(NFFG) of this domain -> skip
    if infra.id not in (n.id for n in self.internal_topo.infras):
      self.log.error("Infrastructure Node: %s is not found in the %s domain! "
                     "Skip NF initiation on this Node..." %
                     (infra.id, self.domain_name))
      result = False
      continue
    # Iter over the NFs connected the actual INFRA
    for nf in nffg.running_nfs(infra.id):
      # NF with id is already deployed --> change the dynamic port to
      # static and continue
      # FIX: renamed the generator variable so it no longer shadows the
      # outer loop variable ``nf``
      if nf.id in (deployed.id for deployed in self.internal_topo.nfs):
        self.log.debug("NF: %s has already been initiated! "
                       "Continue to next NF..." % nf.id)
        for u, v, link in nffg.real_out_edges_iter(nf.id):
          dyn_port = nffg[v].ports[link.dst.id]
          for x, y, l in mn_topo.real_out_edges_iter(nf.id):
            if l.src.id == link.src.id:
              self.portmap[dyn_port.id] = l.dst.id
              dyn_port.id = l.dst.id
              break
        continue
      # Extract the initiation params
      params = {'nf_type': nf.functional_type,
                'nf_ports': [link.src.id for u, v, link in
                             nffg.real_out_edges_iter(nf.id)],
                'infra_id': infra.id}
      # Check if every param is not None or empty
      if not all(params.values()):
        self.log.error("Missing arguments for initiation of NF: %s! "
                       "Extracted params: %s" % (nf.id, params))
        result = False
        continue
      # Create connection Adapter to EE agent
      connection_params = self.topoAdapter.get_agent_connection_params(
        infra.id)
      if connection_params is None:
        self.log.error("Missing connection params for communication with the "
                       "agent of Node: %s" % infra.id)
        result = False
        continue
      # Save last used adapter --> and last RPC result
      self.log.info("Initiating NF: %s ..." % nf.id)
      self.log.debug("NF parameters: %s" % params)
      updated = self.remoteAdapter.update_connection_params(
        **connection_params)
      if updated:
        self.log.debug("Update connection params in %s: %s" % (
          self.remoteAdapter.__class__.__name__, updated))
      try:
        vnf = self.remoteAdapter.deployNF(**params)
      except NCClientError as e:
        self.log.error("Got NETCONF RPC communication error during NF: %s "
                       "deploy! Skip deploy..." % nf.id)
        # FIX: Logger.error() takes no level argument -> use Logger.log()
        # with the custom VERBOSE level (matches the pattern used below)
        self.log.log(VERBOSE, "Exception: %s" % e)
        result = False
        continue
      except BaseException:
        self.log.error("Got unexpected error during NF: %s "
                       "initiation! Skip initiation..." % nf.name)
        result = False
        continue
      self.log.log(VERBOSE, "Initiated VNF:\n%s" % pprint.pformat(vnf))
      # Check if NETCONF communication was OK
      if vnf and 'initiated_vnfs' in vnf and vnf['initiated_vnfs']['pid'] \
         and vnf['initiated_vnfs']['status'] == \
            VNFStarterAPI.VNFStatus.s_UP_AND_RUNNING:
        self.log.info("NF: %s initiation has been verified on Node: %s" % (
          nf.id, infra.id))
        self.log.debug("Initiated VNF id: %s, PID: %s, status: %s" % (
          vnf['initiated_vnfs']['vnf_id'], vnf['initiated_vnfs']['pid'],
          vnf['initiated_vnfs']['status']))
      else:
        self.log.error("Initiated NF: %s is not verified. Initiation was "
                       "unsuccessful!" % nf.id)
        result = False
        continue
      # Store NETCONF related info of deployed NF
      self.deployed_vnfs[(infra.id, nf.id)] = vnf['initiated_vnfs']
      # Add initiated NF to topo description
      self.log.debug("Update Infrastructure layer topology description...")
      deployed_nf = nf.copy()
      deployed_nf.ports.clear()
      mn_topo.add_nf(nf=deployed_nf)
      self.log.debug("Add deployed NFs to topology...")
      # Add Link between actual NF and INFRA
      for nf_id, infra_id, link in nffg.real_out_edges_iter(nf.id):
        # Get Link's src ref to new NF's port
        nf_port = deployed_nf.ports.append(nf.ports[link.src.id].copy())

        def get_sw_port (vnf):
          """
          Return the switch port parsed from result of getVNFInfo.

          :param vnf: VNF description returned by NETCONF server
          :type vnf: dict
          :return: port id (or None if no matching entry was found)
          :rtype: int
          """
          if isinstance(vnf['initiated_vnfs']['link'], list):
            for _link in vnf['initiated_vnfs']['link']:
              if str(_link['vnf_port']) == str(nf_port.id):
                return int(_link['sw_port'])
          else:
            return int(vnf['initiated_vnfs']['link']['sw_port'])

        # Get OVS-generated physical port number
        infra_port_num = get_sw_port(vnf)
        if infra_port_num is None:
          self.log.warning("Can't get Container port from RPC result! Set "
                           "generated port number...")
        # Create INFRA side Port
        infra_port = mn_topo.network.node[infra_id].add_port(
          id=infra_port_num)
        self.log.debug("%s - detected physical %s" %
                       (deployed_nf, infra_port))
        # Add Links to mn topo
        mn_topo.add_undirected_link(port1=nf_port, port2=infra_port,
                                    dynamic=True, delay=link.delay,
                                    bandwidth=link.bandwidth)
        # Port mapping
        dynamic_port = nffg.network.node[infra_id].ports[link.dst.id].id
        self.portmap[dynamic_port] = infra_port_num
        # Update port in nffg_part
        nffg.network.node[infra_id].ports[
          link.dst.id].id = infra_port_num
      self.log.debug("%s topology description is updated with NF: %s" % (
        self.domain_name, deployed_nf.name))
  self.log.debug("Rewrite dynamically generated port numbers in flowrules...")
  # Update port numbers in flowrules
  for infra in nffg.infras:
    if infra.infra_type not in (
       NFFG.TYPE_INFRA_EE, NFFG.TYPE_INFRA_STATIC_EE,
       NFFG.TYPE_INFRA_SDN_SW):
      continue
    # If the actual INFRA isn't in the topology(NFFG) of this domain -> skip
    if infra.id not in (n.id for n in mn_topo.infras):
      continue
    for port in infra.ports:
      for flowrule in port.flowrules:
        _match = flowrule.match.split(';')
        if not _match[0].startswith("in_port="):
          self.log.warning("Missing 'in_port' from match field: %s" %
                           flowrule.match)
          continue
        _action = flowrule.action.split(';')
        if not _action[0].startswith("output="):
          self.log.warning("Missing 'output' from action field: %s" %
                           flowrule.action)
          continue
        # FIX: dict.iteritems() is Python2-only; items() behaves the same
        # here and works on both interpreters
        for dyn, phy in self.portmap.items():
          _match[0] = _match[0].replace(str(dyn), str(phy))
          _action[0] = _action[0].replace(str(dyn), str(phy))
        flowrule.match = ";".join(_match)
        flowrule.action = ";".join(_action)
  # Collapsed the duplicated success/failure log branches into one message
  self.log.info("Initiation of NFs in NFFG part: %s has been finished! "
                "Result: %s" % (nffg, "SUCCESS" if result else "FAILURE"))
  return result
def _delete_flowrules (self, nffg=None):
"""
Delete all flowrules from the first (default) table of all infras.
:param nffg: last mapped NFFG part
:type nffg: :class:`NFFG`
:return: deletion was successful or not
:rtype: bool
"""
self.log.debug("Reset domain steering and delete installed flowrules...")
result = True
# Get topology NFFG to detect corresponding infras and skip needless infras
topo = self.topoAdapter.get_topology_resource()
if topo is None:
self.log.warning("Missing topology description from %s domain! "
"Skip flowrule deletions..." % self.domain_name)
return False
# If nffg is not given or is a bare topology, which is probably a cleanup
# topo, all the flowrules in physical topology will be removed
if nffg is None or nffg.is_bare():
self.log.debug("Detected empty request NFFG! "
"Remove all the installed flowrules...")
nffg = topo
topo_infras = [n.id for n in topo.infras]
# Iter through the container INFRAs in the given mapped NFFG part
self.log.debug("Managed topo infras: %s" % topo_infras)
for infra in nffg.infras:
self.log.debug("Process flowrules in infra: %s" % infra.id)
if infra.infra_type not in (NFFG.TYPE_INFRA_EE, NFFG.TYPE_INFRA_STATIC_EE,
NFFG.TYPE_INFRA_SDN_SW):
self.log.warning("Detected virtual Infrastructure Node type: | |
## @file scrumbotCog.py
# @author <NAME>, <NAME>, <NAME>
# @brief A cog containing all commands related to projects.
# @date Mar 12, 2020
import discord
from discord.ext import commands
import os, sys, inspect
currentDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentDir = os.path.dirname(currentDir)
sys.path.insert(0, parentDir)
from project import *
from projectList import ProjectList
from fileio import *
from timerClass import *
## @brief Discord commands related to scrumbot
# @details These commands are only to be used inside a guild.
class scrumbotCog(commands.Cog, name="Scrumbot Commands"):
TESTING = False # For timing commands
def __init__(self, bot):
    # Keep a handle on the bot, restore the persisted project list and make
    # sure command-timing output starts disabled.
    self.bot = bot
    self.project_list = fileio.read()
    scrumbotCog.TESTING = False
# PROJECT COG
## @brief Add a meeting to a project
#  @param project_id ID of the target project
#  @param name Name of the meeting
#  @param date Date of the meeting
#  @param time Time of the meeting
#  @param meeting_type Kind of meeting (grooming, standup, ...)
#  @param description Optional free-form description
#  @throws TypeError When the meeting type is not recognized
@commands.command(name="addMeeting", brief="Add a meeting to a project.")
@commands.guild_only()
@commands.has_role("Scrum Master")
async def add_meeting(self, ctx, project_id: int, name, date, time, meeting_type, *, description=None):
    timerClass.start()
    print(f'[Log] add_meeting from {ctx.author}, name: {name}, date: {date}, time: {time}, desc: {description} in project: {project_id}')
    project = self.__get_project(project_id)
    if not project:
        await ctx.send('> Failed to add meeting: project not found.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    # Combine date and time into a single timestamp string.
    when = date + " " + time
    kind = meeting_type.upper()
    try:
        project.add_meeting(name, when, kind, description)
    except TypeError:
        await ctx.send('> Failed to add meeting: meeting type must be GROOMING, STANDUP, RETROSPECTIVE, or SPRINTPLANNING.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    # NOTE(review): persistence keys on project_list.get_last_id(), not on
    # project_id — this matches every sibling command; confirm fileio's contract.
    record = f'{project.get_last_meeting_id()},{name},{when},{kind},{description}'
    fileio.write(self.project_list.get_last_id(), "addMeeting", record)
    await ctx.send(f'> Added meeting **{name}** to {project.get_name()}.')
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief Add a project to the guild
#  @param name Name of the project
#  @param description Optional description of the project
@commands.command(name="addProject", brief="Add a project to the guild.")
@commands.guild_only()
@commands.has_role("Admin")
async def add_project(self, ctx, name, *, description=None):
    timerClass.start()
    print(f'[Log] add_project from {ctx.author}, name: {name}, desc: {description}')
    # Register the new project in memory, then persist it.
    new_project = Project(name, description)
    self.project_list.add(new_project)
    fileio.create(self.project_list.get_last_id(), name, description)
    await ctx.send(f'> Added project **{name}**')
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief Add a requirement to a project
#  @param project_id ID of the target project
#  @param requirement Requirement text to record
@commands.command(name="addRqe", aliases=["addRequirement", "addReq"], brief="Add a requirement to a project.")
@commands.guild_only()
@commands.has_role("Business Analyst")
async def add_rqe(self, ctx, project_id: int, *, requirement):
    timerClass.start()
    print(f'[Log] add_rqe from {ctx.author}, project: {project_id}, rqe: {requirement}')
    project = self.__get_project(project_id)
    if not project:
        await ctx.send('> Failed to add requirement: project not found.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    project.add_rqe(requirement)
    fileio.write(self.project_list.get_last_id(), "addRqe", requirement)
    await ctx.send(f'> Added requirement to {project.get_name()}.')
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief Add a sprint to a project
#  @param project_id ID of the target project
@commands.command(name="addSprint", brief="Add a sprint to a project.")
@commands.guild_only()
@commands.has_role("Scrum Master")
async def add_sprint(self, ctx, project_id: int):
    timerClass.start()
    print(f'[Log] add_sprint from {ctx.author}, project: {project_id}')
    project = self.__get_project(project_id)
    if not project:
        await ctx.send('> Failed to add sprint: project not found.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    project.add_sprint()
    fileio.write(self.project_list.get_last_id(), "addSprint")
    await ctx.send(f'> Added a new sprint to {project.get_name()}.')
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief Get the description of a project
#  @param project_id ID of the project
@commands.command(name="getProjectDesc", aliases=["getProjectDescription", "getProjDesc"], brief="Get the description of a project.")
@commands.guild_only()
async def get_project_desc(self, ctx, project_id: int):
    timerClass.start()
    print(f'[Log] get_project_desc from {ctx.author}, project: {project_id}')
    proj = self.__get_project(project_id)
    if not proj:
        await ctx.send(f'> Failed to get project description: project not found.')
        # FIX: emit the timing line on this early exit too, like every
        # sibling command does.
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    desc = f'Project Name: {proj.get_name()}\nDescription: {proj.get_desc()}'
    embed = discord.Embed(title='Project Description')
    # FIX: the field previously rendered "Description: Project Name: ..." —
    # the value string already carries its own labels.
    embed.add_field(name='\uFEFF', value=desc)
    await ctx.send(content=None, embed=embed)
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief Get the requirements of a project
#  @param project_id ID of the project
@commands.command(name="getRqes", aliases=["getRequirements", "getReqs"], brief="Get the requirements of a project.")
@commands.guild_only()
async def get_rqes(self, ctx, project_id: int):
    timerClass.start()
    print(f'[Log] get_rqes from {ctx.author}, project: {project_id}')
    proj = self.__get_project(project_id)
    if not proj:
        await ctx.send(f'> Failed to get project requirements: project not found.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    if not proj.get_rqes():
        # FIX: message was missing the '> ' prefix every other reply uses.
        await ctx.send(f'> No current requirements in {proj.get_name()}.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    # Number the requirements (0-based, consistent with getSprints);
    # enumerate avoids re-fetching the list three times.
    rqe_lst = [f'{i}. {rqe}' for i, rqe in enumerate(proj.get_rqes())]
    lst = '\n'.join(rqe_lst)
    embed = discord.Embed(title=f'List of Requirements', description=f'{proj.get_name()}:')
    embed.add_field(name='\uFEFF', value=lst)
    await ctx.send(content=None, embed=embed)
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief Get the sprints of a project
#  @param project_id ID of the project
@commands.command(name="getSprints", aliases=["listSprints"], brief="Get the sprints of a project.")
@commands.guild_only()
async def get_sprints(self, ctx, project_id: int):
    timerClass.start()
    print(f'[Log] get_sprints from {ctx.author}, project: {project_id}')
    project = self.__get_project(project_id)
    if not project:
        await ctx.send('> Failed to get project sprints: project not found.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    creation_dates = project.get_sprints()
    if not creation_dates:
        await ctx.send(f'> No current sprints in {project.get_name()}.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    # One line per sprint, indexed from 0.
    lines = [f'Sprint {idx} - Created on: {creation_dates[idx]}'
             for idx in range(len(creation_dates))]
    embed = discord.Embed(title=f'List of Sprints', description=f'{project.get_name()}:')
    embed.add_field(name='\uFEFF', value='\n'.join(lines))
    await ctx.send(content=None, embed=embed)
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief List all meetings of a project
#  @param project_id ID of the project
@commands.command(name="listMeetings", brief="List all meetings of a project.")
@commands.guild_only()
async def list_meetings(self, ctx, project_id: int):
    timerClass.start()
    print(f'[Log] list_meetings from {ctx.author}, project: {project_id}')
    proj = self.__get_project(project_id)
    if not proj:
        await ctx.send(f'> Failed to list meetings: project not found.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    # Each meeting is (id, (name, datetime, type, ...)).
    seq = [f'id: {i} - name: {j[0]}, on {j[1]}. Meeting type: {j[2]}' for i, j in proj.get_meetings()]
    if not seq:
        # FIX: the empty-case message said "projects" instead of "meetings".
        await ctx.send(f'> No current meetings in {proj.get_name()}.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    lst = '\n'.join(seq)
    embed = discord.Embed(title='List of Meetings', description=f'{proj.get_name()}')
    embed.add_field(name='\uFEFF', value=lst)
    await ctx.send(content=None, embed=embed)
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief List all projects in the guild
@commands.command(name="listProjects", aliases=["listProject"], brief="List all projects in a guild.")
@commands.guild_only()
async def list_projects(self, ctx):
    timerClass.start()
    print(f'[Log] list_projects from {ctx.author}')
    # One summary line per (id, project) pair.
    entries = [f'id: {i} - name: {j.get_name()} - desc: {j.get_desc()}'
               for i, j in self.project_list.to_seq()]
    if not entries:
        await ctx.send(f'> No current projects.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    embed = discord.Embed(title='List of Projects')
    embed.add_field(name='\uFEFF', value='\n'.join(entries))
    await ctx.send(content=None, embed=embed)
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief Remove the last sprint of a project
#  @param project_id ID of the project
#  @throws IndexError When the project has no sprints to remove
@commands.command(name="rmLastSprint", aliases=["removeLastSprint", "rmSprint", "removeSprint"], brief="Remove the last sprint of a project.")
@commands.guild_only()
@commands.has_role("Scrum Master")
async def rm_last_sprint(self, ctx, project_id: int):
    timerClass.start()
    print(f'[Log] rm_last_sprint from {ctx.author}, project: {project_id}')
    project = self.__get_project(project_id)
    if not project:
        await ctx.send('> Failed to remove last sprint: project not found.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    try:
        project.rm_sprint()
    except IndexError:
        print(f'[Error] rm_last_sprint raised IndexError')
        await ctx.send('> Failed to remove last sprint: no sprints found in project.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    fileio.write(self.project_list.get_last_id(), "rmLastSprint")
    await ctx.send(f'> Removed last sprint from {project.get_name()}.')
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief Remove a meeting from a project
#  @param project_id ID of the project
#  @param meeting_id ID of the meeting to remove
#  @throws KeyError When the meeting does not exist
@commands.command(name="rmMeeting", aliases=["removeMeeting"], brief="Removes a meeting from a project.")
@commands.guild_only()
@commands.has_role("Scrum Master")
async def rm_meeting(self, ctx, project_id: int, meeting_id: int):
    timerClass.start()
    print(f'[Log] rm_meeting from {ctx.author}, project: {project_id}, meeting: {meeting_id}')
    project = self.__get_project(project_id)
    if not project:
        await ctx.send('> Failed to remove meeting: project not found.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    try:
        project.rm_meeting(meeting_id)
    except KeyError:
        await ctx.send('> Failed to remove meeting: meeting not found.')
        if scrumbotCog.TESTING:
            await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
        return
    fileio.write(self.project_list.get_last_id(), "rmMeeting", meeting_id)
    await ctx.send(f'> Removed meeting {meeting_id} from {project.get_name()}.')
    if scrumbotCog.TESTING:
        await ctx.send(f'> Elapsed time: {timerClass.end()} seconds.')
## @brief Removes a project
# @param project_id Project ID
# @throws KeyError Project not found
@commands.command(name="rmProject", aliases=["removeProject"], brief="Removes a project from the guild.")
@commands.guild_only()
@commands.has_role("Admin")
async def rm_project(self, ctx, project_id: int):
timerClass.start()
print(f'[Log] rm_project from {ctx.author}, project: {project_id}')
try:
self.project_list.remove(project_id)
except KeyError:
await ctx.send(f'> Failed to remove project: project not found.')
| |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.204603,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.363393,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.11055,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.635775,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.10093,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.631416,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.36812,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.458173,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.7797,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.209808,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0230473,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.243032,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.170449,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.45284,
'Execution Unit/Register Files/Runtime Dynamic': 0.193497,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.643318,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.59116,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.92155,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00238064,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00238064,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00206893,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000798397,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00244852,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00927873,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.02299,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.163857,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.47197,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.556533,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.22463,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0733521,
'L2/Runtime Dynamic': 0.0149759,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 6.49788,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.53945,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.170198,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.170198,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.30486,
'Load Store Unit/Runtime Dynamic': 3.549,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.419679,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.839358,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.148946,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.15004,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0773917,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.815992,
'Memory Management Unit/Runtime Dynamic': 0.227432,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 29.5043,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.731972,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.041318,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.317832,
'Renaming Unit/Int Front End RAT/Subthreshold | |
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys
import math
from GraphicsView import *
'''
10/16
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- drawing works in test.py and I would like to integrate it back over here
- would like to fix the editing part - threshold needs to get the image as
it's edited by the other 4 morphological transformations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
class Example(QWidget):
    """Demo widget that paints a background image and connects every pair
    of clicked points with lines.

    A 15 ms timer advances the bouncing-ellipse state used by the
    (currently disabled) drawObjects() painter.  The other draw* methods
    are alternative painters that can be enabled in paintEvent().
    """

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Initialise drawing state and show the window."""
        # Fixed-capacity buffer of (x, y) click slots, filled in order.
        self.cs = [[0 for i in range(2)] for j in range(100)]
        self.image = QImage("blah")  # NOTE(review): placeholder path -- loads a null image; confirm
        self.count = 0       # number of clicks currently stored in self.cs
        self.rotate = 1      # rotation angle (degrees) for drawObjects()
        self.pos_x = 8       # bouncing-ellipse position for drawObjects()
        self.pos_y = 8
        self.radius = 60
        self.delta = [1, 1]  # per-axis direction of travel (+1 or -1)
        self.timerId = self.startTimer(15)  # animation tick every 15 ms
        self.setGeometry(300, 300, 500, 500)
        self.setWindowTitle('clipping')
        self.show()

    def drawImage(self, painter):
        """Paint the background image scaled into the 500x500 canvas."""
        painter.drawImage(0, 0, self.image.scaled(500, 500, Qt.KeepAspectRatio))

    def paintEvent(self, e):
        painter = QPainter()
        painter.begin(self)
        self.drawImage(painter)
        self.drawLines(painter)
        # Alternative demo painters -- enable one at a time:
        #self.drawDonut(painter)
        #self.drawShapes(painter)
        #self.drawRectangles(painter)
        #self.drawObjects(painter)
        painter.end()

    def mousePressEvent(self, e):
        """Left click records a point; right click repaints and clears."""
        if e.button() == Qt.LeftButton:
            # Bug fix: guard the fixed-size buffer -- the original raised
            # IndexError on the 101st left click.
            if self.count < len(self.cs):
                self.cs[self.count][0] = e.x()
                self.cs[self.count][1] = e.y()
                self.count = self.count + 1
        if e.button() == Qt.RightButton:
            self.repaint()  # calls paintEvent()
            self.count = 0

    def drawLines(self, painter):
        """Draw a line between every pair of recorded click points."""
        painter.setRenderHint(QPainter.Antialiasing)
        w = self.width()
        h = self.height()
        #painter.eraseRect(0, 0, w, h)
        for i in range(self.count):
            for j in range(self.count):
                painter.drawLine(self.cs[i][0], self.cs[i][1],
                                 self.cs[j][0], self.cs[j][1])

    def drawDonut(self, painter):
        """Spirograph-style donut: one ellipse rotated in 5-degree steps."""
        brush = QBrush(QColor("#535353"))
        painter.setPen(QPen(brush, 0.5))
        painter.setRenderHint(QPainter.Antialiasing)
        h = self.height()
        w = self.width()
        painter.translate(QPoint(w/2, h/2))
        rot = 0
        while rot < 360.0:
            painter.drawEllipse(-125, -40, 250, 80)
            painter.rotate(5.0)
            rot += 5.0

    def drawShapes(self, painter):
        """Filled leaf shape built from two cubic Bezier curves."""
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setPen(Qt.NoPen)
        painter.setBrush(QBrush(QColor("#888888")))
        path1 = QPainterPath()
        path1.moveTo(5, 5)  # start point of the path
        # cubicTo(c1x, c1y, c2x, c2y, endx, endy): Bezier curve from the
        # current point to the end point, shaped by control points c1, c2.
        path1.cubicTo(40, 5, 50, 50, 99, 99)
        path1.cubicTo(5, 99, 50, 50, 5, 5)
        painter.drawPath(path1)

    def drawRectangles(self, painter):
        """Row of rectangles fading from 10% to 100% opacity."""
        for i in range(1, 11):
            painter.setOpacity(i*0.1)
            painter.fillRect(50*i, 20, 40, 40, Qt.darkGray)

    def drawObjects(self, painter):
        """Rotating rectangle with a bouncing ellipse clipped to it."""
        painter.setRenderHint(QPainter.Antialiasing)
        w = self.width()
        h = self.height()
        rect = QRect(-100, -40, 200, 80)
        painter.translate(w/2, h/2)
        painter.rotate(self.rotate)
        painter.drawRect(rect)
        brush = QBrush(QColor(110, 110, 110))
        painter.setBrush(brush)
        painter.setClipRect(rect)
        painter.resetTransform()
        # First ellipse is clipped to the rotated rect, second is not.
        painter.drawEllipse(self.pos_x, self.pos_y, self.radius, self.radius)
        painter.setBrush(Qt.NoBrush)
        painter.setClipping(False)
        painter.drawEllipse(self.pos_x, self.pos_y, self.radius, self.radius)

    def timerEvent(self, event):
        self.step()
        self.repaint()

    def step(self):
        """Advance the bouncing-ellipse animation by one tick."""
        w = self.width()
        h = self.height()
        # Reverse direction when an edge of the widget is reached.
        if self.pos_x < 0:
            self.delta[0] = 1
        elif self.pos_x > w - self.radius:
            self.delta[0] = -1
        if self.pos_y < 0:
            self.delta[1] = 1
        elif self.pos_y > h - self.radius:
            self.delta[1] = -1
        self.pos_x += self.delta[0]
        self.pos_y += self.delta[1]
        self.rotate = self.rotate + 1
class Item(QGraphicsRectItem):
    """A draggable, orange, borderless rectangle item."""

    def __init__(self, x, y, w, h):
        super().__init__(x, y, w, h)
        # Let the user drag the item around the scene.
        self.setFlag(QGraphicsItem.ItemIsMovable, True)
        self.setCursor(Qt.SizeAllCursor)
        fill = QColor(250, 100, 0)
        self.setBrush(fill)
        self.setPen(QPen(Qt.NoPen))
class ViewExample(QGraphicsView):
    """Graphics view showing a single movable Item.

    Bug fixed: init() previously re-ran ``QGraphicsView.__init__`` on an
    already-constructed instance, resetting the geometry and window title
    that __init__ had just configured.
    """

    def __init__(self):
        super().__init__()
        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle("custom item")
        self.init()

    def init(self, *args, **kwargs):
        """Build the scene containing one draggable rectangle item."""
        # (*args/**kwargs kept for signature compatibility; unused.)
        self.scene = QGraphicsScene()
        self.item = Item(0, 0, 100, 100)
        self.scene.addItem(self.item)
        self.setScene(self.scene)
class Rectangle(QGraphicsRectItem):
    """Movable red rectangle that rotates about a fixed pivot point."""

    def __init__(self, x, y, w, h):
        super().__init__(x, y, w, h)
        paint_color = QColor(250, 50, 0)
        self.setBrush(paint_color)
        self.setPen(paint_color)
        self.setFlag(QGraphicsItem.ItemIsMovable, True)
        self.setCursor(Qt.SizeAllCursor)
        # Pivot point for doRotate(), in scene coordinates.
        self.tx, self.ty = 200, 200

    def doRotate(self, alfa):
        """Rotate the item by *alfa* degrees around the (tx, ty) pivot."""
        # Move the pivot to the origin, rotate, then move it back.
        transform = QTransform()
        transform.translate(self.tx, self.ty)
        transform.rotate(alfa)
        transform.translate(-self.tx, -self.ty)
        self.setTransform(transform)
class View(QGraphicsView):
    """Antialiased view hosting a single rotatable Rectangle."""

    def __init__(self):
        super(View, self).__init__()
        self.setRenderHint(QPainter.Antialiasing)
        self.initScene()

    def initScene(self):
        """Create the scene and place the rectangle in it."""
        scene = QGraphicsScene()
        self.scene = scene
        self.scene2 = QGraphicsScene()  # NOTE(review): never used here -- kept for compatibility
        self.rect = Rectangle(150, 150, 100, 100)
        scene.addItem(self.rect)
        self.setSceneRect(0, 0, 1000, 400)
        self.setScene(scene)
class Example2(QWidget):
    """Window pairing a View with a slider that rotates its rectangle."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Rotation")
        self.setGeometry(150, 150, 1000, 400)
        self.initUI()

    def initUI(self):
        self.view = View()
        slider = QSlider(Qt.Horizontal, self)
        slider.setRange(-180, 180)
        slider.valueChanged[int].connect(self.changeValue)
        column = QVBoxLayout()
        column.addWidget(self.view)
        column.addWidget(slider)
        self.setLayout(column)

    def changeValue(self, value):
        """Slider callback: rotate the rectangle to *value* degrees."""
        self.view.rect.doRotate(value)
TIME = 3000
class Ball(QObject):
    """Wraps a pixmap item and exposes a writable ``pos`` Qt property so
    the item can be driven by QPropertyAnimation."""

    def __init__(self):
        super().__init__()
        source = QImage('blah')  # NOTE(review): placeholder path -- confirm
        self.image = source.scaled(300, 300, Qt.KeepAspectRatio)
        self.pixmap = QPixmap(self.image)
        self.pixmap_item = QGraphicsPixmapItem(self.pixmap)

    def _set_pos(self, pos):
        # Forward property writes to the underlying graphics item.
        self.pixmap_item.setPos(pos)

    # Write-only property animated via QPropertyAnimation(..., b'pos').
    pos = pyqtProperty(QPointF, fset=_set_pos)
class myView(QGraphicsView):
    """View that animates the Ball's pixmap along a sine wave."""

    def __init__(self):
        super().__init__()
        self.initView()

    def initView(self):
        self.ball = Ball()
        self.ball.pos = QPointF(5, 50)
        anim = QPropertyAnimation(self.ball, b'pos')
        self.animation = anim
        anim.setDuration(5000)
        anim.setStartValue(QPointF(5, 80))
        # 20 key frames tracing y = sin(x) * 30.
        for frame in range(20):
            anim.setKeyValueAt(frame / 20, QPointF(frame, math.sin(frame) * 30))
        anim.setEndValue(QPointF(570, 5))
        self.scene = QGraphicsScene(self)
        self.scene.setSceneRect(120, -50, 250, 150)
        self.scene.addItem(self.ball.pixmap_item)
        self.setScene(self.scene)
        self.setWindowTitle("Sine wave animation")
        self.setRenderHint(QPainter.Antialiasing)
        self.setGeometry(300, 300, 700, 200)
        anim.start()
class view(QGraphicsView):
    """QGraphicsView with antialiased rendering enabled."""

    def __init__(self):
        super(view, self).__init__()
        self.setRenderHint(QPainter.Antialiasing)
class Scene(QGraphicsScene):
    """Scene pre-populated with five selectable, movable ellipses.

    NOTE(review): a second class named ``Scene`` later in this file
    rebinds the module-level name at import time -- confirm which
    definition callers actually receive.
    """

    def __init__(self):
        super().__init__()
        self.initScene()

    def initScene(self):
        movable = QGraphicsItem.ItemIsMovable
        selectable = QGraphicsItem.ItemIsSelectable
        for n in range(5):
            ellipse = self.addEllipse(20 * n, 40 * n, 50, 50)
            ellipse.setFlag(movable, True)
            ellipse.setFlag(selectable, True)
class Ex(QWidget):
    """Selection demo: a scene of items plus a Delete button that removes
    whatever is currently selected."""

    def __init__(self):
        super().__init__()
        self.setGeometry(150, 150, 350, 300)
        self.setWindowTitle("Selection")
        self.initUI()

    def initUI(self):
        self.view = view()
        self.scene = Scene()
        self.view.setScene(self.scene)

        frame = QFrame()
        self.delete = QPushButton("Delete", frame)
        self.delete.setEnabled(False)  # nothing is selected yet
        side = QVBoxLayout()
        side.addWidget(self.delete)
        side.addStretch(1)
        frame.setLayout(side)

        main = QHBoxLayout()
        main.addWidget(self.view)
        main.addWidget(frame)
        self.setLayout(main)

        self.delete.clicked.connect(self.onClick)
        self.scene.selectionChanged.connect(self.selChanged)

    def onClick(self):
        """Remove every selected item from the scene."""
        for item in self.scene.selectedItems():
            self.scene.removeItem(item)

    def selChanged(self):
        """Enable the Delete button only while something is selected."""
        self.delete.setEnabled(bool(self.scene.selectedItems()))
class View2(QGraphicsView):
    """Scene containing three solid-coloured shapes; used by the zoom demo."""

    def __init__(self):
        super().__init__()
        self.setGeometry(300, 300, 300, 300)
        self.setRenderHint(QPainter.Antialiasing)
        self.init()

    def init(self):
        self.scene = QGraphicsScene()
        # Each shape gets a matching brush and pen colour.
        shapes = (
            (self.scene.addRect(150, 40, 100, 100), QColor(250, 50, 0)),
            (self.scene.addEllipse(40, 70, 80, 80), QColor(0, 50, 250)),
            (self.scene.addRect(60, 180, 150, 70), QColor(0, 250, 50)),
        )
        for item, colour in shapes:
            item.setBrush(colour)
            item.setPen(colour)
        self.setScene(self.scene)
class Example3(QWidget):
    """Zoom demo: a slider scales the View2 contents from 1% to 500%."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.view = View2()
        zoom = QSlider(Qt.Horizontal, self)
        zoom.setRange(1, 500)
        zoom.setValue(100)  # 100 corresponds to 1:1 scale
        zoom.valueChanged[int].connect(self.onZoom)
        column = QVBoxLayout()
        column.addWidget(self.view)
        column.addWidget(zoom)
        self.setLayout(column)
        self.setWindowTitle("Zoom")
        self.setGeometry(150, 150, 300, 300)

    def onZoom(self, value):
        """Apply the slider value as a uniform scale factor (percent/100)."""
        factor = value / 100
        self.view.resetTransform()
        self.view.scale(factor, factor)
class MyGroup(QGraphicsItemGroup):
    """Movable, selectable item group that paints a dotted outline around
    its bounding rectangle while selected."""

    def __init__(self):
        super().__init__()
        self.setCursor(Qt.OpenHandCursor)
        self.setFlag(QGraphicsItem.ItemIsMovable)
        self.setFlag(QGraphicsItem.ItemIsSelectable, True)

    def paint(self, painter, option, widget):
        painter.setRenderHint(QPainter.Antialiasing)
        outline = QPen(QBrush(QColor("#333333")), 0.5)
        outline.setStyle(Qt.DotLine)
        painter.setPen(outline)
        # Only draw the outline while the group is selected.
        if self.isSelected():
            painter.drawRect(self.boundingRect())
class Scene(QGraphicsScene):
    """Scene with two rectangles and one circle, all movable and
    selectable; used by the grouping demo (View3).

    NOTE(review): this class rebinds the module-level name ``Scene``
    already defined earlier in this file (the five-ellipse selection
    scene) -- after import, ``Scene`` refers to THIS class.  Confirm both
    demos get the scene they expect.
    """
    def __init__(self):
        super().__init__()
        self.initScene()
    def initScene(self):
        # r1, r2: rectangles; c: circle.  All movable and selectable.
        self.r1 = self.addRect(20, 50, 120, 50)
        self.r1.setFlag(QGraphicsItem.ItemIsMovable)
        self.r1.setFlag(QGraphicsItem.ItemIsSelectable, True)
        self.r2 = self.addRect(150, 100, 50, 50)
        self.r2.setFlag(QGraphicsItem.ItemIsMovable)
        self.r2.setFlag(QGraphicsItem.ItemIsSelectable, True)
        self.c = self.addEllipse(30, 150, 60, 60)
        self.c.setFlag(QGraphicsItem.ItemIsMovable)
        self.c.setFlag(QGraphicsItem.ItemIsSelectable, True)
class View3(QGraphicsView):
    """Grouping demo view: rubber-band selection; 'G' groups the selected
    items into a MyGroup, 'U' dissolves the group again."""

    def __init__(self):
        super().__init__()
        self.setGeometry(300, 300, 300, 300)
        off = Qt.ScrollBarAlwaysOff
        self.setVerticalScrollBarPolicy(off)
        self.setHorizontalScrollBarPolicy(off)
        self.setRenderHint(QPainter.Antialiasing)
        self.setDragMode(QGraphicsView.RubberBandDrag)
        self.init()

    def init(self):
        self.group = None  # the active MyGroup, or None when ungrouped
        self.scene = Scene()
        self.setSceneRect(0, 0, 300, 300)
        self.setScene(self.scene)

    def keyPressEvent(self, e):
        key = e.key()
        if key == Qt.Key_U and self.group is not None and self.group.isSelected():
            # Dissolve the group and deselect its former members.
            members = self.group.childItems()
            self.scene.destroyItemGroup(self.group)
            self.group = None
            for member in members:
                member.setSelected(False)
        if key == Qt.Key_G and self.group is None:
            selection = self.scene.selectedItems()
            if selection:
                self.group = MyGroup()
                for member in selection:
                    self.group.addToGroup(member)
                self.scene.addItem(self.group)
class Example4(QWidget):
    """Top-level window hosting the grouping demo view."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.view = View3()
        layout = QHBoxLayout()
        layout.addWidget(self.view)
        self.setLayout(layout)
        self.setWindowTitle("Grouping")
        self.setGeometry(250, 150, 300, 300)
class View4(QGraphicsView):
    """View for the (unfinished) pixmap demo.

    Bug fixed: initView() referenced ``self.Scene4`` -- but Scene4 is a
    module-level class, not an instance attribute, so construction raised
    AttributeError.
    """

    def __init__(self):
        super().__init__()
        self.setGeometry(300, 300, 300, 300)
        policy = Qt.ScrollBarAlwaysOff
        self.setVerticalScrollBarPolicy(policy)
        self.setHorizontalScrollBarPolicy(policy)
        self.setRenderHint(QPainter.Antialiasing)
        self.initView()

    def initView(self):
        self.scene = Scene4()  # was self.Scene4(): AttributeError
        self.setSceneRect(0, 0, 300, 300)
        self.setScene(self.scene)
class Scene4(QGraphicsScene):
    """Scene for the pixmap demo (work in progress).

    Bug fixed: __init__ called self.initScene(), but the method was
    commented out, so construction raised AttributeError.  A stub is
    provided so the class is constructible again.
    """

    def __init__(self):
        super().__init__()
        self.initScene()

    def initScene(self):
        # TODO: the original (commented-out) intent was to load an image
        # here: ``self.image = ...``.  No-op until that is implemented.
        pass
class pixmapItem(QGraphicsPixmapItem):
    """Pixmap item that loads and scales an image on construction.

    Bug fixed: __init__ called self.initItem(), but the method was
    commented out, so construction raised AttributeError.
    """

    def __init__(self):
        super().__init__()
        self.image = QImage("blah")  # NOTE(review): placeholder path -- confirm
        self.image = self.image.scaled(300, 300, Qt.KeepAspectRatio)
        self.pixmap = QPixmap(self.image)
        self.initItem()

    def initItem(self):
        # Apply the prepared pixmap to this graphics item (presumed
        # original intent of the commented-out method -- TODO confirm).
        self.setPixmap(self.pixmap)
class attempt(QWidget):
    """Window hosting a View4.

    Bug fixed: the layout was built and populated but never installed via
    setLayout(), so the view was not laid out inside the widget.
    """

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        hbox = QHBoxLayout()
        self.view = View4()
        hbox.addWidget(self.view)
        self.setLayout(hbox)  # was missing: layout was never applied
        self.setGeometry(250, 150, 300, 300)
class rdButton(QGroupBox):
    """Radio-button panel choosing an editing mode; switching modes also
    switches the application override cursor."""

    buttonChanged = pyqtSignal(str)

    # Mode index -> cursor shape shown while that mode is active.
    _CURSORS = {
        0: Qt.ArrowCursor,         # Draw
        1: Qt.CrossCursor,         # Zoom
        2: Qt.OpenHandCursor,      # Pan
        3: Qt.PointingHandCursor,  # Reciprocal
    }

    def __init__(self, view):
        super(rdButton, self).__init__()
        self.view = view
        labels = ("Draw", "Zoom", "Pan", "Reciprocal")
        layout = QVBoxLayout()
        self._button_group = QButtonGroup()
        for index, label in enumerate(labels):
            button = QRadioButton(label)
            if index == 0:
                button.setChecked(True)  # Draw is the default mode
            layout.addWidget(button)
            self._button_group.addButton(button, index)
            button.clicked.connect(self.radio_button_clicked)
        self.setLayout(layout)

    def radio_button_clicked(self):
        """Set the override cursor matching the checked mode."""
        shape = self._CURSORS.get(self._button_group.checkedId())
        if shape is not None:
            QApplication.setOverrideCursor(QCursor(shape))
class GraphicsScene2(QGraphicsScene):
    """Scene that records drawn paths as selectable graphicsPathItem
    objects linked back to a companion scene.

    Bug fixed: addPath2 referenced the ``QtGui``/``QtCore`` module names,
    which are never imported in this file (only star imports of their
    contents exist), raising NameError.  The star-imported names are used
    instead.
    """

    def __init__(self, parent=None):
        super(GraphicsScene2, self).__init__(parent)

    def retrieval(self, scene):
        """Remember the companion scene that double-clicks copy paths into."""
        self.scene = scene

    def addPath2(self, path, pen=None, brush=None):
        """Add *path* as a path item and return it.

        ``pen`` defaults to a thick green round-cap pen (the original
        always used this, ignoring the parameter); ``brush`` is accepted
        for signature compatibility but unused, as before.
        """
        if pen is None:
            pen = QPen(QColor("green"), 4, Qt.SolidLine, Qt.RoundCap)
        self.item = graphicsPathItem(self, self.scene)
        self.item.setPen(pen)
        self.item.setPath(path)
        self.addItem(self.item)
        return self.item
class graphicsPathItem(QGraphicsPathItem):
    """Path item that can clone its outline into a companion scene and
    delete selected items on Backspace/Delete.

    Bug fixed: referenced the ``QtGui``/``QtCore`` module names, which
    are never imported in this file (only star imports of their contents
    exist), raising NameError.  The star-imported names are used instead.
    """

    def __init__(self, dscene, scene, parent=None):
        # NOTE(review): super(QGraphicsPathItem, self) skips this class's
        # own base initialiser level; preserved from the original.
        super(QGraphicsPathItem, self).__init__(parent)
        self.dscene = dscene  # drawing scene this item lives in
        self.scene = scene    # companion scene that double-clicks copy into

    def mouseDoubleClickEvent(self, e):
        # Copy this item's outline into the companion scene in black.
        pen = QPen(QColor("black"), 2, Qt.SolidLine, Qt.RoundCap)
        self.scene.addPath(self.shape(), pen)

    def keyPressEvent(self, e):
        # Backspace/Delete removes every selected item from the drawing
        # scene.  (Original note: the Delete key did not seem to fire
        # here, only Backspace.)
        key = e.key()
        if key in (Qt.Key_Delete, Qt.Key_Backspace):
            for item in self.dscene.selectedItems():
                self.dscene.removeItem(item)
            self.dscene.update()
class GraphicsView(QGraphicsView, photoManager):
rectChanged = pyqtSignal(QRect)
def __init__(self, dView, parent = None):
super(GraphicsView, self).__init__(parent)
self.parent = parent
self.dView = dView
self.button = 0
self.setGeometry(300, 300, 250, 150)
self.setScene(GraphicsScene2(self))
self.dView.setScene(GraphicsScene2(self))
self.dView.scene().retrieval(self.scene())
self.pixmapItem = QGraphicsPixmapItem()
self.dpixmapItem = QGraphicsPixmapItem()
self.scene().addItem(self.pixmapItem)
self.dView.scene().addItem(self.dpixmapItem)
self._empty = True
self._path_item = None
self.rubberBand = QRubberBand(QRubberBand.Rectangle, self)
self.setMouseTracking(True)
| |
East Asian ideograph
0x295825: (0x9CBA, 0), # East Asian ideograph
0x224D73: (0x6FB6, 0), # East Asian ideograph
0x224652: (0x6C0D, 0), # East Asian ideograph
0x234653: (0x93F1, 0), # East Asian ideograph
0x225851: (0x739E, 0), # East Asian ideograph
0x6F4E40: (0xB5D1, 0), # Korean hangul
0x213868: (0x58D5, 0), # East Asian ideograph
0x69253A: (0x30BA, 0), # Katakana letter ZU
0x274655: (0x6C14, 0), # East Asian ideograph
0x234656: (0x93DE, 0), # East Asian ideograph
0x234657: (0x93EE, 0), # East Asian ideograph
0x234D30: (0x976E, 0), # East Asian ideograph
0x274658: (0x6C22, 0), # East Asian ideograph
0x27384A: (0x573A, 0), # East Asian ideograph
0x223244: (0x63E5, 0), # East Asian ideograph
0x224659: (0x6C15, 0), # East Asian ideograph
0x51563F: (0x8616, 0), # East Asian ideograph
0x23465A: (0x93C7, 0), # East Asian ideograph
0x23465B: (0x93F2, 0), # East Asian ideograph
0x232625: (0x8580, 0), # East Asian ideograph
0x222626: (0x5DA7, 0), # East Asian ideograph
0x232628: (0x858F, 0), # East Asian ideograph
0x22465C: (0x6C1A, 0), # East Asian ideograph
0x22262A: (0x5DB0, 0), # East Asian ideograph
0x23262D: (0x8579, 0), # East Asian ideograph
0x22262E: (0x5DB4, 0), # East Asian ideograph
0x23465D: (0x93D4, 0), # East Asian ideograph
0x222630: (0x5DB6, 0), # East Asian ideograph
0x232632: (0x857F, 0), # East Asian ideograph
0x232633: (0x8577, 0), # East Asian ideograph
0x232634: (0x8578, 0), # East Asian ideograph
0x22465E: (0x6C1D, 0), # East Asian ideograph
0x222636: (0x5DB7, 0), # East Asian ideograph
0x6F4C37: (0xB18B, 0), # Korean hangul
0x2D6132: (0x99EE, 0), # East Asian ideograph
0x23263D: (0x85A4, 0), # East Asian ideograph
0x22263E: (0x5DC3, 0), # East Asian ideograph
0x224660: (0x6C20, 0), # East Asian ideograph
0x232642: (0x857A, 0), # East Asian ideograph
0x222644: (0x5DC7, 0), # East Asian ideograph
0x232645: (0x8557, 0), # East Asian ideograph
0x222646: (0x5DC9, 0), # East Asian ideograph
0x222647: (0x5DCB, 0), # East Asian ideograph
0x232649: (0x85A8, 0), # East Asian ideograph
0x213D4F: (0x5F59, 0), # East Asian ideograph
0x234D32: (0x9778, 0), # East Asian ideograph
0x224662: (0x6C21, 0), # East Asian ideograph
0x22264E: (0x5DD8, 0), # East Asian ideograph
0x232650: (0x8599, 0), # East Asian ideograph
0x232651: (0x858A, 0), # East Asian ideograph
0x222652: (0x5DDC, 0), # East Asian ideograph
0x234663: (0x93CA, 0), # East Asian ideograph
0x232654: (0x8590, 0), # East Asian ideograph
0x232656: (0x8585, 0), # East Asian ideograph
0x232657: (0x8588, 0), # East Asian ideograph
0x225A40: (0x7447, 0), # East Asian ideograph
0x224664: (0x6C2A, 0), # East Asian ideograph
0x23265A: (0x85B8, 0), # East Asian ideograph
0x6F5749: (0xC870, 0), # Korean hangul
0x23265D: (0x85C1, 0), # East Asian ideograph
0x334665: (0x6C61, 0), # East Asian ideograph
0x232661: (0x85BA, 0), # East Asian ideograph
0x222662: (0x5E00, 0), # East Asian ideograph
0x222664: (0x51E7, 0), # East Asian ideograph
0x234666: (0x93E8, 0), # East Asian ideograph
0x224934: (0x6D79, 0), # East Asian ideograph
0x232668: (0x85CE, 0), # East Asian ideograph
0x23266A: (0x85C2, 0), # East Asian ideograph
0x23266B: (0x85B7, 0), # East Asian ideograph
0x23266C: (0x85B9, 0), # East Asian ideograph
0x23266E: (0x85B3, 0), # East Asian ideograph
0x23266F: (0x85BD, 0), # East Asian ideograph
0x232670: (0x85C4, 0), # East Asian ideograph
0x224668: (0x6C2C, 0), # East Asian ideograph
0x222672: (0x5E14, 0), # East Asian ideograph
0x222673: (0x5E17, 0), # East Asian ideograph
0x232675: (0x85BE, 0), # East Asian ideograph
0x222676: (0x5E19, 0), # East Asian ideograph
0x224669: (0x6C31, 0), # East Asian ideograph (not in Unicode)
0x222678: (0x5E1F, 0), # East Asian ideograph
0x22267A: (0x5E23, 0), # East Asian ideograph
0x22267B: (0x5E21, 0), # East Asian ideograph
0x23267E: (0x85B6, 0), # East Asian ideograph
0x295421: (0x9AA3, 0), # East Asian ideograph
0x2D4647: (0x6BD8, 0), # East Asian ideograph
0x284359: (0x6989, 0), # East Asian ideograph
0x2D466D: (0x51B3, 0), # East Asian ideograph
0x294758: (0x9561, 0), # East Asian ideograph
0x69253F: (0x30BF, 0), # Katakana letter TA
0x227C5B: (0x82D0, 0), # East Asian ideograph
0x28723C: (0x7F08, 0), # East Asian ideograph
0x224670: (0x6C3B, 0), # East Asian ideograph
0x295422: (0x9A81, 0), # East Asian ideograph
0x234D35: (0x9773, 0), # East Asian ideograph
0x276174: (0x9C7C, 0), # East Asian ideograph
0x234672: (0x93DA, 0), # East Asian ideograph
0x234673: (0x93D0, 0), # East Asian ideograph
0x335E42: (0x9452, 0), # East Asian ideograph
0x2D353C: (0x6B62, 0), # East Asian ideograph
0x234674: (0x93EF, 0), # East Asian ideograph
0x6F4E37: (0xB5B3, 0), # Korean hangul
0x4B4676: (0x6C89, 0), # East Asian ideograph
0x213121: (0x4F11, 0), # East Asian ideograph
0x276136: (0x9A77, 0), # East Asian ideograph
0x21386F: (0x58E2, 0), # East Asian ideograph
0x223C6E: (0x68B2, 0), # East Asian ideograph
0x6F2472: (0x314F, 0), # Korean hangul
0x224678: (0x6C46, 0), # East Asian ideograph
0x6F5078: (0xBC29, 0), # Korean hangul
0x28723E: (0x7F0C, 0), # East Asian ideograph
0x29364E: (0x8D33, 0), # East Asian ideograph
0x22467A: (0x6C52, 0), # East Asian ideograph
0x213125: (0x4F01, 0), # East Asian ideograph
0x234D37: (0x9783, 0), # East Asian ideograph
0x215F69: (0x9739, 0), # East Asian ideograph
0x276176: (0x9C81, 0), # East Asian ideograph
0x6F4E48: (0xB69D, 0), # Korean hangul
0x23467C: (0x93CC, 0), # East Asian ideograph
0x6F574A: (0xC871, 0), # Korean hangul
0x224D7C: (0x6FC6, 0), # East Asian ideograph
0x23517B: (0x9954, 0), # East Asian ideograph
0x21312A: (0x4F4F, 0), # East Asian ideograph
0x234D38: (0x977A, 0), # East Asian ideograph
0x213C76: (0x5EAB, 0), # East Asian ideograph
0x21312B: (0x4F4D, 0), # East Asian ideograph
0x6F4E49: (0xB6A4, 0), # Korean hangul
0x213871: (0x58E9, 0), # East Asian ideograph
0x21312C: (0x4F34, 0), # East Asian ideograph
0x6F594C: (0xCD18, 0), # Korean hangul
0x21342E: (0x52C3, 0), # East Asian ideograph
0x21312D: (0x4F47, 0), # East Asian ideograph
0x2D5758: (0x890E, 0), # East Asian ideograph
0x21312F: (0x4F3A, 0), # East Asian ideograph
0x275B3F: (0x8F7D, 0), # East Asian ideograph
0x6F4F3D: (0xB86D, 0), # Korean hangul
0x28704A: (0x7EBE, 0), # East Asian ideograph
0x222722: (0x5E22, 0), # East Asian ideograph
0x286577: (0x789C, 0), # East Asian ideograph
0x222724: (0x5E28, 0), # East Asian ideograph
0x213872: (0x58EB, 0), # East Asian ideograph
0x232728: (0x85F7, 0), # East Asian ideograph
0x6F5424: (0xC2DD, 0), # Korean hangul
0x23272C: (0x85E6, 0), # East Asian ideograph
0x223132: (0x6360, 0), # East Asian ideograph
0x23272E: (0x85D4, 0), # East Asian ideograph
0x232731: (0x85ED, 0), # East Asian ideograph
0x6F5D42: (0xD65C, 0), # Korean hangul
0x222735: (0x5E44, 0), # East Asian ideograph
0x222736: (0x5E43, 0), # East Asian ideograph
0x222739: (0x5E42, 0), # East Asian ideograph
0x22273F: (0x5E4E, 0), # East Asian ideograph
0x6F4E4B: (0xB6AC, 0), # Korean hangul
0x232743: (0x85DF, 0), # East Asian ideograph
0x232745: (0x85D8, 0), # East Asian ideograph
0x692545: (0x30C5, 0), # Katakana letter DU
0x222747: (0x5E58, 0), # East Asian ideograph
0x222748: (0x5E48, 0), # East Asian ideograph
0x513B52: (0x6C3D, 0), # East Asian ideograph
0x213137: (0x4F3D, 0), # East Asian ideograph
0x23274C: (0x85DC, 0), # East Asian ideograph
0x23274E: (0x85F5, 0), # East Asian ideograph
0x273138: (0x5E03, 0), # East Asian ideograph
0x232752: (0x8622, 0), # East Asian ideograph
0x232754: (0x8610, 0), # East Asian ideograph
0x285029: (0x6EDF, 0), # East Asian ideograph
0x232757: (0x85FC, 0), # East Asian ideograph
0x222758: (0x5E61, 0), # East Asian ideograph
0x23275B: (0x85FF, 0), # East Asian ideograph
0x23313A: (0x89D6, 0), # East Asian ideograph
0x23275E: (0x85FE, 0), # East Asian ideograph
0x22275F: (0x5E6C, 0), # East Asian ideograph
0x222760: (0x5E6A, 0), # East Asian ideograph
0x222763: (0x5E6E, 0), # East Asian ideograph
0x222764: (0x5E6D, 0), # East Asian ideograph
0x222765: (0x5E70, 0), # East Asian ideograph
0x232768: (0x8604, 0), # East Asian ideograph
0x27313C: (0x5360, 0), # East Asian ideograph
0x227C6E: (0x8314, 0), # East Asian ideograph
0x22276D: (0x5E75, 0), # East Asian ideograph
0x232771: (0x8605, 0), # East Asian ideograph
0x216757: (0x50A3, 0), # East Asian ideograph
0x232775: (0x862B, 0), # East Asian ideograph
0x213D51: (0x5F62, 0), # East Asian ideograph
0x222777: (0x5E80, 0), # East Asian ideograph
| |
size for first batch
returned.
produce_cursors (bool): Whether to generate cursors from query.
start_cursor: Starting point for search.
end_cursor: Endpoint point for search.
timeout (Optional[int]): Override the gRPC timeout, in seconds.
deadline (Optional[int]): DEPRECATED: Synonym for ``timeout``.
read_consistency: If not in a transaction, defaults to
``ndb.EVENTUAL`` for potentially faster query results without
having to wait for Datastore to apply pending changes to all
returned records. Otherwise consistency with current
transaction is maintained.
read_policy: DEPRECATED: Synonym for ``read_consistency``.
transaction (bytes): Transaction ID to use for query. Results will
be consistent with Datastore state for that transaction.
Implies ``read_policy=ndb.STRONG``.
options (QueryOptions): DEPRECATED: An object containing options
values for some of these arguments.
Returns:
:class:`QueryIterator`: An iterator.
"""
return _datastore_query.iterate(_options)
__iter__ = iter
def map(
self,
callback,
*,
pass_batch_into_callback=None,
merge_future=None,
keys_only=None,
limit=None,
projection=None,
offset=None,
batch_size=None,
prefetch_size=None,
produce_cursors=False,
start_cursor=None,
end_cursor=None,
timeout=None,
deadline=None,
read_consistency=None,
read_policy=None,
transaction=None,
options=None,
):
"""Map a callback function or tasklet over the query results.
DEPRECATED: This method is no longer supported.
Args:
callback (Callable): A function or tasklet to be applied to each
result; see below.
merge_future: Optional ``Future`` subclass; see below.
keys_only (bool): Return keys instead of entities.
projection (list[str]): The fields to return as part of the query
results.
offset (int): Number of query results to skip.
limit (Optional[int]): Maximum number of query results to return.
If not specified, there is no limit.
batch_size (Optional[int]): Number of results to fetch in a single
RPC call. Affects efficiency of queries only. Larger batch
sizes use more memory but make fewer RPC calls.
prefetch_size (Optional[int]): Overrides batch size for first batch
returned.
produce_cursors (bool): Whether to generate cursors from query.
start_cursor: Starting point for search.
end_cursor: Endpoint point for search.
timeout (Optional[int]): Override the gRPC timeout, in seconds.
deadline (Optional[int]): DEPRECATED: Synonym for ``timeout``.
read_consistency: If not in a transaction, defaults to
``ndb.EVENTUAL`` for potentially faster query results without
having to wait for Datastore to apply pending changes to all
returned records. Otherwise consistency with current
transaction is maintained.
read_policy: DEPRECATED: Synonym for ``read_consistency``.
transaction (bytes): Transaction ID to use for query. Results will
be consistent with Datastore state for that transaction.
Implies ``read_policy=ndb.STRONG``.
options (QueryOptions): DEPRECATED: An object containing options
values for some of these arguments.
Callback signature: The callback is normally called with an entity
as argument. However if keys_only=True is given, it is called
with a Key. Also, when pass_batch_into_callback is True, it is
called with three arguments: the current batch, the index within
the batch, and the entity or Key at that index. The callback can
return whatever it wants. If the callback is None, a trivial
callback is assumed that just returns the entity or key passed in
(ignoring produce_cursors).
Optional merge future: The merge_future is an advanced argument
that can be used to override how the callback results are combined
into the overall map() return value. By default a list of
callback return values is produced. By substituting one of a
small number of specialized alternatives you can arrange
otherwise. See tasklets.MultiFuture for the default
implementation and a description of the protocol the merge_future
object must implement the default. Alternatives from the same
module include QueueFuture, SerialQueueFuture and ReducingFuture.
Returns:
Any: When the query has run to completion and all callbacks have
returned, map() returns a list of the results of all callbacks.
(But see 'optional merge future' above.)
"""
raise exceptions.NoLongerImplementedError()
    def map_async(
        self,
        callback,
        *,
        pass_batch_into_callback=None,
        merge_future=None,
        keys_only=None,
        limit=None,
        projection=None,
        offset=None,
        batch_size=None,
        prefetch_size=None,
        produce_cursors=False,
        start_cursor=None,
        end_cursor=None,
        timeout=None,
        deadline=None,
        read_consistency=None,
        read_policy=None,
        transaction=None,
        options=None,
    ):
        """Map a callback function or tasklet over the query results.

        DEPRECATED: This method is no longer supported.

        This is the asynchronous version of :meth:`Query.map`.

        The keyword arguments are accepted purely for signature
        compatibility with the legacy GAE NDB API; none of them is used.

        Returns:
            tasklets.Future: See :meth:`Query.map` for eventual result.

        Raises:
            exceptions.NoLongerImplementedError: Always.
        """
        raise exceptions.NoLongerImplementedError()
@_query_options
def get(
self,
*,
keys_only=None,
projection=None,
batch_size=None,
prefetch_size=None,
produce_cursors=False,
start_cursor=None,
end_cursor=None,
timeout=None,
deadline=None,
read_consistency=None,
read_policy=None,
transaction=None,
options=None,
_options=None,
):
"""Get the first query result, if any.
This is equivalent to calling ``q.fetch(1)`` and returning the first
result, if any.
Args:
keys_only (bool): Return keys instead of entities.
projection (list[str]): The fields to return as part of the query
results.
batch_size (Optional[int]): Number of results to fetch in a single
RPC call. Affects efficiency of queries only. Larger batch
sizes use more memory but make fewer RPC calls.
prefetch_size (Optional[int]): Overrides batch size for first batch
returned.
produce_cursors (bool): Whether to generate cursors from query.
start_cursor: Starting point for search.
end_cursor: Endpoint point for search.
timeout (Optional[int]): Override the gRPC timeout, in seconds.
deadline (Optional[int]): DEPRECATED: Synonym for ``timeout``.
read_consistency: If not in a transaction, defaults to
``ndb.EVENTUAL`` for potentially faster query results without
having to wait for Datastore to apply pending changes to all
returned records. Otherwise consistency with current
transaction is maintained.
read_policy: DEPRECATED: Synonym for ``read_consistency``.
transaction (bytes): Transaction ID to use for query. Results will
be consistent with Datastore state for that transaction.
Implies ``read_policy=ndb.STRONG``.
options (QueryOptions): DEPRECATED: An object containing options
values for some of these arguments.
Returns:
Optional[Union[google.cloud.datastore.entity.Entity, key.Key]]:
A single result, or :data:`None` if there are no results.
"""
return self.get_async(_options=_options).result()
    @tasklets.tasklet
    @_query_options
    def get_async(
        self,
        *,
        keys_only=None,
        projection=None,
        offset=None,
        batch_size=None,
        prefetch_size=None,
        produce_cursors=False,
        start_cursor=None,
        end_cursor=None,
        timeout=None,
        deadline=None,
        read_consistency=None,
        read_policy=None,
        transaction=None,
        options=None,
        _options=None,
    ):
        """Get the first query result, if any.

        This is the asynchronous version of :meth:`Query.get`.

        The ``@_query_options`` decorator collapses the keyword arguments
        into ``_options``; see :meth:`Query.get` for their meanings.

        Returns:
            tasklets.Future: See :meth:`Query.get` for eventual result.
        """
        # Only one entity is needed, so cap the underlying fetch at one.
        options = _options.copy(limit=1)
        results = yield _datastore_query.fetch(options)
        if results:
            raise tasklets.Return(results[0])
        # Falling off the end of the tasklet resolves the future to None.
    @_query_options
    def count(
        self,
        limit=None,
        *,
        offset=None,
        batch_size=None,
        prefetch_size=None,
        produce_cursors=False,
        start_cursor=None,
        end_cursor=None,
        timeout=None,
        deadline=None,
        read_consistency=None,
        read_policy=None,
        transaction=None,
        options=None,
        _options=None,
    ):
        """Count the number of query results, up to a limit.

        This returns the same result as ``len(q.fetch(limit))``.

        Note that you should pass a maximum value to limit the amount of
        work done by the query.

        Note:
            The legacy GAE version of NDB claims this is more efficient than
            just calling ``len(q.fetch(limit))``. Since Datastore does not
            provide API for ``count``, this version ends up performing the
            fetch underneath hood. We can specify ``keys_only`` to save some
            network traffic, making this call really equivalent to
            ``len(q.fetch(limit, keys_only=True))``. We can also avoid
            marshalling NDB key objects from the returned protocol buffers, but
            this is a minor savings--most applications that use NDB will have
            their performance bound by the Datastore backend, not the CPU.
            Generally, any claim of performance improvement using this versus
            the equivalent call to ``fetch`` is exaggerated, at best.

        Args:
            limit (Optional[int]): Maximum number of query results to count.
                If not specified, there is no limit.
            offset (int): Number of query results to skip.
            batch_size (Optional[int]): Number of results to fetch in a single
                RPC call. Affects efficiency of queries only. Larger batch
                sizes use more memory but make fewer RPC calls.
            prefetch_size (Optional[int]): Overrides batch size for first batch
                returned.
            produce_cursors (bool): Whether to generate cursors from query.
            start_cursor: Starting point for search.
            end_cursor: Endpoint point for search.
            timeout (Optional[int]): Override the gRPC timeout, in seconds.
            deadline (Optional[int]): DEPRECATED: Synonym for ``timeout``.
            read_consistency: If not in a transaction, defaults to
                ``ndb.EVENTUAL`` for potentially faster query results without
                having to wait for Datastore to apply pending changes to all
                returned records. Otherwise consistency with current
                transaction is maintained.
            read_policy: DEPRECATED: Synonym for ``read_consistency``.
            transaction (bytes): Transaction ID to use for query. Results will
                be consistent with Datastore state for that transaction.
                Implies ``read_policy=ndb.STRONG``.
            options (QueryOptions): DEPRECATED: An object containing options
                values for some of these arguments.

        Returns:
            int: The number of results, up to ``limit`` if one was given.
        """
        return self.count_async(_options=_options).result()
@tasklets.tasklet
@_query_options
def count_async(
self,
limit=None,
*,
offset=None,
batch_size=None,
prefetch_size=None,
produce_cursors=False,
start_cursor=None,
end_cursor=None,
timeout=None,
deadline=None,
read_consistency=None,
read_policy=None,
transaction=None,
options=None,
_options=None,
):
"""Count the number of query results, up to a limit.
This is the asynchronous version of :meth:`Query.count`.
Returns:
tasklets.Future: | |
% key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_items_by_id_download`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Items/{Id}/Download', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_items_by_id_file(self, id, **kwargs): # noqa: E501
"""Gets the original file of an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_file(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_items_by_id_file_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_items_by_id_file_with_http_info(id, **kwargs) # noqa: E501
return data
def get_items_by_id_file_with_http_info(self, id, **kwargs): # noqa: E501
"""Gets the original file of an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_file_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_items_by_id_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_items_by_id_file`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Items/{Id}/File', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_items_by_id_similar(self, id, **kwargs): # noqa: E501
"""Gets similar items # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_similar(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str include_item_types: Optional. If specified, results will be filtered based on item type. This allows multiple, comma delimeted.
:param bool enable_images: Optional, include image information in output
:param bool enable_user_data: Optional, include user data
:param int image_type_limit: Optional, the max number of images to return, per image type
:param str enable_image_types: Optional. The image types to include in the output.
:param str user_id: Optional. Filter by user id, and attach user data
:param int limit: Optional. The maximum number of records to return
:param str fields: Optional. Specify additional fields of information to return in the output. This allows multiple, comma delimeted. Options: Budget, Chapters, DateCreated, Genres, HomePageUrl, IndexOptions, MediaStreams, Overview, ParentId, Path, People, ProviderIds, PrimaryImageAspectRatio, Revenue, SortName, Studios, Taglines, TrailerUrls
:return: QueryResultBaseItemDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_items_by_id_similar_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_items_by_id_similar_with_http_info(id, **kwargs) # noqa: E501
return data
def get_items_by_id_similar_with_http_info(self, id, **kwargs): # noqa: E501
"""Gets similar items # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_similar_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str include_item_types: Optional. If specified, results will be filtered based on item type. This allows multiple, comma delimeted.
:param bool enable_images: Optional, include image information in output
:param bool enable_user_data: Optional, include user data
:param int image_type_limit: Optional, the max number of images to return, per image type
:param str enable_image_types: Optional. The image types to include in the output.
:param str user_id: Optional. Filter by user id, and attach user data
:param int limit: Optional. The maximum number of records to return
:param str fields: Optional. Specify additional fields of information to return in the output. This allows multiple, comma delimeted. Options: Budget, Chapters, DateCreated, Genres, HomePageUrl, IndexOptions, MediaStreams, Overview, ParentId, Path, People, ProviderIds, PrimaryImageAspectRatio, Revenue, SortName, Studios, Taglines, TrailerUrls
:return: QueryResultBaseItemDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'include_item_types', 'enable_images', 'enable_user_data', 'image_type_limit', 'enable_image_types', 'user_id', 'limit', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_items_by_id_similar" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_items_by_id_similar`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
if 'include_item_types' in params:
query_params.append(('IncludeItemTypes', params['include_item_types'])) # noqa: E501
if 'enable_images' in params:
query_params.append(('EnableImages', params['enable_images'])) # noqa: E501
if 'enable_user_data' in params:
query_params.append(('EnableUserData', params['enable_user_data'])) # noqa: E501
if 'image_type_limit' in params:
query_params.append(('ImageTypeLimit', params['image_type_limit'])) # noqa: E501
if 'enable_image_types' in params:
query_params.append(('EnableImageTypes', params['enable_image_types'])) # noqa: E501
if 'user_id' in params:
query_params.append(('UserId', params['user_id'])) # noqa: E501
if 'limit' in params:
query_params.append(('Limit', params['limit'])) # noqa: E501
if 'fields' in params:
query_params.append(('Fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Items/{Id}/Similar', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QueryResultBaseItemDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_items_by_id_thememedia(self, id, **kwargs): # noqa: E501
"""Gets theme videos and songs for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_thememedia(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str user_id: Optional. Filter by user id, and attach user data
:param bool inherit_from_parent: Determines whether or not parent items should be searched for theme media.
:return: AllThemeMediaResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_items_by_id_thememedia_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_items_by_id_thememedia_with_http_info(id, **kwargs) # noqa: E501
return data
def get_items_by_id_thememedia_with_http_info(self, id, **kwargs): # noqa: E501
"""Gets theme videos and songs for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_thememedia_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str user_id: Optional. Filter by user id, and attach user data
:param bool inherit_from_parent: Determines whether or not parent items should be searched for theme media.
:return: AllThemeMediaResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'user_id', 'inherit_from_parent'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got | |
to do.
"""
for act in self._cleanup_actions():
return True
return False
    def _cleanup_actions(self):
        """Iterator giving (func,args,kwds) tuples of cleanup actions.

        This encapsulates the logic of the "cleanup" method without actually
        performing any of the actions, making it easy to check whether cleanup
        is required without duplicating the logic.

        NOTE(review): the items actually yielded below are (func, args)
        pairs and occasionally a bare callable (``lambda: False``) that
        signals "cleanup still pending" -- presumably the consumer accepts
        all of these shapes; confirm against the ``cleanup`` method.
        """
        appdir = self.appdir
        vsdir = self._get_versions_dir()
        # Best fully-installed version vs. best version when partially
        # installed ones are also considered.
        best_version = get_best_version(vsdir)
        new_version = get_best_version(vsdir, include_partial_installs=True)
        # If there's a partial install we must complete it, since it
        # could have left exes in the bootstrap env and we don't want
        # to accidentally delete their dependencies.
        if best_version != new_version:
            (_, v, _) = split_app_version(new_version)
            yield (self.install_version, (v,))
            best_version = new_version
        # TODO: remove compatability hooks for ESKY_APPDATA_DIR=""
        if vsdir == appdir and ESKY_APPDATA_DIR:
            appdatadir = os.path.join(appdir, ESKY_APPDATA_DIR)
            if os.path.isdir(appdatadir) and os.listdir(appdatadir):
                # Same partial-install completion logic for the alternate
                # appdata directory layout.
                new_version = get_best_version(appdatadir,
                                               include_partial_installs=True)
                if best_version != new_version:
                    (_, v, _) = split_app_version(new_version)
                    yield (self.install_version, (v,))
                    best_version = new_version
        # Now we can safely remove all the old versions.
        # We except the currently-executing version, and silently
        # ignore any locked versions.
        # "manifest" is the set of names that must NOT be removed.
        manifest = self._version_manifest(best_version)
        manifest.add("updates")
        manifest.add("locked")
        manifest.add(best_version)
        if self.active_version:
            if self.active_version != split_app_version(best_version)[1]:
                # The running version is not the best one; cleanup cannot
                # complete yet.
                yield lambda: False
            manifest.add(self.active_version)
        # TODO: remove compatability hooks for ESKY_APPDATA_DIR=""
        for tdir in (appdir, vsdir):
            for nm in os.listdir(tdir):
                if nm not in manifest:
                    fullnm = os.path.join(tdir, nm)
                    if ".old." in nm or nm.endswith(".old"):
                        # It's a temporary backup file; remove it.
                        yield (self._try_remove, (tdir, nm, manifest,))
                    elif not os.path.isdir(fullnm):
                        # It's an unaccounted-for file in the bootstrap env.
                        # Leave it alone.
                        pass
                    elif is_version_dir(fullnm):
                        # It's an installed-but-obsolete version. Properly
                        # uninstall it so it will clean up the bootstrap env.
                        (_, v, _) = split_app_version(nm)
                        try:
                            yield (self.uninstall_version, (v,))
                        except VersionLockedError:
                            # NOTE(review): this fires only if the consumer
                            # throws the error back into the generator --
                            # verify against ``cleanup``.
                            yield lambda: False
                    elif is_uninstalled_version_dir(fullnm):
                        # It's a partially-removed version; finish removing it.
                        yield (self._try_remove, (tdir, nm, manifest,))
                    else:
                        for (_, _, filenms) in os.walk(fullnm):
                            if filenms:
                                # It contains unaccounted-for files in the
                                # bootstrap env. Can't prove it's safe to
                                # remove, so leave it alone.
                                break
                        else:
                            # It's an empty directory structure, remove it.
                            yield (self._try_remove, (tdir, nm, manifest,))
        # If there are pending overwrites, try to do them.
        ovrdir = os.path.join(vsdir, best_version, ESKY_CONTROL_DIR,
                              "overwrite")
        if os.path.exists(ovrdir):
            try:
                # Walk bottom-up so each directory is emptied before its
                # own removal is yielded.
                for (dirnm, _, filenms) in os.walk(ovrdir, topdown=False):
                    for nm in filenms:
                        ovrsrc = os.path.join(dirnm, nm)
                        ovrdst = os.path.join(appdir, ovrsrc[len(ovrdir)+1:])
                        yield (self._overwrite, (ovrsrc, ovrdst,))
                        yield (os.unlink, (ovrsrc,))
                    yield (os.rmdir, (dirnm,))
            except EnvironmentError:
                yield lambda: False
        # Get the VersionFinder to clean up after itself
        if self.version_finder is not None:
            if self.version_finder.needs_cleanup(self):
                yield (self.version_finder.cleanup, (self,))
def _overwrite(self, src, dst):
"""Directly overwrite file 'dst' with the contents of file 'src'."""
with open(src, "rb") as fIn:
with open(dst, "ab") as fOut:
fOut.seek(0)
chunk = fIn.read(512*16)
while chunk:
fOut.write(chunk)
chunk = fIn.read(512*16)
    @allow_from_sudo()
    def cleanup_at_exit(self):
        """Arrange for cleanup to occur after application exit.

        This operates by using the atexit module to spawn a new instance of
        this app, with appropriate flags that cause it to launch directly into
        the cleanup process.

        Recall that sys.executable points to a specific version dir, so this
        new process will not hold any filesystem locks in the main app dir.
        """
        # If running through a sudo proxy, delegate to it (and keep the
        # proxy alive so it is still around at interpreter exit).
        if self.sudo_proxy is not None:
            self.keep_sudo_proxy_alive = True
            return self.sudo_proxy.cleanup_at_exit()
        if not getattr(sys, "frozen", False):
            # Unfrozen (development) mode: respawn through the interpreter.
            # Note "exe" is a list here, but a plain string in the frozen
            # branch below until it is re-wrapped.
            exe = [sys.executable, "-c",
                   "import esky; esky.run_startup_hooks()",
                   "--esky-spawn-cleanup"]
        else:
            exe = sys.executable
            # Try to re-launch the best available version, so that
            # the currently in-use version can be cleaned up.
            if self.active_version is not None:
                vsdir = self._get_versions_dir()
                bestver = get_best_version(vsdir,
                                           include_partial_installs=True)
                if bestver is not None:
                    (_, version, _) = split_app_version(bestver)
                    if self.active_version != version:
                        if self.active_version in exe:
                            # Point the executable path at the newer version;
                            # fall back if no such file exists there.
                            exe = exe.replace(self.active_version, version)
                            if not os.path.isfile(exe):
                                exe = sys.executable
            if os.path.basename(exe).lower() in ("python", "pythonw"):
                # The frozen "executable" is really a python interpreter;
                # mimic the unfrozen invocation.
                exe = [exe, "-c", "import esky; esky.run_startup_hooks()",
                       "--esky-spawn-cleanup"]
            else:
                # A real frozen exe only understands the flag if the esky
                # startup hooks were installed in this process.
                if not _startup_hooks_were_run:
                    raise OSError(None,
                                  "unable to cleanup: startup hooks not run")
                exe = [exe, "--esky-spawn-cleanup"]
        # Serialize this Esky object so the child process can reconstruct
        # it from the command line (base64 keeps it argv-safe).
        appdata = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
        exe = exe + [base64.b64encode(appdata).decode("ascii")]
        @atexit.register
        def spawn_cleanup():
            # Runs at interpreter shutdown; detach the child from our
            # standard streams so it outlives this process cleanly.
            rnul = open(os.devnull, "r")
            wnul = open(os.devnull, "w")
            if sys.platform == "win32":
                # NOTE(review): close_fds appears to be used on win32 only
                # for Python >= 2.6 -- presumably due to subprocess
                # limitations on older versions; confirm if targeting 2.5.
                if sys.hexversion >= 0x02060000:
                    kwds = dict(close_fds=True)
                else:
                    kwds = {}
            else:
                kwds = dict(stdin=rnul, stdout=wnul, stderr=wnul,
                            close_fds=True)
            subprocess.Popen(exe, **kwds)
def _try_remove(self, tdir, path, manifest=[]):
"""Try to remove the file/directory at given path in the target dir.
This method attempts to remove the file or directory at the given path,
but will fail silently under a number of conditions:
* if a file is locked or permission is denied
* if a directory cannot be emptied of all contents
* if the path appears on sys.path
* if the path appears in the given manifest
"""
fullpath = os.path.join(tdir, path)
if fullpath in sys.path:
return False
if path in manifest:
return False
try:
if os.path.isdir(fullpath):
# Remove paths starting with "esky" last, since we use
# these to maintain state information.
esky_paths = []
success = True
for nm in os.listdir(fullpath):
if nm == "esky" or nm.startswith("esky-"):
esky_paths.append(nm)
else:
subdir = os.path.join(path, nm)
success &= self._try_remove(tdir, subdir, manifest)
if not success:
return False
for nm in sorted(esky_paths):
self._try_remove(tdir, os.path.join(path, nm), manifest)
os.rmdir(fullpath)
else:
os.unlink(fullpath)
except EnvironmentError, e:
if e.errno not in self._errors_to_ignore:
raise
return False
else:
return True
_errors_to_ignore = (errno.ENOENT, errno.EPERM, errno.EACCES,
errno.ENOTDIR, errno.EISDIR, errno.EINVAL,
errno.ENOTEMPTY,)
    def auto_update(self, callback=None):
        """Automatically install the latest version of the app.

        This method automatically performs the following sequence of actions,
        escalating to root privileges if a permission error is encountered:

            * find the latest version [self.find_update()]
            * fetch the new version [self.fetch_version()]
            * install the new version [self.install_version()]
            * attempt to uninstall the old version [self.uninstall_version()]
            * reinitialize internal state [self.reinitialize()]
            * clean up the appdir [self.cleanup()]

        This method is mostly here to help you get started. For an app of
        any serious complexity, you will probably want to build your own
        variant that e.g. operates in a background thread, prompts the user
        for confirmation, etc.

        The optional 'callback' is invoked with a dict describing each stage
        (keyed on "status"); errors are reported through it and re-raised.
        """
        if self.version_finder is None:
            raise NoVersionFinderError
        if callback is None:
            # Default no-op progress callback.
            callback = lambda *args: True
        got_root = False
        cleaned = False
        try:
            callback({"status": "searching"})
            version = self.find_update()
            if version is not None:
                callback({"status": "found", "new_version": version})
                # Try to install the new version. If it fails with
                # a permission error, escalate to root and try again.
                try:
                    self._do_auto_update(version, callback)
                except EnvironmentError:
                    # Save the original exception so it can be re-raised
                    # verbatim if privilege escalation itself fails.
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    if exc_value.errno != errno.EACCES or self.has_root():
                        raise
                    try:
                        self.get_root()
                    except Exception, e:
                        raise exc_type, exc_value, exc_traceback
                    else:
                        got_root = True
                        self._do_auto_update(version, callback)
                self.reinitialize()
            # Try to clean up the app dir. If it fails with a
            # permission error, escalate to root and try again.
            try:
                callback({"status": "cleaning up"})
                cleaned = self.cleanup()
            except EnvironmentError:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                if exc_value.errno != errno.EACCES or self.has_root():
                    raise
                try:
                    self.get_root()
                except Exception, e:
                    raise exc_type, exc_value, exc_traceback
                else:
                    got_root = True
                    callback({"status": "cleaning up"})
                    cleaned = self.cleanup()
        except Exception, e:
            callback({"status": "error", "exception": e})
            raise
        else:
            callback({"status": "done"})
        finally:
            # Drop root privileges as soon as possible.
            if not cleaned and self.needs_cleanup():
                self.cleanup_at_exit()
            if got_root:
                self.drop_root()
    def _do_auto_update(self, version, callback):
        """Actual sequence of operations for auto-update.

        This is a separate method so it can easily be retried after gaining
        root privileges.  'version' is the already-located update version;
        'callback' receives the same {"status": ...} dicts as auto_update.
        """
        # Download (if necessary), then install the new version.
        self.fetch_version(version, callback)
        callback({"status": "installing", "new_version": version})
        self.install_version(version)
        # Best-effort removal of the currently-running version; it may be
        # locked by this very process, in which case cleanup happens later.
        try:
            self.uninstall_version(self.version)
        except VersionLockedError:
            pass
def find_update(self):
"""Check for an available update to this app.
This method returns either None, or a string giving the version of
the newest available update.
"""
if self.version_finder is None:
raise NoVersionFinderError
best_version = None
best_version_p = parse_version(self.version)
for version in self.version_finder.find_versions(self):
version_p = parse_version(version)
if version_p > best_version_p:
best_version_p = version_p
best_version = version
return best_version
def fetch_version(self, version, callback=None):
"""Fetch the specified updated version of the app."""
if self.sudo_proxy is not None:
for status in self.sudo_proxy.fetch_version_iter(version):
if callback is not None:
callback(status)
return self.version_finder.has_version(self, version)
if self.version_finder is None:
raise NoVersionFinderError
# Guard against malicious input (might | |
{},
"ccapAppTypeHisPSTNOutCallHandedOut": {},
"ccapAppTypeHisPSTNOutCallInHandoff": {},
"ccapAppTypeHisPSTNOutCallInHandoffRet": {},
"ccapAppTypeHisPSTNOutCallSetupReq": {},
"ccapAppTypeHisPSTNOutCallTotConn": {},
"ccapAppTypeHisPlaceCallAttempts": {},
"ccapAppTypeHisPlaceCallFailure": {},
"ccapAppTypeHisPlaceCallSuccess": {},
"ccapAppTypeHisPromptPlayAttempts": {},
"ccapAppTypeHisPromptPlayDuration": {},
"ccapAppTypeHisPromptPlayFailed": {},
"ccapAppTypeHisPromptPlaySuccess": {},
"ccapAppTypeHisRecordingAttempts": {},
"ccapAppTypeHisRecordingDuration": {},
"ccapAppTypeHisRecordingFailed": {},
"ccapAppTypeHisRecordingSuccess": {},
"ccapAppTypeHisTTSAttempts": {},
"ccapAppTypeHisTTSFailed": {},
"ccapAppTypeHisTTSSuccess": {},
"ccarConfigAccIdx": {},
"ccarConfigConformAction": {},
"ccarConfigExceedAction": {},
"ccarConfigExtLimit": {},
"ccarConfigLimit": {},
"ccarConfigRate": {},
"ccarConfigType": {},
"ccarStatCurBurst": {},
"ccarStatFilteredBytes": {},
"ccarStatFilteredBytesOverflow": {},
"ccarStatFilteredPkts": {},
"ccarStatFilteredPktsOverflow": {},
"ccarStatHCFilteredBytes": {},
"ccarStatHCFilteredPkts": {},
"ccarStatHCSwitchedBytes": {},
"ccarStatHCSwitchedPkts": {},
"ccarStatSwitchedBytes": {},
"ccarStatSwitchedBytesOverflow": {},
"ccarStatSwitchedPkts": {},
"ccarStatSwitchedPktsOverflow": {},
"ccbptPolicyIdNext": {},
"ccbptTargetTable.1.10": {},
"ccbptTargetTable.1.6": {},
"ccbptTargetTable.1.7": {},
"ccbptTargetTable.1.8": {},
"ccbptTargetTable.1.9": {},
"ccbptTargetTableLastChange": {},
"cciDescriptionEntry": {"1": {}, "2": {}},
"ccmCLICfgRunConfNotifEnable": {},
"ccmCLIHistoryCmdEntries": {},
"ccmCLIHistoryCmdEntriesAllowed": {},
"ccmCLIHistoryCommand": {},
"ccmCLIHistoryMaxCmdEntries": {},
"ccmCTID": {},
"ccmCTIDLastChangeTime": {},
"ccmCTIDRolledOverNotifEnable": {},
"ccmCTIDWhoChanged": {},
"ccmCallHomeAlertGroupCfg": {"3": {}, "5": {}},
"ccmCallHomeConfiguration": {
"1": {},
"10": {},
"11": {},
"13": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"23": {},
"24": {},
"27": {},
"28": {},
"29": {},
"3": {},
"34": {},
"35": {},
"36": {},
"37": {},
"38": {},
"39": {},
"4": {},
"40": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ccmCallHomeDiagSignature": {"2": {}, "3": {}},
"ccmCallHomeDiagSignatureInfoEntry": {
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
},
"ccmCallHomeMessageSource": {"1": {}, "2": {}, "3": {}},
"ccmCallHomeNotifConfig": {"1": {}},
"ccmCallHomeReporting": {"1": {}},
"ccmCallHomeSecurity": {"1": {}},
"ccmCallHomeStats": {"1": {}, "2": {}, "3": {}, "4": {}},
"ccmCallHomeStatus": {"1": {}, "2": {}, "3": {}, "5": {}},
"ccmCallHomeVrf": {"1": {}},
"ccmDestProfileTestEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"ccmEventAlertGroupEntry": {"1": {}, "2": {}},
"ccmEventStatsEntry": {
"10": {},
"11": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ccmHistoryCLICmdEntriesBumped": {},
"ccmHistoryEventCommandSource": {},
"ccmHistoryEventCommandSourceAddrRev1": {},
"ccmHistoryEventCommandSourceAddrType": {},
"ccmHistoryEventCommandSourceAddress": {},
"ccmHistoryEventConfigDestination": {},
"ccmHistoryEventConfigSource": {},
"ccmHistoryEventEntriesBumped": {},
"ccmHistoryEventFile": {},
"ccmHistoryEventRcpUser": {},
"ccmHistoryEventServerAddrRev1": {},
"ccmHistoryEventServerAddrType": {},
"ccmHistoryEventServerAddress": {},
"ccmHistoryEventTerminalLocation": {},
"ccmHistoryEventTerminalNumber": {},
"ccmHistoryEventTerminalType": {},
"ccmHistoryEventTerminalUser": {},
"ccmHistoryEventTime": {},
"ccmHistoryEventVirtualHostName": {},
"ccmHistoryMaxEventEntries": {},
"ccmHistoryRunningLastChanged": {},
"ccmHistoryRunningLastSaved": {},
"ccmHistoryStartupLastChanged": {},
"ccmOnDemandCliMsgControl": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}, "6": {}},
"ccmOnDemandMsgSendControl": {"1": {}, "2": {}, "3": {}, "4": {}},
"ccmPatternAlertGroupEntry": {"2": {}, "3": {}, "4": {}},
"ccmPeriodicAlertGroupEntry": {
"1": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
},
"ccmPeriodicSwInventoryCfg": {"1": {}},
"ccmSeverityAlertGroupEntry": {"1": {}},
"ccmSmartCallHomeActions": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}},
"ccmSmtpServerStatusEntry": {"1": {}},
"ccmSmtpServersEntry": {"3": {}, "4": {}, "5": {}, "6": {}},
"cdeCircuitEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"cdeFastEntry": {
"10": {},
"11": {},
"12": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cdeIfEntry": {"1": {}},
"cdeNode": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cdeTConnConfigEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cdeTConnDirectConfigEntry": {"1": {}, "2": {}, "3": {}},
"cdeTConnOperEntry": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}},
"cdeTConnTcpConfigEntry": {"1": {}},
"cdeTrapControl": {"1": {}, "2": {}},
"cdlCivicAddrLocationStatus": {},
"cdlCivicAddrLocationStorageType": {},
"cdlCivicAddrLocationValue": {},
"cdlCustomLocationStatus": {},
"cdlCustomLocationStorageType": {},
"cdlCustomLocationValue": {},
"cdlGeoAltitude": {},
"cdlGeoAltitudeResolution": {},
"cdlGeoAltitudeType": {},
"cdlGeoLatitude": {},
"cdlGeoLatitudeResolution": {},
"cdlGeoLongitude": {},
"cdlGeoLongitudeResolution": {},
"cdlGeoResolution": {},
"cdlGeoStatus": {},
"cdlGeoStorageType": {},
"cdlKey": {},
"cdlLocationCountryCode": {},
"cdlLocationPreferWeightValue": {},
"cdlLocationSubTypeCapability": {},
"cdlLocationTargetIdentifier": {},
"cdlLocationTargetType": {},
"cdot3OamAdminState": {},
"cdot3OamConfigRevision": {},
"cdot3OamCriticalEventEnable": {},
"cdot3OamDuplicateEventNotificationRx": {},
"cdot3OamDuplicateEventNotificationTx": {},
"cdot3OamDyingGaspEnable": {},
"cdot3OamErrFrameEvNotifEnable": {},
"cdot3OamErrFramePeriodEvNotifEnable": {},
"cdot3OamErrFramePeriodThreshold": {},
"cdot3OamErrFramePeriodWindow": {},
"cdot3OamErrFrameSecsEvNotifEnable": {},
"cdot3OamErrFrameSecsSummaryThreshold": {},
"cdot3OamErrFrameSecsSummaryWindow": {},
"cdot3OamErrFrameThreshold": {},
"cdot3OamErrFrameWindow": {},
"cdot3OamErrSymPeriodEvNotifEnable": {},
"cdot3OamErrSymPeriodThresholdHi": {},
"cdot3OamErrSymPeriodThresholdLo": {},
"cdot3OamErrSymPeriodWindowHi": {},
"cdot3OamErrSymPeriodWindowLo": {},
"cdot3OamEventLogEventTotal": {},
"cdot3OamEventLogLocation": {},
"cdot3OamEventLogOui": {},
"cdot3OamEventLogRunningTotal": {},
"cdot3OamEventLogThresholdHi": {},
"cdot3OamEventLogThresholdLo": {},
"cdot3OamEventLogTimestamp": {},
"cdot3OamEventLogType": {},
"cdot3OamEventLogValue": {},
"cdot3OamEventLogWindowHi": {},
"cdot3OamEventLogWindowLo": {},
"cdot3OamFramesLostDueToOam": {},
"cdot3OamFunctionsSupported": {},
"cdot3OamInformationRx": {},
"cdot3OamInformationTx": {},
"cdot3OamLoopbackControlRx": {},
"cdot3OamLoopbackControlTx": {},
"cdot3OamLoopbackIgnoreRx": {},
"cdot3OamLoopbackStatus": {},
"cdot3OamMaxOamPduSize": {},
"cdot3OamMode": {},
"cdot3OamOperStatus": {},
"cdot3OamOrgSpecificRx": {},
"cdot3OamOrgSpecificTx": {},
"cdot3OamPeerConfigRevision": {},
"cdot3OamPeerFunctionsSupported": {},
"cdot3OamPeerMacAddress": {},
"cdot3OamPeerMaxOamPduSize": {},
"cdot3OamPeerMode": {},
"cdot3OamPeerVendorInfo": {},
"cdot3OamPeerVendorOui": {},
"cdot3OamUniqueEventNotificationRx": {},
"cdot3OamUniqueEventNotificationTx": {},
"cdot3OamUnsupportedCodesRx": {},
"cdot3OamUnsupportedCodesTx": {},
"cdot3OamVariableRequestRx": {},
"cdot3OamVariableRequestTx": {},
"cdot3OamVariableResponseRx": {},
"cdot3OamVariableResponseTx": {},
"cdpCache.2.1.4": {},
"cdpCache.2.1.5": {},
"cdpCacheEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cdpGlobal": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}, "6": {}, "7": {}},
"cdpInterface.2.1.1": {},
"cdpInterface.2.1.2": {},
"cdpInterfaceEntry": {"2": {}, "3": {}, "4": {}, "5": {}, "6": {}},
"cdspActiveChannels": {},
"cdspAlarms": {},
"cdspCardIndex": {},
"cdspCardLastHiWaterUtilization": {},
"cdspCardLastResetTime": {},
"cdspCardMaxChanPerDSP": {},
"cdspCardResourceUtilization": {},
"cdspCardState": {},
"cdspCardVideoPoolUtilization": {},
"cdspCardVideoPoolUtilizationThreshold": {},
"cdspCodecTemplateSupported": {},
"cdspCongestedDsp": {},
"cdspCurrentAvlbCap": {},
"cdspCurrentUtilCap": {},
"cdspDspNum": {},
"cdspDspSwitchOverThreshold": {},
"cdspDspfarmObjects.5.1.10": {},
"cdspDspfarmObjects.5.1.11": {},
"cdspDspfarmObjects.5.1.2": {},
"cdspDspfarmObjects.5.1.3": {},
"cdspDspfarmObjects.5.1.4": {},
"cdspDspfarmObjects.5.1.5": {},
"cdspDspfarmObjects.5.1.6": {},
"cdspDspfarmObjects.5.1.7": {},
"cdspDspfarmObjects.5.1.8": {},
"cdspDspfarmObjects.5.1.9": {},
"cdspDtmfPowerLevel": {},
"cdspDtmfPowerTwist": {},
"cdspEnableOperStateNotification": {},
"cdspFailedDsp": {},
"cdspGlobMaxAvailTranscodeSess": {},
"cdspGlobMaxConfTranscodeSess": {},
"cdspInUseChannels": {},
"cdspLastAlarmCause": {},
"cdspLastAlarmCauseText": {},
"cdspLastAlarmTime": {},
"cdspMIBEnableCardStatusNotification": {},
"cdspMtpProfileEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cdspMtpProfileMaxAvailHardSess": {},
"cdspMtpProfileMaxConfHardSess": {},
"cdspMtpProfileMaxConfSoftSess": {},
"cdspMtpProfileRowStatus": {},
"cdspNormalDsp": {},
"cdspNumCongestionOccurrence": {},
"cdspNx64Dsp": {},
"cdspOperState": {},
"cdspPktLossConcealment": {},
"cdspRtcpControl": {},
"cdspRtcpRecvMultiplier": {},
"cdspRtcpTimerControl": {},
"cdspRtcpTransInterval": {},
"cdspRtcpXrControl": {},
"cdspRtcpXrExtRfactor": {},
"cdspRtcpXrGminDefault": {},
"cdspRtcpXrTransMultiplier": {},
"cdspRtpSidPayloadType": {},
"cdspSigBearerChannelSplit": {},
"cdspTotAvailMtpSess": {},
"cdspTotAvailTranscodeSess": {},
"cdspTotUnusedMtpSess": {},
"cdspTotUnusedTranscodeSess": {},
"cdspTotalChannels": {},
"cdspTotalDsp": {},
"cdspTranscodeProfileEntry": {
"10": {},
"11": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cdspTranscodeProfileMaxAvailSess": {},
"cdspTranscodeProfileMaxConfSess": {},
"cdspTranscodeProfileRowStatus": {},
"cdspTransparentIpIp": {},
"cdspVadAdaptive": {},
"cdspVideoOutOfResourceNotificationEnable": {},
"cdspVideoUsageNotificationEnable": {},
"cdspVoiceModeIpIp": {},
"cdspVqmControl": {},
"cdspVqmThreshSES": {},
"cdspXAvailableBearerBandwidth": {},
"cdspXAvailableSigBandwidth": {},
"cdspXNumberOfBearerCalls": {},
"cdspXNumberOfSigCalls": {},
"cdtCommonAddrPool": {},
"cdtCommonDescr": {},
"cdtCommonIpv4AccessGroup": {},
"cdtCommonIpv4Unreachables": {},
"cdtCommonIpv6AccessGroup": {},
"cdtCommonIpv6Unreachables": {},
"cdtCommonKeepaliveInt": {},
"cdtCommonKeepaliveRetries": {},
"cdtCommonSrvAcct": {},
"cdtCommonSrvNetflow": {},
"cdtCommonSrvQos": {},
"cdtCommonSrvRedirect": {},
"cdtCommonSrvSubControl": {},
"cdtCommonValid": {},
"cdtCommonVrf": {},
"cdtEthernetBridgeDomain": {},
"cdtEthernetIpv4PointToPoint": {},
"cdtEthernetMacAddr": {},
"cdtEthernetPppoeEnable": {},
"cdtEthernetValid": {},
"cdtIfCdpEnable": {},
"cdtIfFlowMonitor": {},
"cdtIfIpv4Mtu": {},
"cdtIfIpv4SubEnable": {},
"cdtIfIpv4TcpMssAdjust": {},
"cdtIfIpv4Unnumbered": {},
"cdtIfIpv4VerifyUniRpf": {},
"cdtIfIpv4VerifyUniRpfAcl": {},
"cdtIfIpv4VerifyUniRpfOpts": {},
"cdtIfIpv6Enable": {},
"cdtIfIpv6NdDadAttempts": {},
"cdtIfIpv6NdNsInterval": {},
"cdtIfIpv6NdOpts": {},
"cdtIfIpv6NdPreferredLife": {},
"cdtIfIpv6NdPrefix": {},
"cdtIfIpv6NdPrefixLength": {},
"cdtIfIpv6NdRaIntervalMax": {},
"cdtIfIpv6NdRaIntervalMin": {},
"cdtIfIpv6NdRaIntervalUnits": {},
"cdtIfIpv6NdRaLife": {},
"cdtIfIpv6NdReachableTime": {},
"cdtIfIpv6NdRouterPreference": {},
"cdtIfIpv6NdValidLife": {},
"cdtIfIpv6SubEnable": {},
"cdtIfIpv6TcpMssAdjust": {},
"cdtIfIpv6VerifyUniRpf": {},
"cdtIfIpv6VerifyUniRpfAcl": {},
"cdtIfIpv6VerifyUniRpfOpts": {},
"cdtIfMtu": {},
"cdtIfValid": {},
"cdtPppAccounting": {},
"cdtPppAuthentication": {},
"cdtPppAuthenticationMethods": {},
"cdtPppAuthorization": {},
"cdtPppChapHostname": {},
"cdtPppChapOpts": {},
"cdtPppChapPassword": {},
"cdtPppEapIdentity": {},
"cdtPppEapOpts": {},
"cdtPppEapPassword": {},
"cdtPppIpcpAddrOption": {},
"cdtPppIpcpDnsOption": {},
"cdtPppIpcpDnsPrimary": {},
"cdtPppIpcpDnsSecondary": {},
"cdtPppIpcpMask": {},
"cdtPppIpcpMaskOption": {},
"cdtPppIpcpWinsOption": {},
"cdtPppIpcpWinsPrimary": {},
"cdtPppIpcpWinsSecondary": {},
"cdtPppLoopbackIgnore": {},
"cdtPppMaxBadAuth": {},
"cdtPppMaxConfigure": {},
"cdtPppMaxFailure": {},
"cdtPppMaxTerminate": {},
"cdtPppMsChapV1Hostname": {},
"cdtPppMsChapV1Opts": {},
"cdtPppMsChapV1Password": {},
"cdtPppMsChapV2Hostname": {},
"cdtPppMsChapV2Opts": {},
"cdtPppMsChapV2Password": {},
"cdtPppPapOpts": {},
"cdtPppPapPassword": {},
"cdtPppPapUsername": {},
"cdtPppPeerDefIpAddr": {},
"cdtPppPeerDefIpAddrOpts": {},
"cdtPppPeerDefIpAddrSrc": {},
"cdtPppPeerIpAddrPoolName": {},
"cdtPppPeerIpAddrPoolStatus": {},
"cdtPppPeerIpAddrPoolStorage": {},
"cdtPppTimeoutAuthentication": {},
"cdtPppTimeoutRetry": {},
"cdtPppValid": {},
"cdtSrvMulticast": {},
"cdtSrvNetworkSrv": {},
"cdtSrvSgSrvGroup": {},
"cdtSrvSgSrvType": {},
"cdtSrvValid": {},
"cdtSrvVpdnGroup": {},
"cdtTemplateAssociationName": {},
"cdtTemplateAssociationPrecedence": {},
"cdtTemplateName": {},
"cdtTemplateSrc": {},
"cdtTemplateStatus": {},
"cdtTemplateStorage": {},
"cdtTemplateTargetStatus": {},
"cdtTemplateTargetStorage": {},
"cdtTemplateType": {},
"cdtTemplateUsageCount": {},
"cdtTemplateUsageTargetId": {},
"cdtTemplateUsageTargetType": {},
"ceAlarmCriticalCount": {},
"ceAlarmCutOff": {},
"ceAlarmDescrSeverity": {},
"ceAlarmDescrText": {},
"ceAlarmDescrVendorType": {},
"ceAlarmFilterAlarmsEnabled": {},
"ceAlarmFilterAlias": {},
"ceAlarmFilterNotifiesEnabled": {},
"ceAlarmFilterProfile": {},
"ceAlarmFilterProfileIndexNext": {},
"ceAlarmFilterStatus": {},
"ceAlarmFilterSyslogEnabled": {},
"ceAlarmHistAlarmType": {},
"ceAlarmHistEntPhysicalIndex": {},
"ceAlarmHistLastIndex": {},
"ceAlarmHistSeverity": {},
"ceAlarmHistTableSize": {},
"ceAlarmHistTimeStamp": {},
"ceAlarmHistType": {},
"ceAlarmList": {},
"ceAlarmMajorCount": {},
"ceAlarmMinorCount": {},
"ceAlarmNotifiesEnable": {},
"ceAlarmSeverity": {},
"ceAlarmSyslogEnable": {},
"ceAssetAlias": {},
"ceAssetCLEI": {},
"ceAssetFirmwareID": {},
"ceAssetFirmwareRevision": {},
"ceAssetHardwareRevision": {},
"ceAssetIsFRU": {},
"ceAssetMfgAssyNumber": {},
"ceAssetMfgAssyRevision": {},
"ceAssetOEMString": {},
"ceAssetOrderablePartNumber": {},
"ceAssetSerialNumber": {},
"ceAssetSoftwareID": {},
"ceAssetSoftwareRevision": {},
"ceAssetTag": {},
"ceDiagEntityCurrentTestEntry": {"1": {}},
"ceDiagEntityEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"ceDiagErrorInfoEntry": {"2": {}},
| |
from __future__ import absolute_import, print_function, division
import os
import numpy as np
from glob import glob
import matplotlib
from matplotlib import pyplot as plt
import astropy.units as u
from astropy.constants import R_sun
from astropy.coordinates import SphericalRepresentation
from .lightcurve import LightCurve, BestLightCurve
import corner
import h5py
import pandas as pd
# Import friedrich
import sys
#sys.path.insert(0, '/astro/users/bmmorris/git/friedrich')
sys.path.insert(0, '/Users/bmmorris/git/friedrich')
from friedrich.stsp import STSP
__all__ = ['MCMCResults']
def load_best_light_curve(results_dir, window_ind, transit_params):
    """Load the most recent best-fit light curve for an STSP window.

    Parameters
    ----------
    results_dir : str
        Root directory containing ``window???/run???`` subdirectories.
    window_ind : int
        Index of the transit window to load.
    transit_params : object
        Transit parameters forwarded to `BestLightCurve`.

    Returns
    -------
    BestLightCurve
        Built from the last (lexicographically latest run) ``*_lcbest.txt``.

    Raises
    ------
    ValueError
        If no ``*_lcbest.txt`` files exist for this window (previously an
        opaque IndexError).
    """
    lc_paths = sorted(glob(os.path.join(results_dir,
                                        'window{0:03d}/run???/*_lcbest.txt'
                                        .format(window_ind))))
    if not lc_paths:
        raise ValueError("No *_lcbest.txt files found for window {0} in {1}"
                         .format(window_ind, results_dir))
    print(lc_paths)
    # glob() always returns a list, so the former isinstance() guard was
    # redundant; the last entry corresponds to the latest run.
    return BestLightCurve(lc_paths[-1], transit_params=transit_params)
def chains_to_spot_params(radius_chains, theta_chains, phi_chains,
                          best_chain_index, best_step_index_of_best_chain):
    """Flatten the best step's spot parameters.

    Picks the (radius, theta, phi) values at the given step of the given
    chain and interleaves them as ``[r0, t0, p0, r1, t1, p1, ...]``.
    """
    best_r = radius_chains[best_chain_index][best_step_index_of_best_chain]
    best_t = theta_chains[best_chain_index][best_step_index_of_best_chain]
    best_p = phi_chains[best_chain_index][best_step_index_of_best_chain]
    flattened = []
    for triple in zip(best_r, best_t, best_p):
        flattened += list(triple)
    return flattened
class MCMCResults(object):
    def __init__(self, radius=None, theta=None, phi=None, acceptance_rates=None,
                 n_spots=None, burnin=None, window_ind=None, light_curve=None,
                 transit_params=None, chi2=None, radius_chains=None,
                 theta_chains=None, phi_chains=None, chi2_chains=None,
                 spot_params=None):
        """Container for STSP MCMC chain results.

        Parameters
        ----------
        radius, theta, phi : array-like, optional
            Flattened (all-walker) chains of spot radius and spherical
            angles; one column per spot.
        acceptance_rates : list, optional
            Per-file (or per-run) MCMC acceptance rates.
        n_spots : int, optional
            Number of spots in the model.
        burnin : float, optional
            Fraction of samples to treat as burn-in.
        window_ind : int, optional
            Index of the transit window these chains belong to.
        light_curve : BestLightCurve, optional
            Best-fit light curve, if computed.
        transit_params : object, optional
            Transit parameters used by the model.
        chi2 : array-like, optional
            Flattened chi-squared chain.
        radius_chains, theta_chains, phi_chains, chi2_chains : list, optional
            Un-flattened per-walker chains.
        spot_params : list, optional
            Flattened [r, theta, phi, ...] parameters of the best sample.
        """
        # Flattened chains (all walkers concatenated)
        self.radius = radius
        self.theta = theta
        self.phi = phi
        # Per-walker chains (lists of 2-D arrays)
        self.radius_chains = radius_chains
        self.theta_chains = theta_chains
        self.phi_chains = phi_chains
        self.chi2_chains = chi2_chains
        self.acceptance_rates = acceptance_rates
        self.n_spots = n_spots
        self.burnin = burnin
        self.window_ind = window_ind
        self.transit_params = transit_params
        self.light_curve = light_curve
        self.chi2 = chi2
        # Cache for a (currently commented-out) min-chi2 index property
        self._min_chi2_ind = None
        self.spot_params = spot_params
        # Cartesian spot coordinates; filled lazily by to_cartesian()
        self.x = None
        self.y = None
        self.z = None
@classmethod
def from_stsp(cls, results_dir, window_ind, burnin=0.8, transit_params=None):
table = None
chain_ind = []
burnin = burnin
acceptance_rates = []
paths = sorted(glob(os.path.join(results_dir,
'window{0:03d}/run???/*_mcmc.txt'
.format(window_ind))))
print(paths)
for path in paths:
results_file_size = os.stat(path).st_size
if results_file_size > 0:
new = np.loadtxt(path)
n_walkers = len(np.unique(new[:, 0]))
n_accepted_steps = np.count_nonzero(new[:, 2] != 0)
n_steps_total = np.max(new[:, 2])
acceptance_rates.append(n_accepted_steps /
n_steps_total / n_walkers)
if table is None:
table = new.copy()
else:
table = np.vstack([table, new])
chain_ind = np.concatenate([chain_ind, new[:, 0]])
n_properties_per_spot = 3
col_offset = 4
n_spots = (table.shape[1] - col_offset)//n_properties_per_spot
chi2_col = 3
radius_col = (col_offset + n_properties_per_spot *
np.arange(n_spots))
theta_col = (col_offset + 1 + n_properties_per_spot *
np.arange(n_spots))
phi_col = (col_offset + 2 + n_properties_per_spot *
np.arange(n_spots))
radius = table[:, radius_col]
theta = table[:, theta_col]
phi = table[:, phi_col]
chi2 = table[:, chi2_col]
burnin_int = int(burnin*table.shape[0])
# last_lc = load_best_light_curve(results_dir, window_ind, transit_params)
last_lc = None
kwargs = dict(burnin=burnin, acceptance_rates=acceptance_rates,
radius=radius, theta=theta, phi=phi, n_spots=n_spots,
window_ind=window_ind, transit_params=transit_params,
chi2=chi2, light_curve=last_lc)
return cls(**kwargs)
@classmethod
def from_stsp_cat(cls, results_dir, window_ind, burnin=0.8,
transit_params=None):
table = None
chain_ind = []
burnin = burnin
chains_path = glob(os.path.join(results_dir,
'window{0:03d}_mcmc.txt'
.format(window_ind)))[0]
results_file_size = os.stat(chains_path).st_size
table = pd.read_csv(chains_path, header=None, delimiter=' ',
skiprows=[0], error_bad_lines=False)
table = table.as_matrix(columns=table.columns)
# Toss nans:
table = table[np.logical_not(np.any(np.isnan(table), axis=1))]
n_walkers = len(np.unique(table[:, 0]))
n_accepted_steps = np.count_nonzero(table[:, 2] != 0)
n_steps_total = np.max(table[:, 2])
acceptance_rates = [n_accepted_steps / n_steps_total / n_walkers]
n_properties_per_spot = 3
col_offset = 4
n_spots = (table.shape[1] - col_offset) // n_properties_per_spot
chi2_col = 3
radius_col = (col_offset + n_properties_per_spot * np.arange(n_spots))
theta_col = (col_offset + 1 + n_properties_per_spot * np.arange(n_spots))
phi_col = (col_offset + 2 + n_properties_per_spot * np.arange(n_spots))
# Save flattened chains
radius = table[:, radius_col]
theta = table[:, theta_col]
phi = table[:, phi_col]
chi2 = table[:, chi2_col]
# Save un-flattened chains
radius_chains = []
theta_chains = []
phi_chains = []
chi2_chains = []
chain_inds = table[:, 0]
for i in range(n_walkers):
chain_i = chain_inds == i
radius_i = table[chain_i, :][:, radius_col]
theta_i = table[chain_i, :][:, theta_col]
phi_i = table[chain_i, :][:, phi_col]
chi2_i = table[chain_i, :][:, chi2_col]
radius_chains.append(radius_i)
theta_chains.append(theta_i)
phi_chains.append(phi_i)
chi2_chains.append(chi2_i)
burnin_int = int(burnin*table.shape[0])
# last_lc = load_best_light_curve(results_dir, window_ind, transit_params)
lc_path = glob(os.path.join(results_dir, 'window{0:03d}.dat'
.format(window_ind)))[0]
times, fluxes, errors = np.loadtxt(lc_path, unpack=True)
## calculate best transit model
min_chi2_chain = [min(chi2) for chi2 in chi2_chains]
best_chain_index = np.argmin(min_chi2_chain)
best_step_index_of_best_chain = np.argmin(chi2_chains[best_chain_index])
spot_params = chains_to_spot_params(radius_chains, theta_chains,
phi_chains, best_chain_index,
best_step_index_of_best_chain)
stsp = STSP(LightCurve(times=times, fluxes=fluxes, errors=errors),
transit_params, spot_params)
t, f = stsp.stsp_lc(t_bypass=True)
last_lc = BestLightCurve(times=times, fluxes_kepler=fluxes, errors=errors, fluxes_model=f)
kwargs = dict(burnin=burnin, acceptance_rates=acceptance_rates,
radius=radius, theta=theta, phi=phi, n_spots=n_spots,
window_ind=window_ind, transit_params=transit_params,
chi2=chi2, light_curve=last_lc,
radius_chains=radius_chains, theta_chains=theta_chains,
phi_chains=phi_chains, chi2_chains=chi2_chains,
spot_params=spot_params)
return cls(**kwargs)
@classmethod
def from_stsp_local(cls, mcmc_path, lc_path, burnin=0.8,
transit_params=None, window_ind=0):
table = None
chain_ind = []
burnin = burnin
chains_path = mcmc_path
results_file_size = os.stat(chains_path).st_size
table = pd.read_csv(chains_path, header=None, delimiter=' ',
skiprows=[0], error_bad_lines=False)
table = table.as_matrix(columns=table.columns)
# Toss nans:
table = table[np.logical_not(np.any(np.isnan(table), axis=1))]
n_walkers = len(np.unique(table[:, 0]))
n_accepted_steps = np.count_nonzero(table[:, 2] != 0)
n_steps_total = np.max(table[:, 2])
acceptance_rates = [n_accepted_steps / n_steps_total / n_walkers]
n_properties_per_spot = 3
col_offset = 4
n_spots = (table.shape[1] - col_offset) // n_properties_per_spot
chi2_col = 3
radius_col = (col_offset + n_properties_per_spot * np.arange(n_spots))
theta_col = (col_offset + 1 + n_properties_per_spot * np.arange(n_spots))
phi_col = (col_offset + 2 + n_properties_per_spot * np.arange(n_spots))
# Save flattened chains
radius = table[:, radius_col]
theta = table[:, theta_col]
phi = table[:, phi_col]
chi2 = table[:, chi2_col]
# Save un-flattened chains
radius_chains = []
theta_chains = []
phi_chains = []
chi2_chains = []
chain_inds = table[:, 0]
for i in range(n_walkers):
chain_i = chain_inds == i
radius_i = table[chain_i, :][:, radius_col]
theta_i = table[chain_i, :][:, theta_col]
phi_i = table[chain_i, :][:, phi_col]
chi2_i = table[chain_i, :][:, chi2_col]
radius_chains.append(radius_i)
theta_chains.append(theta_i)
phi_chains.append(phi_i)
chi2_chains.append(chi2_i)
burnin_int = int(burnin*table.shape[0])
# last_lc = load_best_light_curve(results_dir, window_ind, transit_params)
times, fluxes, errors = np.loadtxt(lc_path, unpack=True)
## calculate best transit model
min_chi2_chain = [min(chi2) for chi2 in chi2_chains]
best_chain_index = np.argmin(min_chi2_chain)
best_step_index_of_best_chain = np.argmin(chi2_chains[best_chain_index])
spot_params = chains_to_spot_params(radius_chains, theta_chains,
phi_chains, best_chain_index,
best_step_index_of_best_chain)
stsp = STSP(LightCurve(times=times, fluxes=fluxes, errors=errors),
transit_params, spot_params)
t, f = stsp.stsp_lc(t_bypass=True)
last_lc = BestLightCurve(times=times, fluxes_kepler=fluxes, errors=errors, fluxes_model=f)
kwargs = dict(burnin=burnin, acceptance_rates=acceptance_rates,
radius=radius, theta=theta, phi=phi, n_spots=n_spots,
window_ind=window_ind, transit_params=transit_params,
chi2=chi2, light_curve=last_lc,
radius_chains=radius_chains, theta_chains=theta_chains,
phi_chains=phi_chains, chi2_chains=chi2_chains,
spot_params=spot_params)
return cls(**kwargs)
# @property
# def min_chi2_index(self):
# if self._min_chi2_ind is None:
# chi2_order = np.argsort(self.chi2)
# index = 0
# while np.any(np.isnan(self.radius[chi2_order[index], :])):
# index += 0
#
# self._min_chi2_ind = chi2_order[index]
#
# return self._min_chi2_ind
def to_hdf5(self, results_dir):
hdf5_results_dir = os.path.join(results_dir, 'hdf5')
if not os.path.exists(hdf5_results_dir):
os.makedirs(hdf5_results_dir)
file_path = os.path.join(hdf5_results_dir,
"window{0:03}.hdf5".format(self.window_ind))
f = h5py.File(file_path, 'w')
attrs_to_save = ['radius', 'theta', 'phi', 'acceptance_rates', 'chi2']
for attr in attrs_to_save:
f.create_dataset(attr, data=getattr(self, attr))
f.close()
@classmethod
def from_hdf5(cls, results_dir, window_ind, transit_params=None):
saved_attrs = ['radius', 'theta', 'phi', 'acceptance_rates', 'chi2']
file_path = os.path.join(results_dir, 'hdf5',
"window{0:03}.hdf5".format(window_ind))
f = h5py.File(file_path, 'r')
kwargs = dict(window_ind=window_ind, transit_params=transit_params)
for attr in saved_attrs:
kwargs[attr] = f[attr][:]
f.close()
# Load last light curve
#last_lc = load_best_light_curve(results_dir, window_ind, transit_params)
#kwargs['light_curve'] = last_lc
return cls(**kwargs)
    def plot_chains_hist(self, burn_in=0):
        """Plot posterior histograms of radius/theta/phi for each spot.

        One row of three histograms per spot, with ranges clipped to the
        central 4th--96th percentile to trim outliers.

        Parameters
        ----------
        burn_in : float
            Fraction of leading samples to discard before histogramming.
        """
        n_spots = self.radius.shape[1]
        fig, ax = plt.subplots(n_spots, 3, figsize=(16, 8))
        n_bins = 30
        # Percentile bounds used to clip histogram ranges
        low = 4
        high = 96
        burnin_int = int(burn_in * self.radius.shape[0])
        for i in range(self.radius.shape[1]):
            r_range = np.percentile(self.radius[burnin_int:, i], [low, high])
            theta_range = np.percentile(self.theta[burnin_int:, i], [low, high])
            phi_range = np.percentile(self.phi[burnin_int:, i], [low, high])
            ax[i, 0].hist(self.radius[burnin_int:, i], n_bins, color='k',
                          range=r_range)
            ax[i, 1].hist(self.theta[burnin_int:, i], n_bins, color='k',
                          range=theta_range)
            ax[i, 2].hist(self.phi[burnin_int:, i], n_bins, color='k',
                          range=phi_range)
            ax[i, 0].set_ylabel('Spot {0}'.format(i))
        ax[0, 0].set(title='Radius')
        ax[0, 1].set(title='theta')
        ax[0, 2].set(title='phi')
        # NOTE(review): these xlabel calls assume at least two spot rows
        # (ax[1, ...]) -- confirm n_spots >= 2 for all callers.
        ax[1, 0].set_xlabel('$R_s/R_\star$')
        ax[1, 1].set_xlabel('[radians]')
        ax[1, 2].set_xlabel('[radians]')
        fig.tight_layout()
    def plot_chains(self, burn_in=0):
        """Plot per-walker parameter traces for each spot.

        Each walker's chain is over-plotted with transparency; dashed
        horizontal lines mark the minimum-chi2 (maximum likelihood) sample.

        Parameters
        ----------
        burn_in : float
            Fraction of leading samples -- NOTE(review): burnin_int is
            computed below but never applied to the traces; confirm intent.
        """
        n_spots = self.radius.shape[1]
        fig, ax = plt.subplots(n_spots, 3, figsize=(16, 8))
        n_bins = 30
        low = 4
        high = 96
        burnin_int = int(burn_in * self.radius.shape[0])
        # One color per spot (supports up to 4 spots)
        colors = ['b', 'g', 'r', 'm']
        for i in range(len(self.radius_chains)):
            for j in range(n_spots):
                kwargs = dict(alpha=0.3, color=colors[j])
                ax[j, 0].plot(self.radius_chains[i][:, j], **kwargs)
                ax[j, 1].plot(self.theta_chains[i][:, j], **kwargs)
                ax[j, 2].plot(self.phi_chains[i][:, j], **kwargs)
                ax[j, 0].set_ylabel('Spot {0}'.format(j))
        ax[0, 0].set(title='Radius')
        ax[0, 1].set(title='theta')
        ax[0, 2].set(title='phi')
        ax[1, 0].set_xlabel('$R_s/R_\star$')
        ax[1, 1].set_xlabel('[radians]')
        ax[1, 2].set_xlabel('[radians]')
        # Mark the maximum likelihood values
        max_likelihood_ind = np.argmin(self.chi2)
        for j in range(n_spots):
            kwargs = dict(ls='--', lw=2, color=colors[j])
            ax[j, 0].axhline(self.radius[max_likelihood_ind, j], **kwargs)
            ax[j, 1].axhline(self.theta[max_likelihood_ind, j], **kwargs)
            ax[j, 2].axhline(self.phi[max_likelihood_ind, j], **kwargs)
        fig.tight_layout()
    def plot_chains_cartesian(self, burn_in=0):
        """Histogram the spot positions in Cartesian (x, y, z) coordinates.

        Lazily converts the spherical chains via ``self.to_cartesian()``
        (defined elsewhere in this class; not visible in this chunk).

        Parameters
        ----------
        burn_in : float
            Fraction of leading samples to discard before histogramming.
        """
        if self.x is None:
            self.to_cartesian()
        n_spots = self.radius.shape[1]
        fig, ax = plt.subplots(n_spots, 3, figsize=(16, 8))
        n_bins = 30
        # Percentile bounds used to clip histogram ranges
        low = 4
        high = 96
        burnin_int = int(burn_in * self.radius.shape[0])
        for i in range(self.radius.shape[1]):
            x_range = np.percentile(self.x[burnin_int:, i], [low, high])
            y_range = np.percentile(self.y[burnin_int:, i], [low, high])
            z_range = np.percentile(self.z[burnin_int:, i], [low, high])
            ax[i, 0].hist(self.x[burnin_int:, i], n_bins, color='k',
                          range=x_range)
            ax[i, 1].hist(self.y[burnin_int:, i], n_bins, color='k',
                          range=y_range)
            ax[i, 2].hist(self.z[burnin_int:, i], n_bins, color='k',
                          range=z_range)
            ax[i, 0].set_ylabel('Spot {0}'.format(i))
        ax[0, 0].set(title='x')
        ax[0, 1].set(title='y')
        ax[0, 2].set(title='z')
        # ax[1, 0].set_xlabel('$R_s/R_\star$')
        # ax[1, 1].set_xlabel('[radians]')
        # ax[1, 2].set_xlabel('[radians]')
        fig.tight_layout()
def plot_each_spot(self, burn_in=0):
#fig, ax = plt.subplots(5)
burnin_int = int(burn_in * self.radius.shape[0])
n_spots = self.radius.shape[1]
burn_in_to_index = int(burn_in*self.radius.shape[0])
for i in range(n_spots):
samples = np.array([self.radius[burn_in_to_index:, i],
self.phi[burn_in_to_index:, i]]).T # self.theta[:, i],
corner.corner(samples, plot_contours=False)
    def plot_star(self, fade_out=True):
        """Render the sampled spot positions on the star.

        Converts the (phi, theta) chains to an astropy
        `SphericalRepresentation` (theta is shifted by pi/2 so it maps to
        latitude) and delegates drawing to a ``plot_star`` helper.

        NOTE(review): this method calls a module-level ``plot_star``
        function that is neither defined nor imported in this chunk --
        confirm it exists elsewhere in the module, otherwise this raises
        NameError (it would not recurse into this method).

        Parameters
        ----------
        fade_out : bool
            Passed through to the drawing helper.
        """
        spots_spherical = SphericalRepresentation(self.phi*u.rad,
                                                  (self.theta - np.pi/2)*u.rad,
                                                  1*R_sun)
        self.spots_spherical = spots_spherical
        fig, ax = plot_star(spots_spherical, fade_out=fade_out)
        #plt.show()
def plot_corner(self):
exclude_columns | |
<filename>emat/analysis/explore_2/explore_visualizer.py
import numpy
import pandas
import warnings
import functools
from ...viz import colors
from ...scope.box import GenericBox
from traitlets import TraitError
from plotly import graph_objs as go
from ipywidgets import Dropdown
import ipywidgets as widget
import logging
_logger = logging.getLogger('EMAT.widget')
from .explore_base import DataFrameExplorer
def _deselect_all_points(trace):
    # Clear any active selection on a plotly trace; setting
    # selectedpoints to None renders all points as unselected.
    trace.selectedpoints = None
# def _debugprint(s):
# print(s.replace("rgb(255, 127, 14)", "<ORANGE>").replace("rgb(255, 46, 241)","<PINK>"))
from .components import *
class Visualizer(DataFrameExplorer):
    def __init__(
            self,
            data,
            selections=None,
            scope=None,
            active_selection_name=None,
            reference_point=None,
    ):
        """Interactive explorer of experiment data with linked selections.

        Parameters
        ----------
        data : pandas.DataFrame
            Experiment results to explore (forwarded to DataFrameExplorer).
        selections : dict, optional
            Mapping of selection name to selection object; defaults to a
            single empty Box named 'Explore'.
        scope : Scope, optional
            EMAT scope describing the columns (dtypes, categories, names).
        active_selection_name : str, optional
            Which selection starts active; defaults to 'Explore'.
        reference_point : optional
            Reference point forwarded to DataFrameExplorer.
        """
        if selections is None:
            # Deferred import avoids an import cycle at module load time
            from ...scope.box import Box
            selections = {'Explore': Box(name='Explore', scope=scope)}
        if active_selection_name is None:
            active_selection_name = 'Explore'
        super().__init__(
            data,
            selections=selections,
            active_selection_name=active_selection_name,
            reference_point=reference_point,
        )
        self.scope = scope
        # Per-column figure caches and related state
        self._figures_hist = {}
        self._figures_freq = {}
        self._base_histogram = {}
        self._categorical_data = {}
        # When True, selection-change callbacks are suppressed
        self._freeze = False
        self._two_way = {}
        self._splom = {}
        self._hmm = {}
        self._parcoords = {}
        self._selection_feature_score_fig = None
        # Status bar: text + inside/outside-selection pie chart
        self._status_txt = widget.HTML(
            value="<i>Explore Status Not Set</i>",
        )
        self._status_pie = go.FigureWidget(
            go.Pie(
                # Placeholder values; refreshed by _update_status() below
                values=[75, 250],
                labels=['Inside', 'Outside'],
                hoverinfo='label+value',
                textinfo='percent',
                textfont_size=10,
                marker=dict(
                    colors=[
                        self.active_selection_color(),
                        colors.DEFAULT_BASE_COLOR,
                    ],
                    line=dict(color='#FFF', width=0.25),
                )
            ),
            layout=dict(
                width=100,
                height=100,
                showlegend=False,
                margin=dict(l=10, r=10, t=10, b=10),
            )
        )
        self._status = widget.HBox(
            [
                widget.VBox([self._active_selection_chooser, self._status_txt]),
                self._status_pie
            ],
            layout=dict(
                justify_content = 'space-between',
                align_items = 'center',
            )
        )
        self._update_status()
def get_histogram_figure(self, col, bins=20, marker_line_width=None):
try:
this_type = self.scope.get_dtype(col)
except:
this_type = 'float'
if this_type in ('cat','bool'):
return self.get_frequency_figure(col)
if this_type in ('int',):
param = self.scope[col]
if param.max - param.min + 1 <= bins * 4:
bins = param.max - param.min + 1
if marker_line_width is None:
marker_line_width = 0
self._create_histogram_figure(col, bins=bins, marker_line_width=marker_line_width)
return self._figures_hist[col]
def get_frequency_figure(self, col):
if self.scope.get_dtype(col) == 'cat':
labels = self.scope.get_cat_values(col)
else:
labels = [False, True]
self._create_frequencies_figure(col, labels=labels)
return self._figures_freq[col]
def _create_histogram_figure(self, col, bins=20, *, marker_line_width=None):
if col in self._figures_hist:
self._update_histogram_figure(col)
else:
selection = self.active_selection()
if self.active_selection_deftype() == 'box':
box = self._selection_defs[self.active_selection_name()]
else:
box = None
fig = new_histogram_figure(
selection, self.data[col], bins,
marker_line_width=marker_line_width,
on_deselect=lambda *a: self._on_deselect_from_histogram(*a,name=col),
on_select=lambda *a: self._on_select_from_histogram(*a,name=col),
box=box,
title_text=self.scope.shortname(col),
ref_point=self.reference_point(col),
)
self._figures_hist[col] = fig
def _create_frequencies_figure(self, col, labels=None, *, marker_line_width=None):
if col in self._figures_freq:
self._update_frequencies_figure(col)
else:
selection = self.active_selection()
if self.active_selection_deftype() == 'box':
box = self._selection_defs[self.active_selection_name()]
else:
box = None
fig = new_frequencies_figure(
selection, self.data[col], labels,
marker_line_width=marker_line_width,
on_deselect=functools.partial(self._on_deselect_from_histogram, name=col),
on_select=functools.partial(self._on_select_from_freq, name=col),
#on_click=functools.partial(self._on_click_from_frequencies, name=col), # not always stable
box=box,
title_text=self.scope.shortname(col),
ref_point=self.reference_point(col),
)
self._figures_freq[col] = fig
def _update_histogram_figure(self, col):
if col in self._figures_hist:
fig = self._figures_hist[col]
if self.active_selection_deftype() == 'box':
box = self._selection_defs[self.active_selection_name()]
else:
box = None
with fig.batch_update():
update_histogram_figure(
fig,
self.active_selection(),
self.data[col],
box=box,
ref_point=self.reference_point(col),
)
def _update_frequencies_figure(self, col):
if col in self._figures_freq:
fig = self._figures_freq[col]
if self.active_selection_deftype() == 'box':
box = self._selection_defs[self.active_selection_name()]
else:
box = None
with fig.batch_update():
update_frequencies_figure(
fig,
self.active_selection(),
self.data[col],
box=box,
ref_point=self.reference_point(col),
)
def _compute_histogram(self, col, selection, bins=None):
if col not in self._base_histogram:
if bins is None:
bins = 20
bar_heights, bar_x = numpy.histogram(self.data[col], bins=bins)
self._base_histogram[col] = bar_heights, bar_x
else:
bar_heights, bar_x = self._base_histogram[col]
bins_left = bar_x[:-1]
bins_width = bar_x[1:] - bar_x[:-1]
bar_heights_select, bar_x = numpy.histogram(self.data[col][selection], bins=bar_x)
return bar_heights, bar_heights_select, bins_left, bins_width
def _compute_frequencies(self, col, selection, labels):
if col in self._categorical_data:
v = self._categorical_data[col]
else:
self._categorical_data[col] = v = self.data[col].astype(
pandas.CategoricalDtype(categories=labels, ordered=False)
).cat.codes
if col not in self._base_histogram:
bar_heights, bar_x = numpy.histogram(v, bins=numpy.arange(0, len(labels) + 1))
self._base_histogram[col] = bar_heights, bar_x
else:
bar_heights, bar_x = self._base_histogram[col]
bar_heights_select, _ = numpy.histogram(v[selection], bins=numpy.arange(0, len(labels) + 1))
return bar_heights, bar_heights_select, labels
    def _on_select_from_histogram(self, *args, name=None):
        """Plotly callback: box-select on the histogram for column `name`.

        Interprets the selected x-range as new bounds for the active box
        selection.  `_freeze` guards against re-entrant callbacks fired by
        the figure updates this method itself triggers.
        """
        if self._freeze:
            return
        try:
            self._freeze = True
            # args[2] is the plotly selector object carrying the x-range.
            select_min, select_max = args[2].xrange
            _logger.debug("name: %s range: %f - %f", name, select_min, select_max)
            # Clear plotly's own point-selection styling before re-deriving.
            self._figures_hist[name].for_each_trace(_deselect_all_points)
            if self.active_selection_deftype() == 'box':
                box = self._selection_defs[self.active_selection_name()]
                box = interpret_histogram_selection(name, args[2].xrange, box, self.data, self.scope)
                self.new_selection(box, name=self.active_selection_name())
                self._active_selection_changed()
        except:
            # Re-raised, so the bare except only adds logging.
            _logger.exception("error in _on_select_from_histogram")
            raise
        finally:
            self._freeze = False
def _on_deselect_from_histogram(self, *args, name=None):
_logger.debug("deselect %s", name)
if self.active_selection_deftype() == 'box':
box = self._selection_defs[self.active_selection_name()]
if name in box:
del box[name]
self.new_selection(box, name=self.active_selection_name())
self._active_selection_changed()
    def _on_select_from_freq(self, *args, name=None):
        """Plotly callback: box-select on the frequency bars for `name`.

        Each bar inside the selected x-range toggles its category's
        membership in the active box selection's allowed set for this
        dimension.
        """
        # args[2] is the plotly selector; xrange is in bar coordinates, so
        # ceil both ends to index whole bars.
        select_min, select_max = args[2].xrange
        select_min = int(numpy.ceil(select_min))
        select_max = int(numpy.ceil(select_max))
        fig = self.get_figure(name)
        # Map the covered bar positions back to category values.
        toggles = fig.layout['meta']['x_tick_values'][select_min:select_max]
        fig.for_each_trace(_deselect_all_points)
        if self.active_selection_deftype() == 'box':
            box = self._selection_defs[self.active_selection_name()]
            box.scope = self.scope
            if name not in box:
                # No restriction on this dimension yet: allow exactly the
                # selected categories.
                for x in toggles:
                    box.add_to_allowed_set(name, x)
            else:
                for x in toggles:
                    if name not in box or x in box[name]:
                        # Already allowed: toggle it off; drop the dimension
                        # entirely once its allowed set becomes empty.
                        box.remove_from_allowed_set(name, x)
                        if len(box[name]) == 0:
                            del box[name]
                    else:
                        box.add_to_allowed_set(name, x)
            if toggles:
                self.new_selection(box, name=self.active_selection_name())
                self._active_selection_changed()
def _on_click_from_frequencies(self, *args, name=None):
x = None
if len(args) >= 2:
xs = getattr(args[1],'xs',None)
if xs:
x = xs[0]
if x is not None:
if self.active_selection_deftype() == 'box':
box = self._selection_defs[self.active_selection_name()]
box.scope = self.scope
if name not in box or x in box[name]:
box.remove_from_allowed_set(name, x)
if len(box[name]) == 0:
del box[name]
else:
box.add_to_allowed_set(name, x)
self.new_selection(box, name=self.active_selection_name())
self._active_selection_changed()
    def _active_selection_changed(self):
        """Refresh every managed figure after the active selection changes.

        A guard attribute prevents recursive re-entry, since the figure
        updates below can fire callbacks that would re-trigger this method.
        """
        if hasattr(self, '_active_selection_changing_'):
            return # prevent recursive looping
        try:
            self._active_selection_changing_ = True
            # Batch so the status pie redraws only once for all updates.
            with self._status_pie.batch_update():
                super()._active_selection_changed()
                self._pre_update_selection_feature_score_figure()
                self._update_status()
                for col in self._figures_hist:
                    self._update_histogram_figure(col)
                for col in self._figures_freq:
                    self._update_frequencies_figure(col)
                for key in self._two_way:
                    # Keep each two-way figure's chooser in sync with the
                    # newly active selection.
                    self._two_way[key].refresh_selection_names()
                    self._two_way[key]._on_change_selection_choose(payload={
                        'new':self.active_selection_name(),
                    })
                self._update_sploms()
                self._update_hmms()
                self._update_selection_feature_score_figure()
        finally:
            del self._active_selection_changing_
    def status(self):
        """Return the status-bar widget (selection chooser, text, and pie)."""
        return self._status
def _update_status(self):
text = '<span style="font-weight:bold;font-size:150%">{:,d} Cases Selected out of {:,d} Total Cases</span>'
selection = self.active_selection()
values = (int(numpy.sum(selection)), int(selection.size))
self._status_txt.value = text.format(*values)
self._status_pie.data[0].values = [values[0], values[1]-values[0]]
def get_figure(self, col):
if col in self._figures_hist:
return self._figures_hist[col]
if col in self._figures_freq:
return self._figures_freq[col]
return None
def _clear_boxes_on_figure(self, col):
fig = self.get_figure(col)
if fig is None: return
foreground_shapes = []
refpoint = self.reference_point(col)
if refpoint is not None:
if refpoint in (True, False):
refpoint = str(refpoint).lower()
_y_max = sum(t.y for t in fig.select_traces()).max()
y_range = (
-_y_max * 0.02,
_y_max * 1.04,
)
foreground_shapes.append(
go.layout.Shape(
type="line",
xref="x1",
yref="y1",
x0=refpoint,
y0=y_range[0],
x1=refpoint,
y1=y_range[1],
**colors.DEFAULT_REF_LINE_STYLE,
)
)
fig.layout.shapes= foreground_shapes
fig.layout.title.font.color = 'black'
fig.layout.title.text = col
# def _draw_boxes_on_figure(self, col):
#
# if self.active_selection_deftype() != 'box':
# self._clear_boxes_on_figure(col)
# return
#
# fig = self.get_figure(col)
# if fig is None: return
# box = self._selection_defs[self.active_selection_name()]
# if box is None:
# self._clear_boxes_on_figure(col)
# return
#
# from ...scope.box import Bounds
#
# if col in box.thresholds:
# x_lo, x_hi = None, None
# thresh = box.thresholds.get(col)
# if isinstance(thresh, Bounds):
# x_lo, x_hi = thresh
# if isinstance(thresh, set):
# x_lo, x_hi = [], []
# for tickval, ticktext in enumerate(fig.data[0].x):
# if ticktext in thresh:
# x_lo.append(tickval-0.45)
# x_hi.append(tickval+0.45)
#
# try:
# x_range = (
# fig.data[0].x[0] - (fig.data[0].width[0] / 2),
# fig.data[0].x[-1] + (fig.data[0].width[-1] / 2),
# )
# except TypeError:
# x_range = (
# -0.5,
# len(fig.data[0].x)+0.5
# )
# x_width = x_range[1] - x_range[0]
# if x_lo is None:
# x_lo = x_range[0]-x_width * 0.02
# if x_hi is None:
# x_hi = x_range[1]+x_width * 0.02
# if not isinstance(x_lo, list):
# x_lo = [x_lo]
# if not isinstance(x_hi, list):
# x_hi = [x_hi]
#
# y_lo, y_hi = None, None
# _y_max = sum(t.y for t in fig.select_traces()).max()
# y_range = (
# -_y_max * 0.02,
# _y_max * 1.04,
# )
# y_width = y_range[1] - y_range[0]
# if y_lo is None:
# y_lo = y_range[0]-y_width * 0
# if y_hi is None:
# y_hi = y_range[1]+y_width * 0
# if not isinstance(y_lo, list):
# y_lo = [y_lo]
# if not isinstance(y_hi, list):
# y_hi = [y_hi]
#
# x_pairs = list(zip(x_lo, x_hi))
# y_pairs = list(zip(y_lo, y_hi))
#
# background_shapes = [
# # Rectangle background color
# go.layout.Shape(
# type="rect",
# xref="x1",
# yref="y1",
# x0=x_pair[0],
# y0=y_pair[0],
# x1=x_pair[1],
# y1=y_pair[1],
# line=dict(
# width=0,
# ),
# fillcolor=colors.DEFAULT_BOX_BG_COLOR,
# opacity=0.2,
# layer="below",
# )
# for x_pair in x_pairs
# for y_pair in y_pairs
# ]
#
# foreground_shapes = [
# # Rectangle reference to the axes
# go.layout.Shape(
# type="rect",
# xref="x1",
# yref="y1",
# x0=x_pair[0],
# y0=y_pair[0],
# x1=x_pair[1],
# y1=y_pair[1],
# line=dict(
# width=2,
# color=colors.DEFAULT_BOX_LINE_COLOR,
# ),
# fillcolor='rgba(0,0,0,0)',
# opacity=1.0,
# )
# for x_pair in x_pairs
# for y_pair in y_pairs
# ]
#
# refpoint = self.reference_point(col)
# if refpoint is not None:
# if refpoint in (True, False):
# refpoint = str(refpoint).lower()
# foreground_shapes.append(
# go.layout.Shape(
# type="line",
# xref="x1",
# yref="y1",
# x0=refpoint,
# y0=y_range[0],
# x1=refpoint,
# y1=y_range[1],
# **colors.DEFAULT_REF_LINE_STYLE,
# )
# )
#
# fig.layout.shapes=background_shapes+foreground_shapes
# fig.layout.title.font.color = colors.DEFAULT_BOX_LINE_COLOR
# fig.layout.title.text = f'<b>{col}</b>'
# else:
# self._clear_boxes_on_figure(col)
def _get_widgets(self, *include):
if self.scope is None:
raise ValueError('cannot create visualization with no scope')
viz_widgets = []
for i in include:
if i not in self.scope:
warnings.warn(f'{i} not in scope')
elif i not in self.data.columns:
warnings.warn(f'{i} not in data')
else:
fig = self.get_histogram_figure(i)
if fig is not None:
viz_widgets.append(fig)
return widget.Box(viz_widgets, layout=widget.Layout(flex_flow='row wrap'))
    def uncertainty_selectors(self, style='hist'):
        """Return histogram selectors for every uncertainty in the scope.

        `style` is currently unused; only histogram rendering is implemented.
        """
        return self._get_widgets(*self.scope.get_uncertainty_names())
    def lever_selectors(self, style='hist'):
        """Return histogram selectors for every policy lever in the scope.

        `style` is currently unused; only histogram rendering is implemented.
        """
        return self._get_widgets(*self.scope.get_lever_names())
    def measure_selectors(self, style='hist'):
        """Return histogram selectors for every performance measure in the scope.

        `style` is currently unused; only histogram rendering is implemented.
        """
        return self._get_widgets(*self.scope.get_measure_names())
def complete(self, measure_style='hist'):
return widget.VBox([
self.status(),
widget.HTML("<h3>Policy Levers</h3>"),
self.lever_selectors(),
widget.HTML("<h3>Exogenous Uncertainties</h3>"),
self.uncertainty_selectors(),
widget.HTML("<h3>Performance Measures</h3>"),
#self._measure_notes(style=measure_style),
self.measure_selectors(),
])
def set_active_selection_color(self, color):
super().set_active_selection_color(color)
for col, fig in self._figures_freq.items():
fig.data[0].marker.color = color
for col, fig in self._figures_hist.items():
fig.data[0].marker.color = color
c = self._status_pie.data[0].marker.colors
self._status_pie.data[0].marker.colors = [color, c[1]]
for k, twoway in self._two_way.items():
#_debugprint(f"twoway[{self._active_selection_name}][{k}] to {color}")
twoway.change_selection_color(color)
def refresh_selection_names(self):
super().refresh_selection_names()
try:
_two_way = self._two_way
except AttributeError:
pass
else:
for k, twoway in _two_way.items():
twoway.refresh_selection_names()
def two_way(
self,
key=None,
reset=False,
*,
x=None,
y=None,
use_gl=True,
):
if key is None and (x is not None or y is not None):
key = (x,y)
if key in self._two_way and not reset:
return self._two_way[key]
from .twoway import TwoWayFigure
self._two_way[key] = TwoWayFigure(self, use_gl=use_gl)
self._two_way[key].selection_choose.value = self.active_selection_name()
def _try_set_value(where, value, describe):
if value is not None:
try:
where.value = value
except TraitError:
warnings.warn(f'"{value}" is not a valid value for {describe}')
_try_set_value(self._two_way[key].x_axis_choose, x, 'the x axis dimension')
_try_set_value(self._two_way[key].y_axis_choose, y, 'the y axis dimension')
return self._two_way[key]
def splom(
self,
key=None,
reset=False,
*,
cols='M',
rows='L',
use_gl=True,
):
if not isinstance(rows, str):
rows = tuple(rows)
if not isinstance(cols, str):
cols = tuple(cols)
if key is None and (cols is not None or rows is not None):
key = (cols,rows)
if key in self._splom and not reset:
return self._splom[key]
box = None
if self.active_selection_deftype() == 'box':
name = self.active_selection_name()
box = self._selection_defs[name]
self._splom[key] = new_splom_figure(
self.scope,
self.data,
rows=rows,
cols=cols,
use_gl=use_gl,
mass=250,
row_titles='side',
size=150,
selection=self.active_selection(),
box=box,
refpoint=self._reference_point,
figure_class=go.FigureWidget,
on_select=functools.partial(self._on_select_from_splom, name=key),
)
return self._splom[key]
def _on_select_from_splom(self, row, col, trace, points, selection, name=None):
# if len(points.point_inds)==0:
# return
# print("name=",name)
# print(row, col, "->", selection)
# print( "->", selection.xrange)
# print( "->", selection.yrange)
# print( "->", type(selection.yrange))
# trace.selectedpoints = None
pass
def _update_sploms(self):
box = None
if self.active_selection_deftype() == 'box':
name = self.active_selection_name()
box = self._selection_defs[name]
for fig in self._splom.values():
with fig.batch_update():
update_splom_figure(
self.scope,
self.data,
fig,
self.active_selection(),
box,
mass=None,
selected_color=self.active_selection_color(),
)
def hmm(
self,
key=None,
reset=False,
*,
cols='M',
rows='L',
emph_selected=True,
show_points=30,
size=150,
):
if not isinstance(rows, str):
rows = tuple(rows)
if not isinstance(cols, str):
cols = tuple(cols)
if key is None and (cols is not None or rows is not None):
key = (cols,rows)
if key in self._hmm and not reset:
return self._hmm[key]
box = None
if self.active_selection_deftype() == 'box':
name = self.active_selection_name()
box = self._selection_defs[name]
self._hmm[key] = new_hmm_figure(
self.scope,
self.data,
rows=rows,
cols=cols,
row_titles='side',
size=size,
selection=self.active_selection(),
box=box,
refpoint=self._reference_point,
figure_class=go.FigureWidget,
emph_selected=emph_selected,
show_points=show_points,
)
return self._hmm[key]
def _update_hmms(self):
box = None
if self.active_selection_deftype() == 'box':
name = self.active_selection_name()
box = self._selection_defs[name]
for fig in self._hmm.values():
with fig.batch_update():
update_hmm_figure(
self.scope,
self.data,
fig,
self.active_selection(),
box,
)
def parcoords(
self,
key=None,
reset=False,
*,
coords='XLM',
):
if not isinstance(coords, str):
coords = tuple(coords)
if key is None and coords is not None:
key = coords
if key in self._parcoords and not reset:
return self._parcoords[key]
self._parcoords[key] = new_parcoords_figure(
self.scope,
self.data,
coords=coords,
selection=self.active_selection(),
figure_class=go.FigureWidget,
selected_color=self.active_selection_color(),
# on_select=functools.partial(self._on_select_from_splom, name=key),
)
return self._parcoords[key]
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError(f'selection names must be str not {type(key)}')
color = None
if value is None:
from ...scope.box import Box
value = Box(name=key, scope=self.scope)
if isinstance(value, GenericBox):
color = colors.DEFAULT_HIGHLIGHT_COLOR
elif isinstance(value, str):
color = colors.DEFAULT_EXPRESSION_COLOR
elif isinstance(value, pandas.Series):
color = colors.DEFAULT_LASSO_COLOR
self.new_selection(value, name=key, color=color)
def __getitem__(self, item):
if item not in self.selection_names():
return KeyError(item)
return self._selection_defs.get(item, None)
def prim(self, data='parameters', target=None, threshold=0.2, **kwargs):
from .prim import Prim
if target is None:
of_interest = self.active_selection()
elif isinstance(target, str):
of_interest = self._selections[target]
else:
raise ValueError("must give a target")
if data == 'parameters':
data_ = self.data[self.scope.get_parameter_names()]
elif data == 'levers':
data_ = self.data[self.scope.get_lever_names()]
elif data == 'uncertainties':
data_ = self.data[self.scope.get_uncertainty_names()]
elif data == 'measures':
data_ = self.data[self.scope.get_measure_names()]
elif data == 'all':
data_ = self.data
else:
data_ = self.data[data]
self._prim_target = of_interest
if (of_interest).all():
raise ValueError("all points are in the target, cannot run PRIM")
if (~of_interest).all():
raise ValueError("no points are in the target, cannot | |
<gh_stars>0
"""
Objects for dealing with Laguerre series.
This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `lagdomain` -- Laguerre series default domain, [0,1].
- `lagzero` -- Laguerre series that evaluates identically to 0.
- `lagone` -- Laguerre series that evaluates identically to 1.
- `lagx` -- Laguerre series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
- `lagadd` -- add two Laguerre series.
- `lagsub` -- subtract one Laguerre series from another.
- `lagmul` -- multiply two Laguerre series.
- `lagdiv` -- divide one Laguerre series by another.
- `lagval` -- evaluate a Laguerre series at given points.
Calculus
--------
- `lagder` -- differentiate a Laguerre series.
- `lagint` -- integrate a Laguerre series.
Misc Functions
--------------
- `lagfromroots` -- create a Laguerre series with specified roots.
- `lagroots` -- find the roots of a Laguerre series.
- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials.
- `lagfit` -- least-squares fit returning a Laguerre series.
- `lagtrim` -- trim leading coefficients from a Laguerre series.
- `lagline` -- Laguerre series of given straight line.
- `lag2poly` -- convert a Laguerre series to a polynomial.
- `poly2lag` -- convert a polynomial to a Laguerre series.
Classes
-------
- `Laguerre` -- A Laguerre series class.
See also
--------
`numpy.polynomial`
"""
__all__ = ['lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline',
'lagadd', 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagval',
'lagder', 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots',
'lagvander', 'lagfit', 'lagtrim', 'lagroots', 'Laguerre']
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
import warnings
from .polytemplate import polytemplate
lagtrim = pu.trimcoef
def poly2lag(pol) :
    """
    poly2lag(pol)
    Convert a polynomial to a Laguerre series.
    Convert an array representing the coefficients of a polynomial (relative
    to the "standard" basis) ordered from lowest degree to highest, to an
    array of the coefficients of the equivalent Laguerre series, ordered
    from lowest to highest degree.
    Parameters
    ----------
    pol : array_like
        1-d array containing the polynomial coefficients
    Returns
    -------
    cs : ndarray
        1-d array containing the coefficients of the equivalent Laguerre
        series.
    See Also
    --------
    lag2poly
    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    Examples
    --------
    >>> from numpy.polynomial.laguerre import poly2lag
    >>> poly2lag(np.arange(4))
    array([ 23., -63.,  58., -18.])
    """
    [pol] = pu.as_series([pol])
    # Horner-style evaluation in the Laguerre basis: fold coefficients in
    # from the highest degree down, multiplying by x at each step.
    res = 0
    for coef in pol[::-1] :
        res = lagadd(lagmulx(res), coef)
    return res
def lag2poly(cs) :
    """
    Convert a Laguerre series to a polynomial.
    Convert an array representing the coefficients of a Laguerre series,
    ordered from lowest degree to highest, to an array of the coefficients
    of the equivalent polynomial (relative to the "standard" basis) ordered
    from lowest to highest degree.
    Parameters
    ----------
    cs : array_like
        1-d array containing the Laguerre series coefficients, ordered
        from lowest order term to highest.
    Returns
    -------
    pol : ndarray
        1-d array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.
    See Also
    --------
    poly2lag
    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    Examples
    --------
    >>> from numpy.polynomial.laguerre import lag2poly
    >>> lag2poly([ 23., -63., 58., -18.])
    array([ 0.,  1.,  2.,  3.])
    """
    from .polynomial import polyadd, polysub, polymulx
    [cs] = pu.as_series([cs])
    n = len(cs)
    if n == 1:
        return cs
    else:
        c0 = cs[-2]
        c1 = cs[-1]
        # i is the current degree of c1
        # Clenshaw-style downward recurrence built on the Laguerre
        # three-term relation (i+1)*L_{i+1} = (2i+1-x)*L_i - i*L_{i-1};
        # c0 and c1 accumulate polynomial coefficient arrays, not scalars.
        for i in range(n - 1, 1, -1):
            tmp = c0
            c0 = polysub(cs[i - 2], (c1*(i - 1))/i)
            c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
        # Final combination uses L_0(x) = 1 and L_1(x) = 1 - x.
        return polyadd(c0, polysub(c1, polymulx(c1)))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Laguerre series default domain, [0, 1].
lagdomain = np.array([0,1])
# Laguerre coefficients representing zero.
lagzero = np.array([0])
# Laguerre coefficients representing one.
lagone = np.array([1])
# Laguerre coefficients representing the identity x (x = L_0(x) - L_1(x)).
lagx = np.array([1, -1])
def lagline(off, scl) :
    """
    Laguerre series whose graph is a straight line.
    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.
    Returns
    -------
    y : ndarray
        This module's representation of the Laguerre series for
        ``off + scl*x``.
    See Also
    --------
    polyline, chebline
    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagline, lagval
    >>> lagval(0,lagline(3, 2))
    3.0
    >>> lagval(1,lagline(3, 2))
    5.0
    """
    # Since x = L_0(x) - L_1(x), the series for off + scl*x has
    # coefficients [off + scl, -scl]; a horizontal line needs only L_0.
    if scl == 0 :
        return np.array([off])
    return np.array([off + scl, -scl])
def lagfromroots(roots) :
    """
    Generate a Laguerre series with the given roots.
    Return the array of coefficients for the P-series whose roots (a.k.a.
    "zeros") are given by *roots*. The returned array of coefficients is
    ordered from lowest order "term" to highest, and zeros of multiplicity
    greater than one must be included in *roots* a number of times equal
    to their multiplicity (e.g., if `2` is a root of multiplicity three,
    then [2,2,2] must be in *roots*).
    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.
    Returns
    -------
    out : ndarray
        1-d array of the Laguerre series coefficients, ordered from low to
        high.  If all roots are real, ``out.dtype`` is a float type;
        otherwise, ``out.dtype`` is a complex type, even if all the
        coefficients in the result are real (see Examples below).
    See Also
    --------
    polyfromroots, chebfromroots
    Notes
    -----
    What is returned are the :math:`c_i` such that:
    .. math::
        \\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i])
    where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Laguerre
    (basis) polynomial.  Note that, unlike `polyfromroots`, due to the
    nature of the Laguerre basis set, the above identity *does not* imply
    :math:`c_n = 1` identically (see Examples).
    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagfromroots, lagval
    >>> coef = lagfromroots((-1, 0, 1))
    >>> lagval((-1, 0, 1), coef)
    array([ 0.,  0.,  0.])
    >>> coef = lagfromroots((-1j, 1j))
    >>> lagval((-1j, 1j), coef)
    array([ 0.+0.j,  0.+0.j])
    """
    if len(roots) == 0 :
        # Empty product: the constant series 1.
        return np.ones(1)
    [roots] = pu.as_series([roots], trim=False)
    roots.sort()
    # Build the first-degree factors (x - r), then combine them pairwise
    # so each lagmul works on operands of similar degree.
    factors = [lagline(-r, 1) for r in roots]
    while len(factors) > 1:
        half, odd = divmod(len(factors), 2)
        paired = [lagmul(factors[i], factors[i + half]) for i in range(half)]
        if odd:
            # Fold the unpaired trailing factor into the first product.
            paired[0] = lagmul(paired[0], factors[-1])
        factors = paired
    return factors[0]
def lagadd(c1, c2):
    """
    Add one Laguerre series to another.
    Returns the sum of two Laguerre series `c1` + `c2`. The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
    Parameters
    ----------
    c1, c2 : array_like
        1-d arrays of Laguerre series coefficients ordered from low to
        high.
    Returns
    -------
    out : ndarray
        Array representing the Laguerre series of their sum.
    See Also
    --------
    lagsub, lagmul, lagdiv, lagpow
    Notes
    -----
    Unlike multiplication, division, etc., the sum of two Laguerre series
    is a Laguerre series (without having to "reproject" the result onto
    the basis set) so addition, just like that of "standard" polynomials,
    is simply "component-wise."
    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagadd
    >>> lagadd([1, 2, 3], [1, 2, 3, 4])
    array([ 2.,  4.,  6.,  4.])
    """
    # as_series returns trimmed copies, so in-place addition is safe.
    [c1, c2] = pu.as_series([c1, c2])
    # Ensure c1 is the longer array, then fold c2 into its low-order part;
    # addition in the Laguerre basis is component-wise.
    if len(c2) > len(c1) :
        c1, c2 = c2, c1
    c1[:c2.size] += c2
    return pu.trimseq(c1)
def lagsub(c1, c2):
"""
Subtract one Laguerre series from another.
Returns the difference of two Laguerre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their difference.
See Also
--------
lagadd, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Laguerre
series is a Laguerre series (without having to "reproject" the result
onto the basis set) | |
import support.classes as classes
import numpy as np
import scipy
from scipy import stats
class LgstReg(classes.Data):
    '''
    Multi-class logistic regression model.

    Given data with X_tr, X_te, y_tr, y_te, we initialize a model object
    with loss functions, gradients, Hessians, etc., as well as an "eval"
    method which automatically knows to use the test data.

    The parameter vector w packs (nc-1) weight rows of d_feat entries
    each; the activation of the final class is pinned at zero so the
    model is identifiable.
    '''

    def __init__(self, dinfo):
        # Given data info, load up the (X,y) data.
        super(LgstReg, self).__init__(dinfo)
        # Convert original labels to a one-hot binary representation.
        self.nc = self.get_nc()               # number of classes.
        self.C_tr = self.onehot(y=self.y_tr)  # one-hot training labels.
        self.C_te = self.onehot(y=self.y_te)  # one-hot testing labels.
        self.n = self.X_tr.shape[0]           # number of training obs.
        self.d_feat = self.X_tr.shape[1]      # number of features.
        self.d_para = self.d_feat * (self.nc-1)  # number of parameters to set.

    def __str__(self):
        s_mod = "MODEL: Logistic regression."\
                + "\n" + "Info on data as follows..."
        s_data = super(LgstReg, self).__str__()
        return s_mod + "\n" + s_data

    def w_initialize(self):
        '''
        Initial weights: (d_para x 1), generated uniformly on [-1,1].
        '''
        return 2 * np.random.random_sample((self.d_para, 1)) - 1

    def get_nc(self):
        '''
        Get the number of classes, counting distinct labels over the
        combined training and test sets.
        '''
        return np.unique(np.concatenate((self.y_tr, self.y_te), axis=0)).size

    def onehot(self, y):
        '''
        Encode labels into a one-hot matrix.

        Input:
        y is a (k x 1) array taking values in {0,1,...,nc-1}; k may be
        the training, test, or combined sample size.

        Output:
        A (k x nc) one-hot integer matrix.
        '''
        # NOTE: a leftover debug print of the full label array on every
        # loop iteration has been removed here.
        nc = self.nc
        k = y.size
        C = np.zeros(nc*k, dtype=np.int16).reshape((k, nc))
        for i in range(k):
            j = y[i, 0]  # assumes y has only one column.
            C[i, j] = 1
        return C

    def eval(self, w):
        '''
        Evaluate a parameter vector on the test data.

        Returns [mean test loss, test loss std, classification rate].
        '''
        losses = self.l_te(w=w)  # logistic reg loss.
        # Based on pre-specified decision rule, get classification rate.
        y_est = self.classify(w=w, X=self.X_te)
        perf = self.class_perf(y_est, self.y_te)
        # Specify the loss-based statistics to use here.
        # potential extension: can add per-class P/R/F1 if desired.
        return [losses.mean(), losses.std(), perf["rate"]]

    def class_perf(self, y_est, y_true):
        '''
        Given class label estimates and true values, compute the
        fraction of correct classifications made, plus per-class
        precision/recall/F1 scores.

        Input:
        y_est and y_true are (k x 1) matrices of labels.

        Output:
        A dict {"rate": fraction correct,
                "PRF1": {class: {"P","R","F1"}}}.
        '''
        # First, get the classification rate.
        k = y_est.size
        frac_correct = (y_est == y_true).sum() / k
        # Then, get precision/recall/F1 for each class.
        prec_rec = {}
        for c in range(self.nc):
            idx_c = (y_true == c)
            idx_notc = (idx_c == False)
            TP = (y_est[idx_c] == c).sum()
            FN = idx_c.sum() - TP
            FP = (y_est[idx_notc] == c).sum()
            # Guard the zero-denominator cases explicitly.
            prec = 0 if (TP == 0 and FP == 0) else TP / (TP+FP)
            rec = 0 if (TP == 0 and FN == 0) else TP / (TP+FN)
            # F1 (harmonic mean of precision and recall).
            if (prec == 0 or rec == 0):
                f1 = 0
            else:
                f1 = 2 * prec * rec / (prec + rec)
            prec_rec[c] = {"P": prec,
                           "R": rec,
                           "F1": f1}
        return {"rate": frac_correct,
                "PRF1": prec_rec}

    def _activations(self, w, X):
        '''
        Return the (nc x k) activation matrix for k observations; the
        last row (reference class) is fixed at zero for identifiability.
        '''
        k = X.shape[0]
        A = np.zeros(k*self.nc).reshape((self.nc, k))
        A[:-1, :] = np.dot(w.reshape((self.nc-1, self.d_feat)),
                           np.transpose(X))  # leave last row as zeros.
        return A

    def l_imp(self, w, X, C, lam=0):
        '''
        Implementation of the multi-class logistic regression
        loss function.

        Input:
        w is a (d_para x 1) matrix of weights.
        X is a (k x d_feat) matrix of k observations.
        C is a (k x nc) one-hot matrix of class labels.
        lam is a non-negative l2 regularization parameter.
        NOTE: k can be anything, the training/test sample size.

        Output:
        A vector of length k with losses evaluated at k points.
        '''
        A = self._activations(w, X)
        # Raw activations of all the correct weights.
        cvec = np.sum(A*np.transpose(C), axis=0)
        # Compute the negative log-likelihoods.
        err = np.log(np.sum(np.exp(A), axis=0)) - cvec
        # Return the losses (all data points), with penalty if needed.
        if (lam > 0):
            # BUG FIX: previously referenced undefined name `W`, which
            # raised NameError whenever lam > 0.
            return err + lam * np.linalg.norm(w)**2
        return err

    def l_tr(self, w, lam=0):
        return self.l_imp(w=w, X=self.X_tr, C=self.C_tr, lam=lam)

    def l_te(self, w, lam=0):
        return self.l_imp(w=w, X=self.X_te, C=self.C_te, lam=lam)

    def g_imp(self, w, X, C, lam=0):
        '''
        Implementation of the gradient of the loss function used in
        multi-class logistic regression.

        Input: as for l_imp.
        NOTE(review): lam is accepted but the regularization gradient is
        not implemented — confirm whether that is intentional.

        Output:
        A (k x d_para) matrix of gradients eval'd at k points.
        '''
        k = X.shape[0]
        A = self._activations(w, X)
        # Compute the conditional label probabilities, (nc x k).
        P = np.exp(A) / np.sum(np.exp(A), axis=0)
        # BUG FIX: this buffer was previously created with np.arange,
        # yielding an *integer* array that silently truncated the float
        # gradient values assigned into it.
        G = np.zeros((k, self.d_para))
        for i in range(k):
            # Flattened outer product; the reference class is excluded
            # by carefully removing the last elements.
            G[i, :] = np.kron(a=(P[:-1, i]-C[i, :-1]), b=X[i, :])
        return G

    def g_tr(self, w, lam=0):
        return self.g_imp(w=w, X=self.X_tr, C=self.C_tr, lam=lam)

    def g_te(self, w, lam=0):
        return self.g_imp(w=w, X=self.X_te, C=self.C_te, lam=lam)

    def h_imp(self, w, lam=0):
        # Hessian computations are not implemented for this model.
        pass

    def h_tr(self, w, lam=0):
        pass

    def h_te(self, w, lam=0):
        pass

    def classify(self, w, X):
        '''
        Given learned weights (w) and a matrix of one or
        more observations, classify them as {0,...,nc-1}.

        Input:
        w is a (d_para x 1) matrix of weights.
        X is a (k x d_feat) matrix of k observations.

        Output:
        A (k x 1) array of labels in {0,...,nc-1}.
        '''
        k = X.shape[0]
        A = self._activations(w, X)
        P = np.exp(A) / np.sum(np.exp(A), axis=0)  # (nc x k)
        # Return the class with the largest prob, given the data.
        return np.argmax(P, axis=0).reshape((k, 1))
class LinReg(classes.Data):
def __init__(self, dinfo):
'''
Given data with X_tr, X_te, y_tr, y_te, we
initialize a model object with loss functions, gradients,
Hessians, etc., as well as an "eval" method which
automatically knows to use the test data.
'''
# Given data info, load it up into memory for use.
super(LinReg,self).__init__(dinfo)
self.n = self.X_tr.shape[0]
self.d = self.X_tr.shape[1]
def __str__(self):
s_mod = "MODEL: Linear regression."\
+ "\n" + "Info on data as follows..."
s_data = super(LinReg,self).__str__()
return s_mod + "\n" + s_data
def w_initialize(self):
# Randomly generated uniformly on [-1,1].
out = 2 * np.random.random_sample((self.d,1)) - 1
return out
def eval(self, w):
'''
Evaluate a parameter vector on the test data.
'''
# Specify the loss to use here.
losses = self.l_te(w=w)
# Specify the loss-based statistics to use here.
rawres = [losses.mean(), losses.std()]
return rawres
def l_imp(self, w, X, y, lam_l1=0, lam_l2=0):
'''
Implementation of (regularized) squared error as
loss function under a linear model.
'''
# Args:
# w is a (d x 1) matrix taking real values.
# X is a (k x d) matrix of n observations.
# y is a (k x 1) matrix taking real values.
# lam_* are parameters | |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DICOM IO connector
This module implements several tools to facilitate the interaction between
a Google Cloud Healthcare DICOM store and a Beam pipeline.
For more details on DICOM store and API:
https://cloud.google.com/healthcare/docs/how-tos/dicom
The DICOM IO connector can be used to search metadata or write DICOM files
to DICOM store.
When used together with Google Pubsub message connector, the
`FormatToQido` PTransform implemented in this module can be used
to convert Pubsub messages to search requests.
Since Traceability is crucial for healthcare
API users, every input or error message will be recorded in the output of
the DICOM IO connector. As a result, every PTransform in this module will
return a PCollection of dict that encodes results and detailed error messages.
Search instance's metadata (QIDO request)
===================================================
DicomSearch() wraps the QIDO request client and supports 3 levels of search.
Users should specify the level by setting the 'search_type' entry in the input
dict. They can also refine the search by adding tags to filter the results using
the 'params' entry. Here is a sample usage:
with Pipeline() as p:
input_dict = p | beam.Create(
[{'project_id': 'abc123', 'type': 'instances',...},
{'project_id': 'dicom_go', 'type': 'series',...}])
results = input_dict | io.gcp.DicomSearch()
results | 'print successful search' >> beam.Map(
lambda x: print(x['result'] if x['success'] else None))
results | 'print failed search' >> beam.Map(
lambda x: print(x['result'] if not x['success'] else None))
In the example above, successful qido search results and error messages for
failed requests are printed. When used in real life, user can choose to filter
those data and output them to wherever they want.
Convert DICOM Pubsub message to Qido search request
===================================================
Healthcare API users might read messages from Pubsub to monitor the store
operations (e.g. new file) in a DICOM storage. Pubsub messages encode the
DICOM web store path as well as instance ids. If users are interested in
getting new instance's metadata, they can use the `FormatToQido` transform
to convert the message into Qido Search dict then use the `DicomSearch`
transform. Here is a sample usage:
pipeline_options = PipelineOptions()
pipeline_options.view_as(StandardOptions).streaming = True
p = beam.Pipeline(options=pipeline_options)
pubsub = p | beam.io.ReadStringFromPubsub(subscription='a_dicom_store')
results = pubsub | FormatToQido()
success = results | 'filter message' >> beam.Filter(lambda x: x['success'])
qido_dict = success | 'get qido request' >> beam.Map(lambda x: x['result'])
metadata = qido_dict | DicomSearch()
In the example above, the pipeline is listening to a pubsub topic and waiting
for messages from DICOM API. When a new DICOM file comes into the storage, the
pipeline will receive a pubsub message, convert it to a Qido request dict and
feed it to DicomSearch() PTransform. As a result, users can get the metadata for
every new DICOM file. Note that not every pubsub message received is from DICOM
API, so we need to filter the results first.
Store a DICOM file in a DICOM storage
===================================================
UploadToDicomStore() wraps store request API and users can use it to send a
DICOM file to a DICOM store. It supports two types of input: 1.file data in
byte[] 2.fileio object. Users should set the 'input_type' when initializing
this PTransform. Here are the examples:
with Pipeline() as p:
input_dict = {'project_id': 'abc123', 'type': 'instances',...}
path = "gcs://bucketname/something/a.dcm"
match = p | fileio.MatchFiles(path)
fileio_obj = match | fileio.ReadAll()
results = fileio_obj | UploadToDicomStore(input_dict, 'fileio')
with Pipeline() as p:
input_dict = {'project_id': 'abc123', 'type': 'instances',...}
f = open("abc.dcm", "rb")
dcm_file = f.read()
byte_file = p | 'create byte file' >> beam.Create([dcm_file])
results = byte_file | UploadToDicomStore(input_dict, 'bytes')
The first example uses a PCollection of fileio objects as input.
UploadToDicomStore will read DICOM files from the objects and send them
to a DICOM storage.
The second example uses a PCollection of byte[] as input. UploadToDicomStore
will directly send those DICOM files to a DICOM storage.
Users can also get the operation results in the output PCollection if they want
to handle the failed store requests.
"""
# pytype: skip-file
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
import apache_beam as beam
from apache_beam.io.gcp.dicomclient import DicomApiHttpClient
from apache_beam.transforms import PTransform
class DicomSearch(PTransform):
  """A PTransform used for retrieving DICOM instance metadata from Google
  Cloud DICOM store. It takes a PCollection of dicts as input and return
  a PCollection of dict as results:
  INPUT:
  The input dict represents DICOM web path parameters, which has the following
  string keys and values:
  {
  'project_id': str,
  'region': str,
  'dataset_id': str,
  'dicom_store_id': str,
  'search_type': str,
  'params': dict(str,str) (Optional),
  }
  Key-value pairs:
  project_id: Id of the project in which the DICOM store is
  located. (Required)
  region: Region where the DICOM store resides. (Required)
  dataset_id: Id of the dataset where DICOM store belongs to. (Required)
  dicom_store_id: Id of the dicom store. (Required)
  search_type: Which type of search it is, could only be one of the three
  values: 'instances', 'series', or 'studies'. (Required)
  params: A dict of str:str pairs used to refine QIDO search. (Optional)
  Supported tags in three categories:
  1.Studies:
  * StudyInstanceUID,
  * PatientName,
  * PatientID,
  * AccessionNumber,
  * ReferringPhysicianName,
  * StudyDate,
  2.Series: all study level search terms and
  * SeriesInstanceUID,
  * Modality,
  3.Instances: all study/series level search terms and
  * SOPInstanceUID,
  e.g. {"StudyInstanceUID":"1","SeriesInstanceUID":"2"}
  OUTPUT:
  The output dict wraps results as well as error messages:
  {
  'result': a list of dicts in JSON style.
  'success': boolean value telling whether the operation is successful.
  'input': detail ids and dicomweb path for this retrieval.
  'status': status code from the server, used as error message.
  }
  """
  def __init__(
      self, buffer_size=8, max_workers=5, client=None, credential=None):
    """Initializes DicomSearch.
    Args:
      buffer_size: # type: Int. Size of the request buffer.
      max_workers: # type: Int. Maximum number of threads a worker can
        create. If it is set to one, all the request will be processed
        sequentially in a worker.
      client: # type: object. If it is specified, all the Api calls will
        made by this client instead of the default one (DicomApiHttpClient).
      credential: # type: Google credential object, if it is specified, the
        Http client will use it to create sessions instead of the default.
    """
    self.buffer_size = buffer_size
    self.max_workers = max_workers
    # Fall back to the default HTTP client when none is supplied.
    self.client = client if client else DicomApiHttpClient()
    self.credential = credential
  def expand(self, pcoll):
    # Fan every input dict out to a QIDO request executor.
    read_fn = _QidoReadFn(
        self.buffer_size, self.max_workers, self.client, self.credential)
    return pcoll | beam.ParDo(read_fn)
class _QidoReadFn(beam.DoFn):
"""A DoFn for executing every qido query request."""
def __init__(self, buffer_size, max_workers, client, credential=None):
self.buffer_size = buffer_size
self.max_workers = max_workers
self.client = client
self.credential = credential
def start_bundle(self):
self.buffer = []
def finish_bundle(self):
for item in self._flush():
yield item
def validate_element(self, element):
# Check if all required keys present.
required_keys = [
'project_id', 'region', 'dataset_id', 'dicom_store_id', 'search_type'
]
for key in required_keys:
if key not in element:
error_message = 'Must have %s in the dict.' % (key)
return False, error_message
# Check if return type is correct.
if element['search_type'] in ['instances', "studies", "series"]:
return True, None
else:
error_message = (
'Search type can only be "studies", '
'"instances" or "series"')
return False, error_message
def process(
self,
element,
window=beam.DoFn.WindowParam,
timestamp=beam.DoFn.TimestampParam):
# Check if the element is valid
valid, error_message = self.validate_element(element)
if valid:
self.buffer.append((element, window, timestamp))
if len(self.buffer) >= self.buffer_size:
for item in self._flush():
yield item
else:
# Return this when the input dict dose not meet the requirements
out = {}
out['result'] = []
out['status'] = error_message
out['input'] = element
out['success'] = False
yield out
def make_request(self, element):
# Sending Qido request to DICOM Api
project_id = element['project_id']
region = element['region']
dataset_id = element['dataset_id']
dicom_store_id = element['dicom_store_id']
search_type = element['search_type']
params = element['params'] if 'params' in element else None
# Call qido search http | |
<gh_stars>1-10
"""
Provides full versioning CRUD and representation for collections of xblocks (e.g., courses, modules, etc).
Representation:
* course_index: a dictionary:
** '_id': a unique id which cannot change,
** 'org': the org's id. Only used for searching not identity,
** 'course': the course's catalog number
** 'run': the course's run id,
** 'edited_by': user_id of user who created the original entry,
** 'edited_on': the datetime of the original creation,
** 'versions': versions_dict: {branch_id: structure_id, ...}
** 'search_targets': a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
* structure:
** '_id': an ObjectId (guid),
** 'root': BlockKey (the block_type and block_id of the root block in the 'blocks' dictionary)
** 'previous_version': the structure from which this one was derived. For published courses, this
points to the previously published version of the structure not the draft published to this.
** 'original_version': the original structure id in the previous_version relation. Is a pseudo object
identifier enabling quick determination if 2 structures have any shared history,
** 'edited_by': user_id of the user whose change caused the creation of this structure version,
** 'edited_on': the datetime for the change causing this creation of this structure version,
** 'blocks': dictionary of xblocks in this structure:
*** BlockKey: key mapping to each BlockData:
*** BlockData: object containing the following attributes:
**** 'block_type': the xblock type id
**** 'definition': the db id of the record containing the content payload for this xblock
**** 'fields': the Scope.settings and children field values
***** 'children': This is stored as a list of (block_type, block_id) pairs
**** 'defaults': Scope.settings default values copied from a template block (used e.g. when
blocks are copied from a library to a course)
**** 'edit_info': EditInfo object:
***** 'edited_on': when was this xblock's fields last changed (will be edited_on value of
update_version structure)
***** 'edited_by': user_id for who changed this xblock last (will be edited_by value of
update_version structure)
***** 'update_version': the guid for the structure where this xblock got its current field
values. This may point to a structure not in this structure's history (e.g., to a draft
branch from which this version was published.)
***** 'previous_version': the guid for the structure which previously changed this xblock
(will be the previous value of update_version; so, may point to a structure not in this
structure's history.)
            ***** 'source_version': the guid for the structure that was copied/published into this block
* definition: shared content with revision history for xblock content fields
** '_id': definition_id (guid),
** 'block_type': xblock type id
** 'fields': scope.content (and possibly other) field values.
** 'edit_info': dictionary:
*** 'edited_by': user_id whose edit caused this version of the definition,
*** 'edited_on': datetime of the change causing this version
*** 'previous_version': the definition_id of the previous version of this definition
*** 'original_version': definition_id of the root of the previous version relation on this
definition. Acts as a pseudo-object identifier.
"""
import copy
import datetime
import hashlib
import logging
from collections import defaultdict
from importlib import import_module
from bson.objectid import ObjectId
from ccx_keys.locator import CCXBlockUsageLocator, CCXLocator
from mongodb_proxy import autoretry_read
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import (
BlockUsageLocator,
CourseLocator,
DefinitionLocator,
LibraryLocator,
LocalId,
)
from path import Path as path
from pytz import UTC
from xblock.core import XBlock
from xblock.fields import Reference, ReferenceList, ReferenceValueDict, Scope
from xmodule.assetstore import AssetMetadata
from xmodule.course_module import CourseSummary
from xmodule.error_module import ErrorBlock
from xmodule.errortracker import null_error_tracker
from xmodule.library_content_module import LibrarySummary
from xmodule.modulestore import (
BlockData,
BulkOperationsMixin,
BulkOpsRecord,
ModuleStoreEnum,
ModuleStoreWriteBase,
SortedAssetList,
inheritance
)
from xmodule.modulestore.exceptions import (
DuplicateCourseError,
DuplicateItemError,
InsufficientSpecificationError,
MultipleCourseBlocksFound,
MultipleLibraryBlocksFound,
VersionConflictError
)
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.modulestore.split_mongo.mongo_connection import DuplicateKeyError, MongoConnection
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from xmodule.partitions.partitions_service import PartitionService
from ..exceptions import ItemNotFoundError
from .caching_descriptor_system import CachingDescriptorSystem
log = logging.getLogger(__name__)
# ==============================================================================
#
# Known issue:
# Inheritance for cached kvs doesn't work on edits. Use case.
# 1) attribute foo is inheritable
# 2) g.children = [p], p.children = [a]
# 3) g.foo = 1 on load
# 4) if g.foo > 0, if p.foo > 0, if a.foo > 0 all eval True
# 5) p.foo = -1
# 6) g.foo > 0, p.foo <= 0 all eval True BUT
# 7) BUG: a.foo > 0 still evals True but should be False
# 8) reread and everything works right
# 9) p.del(foo), p.foo > 0 is True! works
# 10) BUG: a.foo < 0!
# Local fix won't permanently work b/c xblock may cache a.foo...
#
# ==============================================================================
# When an exclusion blacklist is set to this value, all children should be
# excluded.
EXCLUDE_ALL = '*'
class SplitBulkWriteRecord(BulkOpsRecord):
    """
    Per-course bookkeeping for an active split-mongo bulk operation:
    the working copy of the course index plus caches of structures,
    definitions, and modules touched during the operation.
    """
    def __init__(self):
        super().__init__()
        self.initial_index = None
        self.index = None
        self.structures = {}
        self.structures_in_db = set()
        # dict(version_guid, dict(BlockKey, module))
        self.modules = defaultdict(dict)
        self.definitions = {}
        self.definitions_in_db = set()
        self.course_key = None
    # TODO: This needs to track which branches have actually been modified/versioned,
    # so that copying one branch to another doesn't update the original branch.
    @property
    def dirty_branches(self):
        """
        Return a list of which branch version ids differ from what was stored
        in the database at the beginning of this bulk operation.
        """
        # No working index set: no branch can have changed.
        if self.index is None:
            return []
        current_versions = self.index.get('versions', {})
        # No index existed in the database: every branch is dirty by definition.
        if self.initial_index is None:
            return list(current_versions.keys())
        # Otherwise, dirty branches are those whose ids differ between the
        # working index and the initial snapshot.
        initial_versions = self.initial_index.get('versions', {})
        return [
            branch
            for branch, version_id in current_versions.items()
            if initial_versions.get(branch) != version_id
        ]
    def structure_for_branch(self, branch):
        """Return the cached structure the branch points at (None if absent)."""
        structure_id = self.index.get('versions', {}).get(branch)
        return self.structures.get(structure_id)
    def set_structure_for_branch(self, branch, structure):
        """Cache the structure and point the branch at it in the working index."""
        if self.index is not None:
            self.index.setdefault('versions', {})[branch] = structure['_id']
        self.structures[structure['_id']] = structure
    def __repr__(self):
        return (
            f"SplitBulkWriteRecord<{self._active_count!r}, "
            f"{self.initial_index!r}, {self.index!r}, "
            f"{self.structures!r}, {self.structures_in_db!r}>"
        )
class SplitBulkWriteMixin(BulkOperationsMixin):
"""
This implements the :meth:`bulk_operations` modulestore semantics for the :class:`SplitMongoModuleStore`.
In particular, it implements :meth:`_begin_bulk_operation` and
:meth:`_end_bulk_operation` to provide the external interface, and then exposes a set of methods
for interacting with course_indexes and structures that can be used by :class:`SplitMongoModuleStore`.
Internally, this mixin records the set of all active bulk operations (keyed on the active course),
and only writes those values to ``self.mongo_connection`` when :meth:`_end_bulk_operation` is called.
If a bulk write operation isn't active, then the changes are immediately written to the underlying
mongo_connection.
"""
_bulk_ops_record_type = SplitBulkWriteRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.SplitBulkWriteRecord` for this course.
"""
# handle split specific things and defer to super otherwise
if course_key is None:
return self._bulk_ops_record_type()
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError(f'{course_key!r} is not a CourseLocator or LibraryLocator')
# handle version_guid based retrieval locally
if course_key.org is None or course_key.course is None or course_key.run is None:
return self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
# handle ignore case and general use
return super()._get_bulk_ops_record(
course_key.replace(branch=None, version_guid=None), ignore_case
)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError(f'{course_key!r} is not a CourseLocator or LibraryLocator')
if course_key.org and course_key.course and course_key.run:
del self._active_bulk_ops.records[course_key.replace(branch=None, version_guid=None)]
else:
del self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
def _start_outermost_bulk_operation(self, bulk_write_record, course_key, ignore_case=False): # lint-amnesty, pylint: disable=arguments-differ
"""
Begin a bulk write operation on course_key.
"""
bulk_write_record.initial_index = self.db_connection.get_course_index(course_key, ignore_case=ignore_case)
# Ensure that any edits to the index don't pollute the initial_index
bulk_write_record.index = copy.deepcopy(bulk_write_record.initial_index)
bulk_write_record.course_key = course_key
def _end_outermost_bulk_operation(self, bulk_write_record, structure_key): # lint-amnesty, pylint: disable=arguments-differ
"""
End the active bulk write operation on structure_key (course or library key).
"""
dirty = False
# If the content is dirty, then update the database
for _id in bulk_write_record.structures.keys() - bulk_write_record.structures_in_db:
dirty = True
try:
self.db_connection.insert_structure(bulk_write_record.structures[_id], bulk_write_record.course_key)
except DuplicateKeyError:
# We may not have looked up this structure inside this bulk operation, and thus
# didn't realize that it was already in the database. That's OK, the store is
# append only, so if it's already been written, we can just keep going.
log.debug("Attempted to insert duplicate structure %s", _id)
for _id in bulk_write_record.definitions.keys() - bulk_write_record.definitions_in_db:
dirty = True
try:
self.db_connection.insert_definition(bulk_write_record.definitions[_id], bulk_write_record.course_key)
except DuplicateKeyError:
# We may not have looked up this definition inside this bulk operation, and thus
# didn't realize that it | |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# Import the compiled SWIG extension module `_bacnet`, preferring the copy
# located next to this wrapper (Python >= 2.6); otherwise fall back to a
# plain import. NOTE: uses the deprecated `imp` module (removed in Python
# 3.12). This file is SWIG-generated — regenerate rather than hand-edit.
from sys import version_info
if version_info >= (2, 6, 0):
    def swig_import_helper():
        # Locate and load `_bacnet` from this package's own directory.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_bacnet', [dirname(__file__)])
        except ImportError:
            # Not found next to the wrapper: fall back to sys.path lookup.
            import _bacnet
            return _bacnet
        if fp is not None:
            try:
                _mod = imp.load_module('_bacnet', fp, pathname, description)
            finally:
                # find_module opened the file; always close it.
                fp.close()
            return _mod
    _bacnet = swig_import_helper()
    del swig_import_helper
else:
    import _bacnet
del version_info
# Alias the builtin `property` when available (true on Python >= 2.2).
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # SWIG attribute setter: route writes through the generated
    # __swig_setmethods__ table; with static=1, unknown attributes raise.
    if (name == "thisown"):
        # Ownership flag lives on the underlying SwigPyObject.
        return self.this.own(value)
    if (name == "this"):
        # Only accept real SWIG pointer objects for the `this` slot.
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        # Dynamic mode: allow arbitrary Python-side attributes.
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Dynamic variant of the setter: unknown attributes allowed (static=0).
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    # SWIG attribute getter: route reads through the generated
    # __swig_getmethods__ table; with static=1, unknown attributes raise.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        # Dynamic mode: fall back to normal object attribute lookup.
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    # Dynamic variant of the getter (static=0).
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    # repr helper showing the wrapped C pointer when one is attached.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# New-style class detection shim: on Python >= 2.2 `object` exists and
# _newclass is 1; on older interpreters fall back to a classic class.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
_bacnet.PROP_ACKED_TRANSITIONS_swigconstant(_bacnet)
PROP_ACKED_TRANSITIONS = _bacnet.PROP_ACKED_TRANSITIONS
_bacnet.PROP_ACK_REQUIRED_swigconstant(_bacnet)
PROP_ACK_REQUIRED = _bacnet.PROP_ACK_REQUIRED
_bacnet.PROP_ACTION_swigconstant(_bacnet)
PROP_ACTION = _bacnet.PROP_ACTION
_bacnet.PROP_ACTION_TEXT_swigconstant(_bacnet)
PROP_ACTION_TEXT = _bacnet.PROP_ACTION_TEXT
_bacnet.PROP_ACTIVE_TEXT_swigconstant(_bacnet)
PROP_ACTIVE_TEXT = _bacnet.PROP_ACTIVE_TEXT
_bacnet.PROP_ACTIVE_VT_SESSIONS_swigconstant(_bacnet)
PROP_ACTIVE_VT_SESSIONS = _bacnet.PROP_ACTIVE_VT_SESSIONS
_bacnet.PROP_ALARM_VALUE_swigconstant(_bacnet)
PROP_ALARM_VALUE = _bacnet.PROP_ALARM_VALUE
_bacnet.PROP_ALARM_VALUES_swigconstant(_bacnet)
PROP_ALARM_VALUES = _bacnet.PROP_ALARM_VALUES
_bacnet.PROP_ALL_swigconstant(_bacnet)
PROP_ALL = _bacnet.PROP_ALL
_bacnet.PROP_ALL_WRITES_SUCCESSFUL_swigconstant(_bacnet)
PROP_ALL_WRITES_SUCCESSFUL = _bacnet.PROP_ALL_WRITES_SUCCESSFUL
_bacnet.PROP_APDU_SEGMENT_TIMEOUT_swigconstant(_bacnet)
PROP_APDU_SEGMENT_TIMEOUT = _bacnet.PROP_APDU_SEGMENT_TIMEOUT
_bacnet.PROP_APDU_TIMEOUT_swigconstant(_bacnet)
PROP_APDU_TIMEOUT = _bacnet.PROP_APDU_TIMEOUT
_bacnet.PROP_APPLICATION_SOFTWARE_VERSION_swigconstant(_bacnet)
PROP_APPLICATION_SOFTWARE_VERSION = _bacnet.PROP_APPLICATION_SOFTWARE_VERSION
_bacnet.PROP_ARCHIVE_swigconstant(_bacnet)
PROP_ARCHIVE = _bacnet.PROP_ARCHIVE
_bacnet.PROP_BIAS_swigconstant(_bacnet)
PROP_BIAS = _bacnet.PROP_BIAS
_bacnet.PROP_CHANGE_OF_STATE_COUNT_swigconstant(_bacnet)
PROP_CHANGE_OF_STATE_COUNT = _bacnet.PROP_CHANGE_OF_STATE_COUNT
_bacnet.PROP_CHANGE_OF_STATE_TIME_swigconstant(_bacnet)
PROP_CHANGE_OF_STATE_TIME = _bacnet.PROP_CHANGE_OF_STATE_TIME
_bacnet.PROP_NOTIFICATION_CLASS_swigconstant(_bacnet)
PROP_NOTIFICATION_CLASS = _bacnet.PROP_NOTIFICATION_CLASS
_bacnet.PROP_BLANK_1_swigconstant(_bacnet)
PROP_BLANK_1 = _bacnet.PROP_BLANK_1
_bacnet.PROP_CONTROLLED_VARIABLE_REFERENCE_swigconstant(_bacnet)
PROP_CONTROLLED_VARIABLE_REFERENCE = _bacnet.PROP_CONTROLLED_VARIABLE_REFERENCE
_bacnet.PROP_CONTROLLED_VARIABLE_UNITS_swigconstant(_bacnet)
PROP_CONTROLLED_VARIABLE_UNITS = _bacnet.PROP_CONTROLLED_VARIABLE_UNITS
_bacnet.PROP_CONTROLLED_VARIABLE_VALUE_swigconstant(_bacnet)
PROP_CONTROLLED_VARIABLE_VALUE = _bacnet.PROP_CONTROLLED_VARIABLE_VALUE
_bacnet.PROP_COV_INCREMENT_swigconstant(_bacnet)
PROP_COV_INCREMENT = _bacnet.PROP_COV_INCREMENT
_bacnet.PROP_DATE_LIST_swigconstant(_bacnet)
PROP_DATE_LIST = _bacnet.PROP_DATE_LIST
_bacnet.PROP_DAYLIGHT_SAVINGS_STATUS_swigconstant(_bacnet)
PROP_DAYLIGHT_SAVINGS_STATUS = _bacnet.PROP_DAYLIGHT_SAVINGS_STATUS
_bacnet.PROP_DEADBAND_swigconstant(_bacnet)
PROP_DEADBAND = _bacnet.PROP_DEADBAND
_bacnet.PROP_DERIVATIVE_CONSTANT_swigconstant(_bacnet)
PROP_DERIVATIVE_CONSTANT = _bacnet.PROP_DERIVATIVE_CONSTANT
_bacnet.PROP_DERIVATIVE_CONSTANT_UNITS_swigconstant(_bacnet)
PROP_DERIVATIVE_CONSTANT_UNITS = _bacnet.PROP_DERIVATIVE_CONSTANT_UNITS
_bacnet.PROP_DESCRIPTION_swigconstant(_bacnet)
PROP_DESCRIPTION = _bacnet.PROP_DESCRIPTION
_bacnet.PROP_DESCRIPTION_OF_HALT_swigconstant(_bacnet)
PROP_DESCRIPTION_OF_HALT = _bacnet.PROP_DESCRIPTION_OF_HALT
_bacnet.PROP_DEVICE_ADDRESS_BINDING_swigconstant(_bacnet)
PROP_DEVICE_ADDRESS_BINDING = _bacnet.PROP_DEVICE_ADDRESS_BINDING
_bacnet.PROP_DEVICE_TYPE_swigconstant(_bacnet)
PROP_DEVICE_TYPE = _bacnet.PROP_DEVICE_TYPE
_bacnet.PROP_EFFECTIVE_PERIOD_swigconstant(_bacnet)
PROP_EFFECTIVE_PERIOD = _bacnet.PROP_EFFECTIVE_PERIOD
_bacnet.PROP_ELAPSED_ACTIVE_TIME_swigconstant(_bacnet)
PROP_ELAPSED_ACTIVE_TIME = _bacnet.PROP_ELAPSED_ACTIVE_TIME
_bacnet.PROP_ERROR_LIMIT_swigconstant(_bacnet)
PROP_ERROR_LIMIT = _bacnet.PROP_ERROR_LIMIT
_bacnet.PROP_EVENT_ENABLE_swigconstant(_bacnet)
PROP_EVENT_ENABLE = _bacnet.PROP_EVENT_ENABLE
_bacnet.PROP_EVENT_STATE_swigconstant(_bacnet)
PROP_EVENT_STATE = _bacnet.PROP_EVENT_STATE
_bacnet.PROP_EVENT_TYPE_swigconstant(_bacnet)
PROP_EVENT_TYPE = _bacnet.PROP_EVENT_TYPE
_bacnet.PROP_EXCEPTION_SCHEDULE_swigconstant(_bacnet)
PROP_EXCEPTION_SCHEDULE = _bacnet.PROP_EXCEPTION_SCHEDULE
_bacnet.PROP_FAULT_VALUES_swigconstant(_bacnet)
PROP_FAULT_VALUES = _bacnet.PROP_FAULT_VALUES
_bacnet.PROP_FEEDBACK_VALUE_swigconstant(_bacnet)
PROP_FEEDBACK_VALUE = _bacnet.PROP_FEEDBACK_VALUE
_bacnet.PROP_FILE_ACCESS_METHOD_swigconstant(_bacnet)
PROP_FILE_ACCESS_METHOD = _bacnet.PROP_FILE_ACCESS_METHOD
_bacnet.PROP_FILE_SIZE_swigconstant(_bacnet)
PROP_FILE_SIZE = _bacnet.PROP_FILE_SIZE
_bacnet.PROP_FILE_TYPE_swigconstant(_bacnet)
PROP_FILE_TYPE = _bacnet.PROP_FILE_TYPE
_bacnet.PROP_FIRMWARE_REVISION_swigconstant(_bacnet)
PROP_FIRMWARE_REVISION = _bacnet.PROP_FIRMWARE_REVISION
_bacnet.PROP_HIGH_LIMIT_swigconstant(_bacnet)
PROP_HIGH_LIMIT = _bacnet.PROP_HIGH_LIMIT
_bacnet.PROP_INACTIVE_TEXT_swigconstant(_bacnet)
PROP_INACTIVE_TEXT = _bacnet.PROP_INACTIVE_TEXT
_bacnet.PROP_IN_PROCESS_swigconstant(_bacnet)
PROP_IN_PROCESS = _bacnet.PROP_IN_PROCESS
_bacnet.PROP_INSTANCE_OF_swigconstant(_bacnet)
PROP_INSTANCE_OF = _bacnet.PROP_INSTANCE_OF
_bacnet.PROP_INTEGRAL_CONSTANT_swigconstant(_bacnet)
PROP_INTEGRAL_CONSTANT = _bacnet.PROP_INTEGRAL_CONSTANT
_bacnet.PROP_INTEGRAL_CONSTANT_UNITS_swigconstant(_bacnet)
PROP_INTEGRAL_CONSTANT_UNITS = _bacnet.PROP_INTEGRAL_CONSTANT_UNITS
_bacnet.PROP_ISSUE_CONFIRMED_NOTIFICATIONS_swigconstant(_bacnet)
PROP_ISSUE_CONFIRMED_NOTIFICATIONS = _bacnet.PROP_ISSUE_CONFIRMED_NOTIFICATIONS
_bacnet.PROP_LIMIT_ENABLE_swigconstant(_bacnet)
PROP_LIMIT_ENABLE = _bacnet.PROP_LIMIT_ENABLE
_bacnet.PROP_LIST_OF_GROUP_MEMBERS_swigconstant(_bacnet)
PROP_LIST_OF_GROUP_MEMBERS = _bacnet.PROP_LIST_OF_GROUP_MEMBERS
_bacnet.PROP_LIST_OF_OBJECT_PROPERTY_REFERENCES_swigconstant(_bacnet)
PROP_LIST_OF_OBJECT_PROPERTY_REFERENCES = _bacnet.PROP_LIST_OF_OBJECT_PROPERTY_REFERENCES
_bacnet.PROP_LIST_OF_SESSION_KEYS_swigconstant(_bacnet)
PROP_LIST_OF_SESSION_KEYS = _bacnet.PROP_LIST_OF_SESSION_KEYS
_bacnet.PROP_LOCAL_DATE_swigconstant(_bacnet)
PROP_LOCAL_DATE = _bacnet.PROP_LOCAL_DATE
_bacnet.PROP_LOCAL_TIME_swigconstant(_bacnet)
PROP_LOCAL_TIME = _bacnet.PROP_LOCAL_TIME
_bacnet.PROP_LOCATION_swigconstant(_bacnet)
PROP_LOCATION = _bacnet.PROP_LOCATION
_bacnet.PROP_LOW_LIMIT_swigconstant(_bacnet)
PROP_LOW_LIMIT = _bacnet.PROP_LOW_LIMIT
_bacnet.PROP_MANIPULATED_VARIABLE_REFERENCE_swigconstant(_bacnet)
PROP_MANIPULATED_VARIABLE_REFERENCE = _bacnet.PROP_MANIPULATED_VARIABLE_REFERENCE
_bacnet.PROP_MAXIMUM_OUTPUT_swigconstant(_bacnet)
PROP_MAXIMUM_OUTPUT = _bacnet.PROP_MAXIMUM_OUTPUT
_bacnet.PROP_MAX_APDU_LENGTH_ACCEPTED_swigconstant(_bacnet)
PROP_MAX_APDU_LENGTH_ACCEPTED = _bacnet.PROP_MAX_APDU_LENGTH_ACCEPTED
_bacnet.PROP_MAX_INFO_FRAMES_swigconstant(_bacnet)
PROP_MAX_INFO_FRAMES = _bacnet.PROP_MAX_INFO_FRAMES
_bacnet.PROP_MAX_MASTER_swigconstant(_bacnet)
PROP_MAX_MASTER = _bacnet.PROP_MAX_MASTER
_bacnet.PROP_MAX_PRES_VALUE_swigconstant(_bacnet)
PROP_MAX_PRES_VALUE = _bacnet.PROP_MAX_PRES_VALUE
_bacnet.PROP_MINIMUM_OFF_TIME_swigconstant(_bacnet)
PROP_MINIMUM_OFF_TIME = _bacnet.PROP_MINIMUM_OFF_TIME
_bacnet.PROP_MINIMUM_ON_TIME_swigconstant(_bacnet)
PROP_MINIMUM_ON_TIME = _bacnet.PROP_MINIMUM_ON_TIME
_bacnet.PROP_MINIMUM_OUTPUT_swigconstant(_bacnet)
PROP_MINIMUM_OUTPUT = _bacnet.PROP_MINIMUM_OUTPUT
_bacnet.PROP_MIN_PRES_VALUE_swigconstant(_bacnet)
PROP_MIN_PRES_VALUE = _bacnet.PROP_MIN_PRES_VALUE
_bacnet.PROP_MODEL_NAME_swigconstant(_bacnet)
PROP_MODEL_NAME = _bacnet.PROP_MODEL_NAME
_bacnet.PROP_MODIFICATION_DATE_swigconstant(_bacnet)
PROP_MODIFICATION_DATE = _bacnet.PROP_MODIFICATION_DATE
_bacnet.PROP_NOTIFY_TYPE_swigconstant(_bacnet)
PROP_NOTIFY_TYPE = _bacnet.PROP_NOTIFY_TYPE
_bacnet.PROP_NUMBER_OF_APDU_RETRIES_swigconstant(_bacnet)
PROP_NUMBER_OF_APDU_RETRIES = _bacnet.PROP_NUMBER_OF_APDU_RETRIES
_bacnet.PROP_NUMBER_OF_STATES_swigconstant(_bacnet)
PROP_NUMBER_OF_STATES = _bacnet.PROP_NUMBER_OF_STATES
_bacnet.PROP_OBJECT_IDENTIFIER_swigconstant(_bacnet)
PROP_OBJECT_IDENTIFIER = _bacnet.PROP_OBJECT_IDENTIFIER
_bacnet.PROP_OBJECT_LIST_swigconstant(_bacnet)
PROP_OBJECT_LIST = _bacnet.PROP_OBJECT_LIST
_bacnet.PROP_OBJECT_NAME_swigconstant(_bacnet)
PROP_OBJECT_NAME = _bacnet.PROP_OBJECT_NAME
_bacnet.PROP_OBJECT_PROPERTY_REFERENCE_swigconstant(_bacnet)
PROP_OBJECT_PROPERTY_REFERENCE = _bacnet.PROP_OBJECT_PROPERTY_REFERENCE
_bacnet.PROP_OBJECT_TYPE_swigconstant(_bacnet)
PROP_OBJECT_TYPE = _bacnet.PROP_OBJECT_TYPE
_bacnet.PROP_OPTIONAL_swigconstant(_bacnet)
PROP_OPTIONAL = _bacnet.PROP_OPTIONAL
_bacnet.PROP_OUT_OF_SERVICE_swigconstant(_bacnet)
PROP_OUT_OF_SERVICE = _bacnet.PROP_OUT_OF_SERVICE
_bacnet.PROP_OUTPUT_UNITS_swigconstant(_bacnet)
PROP_OUTPUT_UNITS = _bacnet.PROP_OUTPUT_UNITS
_bacnet.PROP_EVENT_PARAMETERS_swigconstant(_bacnet)
PROP_EVENT_PARAMETERS = _bacnet.PROP_EVENT_PARAMETERS
_bacnet.PROP_POLARITY_swigconstant(_bacnet)
PROP_POLARITY = _bacnet.PROP_POLARITY
_bacnet.PROP_PRESENT_VALUE_swigconstant(_bacnet)
PROP_PRESENT_VALUE = _bacnet.PROP_PRESENT_VALUE
_bacnet.PROP_PRIORITY_swigconstant(_bacnet)
PROP_PRIORITY = _bacnet.PROP_PRIORITY
_bacnet.PROP_PRIORITY_ARRAY_swigconstant(_bacnet)
PROP_PRIORITY_ARRAY = _bacnet.PROP_PRIORITY_ARRAY
_bacnet.PROP_PRIORITY_FOR_WRITING_swigconstant(_bacnet)
PROP_PRIORITY_FOR_WRITING = _bacnet.PROP_PRIORITY_FOR_WRITING
_bacnet.PROP_PROCESS_IDENTIFIER_swigconstant(_bacnet)
PROP_PROCESS_IDENTIFIER = _bacnet.PROP_PROCESS_IDENTIFIER
_bacnet.PROP_PROGRAM_CHANGE_swigconstant(_bacnet)
PROP_PROGRAM_CHANGE = _bacnet.PROP_PROGRAM_CHANGE
_bacnet.PROP_PROGRAM_LOCATION_swigconstant(_bacnet)
PROP_PROGRAM_LOCATION = _bacnet.PROP_PROGRAM_LOCATION
_bacnet.PROP_PROGRAM_STATE_swigconstant(_bacnet)
PROP_PROGRAM_STATE = _bacnet.PROP_PROGRAM_STATE
_bacnet.PROP_PROPORTIONAL_CONSTANT_swigconstant(_bacnet)
PROP_PROPORTIONAL_CONSTANT = _bacnet.PROP_PROPORTIONAL_CONSTANT
_bacnet.PROP_PROPORTIONAL_CONSTANT_UNITS_swigconstant(_bacnet)
PROP_PROPORTIONAL_CONSTANT_UNITS = _bacnet.PROP_PROPORTIONAL_CONSTANT_UNITS
_bacnet.PROP_PROTOCOL_CONFORMANCE_CLASS_swigconstant(_bacnet)
PROP_PROTOCOL_CONFORMANCE_CLASS = _bacnet.PROP_PROTOCOL_CONFORMANCE_CLASS
_bacnet.PROP_PROTOCOL_OBJECT_TYPES_SUPPORTED_swigconstant(_bacnet)
PROP_PROTOCOL_OBJECT_TYPES_SUPPORTED = _bacnet.PROP_PROTOCOL_OBJECT_TYPES_SUPPORTED
_bacnet.PROP_PROTOCOL_SERVICES_SUPPORTED_swigconstant(_bacnet)
PROP_PROTOCOL_SERVICES_SUPPORTED = _bacnet.PROP_PROTOCOL_SERVICES_SUPPORTED
_bacnet.PROP_PROTOCOL_VERSION_swigconstant(_bacnet)
PROP_PROTOCOL_VERSION = _bacnet.PROP_PROTOCOL_VERSION
_bacnet.PROP_READ_ONLY_swigconstant(_bacnet)
PROP_READ_ONLY = _bacnet.PROP_READ_ONLY
_bacnet.PROP_REASON_FOR_HALT_swigconstant(_bacnet)
PROP_REASON_FOR_HALT = _bacnet.PROP_REASON_FOR_HALT
_bacnet.PROP_RECIPIENT_swigconstant(_bacnet)
PROP_RECIPIENT = _bacnet.PROP_RECIPIENT
_bacnet.PROP_RECIPIENT_LIST_swigconstant(_bacnet)
PROP_RECIPIENT_LIST = _bacnet.PROP_RECIPIENT_LIST
_bacnet.PROP_RELIABILITY_swigconstant(_bacnet)
PROP_RELIABILITY = _bacnet.PROP_RELIABILITY
_bacnet.PROP_RELINQUISH_DEFAULT_swigconstant(_bacnet)
PROP_RELINQUISH_DEFAULT = _bacnet.PROP_RELINQUISH_DEFAULT
_bacnet.PROP_REQUIRED_swigconstant(_bacnet)
PROP_REQUIRED = _bacnet.PROP_REQUIRED
_bacnet.PROP_RESOLUTION_swigconstant(_bacnet)
PROP_RESOLUTION = _bacnet.PROP_RESOLUTION
_bacnet.PROP_SEGMENTATION_SUPPORTED_swigconstant(_bacnet)
PROP_SEGMENTATION_SUPPORTED = _bacnet.PROP_SEGMENTATION_SUPPORTED
_bacnet.PROP_SETPOINT_swigconstant(_bacnet)
PROP_SETPOINT = _bacnet.PROP_SETPOINT
_bacnet.PROP_SETPOINT_REFERENCE_swigconstant(_bacnet)
PROP_SETPOINT_REFERENCE = _bacnet.PROP_SETPOINT_REFERENCE
_bacnet.PROP_STATE_TEXT_swigconstant(_bacnet)
PROP_STATE_TEXT = _bacnet.PROP_STATE_TEXT
_bacnet.PROP_STATUS_FLAGS_swigconstant(_bacnet)
PROP_STATUS_FLAGS = _bacnet.PROP_STATUS_FLAGS
_bacnet.PROP_SYSTEM_STATUS_swigconstant(_bacnet)
PROP_SYSTEM_STATUS = _bacnet.PROP_SYSTEM_STATUS
_bacnet.PROP_TIME_DELAY_swigconstant(_bacnet)
PROP_TIME_DELAY = _bacnet.PROP_TIME_DELAY
_bacnet.PROP_TIME_OF_ACTIVE_TIME_RESET_swigconstant(_bacnet)
PROP_TIME_OF_ACTIVE_TIME_RESET = _bacnet.PROP_TIME_OF_ACTIVE_TIME_RESET
_bacnet.PROP_TIME_OF_STATE_COUNT_RESET_swigconstant(_bacnet)
PROP_TIME_OF_STATE_COUNT_RESET = _bacnet.PROP_TIME_OF_STATE_COUNT_RESET
_bacnet.PROP_TIME_SYNCHRONIZATION_RECIPIENTS_swigconstant(_bacnet)
PROP_TIME_SYNCHRONIZATION_RECIPIENTS = _bacnet.PROP_TIME_SYNCHRONIZATION_RECIPIENTS
_bacnet.PROP_UNITS_swigconstant(_bacnet)
PROP_UNITS = _bacnet.PROP_UNITS
_bacnet.PROP_UPDATE_INTERVAL_swigconstant(_bacnet)
PROP_UPDATE_INTERVAL = _bacnet.PROP_UPDATE_INTERVAL
_bacnet.PROP_UTC_OFFSET_swigconstant(_bacnet)
PROP_UTC_OFFSET = _bacnet.PROP_UTC_OFFSET
_bacnet.PROP_VENDOR_IDENTIFIER_swigconstant(_bacnet)
PROP_VENDOR_IDENTIFIER = _bacnet.PROP_VENDOR_IDENTIFIER
_bacnet.PROP_VENDOR_NAME_swigconstant(_bacnet)
PROP_VENDOR_NAME = _bacnet.PROP_VENDOR_NAME
_bacnet.PROP_VT_CLASSES_SUPPORTED_swigconstant(_bacnet)
PROP_VT_CLASSES_SUPPORTED = _bacnet.PROP_VT_CLASSES_SUPPORTED
_bacnet.PROP_WEEKLY_SCHEDULE_swigconstant(_bacnet)
PROP_WEEKLY_SCHEDULE = _bacnet.PROP_WEEKLY_SCHEDULE
_bacnet.PROP_ATTEMPTED_SAMPLES_swigconstant(_bacnet)
PROP_ATTEMPTED_SAMPLES = _bacnet.PROP_ATTEMPTED_SAMPLES
_bacnet.PROP_AVERAGE_VALUE_swigconstant(_bacnet)
PROP_AVERAGE_VALUE = _bacnet.PROP_AVERAGE_VALUE
_bacnet.PROP_BUFFER_SIZE_swigconstant(_bacnet)
PROP_BUFFER_SIZE = _bacnet.PROP_BUFFER_SIZE
_bacnet.PROP_CLIENT_COV_INCREMENT_swigconstant(_bacnet)
PROP_CLIENT_COV_INCREMENT = _bacnet.PROP_CLIENT_COV_INCREMENT
_bacnet.PROP_COV_RESUBSCRIPTION_INTERVAL_swigconstant(_bacnet)
PROP_COV_RESUBSCRIPTION_INTERVAL = _bacnet.PROP_COV_RESUBSCRIPTION_INTERVAL
_bacnet.PROP_CURRENT_NOTIFY_TIME_swigconstant(_bacnet)
PROP_CURRENT_NOTIFY_TIME = _bacnet.PROP_CURRENT_NOTIFY_TIME
_bacnet.PROP_EVENT_TIME_STAMPS_swigconstant(_bacnet)
PROP_EVENT_TIME_STAMPS = _bacnet.PROP_EVENT_TIME_STAMPS
_bacnet.PROP_LOG_BUFFER_swigconstant(_bacnet)
PROP_LOG_BUFFER = _bacnet.PROP_LOG_BUFFER
_bacnet.PROP_LOG_DEVICE_OBJECT_PROPERTY_swigconstant(_bacnet)
PROP_LOG_DEVICE_OBJECT_PROPERTY = _bacnet.PROP_LOG_DEVICE_OBJECT_PROPERTY
_bacnet.PROP_ENABLE_swigconstant(_bacnet)
PROP_ENABLE = _bacnet.PROP_ENABLE
_bacnet.PROP_LOG_INTERVAL_swigconstant(_bacnet)
PROP_LOG_INTERVAL = _bacnet.PROP_LOG_INTERVAL
_bacnet.PROP_MAXIMUM_VALUE_swigconstant(_bacnet)
PROP_MAXIMUM_VALUE = _bacnet.PROP_MAXIMUM_VALUE
_bacnet.PROP_MINIMUM_VALUE_swigconstant(_bacnet)
PROP_MINIMUM_VALUE = _bacnet.PROP_MINIMUM_VALUE
_bacnet.PROP_NOTIFICATION_THRESHOLD_swigconstant(_bacnet)
PROP_NOTIFICATION_THRESHOLD = _bacnet.PROP_NOTIFICATION_THRESHOLD
_bacnet.PROP_PREVIOUS_NOTIFY_TIME_swigconstant(_bacnet)
PROP_PREVIOUS_NOTIFY_TIME = _bacnet.PROP_PREVIOUS_NOTIFY_TIME
_bacnet.PROP_PROTOCOL_REVISION_swigconstant(_bacnet)
PROP_PROTOCOL_REVISION = _bacnet.PROP_PROTOCOL_REVISION
_bacnet.PROP_RECORDS_SINCE_NOTIFICATION_swigconstant(_bacnet)
PROP_RECORDS_SINCE_NOTIFICATION = _bacnet.PROP_RECORDS_SINCE_NOTIFICATION
_bacnet.PROP_RECORD_COUNT_swigconstant(_bacnet)
PROP_RECORD_COUNT = _bacnet.PROP_RECORD_COUNT
_bacnet.PROP_START_TIME_swigconstant(_bacnet)
PROP_START_TIME = _bacnet.PROP_START_TIME
_bacnet.PROP_STOP_TIME_swigconstant(_bacnet)
PROP_STOP_TIME = _bacnet.PROP_STOP_TIME
_bacnet.PROP_STOP_WHEN_FULL_swigconstant(_bacnet)
PROP_STOP_WHEN_FULL = _bacnet.PROP_STOP_WHEN_FULL
_bacnet.PROP_TOTAL_RECORD_COUNT_swigconstant(_bacnet)
PROP_TOTAL_RECORD_COUNT = _bacnet.PROP_TOTAL_RECORD_COUNT
_bacnet.PROP_VALID_SAMPLES_swigconstant(_bacnet)
PROP_VALID_SAMPLES = _bacnet.PROP_VALID_SAMPLES
_bacnet.PROP_WINDOW_INTERVAL_swigconstant(_bacnet)
PROP_WINDOW_INTERVAL = _bacnet.PROP_WINDOW_INTERVAL
_bacnet.PROP_WINDOW_SAMPLES_swigconstant(_bacnet)
PROP_WINDOW_SAMPLES = _bacnet.PROP_WINDOW_SAMPLES
_bacnet.PROP_MAXIMUM_VALUE_TIMESTAMP_swigconstant(_bacnet)
PROP_MAXIMUM_VALUE_TIMESTAMP = _bacnet.PROP_MAXIMUM_VALUE_TIMESTAMP
_bacnet.PROP_MINIMUM_VALUE_TIMESTAMP_swigconstant(_bacnet)
PROP_MINIMUM_VALUE_TIMESTAMP = _bacnet.PROP_MINIMUM_VALUE_TIMESTAMP
_bacnet.PROP_VARIANCE_VALUE_swigconstant(_bacnet)
PROP_VARIANCE_VALUE = _bacnet.PROP_VARIANCE_VALUE
_bacnet.PROP_ACTIVE_COV_SUBSCRIPTIONS_swigconstant(_bacnet)
PROP_ACTIVE_COV_SUBSCRIPTIONS = _bacnet.PROP_ACTIVE_COV_SUBSCRIPTIONS
_bacnet.PROP_BACKUP_FAILURE_TIMEOUT_swigconstant(_bacnet)
PROP_BACKUP_FAILURE_TIMEOUT = _bacnet.PROP_BACKUP_FAILURE_TIMEOUT
_bacnet.PROP_CONFIGURATION_FILES_swigconstant(_bacnet)
PROP_CONFIGURATION_FILES = _bacnet.PROP_CONFIGURATION_FILES
_bacnet.PROP_DATABASE_REVISION_swigconstant(_bacnet)
PROP_DATABASE_REVISION = _bacnet.PROP_DATABASE_REVISION
_bacnet.PROP_DIRECT_READING_swigconstant(_bacnet)
PROP_DIRECT_READING = _bacnet.PROP_DIRECT_READING
_bacnet.PROP_LAST_RESTORE_TIME_swigconstant(_bacnet)
PROP_LAST_RESTORE_TIME = _bacnet.PROP_LAST_RESTORE_TIME
_bacnet.PROP_MAINTENANCE_REQUIRED_swigconstant(_bacnet)
PROP_MAINTENANCE_REQUIRED = _bacnet.PROP_MAINTENANCE_REQUIRED
_bacnet.PROP_MEMBER_OF_swigconstant(_bacnet)
PROP_MEMBER_OF = _bacnet.PROP_MEMBER_OF
_bacnet.PROP_MODE_swigconstant(_bacnet)
PROP_MODE = _bacnet.PROP_MODE
_bacnet.PROP_OPERATION_EXPECTED_swigconstant(_bacnet)
PROP_OPERATION_EXPECTED = _bacnet.PROP_OPERATION_EXPECTED
_bacnet.PROP_SETTING_swigconstant(_bacnet)
PROP_SETTING = _bacnet.PROP_SETTING
_bacnet.PROP_SILENCED_swigconstant(_bacnet)
PROP_SILENCED = _bacnet.PROP_SILENCED
_bacnet.PROP_TRACKING_VALUE_swigconstant(_bacnet)
PROP_TRACKING_VALUE = _bacnet.PROP_TRACKING_VALUE
_bacnet.PROP_ZONE_MEMBERS_swigconstant(_bacnet)
PROP_ZONE_MEMBERS = _bacnet.PROP_ZONE_MEMBERS
_bacnet.PROP_LIFE_SAFETY_ALARM_VALUES_swigconstant(_bacnet)
PROP_LIFE_SAFETY_ALARM_VALUES = _bacnet.PROP_LIFE_SAFETY_ALARM_VALUES
_bacnet.PROP_MAX_SEGMENTS_ACCEPTED_swigconstant(_bacnet)
PROP_MAX_SEGMENTS_ACCEPTED = _bacnet.PROP_MAX_SEGMENTS_ACCEPTED
_bacnet.PROP_PROFILE_NAME_swigconstant(_bacnet)
PROP_PROFILE_NAME = _bacnet.PROP_PROFILE_NAME
_bacnet.PROP_AUTO_SLAVE_DISCOVERY_swigconstant(_bacnet)
PROP_AUTO_SLAVE_DISCOVERY = _bacnet.PROP_AUTO_SLAVE_DISCOVERY
_bacnet.PROP_MANUAL_SLAVE_ADDRESS_BINDING_swigconstant(_bacnet)
PROP_MANUAL_SLAVE_ADDRESS_BINDING = _bacnet.PROP_MANUAL_SLAVE_ADDRESS_BINDING
_bacnet.PROP_SLAVE_ADDRESS_BINDING_swigconstant(_bacnet)
PROP_SLAVE_ADDRESS_BINDING = _bacnet.PROP_SLAVE_ADDRESS_BINDING
_bacnet.PROP_SLAVE_PROXY_ENABLE_swigconstant(_bacnet)
PROP_SLAVE_PROXY_ENABLE = _bacnet.PROP_SLAVE_PROXY_ENABLE
_bacnet.PROP_LAST_NOTIFY_RECORD_swigconstant(_bacnet)
PROP_LAST_NOTIFY_RECORD = _bacnet.PROP_LAST_NOTIFY_RECORD
_bacnet.PROP_SCHEDULE_DEFAULT_swigconstant(_bacnet)
PROP_SCHEDULE_DEFAULT = _bacnet.PROP_SCHEDULE_DEFAULT
_bacnet.PROP_ACCEPTED_MODES_swigconstant(_bacnet)
PROP_ACCEPTED_MODES = _bacnet.PROP_ACCEPTED_MODES
_bacnet.PROP_ADJUST_VALUE_swigconstant(_bacnet)
PROP_ADJUST_VALUE = _bacnet.PROP_ADJUST_VALUE
_bacnet.PROP_COUNT_swigconstant(_bacnet)
PROP_COUNT = _bacnet.PROP_COUNT
_bacnet.PROP_COUNT_BEFORE_CHANGE_swigconstant(_bacnet)
PROP_COUNT_BEFORE_CHANGE = _bacnet.PROP_COUNT_BEFORE_CHANGE
_bacnet.PROP_COUNT_CHANGE_TIME_swigconstant(_bacnet)
PROP_COUNT_CHANGE_TIME = _bacnet.PROP_COUNT_CHANGE_TIME
_bacnet.PROP_COV_PERIOD_swigconstant(_bacnet)
PROP_COV_PERIOD = _bacnet.PROP_COV_PERIOD
_bacnet.PROP_INPUT_REFERENCE_swigconstant(_bacnet)
PROP_INPUT_REFERENCE = _bacnet.PROP_INPUT_REFERENCE
_bacnet.PROP_LIMIT_MONITORING_INTERVAL_swigconstant(_bacnet)
PROP_LIMIT_MONITORING_INTERVAL = _bacnet.PROP_LIMIT_MONITORING_INTERVAL
_bacnet.PROP_LOGGING_OBJECT_swigconstant(_bacnet)
PROP_LOGGING_OBJECT = _bacnet.PROP_LOGGING_OBJECT
_bacnet.PROP_LOGGING_RECORD_swigconstant(_bacnet)
PROP_LOGGING_RECORD = _bacnet.PROP_LOGGING_RECORD
_bacnet.PROP_PRESCALE_swigconstant(_bacnet)
PROP_PRESCALE = _bacnet.PROP_PRESCALE
_bacnet.PROP_PULSE_RATE_swigconstant(_bacnet)
PROP_PULSE_RATE = _bacnet.PROP_PULSE_RATE
_bacnet.PROP_SCALE_swigconstant(_bacnet)
PROP_SCALE = _bacnet.PROP_SCALE
_bacnet.PROP_SCALE_FACTOR_swigconstant(_bacnet)
PROP_SCALE_FACTOR = _bacnet.PROP_SCALE_FACTOR
_bacnet.PROP_UPDATE_TIME_swigconstant(_bacnet)
PROP_UPDATE_TIME = _bacnet.PROP_UPDATE_TIME
_bacnet.PROP_VALUE_BEFORE_CHANGE_swigconstant(_bacnet)
PROP_VALUE_BEFORE_CHANGE = _bacnet.PROP_VALUE_BEFORE_CHANGE
_bacnet.PROP_VALUE_SET_swigconstant(_bacnet)
PROP_VALUE_SET = _bacnet.PROP_VALUE_SET
_bacnet.PROP_VALUE_CHANGE_TIME_swigconstant(_bacnet)
PROP_VALUE_CHANGE_TIME = _bacnet.PROP_VALUE_CHANGE_TIME
_bacnet.PROP_ALIGN_INTERVALS_swigconstant(_bacnet)
PROP_ALIGN_INTERVALS = _bacnet.PROP_ALIGN_INTERVALS
_bacnet.PROP_INTERVAL_OFFSET_swigconstant(_bacnet)
PROP_INTERVAL_OFFSET = _bacnet.PROP_INTERVAL_OFFSET
_bacnet.PROP_LAST_RESTART_REASON_swigconstant(_bacnet)
PROP_LAST_RESTART_REASON = _bacnet.PROP_LAST_RESTART_REASON
_bacnet.PROP_LOGGING_TYPE_swigconstant(_bacnet)
PROP_LOGGING_TYPE = _bacnet.PROP_LOGGING_TYPE
_bacnet.PROP_RESTART_NOTIFICATION_RECIPIENTS_swigconstant(_bacnet)
PROP_RESTART_NOTIFICATION_RECIPIENTS = _bacnet.PROP_RESTART_NOTIFICATION_RECIPIENTS
_bacnet.PROP_TIME_OF_DEVICE_RESTART_swigconstant(_bacnet)
PROP_TIME_OF_DEVICE_RESTART = _bacnet.PROP_TIME_OF_DEVICE_RESTART
_bacnet.PROP_TIME_SYNCHRONIZATION_INTERVAL_swigconstant(_bacnet)
PROP_TIME_SYNCHRONIZATION_INTERVAL = _bacnet.PROP_TIME_SYNCHRONIZATION_INTERVAL
_bacnet.PROP_TRIGGER_swigconstant(_bacnet)
PROP_TRIGGER = _bacnet.PROP_TRIGGER
_bacnet.PROP_UTC_TIME_SYNCHRONIZATION_RECIPIENTS_swigconstant(_bacnet)
PROP_UTC_TIME_SYNCHRONIZATION_RECIPIENTS = _bacnet.PROP_UTC_TIME_SYNCHRONIZATION_RECIPIENTS
_bacnet.PROP_NODE_SUBTYPE_swigconstant(_bacnet)
PROP_NODE_SUBTYPE = _bacnet.PROP_NODE_SUBTYPE
_bacnet.PROP_NODE_TYPE_swigconstant(_bacnet)
PROP_NODE_TYPE = _bacnet.PROP_NODE_TYPE
_bacnet.PROP_STRUCTURED_OBJECT_LIST_swigconstant(_bacnet)
PROP_STRUCTURED_OBJECT_LIST = _bacnet.PROP_STRUCTURED_OBJECT_LIST
_bacnet.PROP_SUBORDINATE_ANNOTATIONS_swigconstant(_bacnet)
PROP_SUBORDINATE_ANNOTATIONS = _bacnet.PROP_SUBORDINATE_ANNOTATIONS
_bacnet.PROP_SUBORDINATE_LIST_swigconstant(_bacnet)
PROP_SUBORDINATE_LIST = _bacnet.PROP_SUBORDINATE_LIST
_bacnet.PROP_ACTUAL_SHED_LEVEL_swigconstant(_bacnet)
PROP_ACTUAL_SHED_LEVEL = _bacnet.PROP_ACTUAL_SHED_LEVEL
_bacnet.PROP_DUTY_WINDOW_swigconstant(_bacnet)
PROP_DUTY_WINDOW = _bacnet.PROP_DUTY_WINDOW
_bacnet.PROP_EXPECTED_SHED_LEVEL_swigconstant(_bacnet)
PROP_EXPECTED_SHED_LEVEL = _bacnet.PROP_EXPECTED_SHED_LEVEL
_bacnet.PROP_FULL_DUTY_BASELINE_swigconstant(_bacnet)
PROP_FULL_DUTY_BASELINE = _bacnet.PROP_FULL_DUTY_BASELINE
_bacnet.PROP_REQUESTED_SHED_LEVEL_swigconstant(_bacnet)
PROP_REQUESTED_SHED_LEVEL = _bacnet.PROP_REQUESTED_SHED_LEVEL
_bacnet.PROP_SHED_DURATION_swigconstant(_bacnet)
PROP_SHED_DURATION = _bacnet.PROP_SHED_DURATION
_bacnet.PROP_SHED_LEVEL_DESCRIPTIONS_swigconstant(_bacnet)
PROP_SHED_LEVEL_DESCRIPTIONS = _bacnet.PROP_SHED_LEVEL_DESCRIPTIONS
_bacnet.PROP_SHED_LEVELS_swigconstant(_bacnet)
PROP_SHED_LEVELS = _bacnet.PROP_SHED_LEVELS
_bacnet.PROP_STATE_DESCRIPTION_swigconstant(_bacnet)
PROP_STATE_DESCRIPTION = _bacnet.PROP_STATE_DESCRIPTION
_bacnet.PROP_DOOR_ALARM_STATE_swigconstant(_bacnet)
PROP_DOOR_ALARM_STATE = _bacnet.PROP_DOOR_ALARM_STATE
_bacnet.PROP_DOOR_EXTENDED_PULSE_TIME_swigconstant(_bacnet)
PROP_DOOR_EXTENDED_PULSE_TIME = _bacnet.PROP_DOOR_EXTENDED_PULSE_TIME
_bacnet.PROP_DOOR_MEMBERS_swigconstant(_bacnet)
PROP_DOOR_MEMBERS = _bacnet.PROP_DOOR_MEMBERS
_bacnet.PROP_DOOR_OPEN_TOO_LONG_TIME_swigconstant(_bacnet)
PROP_DOOR_OPEN_TOO_LONG_TIME = _bacnet.PROP_DOOR_OPEN_TOO_LONG_TIME
_bacnet.PROP_DOOR_PULSE_TIME_swigconstant(_bacnet)
PROP_DOOR_PULSE_TIME = _bacnet.PROP_DOOR_PULSE_TIME
_bacnet.PROP_DOOR_STATUS_swigconstant(_bacnet)
PROP_DOOR_STATUS = _bacnet.PROP_DOOR_STATUS
_bacnet.PROP_DOOR_UNLOCK_DELAY_TIME_swigconstant(_bacnet)
PROP_DOOR_UNLOCK_DELAY_TIME = _bacnet.PROP_DOOR_UNLOCK_DELAY_TIME
_bacnet.PROP_LOCK_STATUS_swigconstant(_bacnet)
PROP_LOCK_STATUS = _bacnet.PROP_LOCK_STATUS
_bacnet.PROP_MASKED_ALARM_VALUES_swigconstant(_bacnet)
PROP_MASKED_ALARM_VALUES = _bacnet.PROP_MASKED_ALARM_VALUES
_bacnet.PROP_SECURED_STATUS_swigconstant(_bacnet)
PROP_SECURED_STATUS = _bacnet.PROP_SECURED_STATUS
_bacnet.PROP_ABSENTEE_LIMIT_swigconstant(_bacnet)
PROP_ABSENTEE_LIMIT = _bacnet.PROP_ABSENTEE_LIMIT
_bacnet.PROP_ACCESS_ALARM_EVENTS_swigconstant(_bacnet)
PROP_ACCESS_ALARM_EVENTS = _bacnet.PROP_ACCESS_ALARM_EVENTS
_bacnet.PROP_ACCESS_DOORS_swigconstant(_bacnet)
PROP_ACCESS_DOORS = _bacnet.PROP_ACCESS_DOORS
_bacnet.PROP_ACCESS_EVENT_swigconstant(_bacnet)
PROP_ACCESS_EVENT = _bacnet.PROP_ACCESS_EVENT
_bacnet.PROP_ACCESS_EVENT_AUTHENTICATION_FACTOR_swigconstant(_bacnet)
PROP_ACCESS_EVENT_AUTHENTICATION_FACTOR = _bacnet.PROP_ACCESS_EVENT_AUTHENTICATION_FACTOR
_bacnet.PROP_ACCESS_EVENT_CREDENTIAL_swigconstant(_bacnet)
PROP_ACCESS_EVENT_CREDENTIAL = _bacnet.PROP_ACCESS_EVENT_CREDENTIAL
_bacnet.PROP_ACCESS_EVENT_TIME_swigconstant(_bacnet)
PROP_ACCESS_EVENT_TIME = _bacnet.PROP_ACCESS_EVENT_TIME
_bacnet.PROP_ACCESS_TRANSACTION_EVENTS_swigconstant(_bacnet)
PROP_ACCESS_TRANSACTION_EVENTS = _bacnet.PROP_ACCESS_TRANSACTION_EVENTS
_bacnet.PROP_ACCOMPANIMENT_swigconstant(_bacnet)
PROP_ACCOMPANIMENT = _bacnet.PROP_ACCOMPANIMENT
_bacnet.PROP_ACCOMPANIMENT_TIME_swigconstant(_bacnet)
PROP_ACCOMPANIMENT_TIME = _bacnet.PROP_ACCOMPANIMENT_TIME
_bacnet.PROP_ACTIVATION_TIME_swigconstant(_bacnet)
PROP_ACTIVATION_TIME = _bacnet.PROP_ACTIVATION_TIME
_bacnet.PROP_ACTIVE_AUTHENTICATION_POLICY_swigconstant(_bacnet)
PROP_ACTIVE_AUTHENTICATION_POLICY = _bacnet.PROP_ACTIVE_AUTHENTICATION_POLICY
_bacnet.PROP_ASSIGNED_ACCESS_RIGHTS_swigconstant(_bacnet)
PROP_ASSIGNED_ACCESS_RIGHTS = _bacnet.PROP_ASSIGNED_ACCESS_RIGHTS
_bacnet.PROP_AUTHENTICATION_FACTORS_swigconstant(_bacnet)
PROP_AUTHENTICATION_FACTORS = _bacnet.PROP_AUTHENTICATION_FACTORS
_bacnet.PROP_AUTHENTICATION_POLICY_LIST_swigconstant(_bacnet)
PROP_AUTHENTICATION_POLICY_LIST = _bacnet.PROP_AUTHENTICATION_POLICY_LIST
_bacnet.PROP_AUTHENTICATION_POLICY_NAMES_swigconstant(_bacnet)
PROP_AUTHENTICATION_POLICY_NAMES = _bacnet.PROP_AUTHENTICATION_POLICY_NAMES
_bacnet.PROP_AUTHORIZATION_STATUS_swigconstant(_bacnet)
PROP_AUTHORIZATION_STATUS = _bacnet.PROP_AUTHORIZATION_STATUS
_bacnet.PROP_AUTHORIZATION_MODE_swigconstant(_bacnet)
PROP_AUTHORIZATION_MODE = _bacnet.PROP_AUTHORIZATION_MODE
_bacnet.PROP_BELONGS_TO_swigconstant(_bacnet)
PROP_BELONGS_TO = _bacnet.PROP_BELONGS_TO
_bacnet.PROP_CREDENTIAL_DISABLE_swigconstant(_bacnet)
PROP_CREDENTIAL_DISABLE = _bacnet.PROP_CREDENTIAL_DISABLE
_bacnet.PROP_CREDENTIAL_STATUS_swigconstant(_bacnet)
PROP_CREDENTIAL_STATUS = _bacnet.PROP_CREDENTIAL_STATUS
_bacnet.PROP_CREDENTIALS_swigconstant(_bacnet)
PROP_CREDENTIALS = _bacnet.PROP_CREDENTIALS
_bacnet.PROP_CREDENTIALS_IN_ZONE_swigconstant(_bacnet)
PROP_CREDENTIALS_IN_ZONE = _bacnet.PROP_CREDENTIALS_IN_ZONE
_bacnet.PROP_DAYS_REMAINING_swigconstant(_bacnet)
PROP_DAYS_REMAINING = _bacnet.PROP_DAYS_REMAINING
_bacnet.PROP_ENTRY_POINTS_swigconstant(_bacnet)
PROP_ENTRY_POINTS = _bacnet.PROP_ENTRY_POINTS
_bacnet.PROP_EXIT_POINTS_swigconstant(_bacnet)
PROP_EXIT_POINTS = _bacnet.PROP_EXIT_POINTS
_bacnet.PROP_EXPIRY_TIME_swigconstant(_bacnet)
PROP_EXPIRY_TIME = _bacnet.PROP_EXPIRY_TIME
_bacnet.PROP_EXTENDED_TIME_ENABLE_swigconstant(_bacnet)
PROP_EXTENDED_TIME_ENABLE = _bacnet.PROP_EXTENDED_TIME_ENABLE
_bacnet.PROP_FAILED_ATTEMPT_EVENTS_swigconstant(_bacnet)
PROP_FAILED_ATTEMPT_EVENTS = _bacnet.PROP_FAILED_ATTEMPT_EVENTS
_bacnet.PROP_FAILED_ATTEMPTS_swigconstant(_bacnet)
PROP_FAILED_ATTEMPTS = _bacnet.PROP_FAILED_ATTEMPTS
_bacnet.PROP_FAILED_ATTEMPTS_TIME_swigconstant(_bacnet)
PROP_FAILED_ATTEMPTS_TIME = _bacnet.PROP_FAILED_ATTEMPTS_TIME
_bacnet.PROP_LAST_ACCESS_EVENT_swigconstant(_bacnet)
PROP_LAST_ACCESS_EVENT = _bacnet.PROP_LAST_ACCESS_EVENT
_bacnet.PROP_LAST_ACCESS_POINT_swigconstant(_bacnet)
PROP_LAST_ACCESS_POINT = _bacnet.PROP_LAST_ACCESS_POINT
_bacnet.PROP_LAST_CREDENTIAL_ADDED_swigconstant(_bacnet)
PROP_LAST_CREDENTIAL_ADDED = _bacnet.PROP_LAST_CREDENTIAL_ADDED
_bacnet.PROP_LAST_CREDENTIAL_ADDED_TIME_swigconstant(_bacnet)
PROP_LAST_CREDENTIAL_ADDED_TIME = _bacnet.PROP_LAST_CREDENTIAL_ADDED_TIME
_bacnet.PROP_LAST_CREDENTIAL_REMOVED_swigconstant(_bacnet)
PROP_LAST_CREDENTIAL_REMOVED = _bacnet.PROP_LAST_CREDENTIAL_REMOVED
_bacnet.PROP_LAST_CREDENTIAL_REMOVED_TIME_swigconstant(_bacnet)
PROP_LAST_CREDENTIAL_REMOVED_TIME = _bacnet.PROP_LAST_CREDENTIAL_REMOVED_TIME
_bacnet.PROP_LAST_USE_TIME_swigconstant(_bacnet)
PROP_LAST_USE_TIME = _bacnet.PROP_LAST_USE_TIME
_bacnet.PROP_LOCKOUT_swigconstant(_bacnet)
PROP_LOCKOUT = _bacnet.PROP_LOCKOUT
_bacnet.PROP_LOCKOUT_RELINQUISH_TIME_swigconstant(_bacnet)
PROP_LOCKOUT_RELINQUISH_TIME = _bacnet.PROP_LOCKOUT_RELINQUISH_TIME
_bacnet.PROP_MASTER_EXEMPTION_swigconstant(_bacnet)
PROP_MASTER_EXEMPTION = _bacnet.PROP_MASTER_EXEMPTION
_bacnet.PROP_MAX_FAILED_ATTEMPTS_swigconstant(_bacnet)
PROP_MAX_FAILED_ATTEMPTS = _bacnet.PROP_MAX_FAILED_ATTEMPTS
_bacnet.PROP_MEMBERS_swigconstant(_bacnet)
PROP_MEMBERS = _bacnet.PROP_MEMBERS
_bacnet.PROP_MUSTER_POINT_swigconstant(_bacnet)
PROP_MUSTER_POINT = _bacnet.PROP_MUSTER_POINT
_bacnet.PROP_NEGATIVE_ACCESS_RULES_swigconstant(_bacnet)
PROP_NEGATIVE_ACCESS_RULES = _bacnet.PROP_NEGATIVE_ACCESS_RULES
_bacnet.PROP_NUMBER_OF_AUTHENTICATION_POLICIES_swigconstant(_bacnet)
PROP_NUMBER_OF_AUTHENTICATION_POLICIES = _bacnet.PROP_NUMBER_OF_AUTHENTICATION_POLICIES
_bacnet.PROP_OCCUPANCY_COUNT_swigconstant(_bacnet)
PROP_OCCUPANCY_COUNT = _bacnet.PROP_OCCUPANCY_COUNT
_bacnet.PROP_OCCUPANCY_COUNT_ADJUST_swigconstant(_bacnet)
PROP_OCCUPANCY_COUNT_ADJUST = _bacnet.PROP_OCCUPANCY_COUNT_ADJUST
_bacnet.PROP_OCCUPANCY_COUNT_ENABLE_swigconstant(_bacnet)
PROP_OCCUPANCY_COUNT_ENABLE = _bacnet.PROP_OCCUPANCY_COUNT_ENABLE
_bacnet.PROP_OCCUPANCY_EXEMPTION_swigconstant(_bacnet)
PROP_OCCUPANCY_EXEMPTION = _bacnet.PROP_OCCUPANCY_EXEMPTION
_bacnet.PROP_OCCUPANCY_LOWER_LIMIT_swigconstant(_bacnet)
PROP_OCCUPANCY_LOWER_LIMIT = _bacnet.PROP_OCCUPANCY_LOWER_LIMIT
_bacnet.PROP_OCCUPANCY_LOWER_LIMIT_ENFORCED_swigconstant(_bacnet)
PROP_OCCUPANCY_LOWER_LIMIT_ENFORCED = _bacnet.PROP_OCCUPANCY_LOWER_LIMIT_ENFORCED
_bacnet.PROP_OCCUPANCY_STATE_swigconstant(_bacnet)
PROP_OCCUPANCY_STATE = _bacnet.PROP_OCCUPANCY_STATE
_bacnet.PROP_OCCUPANCY_UPPER_LIMIT_swigconstant(_bacnet)
PROP_OCCUPANCY_UPPER_LIMIT = _bacnet.PROP_OCCUPANCY_UPPER_LIMIT
_bacnet.PROP_OCCUPANCY_UPPER_LIMIT_ENFORCED_swigconstant(_bacnet)
PROP_OCCUPANCY_UPPER_LIMIT_ENFORCED = _bacnet.PROP_OCCUPANCY_UPPER_LIMIT_ENFORCED
_bacnet.PROP_PASSBACK_EXEMPTION_swigconstant(_bacnet)
PROP_PASSBACK_EXEMPTION = _bacnet.PROP_PASSBACK_EXEMPTION
_bacnet.PROP_PASSBACK_MODE_swigconstant(_bacnet)
PROP_PASSBACK_MODE = _bacnet.PROP_PASSBACK_MODE
_bacnet.PROP_PASSBACK_TIMEOUT_swigconstant(_bacnet)
PROP_PASSBACK_TIMEOUT = _bacnet.PROP_PASSBACK_TIMEOUT
_bacnet.PROP_POSITIVE_ACCESS_RULES_swigconstant(_bacnet)
PROP_POSITIVE_ACCESS_RULES = _bacnet.PROP_POSITIVE_ACCESS_RULES
_bacnet.PROP_REASON_FOR_DISABLE_swigconstant(_bacnet)
PROP_REASON_FOR_DISABLE = _bacnet.PROP_REASON_FOR_DISABLE
_bacnet.PROP_SUPPORTED_FORMATS_swigconstant(_bacnet)
PROP_SUPPORTED_FORMATS = _bacnet.PROP_SUPPORTED_FORMATS
_bacnet.PROP_SUPPORTED_FORMAT_CLASSES_swigconstant(_bacnet)
PROP_SUPPORTED_FORMAT_CLASSES = _bacnet.PROP_SUPPORTED_FORMAT_CLASSES
_bacnet.PROP_THREAT_AUTHORITY_swigconstant(_bacnet)
PROP_THREAT_AUTHORITY = _bacnet.PROP_THREAT_AUTHORITY
_bacnet.PROP_THREAT_LEVEL_swigconstant(_bacnet)
PROP_THREAT_LEVEL = _bacnet.PROP_THREAT_LEVEL
_bacnet.PROP_TRACE_FLAG_swigconstant(_bacnet)
PROP_TRACE_FLAG = _bacnet.PROP_TRACE_FLAG
_bacnet.PROP_TRANSACTION_NOTIFICATION_CLASS_swigconstant(_bacnet)
PROP_TRANSACTION_NOTIFICATION_CLASS = _bacnet.PROP_TRANSACTION_NOTIFICATION_CLASS
_bacnet.PROP_USER_EXTERNAL_IDENTIFIER_swigconstant(_bacnet)
PROP_USER_EXTERNAL_IDENTIFIER = _bacnet.PROP_USER_EXTERNAL_IDENTIFIER
_bacnet.PROP_USER_INFORMATION_REFERENCE_swigconstant(_bacnet)
PROP_USER_INFORMATION_REFERENCE = _bacnet.PROP_USER_INFORMATION_REFERENCE
_bacnet.PROP_USER_NAME_swigconstant(_bacnet)
PROP_USER_NAME = _bacnet.PROP_USER_NAME
_bacnet.PROP_USER_TYPE_swigconstant(_bacnet)
PROP_USER_TYPE = _bacnet.PROP_USER_TYPE
_bacnet.PROP_USES_REMAINING_swigconstant(_bacnet)
PROP_USES_REMAINING = _bacnet.PROP_USES_REMAINING
_bacnet.PROP_ZONE_FROM_swigconstant(_bacnet)
PROP_ZONE_FROM = _bacnet.PROP_ZONE_FROM
_bacnet.PROP_ZONE_TO_swigconstant(_bacnet)
PROP_ZONE_TO = _bacnet.PROP_ZONE_TO
_bacnet.PROP_ACCESS_EVENT_TAG_swigconstant(_bacnet)
PROP_ACCESS_EVENT_TAG = _bacnet.PROP_ACCESS_EVENT_TAG
_bacnet.PROP_GLOBAL_IDENTIFIER_swigconstant(_bacnet)
PROP_GLOBAL_IDENTIFIER = _bacnet.PROP_GLOBAL_IDENTIFIER
_bacnet.PROP_VERIFICATION_TIME_swigconstant(_bacnet)
PROP_VERIFICATION_TIME = _bacnet.PROP_VERIFICATION_TIME
_bacnet.PROP_BASE_DEVICE_SECURITY_POLICY_swigconstant(_bacnet)
PROP_BASE_DEVICE_SECURITY_POLICY = _bacnet.PROP_BASE_DEVICE_SECURITY_POLICY
_bacnet.PROP_DISTRIBUTION_KEY_REVISION_swigconstant(_bacnet)
PROP_DISTRIBUTION_KEY_REVISION = _bacnet.PROP_DISTRIBUTION_KEY_REVISION
_bacnet.PROP_DO_NOT_HIDE_swigconstant(_bacnet)
PROP_DO_NOT_HIDE = _bacnet.PROP_DO_NOT_HIDE
_bacnet.PROP_KEY_SETS_swigconstant(_bacnet)
PROP_KEY_SETS = _bacnet.PROP_KEY_SETS
_bacnet.PROP_LAST_KEY_SERVER_swigconstant(_bacnet)
PROP_LAST_KEY_SERVER = _bacnet.PROP_LAST_KEY_SERVER
_bacnet.PROP_NETWORK_ACCESS_SECURITY_POLICIES_swigconstant(_bacnet)
PROP_NETWORK_ACCESS_SECURITY_POLICIES = _bacnet.PROP_NETWORK_ACCESS_SECURITY_POLICIES
_bacnet.PROP_PACKET_REORDER_TIME_swigconstant(_bacnet)
PROP_PACKET_REORDER_TIME = _bacnet.PROP_PACKET_REORDER_TIME
_bacnet.PROP_SECURITY_PDU_TIMEOUT_swigconstant(_bacnet)
PROP_SECURITY_PDU_TIMEOUT = _bacnet.PROP_SECURITY_PDU_TIMEOUT
_bacnet.PROP_SECURITY_TIME_WINDOW_swigconstant(_bacnet)
PROP_SECURITY_TIME_WINDOW = _bacnet.PROP_SECURITY_TIME_WINDOW
_bacnet.PROP_SUPPORTED_SECURITY_ALGORITHM_swigconstant(_bacnet)
PROP_SUPPORTED_SECURITY_ALGORITHM = _bacnet.PROP_SUPPORTED_SECURITY_ALGORITHM
_bacnet.PROP_UPDATE_KEY_SET_TIMEOUT_swigconstant(_bacnet)
PROP_UPDATE_KEY_SET_TIMEOUT = _bacnet.PROP_UPDATE_KEY_SET_TIMEOUT
_bacnet.PROP_BACKUP_AND_RESTORE_STATE_swigconstant(_bacnet)
PROP_BACKUP_AND_RESTORE_STATE = _bacnet.PROP_BACKUP_AND_RESTORE_STATE
_bacnet.PROP_BACKUP_PREPARATION_TIME_swigconstant(_bacnet)
PROP_BACKUP_PREPARATION_TIME = _bacnet.PROP_BACKUP_PREPARATION_TIME
_bacnet.PROP_RESTORE_COMPLETION_TIME_swigconstant(_bacnet)
PROP_RESTORE_COMPLETION_TIME = _bacnet.PROP_RESTORE_COMPLETION_TIME
_bacnet.PROP_RESTORE_PREPARATION_TIME_swigconstant(_bacnet)
PROP_RESTORE_PREPARATION_TIME = _bacnet.PROP_RESTORE_PREPARATION_TIME
_bacnet.PROP_BIT_MASK_swigconstant(_bacnet)
PROP_BIT_MASK = _bacnet.PROP_BIT_MASK
_bacnet.PROP_BIT_TEXT_swigconstant(_bacnet)
PROP_BIT_TEXT = _bacnet.PROP_BIT_TEXT
_bacnet.PROP_IS_UTC_swigconstant(_bacnet)
PROP_IS_UTC = _bacnet.PROP_IS_UTC
_bacnet.PROP_GROUP_MEMBERS_swigconstant(_bacnet)
PROP_GROUP_MEMBERS = _bacnet.PROP_GROUP_MEMBERS
_bacnet.PROP_GROUP_MEMBER_NAMES_swigconstant(_bacnet)
PROP_GROUP_MEMBER_NAMES = _bacnet.PROP_GROUP_MEMBER_NAMES
_bacnet.PROP_MEMBER_STATUS_FLAGS_swigconstant(_bacnet)
PROP_MEMBER_STATUS_FLAGS = _bacnet.PROP_MEMBER_STATUS_FLAGS
_bacnet.PROP_REQUESTED_UPDATE_INTERVAL_swigconstant(_bacnet)
PROP_REQUESTED_UPDATE_INTERVAL = _bacnet.PROP_REQUESTED_UPDATE_INTERVAL
_bacnet.PROP_COVU_PERIOD_swigconstant(_bacnet)
PROP_COVU_PERIOD = _bacnet.PROP_COVU_PERIOD
_bacnet.PROP_COVU_RECIPIENTS_swigconstant(_bacnet)
PROP_COVU_RECIPIENTS = _bacnet.PROP_COVU_RECIPIENTS
_bacnet.PROP_EVENT_MESSAGE_TEXTS_swigconstant(_bacnet)
PROP_EVENT_MESSAGE_TEXTS = _bacnet.PROP_EVENT_MESSAGE_TEXTS
_bacnet.PROP_EVENT_MESSAGE_TEXTS_CONFIG_swigconstant(_bacnet)
PROP_EVENT_MESSAGE_TEXTS_CONFIG = _bacnet.PROP_EVENT_MESSAGE_TEXTS_CONFIG
_bacnet.PROP_EVENT_DETECTION_ENABLE_swigconstant(_bacnet)
PROP_EVENT_DETECTION_ENABLE = _bacnet.PROP_EVENT_DETECTION_ENABLE
_bacnet.PROP_EVENT_ALGORITHM_INHIBIT_swigconstant(_bacnet)
PROP_EVENT_ALGORITHM_INHIBIT = _bacnet.PROP_EVENT_ALGORITHM_INHIBIT
_bacnet.PROP_EVENT_ALGORITHM_INHIBIT_REF_swigconstant(_bacnet)
PROP_EVENT_ALGORITHM_INHIBIT_REF = _bacnet.PROP_EVENT_ALGORITHM_INHIBIT_REF
_bacnet.PROP_TIME_DELAY_NORMAL_swigconstant(_bacnet)
PROP_TIME_DELAY_NORMAL = _bacnet.PROP_TIME_DELAY_NORMAL
_bacnet.PROP_RELIABILITY_EVALUATION_INHIBIT_swigconstant(_bacnet)
PROP_RELIABILITY_EVALUATION_INHIBIT = _bacnet.PROP_RELIABILITY_EVALUATION_INHIBIT
_bacnet.PROP_FAULT_PARAMETERS_swigconstant(_bacnet)
PROP_FAULT_PARAMETERS = _bacnet.PROP_FAULT_PARAMETERS
_bacnet.PROP_FAULT_TYPE_swigconstant(_bacnet)
PROP_FAULT_TYPE = _bacnet.PROP_FAULT_TYPE
_bacnet.PROP_LOCAL_FORWARDING_ONLY_swigconstant(_bacnet)
PROP_LOCAL_FORWARDING_ONLY = _bacnet.PROP_LOCAL_FORWARDING_ONLY
_bacnet.PROP_PROCESS_IDENTIFIER_FILTER_swigconstant(_bacnet)
PROP_PROCESS_IDENTIFIER_FILTER = _bacnet.PROP_PROCESS_IDENTIFIER_FILTER
_bacnet.PROP_SUBSCRIBED_RECIPIENTS_swigconstant(_bacnet)
PROP_SUBSCRIBED_RECIPIENTS = _bacnet.PROP_SUBSCRIBED_RECIPIENTS
_bacnet.PROP_PORT_FILTER_swigconstant(_bacnet)
PROP_PORT_FILTER = _bacnet.PROP_PORT_FILTER
_bacnet.PROP_AUTHORIZATION_EXEMPTIONS_swigconstant(_bacnet)
PROP_AUTHORIZATION_EXEMPTIONS = _bacnet.PROP_AUTHORIZATION_EXEMPTIONS
_bacnet.PROP_ALLOW_GROUP_DELAY_INHIBIT_swigconstant(_bacnet)
PROP_ALLOW_GROUP_DELAY_INHIBIT = _bacnet.PROP_ALLOW_GROUP_DELAY_INHIBIT
_bacnet.PROP_CHANNEL_NUMBER_swigconstant(_bacnet)
PROP_CHANNEL_NUMBER = _bacnet.PROP_CHANNEL_NUMBER
_bacnet.PROP_CONTROL_GROUPS_swigconstant(_bacnet)
PROP_CONTROL_GROUPS = _bacnet.PROP_CONTROL_GROUPS
_bacnet.PROP_EXECUTION_DELAY_swigconstant(_bacnet)
PROP_EXECUTION_DELAY = _bacnet.PROP_EXECUTION_DELAY
_bacnet.PROP_LAST_PRIORITY_swigconstant(_bacnet)
PROP_LAST_PRIORITY = _bacnet.PROP_LAST_PRIORITY
_bacnet.PROP_WRITE_STATUS_swigconstant(_bacnet)
PROP_WRITE_STATUS = _bacnet.PROP_WRITE_STATUS
_bacnet.PROP_PROPERTY_LIST_swigconstant(_bacnet)
PROP_PROPERTY_LIST = _bacnet.PROP_PROPERTY_LIST
_bacnet.PROP_SERIAL_NUMBER_swigconstant(_bacnet)
PROP_SERIAL_NUMBER = _bacnet.PROP_SERIAL_NUMBER
_bacnet.PROP_BLINK_WARN_ENABLE_swigconstant(_bacnet)
PROP_BLINK_WARN_ENABLE = _bacnet.PROP_BLINK_WARN_ENABLE
_bacnet.PROP_DEFAULT_FADE_TIME_swigconstant(_bacnet)
PROP_DEFAULT_FADE_TIME = _bacnet.PROP_DEFAULT_FADE_TIME
_bacnet.PROP_DEFAULT_RAMP_RATE_swigconstant(_bacnet)
PROP_DEFAULT_RAMP_RATE = _bacnet.PROP_DEFAULT_RAMP_RATE
_bacnet.PROP_DEFAULT_STEP_INCREMENT_swigconstant(_bacnet)
PROP_DEFAULT_STEP_INCREMENT = _bacnet.PROP_DEFAULT_STEP_INCREMENT
_bacnet.PROP_EGRESS_TIMER_swigconstant(_bacnet)
PROP_EGRESS_TIMER = _bacnet.PROP_EGRESS_TIMER
_bacnet.PROP_IN_PROGRESS_swigconstant(_bacnet)
PROP_IN_PROGRESS = _bacnet.PROP_IN_PROGRESS
_bacnet.PROP_INSTANTANEOUS_POWER_swigconstant(_bacnet)
PROP_INSTANTANEOUS_POWER = _bacnet.PROP_INSTANTANEOUS_POWER
_bacnet.PROP_LIGHTING_COMMAND_swigconstant(_bacnet)
PROP_LIGHTING_COMMAND = _bacnet.PROP_LIGHTING_COMMAND
_bacnet.PROP_LIGHTING_COMMAND_DEFAULT_PRIORITY_swigconstant(_bacnet)
PROP_LIGHTING_COMMAND_DEFAULT_PRIORITY = _bacnet.PROP_LIGHTING_COMMAND_DEFAULT_PRIORITY
_bacnet.PROP_MAX_ACTUAL_VALUE_swigconstant(_bacnet)
PROP_MAX_ACTUAL_VALUE = _bacnet.PROP_MAX_ACTUAL_VALUE
_bacnet.PROP_MIN_ACTUAL_VALUE_swigconstant(_bacnet)
PROP_MIN_ACTUAL_VALUE = _bacnet.PROP_MIN_ACTUAL_VALUE
_bacnet.PROP_POWER_swigconstant(_bacnet)
PROP_POWER = _bacnet.PROP_POWER
_bacnet.PROP_TRANSITION_swigconstant(_bacnet)
PROP_TRANSITION = _bacnet.PROP_TRANSITION
_bacnet.PROP_EGRESS_ACTIVE_swigconstant(_bacnet)
PROP_EGRESS_ACTIVE = _bacnet.PROP_EGRESS_ACTIVE
_bacnet.MAX_BACNET_PROPERTY_ID_swigconstant(_bacnet)
MAX_BACNET_PROPERTY_ID = _bacnet.MAX_BACNET_PROPERTY_ID
_bacnet.EVENT_LOW_LIMIT_ENABLE_swigconstant(_bacnet)
EVENT_LOW_LIMIT_ENABLE = _bacnet.EVENT_LOW_LIMIT_ENABLE
_bacnet.EVENT_HIGH_LIMIT_ENABLE_swigconstant(_bacnet)
EVENT_HIGH_LIMIT_ENABLE = _bacnet.EVENT_HIGH_LIMIT_ENABLE
_bacnet.ACTION_DIRECT_swigconstant(_bacnet)
ACTION_DIRECT = _bacnet.ACTION_DIRECT
_bacnet.ACTION_REVERSE_swigconstant(_bacnet)
ACTION_REVERSE = _bacnet.ACTION_REVERSE
_bacnet.MIN_BINARY_PV_swigconstant(_bacnet)
MIN_BINARY_PV = _bacnet.MIN_BINARY_PV
_bacnet.BINARY_INACTIVE_swigconstant(_bacnet)
BINARY_INACTIVE = _bacnet.BINARY_INACTIVE
_bacnet.BINARY_ACTIVE_swigconstant(_bacnet)
BINARY_ACTIVE = _bacnet.BINARY_ACTIVE
_bacnet.MAX_BINARY_PV_swigconstant(_bacnet)
MAX_BINARY_PV = _bacnet.MAX_BINARY_PV
_bacnet.BINARY_NULL_swigconstant(_bacnet)
BINARY_NULL = _bacnet.BINARY_NULL
_bacnet.ACTION_BINARY_PV_swigconstant(_bacnet)
ACTION_BINARY_PV = _bacnet.ACTION_BINARY_PV
_bacnet.ACTION_UNSIGNED_swigconstant(_bacnet)
ACTION_UNSIGNED = _bacnet.ACTION_UNSIGNED
_bacnet.ACTION_FLOAT_swigconstant(_bacnet)
ACTION_FLOAT = _bacnet.ACTION_FLOAT
_bacnet.EVENT_STATE_NORMAL_swigconstant(_bacnet)
EVENT_STATE_NORMAL = _bacnet.EVENT_STATE_NORMAL
_bacnet.EVENT_STATE_FAULT_swigconstant(_bacnet)
EVENT_STATE_FAULT = _bacnet.EVENT_STATE_FAULT
_bacnet.EVENT_STATE_OFFNORMAL_swigconstant(_bacnet)
EVENT_STATE_OFFNORMAL = _bacnet.EVENT_STATE_OFFNORMAL
_bacnet.EVENT_STATE_HIGH_LIMIT_swigconstant(_bacnet)
EVENT_STATE_HIGH_LIMIT = _bacnet.EVENT_STATE_HIGH_LIMIT
_bacnet.EVENT_STATE_LOW_LIMIT_swigconstant(_bacnet)
EVENT_STATE_LOW_LIMIT = _bacnet.EVENT_STATE_LOW_LIMIT
_bacnet.EVENT_ENABLE_TO_OFFNORMAL_swigconstant(_bacnet)
EVENT_ENABLE_TO_OFFNORMAL = _bacnet.EVENT_ENABLE_TO_OFFNORMAL
_bacnet.EVENT_ENABLE_TO_FAULT_swigconstant(_bacnet)
EVENT_ENABLE_TO_FAULT = _bacnet.EVENT_ENABLE_TO_FAULT
_bacnet.EVENT_ENABLE_TO_NORMAL_swigconstant(_bacnet)
EVENT_ENABLE_TO_NORMAL = _bacnet.EVENT_ENABLE_TO_NORMAL
_bacnet.STATUS_OPERATIONAL_swigconstant(_bacnet)
STATUS_OPERATIONAL = _bacnet.STATUS_OPERATIONAL
_bacnet.STATUS_OPERATIONAL_READ_ONLY_swigconstant(_bacnet)
STATUS_OPERATIONAL_READ_ONLY = _bacnet.STATUS_OPERATIONAL_READ_ONLY
_bacnet.STATUS_DOWNLOAD_REQUIRED_swigconstant(_bacnet)
STATUS_DOWNLOAD_REQUIRED = _bacnet.STATUS_DOWNLOAD_REQUIRED
_bacnet.STATUS_DOWNLOAD_IN_PROGRESS_swigconstant(_bacnet)
STATUS_DOWNLOAD_IN_PROGRESS = _bacnet.STATUS_DOWNLOAD_IN_PROGRESS
_bacnet.STATUS_NON_OPERATIONAL_swigconstant(_bacnet)
STATUS_NON_OPERATIONAL = _bacnet.STATUS_NON_OPERATIONAL
_bacnet.STATUS_BACKUP_IN_PROGRESS_swigconstant(_bacnet)
STATUS_BACKUP_IN_PROGRESS = _bacnet.STATUS_BACKUP_IN_PROGRESS
_bacnet.MAX_DEVICE_STATUS_swigconstant(_bacnet)
MAX_DEVICE_STATUS = _bacnet.MAX_DEVICE_STATUS
_bacnet.UNITS_METERS_PER_SECOND_PER_SECOND_swigconstant(_bacnet)
UNITS_METERS_PER_SECOND_PER_SECOND = _bacnet.UNITS_METERS_PER_SECOND_PER_SECOND
_bacnet.UNITS_SQUARE_METERS_swigconstant(_bacnet)
UNITS_SQUARE_METERS = _bacnet.UNITS_SQUARE_METERS
_bacnet.UNITS_SQUARE_CENTIMETERS_swigconstant(_bacnet)
UNITS_SQUARE_CENTIMETERS = _bacnet.UNITS_SQUARE_CENTIMETERS
_bacnet.UNITS_SQUARE_FEET_swigconstant(_bacnet)
UNITS_SQUARE_FEET = _bacnet.UNITS_SQUARE_FEET
_bacnet.UNITS_SQUARE_INCHES_swigconstant(_bacnet)
UNITS_SQUARE_INCHES = _bacnet.UNITS_SQUARE_INCHES
_bacnet.UNITS_CURRENCY1_swigconstant(_bacnet)
UNITS_CURRENCY1 = _bacnet.UNITS_CURRENCY1
_bacnet.UNITS_CURRENCY2_swigconstant(_bacnet)
UNITS_CURRENCY2 = _bacnet.UNITS_CURRENCY2
_bacnet.UNITS_CURRENCY3_swigconstant(_bacnet)
UNITS_CURRENCY3 = _bacnet.UNITS_CURRENCY3
_bacnet.UNITS_CURRENCY4_swigconstant(_bacnet)
UNITS_CURRENCY4 = _bacnet.UNITS_CURRENCY4
_bacnet.UNITS_CURRENCY5_swigconstant(_bacnet)
UNITS_CURRENCY5 = _bacnet.UNITS_CURRENCY5
_bacnet.UNITS_CURRENCY6_swigconstant(_bacnet)
UNITS_CURRENCY6 = _bacnet.UNITS_CURRENCY6
_bacnet.UNITS_CURRENCY7_swigconstant(_bacnet)
UNITS_CURRENCY7 = _bacnet.UNITS_CURRENCY7
_bacnet.UNITS_CURRENCY8_swigconstant(_bacnet)
UNITS_CURRENCY8 = _bacnet.UNITS_CURRENCY8
_bacnet.UNITS_CURRENCY9_swigconstant(_bacnet)
UNITS_CURRENCY9 = _bacnet.UNITS_CURRENCY9
_bacnet.UNITS_CURRENCY10_swigconstant(_bacnet)
UNITS_CURRENCY10 = _bacnet.UNITS_CURRENCY10
_bacnet.UNITS_MILLIAMPERES_swigconstant(_bacnet)
UNITS_MILLIAMPERES = _bacnet.UNITS_MILLIAMPERES
_bacnet.UNITS_AMPERES_swigconstant(_bacnet)
UNITS_AMPERES = _bacnet.UNITS_AMPERES
_bacnet.UNITS_AMPERES_PER_METER_swigconstant(_bacnet)
UNITS_AMPERES_PER_METER = _bacnet.UNITS_AMPERES_PER_METER
_bacnet.UNITS_AMPERES_PER_SQUARE_METER_swigconstant(_bacnet)
UNITS_AMPERES_PER_SQUARE_METER = _bacnet.UNITS_AMPERES_PER_SQUARE_METER
_bacnet.UNITS_AMPERE_SQUARE_METERS_swigconstant(_bacnet)
UNITS_AMPERE_SQUARE_METERS = _bacnet.UNITS_AMPERE_SQUARE_METERS
_bacnet.UNITS_DECIBELS_swigconstant(_bacnet)
UNITS_DECIBELS = _bacnet.UNITS_DECIBELS
_bacnet.UNITS_DECIBELS_MILLIVOLT_swigconstant(_bacnet)
UNITS_DECIBELS_MILLIVOLT = _bacnet.UNITS_DECIBELS_MILLIVOLT
_bacnet.UNITS_DECIBELS_VOLT_swigconstant(_bacnet)
UNITS_DECIBELS_VOLT = _bacnet.UNITS_DECIBELS_VOLT
_bacnet.UNITS_FARADS_swigconstant(_bacnet)
UNITS_FARADS = _bacnet.UNITS_FARADS
_bacnet.UNITS_HENRYS_swigconstant(_bacnet)
UNITS_HENRYS = _bacnet.UNITS_HENRYS
_bacnet.UNITS_OHMS_swigconstant(_bacnet)
UNITS_OHMS = _bacnet.UNITS_OHMS
_bacnet.UNITS_OHM_METERS_swigconstant(_bacnet)
UNITS_OHM_METERS = _bacnet.UNITS_OHM_METERS
_bacnet.UNITS_MILLIOHMS_swigconstant(_bacnet)
UNITS_MILLIOHMS = _bacnet.UNITS_MILLIOHMS
_bacnet.UNITS_KILOHMS_swigconstant(_bacnet)
UNITS_KILOHMS = _bacnet.UNITS_KILOHMS
_bacnet.UNITS_MEGOHMS_swigconstant(_bacnet)
UNITS_MEGOHMS = _bacnet.UNITS_MEGOHMS
_bacnet.UNITS_MICROSIEMENS_swigconstant(_bacnet)
UNITS_MICROSIEMENS = _bacnet.UNITS_MICROSIEMENS
_bacnet.UNITS_MILLISIEMENS_swigconstant(_bacnet)
UNITS_MILLISIEMENS = _bacnet.UNITS_MILLISIEMENS
_bacnet.UNITS_SIEMENS_swigconstant(_bacnet)
UNITS_SIEMENS = _bacnet.UNITS_SIEMENS
_bacnet.UNITS_SIEMENS_PER_METER_swigconstant(_bacnet)
UNITS_SIEMENS_PER_METER = _bacnet.UNITS_SIEMENS_PER_METER
_bacnet.UNITS_TESLAS_swigconstant(_bacnet)
UNITS_TESLAS = _bacnet.UNITS_TESLAS
_bacnet.UNITS_VOLTS_swigconstant(_bacnet)
UNITS_VOLTS = _bacnet.UNITS_VOLTS
_bacnet.UNITS_MILLIVOLTS_swigconstant(_bacnet)
UNITS_MILLIVOLTS = _bacnet.UNITS_MILLIVOLTS
_bacnet.UNITS_KILOVOLTS_swigconstant(_bacnet)
UNITS_KILOVOLTS = _bacnet.UNITS_KILOVOLTS
_bacnet.UNITS_MEGAVOLTS_swigconstant(_bacnet)
UNITS_MEGAVOLTS = _bacnet.UNITS_MEGAVOLTS
_bacnet.UNITS_VOLT_AMPERES_swigconstant(_bacnet)
UNITS_VOLT_AMPERES = _bacnet.UNITS_VOLT_AMPERES
_bacnet.UNITS_KILOVOLT_AMPERES_swigconstant(_bacnet)
UNITS_KILOVOLT_AMPERES = _bacnet.UNITS_KILOVOLT_AMPERES
_bacnet.UNITS_MEGAVOLT_AMPERES_swigconstant(_bacnet)
UNITS_MEGAVOLT_AMPERES = _bacnet.UNITS_MEGAVOLT_AMPERES
_bacnet.UNITS_VOLT_AMPERES_REACTIVE_swigconstant(_bacnet)
UNITS_VOLT_AMPERES_REACTIVE = _bacnet.UNITS_VOLT_AMPERES_REACTIVE
_bacnet.UNITS_KILOVOLT_AMPERES_REACTIVE_swigconstant(_bacnet)
UNITS_KILOVOLT_AMPERES_REACTIVE = _bacnet.UNITS_KILOVOLT_AMPERES_REACTIVE
_bacnet.UNITS_MEGAVOLT_AMPERES_REACTIVE_swigconstant(_bacnet)
UNITS_MEGAVOLT_AMPERES_REACTIVE = _bacnet.UNITS_MEGAVOLT_AMPERES_REACTIVE
_bacnet.UNITS_VOLTS_PER_DEGREE_KELVIN_swigconstant(_bacnet)
UNITS_VOLTS_PER_DEGREE_KELVIN = _bacnet.UNITS_VOLTS_PER_DEGREE_KELVIN
_bacnet.UNITS_VOLTS_PER_METER_swigconstant(_bacnet)
UNITS_VOLTS_PER_METER = _bacnet.UNITS_VOLTS_PER_METER
_bacnet.UNITS_DEGREES_PHASE_swigconstant(_bacnet)
UNITS_DEGREES_PHASE = _bacnet.UNITS_DEGREES_PHASE
_bacnet.UNITS_POWER_FACTOR_swigconstant(_bacnet)
UNITS_POWER_FACTOR = _bacnet.UNITS_POWER_FACTOR
_bacnet.UNITS_WEBERS_swigconstant(_bacnet)
UNITS_WEBERS = _bacnet.UNITS_WEBERS
_bacnet.UNITS_JOULES_swigconstant(_bacnet)
UNITS_JOULES = _bacnet.UNITS_JOULES
_bacnet.UNITS_KILOJOULES_swigconstant(_bacnet)
UNITS_KILOJOULES = _bacnet.UNITS_KILOJOULES
_bacnet.UNITS_KILOJOULES_PER_KILOGRAM_swigconstant(_bacnet)
UNITS_KILOJOULES_PER_KILOGRAM = _bacnet.UNITS_KILOJOULES_PER_KILOGRAM
_bacnet.UNITS_MEGAJOULES_swigconstant(_bacnet)
UNITS_MEGAJOULES = _bacnet.UNITS_MEGAJOULES
_bacnet.UNITS_WATT_HOURS_swigconstant(_bacnet)
UNITS_WATT_HOURS = _bacnet.UNITS_WATT_HOURS
_bacnet.UNITS_KILOWATT_HOURS_swigconstant(_bacnet)
UNITS_KILOWATT_HOURS = _bacnet.UNITS_KILOWATT_HOURS
_bacnet.UNITS_MEGAWATT_HOURS_swigconstant(_bacnet)
UNITS_MEGAWATT_HOURS = _bacnet.UNITS_MEGAWATT_HOURS
_bacnet.UNITS_WATT_HOURS_REACTIVE_swigconstant(_bacnet)
UNITS_WATT_HOURS_REACTIVE = _bacnet.UNITS_WATT_HOURS_REACTIVE
_bacnet.UNITS_KILOWATT_HOURS_REACTIVE_swigconstant(_bacnet)
UNITS_KILOWATT_HOURS_REACTIVE = _bacnet.UNITS_KILOWATT_HOURS_REACTIVE
_bacnet.UNITS_MEGAWATT_HOURS_REACTIVE_swigconstant(_bacnet)
UNITS_MEGAWATT_HOURS_REACTIVE = _bacnet.UNITS_MEGAWATT_HOURS_REACTIVE
_bacnet.UNITS_BTUS_swigconstant(_bacnet)
UNITS_BTUS = _bacnet.UNITS_BTUS
_bacnet.UNITS_KILO_BTUS_swigconstant(_bacnet)
UNITS_KILO_BTUS = _bacnet.UNITS_KILO_BTUS
_bacnet.UNITS_MEGA_BTUS_swigconstant(_bacnet)
UNITS_MEGA_BTUS = _bacnet.UNITS_MEGA_BTUS
_bacnet.UNITS_THERMS_swigconstant(_bacnet)
UNITS_THERMS = _bacnet.UNITS_THERMS
_bacnet.UNITS_TON_HOURS_swigconstant(_bacnet)
UNITS_TON_HOURS = _bacnet.UNITS_TON_HOURS
_bacnet.UNITS_JOULES_PER_KILOGRAM_DRY_AIR_swigconstant(_bacnet)
UNITS_JOULES_PER_KILOGRAM_DRY_AIR = _bacnet.UNITS_JOULES_PER_KILOGRAM_DRY_AIR
_bacnet.UNITS_KILOJOULES_PER_KILOGRAM_DRY_AIR_swigconstant(_bacnet)
UNITS_KILOJOULES_PER_KILOGRAM_DRY_AIR = _bacnet.UNITS_KILOJOULES_PER_KILOGRAM_DRY_AIR
_bacnet.UNITS_MEGAJOULES_PER_KILOGRAM_DRY_AIR_swigconstant(_bacnet)
UNITS_MEGAJOULES_PER_KILOGRAM_DRY_AIR = _bacnet.UNITS_MEGAJOULES_PER_KILOGRAM_DRY_AIR
_bacnet.UNITS_BTUS_PER_POUND_DRY_AIR_swigconstant(_bacnet)
UNITS_BTUS_PER_POUND_DRY_AIR = _bacnet.UNITS_BTUS_PER_POUND_DRY_AIR
_bacnet.UNITS_BTUS_PER_POUND_swigconstant(_bacnet)
UNITS_BTUS_PER_POUND = _bacnet.UNITS_BTUS_PER_POUND
_bacnet.UNITS_JOULES_PER_DEGREE_KELVIN_swigconstant(_bacnet)
UNITS_JOULES_PER_DEGREE_KELVIN = _bacnet.UNITS_JOULES_PER_DEGREE_KELVIN
_bacnet.UNITS_KILOJOULES_PER_DEGREE_KELVIN_swigconstant(_bacnet)
UNITS_KILOJOULES_PER_DEGREE_KELVIN = _bacnet.UNITS_KILOJOULES_PER_DEGREE_KELVIN
_bacnet.UNITS_MEGAJOULES_PER_DEGREE_KELVIN_swigconstant(_bacnet)
UNITS_MEGAJOULES_PER_DEGREE_KELVIN = _bacnet.UNITS_MEGAJOULES_PER_DEGREE_KELVIN
_bacnet.UNITS_JOULES_PER_KILOGRAM_DEGREE_KELVIN_swigconstant(_bacnet)
UNITS_JOULES_PER_KILOGRAM_DEGREE_KELVIN = _bacnet.UNITS_JOULES_PER_KILOGRAM_DEGREE_KELVIN
_bacnet.UNITS_NEWTON_swigconstant(_bacnet)
UNITS_NEWTON = _bacnet.UNITS_NEWTON
_bacnet.UNITS_CYCLES_PER_HOUR_swigconstant(_bacnet)
UNITS_CYCLES_PER_HOUR = _bacnet.UNITS_CYCLES_PER_HOUR
_bacnet.UNITS_CYCLES_PER_MINUTE_swigconstant(_bacnet)
UNITS_CYCLES_PER_MINUTE = _bacnet.UNITS_CYCLES_PER_MINUTE
_bacnet.UNITS_HERTZ_swigconstant(_bacnet)
UNITS_HERTZ = _bacnet.UNITS_HERTZ
_bacnet.UNITS_KILOHERTZ_swigconstant(_bacnet)
UNITS_KILOHERTZ = _bacnet.UNITS_KILOHERTZ
_bacnet.UNITS_MEGAHERTZ_swigconstant(_bacnet)
UNITS_MEGAHERTZ = _bacnet.UNITS_MEGAHERTZ
_bacnet.UNITS_PER_HOUR_swigconstant(_bacnet)
UNITS_PER_HOUR = _bacnet.UNITS_PER_HOUR
_bacnet.UNITS_GRAMS_OF_WATER_PER_KILOGRAM_DRY_AIR_swigconstant(_bacnet)
UNITS_GRAMS_OF_WATER_PER_KILOGRAM_DRY_AIR = _bacnet.UNITS_GRAMS_OF_WATER_PER_KILOGRAM_DRY_AIR
_bacnet.UNITS_PERCENT_RELATIVE_HUMIDITY_swigconstant(_bacnet)
UNITS_PERCENT_RELATIVE_HUMIDITY = _bacnet.UNITS_PERCENT_RELATIVE_HUMIDITY
_bacnet.UNITS_MICROMETERS_swigconstant(_bacnet)
UNITS_MICROMETERS = _bacnet.UNITS_MICROMETERS
_bacnet.UNITS_MILLIMETERS_swigconstant(_bacnet)
UNITS_MILLIMETERS = _bacnet.UNITS_MILLIMETERS
_bacnet.UNITS_CENTIMETERS_swigconstant(_bacnet)
UNITS_CENTIMETERS = _bacnet.UNITS_CENTIMETERS
_bacnet.UNITS_KILOMETERS_swigconstant(_bacnet)
UNITS_KILOMETERS = _bacnet.UNITS_KILOMETERS
_bacnet.UNITS_METERS_swigconstant(_bacnet)
UNITS_METERS = _bacnet.UNITS_METERS
_bacnet.UNITS_INCHES_swigconstant(_bacnet)
UNITS_INCHES = _bacnet.UNITS_INCHES
_bacnet.UNITS_FEET_swigconstant(_bacnet)
UNITS_FEET = _bacnet.UNITS_FEET
_bacnet.UNITS_CANDELAS_swigconstant(_bacnet)
UNITS_CANDELAS = _bacnet.UNITS_CANDELAS
_bacnet.UNITS_CANDELAS_PER_SQUARE_METER_swigconstant(_bacnet)
UNITS_CANDELAS_PER_SQUARE_METER = _bacnet.UNITS_CANDELAS_PER_SQUARE_METER
_bacnet.UNITS_WATTS_PER_SQUARE_FOOT_swigconstant(_bacnet)
UNITS_WATTS_PER_SQUARE_FOOT = _bacnet.UNITS_WATTS_PER_SQUARE_FOOT
_bacnet.UNITS_WATTS_PER_SQUARE_METER_swigconstant(_bacnet)
UNITS_WATTS_PER_SQUARE_METER = _bacnet.UNITS_WATTS_PER_SQUARE_METER
_bacnet.UNITS_LUMENS_swigconstant(_bacnet)
UNITS_LUMENS = _bacnet.UNITS_LUMENS
_bacnet.UNITS_LUXES_swigconstant(_bacnet)
UNITS_LUXES = _bacnet.UNITS_LUXES
_bacnet.UNITS_FOOT_CANDLES_swigconstant(_bacnet)
UNITS_FOOT_CANDLES = _bacnet.UNITS_FOOT_CANDLES
_bacnet.UNITS_MILLIGRAMS_swigconstant(_bacnet)
UNITS_MILLIGRAMS = _bacnet.UNITS_MILLIGRAMS
_bacnet.UNITS_GRAMS_swigconstant(_bacnet)
UNITS_GRAMS = _bacnet.UNITS_GRAMS
_bacnet.UNITS_KILOGRAMS_swigconstant(_bacnet)
UNITS_KILOGRAMS = _bacnet.UNITS_KILOGRAMS
_bacnet.UNITS_POUNDS_MASS_swigconstant(_bacnet)
UNITS_POUNDS_MASS = _bacnet.UNITS_POUNDS_MASS
_bacnet.UNITS_TONS_swigconstant(_bacnet)
UNITS_TONS = _bacnet.UNITS_TONS
_bacnet.UNITS_GRAMS_PER_SECOND_swigconstant(_bacnet)
UNITS_GRAMS_PER_SECOND = _bacnet.UNITS_GRAMS_PER_SECOND
_bacnet.UNITS_GRAMS_PER_MINUTE_swigconstant(_bacnet)
UNITS_GRAMS_PER_MINUTE = _bacnet.UNITS_GRAMS_PER_MINUTE
_bacnet.UNITS_KILOGRAMS_PER_SECOND_swigconstant(_bacnet)
UNITS_KILOGRAMS_PER_SECOND = _bacnet.UNITS_KILOGRAMS_PER_SECOND
_bacnet.UNITS_KILOGRAMS_PER_MINUTE_swigconstant(_bacnet)
UNITS_KILOGRAMS_PER_MINUTE = _bacnet.UNITS_KILOGRAMS_PER_MINUTE
_bacnet.UNITS_KILOGRAMS_PER_HOUR_swigconstant(_bacnet)
UNITS_KILOGRAMS_PER_HOUR = _bacnet.UNITS_KILOGRAMS_PER_HOUR
_bacnet.UNITS_POUNDS_MASS_PER_SECOND_swigconstant(_bacnet)
UNITS_POUNDS_MASS_PER_SECOND = _bacnet.UNITS_POUNDS_MASS_PER_SECOND
_bacnet.UNITS_POUNDS_MASS_PER_MINUTE_swigconstant(_bacnet)
UNITS_POUNDS_MASS_PER_MINUTE = _bacnet.UNITS_POUNDS_MASS_PER_MINUTE
_bacnet.UNITS_POUNDS_MASS_PER_HOUR_swigconstant(_bacnet)
UNITS_POUNDS_MASS_PER_HOUR = _bacnet.UNITS_POUNDS_MASS_PER_HOUR
_bacnet.UNITS_TONS_PER_HOUR_swigconstant(_bacnet)
UNITS_TONS_PER_HOUR = _bacnet.UNITS_TONS_PER_HOUR
_bacnet.UNITS_MILLIWATTS_swigconstant(_bacnet)
UNITS_MILLIWATTS = _bacnet.UNITS_MILLIWATTS
_bacnet.UNITS_WATTS_swigconstant(_bacnet)
UNITS_WATTS = _bacnet.UNITS_WATTS
_bacnet.UNITS_KILOWATTS_swigconstant(_bacnet)
UNITS_KILOWATTS = _bacnet.UNITS_KILOWATTS
_bacnet.UNITS_MEGAWATTS_swigconstant(_bacnet)
UNITS_MEGAWATTS = _bacnet.UNITS_MEGAWATTS
_bacnet.UNITS_BTUS_PER_HOUR_swigconstant(_bacnet)
UNITS_BTUS_PER_HOUR = _bacnet.UNITS_BTUS_PER_HOUR
_bacnet.UNITS_KILO_BTUS_PER_HOUR_swigconstant(_bacnet)
UNITS_KILO_BTUS_PER_HOUR = _bacnet.UNITS_KILO_BTUS_PER_HOUR
_bacnet.UNITS_HORSEPOWER_swigconstant(_bacnet)
UNITS_HORSEPOWER = _bacnet.UNITS_HORSEPOWER
_bacnet.UNITS_TONS_REFRIGERATION_swigconstant(_bacnet)
UNITS_TONS_REFRIGERATION = _bacnet.UNITS_TONS_REFRIGERATION
_bacnet.UNITS_PASCALS_swigconstant(_bacnet)
UNITS_PASCALS = _bacnet.UNITS_PASCALS
_bacnet.UNITS_HECTOPASCALS_swigconstant(_bacnet)
UNITS_HECTOPASCALS = _bacnet.UNITS_HECTOPASCALS
_bacnet.UNITS_KILOPASCALS_swigconstant(_bacnet)
UNITS_KILOPASCALS = _bacnet.UNITS_KILOPASCALS
_bacnet.UNITS_MILLIBARS_swigconstant(_bacnet)
UNITS_MILLIBARS = _bacnet.UNITS_MILLIBARS
_bacnet.UNITS_BARS_swigconstant(_bacnet)
UNITS_BARS = _bacnet.UNITS_BARS
_bacnet.UNITS_POUNDS_FORCE_PER_SQUARE_INCH_swigconstant(_bacnet)
UNITS_POUNDS_FORCE_PER_SQUARE_INCH = _bacnet.UNITS_POUNDS_FORCE_PER_SQUARE_INCH
_bacnet.UNITS_MILLIMETERS_OF_WATER_swigconstant(_bacnet)
UNITS_MILLIMETERS_OF_WATER = _bacnet.UNITS_MILLIMETERS_OF_WATER
_bacnet.UNITS_CENTIMETERS_OF_WATER_swigconstant(_bacnet)
UNITS_CENTIMETERS_OF_WATER = _bacnet.UNITS_CENTIMETERS_OF_WATER
_bacnet.UNITS_INCHES_OF_WATER_swigconstant(_bacnet)
UNITS_INCHES_OF_WATER = _bacnet.UNITS_INCHES_OF_WATER
_bacnet.UNITS_MILLIMETERS_OF_MERCURY_swigconstant(_bacnet)
UNITS_MILLIMETERS_OF_MERCURY = _bacnet.UNITS_MILLIMETERS_OF_MERCURY
_bacnet.UNITS_CENTIMETERS_OF_MERCURY_swigconstant(_bacnet)
UNITS_CENTIMETERS_OF_MERCURY = _bacnet.UNITS_CENTIMETERS_OF_MERCURY
_bacnet.UNITS_INCHES_OF_MERCURY_swigconstant(_bacnet)
UNITS_INCHES_OF_MERCURY = _bacnet.UNITS_INCHES_OF_MERCURY
_bacnet.UNITS_DEGREES_CELSIUS_swigconstant(_bacnet)
UNITS_DEGREES_CELSIUS = _bacnet.UNITS_DEGREES_CELSIUS
_bacnet.UNITS_DEGREES_KELVIN_swigconstant(_bacnet)
UNITS_DEGREES_KELVIN = _bacnet.UNITS_DEGREES_KELVIN
_bacnet.UNITS_DEGREES_KELVIN_PER_HOUR_swigconstant(_bacnet)
UNITS_DEGREES_KELVIN_PER_HOUR = _bacnet.UNITS_DEGREES_KELVIN_PER_HOUR
_bacnet.UNITS_DEGREES_KELVIN_PER_MINUTE_swigconstant(_bacnet)
UNITS_DEGREES_KELVIN_PER_MINUTE | |
from cam_functions import extract_feat_cam
from utils import create_folders, save_data, preprocess_images, load_data, print_classes
from pooling_functions import weighted_cam_pooling, descriptor_aggregation, retrieve_n_descriptors, compute_pca, sum_pooling
from scipy.misc import imread
import math
from reranking import re_ranking
import pickle
import resnet
import densenet
import numpy as np
import os
import h5py
import sys
import getopt
import evaluate_oxford_paris as eval
import time
import torch
import torchvision
import torch.nn.parallel
import torch.backends.cudnn as cudnn
imagenet_dictionary = pickle.load(open("../imagenet1000_clsid_to_human.pkl", "rb"))
# Instructions Arguments: python script.py -d 'Oxford/Paris' -nc_q 32 -pca 1 -qe 10 -re 100 -nc_re 6
try:
opts, args = getopt.getopt(sys.argv[1:], 'd:m:', ['nc_q=', 'pca=', 'qe=', 're=', 'nc_re='])
flag_nc_q = False
flag_pca = False
flag_d = False
flag_nc_re = False
flag_qe = False
flag_re = False
flag_m = False
except getopt.GetoptError:
print 'script.py -d <dataset> --nc_q <nclasses_query> --pca <n_classes_pca> --qe <n_query_exp> --re <n_re_ranking> ' \
'--nc_re <n_classes_re_ranking> -m <model_name>'
sys.exit(2)
for opt, arg in opts:
if opt == '-d':
if arg == 'Oxford' or arg == 'Paris' or arg == 'Oxford105k' or arg == 'Paris106k':
dataset = arg
flag_d = True
elif opt == '--nc_q':
num_cams = int(arg)
flag_nc_q = True
elif opt == '--pca':
num_classes_pca = int(arg)
flag_pca = True
elif opt == '-m':
if arg == 'ResNet50' or arg == 'DenseNet161':
model_name = arg
flag_m = True
elif opt == '--qe':
n_expand = int(arg)
query_expansion = True
flag_qe = True
elif opt == '--re':
do_re_ranking = True
top_n_ranking = int(arg)
flag_re = True
elif opt == '--nc_re':
num_cams2 = int(arg)
flag_nc_re = True
if not flag_pca:
num_classes_pca = 1
print 'Default pca_classes: ', num_classes_pca
# N Class Activation Maps
if not flag_nc_q:
num_cams = 64
print 'Default classes: ', num_cams
# Num_cams2 --> Used to compute the descriptors when re-ranking
if not flag_nc_re:
num_cams_re = 6
print 'Default classes for re-ranking: ', num_cams_re
# Re-ranking
if not flag_re:
do_re_ranking = False
top_n_ranking = 0
print 'Not doing Re-ranking'
# Query Expansion
if not flag_qe:
# Re-ranking
query_expansion = False
n_expand = 0
print 'Not doing Query Expansion'
if not flag_m:
model_name = 'ResNet50'
print 'Default model: ', model_name
# Num classes stored in the precomputed --> Have to be set up
# (number of CAM descriptors saved per image in the precomputed .h5 files)
num_prec_classes = 64
# SET FOR RE-RANKING
batch_size_re = 6
# Global params: dataset sizes (number of database images) and query count
n_images_distractors = 100070
n_images_oxford = 5063
n_images_paris = 6392
n_queries = 55
# Descriptors for Re-ranking (Size W x H)
dim = '1024x720'
size_v = [720, 1024]  # target size for portrait (vertical) images
size_h = [1024, 720]  # target size for landscape (horizontal) images
# stats = [mean, std]; presumably the standard ImageNet normalization
# statistics used by torchvision pretrained models -- TODO confirm against
# preprocess_images.
stats = list()
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
stats.append(mean)
stats.append(std)
# Parameters to set
# Dataset
if not flag_d:
    dataset = 'Oxford'
    print 'Default dataset: ', dataset
cudnn.benchmark = True
if model_name == 'ResNet50':
    # ResNet50
    model = resnet.resnet50(pretrained=True)
    model = torch.nn.DataParallel(model)
    dim_descriptor = 2048
    pca_dim = 2048
elif model_name == 'DenseNet161':
    # DenseNet161
    model = densenet.densenet161(pretrained=True)
    model.features = torch.nn.DataParallel(model.features)
    dim_descriptor = 2208
    pca_dim = 2208
model.cuda()
# PCA Parameters
apply_pca = True
print 'Dataset: ', dataset
print 'Num_cams ', num_cams
print 'PCA with ', num_classes_pca
print 'Model: ', model_name
if do_re_ranking:
    print 'Re-ranking with first ', top_n_ranking
if query_expansion:
    print 'Applying query expansion using the first ', n_expand
# Per-dataset configuration: image paths, precomputed descriptor files, the
# ranking list of image names, and the ground-truth query definitions.  Note
# the PCA descriptors are always taken from the *other* dataset (Paris for
# Oxford and vice versa).
if dataset == 'Oxford':
    image_path = '/data/jim011/datasets_retrieval/Oxford5k/images/'
    ranking_path = '/flush2/jim011/results/oxford/' + model_name + '/' + dim + '/'
    ranking_image_names_list = '../lists/list_oxford_rank.txt'
    create_folders(ranking_path)
    cam_descriptors_path = '/data/jim011/oxford/descriptors/' + model_name + '/' + dim + '/' + 'oxford_all_64_wp.h5'
    pca_descriptors_path = '/data/jim011/paris/descriptors/' + model_name + '/1024x720/' + 'paris_all_64_wp.h5'
    t = time.time()
    # One image name per line; names keep their trailing newline here.
    image_names = list()
    with open(ranking_image_names_list, "r") as f:
        for line in f:
            image_names.append(line)
    num_images = n_images_oxford
    num_img_pca = n_images_paris
    image_names = np.array(image_names)
    path_gt = "/data/jim011/datasets_retrieval/Oxford5k/ground_truth/"
    query_names = ["all_souls", "ashmolean", "balliol", "bodleian", "christ_church", "cornmarket", "hertford", "keble",
                   "magdalen", "pitt_rivers", "radcliffe_camera"]
elif dataset == 'Paris':
    ranking_path = '/flush2/jim011/results/paris/' + model_name + '/' + dim + '/'
    ranking_image_names_list = '../lists/list_paris_rank.txt'
    create_folders(ranking_path)
    descriptors_path = '/flush2/jim011/paris/descriptors/' + model_name + '/1024x720/'
    descriptors_name = 'paris_32_pca_2208_oxford_1.h5'
    # Adjacent string literals are implicitly concatenated across the
    # line continuation ('.../'  'paris_all2_sp.h5').
    cam_descriptors_path = '/flush2/jim011/paris/descriptors/' + model_name + '/' + dim + '/' \
                                                                                          'paris_all2_sp.h5'
    pca_descriptors_path = '/flush2/jim011/oxford/descriptors/' + model_name + '/1024x720/' \
                                                                               'oxford_all2_sp.h5'
    image_path = '/data/jim011/datasets_retrieval/Paris6k/images/'
    num_images = n_images_paris
    num_img_pca = n_images_oxford
    path_gt = "/data/jim011/datasets_retrieval/Paris6k/ground_truth/"
    query_names = ["defense", "eiffel", "invalides", "louvre", "moulinrouge", "museedorsay", "notredame", "pantheon",
                   "pompidou", "sacrecoeur", "triomphe"]
    t = time.time()
    image_names = list()
    with open(ranking_image_names_list, "r") as f:
        for line in f:
            image_names.append(line)
    image_names = np.array(image_names)
elif dataset == 'Oxford105k':
    # Oxford5k plus ~100k distractor images.
    image_path = '/data/jim011/datasets_retrieval/Oxford5k/images/'
    ranking_path = '/home/jim011/workspace/retrieval-2017-icmr/results/oxford105k/' + model_name + '/' \
                   + dim + '/' + '/R' + str(top_n_ranking) + 'QE' + str(n_expand)+'/'
    ranking_image_names_list = '/home/jim011/workspace/retrieval-2017-icmr/lists/list_oxford_rank.txt'
    ranking_distractors_list = '/home/jim011/workspace/retrieval-2017-icmr/lists/list_oxford_105k_rank.txt'
    create_folders(ranking_path)
    # NOTE(review): this branch sets descriptors_path/descriptors_name but
    # never cam_descriptors_path, which the descriptor-loading code below
    # reads when dataset == 'Oxford105k' -- likely a latent NameError.
    # Confirm the intended .h5 path before running this configuration.
    descriptors_path = '/data/jim011/oxford/descriptors/Vgg_16_CAM/relu5_1/1024x720/'
    distractors_path = '/data/jim011/oxford/descriptors/Vgg_16_CAM/relu5_1/1024x720/' \
                       'oxford_105k_32_pca_512_paris_1.h5'
    # descriptors_name = 'oxford_fusion_8_th_0_pca_paris_8_wp_wp.h5'
    descriptors_name = 'oxford_32_pca_512_paris_1.h5'
    pca_descriptors_path = '/data/jim011/paris/descriptors/Vgg_16_CAM/relu5_1/1024x720/' \
                           'paris_all_64_wp.h5'
    # Prefix of the chunked distractor descriptor files; a chunk index and
    # '.h5' are appended when loading.
    cam_distractors_path = '/data/jim011/descriptors100k/descriptors/' + model_name + '/' + '/' + dim + '/' \
                                                                                                        'distractor_all_64_wp_'
    num_images = n_images_distractors
    num_img_pca = n_images_paris
    t = time.time()
    image_names = list()
    with open(ranking_image_names_list, "r") as f:
        for line in f:
            image_names.append(line)
    with open(ranking_distractors_list, "r") as f:
        for line in f:
            image_names.append(line)
    image_names = np.array(image_names)
    path_gt = "/data/jim011/datasets_retrieval/Oxford5k/ground_truth/"
    query_names = ["all_souls", "ashmolean", "balliol", "bodleian", "christ_church", "cornmarket", "hertford", "keble",
                   "magdalen", "pitt_rivers", "radcliffe_camera"]
elif dataset == 'Paris106k':
ranking_path = '/home/jim011/workspace/retrieval-2017-icmr/results/paris106k/' + model_name + '/' + layer + '/' \
+ dim + '/' + '/R' + str(top_n_ranking) + 'QE' + str(n_expand)+'/'
ranking_image_names_list = '/home/jim011/workspace/retrieval-2017-icmr/lists/list_paris_rank.txt'
ranking_distractors_list = '/home/jim011/workspace/retrieval-2017-icmr/lists/list_oxford_105k_rank.txt'
create_folders(ranking_path)
descriptors_path = '/data/jim011/oxford/descriptors/Vgg_16_CAM/relu5_1/1024x720/'
descriptors_name = 'paris_32_pca_512_oxford_1.h5'
distractors_path = '/data/jim011/'
pca_descriptors_path = '/data/jim011/oxford/descriptors/Vgg_16_CAM/relu5_1/1024x720/' \
'oxford_all_32_wp.h5'
image_path = '/data/jim011/datasets_retrieval/Paris6k/images/'
num_images = n_images_distractors
num_img_pca = n_images_oxford
cam_distractors_path = '/data/jim011/descriptors100k/descriptors/' + model_name + '/' + '/' + dim + '/' \
'distractor_all_64_wp_'
t = time.time()
image_names = list()
with open(ranking_image_names_list, "r") as f:
for line in f:
image_names.append(line)
with open(ranking_distractors_list, "r") as f:
for line in f:
image_names.append(line)
image_names = np.array(image_names)
path_gt = "/data/jim011/datasets_retrieval/Paris6k/ground_truth/"
query_names = ["defense", "eiffel", "invalides", "louvre", "moulinrouge", "museedorsay", "notredame", "pantheon",
"pompidou", "sacrecoeur", "triomphe"]
maps = list()
# Compute PCA
if apply_pca:
tpca = time.time()
pca_desc = retrieve_n_descriptors(num_classes_pca, num_img_pca, load_data(pca_descriptors_path))
pca_matrix = compute_pca(pca_desc, pca_dim=pca_dim, whiten=True)
print 'PCA matrix shape:', pca_matrix.components_.shape
print 'Time elapsed PCA: ', time.time() - tpca
else:
pca_matrix = None
if dataset == 'Oxford105k' or dataset == 'Paris106k':
n_chunks = 10
distractors = np.zeros((0, 512), dtype=np.float32)
for n_in in range(0, n_chunks+1):
desc = load_data(cam_distractors_path + str(n_in) + '.h5')
print desc.shape
distractors = np.concatenate((distractors, descriptor_aggregation(desc, desc.shape[0]/num_prec_classes,
num_cams, pca_matrix)))
print distractors.shape
t = time.time()
cam_descriptors = load_data(cam_descriptors_path)
print 'Time elapsed loading: ', time.time() - t
data = descriptor_aggregation(cam_descriptors, num_images, num_cams, pca_matrix)
data = np.concatenate((data, distractors))
elif dataset == 'Oxford' or dataset == 'Paris':
t = time.time()
cam_descriptors = load_data(cam_descriptors_path)
print 'Time elapsed loading: ', time.time() - t
#data = cam_descriptors
data = descriptor_aggregation(cam_descriptors, num_images, num_cams, pca_matrix)
for query_name in query_names:
for i in range(1, 6):
f = open(path_gt + query_name + '_' + str(i) + '_query.txt').readline()
if dataset == 'Oxford' or dataset == 'Oxford105k':
f = f.replace("oxc1_", "")
f_list = f.split(" ")
for k in range(1, 5):
f_list[k] = (int(math.floor(float(f_list[k]))))
query_img_name = f_list[0]
img = imread(image_path + query_img_name + '.jpg')
print 'Image Shape: ' + str(img.shape[0]) + 'x' + str(img.shape[1])
# Query bounding box
x, y, dx, dy = f_list[1], f_list[2], f_list[3], f_list[4]
# Feature map query bounding box
f_x, f_y, f_dx, f_dy = int((x - (x % 32)) / 32), int((y - (y % 32)) / 32), \
int((dx - (dx % 32)) / 32), int((dy - (dy % 32)) / 32)
img_cropped = img[y:dy, x:dx]
print 'Name of the query: ', query_img_name
h = img_cropped.shape[0] - (img_cropped.shape[0] % 32)
w = img_cropped.shape[1] - (img_cropped.shape[1] % 32)
img_tensor = preprocess_images(img_cropped, w, h, stats[0], stats[1])
img_tensor = torch.autograd.Variable(img_tensor, volatile=True)
# Cropped Query
features_c, cams_c, class_list, _ = extract_feat_cam(model, model_name, 1, img_tensor, num_cams)
if img.shape[0] > img.shape[1]:
size = size_v
else:
size = size_h
img_tensor = preprocess_images(img, size[0], size[1], stats[0], stats[1])
img_tensor = torch.autograd.Variable(img_tensor, volatile=True)
# All image query (With bounding box classes, to be implemented in one step...)
features, cams, _ = extract_feat_cam(model, model_name, 1, img_tensor, num_cams, class_list[0, 0:num_cams], roi=False)
# Features that fall inside Bounding box query
d_wp = weighted_cam_pooling(features[:, :, f_y:f_dy, f_x:f_dx], cams[:, :, f_y:f_dy, f_x:f_dx])
#d_wp = weighted_cam_pooling(features_c, cams_c, max_pool=False)
# Compute Query Descriptor
desc = descriptor_aggregation(d_wp, 1, num_cams, pca_matrix)
# desc = sum_pooling(features_c)
# desc = sum_pooling(features[:, :, f_y:f_dy, f_x:f_dx])
indices_local, data_local = eval.save_ranking_one_query(data, desc, image_names, ranking_path, query_img_name)
if do_re_ranking:
# When re-ranking descriptor for the query computed with less CAMs, as we know the relevant objects
desc = descriptor_aggregation(d_wp, 1, num_cams_re, pca_matrix)
t_rerank = time.time()
indices_re_ranking, data_re_ranking = re_ranking(desc, class_list[0, | |
offset = -2.5*np.log10(res1[:,1])
if full_output:
print( "Impossible to return proper residuals" )
residuals = None
else: # Calculate the residuals in the magnitude domain
res1 = np.array([ Utils.Misc.Fit_linear(self.data['mag'][i]-pred_flux[i], err=self.data['mag_err'][i], m=0., inline=True) for i in np.arange(self.ndataset) ])
offset = res1[:,0]
if full_output:
residuals = [ ((self.data['mag'][i]-pred_flux[i]) - offset[i])/self.data['mag_err'][i] for i in np.arange(self.ndataset) ]
chi2_data = res1[:,2].sum()
# Fit for the best offset between the observed and theoretical flux given the DM and A_V
res2 = Utils.Misc.Fit_linear(offset, x=self.data['ext'], err=self.data['calib'], b=par[7], m=par[8], inline=True)
par[7], par[8] = res2[0], res2[1]
chi2_band = res2[2]
# Here we add the chi2 of the data from that of the offsets for the bands.
chi2 = chi2_data + chi2_band
# Update the offset to be the actual offset between the data and the band (i.e. minus the DM and A_V contribution)
offset -= self.data['ext']*par[8] + par[7]
# Output results
if verbose:
print('chi2: {:.3f}, chi2 (data): {:.3f}, chi2 (band offset): {:.3f}, DM: {:.3f}, A_V: {:.3f}'.format(chi2, chi2_data, chi2_band, par[7], par[8]))
if full_output:
return chi2, {'offset':offset, 'par':par, 'res':residuals}
else:
return chi2
def Get_flux(self, par, flat=False, func_par=None, DM_AV=False, nsamples=None, verbose=False):
"""Get_flux(par, flat=False, func_par=None, DM_AV=False, nsamples=None, verbose=False)
Returns the predicted flux (in magnitude) by the model evaluated
at the observed values in the data set.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus (optional).
[8]: Absorption A_V (optional).
Note: Can also be a dictionary:
par.keys() = ['av', 'corotation', 'dm', 'filling',
'gravdark', 'incl','k1','tday','tnight']
flat (False): If True, the values are returned in a 1D vector.
If False, predicted values are grouped by data set left in a list.
func_par (None): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
DM_AV (False): If true, will include the DM and A_V in the flux.
nsamples (None): Number of points for the lightcurve sampling.
If None, the lightcurve will be sampled at the observed data
points.
Note: tirr = (par[6]**4 - par[3]**4)**0.25
>>> self.Get_flux([PIBYTWO,1.,0.9,4000.,0.08,300e3,5000.,10.,0.])
"""
# func_par
if func_par is not None:
par = func_par(par)
# check if we are dealing with a dictionary
if isinstance(par, dict):
par = [par['incl'], par['corotation'], par['filling'], par['tnight'], par['gravdark'], par['k1'], par['tday'], par['dm'], par['av']]
# We call Make_surface to make the companion's surface.
self.Make_surface(par, verbose=verbose)
# If nsamples is None we evaluate the lightcurve at each data point.
if nsamples is None:
phases = self.data['phase']
# If nsamples is set, we evaluate the lightcurve at nsamples
else:
phases = (np.arange(nsamples, dtype=float)/nsamples).repeat(self.ndataset).reshape((nsamples,self.ndataset)).T
# If DM_AV, we take into account the DM and AV into the flux here.
if DM_AV:
DM_AV = self.data['ext']*par[8] + par[7]
else:
DM_AV = self.data['ext']*0.
# Calculate the actual lightcurves
flux = []
for i in np.arange(self.ndataset):
# If we use the interpolation method and if the filter is the same as a previously
# calculated one, we do not recalculate the fluxes and simply copy them.
if nsamples is not None and self.grouping[i] < i:
flux.append(flux[self.grouping[i]])
else:
flux.append( np.array([self.star.Mag_flux(phase, atmo_grid=self.atmo_grid[i]) for phase in phases[i]]) + DM_AV[i] )
# If nsamples is set, we interpolate the lightcurve at nsamples.
if nsamples is not None:
for i in np.arange(self.ndataset):
ws, inds = Utils.Series.Getaxispos_vector(phases[i], self.data['phase'][i])
flux[i] = flux[i][inds]*(1-ws) + flux[i][inds+1]*ws
# We can flatten the flux array to simplify some of the calculations in the Calc_chi2 function
if flat:
return np.hstack(flux)
else:
return flux
def Get_flux_theoretical(self, par, phases, func_par=None, verbose=False):
"""Get_flux_theoretical(par, phases, func_par=None, verbose=False)
Returns the predicted flux (in magnitude) by the model evaluated at the
observed values in the data set.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus.
[8]: Absorption A_V.
Note: Can also be a dictionary:
par.keys() = ['av','corotation','dm','filling','gravdark','incl','k1','tday','tnight']
phases: A list of orbital phases at which the model should be
evaluated. The list must have the same length as the
number of data sets, each element can contain many phases.
func_par (None): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
verbose (False)
Note: tirr = (par[6]**4 - par[3]**4)**0.25
>>> self.Get_flux_theoretical([PIBYTWO,1.,0.9,4000.,0.08,300e3,5000.,10.,0.], [[0.,0.25,0.5,0.75]]*4)
"""
# func_par
if func_par is not None:
par = func_par(par)
# check if we are dealing with a dictionary
if isinstance(par, dict):
par = [par['incl'], par['corotation'], par['filling'], par['tnight'], par['gravdark'], par['k1'], par['tday'], par['dm'], par['av']]
# We call Make_surface to make the companion's surface.
self.Make_surface(par, verbose=verbose)
DM_AV = self.data['ext']*par[8] + par[7]
flux = []
for i in np.arange(self.ndataset):
# If the filter is the same as a previously calculated one
# we do not recalculate the fluxes and simply copy them.
if self.grouping[i] < i:
flux.append( flux[self.grouping[i]] )
else:
flux.append( np.array([self.star.Mag_flux(phase, atmo_grid=self.atmo_grid[i]) for phase in phases[i]]) + DM_AV[i] )
return flux
def Get_Keff(self, par, nphases=20, atmo_grid=0, func_par=None, make_surface=False, verbose=False):
"""
Returns the effective projected velocity semi-amplitude of the star in m/s.
The luminosity-weighted average velocity of the star is returned for
nphases, for the specified dataset, and a sin wave is fitted to them.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature.
[7]: Distance modulus.
[8]: Absorption A_V.
nphases (int): Number of phases to evaluate the velocity at.
atmo_grid (int, AtmoGridPhot): The atmosphere grid to use for the velocity
calculation. Can be an integer that represents the index of the atmosphere
grid object in self.atmo_grid, and it can be an AtmoGridPhot instance.
func_par (function): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
make_surface (bool): Whether lightcurve.make_surface should be called
or not. If the flux has been evaluate before and the parameters have
not changed, False is fine.
verbose (bool): Verbosity. Will plot the velocities and the sin fit.
"""
# If it is required to recalculate the stellar surface.
if make_surface:
self.Make_surface(par, func_par=func_par, verbose=verbose)
# Deciding which atmosphere grid we use to evaluate Keff
if isinstance(atmo_grid, int):
atmo_grid = self.atmo_grid[atmo_grid]
# Get the Keffs and fluxes
phases = np.arange(nphases)/float(nphases)
Keffs = np.array( [self.star.Keff(phase, atmo_grid=atmo_grid) for phase in phases] )
tmp = Utils.Misc.Fit_linear(Keffs, np.sin(cts.TWOPI*(phases)), inline=True)
if verbose:
pylab.plot(np.linspace(0.,1.), tmp[1]*np.sin(np.linspace(0.,1.)*cts.TWOPI)+tmp[0])
pylab.scatter(phases, Keffs)
Keff = tmp[1]
return Keff
def _Init_lightcurve(self, ndiv, read=False):
"""_Init_lightcurve(ndiv, read=False)
Call the appropriate Lightcurve class and initialize
the stellar array.
>>> self._Init_lightcurve(ndiv)
"""
self.star = Core.Star(ndiv, read=read)
return
def Make_surface(self, par, func_par=None, verbose=False):
"""Make_surface(par, func_par=None, verbose=False)
This function gets the parameters to construct to companion
surface model and calls the Make_surface function from the
Lightcurve object.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus (optional). Not needed here.
[8]: Absorption A_V (optional). Not needed here.
Note: Can also be a dictionary:
par.keys() = ['av','corotation','dm','filling','gravdark','incl','k1','tday','tnight']
func_par (None): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have | |
tz
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp
    @with_cursor(protocol.DataPointCursor, protocol.MultiPoint)
    def read_multi_rollups(self, key, start, end, rollups, period,
                           tz=None, interpolationf=None,
                           interpolation_period=None, limit=5000):
        """Read data from a single series with multiple rollups applied.
        The rollups parameter should be a list of rollup function names;
        one rolled-up value per function is returned for each period.
        :param string key: the series key to read from
        :param list rollups: the rollup functions to use
        :param start: the start time for the data points
        :type start: string or Datetime
        :param end: the end time for the data points
        :type end: string or Datetime
        :param string period: downsampling rate for the data
        :param string tz: (optional) the timezone to place the data into
        :param string interpolationf: (optional) an interpolation function
        to run over the series
        :param string interpolation_period: (optional) the period to
        interpolate data into
        :param int limit: (optional) maximum number of points per request
        :rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an
        iterator over :class:`tempodb.protocol.objects.MultiPoint`
        objects"""
        # Rollup reads hang off the per-series data/rollups/segment endpoint.
        url = make_series_url(key)
        url = urlparse.urljoin(url + '/', 'data/rollups/segment')
        # Normalize start/end (string or Datetime) into the wire format.
        vstart = check_time_param(start)
        vend = check_time_param(end)
        params = {
            'start': vstart,
            'end': vend,
            'limit': limit,
            'rollup.fold': rollups,
            'rollup.period': period,
            'interpolation.function': interpolationf,
            'interpolation.period': interpolation_period,
            'tz': tz
        }
        url_args = endpoint.make_url_args(params)
        url = '?'.join([url, url_args])
        resp = self.session.get(url)
        return resp
@with_cursor(protocol.DataPointCursor, protocol.DataPointFound)
def find_data(self, key, start, end, predicate, period, tz=None,
limit=1000):
"""Finds data from a given series according to a defined predicate
function. Start and end times must be supplied. They can either be
ISO8601 encoded strings (i.e. 2012-01-08T00:21:54.000+0000) or Python
Datetime objects, which will be converted for you.
The predicate and period must be supplied. The period specifies
sub-intervals from start to end in which the search will be performed
(i.e. 1min will search over each minute within the interval). The
predicate can be one of "max", "min", "first", or "last" and will
return the point over the given period that satisfies that predicate.
Finally, the optional tz parameter can be used to specify a time zone
for your output. Please see
`here <https://tempo-db.com/docs/api/timezone/>`_ for a list of a
valid timezone values.
:param string key: the series key to use
:param start: the start time for the data points
:type start: string or Datetime
:param end: the end time for the data points
:type end: string or Datetime
:param string predicate: the name of a search function to use
:param string period: downsampling rate for the data
:param string tz: (optional) the timezone to place the data into
:rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an
iterator over :class:`tempodb.protocol.objects.DataPointFound`
objects."""
url = make_series_url(key)
url = urlparse.urljoin(url + '/', 'find')
vstart = check_time_param(start)
vend = check_time_param(end)
params = {
'start': vstart,
'end': vend,
'predicate.function': predicate,
'predicate.period': period,
'tz': tz,
'limit': limit
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp
    @with_cursor(protocol.DataPointCursor, protocol.DataPoint)
    def aggregate_data(self, start, end, aggregation, keys=[], tags=[],
                       attrs={}, rollup=None, period=None, interpolationf=None,
                       interpolation_period=None, tz=None, limit=1000):
        """Read data from multiple series according to a filter and apply a
        function across all the returned series to put the datapoints together
        into one aggregate series.
        See the :meth:`list_series` method for a description of how the filter
        criteria are applied, and the :meth:`read_data` method for how to
        work with the start, end, and tz parameters.
        Valid aggregation functions are the same as valid rollup functions.
        NOTE(review): the mutable default arguments (``keys=[]`` etc.) are
        never mutated in this method, so they are harmless here, but they
        should not be mutated by future changes.
        :param string aggregation: the aggregation to perform
        :param keys: (optional) filter by one or more series keys
        :type keys: list or string
        :param tags: (optional) filter by one or more tags
        :type tags: list or string
        :param dict attrs: (optional) filter by one or more key-value
        attributes
        :param start: the start time for the data points
        :type start: string or Datetime
        :param end: the end time for the data points
        :type end: string or Datetime
        :param string rollup: (optional) the name of a rollup function to use
        :param string period: (optional) downsampling rate for the data
        :param string interpolationf: (optional) an interpolation function
        to run over the series
        :param string interpolation_period: (optional) the period to
        interpolate data into
        :param string tz: (optional) the timezone to place the data into
        :param int limit: (optional) maximum number of points per request
        :rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an
        iterator over :class:`tempodb.protocol.objects.DataPoint`
        objects"""
        # Aggregation spans many series, so it uses the top-level 'segment'
        # endpoint rather than a per-series URL.
        url = 'segment'
        vstart = check_time_param(start)
        vend = check_time_param(end)
        params = {
            'start': vstart,
            'end': vend,
            'key': keys,
            'tag': tags,
            'attr': attrs,
            'aggregation.fold': aggregation,
            'rollup.fold': rollup,
            'rollup.period': period,
            'interpolation.function': interpolationf,
            'interpolation.period': interpolation_period,
            'tz': tz,
            'limit': limit
        }
        url_args = endpoint.make_url_args(params)
        url = '?'.join([url, url_args])
        resp = self.session.get(url)
        return resp
@with_cursor(protocol.DataPointCursor, protocol.MultiPoint)
def read_multi(self, start, end, keys=None, rollup=None, period=None,
tz=None, tags=None, attrs=None, interpolationf=None,
interpolation_period=None, limit=5000):
"""Read data from multiple series given filter criteria. See the
:meth:`list_series` method for a description of how the filter
criteria are applied, and the :meth:`read_data` method for how to
work with the start, end, function, interval, and tz parameters.
:param keys: (optional) filter by one or more series keys
:type keys: list or string
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: (optional) filter by one or more key-value
attributes
:param start: the start time for the data points
:type start: string or Datetime
:param end: the end time for the data points
:type end: string or Datetime
:param string rollup: (optional) the name of a rollup function to use
:param string period: (optional) downsampling rate for the data
:param string tz: (optional) the timezone to place the data into
:param string interpolationf: (optional) an interpolation function
to run over the series
:param string interpolation_period: (optional) the period to
interpolate data into
:rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an
iterator over :class:`tempodb.protocol.objects.MultiPoint`
objects"""
url = 'multi'
vstart = check_time_param(start)
vend = check_time_param(end)
params = {
'key': keys,
'tag': tags,
'attr': attrs,
'start': vstart,
'end': vend,
'limit': limit,
'rollup.fold': rollup,
'rollup.period': period,
'interpolation.function': interpolationf,
'interpolation.period': interpolation_period,
'tz': tz
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp
#WRITE DATA METHODS
@with_response_type('Nothing')
def write_data(self, key, data, tags=[], attrs={}):
"""Write a set a datapoints into a series by its key. For now,
the tags and attributes arguments are ignored.
:param string key: the series to write data into
:param list data: a list of DataPoints to write
:rtype: :class:`tempodb.response.Response` object"""
url = make_series_url(key)
url = urlparse.urljoin(url + '/', 'data')
#revisit later if there are server changes to take these into
#account
#params = {
# 'tag': tag,
# 'attr': attr,
#}
#url_args = endpoint.make_url_args(params)
#url = '?'.join([url, url_args])
dlist = [d.to_dictionary() for d in data]
body = json.dumps(dlist)
resp = self.session.post(url, body)
return resp
@with_response_type('Nothing')
def write_multi(self, data):
"""Write a set a datapoints into multiple series by key or series ID.
Each :class:`tempodb.protocol.objects.DataPoint` object should have
either a key or id attribute set that indicates which series it will
be written into::
[
{"t": "2012-...", "key": "foo", "v": 1},
{"t": "2012-...", "id": "bar", "v": 1}
]
If a non-existent key or ID is passed in, a series will be created
for that key/ID and the data point written in to the new series.
:param list data: a list of DataPoints to write
:rtype: :class:`tempodb.response.Response` object"""
url = 'multi/'
dlist = [d.to_dictionary() for d in data]
body = json.dumps(dlist)
resp = self.session.post(url, body)
return resp
#INCREMENT METHODS
#@with_response_type('Nothing')
#def increment(self, key, data=[]):
# """Increment a series a data points by the specified amount. For
# instance, incrementing with the following data::
#
# data = [{"t": "2012-01-08T00:21:54.000+0000", "v": 4.164}]
#
# would increment the value at that time by 4.
#
# **Note:** all floating point values are converted to longs before
# the increment takes place.
#
# :param string key: the series whose value to increment
    #    :param list data: the data points to increment
# :rtype: :class:`tempodb.response.Response` object"""
# url = make_series_url(key)
# url = urlparse.urljoin(url + '/', 'increment')
# dlist = [d.to_dictionary() for d in data]
# body = json.dumps(dlist)
# resp = self.session.post(url, body)
# return resp
| |
from math import sqrt
from collections import namedtuple
import torch
from e3nn import o3
from e3nn.util import eval_code
def _prod(x):
out = 1
for a in x:
out *= a
return out
class TensorProduct(torch.nn.Module):
r"""Tensor Product with parametrizable paths
Parameters
----------
in1 : `Irreps` or list of tuple
List of first inputs ``(multiplicity, irrep[, variance])``.
in2 : `Irreps` or list of tuple
List of second inputs ``(multiplicity, irrep[, variance])``.
out : `Irreps` or list of tuple
List of outputs ``(multiplicity, irrep[, variance])``.
instructions : list of tuple
List of instructions ``(i_1, i_2, i_out, mode, train[, path_weight])``
it means: Put ``in1[i_1]`` :math:`\otimes` ``in2[i_2]`` into ``out[i_out]``
* mode: determines the way the multiplicities are treated, "uvw" is fully connected
* train: `True` of `False` if this path is weighed by a parameter
* path weight: how much this path should contribute to the output
normalization : {'component', 'norm'}
the way it is assumed the representation are normalized. If it is set to "norm":
.. math::
\| x \| = \| y \| = 1 \Longrightarrow \| x \otimes y \| = 1
internal_weights : bool
does the instance of the class contains the parameters
shared_weights : bool
are the parameters shared among the inputs extra dimensions
* `True` :math:`z_i = w x_i \otimes y_i`
* `False` :math:`z_i = w_i x_i \otimes y_i`
where here :math:`i` denotes a *batch-like* index
Examples
--------
Create a module that computes elementwise the cross-product of 16 vectors with 16 vectors :math:`z_u = x_u \wedge y_u`
>>> module = TensorProduct(
... "16x1o", "16x1o", "16x1e",
... [
... (0, 0, 0, "uuu", False)
... ]
... )
Now mix all 16 vectors with all 16 vectors to makes 16 pseudo-vectors :math:`z_w = \sum_{u,v} w_{uvw} x_u \wedge y_v`
>>> module = TensorProduct(
... [(16, (1, -1))],
... [(16, (1, -1))],
... [(16, (1, 1))],
... [
... (0, 0, 0, "uvw", True)
... ]
... )
With custom input variance and custom path weights:
>>> module = TensorProduct(
... "8x0o + 8x1o",
... [(16, "1o", 1/16)],
... "16x1e",
... [
... (0, 0, 0, "uvw", True, 3),
... (1, 0, 0, "uvw", True, 1),
... ]
... )
Example of a dot product:
>>> irreps = o3.Irreps("3x0e + 4x0o + 1e + 2o + 3o")
>>> module = TensorProduct(irreps, irreps, "0e", [
... (i, i, 0, 'uuw', False)
... for i, (mul, ir) in enumerate(irreps)
... ])
Implement :math:`z_u = x_u \otimes (\sum_v w_{uv} y_v)`
>>> module = TensorProduct(
... "8x0o + 7x1o + 3x2e",
... "10x0e + 10x1e + 10x2e",
... "8x0o + 7x1o + 3x2e",
... [
... # paths for the l=0:
... (0, 0, 0, "uvu", True), # 0x0->0
... # paths for the l=1:
... (1, 0, 1, "uvu", True), # 1x0->1
... (1, 1, 1, "uvu", True), # 1x1->1
... (1, 2, 1, "uvu", True), # 1x2->1
... # paths for the l=2:
... (2, 0, 2, "uvu", True), # 2x0->2
... (2, 1, 2, "uvu", True), # 2x1->2
... (2, 2, 2, "uvu", True), # 2x2->2
... ]
... )
Tensor Product using the xavier uniform initialization:
>>> irreps_1 = o3.Irreps("5x0e + 10x1o + 1x2e")
>>> irreps_2 = o3.Irreps("5x0e + 10x1o + 1x2e")
>>> irreps_out = o3.Irreps("5x0e + 10x1o + 1x2e")
>>> # create a Fully Connected Tensor Product
>>> module = o3.TensorProduct(
... irreps_1,
... irreps_2,
... irreps_out,
... [
... (i_1, i_2, i_out, "uvw", True, mul_1 * mul_2)
... for i_1, (mul_1, ir_1) in enumerate(irreps_1)
... for i_2, (mul_2, ir_2) in enumerate(irreps_2)
... for i_out, (mul_out, ir_out) in enumerate(irreps_out)
... if ir_out in ir_1 * ir_2
... ]
... )
>>> with torch.no_grad():
... for weight in module.parameters():
... # formula from torch.nn.init.xavier_uniform_
... mul_1, mul_2, mul_out = weight.shape
... a = (6 / (mul_1 * mul_2 + mul_out))**0.5
... _ = weight.uniform_(-a, a) # `_ = ` is only here because of pytest
>>> n = 1_000
>>> vars = module(irreps_1.randn(n, -1), irreps_2.randn(n, -1)).var(0)
>>> assert vars.min() > 1 / 3
>>> assert vars.max() < 3
"""
def __init__(
self,
in1,
in2,
out,
instructions,
normalization='component',
internal_weights=None,
shared_weights=None,
_specialized_code=True,
):
super().__init__()
assert normalization in ['component', 'norm'], normalization
if shared_weights is False and internal_weights is None:
internal_weights = False
if shared_weights is None:
shared_weights = True
if internal_weights is None:
internal_weights = True
assert shared_weights or not internal_weights
try:
in1 = o3.Irreps(in1)
except AssertionError:
pass
try:
in2 = o3.Irreps(in2)
except AssertionError:
pass
try:
out = o3.Irreps(out)
except AssertionError:
pass
in1 = [x if len(x) == 3 else x + (1.0,) for x in in1]
in2 = [x if len(x) == 3 else x + (1.0,) for x in in2]
out = [x if len(x) == 3 else x + (1.0,) for x in out]
self.irreps_in1 = o3.Irreps([(mul, ir) for mul, ir, _var in in1])
self.irreps_in2 = o3.Irreps([(mul, ir) for mul, ir, _var in in2])
self.irreps_out = o3.Irreps([(mul, ir) for mul, ir, _var in out])
in1_var = [var for _, _, var in in1]
in2_var = [var for _, _, var in in2]
out_var = [var for _, _, var in out]
self.shared_weights = shared_weights
z = '' if self.shared_weights else 'z'
# == TorchScript main operation templates ==
# The if-else block is needed to avoid an internal TorchScript compiler bug related to the early return.
code_out = f"""
from typing import List
import torch
from e3nn.util import broadcast_tensors
@torch.jit.script
def main(x1: torch.Tensor, x2: torch.Tensor, ws: List[torch.Tensor], w3j: List[torch.Tensor]) -> torch.Tensor:
x1, x2 = broadcast_tensors(x1, x2)
size = x1.shape[:-1]
outsize = size + ({self.irreps_out.dim},)
assert x1.shape[-1] == {self.irreps_in1.dim}, "Incorrect feature dimension for x1"
assert x2.shape[-1] == {self.irreps_in2.dim}, "Incorrect feature dimension for x2"
x1 = x1.reshape(-1, {self.irreps_in1.dim})
x2 = x2.reshape(-1, {self.irreps_in2.dim})
if x1.shape[0] == 0:
return x1.new_zeros(outsize)
else:
batch = x1.shape[0]
out = x1.new_zeros((batch, {self.irreps_out.dim}))
ein = torch.einsum
"""
code_right = f"""
from typing import List
import torch
from e3nn.util import broadcast_tensors
@torch.jit.script
def main(x2: torch.Tensor, ws: List[torch.Tensor], w3j: List[torch.Tensor]) -> torch.Tensor:
size = x2.shape[:-1]
outsize = size + ({self.irreps_in1.dim}, {self.irreps_out.dim},)
assert x2.shape[-1] == {self.irreps_in2.dim}, "Incorrect feature dimension for x2"
x2 = x2.reshape(-1, {self.irreps_in2.dim})
if x2.shape[0] == 0:
return x2.new_zeros(outsize)
else:
batch = x2.shape[0]
out = x2.new_zeros((batch, {self.irreps_in1.dim}, {self.irreps_out.dim}))
ein = torch.einsum
"""
# == end TorchScript templates ==
# Put everything in the else block
base_indent = 2
def indent_for_level(indent_level):
return ((base_indent + indent_level) * 4) * " "
s = indent_for_level(0)
wigners = []
for i_1, (mul_1, (l_1, p_1)) in enumerate(self.irreps_in1):
index_1 = self.irreps_in1[:i_1].dim
dim_1 = mul_1 * (2 * l_1 + 1)
code_out += f"{s}x1_{i_1} = x1[:, {index_1}:{index_1+dim_1}].reshape(batch, {mul_1}, {2 * l_1 + 1})\n"
code_out += "\n"
for i_2, (mul_2, (l_2, p_2)) in enumerate(self.irreps_in2):
index_2 = self.irreps_in2[:i_2].dim
dim_2 = mul_2 * (2 * l_2 + 1)
line = f"{s}x2_{i_2} = x2[:, {index_2}:{index_2+dim_2}].reshape(batch, {mul_2}, {2 * l_2 + 1})\n"
code_out += line
code_right += line
code_out += "\n"
code_right += "\n"
last_ss = None
Instruction = namedtuple("Instruction", "i_in1, i_in2, i_out, connection_mode, has_weight, path_weight, weight_shape")
instructions = [x if len(x) == 6 else x + (1.0,) for x in instructions]
self.instructions = [
Instruction(
i_in1, i_in2, i_out, connection_mode, has_weight, path_weight,
{
'uvw': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul, self.irreps_out[i_out].mul),
'uvu': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul),
'uvv': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul),
'uuw': (self.irreps_in1[i_in1].mul, self.irreps_out[i_out].mul),
'uuu': (self.irreps_in1[i_in1].mul,),
'uvuv': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul),
}[connection_mode] if has_weight else None
)
for i_in1, i_in2, i_out, connection_mode, has_weight, path_weight in instructions
]
index_w = -1
for ins in self.instructions:
mul_1, (l_1, p_1) = self.irreps_in1[ins.i_in1]
mul_2, (l_2, p_2) = self.irreps_in2[ins.i_in2]
mul_out, (l_out, p_out) = self.irreps_out[ins.i_out]
dim_1 = mul_1 * (2 * l_1 + 1)
dim_2 = mul_2 * (2 * l_2 + 1)
dim_out = mul_out * (2 * l_out + 1)
index_1 = self.irreps_in1[:ins.i_in1].dim
index_2 = self.irreps_in2[:ins.i_in2].dim
index_out = self.irreps_out[:ins.i_out].dim
assert p_1 * p_2 == p_out
assert abs(l_1 - l_2) <= l_out <= l_1 + l_2
if dim_1 == 0 or dim_2 == 0 or dim_out == 0:
continue
alpha = ins.path_weight * out_var[ins.i_out] / sum(in1_var[i.i_in1] * | |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import zip, range
from future.utils import viewkeys, viewitems
from collections import Counter, defaultdict, OrderedDict
from warnings import warn
import numpy as np
from scipy.stats import entropy
from skbio.stats.distance import DistanceMatrix
from skbio.io.util import open_file
from ._exception import SequenceCollectionError, StockholmParseError
class SequenceCollection(object):
"""Class for storing collections of biological sequences.
Parameters
----------
seqs : list of `skbio.sequence.BiologicalSequence` objects
The `skbio.sequence.BiologicalSequence` objects to load into
a new `SequenceCollection` object.
validate : bool, optional
If True, runs the `is_valid` method after construction and raises
`SequenceCollectionError` if ``is_valid == False``.
Raises
------
skbio.alignment.SequenceCollectionError
If ``validate == True`` and ``is_valid == False``.
See Also
--------
skbio.sequence.BiologicalSequence
skbio.sequence.NucleotideSequence
skbio.sequence.DNASequence
skbio.sequence.RNASequence
Alignment
skbio.parse.sequences
skbio.parse.sequences.parse_fasta
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s1
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
"""
@classmethod
def from_fasta_records(cls, fasta_records, seq_constructor,
validate=False):
r"""Initialize a `SequenceCollection` object
Parameters
----------
fasta_records : iterator of tuples
The records to load into a new `SequenceCollection` object. These
should be tuples of ``(sequence_id, sequence)``.
seq_constructor : skbio.sequence.BiologicalSequence
validate : bool, optional
If True, runs the `is_valid` method after construction and raises
`SequenceCollectionError` if ``is_valid == False``.
Returns
-------
SequenceCollection (or a derived class)
The new `SequenceCollection` object.
Raises
------
skbio.alignment.SequenceCollectionError
If ``validate == True`` and ``is_valid == False``.
See Also
--------
skbio.sequence.BiologicalSequence
skbio.sequence.NucleotideSequence
skbio.sequence.DNASequence
skbio.sequence.RNASequence
Alignment
skbio.parse.sequences
skbio.parse.sequences.parse_fasta
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.parse.sequences import parse_fasta
>>> from StringIO import StringIO
>>> from skbio.sequence import DNA
>>> fasta_f = StringIO('>seq1\nACCGT\n>seq2\nAACCGGT\n')
>>> s1 = SequenceCollection.from_fasta_records(
... parse_fasta(fasta_f), DNA)
>>> s1
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
>>> records = [('seq1', 'ACCGT'), ('seq2', 'AACCGGT')]
>>> s1 = SequenceCollection.from_fasta_records(records, DNA)
>>> s1
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
"""
data = []
for seq_id, seq in fasta_records:
try:
id, description = seq_id.split(None, 1)
except ValueError:
id = seq_id.strip()
description = None
data.append(seq_constructor(seq, id=id,
description=description))
return cls(data, validate=validate)
def __init__(self, seqs, validate=False):
self._data = seqs
self._id_to_index = {}
for i, seq in enumerate(self._data):
id = seq.id
if id in self:
raise SequenceCollectionError(
"All sequence ids must be unique, but "
"id %s is present multiple times." % id)
else:
self._id_to_index[seq.id] = i
# This is bad because we're making a second pass through the sequence
# collection to validate. We'll want to avoid this, but it's tricky
# because different subclasses will want to define their own is_valid
# methods.
if validate and not self.is_valid():
raise SequenceCollectionError(
"%s failed to validate." % self.__class__.__name__)
def __contains__(self, id):
r"""The in operator.
Parameters
----------
id : str
The id to look up in the `SequenceCollection`.
Returns
-------
bool
Indicates whether `id` corresponds to a sequence id
in the `SequenceCollection`.
.. shownumpydoc
"""
return id in self._id_to_index
def __eq__(self, other):
r"""The equality operator.
Parameters
----------
other : `SequenceCollection`
The `SequenceCollection` to test for equality against.
Returns
-------
bool
Indicates whether `self` and `other` are equal.
Notes
-----
`SequenceCollection` objects are equal if they are the same type,
contain the same number of sequences, and if each of the
`skbio.sequence.BiologicalSequence` objects, in order, are equal.
.. shownumpydoc
"""
if self.__class__ != other.__class__:
return False
elif len(self) != len(other):
return False
else:
for self_seq, other_seq in zip(self, other):
if self_seq != other_seq:
return False
return True
def __getitem__(self, index):
r"""The indexing operator.
Parameters
----------
index : int, str
The position or sequence id of the
`skbio.sequence.BiologicalSequence` to return from the
`SequenceCollection`.
Returns
-------
`skbio.sequence.BiologicalSequence`
The `skbio.sequence.BiologicalSequence` at the specified
index in the `SequenceCollection`.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s1[0]
<DNASequence: ACCGT (length: 5)>
>>> s1["seq1"]
<DNASequence: ACCGT (length: 5)>
.. shownumpydoc
"""
if isinstance(index, str):
return self.get_seq(index)
else:
return self._data[index]
def __iter__(self):
r"""The iter operator.
Returns
-------
iterator
`skbio.sequence.BiologicalSequence` iterator for the
`SequenceCollection`.
.. shownumpydoc
"""
return iter(self._data)
def __len__(self):
r"""The len operator.
Returns
-------
int
The number of sequences in the `SequenceCollection`.
.. shownumpydoc
"""
return self.sequence_count()
def __ne__(self, other):
r"""The inequality operator.
Parameters
----------
other : `SequenceCollection`
Returns
-------
bool
Indicates whether self and other are not equal.
Notes
-----
See `SequenceCollection.__eq__` for a description of what it means for
a pair of `SequenceCollection` objects to be equal.
.. shownumpydoc
"""
return not self.__eq__(other)
def __repr__(self):
r"""The repr method.
Returns
-------
str
Returns a string representation of the object.
Notes
-----
String representation contains the class name, the number of sequences
in the `SequenceCollection` (n), and the mean and standard deviation
sequence length.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(repr(s1))
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
.. shownumpydoc
"""
cn = self.__class__.__name__
count, center, spread = self.distribution_stats()
return "<%s: n=%d; mean +/- std length=%.2f +/- %.2f>" \
% (cn, count, center, spread)
def __reversed__(self):
"""The reversed method.
Returns
-------
iterator
`skbio.sequence.BiologicalSequence` iterator for the
`SequenceCollection` in reverse order.
.. shownumpydoc
"""
return reversed(self._data)
def __str__(self):
r"""The str method.
Returns
-------
str
Fasta-formatted string of all sequences in the object.
.. shownumpydoc
"""
return self.to_fasta()
def distances(self, distance_fn):
"""Compute distances between all pairs of sequences
Parameters
----------
distance_fn : function
Function for computing the distance between a pair of sequences.
This must take two sequences as input (as
`skbio.sequence.BiologicalSequence` objects) and return a
single integer or float value.
Returns
-------
skbio.DistanceMatrix
Matrix containing the distances between all pairs of sequences.
Raises
------
skbio.util.exception.BiologicalSequenceError
If ``len(self) != len(other)`` and ``distance_fn`` ==
``scipy.spatial.distance.hamming``.
See Also
--------
skbio.DistanceMatrix
scipy.spatial.distance.hamming
Examples
--------
>>> from scipy.spatial.distance import hamming
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> seqs = [DNA("ACCGGGTT", id="s1"),
... DNA("ACTTGGTT", id="s2"),
... DNA("ACTAGGTT", id="s3")]
>>> a1 = SequenceCollection(seqs)
>>> print(a1.distances(hamming))
3x3 distance matrix
IDs:
s1, s2, s3
Data:
[[ 0. 0.25 0.25 ]
[ 0.25 0. 0.125]
[ 0.25 0.125 0. ]]
"""
sequence_count = self.sequence_count()
dm = np.zeros((sequence_count, sequence_count))
ids = []
for i in range(sequence_count):
self_i = self[i]
ids.append(self_i.id)
for j in range(i):
dm[i, j] = dm[j, i] = self_i.distance(self[j], distance_fn)
return DistanceMatrix(dm, ids)
def distribution_stats(self, center_f=np.mean, spread_f=np.std):
r"""Return sequence count, and center and spread of sequence lengths
Parameters
----------
center_f : function
Should take a list-like object and return a single value
representing the center of the distribution.
spread_f : function
Should take a list-like object and return a single value
representing the spread of the distribution.
Returns
-------
tuple of (int, float, float)
The sequence count, center of length distribution, spread of length
distribution.
Notes
-----
Alternatives for `center_f` and `spread_f` could be median and median
absolute deviation.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s1.distribution_stats()
(2, 6.0, 1.0)
"""
if self.is_empty():
return (0, 0.0, 0.0)
else:
sequence_count = self.sequence_count()
sequence_lengths = self.sequence_lengths()
return (sequence_count, center_f(sequence_lengths),
spread_f(sequence_lengths))
def degap(self):
r"""Return a new `SequenceCollection` with all gap characters removed.
Returns
-------
SequenceCollection
A new `SequenceCollection` where
`skbio.sequence.BiologicalSequence.degap` has been called on
each sequence.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A--CCGT.', id="seq1"),
... DNA('.AACCG-GT.', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s2 = s1.degap()
>>> s2
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
"""
return SequenceCollection([seq.degap() for seq in self])
def get_seq(self, id):
r"""Return a sequence from the `SequenceCollection` by its id.
Parameters
----------
id, str
The id of the sequence to return.
Returns
-------
skbio.sequence.BiologicalSequence
The `skbio.sequence.BiologicalSequence` with `id`.
Raises
------
KeyError
If | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File game_pong.py created on 12:56 2018/1/6
@author: <NAME>
@version: 1.0
"""
from math import sqrt
from interfaces import *
import pygame
class PongState(State):
    """State of one Pong game in normalized [0, 1] field coordinates.

    The state dict ``s`` holds: ``ball_x``/``ball_y`` (ball position),
    ``v_x``/``v_y`` (ball velocity per step) and ``p1_y``/``p2_y`` (paddle
    centers; player 1 on the left, player 2 on the right — see
    PongSimulator._step_env). ``flag`` marks the outcome from player 1's
    point of view: 0 = still playing, 1 = win, -1 = loss.
    """
    def __init__(self, s, flag=0):
        # s: state dict (see class docstring); flag: terminal outcome marker.
        super(PongState, self).__init__(s)
        self.flag = flag
    @staticmethod
    def get_initial_state():
        # Fresh serve: ball at center, horizontal speed toward a random
        # side, small random vertical component, both paddles centered.
        # NOTE(review): `random` and `np` are not imported in this file's
        # visible header — presumably re-exported by `interfaces`; confirm.
        d = {
            'ball_x': 0.5, 'ball_y': 0.5,
            'v_x': random.choice([0.03, -0.03]), 'v_y': np.random.randn()*0.01,
            'p1_y': 0.5, 'p2_y': 0.5,
        }
        return PongState(d)
    @staticmethod
    def _feature_one_hot(ball_y, ball_x, v_x, v_y, p1_y, p2_y) -> list:
        # Discretize the continuous state into a 35-slot one-hot-per-group
        # vector: 11 ball-height buckets, 11 ball-x buckets, 5 paddle
        # buckets, 2 x-direction slots, 6 |v_y| magnitude buckets.
        h_idx = int((ball_y + 0.05) * 10) # [0 - 10] (-0.05 0.05 0.15 ... 0.95 1.05)
        b_idx = int((ball_x + 0.05) * 10) # [0 - 10] (-0.05 0.05 0.15 ... 0.95 1.05)
        p_idx = int(p1_y * 5) # [0 - 4] (0 0.2 0.4 0.6 0.8 1)
        u_idx = 1 if v_x > 0 else -1
        v_idx = min(int(abs(v_y) * 100), 5) # [0 - 5]
        arr = [0] * (11 + 11 + 5 + 2 + 6)
        arr[h_idx] = 1
        arr[11 + b_idx] = 1
        arr[11 + 11 + p_idx] = 1
        # NOTE(review): when v_x <= 0, u_idx == -1 makes this index resolve
        # to 26, the last slot of the *paddle* group, not the direction
        # group — looks like an off-by-one; confirm intended encoding.
        arr[11 + 11 + 5 + u_idx] = 1
        arr[11 + 11 + 5 + 2 + v_idx] = 1
        return arr
    def feature_for_player1(self) -> np.ndarray:
        # NOTE(review): `PongState._feature` is not defined anywhere in the
        # visible code (only `_feature_one_hot` and `feature_func` exist) —
        # calling this as written would raise AttributeError; confirm.
        return PongState._feature(**self.s)
    def feature_for_player2(self) -> np.ndarray:
        # Mirror the state horizontally so player 2 sees itself as the
        # left-hand player. Same undefined `_feature` concern as above.
        d = self.s
        return PongState._feature(1-d['ball_x'], d['ball_y'], -d['v_x'],
                                  d['v_y'], d['p2_y'], d['p1_y'])
    def feature_func(self, view: int = 1) -> np.ndarray:
        """Build the feature vector for one player's point of view.

        common: ball_y, v_y
        different: ball_x, v_x, p_self_x (player1's view)

        Returns the raw state values, the discretized one-hot encoding, a
        terminal indicator (`term`: -1/0/1 depending on which side the ball
        left the field), and the paddle-to-ball vertical distance.
        """
        ball_x = self.s['ball_x']
        ball_y = self.s['ball_y']
        vx = self.s['v_x']
        vy = self.s['v_y']
        p1y = self.s['p1_y']
        p2y = self.s['p2_y']
        common = []
        # term: which edge the ball has crossed (player 1's sign convention).
        term = -1 if ball_x < 0 else 1 if ball_x > 1 else 0
        v1 = [ball_y, ball_x, vx, vy, p1y, p2y]
        v2 = [ball_y, 1-ball_x, -vx, vy, p2y, p1y]  # horizontally mirrored
        if view == 1:
            one_hot = PongState._feature_one_hot(*v1)
            return np.array(common + v1 + one_hot + [term, abs(p1y - ball_y)])
        else:
            one_hot = PongState._feature_one_hot(*v2)
            return np.array(common + v2 + one_hot + [-term, abs(p2y - ball_y)])
    def stableQ(self):
        # Pong states keep evolving every step; never "stable".
        return False
    def terminateQ(self):
        # Terminal exactly when an outcome flag has been set.
        return self.flag != 0
    def reward(self):
        # Reward equals the outcome flag: +1 win, -1 loss, 0 otherwise.
        return self.flag
    def __str__(self):
        # ASCII one-line rendering: walls '|' at both ends, ball 'o' with a
        # '<'/'>' marker showing its horizontal direction.
        if self.flag == 1:
            return '-|-|W|I|N|-|-'
        elif self.flag == -1:
            return '-|-|L|O|S|-|-'
        a = int((self.s['ball_x'] + 0.05) * 10)  # ball column in [0, 10]
        k = [' '] * 13
        k[1] = k[-2] = '|'
        k[a+1] = 'o'
        if self.s['v_x'] > 0:
            k[a+2] = '>'
        else:
            k[a] = '<'
        return ''.join(k)
    def visual(self, w=None, nn=None):
        # Render the ASCII state, optionally annotated with a linear value
        # estimate (features . w) and, if a network is given, per-view
        # scores. NOTE(review): `score_by` is not defined in this class —
        # presumably inherited from `State`; confirm.
        s = str(self)
        if w is not None:
            d = np.dot(self.feature_func(), w)
            if nn is None:
                s += '(%+.2f)' % d
            else:
                nv1 = self.score_by(nn, view=1)
                nv2 = self.score_by(nn, view=2)
                s += '(%+.2f|%+.2f|%+.2f)' % (d, nv1, nv2)
        return s
class PongAction(Action):
    """Paddle action for Pong: 0 = stay, 1 = move down, -1 = move up."""

    def __init__(self, a):
        super(PongAction, self).__init__(a)
        self.a = a

    @staticmethod
    def get_action_spaces():
        # The full discrete action space, in the canonical order.
        stay, down, up = PongAction(0), PongAction(1), PongAction(-1)
        return (stay, down, up)

    def __str__(self):
        if self.a == 0:
            return '-'
        if self.a == 1:
            return 'v'
        return '^'
class FollowBallPolicy(Policy):
    """Naive policy: move the paddle toward the ball's current height."""

    def __init__(self):
        super(FollowBallPolicy, self).__init__(PongAction)

    def action(self, state: State, player=1):
        # Track the ball vertically, with a small 0.01 dead-zone so the
        # paddle does not jitter when already aligned.
        ball_y = state.s['ball_y']
        paddle_y = state.s['p2_y'] if player != 1 else state.s['p1_y']
        if ball_y > paddle_y + 0.01:
            return PongAction(1)
        if ball_y < paddle_y - 0.01:
            return PongAction(-1)
        return PongAction(0)
class AccuratePongPolicy(Policy):
    """Policy that predicts where the incoming ball will cross its edge.

    While the ball moves away (or is still far), it simply tracks the
    ball's height; once the ball approaches, it extrapolates the crossing
    height (folding wall reflections back into [0, 1]) and moves there.
    """

    def __init__(self):
        super(AccuratePongPolicy, self).__init__(PongAction)

    def action(self, state: State, player=1):
        s = state.s
        ball_y = s['ball_y']
        vy = s['v_y']
        # Mirror the field horizontally for player 2 so the rest of the
        # logic can always assume "my paddle is on the left".
        if player == 1:
            paddle_y, ball_x, vx = s['p1_y'], s['ball_x'], s['v_x']
        else:
            paddle_y, ball_x, vx = s['p2_y'], 1 - s['ball_x'], -s['v_x']
        if vx > 0 or ball_x > 0.4:
            # Ball receding or still distant: just follow it vertically,
            # with a 0.01 dead-zone.
            if ball_y > paddle_y + 0.01:
                return PongAction(1)
            if ball_y < paddle_y - 0.01:
                return PongAction(-1)
            return PongAction(0)
        # Extrapolate the crossing height after `steps` whole time steps.
        steps = int(ball_x / abs(vx))
        target = vy * steps + ball_y
        # Fold bounces off the top (y<0) and bottom (y>1) walls back into
        # the playable range.
        while target > 1 or target < 0:
            if target < 0:
                target = -target
            else:
                target = 2 - target
        return PongAction(-1) if paddle_y > target else PongAction(1)
class PongSimulator(BaseSimulator):
"""PongSimulator: Simulator of Pong Game"""
    def __init__(self):
        """Set up the simulator with the GUI disabled and scores at zero."""
        super(PongSimulator, self).__init__()
        self.gui = False    # whether the pygame window is active (see use_gui)
        self.win = 0        # running score drawn for the left player in update()
        self.lose = 0       # running score drawn for the right player in update()
        self.helper = None  # optional analyzer used for the update() overlay
        self.GAME_FPS = 3   # frame-rate constant; not consumed in the code
                            # visible here — presumably used by the game loop
    def initial_state(self):
        """Return a fresh randomized serve (see PongState.get_initial_state)."""
        return PongState.get_initial_state()
    def _step_env(self, state: PongState, copy=True):
        """Advance the ball physics by one time step.

        Moves the ball, reflects it off the top/bottom walls, bounces it
        off a paddle (with randomized speed-up) or sets the terminal flag
        when a paddle is missed, and clamps the overall ball speed.

        Parameters
        ----------
        state : PongState
            The state to advance.
        copy : bool
            When True, work on a copy of the state dict and return a new
            PongState; when False, mutate ``state.s`` in place and return
            the same object.
        """
        self.step_cnt += 1
        TIME_DELTA = 1
        HALF_PH = 0.1  # Half of paddle height
        s = state.s  # name alias
        flag = 0  # terminal outcome: 0 in play, 1 win, -1 loss (player 1 view)
        if copy:
            s = s.copy()
        # Integrate ball position one step.
        s['ball_x'] += s['v_x'] * TIME_DELTA
        s['ball_y'] += s['v_y'] * TIME_DELTA
        if s['ball_y'] < 0:  # Hit the top: reflect position and velocity
            s['ball_y'] = -s['ball_y']
            s['v_y'] = -s['v_y']
        elif s['ball_y'] > 1:  # Hit the bottom: reflect position and velocity
            s['ball_y'] = 2 - s['ball_y']
            s['v_y'] = -s['v_y']
        # ball in left
        if s['ball_x'] < 0:
            # Paddle catch window is the paddle half-height plus a 0.04 margin.
            if s['p1_y'] - HALF_PH - 0.04 <= s['ball_y'] <= s['p1_y'] + HALF_PH + 0.04:
                # Hit the left paddle: reflect, then perturb the velocity —
                # U jitters the horizontal speed, V adds noise biased by the
                # hit offset from the paddle center.
                offset = (s['ball_y'] - s['p1_y']) / HALF_PH
                U = np.random.uniform(-0.015, 0.015)
                V = (np.random.randn() + offset*0.1) * 0.03
                s['ball_x'] = -s['ball_x']
                s['v_x'] = max(-s['v_x'] + U, 0.03)  # v_x should > 0
                s['v_y'] = s['v_y'] + V
            else:
                flag = -1  # player 1 missed: loss
        # ball in right
        if s['ball_x'] > 1:
            if s['p2_y'] - HALF_PH - 0.04 <= s['ball_y'] <= s['p2_y'] + HALF_PH + 0.04:
                # Hit the right paddle (mirror of the left-paddle case).
                offset = (s['ball_y'] - s['p2_y']) / HALF_PH
                U = np.random.uniform(-0.015, 0.015)
                V = (np.random.randn() + offset*0.1) * 0.03
                s['ball_x'] = 2 - s['ball_x']
                s['v_x'] = min(-s['v_x'] + U, -0.03)  # v_x should < 0
                s['v_y'] = s['v_y'] + V
            else:
                flag = 1  # player 2 missed: win
        # Clamp the speed magnitude to 0.08, preserving direction.
        mag = sqrt(s['v_x'] ** 2 + s['v_y'] ** 2)
        if mag > 0.08:
            s['v_x'] *= 0.08 / mag
            s['v_y'] *= 0.08 / mag
        if copy:
            return PongState(s, flag=flag)  # wrap s with State
        else:
            state.flag = flag
            return state  # return the parameter, since the reference has changed
    def _step_act(self, state: PongState, a1: PongAction, a2: PongAction, copy=False):
        """Apply both players' paddle actions, redrawing if the GUI is on.

        Chains next1 (player 1's move) into next2 (player 2's move); with
        ``copy=False`` the input state is mutated and returned.
        """
        self.act_cnt += 1
        new_state = self.next2(self.next1(state, a1, copy), a2, copy)
        if self.gui:
            self.update(new_state)
        return new_state
def next1(self, state: PongState, a1: PongAction, copy=False):
s = state.s # name alis
if copy:
s = s.copy()
s['p1_y'] += a1.a * 0.04
s['p1_y'] = max(0.1, min(0.9, s['p1_y']))
if copy:
return PongState(s)
else:
return state
def next2(self, state: PongState, a2: PongAction, copy=False):
s = state.s # name alis
if copy:
s = s.copy()
s['p2_y'] += a2.a * 0.04
s['p2_y'] = max(0.1, min(0.9, s['p2_y']))
if copy:
return PongState(s)
else:
return state
    def use_gui(self, status=False):
        """Enable/disable GUI rendering; creates the window on first enable."""
        self.gui = status
        if status:
            self.gui_init()
    def gui_init(self):
        """Create the 600x600 pygame window that update() draws into."""
        WIDTH = 600
        HEIGHT = 600
        pygame.init()
        # Clock for frame-rate limiting; not ticked in the visible code —
        # presumably used with GAME_FPS in the game loop.
        self.fps = pygame.time.Clock()
        self.window = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
        pygame.display.set_caption('Pong GUI')
def update(self, state):
WHITE = (255, 255, 255)
BALL_COLOR = (44, 62, 80)
PAD_COLOR = (41, 128, 185)
BACKGROUND_COLOR = (207, 216, 220)
SCORE_COLOR = (25, 118, 210)
WIDTH = 600
HEIGHT = 600
BALL_RADIUS = 8
PAD_WIDTH = 8
PAD_HEIGHT = HEIGHT * 0.2
HALF_PAD_WIDTH = PAD_WIDTH // 2
HALF_PAD_HEIGHT = PAD_HEIGHT // 2
paddle1_x = HALF_PAD_WIDTH - 1
paddle2_x = WIDTH + 1 - HALF_PAD_WIDTH
canvas = self.window
canvas.fill(BACKGROUND_COLOR)
pygame.draw.line(canvas, WHITE, [WIDTH // 2, 0], [WIDTH // 2, HEIGHT], 1)
pygame.draw.line(canvas, WHITE, [PAD_WIDTH, 0], [PAD_WIDTH, HEIGHT], 1)
pygame.draw.line(canvas, WHITE, [WIDTH - PAD_WIDTH, 0], [WIDTH - PAD_WIDTH, HEIGHT], 1)
ball_pos = (int(state.s['ball_x'] * WIDTH), int(state.s['ball_y'] * HEIGHT))
paddle1_pos = (paddle1_x, int(state.s['p1_y'] * HEIGHT))
paddle2_pos = (paddle2_x, int(state.s['p2_y'] * HEIGHT))
pygame.draw.circle(canvas, BALL_COLOR, ball_pos, BALL_RADIUS, 0)
pygame.draw.polygon(canvas, PAD_COLOR, [[paddle1_pos[0] - HALF_PAD_WIDTH, paddle1_pos[1] - HALF_PAD_HEIGHT],
[paddle1_pos[0] - HALF_PAD_WIDTH, paddle1_pos[1] + HALF_PAD_HEIGHT],
[paddle1_pos[0] + HALF_PAD_WIDTH, paddle1_pos[1] + HALF_PAD_HEIGHT],
[paddle1_pos[0] + HALF_PAD_WIDTH, paddle1_pos[1] - HALF_PAD_HEIGHT]], 0)
pygame.draw.polygon(canvas, PAD_COLOR, [[paddle2_pos[0] - HALF_PAD_WIDTH, paddle2_pos[1] - HALF_PAD_HEIGHT],
[paddle2_pos[0] - HALF_PAD_WIDTH, paddle2_pos[1] + HALF_PAD_HEIGHT],
[paddle2_pos[0] + HALF_PAD_WIDTH, paddle2_pos[1] + HALF_PAD_HEIGHT],
[paddle2_pos[0] + HALF_PAD_WIDTH, paddle2_pos[1] - HALF_PAD_HEIGHT]], 0)
font1 = pygame.font.SysFont("Comic Sans MS", 25)
label1 = font1.render("Score %s" % str(self.win)[:5], 1, SCORE_COLOR)
canvas.blit(label1, (70, 20))
font2 = pygame.font.SysFont("Comic Sans MS", 25)
label2 = font2.render("Score %s" % str(self.lose)[:5], 1, SCORE_COLOR)
canvas.blit(label2, (WIDTH - 50 - 120, 20))
if self.helper is not None:
nn_score, sim_score, state_score = self.helper.analysis_state(state)
label3 = font1.render("NN: %+.2f" % nn_score, 1, SCORE_COLOR)
canvas.blit(label3, (70, 70))
label4 = font1.render("Sim: %+.2f" % sim_score, 1, SCORE_COLOR)
canvas.blit(label4, | |
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 63, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 63, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.lcdNumber_waterTemperature.setPalette(palette)
self.lcdNumber_waterTemperature.setFrameShadow(QtGui.QFrame.Raised)
self.lcdNumber_waterTemperature.setLineWidth(1)
self.lcdNumber_waterTemperature.setObjectName(_fromUtf8("lcdNumber_waterTemperature"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.FieldRole, self.lcdNumber_waterTemperature)
self.label_9 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_9)
self.label_10 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_10.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_10)
self.lcdNumber_waterConductivity = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 63, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 63, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 63, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.lcdNumber_waterConductivity.setPalette(palette)
self.lcdNumber_waterConductivity.setObjectName(_fromUtf8("lcdNumber_waterConductivity"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.FieldRole, self.lcdNumber_waterConductivity)
self.label_7 = QtGui.QLabel(self.horizontalLayoutWidget_2)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_7)
self.lcdNumber_waterpH = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 63, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 63, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 63, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.lcdNumber_waterpH.setPalette(palette)
self.lcdNumber_waterpH.setFrameShadow(QtGui.QFrame.Raised)
self.lcdNumber_waterpH.setLineWidth(1)
self.lcdNumber_waterpH.setObjectName(_fromUtf8("lcdNumber_waterpH"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.FieldRole, self.lcdNumber_waterpH)
self.horizontalLayout_2.addLayout(self.formLayout_3)
self.line = QtGui.QFrame(self.horizontalLayoutWidget_2)
self.line.setEnabled(True)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(128, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 212, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(42, 85, | |
import numpy as np
from .._wcs_utils import WCSInversionInterpolator, WCSGridScalarInterpolator
from ...wcs import FastHashingWCS
# Fixture: full FITS header of a DES coadd tile SCI image (tile
# DES2122+0001, i band, a tangent-plane RA---TAN/DEC--TAN WCS) wrapped in
# the project's FastHashingWCS. The tests below only use its WCS keywords
# (ctype/crval/crpix/cd matrix); the rest of the header is carried along
# verbatim. NOTE(review): values look like a RICE-compressed SWarp coadd
# header -- confirm against the original DESDM product if regenerated.
COADD_WCS = FastHashingWCS({
    'xtension': 'BINTABLE',
    'bitpix': 8,
    'naxis': 2,
    'naxis1': 24,
    'naxis2': 10000,
    'pcount': 83807884,
    'gcount': 1,
    'tfields': 3,
    'ttype1': 'COMPRESSED_DATA',
    'tform1': '1PB(8590)',
    'ttype2': 'ZSCALE ',
    'tform2': '1D ',
    'ttype3': 'ZZERO ',
    'tform3': '1D ',
    'zimage': True,
    'ztile1': 10000,
    'ztile2': 1,
    'zcmptype': 'RICE_ONE',
    'zname1': 'BLOCKSIZE',
    'zval1': 32,
    'zname2': 'BYTEPIX ',
    'zval2': 4,
    'zsimple': True,
    'zbitpix': -32,
    'znaxis': 2,
    'znaxis1': 10000,
    'znaxis2': 10000,
    'zextend': True,
    'extname': 'SCI ',
    'equinox': 2000.0,
    'mjd-obs': 56545.15853046,
    'radesys': 'ICRS ',
    'ctype1': 'RA---TAN',
    'cunit1': 'deg ',
    'crval1': 320.688891,
    'crpix1': 5000.5,
    'cd1_1': -7.305555555556e-05,
    'cd1_2': 0.0,
    'ctype2': 'DEC--TAN',
    'cunit2': 'deg ',
    'crval2': 0.016667,
    'crpix2': 5000.5,
    'cd2_1': 0.0,
    'cd2_2': 7.305555555556e-05,
    'exptime': 450.0,
    'gain': 19.93043199192,
    'saturate': 31274.22430892,
    'softname': 'SWarp ',
    'softvers': '2.40.0 ',
    'softdate': '2016-09-19',
    'softauth': '2010-2012 IAP/CNRS/UPMC',
    'softinst': 'IAP http://www.iap.fr',
    'author': 'unknown ',
    'origin': 'nid18189',
    'date': '2016-10-13T00:30:52',
    'combinet': 'WEIGHTED',
    'bunit': 'electrons',
    'filter': 'i DECam SDSS c0003 7835.0 1470.0',
    'band': 'i ',
    'tilename': 'DES2122+0001',
    'tileid': 90126,
    'resampt1': 'LANCZOS3',
    'centert1': 'MANUAL ',
    'pscalet1': 'MANUAL ',
    'resampt2': 'LANCZOS3',
    'centert2': 'MANUAL ',
    'pscalet2': 'MANUAL ',
    'desfname': 'DES2122+0001_r2601p01_i.fits',
    'pipeline': 'multiepoch',
    'unitname': 'DES2122+0001',
    'attnum': 1,
    'eupsprod': 'MEPipeline',
    'eupsver': 'Y3A1+0 ',
    'reqnum': 2601,
    'des_ext': 'IMAGE ',
    'fzalgor': 'RICE_1 ',
    'fzdthrsd': 'CHECKSUM',
    'fzqvalue': 16,
    'fzqmethd': 'SUBTRACTIVE_DITHER_2',
    'ra_cent': 320.688927527779,
    'dec_cent': 0.016630472222219,
    'rac1': 321.054126640963,
    'decc1': -0.348562220896466,
    'rac2': 320.323655359037,
    'decc2': -0.348562220896466,
    'rac3': 320.323654004521,
    'decc3': 0.381895543631536,
    'rac4': 321.054127995479,
    'decc4': 0.381895543631536,
    'racmin': 320.323654004521,
    'racmax': 321.054127995479,
    'deccmin': -0.348562220896466,
    'deccmax': 0.381895543631536,
    'crossra0': 'N ',
    'magzero': 30.0,
    'history': "'SUBTRACTIVE_DITHER_2' / Pixel Quantization Algorithm",
    'zquantiz': 'SUBTRACTIVE_DITHER_2',
    'zdither0': 5591,
    'checksum': 'ZJHKaGGKUGGKZGGK',
    'datasum': 1452922543})
# Fixture: full FITS header of a single-epoch (SE) DECam CCD image
# (exposure D00232321, CCD 57/N26, i band) with a TPV distortion WCS
# (RA---TPV/DEC--TPV plus pv*_* polynomial terms). Paired with COADD_WCS
# above to exercise the coadd<->SE coordinate round trips in the tests.
SE_WCS = FastHashingWCS({
    'xtension': 'BINTABLE',
    'bitpix': 8,
    'naxis': 2,
    'naxis1': 24,
    'naxis2': 4096,
    'pcount': 7424352,
    'gcount': 1,
    'tfields': 3,
    'ttype1': 'COMPRESSED_DATA',
    'tform1': '1PB(2305)',
    'ttype2': 'ZSCALE ',
    'tform2': '1D ',
    'ttype3': 'ZZERO ',
    'tform3': '1D ',
    'zimage': True,
    'ztile1': 2048,
    'ztile2': 1,
    'zcmptype': 'RICE_ONE',
    'zname1': 'BLOCKSIZE',
    'zval1': 32,
    'zname2': 'BYTEPIX ',
    'zval2': 4,
    'zsimple': True,
    'zbitpix': -32,
    'znaxis': 2,
    'znaxis1': 2048,
    'znaxis2': 4096,
    'zextend': True,
    'extname': 'SCI ',
    'proctype': 'RAW ',
    'prodtype': 'image ',
    'pixscal1': 0.27,
    'pixscal2': 0.27,
    'obs-long': 70.81489,
    'telescop': 'CTIO 4.0-m telescope',
    'observat': 'CTIO ',
    'obs-lat': -30.16606,
    'obs-elev': 2215.0,
    'instrume': 'DECam ',
    'expreq': 90.0,
    'exptime': 90.0,
    'darktime': 91.1224601,
    'obsid': 'ct4m20130910t034817',
    'date-obs': '2013-09-10T03:48:17.031944',
    'time-obs': '03:48:17.031944',
    'mjd-obs': 56545.15853046,
    'openshut': '2013-09-10T03:48:17.084340',
    'timesys': 'UTC ',
    'expnum': 232321,
    'object': 'DES survey hex -395+6 tiling 1',
    'obstype': 'raw_obj ',
    'camshut': 'Open ',
    'program': 'survey ',
    'observer': 'TE, ES ',
    'proposer': 'Frieman ',
    'dtpi': 'Frieman ',
    'propid': '2012B-0001',
    'excluded': ' ',
    'hex': 4235,
    'tiling': 1,
    'seqid': '-395+6 enqueued on 2013-09-10 03:30:33Z by SurveyTactician',
    'seqnum': 1,
    'seqtot': 1,
    'aos': True,
    'bcam': True,
    'guider': 1,
    'skystat': True,
    'filter': 'i DECam SDSS c0003 7835.0 1470.0',
    'filtpos': 'cassette_1',
    'instance': 'DECam_20130909',
    'errors': None,
    'telequin': 2000.0,
    'telstat': 'Track ',
    'ra': '21:21:57.696',
    'dec': '00:36:52.200',
    'telra': '21:21:57.697',
    'teldec': '00:36:48.100',
    'ha': '00:59:45.460',
    'zd': 33.95,
    'az': 332.6,
    'telfocus': (1479.03, -2947.54, 1803.5, 127.89, -57.91, -0.0),
    'vsub': True,
    'gskyphot': False,
    'lskyphot': True,
    'windspd': 12.231,
    'winddir': 179.0,
    'humidity': 15.0,
    'pressure': 781.0,
    'dimmsee': 0.617,
    'outtemp': 18.3,
    'airmass': 1.21,
    'gskyvar': 0.05,
    'gskyhot': 0.18,
    'lskyvar': 0.01,
    'lskyhot': 0.0,
    'lskypow': 0.01,
    'msurtemp': 17.875,
    'mairtemp': 18.3,
    'uptrtemp': 19.589,
    'lwtrtemp': -999.0,
    'pmostemp': 18.3,
    'utn-temp': 19.48,
    'uts-temp': 19.545,
    'utw-temp': 19.8,
    'ute-temp': 19.53,
    'pmn-temp': 17.2,
    'pms-temp': 17.8,
    'pmw-temp': 18.7,
    'pme-temp': 17.8,
    'domelow': 19.465,
    'domehigh': -999.0,
    'domeflor': 17.5,
    'g-meanx': 0.0463,
    'g-meany': 0.0632,
    'donutfs4': [0.64, 1.17, -8.86, -0.5, 0.11, 0.07, 0.46, 0.0, -0.48],
    'donutfs3': [-0.21, 0.93, 8.75, -0.5, 0.23, 0.06, 0.3, 0.2, -0.59],
    'donutfs2': [0.11, 0.65, -9.73, -0.19, 0.11, -0.02, 0.3, 0.35, -0.1],
    'donutfs1': [0.12, 1.25, 8.62, -0.34, 0.23, 0.18, 0.34, 0.34, -0.0],
    'g-flxvar': 18593751.61,
    'g-meanxy': -0.005538,
    'donutfn1': [-0.05, -0.2, -9.58, -0.26, 0.64, -0.09, 0.2, 0.47, -0.35],
    'donutfn2': [1.98, 0.88, 8.96, -0.87, -0.33, 0.0, 0.07, 0.35, -0.13],
    'donutfn3': [0.46, 0.52, -9.07, -0.39, -0.34, 0.12, 0.07, 0.15, 0.18],
    'donutfn4': [-1.54, -0.2, 8.42, -0.3, -0.86, 0.15, 0.17, 0.16, -0.11],
    'time_recorded': '2013-09-10T03:50:09.207158',
    'g-feedbk': 10,
    'g-ccdnum': 4,
    'doxt': 0.02,
    'g-maxx': 0.2885,
    'fadz': 53.82,
    'fady': 275.96,
    'fadx': 262.36,
    'g-mode': 'auto ',
    'fayt': -4.23,
    'dodz': 53.82,
    'dody': -0.19,
    'dodx': -0.63,
    'bcamaz': 0.0,
    'multiexp': False,
    'bcamax': -32.47,
    'bcamay': -12.147,
    'bcamdy': 1020.038,
    'bcamdx': -511.087,
    'skyupdat': '2013-09-10T03:46:31',
    'g-seeing': 1.985,
    'g-transp': 0.796,
    'g-meany2': 0.017566,
    'doyt': 0.42,
    'g-latenc': 1.308,
    'lutver': 'working-trunk',
    'faxt': -9.11,
    'g-maxy': 0.3037,
    'g-meanx2': 0.013485,
    'sispiver': 'trunk ',
    'constver': 'DECAM:19',
    'hdrver': 13,
    'dtsite': 'ct ',
    'dttelesc': 'ct4m ',
    'dtinstru': 'decam ',
    'dtcaldat': '2013-09-09 ',
    'odateobs': ' ',
    'dtutc': '2013-09-10T03:50:41',
    'dtobserv': 'NOAO ',
    'dtpropid': '2012B-0001 ',
    'dtpiaffl': ' ',
    'dttitle': ' ',
    'dtcopyri': 'AURA ',
    'dtacquis': 'pipeline3.ctio.noao.edu',
    'dtaccoun': 'sispi ',
    'dtacqnam': '/data_local/images/DTS/2012B-0001/DECam_00232321.fits.fz',
    'dtnsanam': 'dec103900.fits ',
    'dtqueue': 'des ',
    'dtstatus': 'done ',
    'sb_host': 'pipeline3.ctio.noao.edu',
    'sb_accou': 'sispi ',
    'sb_site': 'ct ',
    'sb_local': 'dec ',
    'sb_dir1': 20130909,
    'sb_dir2': 'ct4m ',
    'sb_dir3': '2012B-0001 ',
    'sb_recno': 103900,
    'sb_id': 'dec103900 ',
    'sb_name': 'dec103900.fits ',
    'rmcount': 0,
    'recno': 103900,
    'bunit': 'electrons',
    'wcsaxes': 2,
    'detsize': '[1:29400,1:29050]',
    'datasec': '[1:2048,1:4096]',
    'detsec': '[22529:24576,10240:14335]',
    'ccdsec': '[1:2048,1:4096]',
    'detseca': '[22529:23552,10240:14335]',
    'ccdseca': '[1:1024,1:4096]',
    'ampseca': '[1:1024,1:4096]',
    'dataseca': '[1:1024,1:4096]',
    'detsecb': '[23553:24576,10240:14335]',
    'ccdsecb': '[1025:2048,1:4096]',
    'ampsecb': '[2048:1025,1:4096]',
    'datasecb': '[1025:2048,1:4096]',
    'detector': 'S3-119_123194-11-3',
    'ccdnum': 57,
    'detpos': 'N26 ',
    'gaina': 1.00274243961024,
    'rdnoisea': 5.932,
    'saturata': 137491.780096181,
    'gainb': 0.983317669448972,
    'rdnoiseb': 5.725,
    'saturatb': 123794.490612799,
    'crpix1': -9120.8,
    'crpix2': 4177.667,
    'fpa': 'DECAM_BKP5',
    'ccdbin1': 1,
    'ccdbin2': 1,
    'dheinf': 'MNSN fermi hardware',
    'dhefirm': 'demo30 ',
    'slot00': 'MCB 7 5.210000',
    'slot01': 'DESCB 23 4.010000',
    'slot02': 'DESCB 0x0 4.010000',
    'slot03': 'CCD12 3 4.080000',
    'slot04': 'CCD12 23 4.080000',
    'slot05': 'CCD12 13 4.080000',
    'radesys': 'ICRS ',
    'equinox': 2000.0,
    'pv1_7': -0.001131856392163,
    'cunit1': 'deg ',
    'pv2_8': 0.001018303032252,
    'pv2_9': 0.002319394606743,
    'cd1_1': -1.48270437561e-07,
    'ltm2_2': 1.0,
    'ltm2_1': 0.0,
    'pv2_0': -0.003399720238217,
    'pv2_1': 0.9864515588353,
    'pv2_2': 0.0009454823496124,
    'pv2_3': 0.0,
    'pv2_4': -0.02314806967003,
    'pv2_5': 0.001877677471197,
    'pv2_6': 0.004309589780532,
    'pv2_7': -0.01227383889951,
    'ltm1_1': 1.0,
    'pv1_6': -0.01361136561823,
    'pv2_10': 0.0009498695718565,
    'pv1_4': 0.003530898113869,
    'pv1_3': 0.0,
    'pv1_2': -0.01014864986384,
    'pv1_1': 1.008025318525,
    'pv1_0': -0.002359709297272,
    'ltm1_2': 0.0,
    'pv1_9': 0.000779072746685,
    'pv1_8': 0.003705666166824,
    'cd1_2': 7.285803899392e-05,
    'pv1_5': 0.006384496695735,
    'cunit2': 'deg ',
    'cd2_1': -7.285403390983e-05,
    'cd2_2': -1.476988018249e-07,
    'ltv2': 0.0,
    'ltv1': 0.0,
    'pv1_10': -0.006122290458248,
    'ctype2': 'DEC--TPV',
    'ctype1': 'RA---TPV',
    'crval1': 320.4912462427,
    'crval2': 0.6171111312777,
    'valida': True,
    'validb': True,
    'ndonuts': 0,
    '': '',
    'photflag': 1,
    'desdcxtk': 'Thu Mar 31 15:15:52 2016',
    'xtalkfil': 'DECam_20130606.xtalk',
    'desoscn': 'Thu Mar 31 15:15:52 2016',
    'fzalgor': 'RICE_1 ',
    'fzqmethd': 'SUBTRACTIVE_DITHER_2',
    'fzqvalue': 16,
    'fzdthrsd': 'CHECKSUM',
    'band': 'i ',
    'camsym': 'D ',
    'nite': 20130909,
    'desfname': 'D00232321_i_c57_r2357p01_immasked.fits',
    'pipeline': 'finalcut',
    'unitname': 'D00232321',
    'attnum': 1,
    'eupsprod': 'finalcut',
    'eupsver': 'Y2A1+5 ',
    'reqnum': 2357,
    'biasfil': 'D_n20130916t0926_c57_r1999p06_biascor.fits',
    'desbias': 'Thu Mar 31 15:34:10 2016',
    'lincfil': 'lin_tbl_v0.4.fits',
    'deslinc': 'Thu Mar 31 15:34:12 2016',
    'desbpm': 'Thu Mar 31 15:34:13 2016',
    'bpmfil': 'D_n20130916t0926_c57_r2083p01_bpm.fits',
    'dessat': 'Thu Mar 31 15:34:13 2016',
    'nsatpix': 13261,
    'flatmeda': 1.00274243961024,
    'flatmedb': 0.983317669448972,
    'saturate': 137491.780096181,
    'desgainc': 'Thu Mar 31 15:34:13 2016',
    'bfcfil': 'D_n20150305_r1428p01_bf.fits',
    'desbfc': 'Thu Mar 31 15:34:15 2016',
    'flatfil': 'D_n20130916t0926_i_c57_r1999p06_norm-dflatcor.fits',
    'desflat': 'Thu Mar 31 15:34:15 2016',
    'fixcfil': 'D_n20130916t0926_c57_r2083p01_bpm.fits',
    'desfixc': 'Thu Mar 31 15:34:15 2016',
    'ra_cent': 320.334208216425,
    'dec_cent': -0.122655583198652,
    'rac1': 320.184653593733,
    'decc1': -0.0476310761279146,
    'rac2': 320.184559081817,
    'decc2': -0.196824403983542,
    'rac3': 320.48379991099,
    'decc3': -0.197603840171634,
    'rac4': 320.484084276791,
    'decc4': -0.0484244712764058,
    'racmin': 320.184559081817,
    'racmax': 320.484084276791,
    'deccmin': -0.197603840171634,
    'deccmax': -0.0476310761279146,
    'crossra0': 'N ',
    'fwhm': 3.8783,
    'scampchi': 2.8819,
    'elliptic': 0.0591,
    'scampnum': 1317,
    'scampref': 'UCAC-4 ',
    'desbleed': 'Thu Mar 31 16:16:51 2016',
    'nbleed': 45027,
    'starmask': 'Thu Mar 31 16:16:51 2016',
    'des_ext': 'IMAGE ',
    'skysbfil': 'Y2A1_20130801t1128_i_c57_r2044p01_skypca-tmpl.fits',
    'skypc00': 2339.17986012236,
    'skypc01': -5.69772070194055,
    'skypc02': 10.3202256124756,
    'skypc03': -1.68147589700257,
    'skyvara': 2351.65820178295,
    'skyvarb': 2391.03565464325,
    'skysigma': 48.6973713882834,
    'skybrite': 2317.88702996854,
    'desskysb': 'Thu Mar 31 16:59:03 2016',
    'starfil': 'Y2A1_20130801t1128_i_c57_r2046p01_starflat.fits',
    'desstar': 'Thu Mar 31 17:59:13 2016',
    'desncray': 133,
    'desnstrk': 0,
    'desimmsk': 'Thu Mar 31 23:39:36 2016',
    'zquantiz': 'SUBTRACTIVE_DITHER_2',
    'history': "'SUBTRACTIVE_DITHER_2' / Pixel Quantization Algorithm",
    'zdither0': 3543,
    'checksum': '5SXRASVQ7SVQASVQ',
    'datasum': 1755117338})
def test_wcs_inversion():
    """Verify WCSInversionInterpolator against the exact coadd->SE inverse.

    Builds the interpolator from a coarse grid of SE pixel positions mapped
    through SE_WCS -> sky -> COADD_WCS, then checks random SE positions:
    the interpolated inverse must agree with the exact round trip and with
    the original position.
    """
    rng = np.random.RandomState(seed=10)
    dim, delta = 64, 8
    grid_y, grid_x = np.mgrid[:dim + delta:delta, 0:dim + delta:delta]
    grid_y = grid_y.ravel()
    grid_x = grid_x.ravel()
    coadd_x, coadd_y = COADD_WCS.sky2image(*SE_WCS.image2sky(grid_x, grid_y))
    wcs_inv = WCSInversionInterpolator(coadd_x, coadd_y, grid_x, grid_y)
    for _ in range(1000):
        raw = rng.uniform(size=2) * 63 + 1
        se_pos = (raw[0], raw[1])
        coadd_pos = COADD_WCS.sky2image(*SE_WCS.image2sky(*se_pos))
        exact_se_pos = SE_WCS.sky2image(*COADD_WCS.image2sky(*coadd_pos))
        interp_se_pos = wcs_inv(*coadd_pos)
        assert np.allclose(exact_se_pos, interp_se_pos)
        assert np.allclose(se_pos, interp_se_pos)
def test_wcs_scalar_interp_se():
rng = np.random.RandomState(seed=10)
dimx = 64
dimy = 512
delta = 8
y, x = np.mgrid[:dimy+delta:delta, 0:dimx+delta:delta]
shape = y.shape
y = y.ravel()
x = x.ravel()
tup = SE_WCS.get_jacobian(x, y)
area = np.abs(tup[0] * tup[3] - tup[1] * tup[2])
area = area.reshape(shape).T
wcs_area = WCSGridScalarInterpolator(
np.mgrid[:dimx+delta:delta],
np.mgrid[:dimy+delta:delta],
area,
)
for _ in range(1000):
se_pos = rng.uniform(size=2)*63 + 1
tup = SE_WCS.get_jacobian(se_pos[0], se_pos[1])
| |
<reponame>thuleqaid/boost_study<gh_stars>1-10
# voom_mode_asciidoc.py
# Last Modified: 2014-05-21
# VOoM -- Vim two-pane outliner, plugin for Python-enabled Vim 7.x
# Website: http://www.vim.org/scripts/script.php?script_id=2657
# Author: <NAME> (<EMAIL> DOT <EMAIL> AT <EMAIL> DOT <EMAIL>)
# License: CC0, see http://creativecommons.org/publicdomain/zero/1.0/
"""
VOoM markup mode for AsciiDoc document and section titles.
See |voom-mode-asciidoc|, ../../doc/voom.txt#*voom-mode-asciidoc*
"""
### NOTES
#
# When outline operation changes level, it has to deal with two ambiguities:
# a) Level 1-5 headline can use 2-style (underline) or 1-style (=).
# b) 1-style can have or not have closing ='s.
# To determine current preferences: check first headline at level <6 and check
# first headline with =. This must be done in hook_makeOutline().
# (Save in VO, similar to reST mode.) Cannot be done during outline operation,
# that is in hook_doBodyAfterOop().
# Defaults: use underline, use closing ='s.
# Blank-line requirements around headlines can be disabled from Vim with
# g:voom_asciidoc_do_blanks = 0; outside of Vim (plain Python import) the
# checks stay enabled.
try:
    import vim
    _opt_set = vim.eval('exists("g:voom_asciidoc_do_blanks")') == '1'
    DO_BLANKS = not (_opt_set and vim.eval("g:voom_asciidoc_do_blanks") == '0')
except ImportError:
    DO_BLANKS = True
import re
# 1-style headline: "== Title" with optional matching closing ='s.
# Assumes trailing whitespace has already been stripped from the line.
HEAD_MATCH = re.compile(r'^(=+)(\s+\S.*?)(\s+\1)?$').match
#---------------------------------------------------------------------
# Underline characters of two-line (2-style) headlines, mapped to level.
ADS_LEVELS = {'=' : 1, '-' : 2, '~' : 3, '^' : 4, '+' : 5}
# Characters that open/close Delimited Blocks. Headlines are ignored
# inside such blocks.
BLOCK_CHARS = {'/' : 0, '+' : 0, '-' : 0, '.' : 0, '*' : 0, '_' : 0, '=' : 0}
# Reverse map: level number -> underline character.
LEVELS_ADS = dict((lev, ch) for (ch, lev) in ADS_LEVELS.items())
# Union of all significant characters: a headline or a DelimitedBlock can
# only start with one of these.
CHARS = dict.fromkeys(ADS_LEVELS, 0)
CHARS.update(dict.fromkeys(BLOCK_CHARS, 0))
#---------------------------------------------------------------------
#---------------------------------------------------------------------
def hook_makeOutline(VO, blines):
    """Return (tlines, bnodes, levels) for Body lines blines.
    blines is either Vim buffer object (Body) or list of buffer lines.

    tlines  -- Tree buffer lines, one per headline found.
    bnodes  -- 1-based Body lnums where each node starts.
    levels  -- headline level for each node.
    Side effect: when blines is VO.Body, saves the detected headline style
    preferences in VO.useOne and VO.useOneClose for later outline ops.
    NOTE(review): Python 2 only (xrange, str.decode) -- runs inside
    Python-enabled Vim, do not port blindly.
    """
    ENC = VO.enc
    Z = len(blines)
    tlines, bnodes, levels = [], [], []
    tlines_add, bnodes_add, levels_add = tlines.append, bnodes.append, levels.append
    # trailing whitespace is always removed with rstrip()
    # if headline is precedeed by [AAA] and/or [[AAA]], bnode is set to their lnum
    #
    # 1-style, overrides 2-style
    #   [[AAA]]    L3, blines[i-2]
    #   [yyy]      L2, blines[i-1]
    #   == head == L1, blines[i]   -- current line, closing = are optional
    #
    # 2-style (underline)
    #   [[AAA]]    L4, blines[i-3]
    #   [yyy]      L3, blines[i-2]
    #   head       L2, blines[i-1] -- title line, many restrictions on the format
    #   ----       L1, blines[i]   -- current line
    # Set this the first time a headline with level 1-5 is encountered.
    # 0 or 1 -- False, use 2-style (default); 2 -- True, use 1-style
    useOne = 0
    # Set this the first time headline in 1-style is encountered.
    # 0 or 1 -- True, use closing ='s (default); 2 -- False, do not use closing ='s
    useOneClose = 0
    isHead = False
    isFenced = False # True if inside DelimitedBlock, the value is the char
    headI = -2 # idx of the last line that is part of a headline
    blockI = -2 # idx of the last line where a DelimitedBlock ended
    m = None # match object for 1-style regex
    for i in xrange(Z):
        L1 = blines[i].rstrip()
        # fast reject: a headline or block fence must start with a char in CHARS
        if not L1 or not L1[0] in CHARS:
            continue
        ch = L1[0]
        if isFenced:
            # only a matching fence line of the same char closes the block
            if isFenced==ch and len(L1)>3 and L1.lstrip(ch)=='':
                isFenced = False
                blockI = i
            continue
        # 1-style headline
        if ch == '=' and L1.strip('='):
            m = HEAD_MATCH(L1)
            if m:
                isHead = True
                headI_ = headI
                headI = i
                lev = len(m.group(1))
                head = m.group(2).strip()
                bnode = i+1
        # current line is an underline
        # the previous, underlined line (L2) is not a headline if it:
        #   is not exactly the length of underline +/- 2
        #   is already part of in the previous headline
        #   looks like an underline or a delimited block line
        #   is [[AAA]] or [AAA] (BlockID or Attribute List)
        #   starts with . (Block Title, they have no level)
        #   starts with // (comment line)
        #   starts with tab (don't know why, spaces are ok)
        #   is only 1 chars (avoids confusion with --, as in Vim syntax, not as in AsciiDoc)
        if not isHead and ch in ADS_LEVELS and L1.lstrip(ch)=='' and i > 0:
            L2 = blines[i-1].rstrip()
            # length compare is in display characters, hence the decode
            z2 = len(L2.decode(ENC,'replace'))
            z1 = len(L1)
            if (L2 and
                    (-3 < z2 - z1 < 3) and z1 > 1 and z2 > 1 and
                    headI != i-1 and
                    not ((L2[0] in CHARS) and L2.lstrip(L2[0])=='') and
                    not (L2.startswith('[') and L2.endswith(']')) and
                    not L2.startswith('.') and
                    not L2.startswith('\t') and
                    not (L2.startswith('//') and not L2.startswith('///'))
                    ):
                isHead = True
                headI_ = headI
                headI = i
                lev = ADS_LEVELS[ch]
                head = L2.strip()
                bnode = i # lnum of previous line (L2)
        if isHead and bnode > 1:
            # decrement bnode if preceding lines are [[AAA]] or [AAA] lines
            # that is set bnode to the topmost [[AAA]] or [AAA] line number
            j_ = bnode-2 # idx of line before the title line
            L3 = blines[bnode-2].rstrip()
            while L3.startswith('[') and L3.endswith(']'):
                bnode -= 1
                if bnode > 1:
                    L3 = blines[bnode-2].rstrip()
                else:
                    break
            # headline must be preceded by a blank line unless:
            #   it's line 1 (j == -1)
            #   headline is preceded by [AAA] or [[AAA]] lines (j != j_)
            #   previous line is a headline (headI_ == j)
            #   previous line is the end of a DelimitedBlock (blockI == j)
            j = bnode-2
            if DO_BLANKS and j==j_ and j > -1:
                L3 = blines[j].rstrip()
                if L3 and headI_ != j and blockI != j:
                    # skip over any adjacent comment lines
                    while L3.startswith('//') and not L3.startswith('///'):
                        j -= 1
                        if j > -1:
                            L3 = blines[j].rstrip()
                        else:
                            L3 = ''
                    # still non-blank and not headline/block end: reject headline
                    if L3 and headI_ != j and blockI != j:
                        isHead = False
                        headI = headI_
        # start of DelimitedBlock
        if not isHead and ch in BLOCK_CHARS and len(L1)>3 and L1.lstrip(ch)=='':
            isFenced = ch
            continue
        if isHead:
            isHead = False
            # save style info for first headline and first 1-style headline
            if not useOne and lev < 6:
                if m:
                    useOne = 2
                else:
                    useOne = 1
            if not useOneClose and m:
                if m.group(3):
                    useOneClose = 1
                else:
                    useOneClose = 2
            # make outline
            tline = '  %s|%s' %('. '*(lev-1), head)
            tlines_add(tline)
            bnodes_add(bnode)
            levels_add(lev)
    # don't clobber these when parsing clipboard during Paste
    # which is the only time blines is not Body
    if blines is VO.Body:
        VO.useOne = useOne == 2
        VO.useOneClose = useOneClose < 2
    return (tlines, bnodes, levels)
def hook_newHeadline(VO, level, blnum, tlnum):
    """Return (tree_head, bodyLines).
    tree_head is new headline string in Tree buffer (text after |).
    bodyLines is list of lines to insert in Body buffer.
    """
    tree_head = 'NewHeadline'
    if level < 6 and not VO.useOne:
        # 2-style: title line over an underline of the level's character
        bodyLines = [tree_head, LEVELS_ADS[level]*11, '']
    elif VO.useOneClose:
        marks = '='*level
        bodyLines = ['%s %s %s' %(marks, tree_head, marks), '']
    else:
        bodyLines = ['%s %s' %('='*level, tree_head), '']
    # Keep a blank separator when inserting after a non-blank Body line.
    if VO.Body[blnum-1].strip():
        bodyLines.insert(0, '')
    return (tree_head, bodyLines)
#def hook_changeLevBodyHead(VO, h, levDelta):
# DO NOT CREATE THIS HOOK
def hook_doBodyAfterOop(VO, oop, levDelta, blnum1, tlnum1, blnum2, tlnum2, blnumCut, tlnumCut):
# this is instead of hook_changeLevBodyHead()
# Based on Markdown mode function.
# Inserts blank separator lines if missing.
#print oop, levDelta, blnum1, tlnum1, blnum2, tlnum2, tlnumCut, blnumCut
Body = VO.Body
Z = len(Body)
bnodes, levels = VO.bnodes, VO.levels
ENC = VO.enc
# blnum1 blnum2 is first and last lnums of Body region pasted, inserted
# during up/down, or promoted/demoted.
if blnum1:
assert blnum1 == bnodes[tlnum1-1]
if tlnum2 < len(bnodes):
assert blnum2 == bnodes[tlnum2]-1
else:
assert blnum2 == Z
# blnumCut is Body lnum after which a region was removed during 'cut',
# 'up', 'down'. Need this to check if there is blank line between nodes
# used to be separated by the cut/moved region.
if blnumCut:
if tlnumCut < len(bnodes):
assert blnumCut == bnodes[tlnumCut]-1
else:
assert blnumCut == Z
# Total number of added lines minus number of deleted lines.
| |
sequence=None):
items = []
if sequence is None:
sequence = method
method = obj
for item in sequence:
items.append(method(item))
else:
for item in sequence:
items.append(method.call(obj, item))
return items
def reduce(func, iterable, initializer=JS("(function(){return;})()")):
    """pyjs builtin: apply func cumulatively to items of iterable.

    The default initializer is the JS `undefined` value (pyjs idiom); the
    `typeof` checks below detect whether a real initializer was supplied.
    NOTE: pyjs/Python-2 dialect -- the JS() calls and the `raise T, msg`
    statement form are consumed by the pyjs translator, not CPython 3.
    """
    try:
        iterable = iter(iterable)
    except:
        raise TypeError, "reduce() arg 2 must support iteration"
    empty = True
    for value in iterable:
        empty = False
        # first item becomes the accumulator when no initializer was given
        if JS("typeof @{{initializer}}== 'undefined'"):
            initializer = value
        else:
            initializer = func(initializer, value)
    if empty:
        if JS("typeof @{{initializer}}== 'undefined'"):
            raise TypeError, "reduce() of empty sequence with no initial value"
        return initializer
    return initializer
def zip(*iterables):
    """pyjs builtin: Python-2 zip -- return a list of tuples, truncated at
    the shortest input; zip() with no arguments returns [].

    Uses the Python-2 iterator protocol (.next()); StopIteration from any
    input ends the loop.
    """
    n = len(iterables)
    if n == 0:
        return []
    lst = []
    iterables = [iter(i) for i in iterables]
    try:
        while True:
            t = []
            i = 0
            while i < n:
                t.append(iterables[i].next())
                i += 1
            lst.append(tuple(t))
    except StopIteration:
        # one input exhausted: discard the partial tuple t
        pass
    return lst
def sorted(iterable, cmp=None, key=None, reverse=False):
    """pyjs builtin: return a new sorted list from iterable.

    Delegates to the Python-2 list.sort(cmp, key, reverse) signature.
    """
    lst = list(iterable)
    lst.sort(cmp, key, reverse)
    return lst
def reversed(iterable):
    """pyjs builtin: return a reverse iterator over a sequence.

    Prefers the object's own __reversed__; otherwise requires __len__ and
    __getitem__ and iterates indices backwards via _reversed.
    """
    if hasattr(iterable, '__reversed__'):
        return iterable.__reversed__()
    if hasattr(iterable, '__len__') and hasattr(iterable, '__getitem__'):
        if len(iterable) == 0:
            l = []
            return l.__iter__()
        try:
            # probe indexing; any failure falls through to the TypeError below
            v = iterable[0]
            return _reversed(iterable)
        except:
            pass
    raise TypeError("argument to reversed() must be a sequence")
def _reversed(iterable):
i = len(iterable)
while i > 0:
i -= 1
yield iterable[i]
def enumerate(seq):
    """pyjs builtin: return an enumerate iterator over seq.

    Fast path: if the underlying JS object provides __enumerate__, the
    inlined JS below returns it directly; otherwise fall back to the
    Python generator _enumerate.
    """
    JS("""
    if (typeof @{{seq}}.__enumerate__ == 'function') {
        return @{{seq}}.__enumerate__();
    }
    """)
    return _enumerate(seq)
def _enumerate(sequence):
nextIndex = 0
for item in sequence:
yield (nextIndex, item)
nextIndex += 1
def iter(iterable, sentinel=None):
    """pyjs builtin: return an iterator.

    One-argument form: use __iter__ if the object is iterable (per the
    pyjs helper isIteratable), else fall back to __getitem__ indexing.
    Two-argument form: iterable must be callable; it is invoked repeatedly
    until the returned value equals sentinel.
    NOTE: a sentinel of None cannot be distinguished from the one-argument
    form with this signature.
    """
    if sentinel is None:
        if isIteratable(iterable):
            return iterable.__iter__()
        if hasattr(iterable, '__getitem__'):
            return _iter_getitem(iterable)
        raise TypeError("object is not iterable")
    if isFunction(iterable):
        return _iter_callable(iterable, sentinel)
    raise TypeError("iter(v, w): v must be callable")
def _iter_getitem(object):
i = 0
try:
while True:
yield object[i]
i += 1
except IndexError:
pass
def _iter_callable(callable, sentinel):
while True:
nextval = callable()
if nextval == sentinel:
break
yield nextval
def min(*sequence):
    """Smallest item: ``min(iterable)`` or ``min(a, b, ...)``.

    Ordering is delegated to the global ``cmp()``.
    NOTE(review): an empty sequence returns None instead of raising
    ValueError as CPython does, and an item that is literally None is
    indistinguishable from the "not yet set" state -- confirm intended.
    """
    if len(sequence) == 1:
        # Single argument: treat it as the iterable to scan.
        sequence = sequence[0]
    minValue = None
    for item in sequence:
        if minValue is None:
            minValue = item
        elif cmp(item, minValue) == -1:
            minValue = item
    return minValue
def max(*sequence):
    """Largest item: ``max(iterable)`` or ``max(a, b, ...)``.

    Ordering is delegated to the global ``cmp()``.
    NOTE(review): an empty sequence returns None instead of raising
    ValueError as CPython does, and an item that is literally None is
    indistinguishable from the "not yet set" state -- confirm intended.
    """
    if len(sequence) == 1:
        # Single argument: treat it as the iterable to scan.
        sequence = sequence[0]
    maxValue = None
    for item in sequence:
        if maxValue is None:
            maxValue = item
        elif cmp(item, maxValue) == 1:
            maxValue = item
    return maxValue
def sum(iterable, start=None):
    """Return *start* (default 0) plus the sum of the items of *iterable*.

    Accumulation uses in-place ``+=``, so a mutable *start* (e.g. a list)
    is extended in place, matching the original implementation.
    """
    total = 0 if start is None else start
    for item in iterable:
        total += item
    return total
class complex:
    """Minimal complex-number type for the pyjs runtime.

    Only construction, repr/str and addition are implemented.
    """
    def __init__(self, real, imag):
        self.real = float(real)
        self.imag = float(imag)
    def __repr__(self):
        # Mirror CPython: omit the real part when it is (falsy) zero.
        # NOTE(review): a negative imaginary part renders as e.g.
        # "(1.0+-2.0j)" -- confirm whether that is acceptable here.
        if self.real:
            return "(%s+%sj)" % (self.real, self.imag)
        else:
            return "%sj" % self.imag
    __str__ = __repr__
    def __add__(self, b):
        if isinstance(b, complex):
            return complex(self.real + b.real, self.imag + b.imag)
        elif JS("typeof @{{b}}.__number__ != 'undefined'"):
            # pyjs tags its numeric values with a __number__ marker.
            return complex(self.real + b, self.imag)
        else:
            raise TypeError("unsupported operand type(s) for +: '%r', '%r'" % (self, b))
    __radd__ = __add__
# Give the complex class a JS-level toString: instances render via __repr__,
# the class object itself renders as '<type complex>'.
JS("@{{complex}}.toString = function() { return this.__is_instance__ ? this.__repr__() : '<type complex>'; };")
# hash(obj) == (obj === null? null : (obj.hasOwnProperty("$H") ? obj.$H : (typeof obj == 'string' ? '$s' + obj : (obj.__number__ ? '$n' + obj : @{{__hash}}(obj)))))
# Feature-detect old IE: if string indexing ("abc"[0]) is unsupported we are
# on IE, which also cannot reliably store the $H hash id on DOM objects, so a
# getAttribute/setAttribute fallback is used for DOM nodes.
if JS("typeof 'a'[0] == 'undefined'"):
    # IE: cannot do "abc"[idx]
    # IE has problems with setting obj.$H on certain DOM objects
    # __hash: slow path used by hash() below; assigns a fresh $H id where
    # possible, falling back to DOM attributes, else the object itself.
    #def __hash(obj):
    JS("""@{{__hash}} = function(obj) {
    switch (obj.constructor) {
        case String:
            return '$s' + obj;
        case Number:
            return '$n' + obj;
        case Date:
            return '$d' + obj;
    }
    if (obj.__is_instance__ !== false && typeof obj.__hash__ == 'function') {
        return obj.__hash__();
    }
    if (typeof obj.nodeType != 'number') {
        try {
            obj.$H = ++@{{next_hash_id}};
        } catch (e) {
            return obj;
        }
        return obj.$H;
    }
    if (typeof obj.setAttribute == 'undefined') {
        return obj;
    }
    var $H;
    if (obj.hasOwnProperty("$H")) {
        if ($H = obj.getAttribute('$H')) {
            return $H;
        }
    }
    obj.setAttribute('$H', ++@{{next_hash_id}});
    return @{{next_hash_id}};
};
""")
    # hash: fast paths for null, already-hashed objects, primitive strings
    # and tagged numbers, then the same logic as __hash above.
    #def hash(obj):
    JS("""@{{hash}} = function(obj) {
    if (obj === null) return null;
    if (obj.hasOwnProperty("$H")) return obj.$H;
    if (typeof obj == 'string') {
        return '$s' + obj;
    } else if (obj.__number__) {
        return '$n' + obj;
    }
    switch (obj.constructor) {
        case String:
            return '$s' + obj;
        case Number:
            return '$n' + obj;
        case Date:
            return '$d' + obj;
    }
    if (obj.__is_instance__ !== false && typeof obj.__hash__ == 'function') {
        return obj.__hash__();
    }
    if (typeof obj.nodeType != 'number') {
        try {
            obj.$H = ++@{{next_hash_id}};
        } catch (e) {
            return obj;
        }
        return obj.$H;
    }
    if (typeof obj.setAttribute == 'undefined') {
        return obj;
    }
    var $H;
    if (obj.hasOwnProperty("$H")) {
        if ($H = obj.getAttribute('$H')) {
            return $H;
        }
    }
    obj.setAttribute('$H', ++@{{next_hash_id}});
    return @{{next_hash_id}};
};
""")
else:
    # Non-IE browsers: $H can always be assigned directly on the object.
    #def __hash(obj):
    JS("""@{{__hash}} = function(obj) {
    switch (obj.constructor) {
        case String:
            return '$s' + obj;
        case Number:
            return '$n' + obj;
        case Date:
            return '$d' + obj;
    }
    if (obj.__is_instance__ !== false && typeof obj.__hash__ == 'function') {
        return obj.__hash__();
    }
    obj.$H = ++@{{next_hash_id}};
    return obj.$H;
};
""")
    # hash: same fast paths as the IE variant, without the DOM fallback.
    #def hash(obj):
    JS("""@{{hash}} = function(obj) {
    if (obj === null) return null;
    if (obj.hasOwnProperty("$H")) return obj.$H;
    if (typeof obj == 'string') {
        return '$s' + obj;
    } else if(obj.__number__) {
        return '$n' + obj;
    }
    switch (obj.constructor) {
        case String:
            return '$s' + obj;
        case Number:
            return '$n' + obj;
        case Date:
            return '$d' + obj;
    }
    if (obj.__is_instance__ !== false && typeof obj.__hash__ == 'function') {
        return obj.__hash__();
    }
    obj.$H = ++@{{next_hash_id}};
    return obj.$H;
};
""")
# type functions from Douglas Crockford's Remedial Javascript: http://www.crockford.com/javascript/remedial.html
def isObject(a):
    """True for any non-null JS object, or any JS function."""
    JS("""
    return (@{{a}} !== null && (typeof @{{a}} == 'object')) || typeof @{{a}} == 'function';
    """)
def isFunction(a):
    """True if *a* is a JS function."""
    JS("""
    return typeof @{{a}} == 'function';
    """)
def callable(func):
    """True if *func* can be called: a plain JS function, or any object
    exposing a ``__call__`` attribute.

    Bug fix: the JS body referenced ``@{{a}}`` (a parameter name copied
    from the neighbouring is* helpers) and a bare ``func``, although this
    function's parameter is ``func``; both references now use the
    ``@{{func}}`` template consistently.
    """
    JS("""
    return typeof @{{func}} == 'function' || @{{hasattr}}(@{{func}}, '__call__');
    """)
def isString(a):
    """True if *a* is a primitive JS string."""
    JS("""
    return typeof @{{a}} == 'string';
    """)
def isNull(a):
    """True if *a* is JS ``null`` (typeof null is 'object' and null is falsy)."""
    JS("""
    return typeof @{{a}} == 'object' && !@{{a}};
    """)
def isArray(a):
    """True if *a* is a native JS Array (constructor identity check)."""
    JS("""
    return @{{isObject}}(@{{a}}) && @{{a}}.constructor === Array;
    """)
def isUndefined(a):
    """True if *a* is JS ``undefined``."""
    JS("""
    return typeof @{{a}} == 'undefined';
    """)
def isIteratable(a):
    """True if *a* is non-null and has an ``__iter__`` method."""
    JS("""
    if (@{{a}}=== null) return false;
    return typeof @{{a}}.__iter__ == 'function';
    """)
def isNumber(a):
    """True if *a* carries the pyjs ``__number__`` tag and, for floats,
    is finite.

    NOTE(review): the 0x01 tag appears to mark floats (hence the
    isFinite check) -- confirm against the pyjs number implementation.
    """
    JS("""
    return @{{a}}!== null && @{{a}}.__number__ &&
        (@{{a}}.__number__ != 0x01 || isFinite(@{{a}}));
    """)
def isInteger(a):
    """True if *a* is an integral pyjs number.

    NOTE(review): the ``__number__`` tags appear to be 0x01 = float
    (accepted only when integral, via the Math.floor check falling
    through the switch), 0x02/0x04 = int/long -- confirm against the
    pyjs number implementation.
    """
    JS("""
    switch (@{{a}}.__number__) {
        case 0x01:
            if (@{{a}} != Math.floor(@{{a}})) break;
        case 0x02:
        case 0x04:
            return true;
    }
    return false;
    """)
def isSet(a):
    """Classify *a*: returns 0 (not a set), 1 (``set``) or 2 (``frozenset``).

    Note the integer return code -- callers distinguish the two set types.
    """
    JS("""
    if (@{{a}}=== null) return 0;
    if (typeof @{{a}}.__object == 'undefined') return 0;
    if (@{{a}}.__class__ === @{{set}}) return 1;
    if (@{{a}}.__class__ === @{{frozenset}}) return 2;
    return 0;
    """)
def toJSObjects(x):
    """
    Convert the pyjs pythonic list and dict objects into javascript Object and Array
    objects, recursively.
    """
    # Native JS arrays: convert each element recursively.
    if isArray(x):
        JS("""
        var result = [];
        for(var k=0; k < @{{x}}.length; k++) {
            var v = @{{x}}[k];
            var tv = @{{toJSObjects}}(v);
            result.push(tv);
        }
        return result;
        """)
    if isObject(x):
        if getattr(x, '__number__', None):
            # Tagged pyjs number: unwrap to the primitive value.
            return x.valueOf()
        elif isinstance(x, dict):
            # pyjs dict stores (key, value) pairs; keys are stringified.
            JS("""
            var o = @{{x}}.getObject();
            var result = {};
            for (var i in o) {
                result[o[i][0].toString()] = @{{toJSObjects}}(o[i][1]);
            }
            return result;
            """)
        elif isinstance(x, list):
            # pyjs list wraps a JS array in __array.
            return toJSObjects(x.__array)
        elif hasattr(x, '__class__'):
            # we do not have a special implementation for custom
            # classes, just pass it on
            return x
        elif isFunction(x):
            return x
    # Plain JS objects (no pyjs wrapper matched above): copy key by key.
    if isObject(x):
        JS("""
        var result = {};
        for(var k in @{{x}}) {
            var v = @{{x}}[k];
            var tv = @{{toJSObjects}}(v);
            result[k] = tv;
        }
        return result;
        """)
    if isString(x):
        return str(x)
    return x
def sprintf(strng, args):
# See http://docs.python.org/library/stdtypes.html
JS(r"""
var re_dict = /([^%]*)%[(]([^)]+)[)]([#0\x20\x2B-]*)(\d+)?(\.\d+)?[hlL]?(.)((.|\n)*)/;
var re_list = /([^%]*)%([#0\x20\x2B-]*)(\*|(\d+))?(\.\d+)?[hlL]?(.)((.|\n)*)/;
var re_exp = /(.*)([+-])(.*)/;
var argidx = 0;
var nargs = 0;
var result = [];
var remainder = @{{strng}};
function formatarg(flags, minlen, precision, conversion, param) {
var subst = '';
var numeric = true;
var left_padding = 1;
var padchar = ' ';
if (minlen === null || minlen == 0 || !minlen) {
minlen=0;
} else {
minlen = parseInt(minlen);
}
if (!precision) {
precision = null;
} else {
precision = parseInt(precision.substr(1));
}
if (flags.indexOf('-') >= 0) {
left_padding = 0;
}
switch (conversion) {
case '%':
numeric = false;
subst = '%';
break;
case 'c':
numeric = false;
subst = String.fromCharCode(parseInt(param));
break;
case 'd':
case 'i':
case 'u':
subst = '' + parseInt(param);
break;
case 'e':
if (precision === null) {
precision = 6;
}
subst = re_exp.exec(String(param.toExponential(precision)));
if (subst[3].length == 1) {
subst = subst[1] + subst[2] + '0' + subst[3];
} | |
# <gh_stars>1-10
from __future__ import annotations
import os
from os.path import join
from typing import Union
import numpy as np
import pandas as pd
from astropy.cosmology import Planck18 as cosmo # noqa
from redback.get_data.directory import afterglow_directory_structure
from redback.transient.transient import Transient
from redback.utils import logger
dirname = os.path.dirname(__file__)
class Afterglow(Transient):
DATA_MODES = ['luminosity', 'flux', 'flux_density', 'magnitude']
    def __init__(
            self, name: str, data_mode: str = 'flux', time: np.ndarray = None, time_err: np.ndarray = None,
            time_mjd: np.ndarray = None, time_mjd_err: np.ndarray = None, time_rest_frame: np.ndarray = None,
            time_rest_frame_err: np.ndarray = None, Lum50: np.ndarray = None, Lum50_err: np.ndarray = None,
            flux: np.ndarray = None, flux_err: np.ndarray = None, flux_density: np.ndarray = None,
            flux_density_err: np.ndarray = None, magnitude: np.ndarray = None, magnitude_err: np.ndarray = None,
            redshift: float = np.nan, photon_index: float = np.nan, frequency: np.ndarray = None,
            bands: np.ndarray = None, system: np.ndarray = None, active_bands: Union[np.ndarray, str] = 'all',
            use_phase_model: bool = False, **kwargs: None) -> None:
        """
        This is a general constructor for the Afterglow class. Note that you only need to give data corresponding to
        the data mode you are using. For luminosity data provide times in the rest frame, if using a phase model
        provide time in MJD, else use the default time (observer frame).

        :param name: Telephone number of GRB, e.g., 'GRB140903A' or '140903A' are valid inputs
        :type name: str
        :param data_mode: Data mode. Must be one from `Afterglow.DATA_MODES`.
        :type data_mode: str, optional
        :param time: Times in the observer frame.
        :type time: np.ndarray, optional
        :param time_err: Time errors in the observer frame.
        :type time_err: np.ndarray, optional
        :param time_mjd: Times in MJD. Used if using phase model.
        :type time_mjd: np.ndarray, optional
        :param time_mjd_err: Time errors in MJD. Used if using phase model.
        :type time_mjd_err: np.ndarray, optional
        :param time_rest_frame: Times in the rest frame. Used for luminosity data.
        :type time_rest_frame: np.ndarray, optional
        :param time_rest_frame_err: Time errors in the rest frame. Used for luminosity data.
        :type time_rest_frame_err: np.ndarray, optional
        :param Lum50: Luminosity values.
        :type Lum50: np.ndarray, optional
        :param Lum50_err: Luminosity error values.
        :type Lum50_err: np.ndarray, optional
        :param flux: Flux values.
        :type flux: np.ndarray, optional
        :param flux_err: Flux error values.
        :type flux_err: np.ndarray, optional
        :param flux_density: Flux density values.
        :type flux_density: np.ndarray, optional
        :param flux_density_err: Flux density error values.
        :type flux_density_err: np.ndarray, optional
        :param magnitude: Magnitude values for photometry data.
        :type magnitude: np.ndarray, optional
        :param magnitude_err: Magnitude error values for photometry data.
        :type magnitude_err: np.ndarray, optional
        :param redshift: Redshift value. Will be read from the metadata table if not given.
        :type redshift: float
        :param photon_index: Photon index value. Will be read from the metadata table if not given.
        :type photon_index: float
        :param use_phase_model: Whether we are using a phase model.
        :type use_phase_model: bool
        :param frequency: Array of band frequencies in photometry data.
        :type frequency: np.ndarray, optional
        :param system: System values.
        :type system: np.ndarray, optional
        :param bands: Band values.
        :type bands: np.ndarray, optional
        :param active_bands: List or array of active bands to be used in the analysis. Use all available bands if 'all' is given.
        :type active_bands: Union[list, np.ndarray]
        :param kwargs:
            Additional classes that can be customised to fulfil the truncation on flux to luminosity conversion:
            FluxToLuminosityConverter: Conversion class to convert fluxes to luminosities.
                                       If not given use `FluxToLuminosityConverter` in this module.
            Truncator: Truncation class that truncates the data. If not given use `Truncator` in this module.
        :type kwargs: None, optional
        """
        # Normalise the name to carry a 'GRB' prefix. NOTE(review): lstrip('GRB')
        # strips a *character set* (any leading G/R/B), not the literal prefix;
        # fine for telephone-number names, but confirm for unusual names.
        name = f"GRB{name.lstrip('GRB')}"
        # Allow callers to inject custom converter/truncator classes via kwargs.
        self.FluxToLuminosityConverter = kwargs.get('FluxToLuminosityConverter', FluxToLuminosityConverter)
        self.Truncator = kwargs.get('Truncator', Truncator)
        super().__init__(name=name, data_mode=data_mode, time=time, time_mjd=time_mjd, time_mjd_err=time_mjd_err,
                         time_err=time_err, time_rest_frame=time_rest_frame, time_rest_frame_err=time_rest_frame_err,
                         Lum50=Lum50, Lum50_err=Lum50_err, flux=flux, flux_err=flux_err, flux_density=flux_density,
                         flux_density_err=flux_density_err, use_phase_model=use_phase_model, magnitude=magnitude,
                         magnitude_err=magnitude_err, frequency=frequency, redshift=redshift, photon_index=photon_index,
                         system=system, bands=bands, active_bands=active_bands, **kwargs)
        # Populate metadata-derived attributes (meta table, photon index, T90, redshift).
        self._set_data()
        self._set_photon_index()
        self._set_t90()
        self._get_redshift()
        self.directory_structure = afterglow_directory_structure(grb=self.name, data_mode=self.data_mode, instrument="")
    @classmethod
    def from_swift_grb(
            cls, name: str, data_mode: str = 'flux', truncate: bool = True,
            truncate_method: str = 'prompt_time_error', **kwargs) -> Afterglow:
        """Construct an Afterglow from locally stored Swift data and load
        (and optionally truncate) the light curve.

        :param name: Telephone number of SGRB, e.g., 'GRB140903A' or '140903A' are valid inputs
        :type name: str
        :param data_mode: Data mode. Must be one from `Afterglow.DATA_MODES`. (Default value = 'flux')
        :type data_mode: str, optional
        :param truncate: Whether to truncate the data. (Default value = True)
        :type truncate: bool
        :param truncate_method: Must be from `Truncator.TRUNCATE_METHODS`. (Default value = 'prompt_time_error')
        :type truncate_method: str
        :param kwargs: Additional keywords to pass into Afterglow.__init__
        :type kwargs: dict
        :return: The Afterglow object.
        :rtype: Afterglow
        """
        afterglow = cls(name=name, data_mode=data_mode)
        # NOTE(review): __init__ already invokes these four setters, so the
        # calls below appear redundant (they are cheap and idempotent-looking,
        # but confirm before removing).
        afterglow._set_data()
        afterglow._set_photon_index()
        afterglow._set_t90()
        afterglow._get_redshift()
        afterglow.load_and_truncate_data(truncate=truncate, truncate_method=truncate_method, data_mode=data_mode)
        return afterglow
    @property
    def _stripped_name(self) -> str:
        # Name without the 'GRB' prefix, as used in the meta data table.
        # NOTE(review): lstrip strips any leading G/R/B characters, not just
        # the literal 'GRB' prefix -- confirm acceptable for all names.
        return self.name.lstrip('GRB')
    @property
    def data_mode(self) -> str:
        """
        :return: The currently active data mode (one in `Transient.DATA_MODES`)
        :rtype: str
        """
        return self._data_mode
@data_mode.setter
def data_mode(self, data_mode: str) -> None:
"""
:return: One of the data modes in `Transient.DATA_MODES`.
:rtype: str
"""
if data_mode in self.DATA_MODES or data_mode is None:
self._data_mode = data_mode
try:
self.directory_structure = afterglow_directory_structure(
grb=self.name, data_mode=self.data_mode, instrument="")
except AttributeError:
pass
else:
raise ValueError("Unknown data mode.")
    def load_and_truncate_data(
            self, truncate: bool = True, truncate_method: str = 'prompt_time_error', data_mode: str = 'flux') -> None:
        """Read data of SGRB from given path and GRB telephone number.
        Truncate the data to get rid of all but the last prompt emission point
        make a cut based on the size of the temporal error; ie if t_error < 1s, the data point is
        part of the prompt emission

        :param truncate: Whether to truncate the data.
        :type truncate: bool
        :param truncate_method: Must be from `Truncator.TRUNCATE_METHODS`. (Default value = 'prompt_time_error')
        :type truncate_method: str
        :param data_mode: Data mode. Must be one from `Afterglow.DATA_MODES`. (Default value = 'flux')
        :type data_mode: str, optional
        """
        # Setting data_mode also refreshes self.directory_structure (see setter).
        self.data_mode = data_mode
        self.x, self.x_err, self.y, self.y_err = self.load_data(name=self.name, data_mode=self.data_mode)
        if truncate:
            self.truncate(truncate_method=truncate_method)
@staticmethod
def load_data(name: str, data_mode: str = None) -> tuple:
"""Loads and returns data from a csv file
:param name: Telephone number of SGRB, e.g., 'GRB140903A' or '140903A' are valid inputs
:type name: str
:param data_mode: Data mode. Must be one from `Afterglow.DATA_MODES`. (Default value = None)
:type data_mode: str, optional
:return: A tuple with x, x_err, y, y_err data
:rtype: tuple
"""
directory_structure = afterglow_directory_structure(grb=f"GRB{name.lstrip('GRB')}", data_mode=data_mode)
data = np.genfromtxt(directory_structure.processed_file_path, delimiter=",")[1:]
x = data[:, 0]
x_err = data[:, 1:3].T
y = np.array(data[:, 3])
y_err = np.array(np.abs(data[:, 4:6].T))
return x, x_err, y, y_err
    def truncate(self, truncate_method: str = 'prompt_time_error') -> None:
        """Truncate the data using the specified method. See `redback.transient.afterglow.Truncator` for
        documentation of the truncation methods.

        :param truncate_method: Must be from `Truncator.TRUNCATE_METHODS`. (Default value = 'prompt_time_error')
        :type truncate_method: str
        """
        # self.Truncator is injectable via the constructor kwargs.
        truncator = self.Truncator(x=self.x, x_err=self.x_err, y=self.y, y_err=self.y_err, time=self.time,
                                   time_err=self.time_err, truncate_method=truncate_method)
        self.x, self.x_err, self.y, self.y_err = truncator.truncate()
@property
def event_table(self) -> str:
"""
:return: Relative path to the event table.
:rtype: str
"""
return os.path.join(dirname, f'../tables/{self.__class__.__name__}_table.txt')
    def _save_luminosity_data(self) -> None:
        """Saves luminosity data to a csv file."""
        filename = f"{self.name}.csv"
        # Error arrays are (2, n): row 0 positive, row 1 negative errors.
        data = {"Time in restframe [s]": self.time_rest_frame,
                "Pos. time err in restframe [s]": self.time_rest_frame_err[0, :],
                "Neg. time err in restframe [s]": self.time_rest_frame_err[1, :],
                "Luminosity [10^50 erg s^{-1}]": self.Lum50,
                "Pos. luminosity err [10^50 erg s^{-1}]": self.Lum50_err[0, :],
                "Neg. luminosity err [10^50 erg s^{-1}]": self.Lum50_err[1, :]}
        df = pd.DataFrame(data=data)
        df.to_csv(join(self.directory_structure.directory_path, filename), index=False)
def _set_data(self) -> None:
"""Loads data from the meta data table and sets it to the respective attribute."""
try:
meta_data = pd.read_csv(self.event_table, header=0, error_bad_lines=False, delimiter='\t', dtype='str')
meta_data['BAT Photon Index (15-150 keV) (PL = simple power-law, CPL = cutoff power-law)'] = meta_data[
'BAT Photon Index (15-150 keV) (PL = simple power-law, CPL = cutoff power-law)'].fillna(0)
self.meta_data = meta_data
except FileNotFoundError:
logger.warning("Meta data does not exist for this event.")
self.meta_data = None
    def _set_photon_index(self) -> None:
        """Set the photon index attribute from the metadata table."""
        # Keep an explicitly provided photon index untouched.
        if not np.isnan(self.photon_index):
            return
        if self.magnitude_data or self.flux_density_data:
            # NOTE(review): this sets nan but then falls through to the table
            # lookup below, which may overwrite it -- a `return` here looks
            # intended (mirroring the guard above); confirm before changing.
            self.photon_index = np.nan
        try:
            photon_index = self.meta_data.query('GRB == @self._stripped_name')[
                'BAT Photon Index (15-150 keV) (PL = simple power-law, CPL = cutoff power-law)'].values[0]
            self.photon_index = self.__clean_string(photon_index)
        except (AttributeError, IndexError):
            # No meta data table, or GRB not listed in it.
            self.photon_index = np.nan
    def _get_redshift(self) -> None:
        """Set redshift from metadata table. Some GRBs do not have measurements."""
        # Keep an explicitly provided redshift untouched.
        if not np.isnan(self.redshift):
            return
        try:
            redshift = self.meta_data.query('GRB == @self._stripped_name')['Redshift'].values[0]
            if isinstance(redshift, str):
                # Table values may carry annotations; strip them to a float.
                self.redshift = self.__clean_string(redshift)
            else:
                self.redshift = redshift
        except (AttributeError, IndexError):
            # No meta data table, or GRB not listed in it.
            self.redshift = np.nan
def _get_redshift_for_luminosity_calculation(self) -> Union[float, None]:
"""Gets redshift | |
delta_o_2 = N.zeros((self.timeSteps,self.no))
delta_h_1 = N.zeros((self.timeSteps,self.nh1))
delta_h_2 = N.zeros((self.timeSteps,self.nh2))
delta_c_1 = N.zeros((self.timeSteps,self.nh1))
delta_c_2 = N.zeros((self.timeSteps,self.nh2))
delta_p_1 = N.zeros((self.timeSteps,self.np1))
delta_p_2 = N.zeros((self.timeSteps,self.np2))
delta_hh_1 = N.zeros((self.timeSteps,self.nhh))
# and dW's
dWih_1 = N.zeros((self.nh1,self.ni))
dWih_2 = N.zeros((self.nh2,self.ni))
dWho_1 = N.zeros((self.no,self.nh1))
dWho_2 = N.zeros((self.no,self.nh2))
dWhh_1 = N.zeros((self.nh1,self.nhh))
dWch_1 = N.zeros((self.nh1,self.nh1))
dWch_2 = N.zeros((self.nh2,self.nh2))
if self.recurrent:
dWhc_1 = N.zeros((self.nh1,self.nh1))
dWhc_2 = N.zeros((self.nh2,self.nh2))
dWph_1 = N.zeros((self.nh1,self.np1))
dWph_2 = N.zeros((self.nh2,self.np2))
for t in range(self.timeSteps-1,-1,-1): # going backward in time ...
# print dWch_1
# compute deltas
# error term output units
#print t
delta_o[t] = (signalOut[t] - self.act_o[t])
#print signalOut[t]
#print delta_o[t]
# print 'signalOut', signalOut[t]
# print 'act_o', self.act_o[t]
#print signalOut[t]
#print self.act_o[t]
#print "delta_o", delta_o[t]
delta_o[t] *=self.dSigmoid(self.act_o[t],'linear')
delta_o_1[t] = delta_o[t] * self.z_2[t]
delta_o_2[t] = delta_o[t] * self.z_1[t]
# --- error term hidden units 1 ---
delta_h_1[t] = N.dot(N.transpose(self.Who_1),delta_o_1[t])
#delta_h_1[t] = N.dot(N.transpose(self.Who_1),delta_o[t])
if t < self.timeSteps-1:
delta_h_1[t] += N.dot(N.transpose(self.Whc_1),delta_c_1[t+1])
else:
delta_h_1[t] += N.dot(N.transpose(self.Whc_1),N.zeros(self.nh1))
delta_h_1[t] *= self.dSigmoid(self.act_h_1[t],'logistic')
# --- error term hidden units 2 ---
delta_h_2[t] = N.dot(N.transpose(self.Who_2),delta_o_2[t])
#delta_h_2[t] = N.dot(N.transpose(self.Who_2),delta_o[t])
#delta_hh_1[t] = N.dot(N.transpose(self.Whh_1),delta_h_1[t])
#delta_hh_1[t] *= self.dSigmoid(self.act_hh_1[t],'linear')
if t < self.timeSteps-1:
delta_h_2[t] += N.dot(N.transpose(self.Whc_2),delta_c_2[t+1])
else:
delta_h_2[t] += N.dot(N.transpose(self.Whc_2),N.zeros(self.nh2))
delta_h_2[t] *= self.dSigmoid(self.act_h_2[t],'logistic')
#print "delta_c_2", delta_c_2
#print "delta_h_1", delta_h_1
#print "delta_h_2", delta_h_2
# error term parametric bias 1
delta_p_1[t] = N.dot(N.transpose(self.Wph_1),delta_h_1[t])
delta_p_1[t] *= self.dSigmoid(self.act_p_1, 'tanhOpt')
# error term parametric bias 2
delta_p_2[t] = N.dot(N.transpose(self.Wph_2),delta_h_2[t])
delta_p_2[t] *= self.dSigmoid(self.act_p_2, 'tanhOpt')
# --- error term context units 1 ---
delta_c_1[t] = N.dot(N.transpose(self.Wch_1),delta_h_1[t])
delta_c_1[t] *= self.dSigmoid(self.act_c_1[t],'linear')
# --- error term context units 2 ---
delta_c_2[t] = N.dot(N.transpose(self.Wch_2),delta_h_2[t])
delta_c_2[t] *= self.dSigmoid(self.act_c_2[t],'linear')
# compute weight change at time t
# hidden to output weights
tmpD = N.transpose((N.kron(N.ones((self.nh1,1)),
delta_o_1[t])).reshape(self.nh1,self.no))
tmpAct = N.transpose((N.repeat(self.act_h_1[t],self.no,
axis=0)).reshape(self.nh1,self.no))
dWho_1 += tmpAct*tmpD
#dWho_1 = tmpAct*tmpD
#self.Who_1 += self.eta_ho_1*(dWho_1 + self.momentum * dWho_1)
tmpD = N.transpose((N.kron(N.ones((self.nh2,1)),
delta_o_2[t])).reshape(self.nh2,self.no))
tmpAct = N.transpose((N.repeat(self.act_h_2[t],self.no,
axis=0)).reshape(self.nh2,self.no))
dWho_2 += tmpAct*tmpD
#dWho_2 = tmpAct*tmpD
#self.Who_2 += self.eta_ho_2*(dWho_2 + self.momentum * dWho_2)
# input to hidden weights
tmpD = N.transpose((N.kron(N.ones((self.ni,1)),
delta_h_1[t])).reshape(self.ni,self.nh1))
tmpAct = N.transpose((N.repeat(signalIn[t],self.nh1,
axis=0)).reshape(self.ni,self.nh1))
dWih_1 += tmpAct*tmpD
#dWih_1 = tmpAct*tmpD
#self.Wih_1 += self.eta_ih_1*(dWih_1 + self.momentum * dWih_1)
tmpD = N.transpose((N.kron(N.ones((self.ni,1)),
delta_h_2[t])).reshape(self.ni,self.nh2))
tmpAct = N.transpose((N.repeat(signalIn[t],self.nh2,
axis=0)).reshape(self.ni,self.nh2))
dWih_2 += tmpAct*tmpD
#dWih_2 = tmpAct * tmpD
#self.Wih_2 += self.eta_ih_2*(dWih_2 + self.momentum * dWih_2)
# context to hidden weights
tmpD = N.transpose((N.kron(N.ones((self.nh1,1)),
delta_h_1[t])).reshape(self.nh1,self.nh1))
tmpAct = N.transpose((N.repeat(self.act_c_1[t],self.nh1,
axis=0)).reshape(self.nh1,self.nh1))
dWch_1 += tmpAct*tmpD
#dWch_1 = tmpAct*tmpD
#self.Wch_1 += self.eta_ch_1*(dWch_1 + self.momentum * dWch_1)
tmpD = N.transpose((N.kron(N.ones((self.nh2,1)),
delta_h_2[t])).reshape(self.nh2,self.nh2))
tmpAct = N.transpose((N.repeat(self.act_c_2[t],self.nh2,
axis=0)).reshape(self.nh2,self.nh2))
dWch_2 += tmpAct*tmpD
#dWch_2 = tmpAct*tmpD
#self.Wch_2 += self.eta_ch_2*(dWch_2 + self.momentum * dWch_2)
if self.recurrent:
# hidden to context weights
tmpD = N.transpose((N.kron(N.ones((self.nh,1)),
delta_c[t])).reshape(self.nh,self.nh))
tmpAct = N.transpose((N.repeat(self.act_h[t],self.nh,
axis=0)).reshape(self.nh,self.nh))
dWhc += tmpAct*tmpD
# parametric bias to hidden
tmpD = N.transpose((N.kron(N.ones((self.np1,1)),
delta_h_1[t])).reshape(self.np1,self.nh1))
tmpAct = N.transpose((N.repeat(self.act_p_1,self.nh1,
axis=0)).reshape(self.np1,self.nh1))
dWph_1 += tmpAct*tmpD
#dWph_1 = tmpAct*tmpD
#self.Wph_1 += self.eta_ph_1*(dWph_1 + self.momentum * dWph_1)
tmpD = N.transpose((N.kron(N.ones((self.np2,1)),
delta_h_2[t])).reshape(self.np2,self.nh2))
tmpAct = N.transpose((N.repeat(self.act_p_2,self.nh2,
axis=0)).reshape(self.np2,self.nh2))
dWph_2 += tmpAct*tmpD
#dWph_2 = tmpAct*tmpD
#self.Wph_2 += self.eta_ph_2*(dWph_2 + self.momentum * dWph_2)
#tmpD = N.transpose((N.kron(N.ones((self.nhh,1)),
# delta_h_1[t])).reshape(self.nhh,self.nh1))
#tmpAct = N.transpose((N.repeat(self.act_hh_1[t],self.nh1,
# axis=0)).reshape(self.nhh,self.nh1))
#dWhh_1 += tmpAct*tmpD
# adaptive learning rate.
'''
temp_mul_ho_1 = self.dWho_1_old * dWho_1
self.eta_ho_1[(temp_mul_ho_1 < 0).nonzero()] = N.maximum((self.eta_ho_1[(temp_mul_ho_1 < 0).nonzero()]*self.negative_fac_eta),self.eta_min)
self.eta_ho_1[(temp_mul_ho_1 > 0).nonzero()] = N.minimum((self.eta_ho_1[(temp_mul_ho_1 > 0).nonzero()]*self.positive_fac_eta),self.eta_max)
#self.dWho_1[(temp_mul_ho_1 < 0).nonzero()] = -self.dWho_1[(temp_mul_ho_1 < 0).nonzero()]
temp_mul_ho_2 = self.dWho_2_old * dWho_2
self.eta_ho_2[(temp_mul_ho_2 < 0).nonzero()] = N.maximum((self.eta_ho_2[(temp_mul_ho_2 < 0).nonzero()]*self.negative_fac_eta),self.eta_min)
self.eta_ho_2[(temp_mul_ho_2 > 0).nonzero()] = N.minimum((self.eta_ho_2[(temp_mul_ho_2 > 0).nonzero()]*self.positive_fac_eta),self.eta_max)
#if temp_mul_ho_2 < 0:
# self.eta_ho_2 = max((self.eta_ho_2*self.negative_fac_eta),self.eta_min)
#elif temp_mul_ho_2 > 0:
# self.eta_ho_2 = min((self.eta_ho_2*self.positive_fac_eta),self.eta_max)
#self.dWho_2[(temp_mul_ho_2 < 0).nonzero()] = -self.dWho_2[(temp_mul_ho_2 < 0).nonzero()]
#self.dWho[(temp_mul_ho >= 0).nonzero()] = self.eta_ho*dWho[(temp_mul_ho >= 0).nonzero()] + self.momentum * self.dWho[(temp_mul_ho >= 0).nonzero()]
#temp_mul_hh_1 = self.dWhh_1_old * dWhh_1
#self.eta_hh_1[(temp_mul_hh_1 < 0).nonzero()] = N.maximum((self.eta_hh_1[(temp_mul_hh_1 < 0).nonzero()]*self.negative_fac_eta),self.eta_min)
#self.eta_hh_1[(temp_mul_hh_1 > 0).nonzero()] = N.minimum((self.eta_hh_1[(temp_mul_hh_1 > 0).nonzero()]*self.positive_fac_eta),self.eta_max)
temp_mul_ih_1 = self.dWih_1_old * dWih_1
self.eta_ih_1[(temp_mul_ih_1 < 0).nonzero()] = N.maximum((self.eta_ih_1[(temp_mul_ih_1 < 0).nonzero()]*self.negative_fac_eta),self.eta_min)
self.eta_ih_1[(temp_mul_ih_1 > 0).nonzero()] = N.minimum((self.eta_ih_1[(temp_mul_ih_1 > 0).nonzero()]*self.positive_fac_eta),self.eta_max)
#if temp_mul_ih_1 < 0:
# self.eta_ih_1 = max((self.eta_ih_1*self.negative_fac_eta),self.eta_min)
#elif temp_mul_ih_1 > 0:
# self.eta_ih_1 = min((self.eta_ho_1*self.positive_fac_eta),self.eta_max)
#self.dWih_1[(temp_mul_ih_1 < 0).nonzero()] = -self.dWih_1[(temp_mul_ih_1 < 0).nonzero()]
temp_mul_ih_2 = self.dWih_2_old * dWih_2
self.eta_ih_2[(temp_mul_ih_2 < 0).nonzero()] = N.maximum((self.eta_ih_2[(temp_mul_ih_2 < 0).nonzero()]*self.negative_fac_eta),self.eta_min)
self.eta_ih_2[(temp_mul_ih_2 > 0).nonzero()] = N.minimum((self.eta_ih_2[(temp_mul_ih_2 > 0).nonzero()]*self.positive_fac_eta),self.eta_max)
#if temp_mul_ih_2 < 0:
# self.eta_ih_2 = max((self.eta_ih_2*self.negative_fac_eta),self.eta_min)
#elif temp_mul_ih_2 > 0:
# self.eta_ih_2 = min((self.eta_ih_2*self.positive_fac_eta),self.eta_max)
#self.dWih_2[(temp_mul_ih_2 < 0).nonzero()] = -self.dWih_2[(temp_mul_ih_2 < 0).nonzero()]
#self.dWih[(temp_mul_ih >= 0).nonzero()] = self.eta_ih*dWih[(temp_mul_ih >= 0).nonzero()] + self.momentum * self.dWih[(temp_mul_ih >= 0).nonzero()]
temp_mul_ch_1 = self.dWch_1_old * dWch_1
self.eta_ch_1[(temp_mul_ch_1 < 0).nonzero()] = N.maximum((self.eta_ch_1[(temp_mul_ch_1 < 0).nonzero()]*self.negative_fac_eta),self.eta_min)
self.eta_ch_1[(temp_mul_ch_1 > 0).nonzero()] = N.minimum((self.eta_ch_1[(temp_mul_ch_1 > 0).nonzero()]*self.positive_fac_eta),self.eta_max)
#if temp_mul_ch_1 < 0:
# self.eta_ch_1 = max((self.eta_ch_1*self.negative_fac_eta),self.eta_min)
#elif temp_mul_ch_1 > 0:
# self.eta_ch_1 = min((self.eta_ch_1*self.positive_fac_eta),self.eta_max)
#self.dWch_1[(temp_mul_ch_1 < 0).nonzero()] = -self.dWch_1[(temp_mul_ch_1 < 0).nonzero()]
temp_mul_ch_2 = self.dWch_2_old * dWch_2
self.eta_ch_2[(temp_mul_ch_2 < 0).nonzero()] = N.maximum((self.eta_ch_2[(temp_mul_ch_2 < 0).nonzero()]*self.negative_fac_eta),self.eta_min)
self.eta_ch_2[(temp_mul_ch_2 > 0).nonzero()] = N.minimum((self.eta_ch_2[(temp_mul_ch_2 > 0).nonzero()]*self.positive_fac_eta),self.eta_max)
#if temp_mul_ch_2 < 0:
# self.eta_ch_2 = max((self.eta_ch_2*self.negative_fac_eta),self.eta_min)
#elif temp_mul_ch_2 > 0:
# self.eta_ch_2 = min((self.eta_ch_2*self.positive_fac_eta),self.eta_max)
#self.dWch_2[(temp_mul_ch_2 < 0).nonzero()] = -self.dWch_2[(temp_mul_ch_2 < 0).nonzero()]
#self.dWch[(temp_mul_ch >= 0).nonzero()] = self.eta_ch*dWch[(temp_mul_ch >= 0).nonzero()] + self.momentum * self.dWch[(temp_mul_ch >= 0).nonzero()]
temp_mul_ph_1 = self.dWph_1_old * dWph_1
self.eta_ph_1[(temp_mul_ph_1 < 0).nonzero()] = N.maximum((self.eta_ph_1[(temp_mul_ph_1 < 0).nonzero()]*self.negative_fac_eta),self.eta_min)
self.eta_ph_1[(temp_mul_ph_1 > 0).nonzero()] = N.minimum((self.eta_ph_1[(temp_mul_ph_1 > 0).nonzero()]*self.positive_fac_eta),self.eta_max)
#if temp_mul_ph_1 < 0:
# self.eta_ph_1 = max((self.eta_ph_1*self.negative_fac_eta),self.eta_min)
#elif temp_mul_ph_1 > 0:
# self.eta_ph_1 = min((self.eta_ph_1*self.positive_fac_eta),self.eta_max)
#self.dWph_1[(temp_mul_ph_1 < 0).nonzero()] = -self.dWph_1[(temp_mul_ph_1 < 0).nonzero()]
temp_mul_ph_2 = self.dWph_2_old * dWph_2
self.eta_ph_2[(temp_mul_ph_2 < 0).nonzero()] = N.maximum((self.eta_ph_2[(temp_mul_ph_2 < 0).nonzero()]*self.negative_fac_eta),self.eta_min)
self.eta_ph_2[(temp_mul_ph_2 > 0).nonzero()] = N.minimum((self.eta_ph_2[(temp_mul_ph_2 > 0).nonzero()]*self.positive_fac_eta),self.eta_max)
#if temp_mul_ph_2 < 0:
# self.eta_ph_2 = max((self.eta_ph_2*self.negative_fac_eta),self.eta_min)
#elif temp_mul_ph_2 > 0:
# self.eta_ph_2 = min((self.eta_ph_2*self.positive_fac_eta),self.eta_max)
#self.dWph_2[(temp_mul_ph_2 < 0).nonzero()] = -self.dWph_2[(temp_mul_ph_2 < 0).nonzero()]
#self.dWph[(temp_mul_ph >= 0).nonzero()] = self.eta_ph*dWph[(temp_mul_ph >= 0).nonzero()] + self.momentum * self.dWph[(temp_mul_ph >= 0).nonzero()]
if self.recurrent:
temp_mul_hc = self.dWhc_old * dWhc
self.dWhc[(temp_mul_hc < 0).nonzero()] = -self.dWhc[(temp_mul_hc < 0).nonzero()]
#self.dWhc[(temp_mul_hc >= 0).nonzero()] = self.eta_hc*dWhc[(temp_mul_hc >= 0).nonzero()] + self.momentum * self.dWhc[(temp_mul_hc >= 0).nonzero()]
'''
# end going back through time
# compute weight change over time series
self.dWho_1 = self.eta_ho_1*(dWho_1 + self.momentum * self.dWho_1)
self.dWho_2 = self.eta_ho_2*(dWho_2 + self.momentum * self.dWho_2)
self.dWih_1 = self.eta_ih_1*(dWih_1 + self.momentum * self.dWih_1)
#self.dWhh_1 = self.eta_hh_1*(dWhh_1 + self.momentum * self.dWhh_1)
self.dWih_2 = self.eta_ih_2*(dWih_2 + self.momentum * self.dWih_2)
self.dWch_1 = self.eta_ch_1*(dWch_1 + self.momentum * self.dWch_1)
self.dWch_2 = self.eta_ch_2*(dWch_2 + self.momentum * self.dWch_2)
self.dWph_1 = self.eta_ph_1*(dWph_1 + self.momentum * self.dWph_1)
self.dWph_2 = self.eta_ph_2*(dWph_2 + self.momentum * self.dWph_2)
if self.recurrent:
self.dWhc = self.eta_hc*dWhc #+ self.momentum * self.dWhc
aver_delta_p_1 = N.average(delta_p_1, axis=0)
aver_delta_p_2 = N.average(delta_p_2, axis=0)
self.gamma_1 = self.gamma_factor * N.absolute(aver_delta_p_1)
self.gamma_2 = self.gamma_factor * N.absolute(aver_delta_p_2)
# compute activity PB
self.rho_1 += self.gamma_1*N.sum(delta_p_1,axis=0)
self.act_p_1 = self.sigmoid(self.rho_1,'tanhOpt')
self.rho_2 += self.gamma_2*N.sum(delta_p_2,axis=0)
self.act_p_2 = self.sigmoid(self.rho_2,'tanhOpt')
# save for next run
self.dWho_1_old = N.copy(dWho_1)
self.dWho_2_old = N.copy(dWho_2)
self.dWih_1_old = N.copy(dWih_1)
self.dWhh_1_old = N.copy(dWhh_1)
self.dWih_2_old = N.copy(dWih_2)
self.dWch_1_old = N.copy(dWch_1)
self.dWch_2_old = N.copy(dWch_2)
self.dWph_1_old = N.copy(dWph_1)
self.dWph_2_old = N.copy(dWph_2)
if self.recurrent:
self.dWhc_old = N.copy(dWhc)
def updateWeights(self, range=5.0):
self.Who_1 += self.dWho_1
self.Who_2 += self.dWho_2
self.Wih_1 += self.dWih_1
#self.Whh_1 += self.dWhh_1
self.Wih_2 += self.dWih_2
self.Wch_1 += self.dWch_1
#self.Wch_2 += self.dWch_2
self.Wph_1 += self.dWph_1
#self.Wph_2 += self.dWph_2
if self.recurrent:
self.Whc += self.dWhc
#print self.Wih_1
for i in xrange(self.nh1):
#self.Wph_1[i] /= N.linalg.norm(self.Wph_1[i])
self.Wih_1[i] /= N.linalg.norm(self.Wih_1[i])
self.Wch_1[i] /= N.linalg.norm(self.Wch_1[i])
for i in xrange(self.nh2):
#self.Wph_2[i] /= N.linalg.norm(self.Wph_2[i])
self.Wih_2[i] /= N.linalg.norm(self.Wih_2[i])
self.Wch_2[i] /= N.linalg.norm(self.Wch_2[i])
outOfRangeDetected = False
'''
range = 8.0
if N.any(N.nonzero(self.Who < -range)) or \
N.any(N.nonzero(self.Who > range)):
self.Who = N.clip(self.Who,-range,range)
self.logger.info("Who out of range")
if N.any(N.nonzero(self.Wih < -range)) or \
N.any(N.nonzero(self.Wih > range)):
self.logger.info("Wih out of range")
self.Wih = N.clip(self.Wih,-range,range)
if N.any(N.nonzero(self.Wch < -range)) or \
N.any(N.nonzero(self.Wch > range)):
self.logger.info("Wch out of range")
self.Wch = N.clip(self.Wch,-range,range)
if N.any(N.nonzero(self.Wph < -range)) or \
N.any(N.nonzero(self.Wph > range)):
self.logger.info("Wph out of range")
self.Wph = N.clip(self.Wph,-range,range)
if self.recurrent:
if N.any(N.nonzero(self.Whc < -range)) or \
N.any(N.nonzero(self.Whc > range)):
self.logger.info("Whc out of range")
self.Whc = N.clip(self.Whc,-range,range)
'''
outOfRangeDetected = False
if N.any(N.nonzero(self.Who_1 < -range)) or \
N.any(N.nonzero(self.Who_1 > range)):
self.Who_1 -= self.dWho_1
| |
<reponame>stangelid/qt
import sys
import os.path
import json
import argparse
from random import seed
from time import time
import math
from scipy.cluster.vq import kmeans
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
import torch.utils.tensorboard as tb
from qt import QuantizedTransformerModel
from utils.data import *
from utils.training import *
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Trains the QT model.\nFor usage example, refer to: \n' + \
'\thttps://github.com/stangelid/qt')
data_arg_group = argparser.add_argument_group('Data arguments')
data_arg_group.add_argument('--data',
help='training data in json format',
type=str, default='../data/json/space_train.json')
data_arg_group.add_argument('--sentencepiece',
help='sentencepiece model file',
type=str, default='../data/sentencepiece/spm_unigram_32k.model')
data_arg_group.add_argument('--max_num_entities',
help='maximum number of entities to load for training (default: all)',
type=int, default=None)
data_arg_group.add_argument('--max_rev_len',
help='maximum number of sentences per review (default: 40)',
type=int, default=40)
data_arg_group.add_argument('--max_sen_len',
help='maximum number of tokens per sentence (default: 40)',
type=int, default=40)
model_arg_group = argparser.add_argument_group('Model hyperparams')
model_arg_group.add_argument('--d_model',
help='model dimensionality (default: 320)',
type=int, default=320)
model_arg_group.add_argument('--codebook_size',
help='size of quantization codebook (default: 1024)',
type=int, default=1024)
model_arg_group.add_argument('--output_nheads',
help='number of output sentence heads (default: 8)',
type=int, default=8)
model_arg_group.add_argument('--nlayers',
help='number of sentence-level layers (default: 3)',
type=int, default=3)
model_arg_group.add_argument('--internal_nheads',
help='number of attention heads (default: 4)',
type=int, default=4)
model_arg_group.add_argument('--d_ff',
help='feed-forward dimensionality (default: 512)',
type=int, default=512)
model_arg_group.add_argument('--in_pos',
help='use input positional embeddings',
action='store_true')
model_arg_group.add_argument('--out_pos',
help='use output positional embeddings',
action='store_true')
model_arg_group.add_argument('--dropout',
help='transformer dropout probability (default: 0.0)',
type=float, default=0.0)
train_arg_group = argparser.add_argument_group('Basic training hyperparams')
train_arg_group.add_argument('--batch_size',
help='the batch size (default: 5)',
type=int, default=5)
train_arg_group.add_argument('--epochs',
help='number of epochs (default: 20)',
type=int, default=20)
train_arg_group.add_argument('--lr',
help='initial learning rate',
type=float, default=0.001)
train_arg_group.add_argument('--lr_decay',
help='learning rate decay (default: 0.9)',
type=float, default=0.9)
train_arg_group.add_argument('--label_smoothing',
help='label smoothing coeff (default: 0.1)',
type=float, default=0.1)
train_arg_group.add_argument('--commitment_cost',
help='VQ-VAE commitment coefficient (default: 1.00)',
type=float, default=1.00)
train_arg_group = argparser.add_argument_group('Soft EMA hyperparams')
train_arg_group.add_argument('--ema_temp',
help='sampling temperature for Soft EMA codebook training (default: 1.0)',
type=float, default=1.0)
train_arg_group.add_argument('--ema_num_samples',
help='number of samples for Soft EMA codebook training (default: 10)',
type=int, default=10)
train_arg_group.add_argument('--ema_decay',
help='exponential decay for EMA (default: 0.99)',
type=float, default=0.99)
lr_arg_group = argparser.add_argument_group('Learning rate drop-off hyperparams',
'Learning rate drop-off reduces the lr to 0 after some epochs ' + \
'and slowly increases it again. May help with quantization collapse, ' + \
'but not necessary in most cases.')
lr_arg_group.add_argument('--lr_drop_enc',
help='drop lr for encoder to zero and increase slowly',
action='store_true')
lr_arg_group.add_argument('--lr_drop_all',
help='drop lr for all to zero and increase slowly',
action='store_true')
lr_arg_group.add_argument('--lr_drop_epoch',
help='epoch to drop learning rate to zero',
type=int, default=-1)
lr_arg_group.add_argument('--lr_rtrn_epochs',
help='number of epochs to increase learning rate to normal after drop',
type=int, default=-1)
warmup_arg_group = argparser.add_argument_group('Transformer warmup hyperparams',
'With transformer warmup, QT is trained without quantization for ' + \
'some epochs, and then gradually introduces quantization. Improves ' + \
'training stability.')
warmup_arg_group.add_argument('--no_transformer_warmup',
help='disable transformer warmup before quantization',
action='store_true')
warmup_arg_group.add_argument('--warmup_epochs',
help='don\'t quantize at all for this many epochs (default: 4)',
type=int, default=4)
warmup_arg_group.add_argument('--no_warmup_annealing',
help='disable slow decrease of non-quantized residual coefficient',
action='store_true')
warmup_arg_group.add_argument('--warmup_annealing_min',
help='minimum residual coefficient for non-quantized path (default: 0.0)',
type=float, default=0.0)
warmup_arg_group.add_argument('--warmup_annealing_epochs',
help='non-quantized residual reduction lasts this many epochs (default: 2)',
type=int, default=2)
kmeans_arg_group = argparser.add_argument_group('K-means initialization hyperparams',
'Initialize codebook with kmeans after transformer warmup')
kmeans_arg_group.add_argument('--no_kmeans',
help='disable kmeans codebook initialization after warmup',
action='store_true')
kmeans_arg_group.add_argument('--kmeans_batches',
help='number of batches for kmeans (default: 100)',
type=int, default=100)
kmeans_arg_group.add_argument('--kmeans_iter',
help='number of iterations for kmeans (default: 50)',
type=int, default=50)
other_arg_group = argparser.add_argument_group('Other arguments')
other_arg_group.add_argument('--run_id',
help='unique run id (for logging and saved models)',
type=str, default='run1')
other_arg_group.add_argument('--gpu', help='gpu device to use (default: use cpu)',
type=int, default=-1)
other_arg_group.add_argument('--logdir',
help='directory to put tensorboard logs (default: \'../logs\')',
type=str, default='../logs')
other_arg_group.add_argument('--log_every',
help='log every n forward passes (default: 50)',
type=int, default=50)
other_arg_group.add_argument('--savedir',
help='directory to put saved model snapshots (default: \'../models\')',
type=str, default='../models')
other_arg_group.add_argument('--save_every',
help='save model snapshot every N epochs (default: save on every epoch)',
type=int, default=1)
other_arg_group.add_argument('--seed',
help='random seed',
type=int, default=1)
other_arg_group.add_argument('--data_seed',
help='random seed for dataset (only affects batching and entity subsampling)',
type=int, default=1)
args = argparser.parse_args()
seed(args.data_seed)
if args.gpu >= 0:
device = torch.device('cuda:{0}'.format(args.gpu))
else:
device = torch.device('cpu')
data_path = args.data
spm_path = args.sentencepiece
save_path = args.savedir
log_path = args.logdir
# read data from json file
f = open(data_path, 'r')
data = json.load(f)
f.close()
# initialize dataset
dataset = ReviewDataset(data, sample_size=args.max_num_entities, spmodel=spm_path,
max_sen_len=args.max_sen_len, max_rev_len=args.max_rev_len)
vocab_size = dataset.vocab_size
nclasses = dataset.nclasses
# prepare train/dev/test splits
dataset.split()
# samplers for each split
train_sampler = \
ReviewBucketBatchSampler(dataset, args.batch_size, split='train')
dev_sampler = \
ReviewBucketBatchSampler(dataset, args.batch_size, split='dev')
test_sampler = \
ReviewBucketBatchSampler(dataset, args.batch_size, split='test')
# wrapper for collate function
collator = ReviewCollator(padding_idx=dataset.pad_id(), unk_idx=dataset.unk_id(),
bos_idx=dataset.bos_id(), eos_idx=dataset.eos_id())
# one dataloader per split
train_dl = DataLoader(dataset, batch_sampler=train_sampler,
collate_fn=collator.collate_reviews_generation)
dev_dl = DataLoader(dataset, batch_sampler=dev_sampler,
collate_fn=collator.collate_reviews_generation)
test_dl = DataLoader(dataset, batch_sampler=test_sampler,
collate_fn=collator.collate_reviews_generation)
nbatches_trn = len(train_dl)
nbatches_dev = len(dev_dl)
nbatches_tst = len(test_dl)
pad_id = dataset.pad_id()
bos_id = dataset.bos_id()
eos_id = dataset.eos_id()
unk_id = dataset.unk_id()
torch.manual_seed(args.seed)
# define model
model = QuantizedTransformerModel(
vocab_size,
d_model=args.d_model,
temp=args.ema_temp,
num_samples=args.ema_num_samples,
codebook_size=args.codebook_size,
commitment_cost=args.commitment_cost,
nlayers=args.nlayers,
internal_nheads=args.internal_nheads,
output_nheads=args.output_nheads,
d_ff=args.d_ff,
use_in_pos=args.in_pos,
use_out_pos=args.out_pos,
ema_decay=args.ema_decay,
dropout=args.dropout)
model.to(device)
# prepare optimizer and learning rate scheduler
if args.lr_drop_all:
optimizer = \
torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.98), eps=1e-9)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_calc)
elif args.lr_drop_enc:
param_groups = \
[
{'params': model.in_emb.parameters()},
{'params': model.encoder.parameters()},
{'params': model.decoder.parameters()},
{'params': model.linear.parameters()}
]
lambda1 = lr_calc
lambda2 = lambda epoch: args.lr_decay ** epoch
lr_lambdas = [lambda1, lambda1, lambda2, lambda2]
optimizer = \
torch.optim.Adam(param_groups, lr=args.lr, betas=(0.9, 0.98), eps=1e-9)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambdas)
else:
optimizer = \
torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.98), eps=1e-9)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, args.lr_decay)
# define losses
if args.label_smoothing == 0.0:
criterion = \
nn.CrossEntropyLoss(ignore_index=pad_id, reduction='sum')
else:
criterion = \
LabelSmoothingLoss(args.label_smoothing, vocab_size, ignore_index=pad_id)
valid_criterion = nn.CrossEntropyLoss(ignore_index=pad_id, reduction='sum')
criterion = criterion.to(device)
valid_criterion = valid_criterion.to(device)
# prepare transformer warmup scheduler
if (not args.no_transformer_warmup) and (not args.no_warmup_annealing):
warmup_scheduler = \
ResidualCoefficientScheduler(args.warmup_epochs,
args.warmup_annealing_epochs, nbatches_trn,
min_coeff=args.warmup_annealing_min)
if args.logdir != '':
tb_writer = tb.SummaryWriter(os.path.join(log_path, args.run_id))
for epoch in range(args.epochs):
# initialize loss and counts for accuracy
running_loss = 0.0
running_g_loss = 0.0
running_q_loss = 0.0
running_ppl = 0.0
model.train()
# quantize or not
quantize = (args.no_transformer_warmup or epoch >= args.warmup_epochs)
# if warmup is over, initialize codebook with kmeans
if (not args.no_kmeans) and epoch == args.warmup_epochs:
with torch.no_grad():
model.eval()
sentence_vecs = []
for i, batch in enumerate(train_dl):
if i == args.kmeans_batches:
break
src, tgt, gld = [x.to(device) for x in batch]
out, _, _, _ = model.encode(src, quantize=False)
sentence_vecs.append(out.reshape(-1, args.d_model).detach().to('cpu'))
sentence_vecs = torch.cat(sentence_vecs, dim=0).detach().numpy()
kmeans_codebook, _ = kmeans(sentence_vecs, args.codebook_size, iter=args.kmeans_iter)
# in case of missing clusters, fill in random ones.
# missing cluster may occur when there are identical
# sentence vectors in the clustered data
if kmeans_codebook.shape[0] < args.codebook_size:
num_missing_clusters = args.codebook_size - kmeans_codebook.shape[0]
new_clusters = np.random.randn(num_missing_clusters, args.d_model)
kmeans_codebook = np.concatenate((kmeans_codebook, new_clusters), axis=0)
model.encoder.set_codebook(torch.Tensor(kmeans_codebook))
model.train()
# save model snapshot
if args.save_every is not None:
torch.save(model, '{0}/{1}_{2}pkm_model.pt'.format(save_path, args.run_id, epoch))
for i, batch in enumerate(train_dl):
src, tgt, gld = [x.to(device) for x in batch]
batch_size, nsent, src_ntokens = src.size()
optimizer.zero_grad()
if not args.no_transformer_warmup:
if not args.no_warmup_annealing:
residual_coeff = warmup_scheduler.get_residual_coefficient(i, epoch)
else:
residual_coeff = 0.0 if quantize else 1.0
else:
residual_coeff = 0.0
out, encodings, q_loss, perplexity = \
model(src, tgt, quantize=quantize, residual_coeff=residual_coeff)
if args.label_smoothing > 0.0:
out = F.log_softmax(out, dim=-1)
g_loss = criterion(out.flatten(end_dim=-2), gld.flatten())
non_padding_elem = (tgt != pad_id).sum().item()
g_loss /= batch_size * nsent
q_loss *= float(non_padding_elem) / (batch_size * nsent)
loss = g_loss + q_loss
loss.backward()
optimizer.step()
running_loss += loss.item()
running_g_loss += g_loss.item()
if quantize:
running_q_loss += q_loss.item()
running_ppl += perplexity.item()
# log average loss per batch every k passes
if args.logdir != '' and i % args.log_every == args.log_every - 1:
step = epoch * nbatches_trn + i
running_uq_loss = running_q_loss / args.commitment_cost
lrs = lr_scheduler.get_lr()
lr_enc = lrs[0]
if len(lrs) > 1:
lr_dec = lrs[2]
else:
lr_dec = lr_enc
tb_writer.add_scalar('loss/train', running_loss / args.log_every, step)
tb_writer.add_scalar('g_loss/train', running_g_loss / args.log_every, step)
tb_writer.add_scalar('q_loss/train', running_q_loss / args.log_every, step)
tb_writer.add_scalar('uq_loss/train', running_uq_loss / args.log_every, step)
tb_writer.add_scalar('perplexity/train', running_ppl / args.log_every, step)
tb_writer.add_scalar('residual_coeff/train', residual_coeff, step)
tb_writer.add_scalar('learning_rate/enc', lr_enc, step)
tb_writer.add_scalar('learning_rate/dec', lr_dec, step)
running_loss = 0.0
running_g_loss = 0.0
running_q_loss = 0.0
running_ppl = 0.0
with torch.no_grad():
# initialize loss
running_loss = 0.0
running_g_loss = 0.0
running_q_loss = 0.0
running_ppl = 0.0
model.eval()
for i, batch in enumerate(dev_dl):
src, tgt, gld = [x.to(device) for x in batch]
batch_size, nsent, src_ntokens = src.size()
out, encodings, q_loss, perplexity = \
model(src, tgt, quantize=quantize, residual_coeff=residual_coeff)
g_loss = valid_criterion(out.flatten(end_dim=-2), gld.flatten())
non_padding_elem = (tgt != pad_id).sum().item()
g_loss /= batch_size | |
= []
if icdf.idl > -1:
wrk = ncid.variables[icdf.tname][:]
self.T_LIST = list(wrk)
else:
self.T_LIST = []
self.DATE = []
for i in range(icdf.nt):
self.DATE.append(num2date(self.T_LIST[i], \
units=icdf.time_units, \
calendar=icdf.time_calendar))
# ========================
def plot_initialize(self):
# ========================
    """Initialize the meridian/parallel grid from the current map extent.

    The tick interval is roughly one quarter of the longitude/latitude
    span, truncated to two decimals and rounded to the nearest integer
    when larger than one degree.  The initial/final meridian and
    parallel positions are set two intervals beyond the map limits so
    the grid always covers the whole view.

    Reads  ``self.PLOT.WEST/EAST/SOUTH/NORTH`` (Tk-style variables with
    ``get()``) and writes ``self.PLOT.MERIDIAN_{INT,INI,FIN}`` and
    ``self.PLOT.PARALLEL_{INT,INI,FIN}`` (via ``set()``).
    """
    # Meridian interval: ~ (EAST - WEST) / 4, truncated to 2 decimals.
    tmp1 = np.trunc(100 * (self.PLOT.EAST.get() - self.PLOT.WEST.get()) / 4) / 100
    if tmp1 > 1:
        tmp1 = np.rint(tmp1)
    if tmp1 <= 0:
        # A degenerate (< 0.04 deg) or inverted extent truncates to a
        # zero/negative interval; the divisions below would then yield
        # inf/nan (or a descending grid).  Fall back to the smallest
        # representable interval at this precision.
        tmp1 = 0.01
    self.PLOT.MERIDIAN_INT.set(tmp1)
    self.PLOT.MERIDIAN_INI.set(np.trunc(self.PLOT.WEST.get() / tmp1 - 2) * tmp1)
    self.PLOT.MERIDIAN_FIN.set(np.trunc(self.PLOT.EAST.get() / tmp1 + 2) * tmp1)
    # Parallel interval: same rule applied to the latitude span.
    tmp2 = np.trunc(100 * (self.PLOT.NORTH.get() - self.PLOT.SOUTH.get()) / 4) / 100
    if tmp2 > 1:
        tmp2 = np.rint(tmp2)
    if tmp2 <= 0:
        tmp2 = 0.01
    self.PLOT.PARALLEL_INT.set(tmp2)
    self.PLOT.PARALLEL_INI.set(np.trunc(self.PLOT.SOUTH.get() / tmp2 - 2) * tmp2)
    self.PLOT.PARALLEL_FIN.set(np.trunc(self.PLOT.NORTH.get() / tmp2 + 2) * tmp2)
# ==================
def make_plot(self):
# ==================
    """Create the plotting window on first use, then redraw the figure.

    When figure output is enabled (``self.PLOT.OUTPUT_FIGURE``) and no
    figure exists yet, this builds the Tk toplevel window, the embedded
    matplotlib Figure/canvas/toolbar, creates the map axes with the
    configured projection and wires the mouse-click, close and resize
    event handlers.  In every case the actual drawing is delegated to
    ``self.draw_figure()``.
    """
    #toconsola("EG make_plot:\n PLOT.OUTPUT_FIGURE: "+str(self.PLOT.OUTPUT_FIGURE.get()),
    #          wid=self.cons)
    if self.PLOT.OUTPUT_FIGURE.get():
        if self.fig is None:
            # First call: build the plotting window from scratch.
            #toconsola("\n EGL creation", wid=self.cons)
            self.Window_mapa = tk.Toplevel(self.master)
            self.Window_mapa.title("COSMO-VIEW plotting tool")
            self.Window_mapa.resizable(width=True,height=True)
            self.Window_mapa.grid_columnconfigure(0, weight=1)
            self.Window_mapa.grid_rowconfigure(0, weight=1)
            #self.Window_mapa.wm_geometry("1900x1200")
            #self.canvas = None # canvas
            # Frame container
            topframe = tk.Frame(self.Window_mapa)
            topframe.grid_rowconfigure(0, weight=1)
            topframe.grid(sticky='swen')
            topframe.grid_columnconfigure(0, weight=1)
            # Two panels: pack() is used inside the canvas panel, grid()
            # for the console.  Add the canvas panel.
            top_panel = tk.Frame(topframe, pady = 20)
            # Initialize figure, canvas and plot panel.
            #self.ax=None
            self.fig = Figure(figsize=self.PLOT.SIZE, \
                 facecolor=self.PLOT.FIGURE_COLOR.get(),dpi=self.PLOT.DPI.get())
            #toconsola(" MAP_PLOT: Set projection parameters",wid=self.cons)
            # Map axes with the user-selected cartopy projection.
            proj = map_proj(self.PLOT.MAP_PROJECTION.get(), params=self.params)
            self.ax = self.fig.add_subplot(111, projection=proj['proj'])
            self.canvas = FigureCanvasTkAgg(self.fig, master=top_panel)
            # Drawing happens later through self.draw_figure().
            #EG self.canvas.draw()
            self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
            toolbar = NavigationToolbar2Tk(self.canvas, top_panel)
            toolbar.update()
            self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=1)
            # Event controllers (mouse click / window close / resize).
            self.CANVAS_CLICK = self.canvas.mpl_connect('button_press_event',self.canvas_click)
            self.canvas.mpl_connect('close_event',self.canvas_closing)
            self.canvas.mpl_connect('resize_event',self.canvas_resizing)
            top_panel.grid(row=0, column=0, sticky='swen')
            # Request a full base-map redraw on the next draw_figure() call.
            self.drawmap = True
        else: toconsola(" EG ojo fig existe",wid=self.cons)
        self.draw_figure()
# ========================
def setmap(self,target=0):
# ========================
    """No-op placeholder kept for backward compatibility.

    Projection setup previously lived here (Basemap era).  It is now
    handled by the ``map_proj()`` helper from the tools module, which
    builds the cartopy projection used when the axes are created in
    ``make_plot()`` / ``draw_figure()``.  This method intentionally
    does nothing and always returns ``None``.

    Parameters
    ----------
    target : int, optional
        Unused; retained so existing callers keep working.
    """
    return None
# ====================
def draw_figure(self):
# ====================
global CONSOLA
toconsola("EG draw_figure:",wid=self.cons)
toconsola((" EG Configuration:\n"+ \
"\t Projection: "+str(self.PLOT.MAP_PROJECTION.get())+ \
"\n\t Domain:\t \t West - East: "+str(float(self.PLOT.WEST.get()))+ \
" - "+str(float(self.PLOT.EAST.get()))+ \
"\n\t \t South - North: "+str(float(self.PLOT.SOUTH.get()))+ \
" - "+str(float(self.PLOT.NORTH.get()))),wid=self.cons)
try:
self.scbar.remove()
except: pass
for bar in self.cdfbar:
try:
bar.remove()
except: pass
self.cdfbar = []
proj = map_proj(self.PLOT.MAP_PROJECTION.get())
self.ax.clear()
font_family = self.PLOT.MAP_FONT_TYPE.get() # Lets see ...
font_size = self.PLOT.LABEL_SIZE.get()
# EPSG
# EG Not necessary
# epsg = int(self.PLOT.EPSG.get())
# Este bloque podría rehacerse ahora
# Temporally deprecated self.PLOT.GEOMAP.get()
self.ax.set_extent([float(self.PLOT.WEST.get()) ,float(self.PLOT.EAST.get()),\
float(self.PLOT.SOUTH.get()),float(self.PLOT.NORTH.get())],\
crs=proj['proj'])
#Eg pruebas con projeccions self.ax.coastlines()
toconsola(" EG self.PLOT.GEOMAP: "+str(self.PLOT.GEOMAP.get()),wid=self.cons)
if self.drawmap:
toconsola(" EG draw_figure: call setmap no more needed !",wid=self.cons)
self.drawmap = False
#EG We implement GEBCO+EMODNET Tiles services
toconsola(" EG: RELIEF tiles"+str(self.PLOT.RELIEF_SHOW.get()),wid=self.cons)
if self.PLOT.RELIEF_SHOW.get():
if self.PLOT.RELIEF.get() == 1:
gebco ="GEBCO_2019_Grid"
try:
toconsola("\t EG: GEBCO tiles",wid=self.cons)
self.ax.add_wms(wms='https://www.gebco.net/data_and_products/gebco_web_services/2019/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,360&crs=EPSG:4326&format=image/jpeg&layers=gebco_2019_grid&width=1200&height=600&version=1.3.0',layers=gebco,zorder=0)
except:
toconsola("\t WARNING: GEBCO server failed !, it is disabled......",wid=self.cons)
elif self.PLOT.RELIEF.get() == 2:
emod_land="emodnet:mean_atlas_land"
toconsola("\t EG: EMODNET tiles",wid=self.cons)
try:
self.ax.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emod_land,zorder=0)
except:
toconsola("\t WARNING: EMODNET server failed !, it is disabled......",wid=self.cons)
else:
#EG Sometimes this situation is possible (i.e. manual edition of conf files)
self.PLOT.RELIEF_SHOW.set(False)
if self.PLOT.EMODNET_ISO.get():
emodnet="emodnet:contours"
toconsola("\t EG: EMODNET contours",wid=self.cons)
try:
self.ax.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emodnet,zorder=0)
except:
toconsola("\t WARNING: EMODNET contours failed !, it is disabled......",wid=self.cons)
# Draw SAIDIN:
#
if not empty(self.SAIDIN.FILENAME.get()):
if self.SAIDIN.show.get():
toconsola("EG plot SAIDIN",wid=self.cons)
#EG Added projection argument, map reference dropped
self.scbar = contourplot.drawing(self.fig,self.ax,proj['proj'],
self.SAIDIN.FLD.xx, self.SAIDIN.FLD.yy,
self.SAIDIN.FLD.data,
self.SAIDIN.FLD.data.mask,
self.SAIDIN.PLOT)
# Draw fields:
#
if self.ncdf > 0:
#EG Added projection argument, map reference dropped
toconsola("EG: plot netcdf",wid=self.cons)
for ii in range(self.ncdf):
if self.CDF[ii].show.get():
self.cdfbar.append(contourplot.drawing(self.fig,
self.ax, proj['proj'],
self.CDF[ii].FLD.xx, self.CDF[ii].FLD.yy,
self.CDF[ii].FLD.data,
self.CDF[ii].FLD.data.mask,
self.CDF[ii].PLOT))
# Draw currents:
#
if self.nvec > 0:
toconsola("EG plot currents",wid=self.cons)
for ii in range(self.nvec):
if self.VEC[ii].show.get():
vectorplot.drawing(self.ax, proj['proj'], self.VEC[ii])
# Draw floats:
#
if self.nfloat > 0:
toconsola("EG plot floats",wid=self.cons)
for ii in range(self.nfloat):
self.FLOAT[ii].L.set(self.L.get())
lagrangian.drawing(self.ax, proj['proj'], self.FLOAT[ii])
# Draw markers:
#
mrklines = []
mrklabls = []
if self.nmarker > 0:
toconsola("EG plot markers",wid=self.cons)
for ii in range(self.nmarker):
#EG Added projection argument, reference map and fig dropped
lmrk = geomarker.drawing(self.ax, proj['proj'], self.MARKER[ii])
mrklines.append(lmrk)
mrklabls.append(self.MARKER[ii].LABEL.get())
# Draw SHAPES:
#
if self.nshape > 0:
toconsola("EG plot shapes",wid=self.cons)
for ii in range(self.nshape):
toconsola("\tSHAPE"+str(ii),wid=self.cons)
#EG Added projection argument, reference map and fig
lmrk = shape.drawing(self.ax, proj['proj'], self.SHAPE[ii])
if lmrk is not None:
mrklines.append(lmrk)
mrklabls.append(self.SHAPE[ii].LABEL.get())
# Draw Ellipses:
#
if self.nellipse > 0:
for ii in range(self.nellipse):
ellipse.drawing(self.ax, proj['proj'], self.ELLIPSE[ii])
#Add Patches:
#
if self.npatch > 0:
for ii in range(self.npatch):
patch.drawing(self.ax, proj['proj'], self.PATCH[ii])
#EG Coastlines
#toconsola("EG: COASTLINES"+str(self.PLOT.COASTLINE_SHOW.get()),wid=self.cons)
if self.PLOT.COASTLINE_SHOW.get():
if self.PLOT.COASTLINE_SOURCE.get() == 2:
emodnet="coastlines"
try:
self.ax.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emodnet,zorder=0)
except:
toconsola("\t WARNING: EMODNET coastlines !, it is disabled......",wid=self.cons)
else:
toconsola("\t EG COASTLINE: Natural_Earth (50m by default) or EMODNET wms",wid=self.cons)
self.ax.coastlines(self.PLOT.MAP_RESOLUTION.get(),color=self.PLOT.COASTLINE_COLOR.get(),
linewidth=self.PLOT.COASTLINE_WIDTH.get(),
zorder=self.PLOT.COASTLINE_ZORDER.get())
if self.PLOT.ISOBAT_NPLOT > 0:
toconsola("EG plot Custom ISOBATHS",wid=self.cons)
# Plot isobaths and its legend:
lines, labels = [], []
toconsola("\t lABEL_SHOW"+str(self.PLOT.ISOBAT_LABEL_SHOW.get()),wid=self.cons)
for ii in range(self.PLOT.nisobat):
label = None
if self.PLOT.ISOBAT_LABEL_SHOW.get():
label = self.PLOT.ISOBAT_LABEL[ii]
try:
color = eval(self.PLOT.ISOBAT_COLOR[ii].get())
except:
color = self.PLOT.ISOBAT_COLOR[ii].get()
if self.PLOT.ISOBAT_SHOW[ii]:
toconsola("\t EG ISOBATA:"+str(self.PLOT.ISOBAT_LABEL[ii]),wid=self.cons)
z = self.PLOT.ISOBAT_DATA[ii]
isox,isoy = z['lon'],z['lat']
for i in range(len(isox)):
if isox[i] > 1e29:
isox[i], isoy[i] = np.nan, np.nan
isbt, = self.ax.plot(isox,isoy,marker=None,
linestyle=self.PLOT.ISOBAT_STYLE[ii].get(),
linewidth=self.PLOT.ISOBAT_WIDTH[ii].get(),
#transform=proj['proj'],
transform=ccrs.PlateCarree(),
color=color)
lines.append(isbt)
labels.append(label)
if self.PLOT.ISOBAT_LEGEND.SHOW.get():
toconsola("\t self.PLOT.ISOBAT_LEGEND.SHOW"+str(self.PLOT.ISOBAT_LEGEND.SHOW.get()),wid=self.cons)
fontsize = self.PLOT.ISOBAT_LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.ISOBAT_LEGEND.FONTSIZE.get() < 1:
fontsize = None
if self.PLOT.ISOBAT_LEGEND.MODE.get() == 1:
mode = 'expand'
if not empty(self.PLOT.ISOBAT_LEGEND.TITLE.get()):
try: pass
except: pass
# Anchor BBOX:
if self.PLOT.ISOBAT_LEGEND.USE_BB.get():
bb = [self.PLOT.ISOBAT_LEGEND.BBx.get(),
self.PLOT.ISOBAT_LEGEND.BBy.get()]
else:
bb = None
Ilegend = self.ax.legend(lines,labels, \
#title=self.PLOT.ISOBAT_LEGEND.TITLE.get(),
#title_fontsize=24,
loc=self.PLOT.ISOBAT_LEGEND.LOC.get(),
ncol=self.PLOT.ISOBAT_LEGEND.NCOL.get(),
fontsize=fontsize,
frameon=self.PLOT.ISOBAT_LEGEND.FRAMEON.get(),
fancybox=self.PLOT.ISOBAT_LEGEND.FANCYBOX.get(),
shadow=self.PLOT.ISOBAT_LEGEND.SHADOW.get(),
framealpha=self.PLOT.ISOBAT_LEGEND.ALPHA.get(),
mode=mode,
bbox_to_anchor=bb,
facecolor=self.PLOT.ISOBAT_LEGEND.COLOR.get(),
edgecolor=self.PLOT.ISOBAT_LEGEND.EDGECOLOR.get(),
markerscale=self.PLOT.ISOBAT_LEGEND.MARKERSCALE.get(),
borderpad=self.PLOT.ISOBAT_LEGEND.BORDERPAD.get(),
handletextpad=self.PLOT.ISOBAT_LEGEND.HANDLETEXTPAD.get(),
borderaxespad=self.PLOT.ISOBAT_LEGEND.BORDERAXESPAD.get(),
labelspacing=self.PLOT.ISOBAT_LEGEND.LABELSPACING.get())
if not empty(self.PLOT.ISOBAT_LEGEND.TITLE.get()):
Ilegend.set_title(self.PLOT.ISOBAT_LEGEND.TITLE.get(),
prop=self.PLOT.ISOBAT_LEGEND.TITLEFONT)
if self.PLOT.WATER_COLOR.get() != 'None':
#toconsola("PLOT.WATER_COLOR por defecto 50m",wid=self.cons)
self.ax.add_feature(cfeat.NaturalEarthFeature('physical', 'ocean', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.WATER_COLOR.get()),zorder=self.PLOT.WATER_ZORDER.get())
if self.PLOT.LAND_COLOR.get() != 'None':
#toconsola("PLOT.LAND_COLOR por defecto 50m",wid=self.cons)
self.ax.add_feature(cfeat.NaturalEarthFeature('physical', 'land', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.LAND_COLOR.get()),zorder=self.PLOT.LAND_ZORDER.get())
if self.PLOT.COUNTRYLINE_SHOW.get():
#toconsola("PLOT.COUNTRYLINE",wid=self.cons)
self.ax.add_feature(cfeat.BORDERS,edgecolor=self.PLOT.COUNTRYLINE_COLOR.get(),
linewidth=self.PLOT.COUNTRYLINE_WIDTH.get(),
zorder=self.PLOT.LAND_ZORDER.get()+1)
if self.PLOT.RIVERS_SHOW.get():
#toconsola("PLOT.RIVERS",wid=self.cons)
self.ax.add_feature(cfeat.NaturalEarthFeature('physical','rivers_and_lakes_centerlines', \
self.PLOT.MAP_RESOLUTION.get(), \
linewidth=self.PLOT.RIVERS_WIDTH.get(),
edgecolor=self.PLOT.RIVERS_COLOR.get(),zorder=self.PLOT.LAND_ZORDER.get()+1))
#self.ax.coastlines(resolution='110m')
#self.ax.gridlines()
if self.PLOT.GRID_SHOW.get():
toconsola("EG PLOT.GRID"+self.PLOT.GRID_LINESTYLE.get(),wid=self.cons)
#EG adaptar falat comprobar
#def setcolor(x,color):
# for m in x:
# for t in x[m][1]:
# t.set_color(color)
vmeridians = np.arange(self.PLOT.MERIDIAN_INI.get(), \
self.PLOT.MERIDIAN_FIN.get(), \
self.PLOT.MERIDIAN_INT.get())
vparallels = np.arange(self.PLOT.PARALLEL_INI.get(), \
self.PLOT.PARALLEL_FIN.get(), \
self.PLOT.PARALLEL_INT.get())
lstyle = {'size':self.PLOT.GRID_SIZE.get(),'color':self.PLOT.GRID_COLOR.get()}
lstyle = {'size':self.PLOT.GRID_SIZE.get(),'color':self.PLOT.GRID_COLOR.get()}
#gl = self.ax.gridlines(crs=proj['proj'],draw_labels=True,
gl = self.ax.gridlines(crs=ccrs.PlateCarree(),draw_labels=True,
linewidth=self.PLOT.GRID_LINEWIDTH.get(),
color=self.PLOT.GRID_FONTCOLOR.get(),
alpha=self.PLOT.GRID_ALPHA.get(),
linestyle=self.PLOT.GRID_LINESTYLE.get(),
zorder=self.PLOT.GRID_ZORDER.get())
# Lines visibility
gl.xlines, gl.ylines = True, True
if self.PLOT.GRID_LINESTYLE.get() == "None":
gl.xlines, gl.ylines = False, False
# xy labels visibility
gl.top_labels = self.PLOT.GRID_NORTH.get()
gl.bottom_labels = self.PLOT.GRID_SOUTH.get()
gl.left_labels = self.PLOT.GRID_WEST.get()
gl.right_labels = self.PLOT.GRID_EAST.get()
gl.xlocator = mticker.FixedLocator(vmeridians)
gl.ylocator = mticker.FixedLocator(vparallels)
gl.xlabel_style, gl.ylabel_style = lstyle, lstyle
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style, gl.ylabel_style = lstyle, lstyle
#gl.xpadding , gl.ypadding = self.PLOT.LABEL_PAD.get(), self.PLOT.LABEL_PAD.get()
#else:
# # Default: no labels, no grid just Latitude and Longitude
# toconsola("EG XYLabels ..\n\t"+self.PLOT.XLABEL.get()+" - "+self.PLOT.YLABEL.get(),wid=self.cons)
# font_family = self.PLOT.MAP_FONT_TYPE.get()
# font_size = self.PLOT.LABEL_SIZE.get()
font_weight = 'normal'
font = {'family' : font_family, 'weight' : font_weight,
'color' : self.PLOT.TEXT_COLOR.get(),
'size' : font_size}
# -0.07
self.ax.text(-self.PLOT.YLABEL_PAD.get(), 0.55, self.PLOT.YLABEL.get(), va="bottom", \
ha="center", rotation="vertical", rotation_mode="anchor",
transform=self.ax.transAxes,fontdict=font)
# -0.2
self.ax.text(0.5, -self.PLOT.XLABEL_PAD.get(), self.PLOT.XLABEL.get(), va="bottom", \
ha="center", rotation="horizontal", rotation_mode="anchor",
transform=self.ax.transAxes,fontdict=font)
# Title
toconsola("Plot Title: "+self.PLOT.TITLE.get(),wid=self.cons)
self.ax.set_title(self.PLOT.TITLE.get(),fontproperties=self.PLOT.TITLEFONT)
px,py = self.ax.title.get_position()
dy = self.PLOT.TITLE_PAD.get()/self.fig.get_dpi()
self.ax.title.set_position((px,py+dy))
if self.PLOT.GEOMAP.get():
#toconsola("EG PLOT.GEOMAP 2 scale: Not yet implemented",wid=self.cons)
if self.PLOT.SCALE_SHOW.get():
try:
YOFFSET = float(self.PLOT.SCALE_YOFFSET.get())
except: YOFFSET = None
try:
LINEWIDTH = float(self.PLOT.SCALE_LINEWIDTH.get())
except: LINEWIDTH = None
#EG no parecefuncionarojo scale_bar from tools
toconsola("EG bar scale", wid=self.cons)
scale_bar(self.ax,proj=ccrs.PlateCarree(),
location=[self.PLOT.SCALE_XO.get(),self.PLOT.SCALE_YO.get()],
length=self.PLOT.SCALE_LENGTH.get(),
linecolor=self.PLOT.SCALE_LINECOLOR.get(),
fontcolor=self.PLOT.SCALE_FONTCOLOR.get(),
fontsize=self.PLOT.SCALE_FONTSIZE.get(),
zorder=self.PLOT.SCALE_ZORDER.get(),
linewidth=LINEWIDTH)
#scale_bar(self.ax, self.PLOT.SCALE_LENGTH.get(), \
# [self.PLOT.SCALE_XO.get(),self.PLOT.SCALE_YO.get()],
# linewidth=LINEWIDTH)
'''EG To be implemented with Cartopy
print("EG PLOT.GEOMAP 2 drawmapscale")
self.m.drawmapscale(self.PLOT.SCALE_X.get(),
self.PLOT.SCALE_Y.get(),
self.PLOT.SCALE_XO.get(),
self.PLOT.SCALE_YO.get(),
length=self.PLOT.SCALE_LENGTH.get(),
units=self.PLOT.SCALE_UNITS.get(),
barstyle=self.PLOT.SCALE_STYLE.get(),
fontsize=self.PLOT.SCALE_FONTSIZE.get(),
yoffset=YOFFSET,
labelstyle=self.PLOT.SCALE_LABELSTYLE.get(),
fontcolor=self.PLOT.SCALE_FONTCOLOR.get(),
fillcolor1=self.PLOT.SCALE_FILLCOLOR1.get(),
fillcolor2=self.PLOT.SCALE_FILLCOLOR2.get(),
format=self.PLOT.SCALE_FORMAT.get(),
linecolor=self.PLOT.SCALE_LINECOLOR.get(),
linewidth=LINEWIDTH)
'''
# Time stamp
try:
self.time_stamp.remove()
except: pass
if len(self.DATE) > 0:
toconsola("EG Time stamp: len(self.DATE) > 0", wid=self.cons)
if self.PLOT.TIMESTAMP_SHOW.get():
toconsola("EG Time stamp: "+str(self.DATE[self.L.get()]), wid=self.cons)
font_weight = 'normal'
if self.PLOT.TIMESTAMP_BOLD.get(): font_weight = 'bold'
self.ax.annotate(str(self.DATE[self.L.get()]), \
xy=(self.PLOT.TIMESTAMP_X.get(), \
self.PLOT.TIMESTAMP_Y.get()), \
xycoords='figure fraction', \
color=self.PLOT.TIMESTAMP_COLOR.get(), \
fontsize=self.PLOT.TIMESTAMP_SIZE.get(), \
fontfamily=font_family, \
fontweight=font_weight, \
annotation_clip=False)
if self.PLOT.LOGO_DISPLAY.get() == 1: self.plot_logo()
self.ax.callbacks.connect('xlim_changed', self.on_xlims_change)
self.ax.callbacks.connect('ylim_changed', self.on_ylims_change)
if len(mrklines) > 0 and self.PLOT.LEGEND.SHOW.get():
toconsola("EG self.nmarker ?",wid=self.cons)
fontsize = self.PLOT.LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.LEGEND.FONTSIZE.get() < | |
<filename>__init__.py
#importazione dei pacchetti necessari
import os
import smtplib
import ssl
import time
from datetime import timedelta, datetime
from email.message import EmailMessage
from pathlib import Path

import pytz
from flask import Flask, current_app, flash, redirect, render_template, request, send_file, send_from_directory, session, url_for
from flask_mail import Mail, Message as MailMessage
from flask_sqlalchemy import SQLAlchemy
from fpdf import FPDF, HTMLMixin
# App configuration parameters.
app = Flask(__name__)
# SQLite database file used by SQLAlchemy.
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///db.sqlite3"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Outgoing mail: Gmail over implicit SSL (port 465, explicit TLS disabled).
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
# NOTE(review): credentials are placeholders -- real values should come from
# environment variables, never from source control.
app.config['MAIL_USERNAME'] = '<EMAIL>'
app.config['MAIL_PASSWORD'] = '<PASSWORD>'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
#Classe/Tabella DB CLienti(cf*, nome, cognome, ntelefono, email, password)
class MyPDF(FPDF, HTMLMixin):
    # fpdf requires mixing FPDF with HTMLMixin to enable write_html()
    # (used by generapdf below).
    pass
class Clienti(db.Model):
    """Customer table; cf (presumably the Italian codice fiscale, 16 chars --
    TODO confirm) is the primary key."""
    cf = db.Column("cf", db.String(16), primary_key=True)
    nome = db.Column("nome", db.String(30))
    cognome = db.Column("cognome", db.String(30))
    ntelefono = db.Column("ntelefono", db.String(10))
    email = db.Column("email", db.String(100))
    # NOTE(review): password is stored in clear text -- consider hashing.
    password = db.Column("password", db.String(20))
    # One-to-many: vehicles owned by this customer ("mecchina" is a typo kept
    # because the relationship name is part of the model's interface).
    mecchina = db.relationship("Mezzi", backref="clienti", lazy=True)
    def __init__(self, cf, nome, cognome, ntelefono, email, password):
        self.cf = cf
        self.nome = nome
        self.cognome = cognome
        self.ntelefono = ntelefono
        self.email = email
        self.password = password
#Classe/Tabella DB Mezzi(targa*, marca, modello, cilindrata, potenza, cfcliente->)
class Mezzi(db.Model):
    """Vehicle table; the plate ("targa") is the primary key and each vehicle
    belongs to exactly one customer through cfcliente."""
    targa = db.Column("targa", db.String(10), primary_key=True)
    marca = db.Column("marca", db.String(30))
    modello = db.Column("modello", db.String(30))
    cilindrata = db.Column("cilindrata", db.String(10))
    potenza = db.Column("potenza", db.String(10))
    # Owner of the vehicle (FK to Clienti.cf).
    cfcliente = db.Column("cfcliente", db.String(20), db.ForeignKey('clienti.cf'), nullable=False)
    # One-to-many: repairs performed on this vehicle.
    riparazione = db.relationship("Riparazioni", backref="mezzi", lazy=True)
    def __init__(self, targa, marca, modello, cilindrata, potenza, cfcliente):
        self.targa = targa
        self.marca = marca
        self.modello = modello
        self.cilindrata = cilindrata
        self.potenza = potenza
        self.cfcliente = cfcliente
#Classe/Tabella DB Riparazioni(id*, stato, inizio, fine, prezzo, descrizione, targamezzo->)
class Riparazioni(db.Model):
    """Repair table. The 'fine' column holds either the completion timestamp
    or the literal placeholder 'in corso...' while the repair is still open
    (see the riparazioni() route)."""
    _id = db.Column("id", db.Integer, primary_key=True)
    inizio = db.Column("inizio", db.String(30))
    fine = db.Column("fine", db.String(30))
    prezzo = db.Column("prezzo", db.String(10))
    descrizione = db.Column("descrizione", db.String(50))
    # Vehicle the repair was performed on (FK to Mezzi.targa).
    targamezzo = db.Column("targamezzo", db.String(20), db.ForeignKey('mezzi.targa'), nullable=False)
    def __init__(self, inizio, fine, prezzo, descrizione, targamezzo):
        self.inizio = inizio
        self.fine = fine
        self.prezzo = prezzo
        self.descrizione = descrizione
        self.targamezzo = targamezzo
def inviamail(ricevente):
    """Send the 'repair finished' notification e-mail with the PDF report.

    `ricevente` is a joined query row exposing email, marca, modello, nome,
    cognome and prezzo. Returns 1 once the mail has been handed to Flask-Mail.
    """
    body_text = (
        "Buongiorno, \n la riparazione sul mezzo: "
        + ricevente.marca + " " + ricevente.modello
        + ", a nome: " + ricevente.nome + " " + ricevente.cognome
        + " è terminata.\nPrezzo Finale: " + ricevente.prezzo
        + "\nSaluti, MechSite"
    )
    msg = MailMessage('NoReply - Officina', sender='<EMAIL>', recipients=[ricevente.email])
    msg.body = body_text
    # Attach the report that generapdf() wrote to static/blank.pdf.
    with app.open_resource("/var/www/webApp/webApp/static/blank.pdf") as attachment:
        msg.attach("Resoconto.pdf", "application/pdf", attachment.read())
    mail.send(msg)
    return 1
def addlog(message):
    """Append a timestamped line to the application log file.

    Args:
        message: text to append after the timestamp.

    Fixes: the file handle was previously never closed (leak), the parameter
    shadowed the builtin `str`, and the Rome timezone was computed but unused.
    """
    tz = pytz.timezone("Europe/Rome")
    # Use the Rome timezone for the timestamp (tz was computed but ignored
    # before -- presumably the original intent; TODO confirm desired tz).
    stringa = "\n" + datetime.now(tz).strftime("%m/%d/%Y, %H:%M:%S") + " " + message
    # Context manager guarantees the handle is closed even on write errors.
    with open("/var/www/webApp/webApp/static/log.txt", "a") as logfile:
        logfile.write(stringa)
@app.route('/static/<path:filename>', methods=['GET', 'POST'])
def download(filename):
    """Serve a file from the app's static folder.

    Fixes: `os` and `current_app` were never imported, and
    ``app.config['static']`` is a config key that is never set (KeyError at
    runtime); the static folder path is built directly instead.
    """
    # Absolute path of the static folder inside the application root.
    uploads = os.path.join(current_app.root_path, 'static')
    # Return the requested file inline (not as an attachment).
    return send_from_directory(directory=uploads, filename=filename, as_attachment=False)
def generapdf(ricevente):
    """Render the repair-report PDF and save it as static/blank.pdf.

    `ricevente` is a joined query row exposing the customer, vehicle and
    repair columns interpolated below. The file is later attached by
    inviamail().
    """
    # Same markup as before, built with one f-string instead of a chain of
    # string concatenations.
    html = f"""
    <html>
        <head>
            <meta charset="utf-8">
        </head>
        <body>
            <center>
                <p>Riparazione Conclusa</p>
                <p>Buongiorno, <br>La informiamo della conclusione della riparazione sul mezzo {ricevente.marca} {ricevente.modello} a nome: {ricevente.nome} {ricevente.cognome} </p>
                <br>
                <p>Descrizione della riparazione: {ricevente.descrizione}.</p>
                <br>
                <p>Informazioni sul mezzo:</p>
                <br>
                <table width="100%">
                    <tr width="100%">
                        <th width="25%">Veicolo</th>
                        <th width="25%">Targa</th>
                        <th width="25%">Cilindrata</th>
                        <th width="25%">Potenza</th>
                    </tr>
                    <tr width="100%">
                        <td width="25%">{ricevente.marca} {ricevente.modello}</td>
                        <td width="25%">{ricevente.targa}</td>
                        <td width="25%">{ricevente.cilindrata} cc</td>
                        <td width="25%">{ricevente.potenza} CV</td>
                    </tr>
                </table>
                <br>
                <p>Prezzo Finale: {ricevente.prezzo} euro.</p>
                <br>
                <p>Saluti, MechSite.</p>
            </center>
        </body>
    </html>
    """
    pdf = MyPDF()
    pdf.add_page()
    pdf.write_html(html)
    pdf.output("/var/www/webApp/webApp/static/blank.pdf", "F")
#pagina di login degli utenti
@app.route("/", methods=["GET", "POST"])
def accessoclienti():
    """Customer login, step 1: ask for the e-mail address.

    A known e-mail is stored in the session and the flow continues to the
    password step; an unknown one re-renders the form with an error message.
    """
    if request.method != "POST":
        return render_template("accessoclienti.html", frase="")
    session.permanent = True
    user = request.form["email"]
    if Clienti.query.filter_by(email=user).first():
        session["cliente"] = user
        return redirect(url_for("passwordclienti"))
    return render_template("accessoclienti.html", frase="Email non Trovata")
@app.route("/passwordclienti", methods=["GET", "POST"])
def passwordclienti():
    """Customer login, step 2: set (first login) or verify the password.

    GET renders the form in "create password" mode (controllo=0) when the
    account has no password yet, otherwise in "enter password" mode
    (controllo=1). POST handles both form variants.
    """
    if "cliente" in session:
        if request.method == "POST":
            # First-login form: the user chooses a password (typed twice).
            if "primapassword" in request.form:
                cliente = Clienti.query.filter_by(email=session["cliente"]).first()
                password = request.form["password"]
                passwordrepeat = request.form["passwordrepeat"]
                if password==passwordrepeat:
                    session.permanent = True
                    # NOTE(review): stored in clear text -- consider hashing.
                    cliente.password=password
                    db.session.commit()
                    session["passcliente"]=password
                    return redirect(url_for("passwordclienti"))
                else:
                    return render_template("passwordclienti.html", controllo=0, frase="Password non coincidono")
            # Regular login form: compare against the stored password.
            elif "accessopassword" in request.form:
                cliente = Clienti.query.filter_by(email=session["cliente"]).first()
                password = request.form["password"]
                if cliente.password==password:
                    session["passcliente"]=password
                    addlog("Effettutato Accesso Cliente")
                    return redirect(url_for("riparazioniclienti"))
                else:
                    return render_template("passwordclienti.html", controllo=1, frase="Password Errata")
        else:
            # GET: pick the form variant based on whether a password exists.
            cliente = Clienti.query.filter_by(email=session["cliente"]).first()
            if cliente.password == None:
                return render_template("passwordclienti.html", controllo=0)
            else:
                return render_template("passwordclienti.html", controllo=1)
    else:
        # No e-mail in the session yet: restart from the login page.
        return redirect(url_for("accessoclienti"))
#pagina che printa le riparazioni di un determinato utente
def _riparazioni_per_cliente(email, stato=None):
    """Return the repairs on the given customer's vehicles.

    stato: None for every repair, 'in corso' for open ones (the 'fine' column
    still holds the placeholder used by riparazioni()), 'terminate' for
    finished ones.
    """
    query = Riparazioni.query \
        .join(Mezzi, Riparazioni.targamezzo == Mezzi.targa) \
        .join(Clienti, Mezzi.cfcliente == Clienti.cf) \
        .add_columns(Riparazioni.inizio, Riparazioni.descrizione,
                     Riparazioni.prezzo, Riparazioni.fine,
                     Mezzi.marca, Mezzi.modello, Mezzi.targa) \
        .filter_by(email=email)
    # BUG FIX: the three branches previously ran the same unfiltered query,
    # so "In corso"/"Terminate" showed every repair. Open repairs are marked
    # with the literal 'in corso...' in 'fine' (see the riparazioni() route).
    if stato == 'in corso':
        query = query.filter(Riparazioni.fine == "in corso...")
    elif stato == 'terminate':
        query = query.filter(Riparazioni.fine != "in corso...")
    return query.all()
@app.route("/riparazioniclienti", methods=["GET", "POST"])
def riparazioniclienti():
    """List the logged-in customer's repairs, filtered by completion state."""
    if "cliente" in session and "passcliente" in session:
        if request.method == "POST":
            scelta = request.form["scelta"]
            # controllo tells the template which tab/state is being shown.
            if scelta == "Tutte":
                controllo, stato = 0, None
            elif scelta == "In corso":
                controllo, stato = 1, 'in corso'
            elif scelta == "Terminate":
                controllo, stato = 2, 'terminate'
            else:
                # Unknown choice: render the empty page instead of crashing.
                return render_template("riparazioniclienti.html", frase="")
            trovate = _riparazioni_per_cliente(session["cliente"], stato)
            if len(trovate) == 0:
                return render_template("riparazioniclienti.html", controllo=controllo, frase="Nessun Record Trovato")
            return render_template("riparazioniclienti.html", controllo=controllo, listariparazioni=trovate, frase="Nessun Record Trovato")
        else:
            return render_template("riparazioniclienti.html", frase="")
    else:
        return redirect(url_for("accessoclienti"))
@app.route("/mezziclienti")
def mezziclienti():
    """Show every vehicle registered to the logged-in customer."""
    if "cliente" not in session or "passcliente" not in session:
        return redirect(url_for("accessoclienti"))
    veicoli = Mezzi.query.join(Clienti, Mezzi.cfcliente == Clienti.cf) \
        .add_columns(Mezzi.targa, Mezzi.marca, Mezzi.modello,
                     Mezzi.cilindrata, Mezzi.potenza) \
        .filter_by(email=session["cliente"]).all()
    if not veicoli:
        return render_template("mezziclienti.html", frase="Non hai nessun Mezzo registrato")
    return render_template("mezziclienti.html", listamezzi=veicoli)
@app.route("/profilo", methods=["GET", "POST"])
def profilocliente():
    """Show the customer's profile; a POST sends them to the edit page."""
    if "cliente" not in session or "passcliente" not in session:
        return redirect(url_for("accessoclienti"))
    if request.method == "POST":
        return redirect(url_for("modificaprofilocliente"))
    dati_cliente = Clienti.query.filter_by(email=session["cliente"]).first()
    return render_template("profilocliente.html", cliente=dati_cliente)
@app.route("/modificaprofilo", methods=["GET", "POST"])
def modificaprofilocliente():
    """Edit the customer's profile (password, e-mail, phone number).

    Builds a feedback message ('frase') listing which fields changed and
    commits all modifications in one transaction.
    """
    if "cliente" in session and "passcliente" in session:
        if request.method == "POST":
            frase=""
            clientein = Clienti.query.filter_by(email=session["cliente"]).first()
            email=request.form["email"]
            ntelefono = request.form["ntelefono"]
            # Change the password only when both fields match and are non-empty.
            if request.form["password"]==request.form["passwordrepeat"] and request.form["password"]!="":
                password=request.form["password"]
                clientein.password=password
                frase += "Password modificata "
            elif request.form["password"]!="":
                # Mismatched non-empty passwords: report it (later field
                # changes append to this same message).
                frase="Le due password non coincidono"
            if clientein.email != email:
                clientein.email = email
                frase += "Email modificata "
                # Keep the session key in sync with the new login e-mail.
                session["cliente"]=email
            if clientein.ntelefono != ntelefono:
                clientein.ntelefono = ntelefono
                frase += "Numero di Telefono modificato"
            db.session.commit()
            # Re-read the row (possibly under the new e-mail) for rendering.
            clientein = Clienti.query.filter_by(email=session["cliente"]).first()
            return render_template("modificaprofilocliente.html", cliente=clientein, frase=frase)
        else:
            clientein = Clienti.query.filter_by(email=session["cliente"]).first()
            return render_template("modificaprofilocliente.html", cliente=clientein, frase="")
    else:
        return redirect(url_for("accessoclienti"))
@app.route("/logoutcliente")
def logoutcliente():
    """Log the customer out and return to the login page.

    Both branches of the original returned the same redirect, so the session
    check was dead weight; session.pop with a default is safe either way.
    """
    session.pop("cliente", None)
    session.pop("passcliente", None)
    return redirect(url_for("accessoclienti"))
#Pagina di accesso Meccanico/Utenti(Da completare)
@app.route("/meccanico", methods=["GET", "POST"])
def home():
    """Mechanic login: one shared password unlocks the workshop pages."""
    password = "<PASSWORD>"
    if request.method != "POST":
        return render_template("index.html", frase="")
    session.permanent = True
    tentativo = request.form["n"]
    if tentativo == password:
        session["pass"] = tentativo
        addlog("Eseguito Accesso Meccanico")
        return redirect(url_for("riparazioni"))
    return render_template("index.html", frase="Password Errata")
#Pagina Riparazioni in corso...
@app.route("/riparazioni", methods=["GET", "POST"])
def riparazioni():
    """Mechanic page: list open repairs; a POST closes the selected one.

    Open repairs carry the literal placeholder 'in corso...' in 'fine'.
    Closing a repair stamps the current Rome time, regenerates the PDF report
    and e-mails the customer.
    """
    if "pass" in session:
        frase=""
        tz = pytz.timezone("Europe/Rome")
        # Open repairs joined with vehicle and owner data for display.
        riparazioniincorso = Clienti.query.join(Mezzi, Clienti.cf==Mezzi.cfcliente).join(Riparazioni, Mezzi.targa==Riparazioni.targamezzo).add_columns(Riparazioni.inizio, Riparazioni.descrizione, Riparazioni.prezzo, Riparazioni._id, Mezzi.marca, Mezzi.modello, Clienti.nome, Clienti.cognome).filter_by(fine="in corso...").all()
        if request.method == "POST":
            # Find which open repair's button was pressed: each row's submit
            # posts the repair id as a form field name.
            ids = Riparazioni.query.filter_by(fine="in corso...").all()
            for i in ids:
                if str(i._id) in request.form:
                    riparazione = Riparazioni.query.filter_by(_id=i._id).first()
                    ricevente = Clienti.query.join(Mezzi, Clienti.cf==Mezzi.cfcliente).join(Riparazioni, Mezzi.targa==Riparazioni.targamezzo).add_columns( Riparazioni._id, Mezzi.marca, Mezzi.modello, Clienti.email, Clienti.nome, Clienti.cognome, Riparazioni.prezzo, Riparazioni.descrizione, Mezzi.cilindrata, Mezzi.potenza, Mezzi.targa).filter_by(_id=i._id).first()
                    # Close the repair: stamp the current Rome time.
                    riparazione.fine=datetime.now(tz)
                    db.session.commit()
                    addlog("Terminata una Riparazione")
                    # Regenerate the PDF report, then e-mail it to the owner.
                    generapdf(ricevente)
                    n = inviamail(ricevente)
                    break
            # Re-query so the just-closed repair disappears from the list.
            riparazioniincorso = Clienti.query.join(Mezzi, Clienti.cf==Mezzi.cfcliente).join(Riparazioni, Mezzi.targa==Riparazioni.targamezzo).add_columns(Riparazioni.inizio, Riparazioni.descrizione, Riparazioni.prezzo, Riparazioni._id, Mezzi.marca, Mezzi.modello, Clienti.nome, Clienti.cognome).filter_by(fine="in corso...").all()
            return render_template("riparazioni.html", listariparazioniincorso=riparazioniincorso, frase="")
        else:
            if len(riparazioniincorso)==0:
                frase = "Nessuna riparazione in corso"
            return render_template("riparazioni.html", listariparazioniincorso=riparazioniincorso, frase=frase)
    else:
        return redirect(url_for("home"))
#Pagina Storico Database
@app.route("/storico", methods=["GET", "POST"])
def storico():
    """Mechanic page: browse the full history, searchable by CF or plate."""
    if "pass" in session:
        if request.method=="POST":
            scelta = request.form["scelta"]
            if scelta=="CF":
                # Look up the vehicles and repairs linked to the entered CF.
                cfin = request.form["ricerca"]
                lista=Mezzi.query.filter_by(cfcliente=cfin).all()
                lista2 = Riparazioni.query.join(Mezzi, Riparazioni.targamezzo==Mezzi.targa).join(Clienti, Mezzi.cfcliente==Clienti.cf).filter_by(cf=cfin).all()
                if (len(lista)==0 and len(lista2)==0):
                    return render_template("storico.html", controllo=1, listariparazioni=lista2, listamacchine=lista, frase="Nessun Record Trovato")
                else:
                    return render_template("storico.html", controllo=1, listariparazioni=lista2, listamacchine=lista, frase="")
            elif scelta=="Targa":
                # Look up the repairs and the owner linked to the entered plate.
                targain=request.form["ricerca"]
                lista=Clienti.query.join(Mezzi, Clienti.cf==Mezzi.cfcliente).filter_by(targa=targain).all()
                lista2 = Riparazioni.query.filter_by(targamezzo=targain).all()
                if (len(lista)==0 and len(lista2)==0):
                    return render_template("storico.html", controllo=2, listariparazioni=lista2, listaproprietari=lista, frase="Nessun Record Trovato")
                else:
                    return render_template("storico.html", controllo=2, listariparazioni=lista2, listaproprietari=lista, frase="")
            else:
                return render_template("storico.html", frase="Nessun Record Trovato")
        else:
            # GET with no search option: show every repair, customer and
            # vehicle in the database.
            lista1 = Clienti.query.all()
            lista2 = Mezzi.query.all()
            lista3 = Riparazioni.query.all()
            # NOTE(review): due to operator precedence this condition reduces
            # to len(lista1)==0; presumably all three lists were meant to be
            # checked -- confirm before changing.
            if len(lista1)==0 or len(lista1)==0 and len(lista2)==0 and len(lista3)==0:
                return render_template("storico.html", controllo=0, listariparazioni=lista3, listamezzi=lista2, listaclienti=lista1, frase="Nessun Record Trovato")
            return render_template("storico.html", controllo=0, listariparazioni=lista3, listamezzi=lista2, listaclienti=lista1, frase="")
    else:
        return redirect(url_for("home"))
#Pagina Gestionale del Database
@app.route("/gestionale", methods=["GET", "POST"])
def gestionale():
if "pass" in session:
tz = pytz.timezone("Europe/Rome")
if request.method=="POST":
frase=""
if "scelta" in request.form:
frase=""
scelta = request.form["scelta"]
#In base alla scelta viene printato una tabella differente
if scelta=="Riparazioni":
lista = Riparazioni.query.all()
if len(lista)==0:
frase="Nessuna Riparazione Trovata"
return render_template("gestionale.html", controllo=0, listariparazioni=lista, frase=frase)
elif scelta=="Mezzi":
lista = Mezzi.query.all()
if len(lista)==0:
frase="Nessun Mezzo Trovato"
return render_template("gestionale.html", controllo=1, listamezzi=lista, frase=frase)
elif scelta=="Clienti":
lista = Clienti.query.all()
if len(lista)==0:
frase="Nessun Cliente Trovato"
return render_template("gestionale.html", controllo=2, listaclienti=lista, frase=frase)
elif scelta=="Completo":
lista = Riparazioni.query.all()
if len(lista)==0:
frase="Nessuna Riparazione Trovata"
lista1 | |
an alias.
#
# item_names is the set of final names or aliases for each item in the SELECT list.
item_names = set()
for item in select_items:
if not item.alias and not item.val_expr.is_col:
continue
if item.name in item_names:
item.alias = '*CONFLICT*'
else:
item_names.add(item.name)
# base_item_name_counts stores the number of conflicts that occurred for a name,
# and hence the number of name to skip forward to create a non-conflicting name.
base_item_name_counts = defaultdict(int)
for item in select_items:
if item.alias == '*CONFLICT*' or (not item.val_expr.is_col and not item.alias):
# Use names close to the Impala functional test database so that bugs in
# resolution will be more likely to surface.
alias = base_alias = '%s_col' % item.type.__name__.lower()
while alias in item_names:
base_item_name_counts[base_alias] += 1
alias = base_alias + '_' + str(base_item_name_counts[base_alias])
item.alias = alias
item_names.add(alias)
return SelectClause(select_items)
def _create_basic_select_item(self, table_exprs, return_type):
max_children = self.profile.choose_nested_expr_count()
if max_children:
value = self.create_func_tree(return_type)
value = self.populate_func_with_vals(value, table_exprs)
elif return_type in table_exprs.col_types:
value = self.profile.choose_val_expr(table_exprs.cols_by_type[return_type])
else:
value = self.profile.choose_constant(return_type)
return SelectItem(value)
def create_func_tree(self, return_type, allow_subquery=False):
'''Returns an instance of a basic function that has all of it's arguments either set
to None or another instance of a function that has it's arguments set likewise. The
caller should replace the None values with column references or constants as
desired. The depth of the tree is determined by the query profile (self.profile).
'''
signatures = self._funcs_to_allowed_signatures(FUNCS)
root_signatures = self._find_matching_signatures(
signatures, returns=return_type, allow_subquery=allow_subquery)
root_signature = self.profile.choose_func_signature(root_signatures)
func = root_signature.func(root_signature) # An instance of a function
max_children = self.profile.choose_nested_expr_count()
if max_children:
# Impala does not allow functions that contain subqueries to have arguments
# that contain subqueries. Ex: ... WHERE (int_col IN (SELECT 1)) IN (SELECT TRUE)
subquery_allowed_null_args = list()
subquery_not_allowed_null_args = list()
if func.contains_subquery:
null_args = subquery_not_allowed_null_args
else:
null_args = subquery_allowed_null_args
null_args.extend((func, idx) for idx, arg in enumerate(func.args)
if type(arg) != list and arg.val is None)
while max_children \
and (subquery_allowed_null_args or subquery_not_allowed_null_args):
idx = randrange(
len(subquery_allowed_null_args) + len(subquery_not_allowed_null_args))
if idx < len(subquery_allowed_null_args):
null_args = subquery_allowed_null_args
else:
null_args = subquery_not_allowed_null_args
shuffle(null_args)
parent_func, parent_arg_idx = null_args.pop()
child_signatures = self._find_matching_signatures(
signatures,
returns=parent_func.args[parent_arg_idx].type,
allow_subquery=(allow_subquery and null_args == subquery_allowed_null_args))
child_signature = self.profile.choose_func_signature(child_signatures)
child_func = child_signature.func(child_signature)
parent_func.args[parent_arg_idx] = child_func
if child_func.contains_subquery:
null_args = subquery_not_allowed_null_args
else:
null_args = subquery_allowed_null_args
null_args.extend((child_func, idx) for idx, arg in enumerate(child_func.args)
if type(arg) != list and arg.val is None)
max_children -= 1
return func
def _funcs_to_allowed_signatures(self, funcs):
  '''Return the signatures contained in "funcs" that are eligible for use
     based on the query profile.
  '''
  allowed = list()
  for func in funcs:
    for signature in func.signatures():
      if self.profile.allow_func_signature(signature):
        allowed.append(signature)
  return allowed
def _find_matching_signatures(self,
signatures,
returns=None,
accepts=None,
accepts_only=None,
allow_subquery=False):
'''Returns the subset of signatures matching the given criteria.
returns: The signature must return this type.
accepts: The signature must have at least one argument of this type.
accepts_only: The signature must have arguments of only this type.
allow_subquery: If False, the signature cannot contain a subquery.
'''
matching_signatures = list()
for signature in signatures:
if returns and not issubclass(signature.return_type, returns):
continue
if accepts and not any(not arg.is_subquery and issubclass(arg.type, accepts)
for arg in signature.args):
continue
if accepts_only and (not signature.args or any(
not arg.is_subquery and not issubclass(arg.type, accepts_only)
for arg in signature.args)):
continue
if not allow_subquery and any(arg.is_subquery for arg in signature.args):
continue
matching_signatures.append(signature)
return matching_signatures
def populate_func_with_vals(self,
                            func,
                            table_exprs=TableExprList(),
                            val_exprs=ValExprList(),
                            table_alias_prefix='',
                            allow_subquery=False,
                            _allow_table_exprs=None):
  '''Replace the unfilled (None) arguments of "func" with column references,
     constants or subqueries, recursing into nested functions, and return the
     fully populated function.

     func: a function tree as produced by create_func_tree.
     table_exprs / val_exprs: candidate column references / value expressions.
     table_alias_prefix: prefix for table aliases used in generated subqueries.
     _allow_table_exprs: internal recursion flag, see below.

     NOTE(review): TableExprList()/ValExprList() defaults are mutable default
     arguments; presumably they are only read here -- confirm.
  '''
  # Decide whether plain column references may be used at this level: inside
  # an aggregate they may; directly under an aggregate-containing tree (but
  # outside the aggregate itself) they may not.
  if not _allow_table_exprs and func.is_agg:
    _allow_table_exprs = True
  elif _allow_table_exprs is None and func.contains_agg:
    _allow_table_exprs = False
  elif _allow_table_exprs is None:
    _allow_table_exprs = True
  # If a function's return type depends on some of its args then at least one of those
  # args must not be the NULL literal. Example: IF(false, NULL, NULL) is considered
  # invalid because the return type cannot be determined.
  has_non_null_literal_arg = False
  for idx, arg in enumerate(func.args):
    signature_arg = func.signature.args[idx]
    if signature_arg.is_subquery \
        or (allow_subquery \
            and self.allow_more_nested_queries \
            and self.profile.use_scalar_subquery()):
      # Fill this argument position with a generated subquery.
      usage = self.profile.choose_subquery_predicate_category(
          func.name(),
          self.current_query.from_clause.table_exprs.joinable_cols_by_type)
      if usage is not None \
          and self.allow_more_nested_queries \
          and (usage[1] == 'UNCORRELATED'
               or self.current_query.from_clause.table_exprs.joinable_cols_by_type):
        use_scalar_subquery = (usage[0] == 'Scalar')
        use_agg_subquery = (usage[1] == 'AGG')
        use_correlated_subquery = (usage[2] == 'CORRELATED')
        if use_correlated_subquery:
          # TODO: Sometimes this causes an exception because the list is empty
          join_expr_type = self.profile.choose_type(list(
              self.current_query.from_clause.table_exprs.joinable_cols_by_type))
        else:
          join_expr_type = None
        select_item_data_types = \
            [signature_arg.type] if use_scalar_subquery else signature_arg.type
        query = self.generate_statement(
            table_exprs,
            select_item_data_types=select_item_data_types,
            required_table_expr_col_type=join_expr_type,
            require_aggregate=use_agg_subquery,
            # Don't use UNION + LIMIT; IMPALA-1379
            allow_union_clause=(not signature_arg.is_subquery),
            table_alias_prefix=(table_alias_prefix +
                                ('t' if use_correlated_subquery else '')),
            allow_with_clause=self.profile.use_nested_with())
        if use_scalar_subquery and not use_agg_subquery:
          # Impala will assume the query will return more than one row unless a LIMIT 1
          # is added. An ORDER BY will also be added under the assumption that we want
          # deterministic results.
          query.order_by_clause = OrderByClause([Int(1)])
          query.limit_clause = LimitClause(Int(1))
        if use_correlated_subquery:
          # Tie the subquery to the outer query via a join condition against a
          # randomly chosen outer table of the joinable type.
          outer_table_expr = choice(
              self.current_query.from_clause.table_exprs.by_col_type[join_expr_type])
          correlation_condition = self._create_relational_join_condition(
              outer_table_expr,
              query.from_clause.table_exprs.by_col_type[join_expr_type])
          if query.where_clause:
            query.where_clause.boolean_expr = And.create_from_args(
                query.where_clause.boolean_expr, correlation_condition)
          else:
            query.where_clause = WhereClause(correlation_condition)
        func.args[idx] = Subquery(query)
      else:
        # No usable subquery category: regenerate the whole tree and start
        # populating over from scratch.
        replacement_func = self.create_func_tree(func.type)
        return self.populate_func_with_vals(
            replacement_func,
            table_exprs=table_exprs,
            val_exprs=val_exprs,
            table_alias_prefix=table_alias_prefix,
            allow_subquery=allow_subquery,
            _allow_table_exprs=_allow_table_exprs)
    else:
      if arg.is_constant and arg.val is None:
        # Unfilled leaf: prefer an available value expression or column of
        # the right type, otherwise fall back to a constant.
        candidate_val_exprs = ValExprList()
        if val_exprs:
          candidate_val_exprs.extend(val_exprs.by_type[arg.type])
        if _allow_table_exprs:
          candidate_val_exprs.extend(table_exprs.cols_by_type[arg.type])
        if candidate_val_exprs:
          val = self.profile.choose_val_expr(candidate_val_exprs)
        else:
          val = self.profile.choose_constant(
              return_type=arg.type,
              allow_null=(signature_arg.can_be_null \
                          and signature_arg.can_be_null_literal \
                          and (has_non_null_literal_arg \
                               or not signature_arg.determines_signature)))
        func.args[idx] = val
        arg = val
      elif arg.is_func:
        # Recurse into the nested function.
        func.args[idx] = self.populate_func_with_vals(
            arg,
            table_exprs=table_exprs,
            val_exprs=val_exprs,
            _allow_table_exprs=_allow_table_exprs)
      if not signature_arg.can_be_null and not arg.is_constant:
        # Guard non-nullable argument positions with a COALESCE fallback.
        val = self.profile.choose_constant(return_type=arg.type, allow_null=False)
        func.args[idx] = Coalesce.create_from_args(arg, val)
      if not arg.is_constant or arg.val is not None:
        has_non_null_literal_arg = True
  return func
def _create_agg_select_item(self, table_exprs, basic_select_item_exprs, return_type):
value = self._create_agg_func_tree(return_type)
value = self.populate_func_with_vals(value, table_exprs, basic_select_item_exprs)
return SelectItem(value)
def _create_agg_func_tree(self, return_type):
return self._create_agg_or_analytic_tree(return_type, agg_funcs=AGG_FUNCS)
def _create_agg_or_analytic_tree(self, return_type, agg_funcs=[], analytic_funcs=[],
basic_funcs=FUNCS):
'''Returns an instance of a function that is guaranteed to either be or contain an
aggregate or analytic function. The arguments of the returned function will either
be None or an instance of a function as in create_func_tree.
The chosen aggregate or analytic functions will be restricted to the list of
functions in agg_funcs and analytic_funcs.
If analytic_funcs is non-empty the returned function will be guaranteed to
be an analytic or contain at least on analytic function.
return_type must be set and refers to the data type of the function output.
agg_funcs and analytic_funcs should be used to determine the class of the
returned function. The caller is responsible for restricting the return_type
to types that can be generated by permutations of available functions. If the max
nested expr count in the query profile is at least one, then any return_type
should be possible to generate but this is not guaranteed.
basic_funcs is a list of allowed basic functions that will be used in the function
tree. By default, all basic funcs defined in funcs.py will be allowed. A basic
function is any non-aggregate, non-analytic function.
'''
# The creation of aggregate and analytic functions is so similar that they are
# combined here. "basic" function creation is much simpler so that is kept separate.
# What's going to happen is there will be essentially two important data structures:
#
# 1) A tree of functions, the root of which will be returned. The leaves of the
# tree are "place holders" which are actually instances of a concrete DataType,
# such as Int, with a value of None. In other words, the arguments to all
# functions are either other functions or SQL NULL.
#
# 2) A mapping to place holders from the type of function that they may be replaced
# with. The actual data structure is
# dict<func class> -> list<tuple<tree node, index of place holder in tree node>>
# where "func class" is one of "AggFunc", "AnalyticFunc" or "Func".
#
# This means once a child function is generated, a spot where it can be placed into
# the tree can easily be identified. Although the work is actually done in reverse
# order, a place holder is chosen, | |
<reponame>helenacuesta/multif0-estimation-polyvocals
import os
import glob
import json
import csv
import ast
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy
import utils
import pescador
import mir_eval
import keras.backend as K
''' TRAINING UTIL FUNCTIONS
Some of the functions in this file are taken/adapted from deepsalience.
'''
# Seed shared by the pescador stream muxes so patch sampling is reproducible.
RANDOM_STATE = 42
def patch_size():
    """Patch size (n_freq_bins, n_time_frames) used by all models for training."""
    n_freqs, n_times = 360, 50
    return (n_freqs, n_times)
def experiment_output_path():
    """Directory where experiment results are written."""
    output_dir = "/scratch/hc2945/data/experiment_output"
    return output_dir
def data_path_multif0():
    """Data path for the complete multif0 audio-mixture data."""
    mixtures_dir = "/scratch/hc2945/data/audiomixtures"
    return mixtures_dir
def track_id_list():
    """List of track ids of the datasets, read from the metadata json file."""
    metadata_path = '/scratch/hc2945/data/audiomixtures/mtracks_info.json'
    # The track ids are the top-level keys of the metadata mapping.
    metadata = utils.load_json_data(metadata_path)
    return list(metadata.keys())
def keras_loss():
    """Loss function used by all models (Brian's KL divergence, see bkld)."""
    return bkld
def keras_metrics():
    """Metrics used by all models: MSE plus rounded binary accuracy."""
    return ['mse', soft_binary_accuracy]
def bkld(y_true, y_pred):
    """Brian's KL Divergence implementation.

    Mean (over the last axis, then the batch) of the element-wise binary
    cross-entropy between clipped targets and predictions.
    """
    eps = K.epsilon()
    # Clip away exact 0/1 so the logs below stay finite.
    y_true = K.clip(y_true, eps, 1.0 - eps)
    y_pred = K.clip(y_pred, eps, 1.0 - eps)
    pointwise = -1.0 * y_true * K.log(y_pred) - (1.0 - y_true) * K.log(1.0 - y_pred)
    return K.mean(K.mean(pointwise, axis=-1), axis=-1)
def soft_binary_accuracy(y_true, y_pred):
    """Binary accuracy that works when inputs are probabilities."""
    rounded_match = K.equal(K.round(y_true), K.round(y_pred))
    return K.mean(K.mean(rounded_match, axis=-1), axis=-1)
def keras_generator(data_list, input_patch_size, batch_size=16, active_str=200, muxrate=20):
    """Generator to be passed to a keras model (two-input architecture).

    Args:
        data_list: list of (input_path, output_path) npy file pairs.
        input_patch_size: (n_freq, n_time) patch shape.
        batch_size: patches per yielded batch.
        active_str: number of simultaneously active pescador streams.
        muxrate: rate parameter of the stochastic mux.

    Yields:
        ([X1, X2], Y) batches.
    """
    # BUG FIX: this print sat inside the loop (repeating an invariant once per
    # file); hoisted out.
    print("Data list shape is {}".format(len(data_list)))
    streams = []
    for fpath_in, fpath_out in data_list:
        streams.append(
            pescador.Streamer(
                patch_generator, fpath_in, fpath_out,
                input_patch_size=input_patch_size
            )
        )
    stream_mux = pescador.StochasticMux(streams, active_str, rate=muxrate, mode='with_replacement', random_state=RANDOM_STATE)
    batch_generator = pescador.buffer_stream(stream_mux, batch_size)
    for batch in batch_generator:
        # BUG FIX: the format string had no '{}' placeholder, so the batch
        # length was never actually printed.
        print("\n Batch length: {}".format(len(batch['X1'])))
        yield [batch['X1'], batch['X2']], batch['Y']
def keras_generator_mag(data_list, input_patch_size, batch_size=16, active_str=200, muxrate=20):
    """Generator to be passed to a magnitude-only keras model.

    Args:
        data_list: list of (input_path, output_path) pairs.
        input_patch_size: (n_freq, n_time) patch shape.
        batch_size (int): number of patches per batch.
        active_str (int): number of simultaneously active streamers.
        muxrate (int): expected number of samples drawn per streamer.

    Yields:
        (X1, Y) batches.
    """
    # this is loop-invariant; the original printed it once per streamer
    print("Data list shape is {}".format(len(data_list)))
    streams = [
        pescador.Streamer(
            patch_generator_mag, fpath_in, fpath_out,
            input_patch_size=input_patch_size
        )
        for fpath_in, fpath_out in data_list
    ]
    stream_mux = pescador.StochasticMux(
        streams, active_str, rate=muxrate,
        mode='with_replacement', random_state=RANDOM_STATE)
    batch_generator = pescador.buffer_stream(stream_mux, batch_size)
    for batch in batch_generator:
        # the original format string had no placeholder and always printed
        # an empty count; fixed to report the actual batch length
        print("\n Batch length: {}".format(len(batch['X1'])))
        yield batch['X1'], batch['Y']
def grab_patch_output(f, t, n_f, n_t, y_data):
    """Slice an (n_f, n_t) time-frequency patch out of an output array.

    Returns the patch with a leading singleton (batch) axis.
    """
    patch = y_data[f:f + n_f, t:t + n_t]
    return patch[np.newaxis, :, :]
def grab_patch_input(f, t, n_f, n_t, x_data_1, x_data_2):
    """Slice matching (n_f, n_t) patches from two channels-first input arrays.

    Each patch is moved to channels-last order and given a leading batch axis.
    """
    def _patch(x):
        sliced = x[:, f:f + n_f, t:t + n_t]
        return np.transpose(sliced, (1, 2, 0))[np.newaxis, :, :, :]
    return _patch(x_data_1), _patch(x_data_2)
def grab_patch_input_mag(f, t, n_f, n_t, x_data_1):
    """Slice an (n_f, n_t) patch from a channels-first input array.

    The patch is moved to channels-last order and given a leading batch axis.
    """
    sliced = x_data_1[:, f:f + n_f, t:t + n_t]
    return np.transpose(sliced, (1, 2, 0))[np.newaxis, :, :, :]
def patch_generator(fpath_in, fpath_out, input_patch_size):
    """Generator that yields patches for a single input/output pair.

    Args:
        fpath_in: path to the input .npy file (a pickled dict with
            'dphase/mag' and 'dphase/dphase' entries).
        fpath_out: path to the output .npy file.
        input_patch_size: (n_f, n_t) patch shape.

    Yields:
        dict with X1 (magnitude patch), X2 (phase-diff patch), Y (target).
    """
    try:
        data_in_1 = np.load(fpath_in, allow_pickle=True).item()['dphase/mag'][0]
        data_in_2 = np.load(fpath_in, allow_pickle=True).item()['dphase/dphase'][0]
        data_out = np.load(fpath_out, allow_pickle=True)
        # move to channels-first order for patch slicing
        data_in_1 = np.transpose(data_in_1, (2, 1, 0))
        data_in_2 = np.transpose(data_in_2, (2, 1, 0))
        _, _, n_times = data_in_1.shape
        n_f, n_t = input_patch_size
        # visit every valid time offset once, in random order
        t_vals = np.arange(0, n_times - n_t)
        np.random.shuffle(t_vals)
        for t in t_vals:
            f = 0
            x1, x2 = grab_patch_input(
                f, t, n_f, n_t, data_in_1, data_in_2
            )
            y = grab_patch_output(
                f, t, n_f, n_t, data_out
            )
            yield dict(X1=x1[0], X2=x2[0], Y=y[0])
    except Exception as e:
        # The original bare `except: pass` swallowed every error silently;
        # report the failing file so broken examples can be found, then
        # skip it (keeps the original best-effort behavior).
        print("patch_generator failed for {}: {}".format(fpath_in, e))
def patch_generator_mag(fpath_in, fpath_out, input_patch_size):
    """Generator that yields magnitude-only patches for one input/output pair.

    Args:
        fpath_in: path to the input .npy file (a pickled dict with a
            'dphase/mag' entry).
        fpath_out: path to the output .npy file.
        input_patch_size: (n_f, n_t) patch shape.

    Yields:
        dict with X1 (magnitude patch) and Y (target patch).
    """
    try:
        data_in_1 = np.load(fpath_in, allow_pickle=True).item()['dphase/mag'][0]
        data_out = np.load(fpath_out, allow_pickle=True)
        # move to channels-first order for patch slicing
        data_in_1 = np.transpose(data_in_1, (2, 1, 0))
        _, _, n_times = data_in_1.shape
        n_f, n_t = input_patch_size
        # visit every valid time offset once, in random order
        t_vals = np.arange(0, n_times - n_t)
        np.random.shuffle(t_vals)
        for t in t_vals:
            f = 0
            x1 = grab_patch_input_mag(
                f, t, n_f, n_t, data_in_1)
            y = grab_patch_output(
                f, t, n_f, n_t, data_out
            )
            yield dict(X1=x1[0], Y=y[0])
    except Exception as e:
        # The original bare `except: pass` swallowed every error silently;
        # report the failing file so broken examples can be found, then
        # skip it (keeps the original best-effort behavior).
        print("patch_generator_mag failed for {}: {}".format(fpath_in, e))
def get_paths(save_dir, save_key):
    """Create (if needed) the experiment output directory and return the
    standard artifact paths.

    Returns:
        tuple: (save_path, model_save_path, plot_save_path,
                model_scores_path, scores_path, score_summary_path)
    """
    save_path = os.path.join(save_dir, save_key)
    if not os.path.exists(save_path):
        os.mkdir(save_path)

    def _artifact(template):
        return os.path.join(save_path, template.format(save_key))

    return (save_path,
            _artifact("{}.pkl"),
            _artifact("{}_loss.pdf"),
            _artifact("{}_model_scores.csv"),
            _artifact("{}_scores.csv"),
            _artifact("{}_score_summary.csv"))
def get_file_paths(mtrack_list, data_path):
    """Get the absolute paths to input/output pairs for a list of
    multitracks given a data path.

    A pair is only returned when exactly one input file and one output
    file match the track id.
    """
    file_paths = []
    for track_id in mtrack_list:
        stem = track_id[:-4]  # drop the 4-char file extension
        in_matches = glob.glob(
            os.path.join(data_path, 'inputs', "{}*_input.npy".format(stem)))
        out_matches = glob.glob(
            os.path.join(data_path, 'outputs', "{}*_output.npy".format(stem)))
        if len(in_matches) == 1 and len(out_matches) == 1:
            file_paths.append((in_matches[0], out_matches[0]))
    return file_paths
def plot_metrics_epochs(history, plot_save_path):
    """Create and save a 3-panel plot (MSE, loss, soft binary accuracy)
    of training/validation curves across epochs.
    """
    panels = [
        ('mean_squared_error', 'mean squared error', 'mean squared error'),
        ('loss', 'model loss', 'loss'),
        ('soft_binary_accuracy', 'soft_binary_accuracy', 'soft_binary_accuracy'),
    ]
    plt.figure(figsize=(15, 15))
    for i, (key, title, ylabel) in enumerate(panels, start=1):
        plt.subplot(3, 1, i)
        plt.plot(history.history[key])
        plt.plot(history.history['val_' + key])
        plt.title(title)
        plt.ylabel(ylabel)
        plt.xlabel('epoch')
        plt.legend(['train', 'validate'], loc='upper left')
    plt.savefig(plot_save_path, format='pdf')
    plt.close()
def create_data_split(mtrack_dict, output_path):
    """Randomly split the multitracks into train/validate/test sets
    (75% / 10% / remainder) and write the split to a JSON file.
    """
    all_tracks = list(mtrack_dict.keys())
    n_tracks = len(all_tracks)
    train_perc = 0.75
    validation_perc = 0.1
    test_perc = 1 - train_perc - validation_perc  # remainder goes to test
    # consider doing the training taking into account the songs
    # maybe leaving one song out for evaluation
    shuffled = np.random.permutation(all_tracks)
    n_train = int(train_perc * n_tracks)
    n_val = int(validation_perc * n_tracks)
    data_splits = {
        'train': list(shuffled[:n_train]),
        'validate': list(shuffled[n_train:n_train + n_val]),
        'test': list(shuffled[n_train + n_val:])
    }
    with open(output_path, 'w') as fhandle:
        fhandle.write(json.dumps(data_splits, indent=2))
def get_model_metrics(data_object, model, model_scores_path):
    """Evaluate the model on the train/validation/test generators and save
    the resulting score table to CSV.
    """
    evaluations = []
    for gen in (data_object.get_train_generator(),
                data_object.get_validation_generator(),
                data_object.get_test_generator()):
        evaluations.append(model.evaluate_generator(gen, 1000, max_q_size=10))
    df = pd.DataFrame(evaluations, index=['train', 'validation', 'test'])
    print(df)
    df.to_csv(model_scores_path)
def get_single_test_prediction_phase_free(model, npy_file=None, audio_file=None):
    """Generate output from a model given an input numpy file; when features
    are computed from audio, the phase channel is replaced by zeros.

    Raises:
        ValueError: if neither npy_file nor audio_file is given.
    """
    if npy_file is not None:
        input_hcqt = np.load(npy_file, allow_pickle=True).item()['dphase/mag'][0]
        input_dphase = np.load(npy_file, allow_pickle=True).item()['dphase/dphase'][0]
    elif audio_file is not None:
        # should not be the case
        pump = utils.create_pump_object()
        features = utils.compute_pump_features(pump, audio_file)
        input_hcqt = features['dphase/mag'][0]
        input_dphase = features['dphase/dphase'][0]
        # replace phase info by zeros
        input_dphase = np.zeros(input_dphase.shape)
        print(" >> Phase replaced by zeros!")
    else:
        raise ValueError("one of npy_file or audio_file must be specified")
    input_hcqt = input_hcqt.transpose(1, 2, 0)[np.newaxis, :, :, :]
    input_dphase = input_dphase.transpose(1, 2, 0)[np.newaxis, :, :, :]
    n_t = input_hcqt.shape[2]
    output_list = []
    # predict in chunks of 5000 time frames; the model takes two inputs
    for t in np.arange(0, n_t, 5000):
        hcqt_chunk = np.transpose(input_hcqt[:, :, t:t + 5000, :], (0, 1, 3, 2))
        dphase_chunk = np.transpose(input_dphase[:, :, t:t + 5000, :], (0, 1, 3, 2))
        output_list.append(model.predict([hcqt_chunk, dphase_chunk])[0, :, :])
    predicted_output = np.hstack(output_list)
    return predicted_output, input_hcqt, input_dphase
def get_single_test_prediction(model, npy_file=None, audio_file=None):
    """Generate output from a model given an input numpy file (or compute
    features from an audio file).

    Raises:
        ValueError: if neither npy_file nor audio_file is given.
    """
    if npy_file is not None:
        input_hcqt = np.load(npy_file, allow_pickle=True).item()['dphase/mag'][0]
        input_dphase = np.load(npy_file, allow_pickle=True).item()['dphase/dphase'][0]
    elif audio_file is not None:
        # should not be the case
        pump = utils.create_pump_object()
        features = utils.compute_pump_features(pump, audio_file)
        input_hcqt = features['dphase/mag'][0]
        input_dphase = features['dphase/dphase'][0]
    else:
        raise ValueError("one of npy_file or audio_file must be specified")
    input_hcqt = input_hcqt.transpose(1, 2, 0)[np.newaxis, :, :, :]
    input_dphase = input_dphase.transpose(1, 2, 0)[np.newaxis, :, :, :]
    n_t = input_hcqt.shape[2]
    output_list = []
    # predict in chunks of 5000 time frames; the model takes two inputs
    for t in np.arange(0, n_t, 5000):
        hcqt_chunk = np.transpose(input_hcqt[:, :, t:t + 5000, :], (0, 1, 3, 2))
        dphase_chunk = np.transpose(input_dphase[:, :, t:t + 5000, :], (0, 1, 3, 2))
        output_list.append(model.predict([hcqt_chunk, dphase_chunk])[0, :, :])
    predicted_output = np.hstack(output_list)
    return predicted_output, input_hcqt, input_dphase
def pitch_activations_to_mf0(pitch_activation_mat, thresh):
    """Convert a pitch activation map to multif0 by thresholding peak values
    at thresh.
    """
    freqs = utils.get_freq_grid()
    times = utils.get_time_grid(pitch_activation_mat.shape[1])
    # keep only local maxima along the frequency axis
    peaks = scipy.signal.argrelmax(pitch_activation_mat, axis=0)
    peak_thresh_mat = np.zeros(pitch_activation_mat.shape)
    peak_thresh_mat[peaks] = pitch_activation_mat[peaks]
    freq_idx, time_idx = np.where(peak_thresh_mat >= thresh)
    est_freqs = [[] for _ in range(len(times))]
    for f, t in zip(freq_idx, time_idx):
        est_freqs[t].append(freqs[f])
    est_freqs = [np.array(lst) for lst in est_freqs]
    return times, est_freqs
def load_broken_mf0(annotpath):
    '''Equivalent of mir_eval's load_ragged_time_series for badly-formatted
    csv files (one time per row, a literal Python list of frequencies in the
    second column).
    '''
    times = []
    freqs = []
    with open(annotpath, 'r') as f:
        for row in csv.reader(f):
            times.append(float(row[0]))
            freqs.append(np.array(ast.literal_eval(row[1])))
    times = np.array(times)
    # drop zero frequencies, which mir_eval does not accept
    for i, fqs in enumerate(freqs):
        if any(fqs == 0):
            freqs[i] = np.array([f for f in fqs if f > 0])
    return times, freqs
def test_path():
"""top | |
import numpy as np
import pandas as pd
class ConflictManager:
"""In charge of calculating the conflict meassurements, and all the related dataframes
with intermediate steps.
Attributes:
all_content (pd.DataFrame): All content as per received through the Wikiwho Actions API
conflicts (pd.DataFrame): The actions that have conflicts
elegible (pd.DataFrame): Only some tokens are elegible to possible have conflicts, the
dataframe contains all the actions of those elegible tokens
elegible_actions (pd.DataFrame): Only the actions that are elegible to have conflicts
revisions (pd.DataFrame): Revisions as per received through the Wikiwho Actions API
"""
def __init__(self, all_content, revisions,lng, include_stopwords=False):
self.all_content = all_content
self.revisions = self.prepare_revisions(revisions)
self.include_stopwords = include_stopwords
self.lng = lng
    def calculate(self):
        """Run the full conflict pipeline and return the annotated dataframe.

        Side effects: sets `elegible`, `conflicts`, `elegible_actions` and
        `all_actions`, plus the private boolean selectors (`__conflicts`,
        `__elegible_actions`) used later by the score getters. The steps
        below are order-dependent: the selectors are computed against the
        same row order as the final `elegible` dataframe.
        """
        print('Preparing elegible token actions')
        elegible = self.get_elegible()
        print('Merge elegible actions and revisions')
        elegible = self.merge_actions_and_revisions(
            elegible, self.revisions)
        print('Get the conflicts')
        self.__conflicts = self.__get_conflicts(elegible)
        print('Calculate time differences of undos')
        elegible = self.__calculate_time_diffs(elegible)
        print('Get elegible_actions')
        self.__elegible_actions = self.__get_elegible_actions(elegible)
        print('Calculate the token conflict')
        self.elegible = self.calculate_token_conflict_score(
            elegible, self.__conflicts)
        # the boolean selectors computed above index the final dataframe
        self.conflicts = self.elegible[self.__conflicts]
        self.elegible_actions = self.elegible[self.__elegible_actions]
        self.all_actions = self.__get_all_actions()
        if self.include_stopwords:
            # NOTE(review): get_source_dict is defined outside this chunk;
            # presumably prepares stopword-related data - confirm.
            self.get_source_dict()
        return self.elegible
def get_conflicting_actions(self, editor):
return self.elegible[self.__conflicts.shift(-1) & (
self.elegible.shift(-1)['editor'] == editor)]
def prepare_revisions(self, revisions):
revisions = revisions.rename(columns={'o_editor': 'editor'})
revisions['rev_time'] = pd.to_datetime(revisions['rev_time'])
return revisions
def __get_all_actions(self):
all_actions = self.fill_first_insertion(self.all_content)
if not self.include_stopwords:
all_actions = self.remove_stopwords(all_actions)
all_actions = self.wide_to_long(all_actions)
all_actions = all_actions[all_actions['rev_id'] != -1]
return self.merge_actions_and_revisions(all_actions, self.revisions)
def get_elegible(self):
# by not adding the first revisions (i.e. it remains -1), the merge won't succeed;
# therefore the time differences of the first output will be NaN and not taken as
# an elegible action. The first deletion is never considered as a conflict, therefore
# it is not elegible.
# elegible = self.fill_first_insertion(self.all_content)
elegible = self.remove_unique_rows(self.all_content)
if not self.include_stopwords:
elegible = self.remove_stopwords(elegible)
elegible = self.wide_to_long(elegible)
return elegible
def fill_first_insertion(self, actions):
"""The 'in' column only contains reinsertions, the first insertion is indicated
with -1. Nevertheless, the first insertion of the token is equal to the original
revision id, so here the -1s are replaced by the original revision id"""
actions.loc[actions['in'] == -1,
'in'] = actions.loc[actions['in'] == -1, 'o_rev_id']
return actions
def remove_unique_rows(self, actions):
""" A token that just have one row will nor cause any conflict neither the insertions
or deletions can be undos, so they are removed. In order for a conflict to exist,
there should be at least three actions, and tokens with on row only have maximum two:
the first insertion and a possible deletion.
"""
return actions[actions.duplicated(subset=['token_id'], keep=False)]
def remove_stopwords(self, actions):
"""Open a list of stop words and remove from the dataframe the tokens that
belong to this list.
"""
if self.lng == 'en':
stopwords_fn='data/stopword_list.txt'
elif self.lng == 'de':
stopwords_fn='data/stopword_list_de.txt'
else:
stopwords_fn='data/stopword_list.txt'
stop_words = open(stopwords_fn, 'r').read().split()
return actions[~actions['token'].isin(stop_words)]
def wide_to_long(self, actions):
""" Each row in the actions data frame has an in and out column, i.e. two actions.
This method transforms those two columns in two rows. The new dataframe will contain
a column `action` that indicates if it is an `in` or an `out`, and a column `rev_id`
that contains the revision id in which it happens (the revision ids were the values
orginally present in the `in` and `out` columns)
"""
return pd.wide_to_long(
actions.rename(columns={
'in': 'rev_id_in',
'out': 'rev_id_out'
}).reset_index(),
'rev_id', 'index', 'action', sep='_', suffix='.+').reset_index(
).drop(columns='index').sort_values('token_id')
def merge_actions_and_revisions(self, actions, revisions):
""" Here the actions are merged with the revisions so that we have information about
the time and the editor that executed the action in the token. This also returns the
data sorted by token_id and rev_time, so it can be used to calculate time differences.
"""
return pd.merge(actions, revisions[['rev_time', 'rev_id', 'editor']],
how='left', on='rev_id').sort_values(['token_id', 'rev_time'])
    def __calculate_time_diffs(self, elegible_actions):
        """Add a `time_diff` column: time elapsed since the previous action
        row. Rows where the shifted value belongs to another token (each
        token's first row) are reset to NaN.

        Note: mutates and returns the passed dataframe; assumes it is sorted
        by token_id and rev_time (see merge_actions_and_revisions).
        """
        df = elegible_actions
        # first calculate the times for all cases. This will produce some errors because
        # the shift is not aware of the tokens (revision times should belong to the same
        # token). These errors are removed in the next lines.
        # changed: instead of shifting by 2, shifting by 1
        df['time_diff'] = df['rev_time'] - df.shift(1)['rev_time']
        # the errors are produced in the first two actions (first insertion and deletion) of
        # each token. The first insertion and deletion are guaranteed to exist because duplicates
        # were removed.
        to_delete = (
            # First row of each token
            (df['o_rev_id'] == df['rev_id']))
        # Second row of each token
        # (df.shift(1)['o_rev_id'] == df.shift(1)['rev_id']))
        # delete the value but keep the row
        df.loc[to_delete, 'time_diff'] = np.nan
        # For testing the above
        #if False:
            #this line is equivalent and clearer to the above 3 but much
            #slower)
            #df['time_diff2'] = df.groupby('token_id').apply(
            #lambda group: group['rev_time'] - group.shift(2)['rev_time']).values
            #this is for testing the two methods
            #if (df['time_diff'].fillna(-1) == df['time_diff2'].fillna(-1)).all():
            #print('Group by is equivalent to flat operations')
        return df
def __get_conflicts(self, df):
""" This return a selector (boolean vector) of the actions that classify as conflicts, i.e.
1. insertion-deletion-insertion of the same token, where the editor is the same for the
insertions but different from the deletions.
2. delection-insertion-deletion of the same token, where the editor is the same for the
deletions but different from the insertions.
"""
# what it should be:
# the token is the same as the previous
# out editor is different from in or vice versa
# changed: we do not consider a conflict only those actions, where the revision is made
#by the same user or the first insertion.
return ((df['token_id'] == df.shift(1)['token_id']) &
(df['editor'] != df.shift(1)['editor']))
def __get_elegible_actions(self, df):
""" Since the difference of time is calculated based on the 2nd previous row
(because we are looking to undos in the form of insertion-delection-insertion or
deletion-insertion-deletion), this means that the first two action per tokens are
expected to be NaN (as the 2nd previous row does not exist for that token). Similarly,
this actions should not be elegible as they have no chance of producing conflicts.
"""
return df['time_diff'].notnull()
    def calculate_token_conflict_score(self, df, conflicts, base=3600):
        """Add a `conflict` column scoring each conflicting action.

        The active formula is exp(-t/86400), with t the undo's time
        difference in seconds (86400 s = 1 day): fast undos score near 1 and
        the score decays smoothly for slower ones, avoiding too much weight
        on very fast undos. Non-conflict rows keep NaN.

        Note: `base` is only used by the commented-out logarithmic variant
        kept below for reference.

        NOTE(review): `astype('timedelta64[s]')` yielding float seconds is
        pandas<2.0 behavior - confirm the pinned pandas version.
        """
        #changed: time_diff is not calculated for the first insertion so we can omit that check
        df['conflict'] = np.nan
        # first version of the score: inverted log with `base` as the pivot
        #df.loc[conflicts, ['conflict']] = np.log(
        #    base) / np.log(df['time_diff'].astype('timedelta64[s]') + 2)
        #second version of conflict score, not using inverted log, but inverted exp, bringing seconds to day values (86400 sec in a day) --> smoothes the curve considerably, avoiding too much weight on the very fast undos
        x = df['time_diff'].astype('timedelta64[s]') / 86400.0
        df.loc[conflicts, ['conflict']] = 1.0 / np.exp(x)
        return df
def get_page_conflict_score(self):
""" This calculates a total conflict score for the page. It adds all the conflicts
and divide them by the sum of all elegible actions (i.e. actions that have the potential
of being undos)
"""
if (self.elegible_actions.shape[0] == 0):
return 0
else:
return (self.elegible.loc[self.__conflicts, 'conflict'].sum() /
self.elegible_actions.shape[0])
#def get_page_conflict_score2(self):
#return (self.elegible.loc[self.__conflicts, 'conflict'].sum() /
#len(self.elegible['rev_id'] == self.elegible['o_rev_id']))
def get_conflict_score_per_editor(self):
""" This calculates an score per editor. It adds all the conflicts per editor, and
divide them by the summ of all elegible actions that belong to each editor( i.e.
actions that have the potential of being undos)
"""
# calculate the number of conflicts per editor
confs_n = self.conflicts[['editor', 'conflict']].groupby('editor').count().rename(
columns={'conflict': 'conflict_n'})
# calculate the accumulated conflict per editor
confs_ed = self.conflicts[
['editor', 'conflict']].groupby('editor').sum()
# calculate the 'elegible' actions per editor
actions = self.elegible_actions[
['editor', 'action']].groupby('editor').count()
# join the dataframes
| |
get_p_tz(self, density=True, light_weighted=False):
"""Get p(t,z)
Args:
density (bool): whether to return probabilty density (True) or the
volume-element weighted probabilty (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_tz_x = self.get_p_tz_x(density=density)
P_x = self.get_p_x(density=False)
p_tz = np.sum(p_tz_x * P_x, (2,3))
if light_weighted:
ssps = self.cube.ssps
P_tz_mass_wtd = self.get_p_tz(density=False)
normalisation = np.sum(P_tz_mass_wtd*ssps.light_weights)
p_tz = p_tz*ssps.light_weights/normalisation
return p_tz
def get_p_v_tx(self, v_edg, density=True, light_weighted=False):
"""Get p(v|t,x)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probabilty density (True) or the
volume-element weighted probabilty (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
na = np.newaxis
if light_weighted is False:
v_edg = v_edg[:, na, na, na]
norm = stats.norm(loc=self.mu_v, scale=self.sig_v)
p_v_tx = norm.cdf(v_edg[1:]) - norm.cdf(v_edg[:-1])
if density is True:
dv = v_edg[1:] - v_edg[:-1]
p_v_tx /= dv
else:
p_tvxz = self.get_p_tvxz(v_edg, density=True, light_weighted=True)
if density is False:
dv = v_edg[1:] - v_edg[:-1]
dv = dv[na, :, na, na, na]
p_tvxz = p_tvxz*dv
ssps = self.cube.ssps
p_tvx = np.sum(p_tvxz*ssps.delta_z, -1)
p_x_t = self.get_p_x_t(density=True, light_weighted=True)
p_t = self.get_p_t(density=True, light_weighted=True)
p_xt = p_x_t * p_t
p_tx = np.einsum('xyt->txy', p_xt)
p_tx = p_tx[:, na, :, :]
p_v_tx = p_tvx/p_tx
p_v_tx = np.einsum('tvxy->vtxy', p_v_tx)
return p_v_tx
def get_p_tvxz(self, v_edg, density=True, light_weighted=False):
"""Get p(t,v,x,z)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probabilty density (True) or the
volume-element weighted probabilty (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_txz = self.get_p_txz(density=density)
p_v_tx = self.get_p_v_tx(v_edg, density=density)
newax = np.newaxis
p_v_txz = p_v_tx[:, :, :, :, newax]
p_txz = p_txz[newax, :, :, :, :]
p_vtxz = p_v_txz * p_txz
p_tvxz = np.einsum('vtxyz->tvxyz', p_vtxz)
if light_weighted:
ssps = self.cube.ssps
light_weights = ssps.light_weights
light_weights = light_weights[:,newax,newax,newax,:]
P_tvxz_mass_wtd = self.get_p_tvxz(v_edg, density=False)
normalisation = np.sum(P_tvxz_mass_wtd*light_weights)
p_tvxz = p_tvxz*light_weights/normalisation
return p_tvxz
def get_p_v_x(self, v_edg, density=True, light_weighted=False):
"""Get p(v|x)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probabilty density (True) or the
volume-element weighted probabilty (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
na = np.newaxis
p_v_tx = self.get_p_v_tx(v_edg=v_edg,
density=density,
light_weighted=light_weighted)
P_t = self.get_p_t(density=False, light_weighted=light_weighted)
P_t = P_t[na, :, na, na]
p_v_x = np.sum(p_v_tx * P_t, 1)
return p_v_x
def get_p_v(self, v_edg, density=True, light_weighted=False):
"""Get p(v)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probabilty density (True) or the
volume-element weighted probabilty (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
p_v_x = self.get_p_v_x(v_edg,
density=density,
light_weighted=light_weighted)
P_x = self.get_p_x(density=False, light_weighted=light_weighted)
p_v = np.sum(p_v_x*P_x, (1,2))
return p_v
def get_E_v_x(self, light_weighted=False):
"""Get mean velocity map E[p(v|x)]
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
P_t = self.get_p_t(density=False, light_weighted=light_weighted)
E_v_x = np.sum((P_t*self.mu_v.T).T, 0)
return E_v_x
def get_jth_central_moment_v_x(self, j, light_weighted=False):
"""Get j'th central moment of velocity map E[p((v-mu_v)^j|x)]
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
P_t = self.get_p_t(density=False, light_weighted=light_weighted)
mu = self.get_E_v_x()
k = np.arange(0, j+1, 2)
tmp1 = special.comb(j, k)
na = np.newaxis
tmp2 = (self.mu_v - mu)[na,:,:,:]**(j-k[:,na,na,na])
tmp3 = 1.*P_t
tmp4 = self.sig_v[na,:,:,:]**k[:,na,na,na]
tmp5 = special.factorial2(k-1)
muj_v_x = np.einsum('k,ktxy,t,ktxy,k->xy',
special.comb(j, k),
(self.mu_v - mu)[na,:,:,:]**(j-k[:,na,na,na]),
P_t,
self.sig_v[na,:,:,:]**k[:,na,na,na],
special.factorial2(k-1))
return muj_v_x
def get_variance_v_x(self, light_weighted=False):
"""Get variance velocity map
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
var_v_x = self.get_jth_central_moment_v_x(
2,
light_weighted=light_weighted)
return var_v_x
def get_skewness_v_x(self, light_weighted=False):
"""Get skewness of velocity map
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
mu3_v_x = self.get_jth_central_moment_v_x(
3,
light_weighted=light_weighted)
var_v_x = self.get_jth_central_moment_v_x(
2,
light_weighted=light_weighted)
skewness_v_x = mu3_v_x/var_v_x**1.5
return skewness_v_x
def get_kurtosis_v_x(self, light_weighted=False):
"""Get kurtosis of velocity map
Args:
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
mu4_v_x = self.get_jth_central_moment_v_x(
4,
light_weighted=light_weighted)
var_v_x = self.get_jth_central_moment_v_x(
2,
light_weighted=light_weighted)
kurtosis_v_x = mu4_v_x/var_v_x**2.
return kurtosis_v_x
def plot_density(self,
vmin=0.1,
vmax=3.,
show_every_nth_time=4):
"""Plot maps of the spatial density p(x|t) at several timesteps
Plot the density between the start/end times of disk growth, which
depends on the CDF of p(t). Skip every N steps between these points.
Args:
vmin: minimum velocity for colormap
vmax: maximum velocity for colormap
show_every_nth_time (int): number of timesteps to skip between plots
"""
t_idx_list = np.arange(*self.t_pars['idx_start_end'],
show_every_nth_time)
t_idx_list = t_idx_list[::-1]
kw_imshow = {'cmap':plt.cm.gist_heat,
'norm':LogNorm(vmin=vmin, vmax=vmax)}
for t_idx in t_idx_list:
t = self.cube.ssps.par_cents[1][t_idx]
img = self.cube.imshow(self.p_x_t[:,:,t_idx], **kw_imshow)
plt.gca().set_title(f't={t}')
plt.tight_layout()
plt.show()
return
def plot_t_dep(self):
"""Plot map of depletion timescale used for chemical enrichment
"""
kw_imshow = {'cmap':plt.cm.jet}
img = self.cube.imshow(self.t_dep,
colorbar_label='$t_\\mathrm{dep}$',
**kw_imshow)
plt.tight_layout()
plt.show()
return
def plot_mu_v(self,
show_every_nth_time=4,
vmax=None):
"""Plot maps of the mean velocity E[p(v|t,x)] at several timesteps
Plot the map between the start/end times of disk growth, which
depends on the CDF of p(t). Skip every N steps between these points.
Args:
vmax: maximum velocity for colormap
show_every_nth_time (int): number of timesteps to skip between plots
"""
if vmax is None:
vmax = np.max(np.abs(self.mu_v_pars['vmax_lims']))
cube = self.cube
t_idx_list = np.arange(*self.t_pars['idx_start_end'],
show_every_nth_time)
t_idx_list = t_idx_list[::-1]
kw_imshow = {'vmin':-vmax, 'vmax':vmax}
for t_idx in t_idx_list:
t = self.cube.ssps.par_cents[1][t_idx]
self.cube.imshow(self.mu_v[t_idx,:,:], **kw_imshow)
plt.gca().set_title(f't={t}')
plt.tight_layout()
plt.show()
def plot_sig_v(self,
show_every_nth_time=4,
vmin=None,
vmax=None):
"""Plot maps of the dispersion of p(v|t,x) at several timesteps
Plot the map between the start/end times of disk growth, which
depends on the CDF of p(t). Skip every N steps between these points.
Args:
vmax: minimum velocity for colormap
vmax: maximum velocity for colormap
show_every_nth_time (int): number of timesteps to skip between plots
"""
cube = self.cube
t_idx_list = np.arange(*self.t_pars['idx_start_end'],
show_every_nth_time)
t_idx_list = t_idx_list[::-1]
sigs = np.concatenate((
self.sig_v_pars['sig_v_in_lims'],
self.sig_v_pars['sig_v_out_lims']
))
if vmin is None:
vmin = np.min(sigs)
if vmax is None:
vmax = np.max(sigs)
kw_imshow = {'cmap':plt.cm.jet,
'vmin':vmin,
'vmax':vmax}
for t_idx in t_idx_list:
t = cube.ssps.par_cents[1][t_idx]
cube.imshow(self.sig_v[t_idx,:,:], **kw_imshow)
plt.gca().set_title(f't={t}')
plt.tight_layout()
plt.show()
class stream(component):
"""A stream with spatially varying kinematics but uniform enrichment.
The (mass-weighted) joint density of this component can be factorised as
p(t,x,v,z) = p(t) p(x) p(v|x) p(z|t)
where the factors are given by:
- p(t) : a beta distribution (see `set_p_t`),
- p(x) : a curved line with constant thickness (see `set_p_x`),
    - p(v|x) : Gaussian with mean varying along stream and constant sigma (see
    `set_p_v_x`),
    - p(z|t) : single chemical evolution track i.e. `t_dep` (see `set_p_z_t`).
Args:
cube: a pkm.mock_cube.mockCube.
center (x0,y0): co-ordinates of the component center.
rotation: angle (radians) between x-axes of component and cube.
nsmp:
"""
def __init__(self,
cube=None,
center=(0,0),
rotation=0.):
super(stream, self).__init__(
cube=cube,
center=center,
rotation=rotation)
    def set_p_x(self,
                theta_lims=[0., np.pi/2.],
                mu_r_lims=[0.7, 0.1],
                sig=0.03,
                nsmp=75):
        """Define the stream track p(x)

        Defined in polar co-ordinates (theta,r). Stream extends between angles
        `theta_lims` between radii in `mu_r_lims`. Density is constant along
        with varying theta. The track has a constant width on the sky, `sig`.

        Args:
            theta_lims: (start, end) values of stream angle in radians. Must be
            in -pi to pi. To cross negative x-axis, set non-zero rotation when
            instantiating the stream component.
            mu_r_lims: (start, end) values of stream distance from center.
            sig (float): stream thickness.
            nsmp (int): number of points to sample the angle theta.

        Returns:
            None: sets `self.p_x` (normalised pixel probabilities) and
            `self.p_x_pars`.

        """
        # NOTE(review): mutable default arguments (lists) are shared between
        # calls; harmless here since they are never mutated, but worth fixing.
        assert np.min(theta_lims)>=-np.pi, "Angles must be in -pi<theta<pi'"
        assert np.max(theta_lims)<=np.pi, "Angles must be in -pi<theta<pi'"
        self.theta_lims = theta_lims
        cube = self.cube
        theta0, theta1 = theta_lims
        self.nsmp = nsmp
        # sample the track uniformly in theta; radius interpolates linearly
        theta_smp = np.linspace(theta0, theta1, self.nsmp)
        mu_r_lims = mu_r_lims
        mu_r0, mu_r1 = mu_r_lims
        tmp = (theta_smp - theta0)/(theta1 - theta0)
        mu_r_smp = mu_r0 + (mu_r1 - mu_r0) * tmp
        # per-sample Gaussian in x, integrated over each pixel's width
        mu_x_smp = mu_r_smp * np.cos(theta_smp)
        nrm_x = stats.norm(mu_x_smp, sig)
        pdf_x = nrm_x.cdf(self.xxp[:,:,np.newaxis] + cube.dx/2.)
        pdf_x -= nrm_x.cdf(self.xxp[:,:,np.newaxis] - cube.dx/2.)
        # per-sample Gaussian in y, integrated over each pixel's height
        mu_y_smp = mu_r_smp * np.sin(theta_smp)
        nrm_y = stats.norm(mu_y_smp, sig)
        pdf_y = nrm_y.cdf(self.yyp[:,:,np.newaxis] + cube.dy/2.)
        pdf_y -= nrm_y.cdf(self.yyp[:,:,np.newaxis] - cube.dy/2.)
        # sum the separable x/y contributions over track samples, then
        # normalise to a probability over the pixel grid
        pdf = pdf_x * pdf_y
        pdf = np.sum(pdf, -1)
        pdf /= np.sum(pdf*cube.dx*cube.dy)
        self.p_x_pars = dict(theta_lims=theta_lims,
                             mu_r_lims=mu_r_lims,
                             sig=sig,
                             nsmp=nsmp)
        self.p_x = pdf
def get_p_x(self, | |
""" some tools about fcm"""
# -*- coding: utf-8 -*-
import csv
import os
import random
import time
from math import exp
import matplotlib
# matplotlib.use('Agg')
import matplotlib.colors as pltColors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from collections import deque
def saveUV(U, V, name):
""" save membership and centriod mat as csv files
Args:
U: membership mat of n*c
V: centriod mat of c*s
name: file name to save
"""
f = open('./tem/' + name + '_U.csv', 'w')
for i in U:
k = ','.join([str(j) for j in i])
f.write(k + "\n")
f.close()
print 'save U success'
f = open('./tem/' + name + '_V.csv', 'w')
for i in V:
k = ','.join([str(j) for j in i])
f.write(k + "\n")
f.close()
print 'save V success'
def loadUV(path):
    """ load membership and centriod mat from csv files

    Args:
        path: base file name, without the '_U.csv' / '_V.csv' suffix

    Returns:
        The tuple (U, V) of membership and centriod mats, each an ndarray
    """
    return loadCsv(path + '_U.csv'), loadCsv(path + '_V.csv')
def loadCsv(path):
    """ load data set from csv file

    Args:
        path: file path to load

    Returns:
        a 2-d ndarray of floats
    """
    # Text mode + 'with': the original opened in "rb" (which breaks the
    # csv module on Python 3) and never closed the file handle.
    with open(path, "r") as f:
        return np.array([[float(x) for x in row] for row in csv.reader(f)])
def normalization(dataSet, axis=0):
    """ normaliza the mat by axis

    Args:
        dataSet: a 2-d ndarray to normaliza
        axis: the axis of the ndarray to normaliza,
            default is 0

    Returns:
        the ndarray scaled into [0, 1] along the chosen axis
    """
    dataSet = np.float32(dataSet)
    colMax = np.max(dataSet, axis=axis)
    colMin = np.min(dataSet, axis=axis)
    colRange = colMax - colMin
    # Guard constant columns: a zero range produced NaN (0/0) in the
    # original; dividing by 1 maps such columns to 0 instead.
    colRange = np.where(colRange == 0, 1, colRange)
    return (dataSet - colMin) / colRange
def initMembership(n, c):
    """ init membership mat of n*c by random (rows sum to 1) """
    raw = np.random.uniform(0.001, 1, [n, c])
    row_totals = np.sum(raw, axis=1).reshape(n, 1)
    return raw / row_totals
def initCentroid(dataSet, c):
    """ init Centroid mat of c*s by random (s = data dimension) """
    s = np.shape(dataSet)[1]
    return np.random.rand(s * c).reshape(c, s)
def calcMembership(centriod, dataSet, m):
    """Compute the fuzzy membership mat U (n*c) for fixed centroids.

    Args:
        centriod: c*s centroid mat
        dataSet: n*s data mat
        m: fuzzifier (m > 1)

    Returns:
        n*c ndarray; each row sums to 1.

    Raises:
        ValueError: if some sample coincides with a centroid (zero distance).
    """
    dist = distanceMat(centriod, dataSet)
    if dist[dist == 0].shape[0]:
        # The original printed and called exit(0) -- a *success* exit code
        # that killed the process; raising keeps the error visible and
        # recoverable by the caller.
        raise ValueError(
            "zero distance to a centroid: %s" % centriod.tolist())
    distPower = np.power(dist, -2.0 / (m - 1))
    # Row-wise normalization by broadcasting replaces the explicit
    # dot(sum.reshape((n, 1)), ones((1, c))) outer product.
    return distPower / np.sum(distPower, axis=1).reshape(-1, 1)
def calcCentriod(membership, dataSet, m):
    """Compute the c*s centroid mat from a membership mat.

    Args:
        membership: n*c membership mat
        dataSet: n*s data mat
        m: fuzzifier

    Returns:
        c*s ndarray of weighted means (weights are membership**m).
    """
    membershipPower = np.power(membership, m)
    # Column sums broadcast over the dimension axis; the original built
    # the same denominator via dot with ones((1, dimension)) and also
    # allocated an unused zeros() buffer.
    denominator = np.sum(membershipPower, axis=0).reshape(-1, 1)
    return np.dot(membershipPower.T, dataSet) / denominator
def calcCentroidHessian(centriod, dataSet, m):
    """ caculate the Hessian mat at a centriod mat

    Args:
        centriod: c*s centroid mat
        dataSet: n*s data mat
        m: fuzzifier

    Returns:
        (c*s) x (c*s) ndarray assembled from s x s blocks.
    """
    n = dataSet.shape[0]
    c, dimension = centriod.shape
    U = calcMembership(centriod, dataSet, m)
    membershipPower = np.power(U, m)
    sumByn = np.sum(membershipPower, axis=0)
    # NOTE(review): the second term is subtracted from *every* entry of
    # the diagonal block (it is not multiplied by eye) -- assumed
    # intentional; verify against the Hessian derivation.
    A = 2 * sumByn.reshape(c, 1, 1) * np.eye(dimension) - (4 * m / (
        m - 1) * sumByn).reshape(c, 1, 1)
    distMat = distanceMat(centriod, dataSet)
    distPower = np.power(distMat, -2.0 / (m - 1))
    gk = np.sum(distPower, axis=1)
    gkPower = np.power(gk, m - 1)
    # Bug fix: the original referenced an undefined global ``V`` here;
    # the per-sample offsets are taken from the centroid mat.
    h = np.repeat(dataSet, c, axis=0).reshape(n, c, dimension) - centriod
    h = h * membershipPower.reshape(n, c, 1)
    H = np.zeros((c * dimension, c * dimension))
    for i in range(c):
        for j in range(c):
            Bij = 4 * m / (m - 1) * np.sum(
                np.sum(h[:, i] * h[:, j], axis=1) * gkPower)
            tem = A[i] + Bij if i == j else Bij
            H[i * dimension:(i + 1) * dimension,
              j * dimension:(j + 1) * dimension] = tem
    return H
def calcMembershipHessian(membership, dataSet, m):
    """ caculate the Hessian mat at a Membership mat

    Args:
        membership: n*c membership mat
        dataSet: n*s data mat
        m: fuzzifier

    Returns:
        (c*n) x (c*n) block-diagonal ndarray (one n x n block per cluster).
    """
    dimension = dataSet.shape[1]
    n, c = membership.shape
    centriod = calcCentriod(membership, dataSet, m)
    membershipPower = np.power(membership, m)
    denominator = np.sum(membershipPower, axis=0)
    # Bug fix: the original referenced an undefined global ``V``; the
    # offsets are taken from the centroids computed just above.
    h = np.repeat(dataSet, c, axis=0).reshape(n, c, dimension) - centriod
    h = h * (membershipPower / membership).reshape(n, c, 1)
    H = np.zeros((c * n, c * n))
    for i in range(c):
        mat = np.dot(h[:, i], h[:, i].T)
        diagD = np.diag(mat) / membershipPower[:, i]
        D = np.diag(diagD)
        G = mat / denominator[i]
        tem = D - 2 * m / (m - 1) * G
        H[i * n:(i + 1) * n, i * n:(i + 1) * n] = m * (m - 1) * tem
    return H
def calcObjective(membership, centriod, dataSet, m):
    """ caculate the value of objective function (J)"""
    weights = np.power(membership, m)
    squaredDist = np.power(distanceMat(centriod, dataSet), 2)
    return np.sum(weights * squaredDist)
def distance(x, y):
    """ the Euclidean distance of dot x and dot y """
    diff = np.asarray(x) - np.asarray(y)
    return np.linalg.norm(diff)
def distanceMat(centriod, dataSet):
    """ the Euclidean distance mat of two 2-d mat

    Args:
        centriod: c*s mat
        dataSet: n*s mat

    Returns:
        n*c ndarray where entry (i, j) is ||dataSet[i] - centriod[j]||.
    """
    # One broadcasted norm over the (n, c, s) difference tensor replaces
    # the per-centroid Python loop (and its unused ``dimension`` local).
    diff = dataSet[:, np.newaxis, :] - centriod[np.newaxis, :, :]
    return np.linalg.norm(diff, axis=2)
def drawImage(dataSet, exp, c, figName="figure", V=None):
    """ draw image in 2-d dataset

    Args:
        dataSet: the dataSet mat
        exp: the Clustering results of each dot in dataSet, a 1-d adarray
        c: Number of clusters
        figName: figName to save
        V: calcCentriod mat, if the arg is given, calcCentriod will be drawn on the figure

    Returns:
        None, save the scatter in path ./images/d31/
    """
    global figIndex
    # The original assumed a module-level ``figIndex`` that is never
    # defined in this file (NameError on first call); initialise lazily.
    if "figIndex" not in globals():
        figIndex = 0
    contact = np.column_stack((dataSet, exp))
    # list(): dict views are not indexable on Python 3.
    colors = list(pltColors.cnames.keys())
    fig = plt.figure()
    for i in range(c):
        mask = contact[:, -1] == i
        select = contact[mask]
        x, y = select[:, 0], select[:, 1]
        plt.scatter(
            x,
            y,
            c=colors[i],
            label=str(i),
            s=100,
            marker="${}$".format(i),
            alpha=1,
            edgecolors='none')
        # ``V <> None`` used removed Python 2 syntax and triggered NumPy's
        # ambiguous element-wise comparison; identity test is the safe form.
        if V is not None:
            plt.scatter(
                V[i][0],
                V[i][1],
                c=colors[i],
                label=str(i),
                s=100,
                marker="o",
                alpha=1,
                edgecolors='white')
    plt.title(str(figName))
    plt.xlabel('x')
    plt.ylabel('y')
    # plt.legend()
    plt.grid(True)
    fig.savefig(
        './images/d31/' + str(figIndex) + '.' + str(figName) + '.png',
        dpi=fig.dpi)
    figIndex += 1
    # plt.show()
def getExpResult(membership):
    """ get the Clustering results,
    item belong to the cluster which the menbership is max

    Args:
        membership: n*c membership mat (ndarray)

    Returns:
        1-d ndarray of n cluster indices (row-wise argmax)
    """
    # Vectorised argmax along rows replaces the per-row Python loop.
    return np.argmax(membership, axis=1)
def evaluate(membership, std, dataSet):
    """ calc the external indicators

    Pair-counting comparison of the clustering against the reference
    labels; JC, FMI and RI are computed, FMI is returned.

    Args:
        membership: membership mat of n*c (ndarray)
        std: the classification of each item (1-d ndarray)
        dataSet: data mat of n*s
    """
    n = len(std)
    exp = getExpResult(membership)
    # n*n boolean mats: True where the pair (i, j) shares a cluster/class.
    sameExp = np.repeat(exp, n).reshape(n, n)
    sameExp = sameExp == sameExp.T
    sameStd = np.repeat(std, n).reshape(n, n)
    sameStd = sameStd == sameStd.T
    # a: same-same pairs (self-pairs removed), b/c: one-sided, d: neither.
    a = (np.sum(sameExp * sameStd) - n) / 2.0
    b = np.sum(sameExp * ~sameStd) / 2.0
    c = np.sum(~sameExp * sameStd) / 2.0
    d = np.sum(~sameExp * ~sameStd) / 2.0
    JC = a / (a + b + c)
    FMI = (a**2 / ((a + b) * (a + c)))**(1.0 / 2)
    RI = 2 * (a + d) / (n * (n - 1))
    # print JC, FMI, RI
    return FMI
def fcmIteration(U, V, dataSet, m, c, returnType=0):
    """ fcm iteration start from the init value

    Alternates the membership update and the centroid update until the
    squared centroid displacement drops below 1e-8 or 50 iterations ran.

    Args:
        U: Membership mat of n*c (ndarray)
        V: Centriod mat of c*s (ndarray)
        dataSet: data mat of n*s
        m: m in fcm
        c: numbers of cluster (not used directly here; kept for symmetry)
        returnType: 0 -> return (U, V, J); otherwise the deque of every
            centroid mat visited is returned as well

    Returns:
        The tuple (U, V, J) -- or (U, V, J, VQue) -- where J is the final
        objective value; the mats are all 2-d ndarray
    """
    MAX_ITERATION = 50  # also serves as the remaining-iteration counter
    epsilon = 1e-8
    delta = float('inf')
    VQue = deque([V])  # history of centroid mats, starting with the input
    while delta > epsilon and MAX_ITERATION > 0:
        U = calcMembership(V, dataSet, m)
        # J = calcObjective(U, V, dataSet, m)
        # print('{0},{1}').format(J, evaluate(U, classes, dataSet))
        _V = calcCentriod(U, dataSet, m)
        # J = calcObjective(U, _V, dataSet, m)
        # print('{0},{1}').format(J, evaluate(U, classes, dataSet))
        delta = distance(V, _V)**2  # squared move of the whole centroid mat
        V = _V
        VQue.append(V)
        MAX_ITERATION -= 1
    J = calcObjective(U, V, dataSet, m)
    if returnType == 0:
        return U, V, J
    else:
        return U, V, J, VQue
def fcm(dataSet, m, c, returnType=0):
    """ the Entrance of fcm alg. """
    sampleCount = len(dataSet)
    initialU = initMembership(sampleCount, c)
    initialV = initCentroid(dataSet, c)
    return fcmIteration(initialU, initialV, dataSet, m, c, returnType)
def sortByCol(ndarray):
    """ sort 2d ndarray by col val. """
    order = np.argsort(ndarray[:, 0])
    return ndarray[order]
class TabuSearch:
""" the Class of Tabu Search
include the structure and the tool functions
Attributes:
tabuList: a list of tabu object
tabuLength: the tabu length, int, default = 0.25 * MAX_ITERATION
maxSearchNum: the number of neighborhood samples
MAX_ITERATION: max iteration number of ts
neighbourhoodUnit: step length of each move
neighbourhoodTimes: step | |
# -*- coding: utf-8 -*
from pathlib import Path
import ujson as json
import numpy as np
from . import _deck
from . import _utils
# Queen and King of hearts: trigger the 20-point bonus applied in
# GameBoard point processing when hearts are trump.
_BONUS_CARDS = {_deck.Card(c) for c in ["Qh", "Kh"]}
class DefaultPlayer: # pylint: disable=too-many-instance-attributes
    """Player which selects one card randomly at each round.

    Note
    ----
    This class is the base class for other players. They should mostly improve
    methods "set_reward" and "_propose_card_to_play". A proposition can be rejected if
    it proposed a card which could not be played. A random choice is made in this case,
    and the acceptation_ratio attribute allows to keep track of how often the propositions
    are accepted.
    """

    def __init__(self):
        # Per-game state (reset by reinitialize between games).
        self._cards = None
        self._initial_cards = None # keep a record of initial cards for each game
        self._order = None
        self.last_correct = None
        self._last_playable_cards = None
        # Cross-game statistics: these counters/queues are NOT reset by
        # reinitialize().
        self._card_played_count = 0
        self._erroneous_selection_count = 0
        self._acceptation_queue = _utils.ReplayQueue(1000)
        self.reward_sum = 0
        self._last_acceptable = _utils.ReplayQueue(1000)
        self.reinitialize()

    def reinitialize(self):
        """Reset the per-game state (cards, order, last_correct)."""
        self._cards = _deck.CardList([])
        self._last_playable_cards = None
        self._initial_cards = _deck.CardList([]) # keep a record of initial cards for each game
        self._order = None
        self.last_correct = 32  # 32 = no erroneous proposition recorded yet

    def get_acceptation_ratio(self):
        """Ratio of card proposition which have been accepted (allowed to play)

        NOTE(review): raises ZeroDivisionError before any card has been
        played -- confirm callers only use it after at least one play.
        """
        return (self._card_played_count - self._erroneous_selection_count) / self._card_played_count

    def get_instantaneous_acceptation_ratio(self):
        """Mean acceptation over the last (up to) 1000 propositions.

        NOTE(review): reads the queue's private ``_data`` buffer directly.
        """
        return np.mean(self._acceptation_queue._data)

    def get_mean_acceptable(self):
        """Mean of the recently recorded action counts at which a
        proposition first failed (see get_card_to_play).

        NOTE(review): the original docstring was a copy-paste of the
        acceptation-ratio one; also reads the queue's private ``_data``.
        """
        return np.mean(self._last_acceptable._data)

    @property
    def initial_cards(self):
        # The 8 cards recorded at game initialization.
        return self._initial_cards

    @property
    def order(self):
        # Playing order set by initialize_game (or None before a game).
        return self._order

    @property
    def cards(self):
        """Makes a copy, to make sure no modification happens outside
        """
        return _deck.CardList(self._cards, self._cards.trump_suit)

    def initialize_game(self, order, cards):
        """Initialize a game with order and cards.

        Parameters
        ----------
        order: int
            the order in which the player will play
        cards: list
            a list of 8 cards

        Note
        ----
        A game can only be initialized if the card list is empty (no ongoing game)
        """
        assert not self._cards, "Cannot initialize a new game when card are still at play: {}".format(self._cards)
        assert len(cards) == 8, "Wrong number of cards for initialization: {}.".format(self._cards)
        self._cards = cards
        self.last_correct = 32
        self._initial_cards = _deck.CardList(cards)
        self._order = order

    def _get_playable_cards(self, board):
        """Returns the cards that can be played
        """
        # Lazily adopt the board's trump suit the first time it is needed.
        if self._cards.trump_suit is None:
            self._cards.trump_suit = board.trump_suit
        round_cards = board.get_current_round_cards()
        # A complete round (4 cards) means a new round starts: no constraint.
        return self._cards.get_playable_cards([] if len(round_cards) == 4 else round_cards)

    def get_card_to_play(self, board):
        """Returns an acceptable card to play.

        Parameter
        ---------
        board: GameBoard
            the current board for the game

        Returns
        -------
        Card
            an acceptable card to play in the current game

        Note
        ----
        This function makes sure the sent card is acceptable to play. It keeps tracks of remaining
        cards, and of how often the propositions (from a neural network for instance) where accepted.
        Propositions are provided through the "_propose_card_to_play" method.
        The playable cards at this round are kept for later use in set_reward.
        """
        selected = self._propose_card_to_play(board)
        self._last_playable_cards = self._get_playable_cards(board)
        if selected is None or selected not in self._last_playable_cards:
            #print(np.round(self._get_expectations(board)), len(board.actions))
            # Proposition rejected: fall back to a random legal card and
            # record the failure in the statistics.
            self._erroneous_selection_count += 1
            self._acceptation_queue.append(False)
            selected = np.random.choice(self._last_playable_cards)
            card_num = len(board.actions)
            # Track the earliest action index at which a proposition failed.
            if self.last_correct >= card_num:
                self.last_correct = card_num
                self._last_acceptable.append(card_num)
        else:
            self._acceptation_queue.append(True)
        self._cards.remove(selected)
        self._card_played_count += 1
        return selected

    def set_reward(self, board, value): # pylint: disable=unused-argument
        """Function to be called after each action on the board, to provide feedbacks for neural networks
        for instance.

        Parameter
        ---------
        board: GameBoard
            the current board for the game
        value: int
            the value of the reward

        Note
        ----
        This function is called after *each* action (from any player), while get_card_to_play method
        is only called when it is the user's time to play.
        """
        self.reward_sum += value

    def _propose_card_to_play(self, board): # pylint: disable=unused-argument
        """Propose a card to play thanks to an advanced method.

        Parameter
        ---------
        board: GameBoard
            the current board for the game

        Returns
        -------
        Card
            a card proposition for playig, which may be unacceptable.

        Note
        ----
        Implement a technique here.
        """
        pass
def initialize_players_cards(players):
    """Initialize players for a new game.
    This function sets the player order and its cards.

    Parameter
    ---------
    players: list
        a list of 4 players.
    """
    assert len(players) == 4
    # initialize players' cards
    cards = _deck.get_full_deck()
    np.random.shuffle(cards)
    # The original rebound the loop variable ``cards`` over the very list
    # being grouped; a distinct name avoids the confusing shadowing.
    for k, hand in enumerate(_utils.grouper(cards, 8)):
        players[k].initialize_game(k, _deck.CardList(hand))
def play_game(board, players, verbose=False):
    """Plays a game, given a board with biddings and initialized players.

    Parameters
    ----------
    board: GameBoard
        a board, with biddings already performed, but no action
    players: list
        a list of 4 initialized players, with 8 cards each and given orders
    """ # IMPROVEMENT: handle partially played games
    # sanity checks on the inputs
    assert board.biddings, "Biddings must have been already performed"
    assert not board.actions, "No cards should have already been played"
    for index, player in enumerate(players):
        assert player._order == min(3, index)
        assert len(player.cards) == 8
    # play all 32 cards, rewarding every player after each one
    for _ in range(32):
        current = board.next_player
        played = players[current].get_card_to_play(board)
        points = board.add_played_card(played, verbose=verbose)
        for index, player in enumerate(players):
            player.set_reward(board, points[index % 2])
    return board
class GameBoard:
"""Elements which are visible to all players.
Attributes
----------
actions: list
played cards, as a list of tuples of type (#player, card)
biddings: list
the sequence of biddings, as a list of tuples of type (#player, points, trump_suit)
"""
    def __init__(self, actions=None, biddings=None):
        """Create a board, optionally replaying pre-recorded biddings/actions.

        Parameters
        ----------
        actions: list or None
            played cards, as tuples (#player, card tag or Card)
        biddings: list or None
            tuples (#player, points, trump_suit); suits are normalized
            through _deck._SUIT_CONVERTER.
        """
        self.biddings = [] if biddings is None else [(p, v, _deck._SUIT_CONVERTER.get(s, s)) for p, v, s in biddings]
        self.next_player = 0
        # points[team, action_index]; a player's team is player % 2.
        self.points = np.zeros((2, 32), dtype=int)
        self._actions = [] if actions is None else [(p, _deck.Card(c)) for p, c in actions]
        self._current_point_sum = 0  # points accumulated inside the current round
        self._bonus_players = set()
        self._current_point_position = 0 # checking that all cards are counted only once
        # Replaying a pre-filled action list keeps derived state consistent
        # (_process_actions_points is defined further down this class).
        if self._actions:
            self._update_next_player()
            self._process_actions_points()
def _as_dict(self):
data = {"actions": [(p, c.tag) for p, c in self.actions],
"biddings": self.biddings}
return data
    @property
    def actions(self):
        # Expose a tuple copy so callers cannot mutate the play history.
        return tuple(self._actions) # avoid direct modification
def dump(self, filepath):
"""Dumps a GameBoard to a file
Parameter
---------
filepath: str or Path
path to the file where to save the GameBoard.
"""
data = self._as_dict()
filepath = Path(filepath)
with filepath.open("w") as f:
json.dump(data, f)
@classmethod
def load(cls, filepath):
"""Loads a GameBoard from a file
Parameter
---------
filepath: str or Path
path to the file where the GameBoard is save.
Returns
-------
GameBoard
the loaded GameBoard
"""
filepath = Path(filepath)
with filepath.open("r") as f:
data = json.load(f)
actions = [(p, _deck.Card(c)) for p, c in data["actions"]]
board = cls(actions, [tuple(b) for b in data["biddings"]])
return board
    def add_played_card(self, card, verbose=False):
        """Add the next card played.
        The player is assumed to be the next_player.
        This function saves the action, updates the next player and computes points.

        Parameters
        ----------
        card: Card
            the card to play
        verbose: bool
            whether to print a summary after each round

        Returns
        -------
        np.array
            the points earned by each team, as an array of 2 elements
        """
        self._actions.append((self.next_player, card))
        player = self.next_player
        # Must run before scoring: on a round's 4th card it resolves the
        # round winner, which _process_card_points needs as next_player.
        self._update_next_player()
        if verbose and not len(self._actions) % 4:
            first_player_index = self.actions[-4][0]
            print("Round #{} - Player {} starts: {}".format(len(self.actions) // 4, first_player_index,
                                                            self.get_current_round_cards().get_round_string()))
        return self._process_card_points(len(self.actions) - 1, card, player, self.next_player)
    def _update_next_player(self):
        """Updates the next_player attribute to either the following player (inside a round),
        or the winner (end of a round).
        """
        if len(self._actions) % 4:
            # Mid-round: play proceeds to the player after the last one.
            self.next_player = (self._actions[-1][0] + 1) % 4
        else:
            # Round complete: the holder of the highest card of the last
            # four leads the next round.
            round_cards = _deck.CardList([x[1] for x in self._actions[-4:]], self.trump_suit)
            highest = round_cards.get_highest_round_card()
            index = round_cards.index(highest)
            self.next_player = (self._actions[-4][0] + index) % 4
    def _process_card_points(self, index, card, player, next_player):
        """Computes the points earned after a card being played.
        This function keeps a record of unaffected points (inside a round), and updates the "points"
        attribute.

        Parameters
        ----------
        index: int
            position of the card in the whole action sequence (0-31)
        card: Card
            the card just played
        player: int
            index of the player who played the card
        next_player: int
            player to act next; on a round's last card this is the winner

        Returns
        -------
        np.array
            the points earned by each team, as an array of 2 elements
        """
        assert index == self._current_point_position, "Processing card #{} while expecting #{}".format(index, self._current_point_position)
        self._current_point_sum += card.get_points(self.trump_suit)
        if not (index + 1) % 4: # end of round
            # Winner's team takes the accumulated round points, plus 10 for
            # the very last card of the game.
            self.points[next_player % 2, index] = self._current_point_sum + (10 if index == 31 else 0)
            self._current_point_sum = 0
        # special reward
        # NOTE(review): the 20 points are granted only on the *second* bonus
        # card by the same player (already in _bonus_players) -- presumably
        # the belote/rebelote rule; confirm against game rules.
        if self.trump_suit == "❤" and card in _BONUS_CARDS:
            if player in self._bonus_players:
                self.points[player % 2, index] += 20
            self._bonus_players.add(player)
        self._current_point_position += 1
        return self.points[:, index]
@property
def trump_suit(self):
"""Selected trump suit for the game
"""
| |
import time
import base64
import os
import pathlib
import random
import string
import subprocess
from kubernetes import client, watch, config
from kubernetes.dynamic.exceptions import ConflictError
# Namespace that holds the JuiceFS secret and the CSI mount pods.
KUBE_SYSTEM = "kube-system"
# Connection settings come from the CI environment; "" when unset.
META_URL = os.getenv("JUICEFS_META_URL") or ""
ACCESS_KEY = os.getenv("JUICEFS_ACCESS_KEY") or ""
SECRET_KEY = os.getenv("JUICEFS_SECRET_KEY") or ""
STORAGE = os.getenv("JUICEFS_STORAGE") or ""
BUCKET = os.getenv("JUICEFS_BUCKET") or ""
TOKEN = os.getenv("JUICEFS_TOKEN") or ""
# Community edition (CE) vs enterprise switches secret layout, resource
# prefixes and juicefs binary paths below.
IS_CE = os.getenv("IS_CE") == "True"
RESOURCE_PREFIX = "ce-" if IS_CE else "ee-"
SECRET_NAME = os.getenv("JUICEFS_NAME") or "ce-juicefs-secret"
STORAGECLASS_NAME = "ce-juicefs-sc" if IS_CE else "ee-juicefs-sc"
# Registries of created resources, consumed by tear_down().
SECRETs = []
STORAGECLASSs = []
DEPLOYMENTs = []
PODS = []
PVCs = []
PVs = []
class Secret:
    """Kubernetes Secret holding the JuiceFS volume credentials.

    Field layout differs between CE (metaurl/access-key/secret-key) and
    EE (token/accesskey/secretkey); values come from the module-level
    environment constants.
    """

    def __init__(self, *, secret_name):
        self.secret_name = secret_name
        self.namespace = KUBE_SYSTEM
        self.meta_url = META_URL
        self.access_key = ACCESS_KEY
        self.secret_key = SECRET_KEY
        self.storage_name = STORAGE
        self.bucket = BUCKET
        self.token = TOKEN

    @staticmethod
    def _encode(value):
        """base64-encode a str for a Secret data field (dedupes the
        b64encode/encode/decode boilerplate repeated per field before)."""
        return base64.b64encode(value.encode('utf-8')).decode("utf-8")

    def create(self):
        """Create the namespaced secret and register it for tear-down."""
        if IS_CE:
            fields = {
                "name": self.secret_name,
                "metaurl": self.meta_url,
                "access-key": self.access_key,
                "secret-key": self.secret_key,
                "storage": self.storage_name,
                "bucket": self.bucket,
            }
        else:
            fields = {
                "name": self.secret_name,
                "token": self.token,
                "accesskey": self.access_key,
                "secretkey": self.secret_key,
                "storage": self.storage_name,
                "bucket": self.bucket,
            }
        data = {key: self._encode(value) for key, value in fields.items()}
        sec = client.V1Secret(
            api_version="v1",
            kind="Secret",
            metadata=client.V1ObjectMeta(name=self.secret_name),
            data=data
        )
        client.CoreV1Api().create_namespaced_secret(namespace=self.namespace, body=sec)
        SECRETs.append(self)

    def delete(self):
        """Delete the secret and unregister it."""
        client.CoreV1Api().delete_namespaced_secret(name=self.secret_name, namespace=self.namespace)
        SECRETs.remove(self)
class StorageClass:
    """StorageClass backed by the JuiceFS CSI provisioner (csi.juicefs.com)."""

    def __init__(self, *, name, secret_name):
        self.name = name
        self.secret_name = secret_name
        self.secret_namespace = KUBE_SYSTEM

    def create(self):
        """Create the storage class and register it for tear-down."""
        # Both the node-publish and provisioner roles reference the same
        # secret in kube-system.
        secret_refs = {}
        for role in ("node-publish", "provisioner"):
            secret_refs["csi.storage.k8s.io/{}-secret-name".format(role)] = self.secret_name
            secret_refs["csi.storage.k8s.io/{}-secret-namespace".format(role)] = self.secret_namespace
        body = client.V1StorageClass(
            api_version="storage.k8s.io/v1",
            kind="StorageClass",
            metadata=client.V1ObjectMeta(name=self.name),
            provisioner="csi.juicefs.com",
            reclaim_policy="Delete",
            volume_binding_mode="Immediate",
            parameters=secret_refs,
        )
        client.StorageV1Api().create_storage_class(body=body)
        STORAGECLASSs.append(self)

    def delete(self):
        """Delete the storage class and unregister it."""
        client.StorageV1Api().delete_storage_class(name=self.name)
        STORAGECLASSs.remove(self)
class PVC:
    """PersistentVolumeClaim helper (name gets the CE/EE resource prefix)."""

    def __init__(self, *, name, access_mode, storage_name, pv):
        self.name = RESOURCE_PREFIX + name
        self.namespace = "default"
        self.access_mode = access_mode
        self.storage_class = storage_name
        self.pv = pv

    def create(self):
        """Create the 1Gi claim; bind to a specific PV via label when given."""
        claim_spec = client.V1PersistentVolumeClaimSpec(
            access_modes=[self.access_mode],
            resources=client.V1ResourceRequirements(
                requests={"storage": "1Gi"}
            )
        )
        if self.pv != "":
            # Static provisioning: select the PV through its "pv" label.
            claim_spec.selector = client.V1LabelSelector(match_labels={"pv": self.pv})
        claim_spec.storage_class_name = self.storage_class
        body = client.V1PersistentVolumeClaim(
            api_version="v1",
            kind="PersistentVolumeClaim",
            metadata=client.V1ObjectMeta(name=self.name),
            spec=claim_spec
        )
        client.CoreV1Api().create_namespaced_persistent_volume_claim(namespace=self.namespace, body=body)
        PVCs.append(self)

    def delete(self):
        """Delete the claim and unregister it."""
        client.CoreV1Api().delete_namespaced_persistent_volume_claim(name=self.name, namespace=self.namespace)
        PVCs.remove(self)

    def check_is_deleted(self):
        """True once the API server returns 404 for the claim."""
        try:
            client.CoreV1Api().read_namespaced_persistent_volume_claim(name=self.name, namespace=self.namespace)
        except client.exceptions.ApiException as e:
            if e.status == 404:
                return True
            raise e
        return False

    def get_volume_id(self):
        """CSI volume handle of the PV this claim is bound to."""
        claim = client.CoreV1Api().read_namespaced_persistent_volume_claim(name=self.name, namespace=self.namespace)
        bound_pv = client.CoreV1Api().read_persistent_volume(name=claim.spec.volume_name)
        return bound_pv.spec.csi.volume_handle
class PV:
    """Statically provisioned PersistentVolume for the JuiceFS CSI driver."""

    def __init__(self, *, name, access_mode, volume_handle, secret_name):
        self.name = RESOURCE_PREFIX + name
        self.access_mode = access_mode
        self.volume_handle = volume_handle
        self.secret_name = secret_name
        self.secret_namespace = KUBE_SYSTEM

    def create(self):
        """Create the PV (labelled with its own name for PVC selectors)."""
        csi_source = client.V1CSIPersistentVolumeSource(
            driver="csi.juicefs.com",
            fs_type="juicefs",
            volume_handle=self.volume_handle,
            node_publish_secret_ref=client.V1SecretReference(
                name=self.secret_name,
                namespace=self.secret_namespace
            ),
        )
        volume_spec = client.V1PersistentVolumeSpec(
            access_modes=[self.access_mode],
            capacity={"storage": "10Pi"},
            volume_mode="Filesystem",
            persistent_volume_reclaim_policy="Delete",
            csi=csi_source
        )
        body = client.V1PersistentVolume(
            api_version="v1",
            kind="PersistentVolume",
            metadata=client.V1ObjectMeta(name=self.name, labels={"pv": self.name}),
            spec=volume_spec
        )
        client.CoreV1Api().create_persistent_volume(body=body)
        PVs.append(self)

    def delete(self):
        """Delete the PV and unregister it."""
        client.CoreV1Api().delete_persistent_volume(name=self.name)
        PVs.remove(self)

    def get_volume_id(self):
        """CSI volume handle recorded in the PV spec."""
        volume = client.CoreV1Api().read_persistent_volume(name=self.name)
        return volume.spec.csi.volume_handle
class Deployment:
    """Deployment whose replicas append a timestamp to a file on the
    JuiceFS-backed PVC every 5 seconds."""

    def __init__(self, *, name, pvc, replicas, out_put=""):
        self.name = RESOURCE_PREFIX + name
        self.namespace = "default"
        self.image = "centos"
        self.pvc = pvc
        self.replicas = replicas
        self.out_put = out_put  # output file name under /data; "" -> out.txt

    def create(self):
        """Create the deployment and register it for tear-down."""
        out_file = self.out_put if self.out_put != "" else "out.txt"
        cmd = "while true; do echo $(date -u) >> /data/{}; sleep 5; done".format(out_file)
        container = client.V1Container(
            name="app",
            image="centos",
            command=["/bin/sh"],
            args=["-c", cmd],
            volume_mounts=[client.V1VolumeMount(
                name="juicefs-pv",
                mount_path="/data",
                # HostToContainer so a remount by the CSI driver propagates.
                mount_propagation="HostToContainer",
            )]
        )
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"deployment": self.name}),
            spec=client.V1PodSpec(
                containers=[container],
                volumes=[client.V1Volume(
                    name="juicefs-pv",
                    persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=self.pvc)
                )]),
        )
        deploySpec = client.V1DeploymentSpec(
            replicas=self.replicas,
            template=template,
            selector={"matchLabels": {"deployment": self.name}}
        )
        deploy = client.V1Deployment(
            api_version="apps/v1",
            kind="Deployment",
            metadata=client.V1ObjectMeta(name=self.name),
            spec=deploySpec,
        )
        client.AppsV1Api().create_namespaced_deployment(namespace=self.namespace, body=deploy)
        DEPLOYMENTs.append(self)

    def update_replicas(self, replicas):
        """Patch spec.replicas, retrying on optimistic-concurrency conflicts.

        Bug fix: the original swallowed *any* non-conflict API error and
        returned as if the update succeeded; those are now re-raised.
        """
        while True:
            try:
                deployment = client.AppsV1Api().read_namespaced_deployment(name=self.name, namespace=self.namespace)
                deployment.spec.replicas = replicas
                client.AppsV1Api().patch_namespaced_deployment(name=self.name, namespace=self.namespace,
                                                               body=deployment)
            except (client.ApiException, ConflictError) as e:
                if e.reason == "Conflict":
                    # Someone changed the object between read and patch:
                    # re-read and retry.
                    print(e)
                    continue
                raise
            break

    def delete(self):
        """Delete the deployment and unregister it."""
        client.AppsV1Api().delete_namespaced_deployment(name=self.name, namespace=self.namespace)
        DEPLOYMENTs.remove(self)

    def refresh(self):
        """Re-read the deployment and sync the local replica count."""
        deploy = client.AppsV1Api().read_namespaced_deployment(name=self.name, namespace=self.namespace)
        self.replicas = deploy.spec.replicas
        return self
class Pod:
    """Helper around a single pod, or (when name == "") around all pods of
    a deployment identified by its "deployment" label."""

    def __init__(self, name, deployment_name, replicas, namespace="default", pvc="", out_put=""):
        self.name = name
        self.namespace = namespace
        self.deployment = deployment_name
        self.pods = []  # scratch list used by the watch helpers
        # The original assigned replicas twice; once is enough.
        self.replicas = replicas
        self.image = "centos"
        self.pvc = pvc
        self.out_put = out_put

    def _matches(self, resource):
        """True when a watch event's pod belongs to this helper."""
        if resource.metadata.namespace != "default":
            return False
        if self.name == "" and resource.metadata.labels.get("deployment") != self.deployment:
            return False
        if self.name != "" and resource.metadata.name != self.name:
            return False
        return True

    def watch_for_success(self):
        """Watch pod events until this pod (or all deployment replicas) are Ready.

        Returns:
            True when the expected pods became ready within 5 minutes.
        """
        v1 = client.CoreV1Api()
        w = watch.Watch()
        for event in w.stream(v1.list_pod_for_all_namespaces, timeout_seconds=5 * 60):
            resource = event['object']
            if not self._matches(resource):
                continue
            print("Event: %s %s" % (event['type'], event['object'].metadata.name))
            if self.__is_pod_ready(resource):
                if self.name == "":
                    self.pods.append(resource)
                    if len(self.pods) == self.replicas:
                        self.pods = []
                        return True
                else:
                    return True
        return False

    @staticmethod
    def __is_pod_ready(resource):
        """True when the pod phase is Running and every condition is True."""
        if resource.status.phase.lower() != "running":
            print("Pod {} status phase: {}".format(resource.metadata.name, resource.status.phase))
            return False
        for condition in resource.status.conditions:
            if condition.status != "True":
                return False
        print("Pod {} status is ready.".format(resource.metadata.name))
        return True

    def watch_for_delete(self, num):
        """Watch pod events until ``num`` matching pods have been DELETED.

        Returns:
            True when the deletions were observed within 5 minutes.
        """
        v1 = client.CoreV1Api()
        w = watch.Watch()
        for event in w.stream(v1.list_pod_for_all_namespaces, timeout_seconds=5 * 60):
            resource = event['object']
            if not self._matches(resource):
                continue
            print("Event: %s %s" % (event['type'], event['object'].metadata.name))
            if event['type'] == "DELETED":
                if self.name == "":
                    self.pods.append(resource)
                    if len(self.pods) == num:
                        self.pods = []
                        return True
                else:
                    return True
        return False

    def is_deleted(self):
        """True when the pod is gone (404) or marked for deletion.

        Bug fix: the original compared deletion_timestamp with ``!= ""``,
        which is True even for a live pod (the field is None, not "").
        """
        try:
            po = client.CoreV1Api().read_namespaced_pod(self.name, self.namespace)
        except client.exceptions.ApiException as e:
            if e.status == 404:
                return True
            raise e
        return po.metadata.deletion_timestamp is not None

    def is_ready(self):
        """True when the pod exists and is Ready (False on 404)."""
        try:
            po = client.CoreV1Api().read_namespaced_pod(self.name, self.namespace)
            return self.__is_pod_ready(po)
        except client.exceptions.ApiException as e:
            if e.status == 404:
                return False
            raise e

    def get_log(self, container_name):
        """Return the log text of the given container."""
        return client.CoreV1Api().read_namespaced_pod_log(self.name, self.namespace, container=container_name)

    def delete(self):
        """Delete the pod (registry cleanup is handled by tear_down)."""
        client.CoreV1Api().delete_namespaced_pod(name=self.name, namespace=self.namespace)

    def create(self):
        """Create a single writer pod mounting the PVC and register it."""
        out_file = self.out_put if self.out_put != "" else "out.txt"
        cmd = "while true; do echo $(date -u) >> /data/{}; sleep 5; done".format(out_file)
        container = client.V1Container(
            name="app",
            image="centos",
            command=["/bin/sh"],
            args=["-c", cmd],
            volume_mounts=[client.V1VolumeMount(
                name="juicefs-pv",
                mount_path="/data",
                mount_propagation="HostToContainer",
            )]
        )
        pod = client.V1Pod(
            metadata=client.V1ObjectMeta(name=self.name, namespace=self.namespace),
            spec=client.V1PodSpec(
                containers=[container],
                volumes=[client.V1Volume(
                    name="juicefs-pv",
                    persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=self.pvc)
                )]),
        )
        client.CoreV1Api().create_namespaced_pod(namespace=self.namespace, body=pod)
        PODS.append(self)

    def get_id(self):
        """UID of the pod (metadata.uid); the original wrapped this in a
        try/except that only re-raised, which was a no-op."""
        po = client.CoreV1Api().read_namespaced_pod(self.name, self.namespace)
        return po.metadata.uid
def mount_on_host(mount_path):
    """Format/auth the JuiceFS volume and mount it at ``mount_path`` on the host."""
    print(f"Mount {mount_path}")
    try:
        if IS_CE:
            juicefs = "/usr/local/bin/juicefs"
            subprocess.check_output(
                ["sudo", juicefs, "format", f"--storage={STORAGE}", f"--access-key={ACCESS_KEY}",
                 f"--secret-key={SECRET_KEY}", f"--bucket={BUCKET}", META_URL, SECRET_NAME])
            subprocess.check_output(["sudo", juicefs, "mount", "-d", META_URL, mount_path])
        else:
            juicefs = "/usr/bin/juicefs"
            subprocess.check_output(
                ["sudo", juicefs, "auth", f"--token={TOKEN}", f"--accesskey={ACCESS_KEY}",
                 f"--secretkey={SECRET_KEY}", f"--bucket={BUCKET}", SECRET_NAME])
            subprocess.check_output(["sudo", juicefs, "mount", "-d", SECRET_NAME, mount_path])
        print("Mount success.")
    except Exception as e:
        print("Error in juicefs mount: {}".format(e))
        raise e
def check_mount_point(mount_path, check_path):
    """Mount the volume and poll ``check_path`` until it holds data.

    Mounts via mount_on_host, then for up to ~5 minutes (60 tries, 5 s
    apart) attempts to read one byte from check_path.

    Returns:
        True as soon as the file is non-empty, False on timeout.

    Note:
        The volume is always unmounted before returning; the original
        leaked the file handle when read() raised and left the volume
        mounted on the exception path.
    """
    mount_on_host(mount_path)
    try:
        for _ in range(0, 60):
            try:
                print("Open file {}".format(check_path))
                with open(check_path) as f:
                    content = f.read(1)
                if content:
                    return True
                time.sleep(5)
            except FileNotFoundError:
                print(os.listdir(mount_path))
                print("Can't find file: {}".format(check_path))
                time.sleep(5)
            except Exception as e:
                print(e)
                with open("/var/log/juicefs.log", "rt") as log:
                    print(log.read())
                raise e
        return False
    finally:
        print(f"Umount {mount_path}.")
        subprocess.run(["sudo", "umount", mount_path])
def get_mount_pod_name(volume_id):
    """Name of the JuiceFS mount pod for ``volume_id`` on the (single) node."""
    node = client.CoreV1Api().list_node().items[0].metadata.name
    return "juicefs-{}-{}".format(node, volume_id)
def check_mount_pod_refs(pod_name, replicas):
    """Check the mount pod carries exactly ``replicas`` juicefs- annotations.

    Args:
        pod_name: mount pod name in the kube-system namespace
        replicas: expected number of reference annotations

    Returns:
        True when the number of annotation keys starting with "juicefs-"
        equals replicas (a pod with no annotations counts as zero).
    """
    pod = client.CoreV1Api().read_namespaced_pod(name=pod_name, namespace=KUBE_SYSTEM)
    annotations = pod.metadata.annotations
    if annotations is None:
        # No annotations at all: only a zero expectation matches.
        return replicas == 0
    refs = sum(1 for key in annotations if key.startswith("juicefs-"))
    return refs == replicas
def deploy_secret_and_sc():
    """Create the shared Secret and StorageClass used by the test cases."""
    print("Deploy secret & storageClass..")
    secret = Secret(secret_name=SECRET_NAME)
    secret.create()
    print("Deploy secret {}".format(secret.secret_name))
    storage_class = StorageClass(name=STORAGECLASS_NAME, secret_name=secret.secret_name)
    storage_class.create()
    print("Deploy storageClass {}".format(storage_class.name))
def tear_down():
    """Best-effort cleanup of every resource registered by the create() calls.

    Iterates over list() snapshots because each delete() removes the object
    from its registry; the original iterated the live lists, which made
    Python skip every other entry.  Any error is printed, not raised, so
    cleanup continues on a best-effort basis.
    """
    print("Tear down all resources begin..")
    try:
        for po in list(PODS):
            print("Delete pod {}".format(po.name))
            po.delete()
            print("Watch for pods {} for delete.".format(po.name))
            result = po.watch_for_delete(1)
            if not result:
                raise Exception("Pods {} are not delete within 5 min.".format(po.name))
        for deploy in list(DEPLOYMENTs):
            print("Delete deployment {}".format(deploy.name))
            deploy = deploy.refresh()
            deploy.delete()
            pod = Pod(name="", deployment_name=deploy.name, replicas=deploy.replicas)
            print("Watch for pods of deployment {} for delete.".format(deploy.name))
            result = pod.watch_for_delete(deploy.replicas)
            if not result:
                raise Exception("Pods of deployment {} are not delete within 5 min.".format(deploy.name))
        for pvc in list(PVCs):
            print("Delete pvc {}".format(pvc.name))
            pvc.delete()
        for sc in list(STORAGECLASSs):
            print("Delete storageclass {}".format(sc.name))
            sc.delete()
        for pv in list(PVs):
            print("Delete pv {}".format(pv.name))
            pv.delete()
        for secret in list(SECRETs):
            print("Delete secret {}".format(secret.secret_name))
            secret.delete()
        print("Delete all volumes in file system.")
        clean_juicefs_volume("/mnt/jfs")
    except Exception as e:
        print("Error in tear down: {}".format(e))
    print("Tear down success.")
def clean_juicefs_volume(mount_path):
    """Mount the JuiceFS file system on the host, delete all its contents,
    then unmount it.

    BUG FIX: the original passed ["sudo", "rm", "-rf", mount_path + "/*"]
    as an argv list — without a shell the "*" glob is never expanded, so a
    literal file named "*" was targeted and nothing was removed. The glob
    needs shell expansion (mount_path is an internal constant, not
    untrusted input).
    """
    mount_on_host(mount_path)
    subprocess.run("sudo rm -rf {}/*".format(mount_path), shell=True)
    subprocess.run(["sudo", "umount", mount_path])
def die(e):
    """Dump CSI controller diagnostics (log, events, pvc/pv/sc listings)
    and re-raise `e` wrapped in an Exception."""
    # csi node log dump kept disabled, exactly as before:
    # csi_node_name = os.getenv("JUICEFS_CSI_NODE_POD")
    # po = Pod(name=csi_node_name, deployment_name="", replicas=1, namespace=KUBE_SYSTEM)
    # print("Get csi node log:")
    # print(po.get_log("juicefs-plugin"))
    print("Get csi controller log:")
    controller_po = Pod(name="juicefs-csi-controller-0", deployment_name="", replicas=1, namespace=KUBE_SYSTEM)
    print(controller_po.get_log("juicefs-plugin"))
    diagnostics = (
        ("Get event: ", ["sudo", "microk8s.kubectl", "get", "event", "--all-namespaces"]),
        ("Get pvc: ", ["sudo", "microk8s.kubectl", "get", "pvc", "--all-namespaces"]),
        ("Get pv: ", ["sudo", "microk8s.kubectl", "get", "pv"]),
        ("Get sc: ", ["sudo", "microk8s.kubectl", "get", "sc"]),
    )
    for title, cmd in diagnostics:
        print(title)
        subprocess.run(cmd, check=True)
    raise Exception(e)
def gen_random_string(slen=10):
    """Return a random alphanumeric string of length `slen`.

    BUG FIX: the original used random.sample, which draws WITHOUT
    replacement — characters could never repeat, the distribution was
    biased, and any slen > 62 raised ValueError. random.choices samples
    with replacement and supports arbitrary lengths.
    """
    return ''.join(random.choices(string.ascii_letters + string.digits, k=slen))
###### test case in ci ######
def test_deployment_using_storage_rw():
print("[test case] Deployment using storageClass with rwm begin..")
# deploy pvc
pvc = PVC(name="pvc-dynamic-rw", access_mode="ReadWriteMany", storage_name=STORAGECLASS_NAME, pv="")
print("Deploy pvc {}".format(pvc.name))
pvc.create()
# | |
#
# Copyright (c) 2015,2017 - Adjacent Link LLC, Bridgewater, New Jersey
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Adjacent Link LLC nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from . import Event
from . import tdmascheduleevent_pb2
from collections import namedtuple
import os
class TDMAScheduleEvent(Event):
IDENTIFIER = 105
TX=1
RX=2
IDLE=3
_FrameEntry = namedtuple('FrameEntry', ['frame', 'slots'])
def __init__(self, **kwargs):
    """Create a TDMAScheduleEvent, optionally setting schedule-wide defaults.

    Keyword arguments:
      frequency -- default frequency in Hz (non-negative int)
      datarate  -- default data rate in bps (non-negative int)
      service   -- default service class (non-negative int)
      power     -- default transmit power in dBm (int or float)

    Raises ValueError for invalid values, KeyError for unknown parameters.
    """
    self._event = tdmascheduleevent_pb2.TDMAScheduleEvent()
    self._frames = {}
    for name, value in kwargs.items():
        # The original tested `isinstance(v, int) or isinstance(v, int)`
        # (py2->py3 long/int artifact); a single isinstance is equivalent.
        if name == 'frequency':
            if isinstance(value, int) and value >= 0:
                self._event.frequencyHz = value
            else:
                raise ValueError("frequency must be a positive numeric")
        elif name == 'datarate':
            if isinstance(value, int) and value >= 0:
                self._event.dataRatebps = value
            else:
                raise ValueError("datarate must be a positive numeric")
        elif name == 'service':
            if isinstance(value, int) and value >= 0:
                self._event.serviceClass = value
            else:
                raise ValueError("service must be a positive numeric")
        elif name == 'power':
            if isinstance(value, (int, float)):
                self._event.powerdBm = value
            else:
                raise ValueError("power must be a numeric")
        else:
            raise KeyError("unknown parameter: %s" % name)
def structure(self, **kwargs):
    """Query or set the TDMA schedule structure.

    With no kwargs: return the current structure as a dict, or None when it
    has not been set. Otherwise all five keys must be supplied together:
    'slots', 'frames', 'slotduration', 'slotoverhead', 'bandwidth'.

    Raises ValueError on invalid values; KeyError on unknown or missing keys.
    """
    # Query mode: no kwargs -> report current structure (or None when unset).
    if not kwargs:
        if self._event.HasField('structure'):
            return {'slots': self._event.structure.slotsPerFrame,
                    'frames': self._event.structure.framesPerMultiFrame,
                    'slotduration': self._event.structure.slotDurationMicroseconds,
                    'slotoverhead': self._event.structure.slotOverheadMicroseconds,
                    'bandwidth': self._event.structure.bandwidthHz}
        else:
            return None
    slotsPerFrame = None
    framesPerMultiFrame = None
    slotOverheadMicroseconds = None
    slotDurationMicroseconds = None
    bandwidthHz = None
    for name, value in kwargs.items():
        # Deduplicated the original `isinstance(v, int) or isinstance(v, int)`
        # py2->py3 conversion artifact; behavior is unchanged.
        if name == 'slots':
            if isinstance(value, int) and value > 0:
                slotsPerFrame = value
            else:
                raise ValueError("'slots' must be a positive integer greater than 0")
        elif name == 'frames':
            if isinstance(value, int) and value > 0:
                framesPerMultiFrame = value
            else:
                raise ValueError("'frames' must be a positive integer greater than 0")
        elif name == 'slotduration':
            if isinstance(value, int) and value > 0:
                slotDurationMicroseconds = value
            else:
                raise ValueError("'slotduration' must be a positive integer greater than 0")
        elif name == 'slotoverhead':
            if isinstance(value, int) and value >= 0:
                slotOverheadMicroseconds = value
            else:
                raise ValueError("'slotoverhead' must be a positive integer (usecs)")
        elif name == 'bandwidth':
            if isinstance(value, int) and value > 0:
                bandwidthHz = value
            else:
                raise ValueError("'bandwidth' must be a positive integer greater than 0 (Hz)")
        else:
            raise KeyError("unknown parameter: %s" % name)
    # All five parameters must be supplied in one call.
    if None in (slotsPerFrame, framesPerMultiFrame, slotOverheadMicroseconds,
                slotDurationMicroseconds, bandwidthHz):
        # BUG FIX: corrected error-message typo "one ore more" -> "one or more"
        raise KeyError("Missing one or more keys: 'slots', 'frames', 'slotduration', 'slotoverhead', 'bandwidth'")
    self._event.structure.slotsPerFrame = slotsPerFrame
    self._event.structure.framesPerMultiFrame = framesPerMultiFrame
    self._event.structure.slotDurationMicroseconds = slotDurationMicroseconds
    self._event.structure.slotOverheadMicroseconds = slotOverheadMicroseconds
    self._event.structure.bandwidthHz = bandwidthHz
def append(self,frameIndex,slotIndex,**kwargs):
frameFrequencyHz = None
frameDataRatebps = None
frameClass = None
framePowerdBm = None
slotFrequencyHz = None
slotDataRatebps = None
slotClass = None
slotPowerdBm = None
slotType = None
slotDestination = None
if frameIndex in self._frames and \
slotIndex in self._frames[frameIndex].slots:
raise ValueError("slot index already defined for frame")
for (name,value) in list(kwargs.items()):
if name == 'frame.frequency':
if (isinstance(value,int) or \
isinstance(value,int)) and \
value > 0:
frameFrequencyHz = value
else:
raise ValueError("'frame.frequency' must be a integer greater that 0 (Hz)")
elif name == 'frame.datarate':
if (isinstance(value,int) or \
isinstance(value,int)) and \
value > 0:
frameDataRatebps = value
else:
raise ValueError("'frame.datarate' must be a positive integer greater than 0 (bps)")
elif name == 'frame.service':
if (isinstance(value,int) or \
isinstance(value,int)) and \
value >= 0 and value <= 3:
frameClass = value
else:
raise ValueError("'frame.service' must be a positive integer in the set [0,3]")
elif name == 'frame.power':
if (isinstance(value,int) or \
isinstance(value,int) or \
isinstance(value,float)):
framePowerdBm = value
else:
raise ValueError("'frame.power' must be a numeric (dBm)")
elif name == 'frequency':
if (isinstance(value,int) or \
isinstance(value,int)) and \
value > 0:
slotFrequencyHz = value
else:
raise ValueError("'frequency' must be a integer greater that 0 (Hz)")
elif name == 'datarate':
if (isinstance(value,int) or \
isinstance(value,int)) and \
value > 0:
slotDataRatebps = value
else:
raise ValueError("'datarate' must be a positive integer greater than 0 (bps)")
elif name == 'service':
if (isinstance(value,int) or \
isinstance(value,int)) and \
value >= 0 and value <= 3:
slotClass = value
else:
raise ValueError("'service' must be a positive integer in the set [0,3]")
elif name == 'power':
if (isinstance(value,int) or \
isinstance(value,int) or \
isinstance(value,float)):
slotPowerdBm = value
else:
raise ValueError("'power' must be a numeric (dBm)")
elif name == 'destination':
if (isinstance(value,int) or \
isinstance(value,int)) and \
value > 0:
slotDestination = value
else:
raise ValueError("'destination' must be a positive integer (NEM Id)")
elif name == 'type':
if value == "tx" or value == TDMAScheduleEvent.TX:
slotType = "tx"
elif value == "rx" or value == TDMAScheduleEvent.RX:
slotType = "rx"
elif value =="idle" or value == TDMAScheduleEvent.IDLE:
slotType = "idle"
else:
raise ValueError("'type' must be one of: tx, rx or idle")
else:
raise KeyError("unknown parameter: %s" % name)
if slotType == "tx":
if slotFrequencyHz == None and \
frameFrequencyHz == None and \
self._event.frequencyHz == None:
raise KeyError("tx slot 'frequency' must be specified when 'frame.frequency' missing and default not set")
if slotDataRatebps == None and \
frameDataRatebps == None and \
self._event.dataRatebps == None:
raise KeyError("tx slot 'datarate' must be specified when 'frame.datarate' missing and default not set")
if slotClass == None and \
frameClass == None and \
self._event.serviceClass == None:
raise KeyError("tx slot 'service' must be specified when 'frame.service' missing and default not set")
if slotPowerdBm != None and \
framePowerdBm == None and \
self._event.powerdBm == None:
raise KeyError("tx slot 'power' must be specified when 'frame.power' missing and default not set")
elif slotType == "rx":
if slotDataRatebps != None or \
slotClass != None or \
slotPowerdBm != None or \
slotDestination != None:
raise KeyError("rx slot cannot have 'datarate', 'service', 'power' and/or 'destination'")
if slotFrequencyHz == None and \
frameFrequencyHz == None and \
self._event.frequencyHz == None:
raise KeyError("rx slot 'frequency' must be specified when 'frame.frequency' missing and default not set")
elif slotType == "idle":
if slotFrequencyHz != None or \
slotDataRatebps != None or \
slotClass != None or \
slotPowerdBm != None:
raise ValueError("idle slot cannot have 'frequency', 'datarate', 'service', 'power', and/or 'destination'")
else:
raise KeyError("missing 'type'")
if frameIndex in self._frames:
frame = self._frames[frameIndex].frame
else:
frame = self._event.frames.add()
self._frames[frameIndex] = TDMAScheduleEvent._FrameEntry(frame,set())
frame.index = frameIndex
if frameFrequencyHz != None:
frame.frequencyHz = frameFrequencyHz
if frameDataRatebps != None:
frame.dataRatebps = frameDataRatebps
if frameClass != None:
frame.serviceClass = frameClass
if framePowerdBm != None:
frame.powerdBm = framePowerdBm
slot = frame.slots.add()
slot.index = slotIndex
if slotType == "tx":
slot.type = tdmascheduleevent_pb2.TDMAScheduleEvent.Frame.Slot.SLOT_TX
if slotFrequencyHz != None:
slot.tx.frequencyHz = slotFrequencyHz
if slotDataRatebps != None:
slot.tx.dataRatebps = slotDataRatebps
if slotClass != None:
slot.tx.serviceClass = slotClass
if slotPowerdBm != None:
slot.tx.powerdBm = slotPowerdBm
if slotDestination != None:
slot.tx.destination = slotDestination
elif slotType == "rx":
slot.type = tdmascheduleevent_pb2.TDMAScheduleEvent.Frame.Slot.SLOT_RX
if slotFrequencyHz != None:
| |
"""
SPC Statistical Process Control provides means to monitor process behaviour
using statistical tools defined by Shewhart and others. The process run is shown
as Quality Control Charts (QCC).
Author: <NAME> <<EMAIL>>
License: MIT
"""
"""
Edited by <NAME>
Added: Missing Rules
Some changes
Changepoint is added, when changepoints are set, the calculations are made in the intervals individually
"""
import numpy as np
# --- Chart-type identifiers (keys into STATS_FUNCS below) ---
CHART_X_BAR_R_X = "x_bar R - X"
CHART_X_BAR_R_R = "x_bar R - R"
CHART_X_BAR_S_X = "x_bar S - X"
CHART_X_BAR_S_S = "x_bar S - S"
CHART_X_MR_X = "X mR - X"
CHART_X_MR_MR = "X mR - mR"
CHART_P = "p"
CHART_NP = "np"
CHART_C = "c"
CHART_U = "u"
CHART_EWMA = "EWMA"
CHART_CUSUM = "CUSUM"
CHART_THREE_WAY = "three way"
CHART_TIME_SERIES = "time series"
# --- Individual run-test (out-of-control) rule identifiers ---
RULES_1_BEYOND_3SIGMA = "1 beyond 3*sigma"
RULES_2_OF_3_BEYOND_2SIGMA = "2 of 3 beyond 2*sigma"
RULES_4_OF_5_BEYOND_1SIGMA = "4 of 5 beyond 1*sigma"
RULES_7_ON_ONE_SIDE = "7 on one side"
RULES_8_ON_ONE_SIDE = "8 on one side"
RULES_9_ON_ONE_SIDE = "9 on one side"
RULES_6_TRENDING = "6 trending"
RULES_14_UP_DOWN = "14 up down"
RULES_15_BELOW_1SIGMA = "15 below 1*sigma"
RULES_8_BEYOND_1SIGMA_BOTH_SIDES = "8 beyond 1*sigma on both sides"
# --- Predefined rule sets ---
RULES_BASIC = [RULES_1_BEYOND_3SIGMA,
               RULES_7_ON_ONE_SIDE]
RULES_PMI = [RULES_1_BEYOND_3SIGMA,
             RULES_8_ON_ONE_SIDE]
# Western Electric Company rules
RULES_WECO = [RULES_1_BEYOND_3SIGMA,
              RULES_2_OF_3_BEYOND_2SIGMA,
              RULES_4_OF_5_BEYOND_1SIGMA,
              RULES_8_ON_ONE_SIDE,
              RULES_6_TRENDING, RULES_14_UP_DOWN]
RULES_NELSON = [RULES_1_BEYOND_3SIGMA,
                RULES_9_ON_ONE_SIDE,
                RULES_6_TRENDING,
                RULES_14_UP_DOWN,
                RULES_2_OF_3_BEYOND_2SIGMA,
                RULES_4_OF_5_BEYOND_1SIGMA,
                RULES_15_BELOW_1SIGMA,
                RULES_8_BEYOND_1SIGMA_BOTH_SIDES]
RULES_ALL = [RULES_1_BEYOND_3SIGMA,
             RULES_2_OF_3_BEYOND_2SIGMA,
             RULES_4_OF_5_BEYOND_1SIGMA,
             RULES_7_ON_ONE_SIDE,
             RULES_8_ON_ONE_SIDE,
             RULES_9_ON_ONE_SIDE,
             RULES_6_TRENDING,
             RULES_14_UP_DOWN,
             RULES_15_BELOW_1SIGMA,
             RULES_8_BEYOND_1SIGMA_BOTH_SIDES]
def test_beyond_limits(data, center, lcl, ucl):
return data[0] > ucl or data[0] < lcl
def test_violating_runs(data, center, lcl, ucl):
for i in range(1, len(data)):
if (data[i-1] - center)*(data[i] - center) < 0:
return False
return True
def test_beyond_2_sigma(data, center, lcl, ucl):
cnt = 0
for i in range(len(data)):
if data[i] > center+(ucl-center)*2/3:
cnt +=1
if cnt>1:
return True
cnt = 0
for i in range(len(data)):
if data[i] < center-(center-lcl)*2/3:
cnt +=1
if cnt>1:
return True
return False
def test_beyond_1_sigma(data, center, lcl, ucl):
cnt = 0
for i in range(len(data)):
if data[i] > center+(ucl-center)/3:
cnt +=1
if cnt>3:
return True
cnt = 0
for i in range(len(data)):
if data[i] < center-(center-lcl)/3:
cnt +=1
if cnt>3:
return True
return False
def test_below_1_sigma(data, center, lcl, ucl):
for i in range(len(data)):
if data[i] > center+(ucl-center)/3 and data[i] > center:
return False
for i in range(len(data)):
if data[i] < center-(center-lcl)/3 and data[i] < center:
return False
return True
def test_trending(data, center, lcl, ucl):
if data[1] > data[0]:
for i in range(1, len(data)-1):
if data[i+1] <= data[i]:
return False
if data[1] < data[0]:
for i in range(1, len(data)-1):
if data[i+1] >= data[i]:
return False
if data[1] != data[0]:
return True
return False
def test_up_down(data, center, lcl, ucl):
for i in range(len(data)-2):
if data[i+1] < data[i]:
if data[i+2] < data[i+1]:
return False
if data[i+1] > data[i]:
if data[i+2] > data[i+1]:
return False
return True
def test_beyond_1_sigma_both_sides(data, center, lcl, ucl):
for i in range(len(data)):
if data[i] < center+(ucl-center)/3 and data[i] > center-(center-lcl)/3:
return False
return True
# Standard control-chart constants, indexed by subgroup size n (index 0 and
# unused slots are 0 placeholders).
# n           2      3      4      5      6      7      8      9      10
A2 = [0, 0, 1.880, 1.023, 0.729, 0.577, 0.483, 0.419, 0.373, 0.337, 0.308]
D3 = [0, 0, 0, 0, 0, 0, 0, 0.076, 0.136, 0.184, 0.223]
D4 = [0, 0, 3.267, 2.575, 2.282, 2.115, 2.004, 1.924, 1.864, 1.816, 1.777]
# n    0  1    2       3       4       5       6       7       8
#      9      10      11      12      13      14      15     (20)    (25)
c4 = [0, 0, 0.7979, 0.8862, 0.9213, 0.9400, 0.9515, 0.9594, 0.9650,
      0.9693, 0.9727, 0.9754, 0.9776, 0.9794, 0.9810, 0.9823]  # 0.9869, 0.9896]
B3 = [0, 0, 0, 0, 0, 0, 0.030, 0.118, 0.185, 0.239, 0.284, 0.321,
      0.354, 0.382, 0.406, 0.428]  # 0.510, 0.565]
B4 = [0, 0, 3.267, 2.568, 2.266, 2.089, 1.970, 1.882, 1.815, 1.761,
      1.716, 1.679, 1.646, 1.618, 1.594, 1.572]  # 1.490, 1.435]
B5 = [0, 0, 0, 0, 0, 0, 0.029, 0.113, 0.179, 0.232, 0.276, 0.313,
      0.346, 0.374, 0.399, 0.421]  # 0.504, 0.559]
B6 = [0, 0, 2.606, 2.276, 2.088, 1.964, 1.874, 1.806, 1.751, 1.707,
      1.669, 1.637, 1.610, 1.585, 1.563, 1.544]  # 1.470, 1.420]
A3 = [0, 0, 2.659, 1.954, 1.628, 1.427, 1.287, 1.182, 1.099, 1.032,
      0.975, 0.927, 0.886, 0.850, 0.817, 0.789]  # 0.680, 0.606]
def get_stats_x_mr_x(data, size):
    """Center line and control limits for the X part of an X-mR
    (individuals) chart. `size` must be 1."""
    assert size == 1
    center = np.mean(data)
    # average moving range of consecutive observations
    moving_ranges = [abs(a - b) for a, b in zip(data, data[1:])]
    mr_bar = sum(moving_ranges) / (len(data) - 1)
    d2 = 1.128  # bias-correction constant for moving ranges of 2
    return center, center - 3 * mr_bar / d2, center + 3 * mr_bar / d2
def get_stats_x_mr_mr(data, size):
    """Center line and control limits for the mR part of an X-mR chart;
    the lower limit is fixed at 0. `size` must be 1."""
    assert size == 1
    moving_ranges = [abs(a - b) for a, b in zip(data, data[1:])]
    mr_bar = sum(moving_ranges) / (len(data) - 1)
    d2 = 1.128  # bias-correction constant for moving ranges of 2
    return mr_bar, 0, mr_bar + 3 * mr_bar / d2
def get_stats_x_bar_r_x(data, size):
    """Center line and A2-based limits for the X-bar part of an X-bar/R
    chart. `data` is a list of subgroups, each of length `size` (2..10)."""
    n = size
    assert 2 <= n <= 10
    ranges = []
    for subgroup in data:
        assert len(subgroup) == n
        ranges.append(max(subgroup) - min(subgroup))
    r_bar = sum(ranges) / len(data)
    center = np.mean(data)
    margin = A2[n] * r_bar
    return center, center - margin, center + margin
def get_stats_x_bar_r_r(data, size):
    """Center line and D3/D4-based limits for the R part of an X-bar/R
    chart. `data` is a list of subgroups, each of length `size` (2..10)."""
    n = size
    assert 2 <= n <= 10
    ranges = []
    for subgroup in data:
        assert len(subgroup) == n
        ranges.append(max(subgroup) - min(subgroup))
    r_bar = sum(ranges) / len(data)
    return r_bar, D3[n] * r_bar, D4[n] * r_bar
def get_stats_x_bar_s_x(data, size):
    """Center line and A3-based limits for the X-bar part of an X-bar/S
    chart. `data` is a list of subgroups, each of length `size` (2..10)."""
    n = size
    assert 2 <= n <= 10
    # mean of per-subgroup sample standard deviations (ddof=1)
    s_bar = np.mean(np.std(data, 1, ddof=1))
    center = np.mean(data)
    margin = A3[n] * s_bar
    return center, center - margin, center + margin
def get_stats_x_bar_s_s(data, size):
    """Center line and B3/B4-based limits for the S part of an X-bar/S
    chart. `data` is a list of subgroups, each of length `size` (2..10)."""
    n = size
    assert 2 <= n <= 10
    # mean of per-subgroup sample standard deviations (ddof=1)
    s_bar = np.mean(np.std(data, 1, ddof=1))
    return s_bar, B3[n] * s_bar, B4[n] * s_bar
def get_stats_p(data, size):
    """Center line and 3-sigma limits for a p (fraction nonconforming)
    chart; limits are clipped to [0, 1]."""
    n = size
    assert n > 1
    p_bar = float(sum(data)) / (n * len(data))
    sigma = np.sqrt(p_bar * (1 - p_bar) / n)
    lcl = max(p_bar - 3 * sigma, 0)
    ucl = min(p_bar + 3 * sigma, 1.0)
    return p_bar, lcl, ucl
def get_stats_np(data, size):
    """Center line and 3-sigma limits for an np (count nonconforming)
    chart; limits are clipped to [0, n]."""
    n = size
    assert n > 1
    p_bar = float(sum(data)) / (n * len(data))
    sigma = np.sqrt(n * p_bar * (1 - p_bar))
    center = n * p_bar
    lcl = max(center - 3 * sigma, 0)
    ucl = min(center + 3 * sigma, n)
    return center, lcl, ucl
def get_stats_c(data, size):
    """Center line and 3-sigma limits for a c (defect count) chart; the
    lower limit is clipped at 0. `size` is unused."""
    c_bar = np.mean(data)
    sigma = np.sqrt(c_bar)
    lcl = max(c_bar - 3 * sigma, 0)
    return c_bar, lcl, c_bar + 3 * sigma
def get_stats_u(data, size):
    """Center line and 3-sigma limits for a u (defects per unit) chart;
    the lower limit is clipped at 0."""
    n = size
    assert n > 1
    u_bar = float(sum(data)) / (len(data) * n)
    sigma = np.sqrt(u_bar / n)
    lcl = max(u_bar - 3 * sigma, 0)
    return u_bar, lcl, u_bar + 3 * sigma
def get_stats_cusum(data, size):
    """Stats for a CUSUM chart.

    The data are already shifted around the target value, so the center
    line is 0; no control limits are reported (None, None).
    """
    return 0, None, None
def prepare_data_none(data, size):
    # Identity transform: used by charts that plot the raw observations.
    return data
def prepare_data_x_bar_rs_x(data, size):
    """Collapse each subgroup to its mean (X part of X-bar/R and X-bar/S)."""
    return [np.mean(subgroup) for subgroup in data]
def prepare_data_x_bar_r_r(data, size):
    """Collapse each subgroup to its range (max - min)."""
    return [max(subgroup) - min(subgroup) for subgroup in data]
def prepare_data_x_bar_s_s(data, size):
    """Collapse each subgroup to its sample standard deviation (ddof=1)."""
    return [np.std(subgroup, ddof=1) for subgroup in data]
def prepare_data_x_mr(data, size):
    """Moving ranges of consecutive points; index 0 is a 0 placeholder
    because the first observation has no prior point."""
    return [0] + [abs(a - b) for a, b in zip(data, data[1:])]
def prepare_data_p(data, size):
    """Convert nonconforming counts to fractions (count / subgroup size).

    BUG FIX: the original seeded the result with a spurious leading 0
    (copy-paste from prepare_data_x_mr), adding a fake first sample to
    every p chart; a p chart has exactly one point per subgroup, as the
    np-chart path (prepare_data_none) does.
    """
    return [float(count) / size for count in data]
def prepare_data_u(data, size):
    """Convert defect counts to defects-per-unit (count / subgroup size).

    BUG FIX: the original seeded the result with a spurious leading 0
    (copy-paste from prepare_data_x_mr), adding a fake first sample to
    every u chart; a u chart has exactly one point per subgroup.
    """
    return [float(count) / size for count in data]
def prepare_data_cusum(data, size, target=None):
    r"""Prepare data for a CUSUM chart.

    Subtracts the target from each point, then returns the running
    cumulative sums $S_m = \sum_{i=1}^m (x_i - \mu)$ with a leading 0
    (the empty sum). When `target` ($\mu$) is None, the sample mean is used.

    PERF FIX: replaces the original quadratic `sum(data2[:i])` slicing
    with a single O(n) accumulate pass; values are identical (the same
    left-to-right addition order).
    """
    from itertools import accumulate
    if target is None:
        target = np.mean(data)
    deviations = [float(d) - target for d in data]
    return [0] + list(accumulate(deviations))
# Map chart type -> (stats function returning (center, lcl, ucl),
# data-preparation function). A stats entry of None means control limits
# are not computed for that chart type.
STATS_FUNCS = {
    CHART_X_BAR_R_X: (get_stats_x_bar_r_x, prepare_data_x_bar_rs_x),
    CHART_X_BAR_R_R: (get_stats_x_bar_r_r, prepare_data_x_bar_r_r),
    CHART_X_BAR_S_X: (get_stats_x_bar_s_x, prepare_data_x_bar_rs_x),
    CHART_X_BAR_S_S: (get_stats_x_bar_s_s, prepare_data_x_bar_s_s),
    CHART_X_MR_X: (get_stats_x_mr_x, prepare_data_none),  ##
    CHART_X_MR_MR: (get_stats_x_mr_mr, prepare_data_x_mr),  ##
    CHART_P: (get_stats_p, prepare_data_p),
    CHART_NP: (get_stats_np, prepare_data_none),
    CHART_C: (get_stats_c, prepare_data_none),  ##
    CHART_U: (get_stats_u, prepare_data_u),
    CHART_EWMA: (None, prepare_data_none),
    CHART_CUSUM: (get_stats_cusum, prepare_data_cusum),
    CHART_THREE_WAY: (None, prepare_data_none),
    CHART_TIME_SERIES: (None, prepare_data_none)}
RULES_FUNCS = {
RULES_1_BEYOND_3SIGMA: (test_beyond_limits, 1),
RULES_2_OF_3_BEYOND_2SIGMA: (test_beyond_2_sigma, 3),
RULES_4_OF_5_BEYOND_1SIGMA: (test_beyond_1_sigma, 5),
RULES_7_ON_ONE_SIDE: (test_violating_runs, 7),
RULES_8_ON_ONE_SIDE: (test_violating_runs, | |
from scipy.optimize import root
import numpy as np
import cantera as ct
import pandas as pd
from solventx import result_struct as rs
from solventx import utilities
from solventx import config
import operator
import os
class solventx:
coltypes = ['Extraction','Scrub','Strip']
ml2l = 0.001 # mililiter to liter
scale = 1 # volumetric scaling from unit ml/sec
g_p_kg = 1000 # grams to kg
s_p_h = 3600 # seconds to hours
target_conc = 45 #g/L
def __init__(self, config_file, prep_capex=0, prep_opex=0, prep_revenue=0, prep_npv=0):
    """Constructor.

    Reads the JSON configuration, loads the Cantera phase definitions,
    caches species/phase indices, and draws a random REE feed composition
    between the configured lower/upper bounds.

    config_file: path to the solventx configuration file.
    prep_capex/prep_opex/prep_revenue/prep_npv: accepted but not used here
        (presumably kept for interface compatibility — TODO confirm).
    """
    self.confDict = utilities.read_config(config_file)
    solventxHome = self.confDict["solventxHome"]
    reeComps = self.confDict['compositions']
    self.xmlData = self.confDict['xmlData']
    self.modulesData = self.confDict['modules']
    # XML file name is derived from the phase and the concatenated REE input list
    self.xml = os.path.join(solventxHome, self.xmlData['xml'], self.xmlData['phase'] + '_' + ''.join(self.modulesData["input"]) + '.xml')
    # Set required data
    self.phase_names = self.confDict["phasenames"]  # from xml input file
    self.phase = ct.import_phases(self.xml, self.phase_names)
    # Derived and/or reusable system parameters
    self.column = self.coltypes  # Column names: Extraction, Scrub, Strip
    self.solv = self.confDict["solvents"]  # solvent list - i.e., electrolyte, extractant and organic diluent
    # ree by modules
    self.ree = self.modulesData["input"]  # (rare earth) metal list
    # Cantera indices
    self.mix = ct.Mixture(self.phase)
    self.aq = self.mix.phase_index(self.phase_names[0])
    self.org = self.mix.phase_index(self.phase_names[1])
    self.ns = self.mix.n_species
    self.naq = self.mix.phase(self.aq).n_species
    self.norg = self.mix.phase(self.org).n_species
    self.HA_Index = self.mix.species_index(self.org, '(HA)2(org)')  # index of extractant in cantera species list
    self.Hp_Index = self.mix.species_index(self.aq, 'H+')  # index of H+ in cantera species list
    self.Cl_Index = self.mix.species_index(self.aq, 'Cl-')  # index of Cl in cantera species list
    self.canteranames = self.mix.species_names
    # Species held fixed (not solved for); canteravars are the free variables
    self.fixed_species = ['H2O(L)', 'OH-', 'Cl-', 'dodecane']
    self.canteravars = [ij for ij in self.canteranames if ij not in self.fixed_species]
    self.nsy = len(self.canteravars)
    self.naqy = len([ij for ij in self.mix.species_names[:self.naq] if ij not in self.fixed_species])
    self.norgy = len([ij for ij in self.mix.species_names[self.naq:] if ij not in self.fixed_species])
    self.mwre, \
    self.mwslv = self.get_mw()  # g/mol
    self.rhoslv = [1000, 960, 750]  # densities of H2O, (HA)2(org), dodecane [g/L]
    self.upper = [reeComps[i]['upper'] for i in self.ree]
    self.lower = [reeComps[i]['lower'] for i in self.ree]
    # Random feed masses drawn uniformly between the configured bounds
    ree_mass = [np.random.uniform(i, j) for i, j in zip(self.lower, self.upper)]
    self.get_conc(ree_mass)
    self.purity_spec = .99  # not needed?
    self.recov_spec = .99  # not needed?
    self.revenue = [0, 0, 0]
    self.Ns = [0, 0, 0]
    self.nsp = pd.DataFrame()  # feed streams (aq and org) for each column
    self.nsp0 = pd.DataFrame()  # feed streams (aq and org) for each column
    self.y = {}  # all compositions
    # NOTE(review): self.Ns is re-assigned here, overwriting the [0,0,0]
    # list above — the dict is what the rest of the class sees.
    self.Ns = {}
def get_conc(self, ree_mass):
    """Store the REE feed masses and derive total feed volume plus
    per-REE concentrations scaled to the target total concentration."""
    self.ree_mass = ree_mass
    total_mass = sum(self.ree_mass)
    # [kg/hr] * [g/kg] / [g/L] = [L/hr]
    self.vol = total_mass / self.g_p_kg / self.target_conc
    # mass fraction times target total concentration -> [g/L]
    self.ree_conc = [(mass / total_mass) * self.target_conc for mass in self.ree_mass]
def get_mw(self, conv=ml2l):
    """Return molecular weights (g/mol) for the REEs and the solvents.

    REEs are looked up in the aqueous phase (as '<name>+++'); solvents in
    the organic phase except water, which lives in the aqueous phase.
    """
    mixture = ct.Mixture(self.phase)
    aq_phase = mixture.phase(mixture.phase_index(self.phase_names[0]))
    org_phase = mixture.phase(mixture.phase_index(self.phase_names[1]))
    mwre = np.zeros(len(self.ree))  # molecular weight of rees
    mwslv = np.zeros(len(self.solv))  # mw for 'solvents'
    for i, ree_name in enumerate(self.ree):
        mwre[i] = aq_phase.molecular_weights[aq_phase.species_index(ree_name + '+++')]
    for j, solvent in enumerate(self.solv):
        source = aq_phase if solvent == 'H2O(L)' else org_phase
        mwslv[j] = source.molecular_weights[source.species_index(solvent)]
    return mwre, mwslv
def get_process(self):
    """Find the valid process configuration whose input and strip
    component sets match the configured modules, and cache its data."""
    input_components = self.confDict['modules']['input']
    strip_components = self.confDict['modules']['output']['strip']
    n_components = len(input_components)
    config_key = ''
    print(f'Looping through following modules config:{list(config.valid_processes.keys())}')
    for key, candidate in config.valid_processes.items():
        # match both the input set and the strip set (order-insensitive)
        if set(input_components) == set(candidate['input']) and \
           set(strip_components) == set(candidate['strip']):
            config_key = key
    if config_key:
        print(f'Found the following process config:{config_key}')
    else:
        raise ValueError(f'No valid configuration found for input:{input_components},strip:{strip_components}!')
    modules = config.valid_processes[config_key]['modules']
    x = []
    print(f'Process config {config_key}:Input:{input_components},Number of modules:{len(modules)}')
    print('Modules info:')
    for key, module in modules.items():
        x.extend(module['x'])
        print(f'Module {key}:{module["strip_group"]}')
    print(f'x0:{x}')
    self.x = x
    self.modules = modules
    self.num_input = n_components
    self.config_key = config_key
def create_var_space(self, input_feeds=1):
    """Build the variable index maps: mutable design variables per feed and
    per module, plus immutable mole-fraction entries per input component."""
    var_space = {
        'immutable': {},
        'mutable': {},  # var name -> index into the design vector
    }
    mod_space = {}
    x_space = {}
    immutable_var_names = ['mol_frac']
    feed_var_names = ['(HA)2(org)']
    index = 0
    # one extractant variable per input feed
    for feed_num in range(input_feeds):
        var_space['mutable'][f'{feed_var_names[0]}-{feed_num}'] = index
        index += 1
    # module-level mutable variables
    for key, value in self.modules.items():
        mod_space[f'module-{key}'] = key
        x_space[f'module-{key}'] = value['x']
        for var in value['mvn']:
            var_space['mutable'][f'{var}-{key}'] = index
            index += 1
    # immutable mole fractions, one per input component
    for component in self.confDict['modules']['input']:
        var_space['immutable'][f'{immutable_var_names[0]}-{component}'] = index
        index += 1
    self.var_space = var_space
    self.combined_var_space = combine_dict(var_space['mutable'], var_space['immutable'])
    self.mod_space = mod_space
    self.x_space = x_space
def flow_path_exist(self, var):  # Not used in current implementation
    """Return True if a proper downstream module exists to connect to `var`.

    var: e.g. 'Extraction-0'; returns True when the connected
    'Extraction-<next>' variable exists and its value is positive.
    """
    try:
        name, module = var.split('-')
    except ValueError:  # variable doesn't exist in dictionary / malformed
        return False
    # get next module location
    if name == 'Extraction':
        next_ = get_next(module, 'left')
    elif name == 'Strip':
        next_ = get_next(module, 'right')
    index = self.combined_var_space.get(f'Extraction-{next_}')
    # BUG FIX: the original `(False, self.variables[index] > 0)[index != None]`
    # evaluated BOTH tuple elements eagerly, so a missing lookup
    # (index is None) indexed self.variables with None and raised
    # instead of returning False.
    if index is None:
        return False
    return self.variables[index] > 0
def create_nsp_open(self, name, num):  # g_p_kg=g_p_kg ): #, h0, target, xml, cantera_data, experiments):
    """Create mole flows for column feed streams in a given module. Arranges
    them in the dataframe, nsp, in the form that Cantera expects.

    name: unused here (kept for interface compatibility — TODO confirm).
    num: module number as a string; module 0 uses the primary feed, later
         modules take their organic feed from the previous module's Strip exit.
    """
    nre = np.zeros(len(self.ree))  # REE specie moles
    salts = np.zeros(len(self.ree))  # neutral complexe moles
    strip_ree = config.valid_processes[self.config_key]['modules'][num]['strip_group']
    # 1 for REEs belonging to this module's strip group, else 0
    is_scrub = [1 if re in strip_ree else 0 for re in self.ree]
    # Determine if there is a parent column or not
    if int(num) > 0:
        # Organic feed comes from the previous module's Strip column exit
        nnum = str(int(num) - 1)
        salts = np.array(self.y['Strip-' + nnum][self.canteravars.index('(HA)2(org)') + 1:self.nsy])  # organic exit rees
        n_HA = np.array(self.y['Strip-' + nnum][self.canteravars.index('(HA)2(org)')])  # organic exit extractant
        # moles / (density/mw) = volume
        vol_HA = n_HA / (self.rhoslv[self.solv.index('(HA)2(org)')] / self.mwslv[self.solv.index('(HA)2(org)')])
        n_dodec = self.nsp0['Strip-' + nnum][self.canteranames.index('dodecane')]
        vol_dodec = n_dodec / (self.rhoslv[self.solv.index('dodecane')] / self.mwslv[self.solv.index('dodecane')])
        orgvol = vol_HA + vol_dodec
    else:
        # Compositions from the design vector for the first module
        orgvol = self.orgvol[int(num)]
        vol_HA = orgvol * (self.x[self.combined_var_space['(HA)2(org)-0']])
        vol_dodec = orgvol - vol_HA
        n_HA = vol_HA * self.rhoslv[self.solv.index('(HA)2(org)')] / self.mwslv[self.solv.index('(HA)2(org)')]  # [L/hr]*[g/L]/[g/mol] = [mol/hr]
        n_dodec = vol_dodec * self.rhoslv[self.solv.index('dodecane')] / self.mwslv[self.solv.index('dodecane')]  # [L/hr]*[g/L]/[g/mol] = [mol/hr]
    # Aqueous volumes per column from the organic/aqueous (OA) ratios
    aqvols = [orgvol / (self.x[self.combined_var_space['OA Extraction-' + num]]), orgvol / (self.x[self.combined_var_space['OA Scrub-' + num]]), orgvol / (self.x[self.combined_var_space['OA Strip-' + num]])]
    for k in range(len(self.column)):  # k represents different columns - extraction, scrub or strip
        n_H2O = aqvols[k] * self.rhoslv[self.solv.index('H2O(L)')] / self.mwslv[self.solv.index('H2O(L)')]  # [L/hr]*[g/L]/[g/mol] = [mol/hr]
        n_Hp = aqvols[k] * (self.x[self.combined_var_space['H+ ' + self.column[k] + '-' + num]])  # [L/hr]*[g/L]/[g/mol] = [mol/hr]
        if k == 0:  # extraction column:
            # check if there's a parent column. if there isn't, use primary feed,
            # otherwise take the corresponding parent aqueous composition data
            parent_col = get_parent(self.column[k] + '-' + num)  # if a parent module exists for ext column
            if parent_col:
                myree = np.array(self.y[parent_col][-self.nsy + self.canteravars.index('H+') + 1:-self.norgy])
                n_H2O = self.nsp[parent_col][self.canteranames.index('H2O(L)')]
                n_Hp = self.x[self.combined_var_space['H+ ' + self.column[k] + '-' + num]] * n_H2O / (self.rhoslv[self.solv.index('H2O(L)')] / self.mwslv[self.solv.index('H2O(L)')])
                for re in self.ree:
                    nre[self.ree.index(re)] = myree[self.ree.index(re)]  # [mol/hr]
            else:
                for re in self.ree:
                    nre[self.ree.index(re)] = self.ree_conc[self.ree.index(re)] * aqvols[k] / self.mwre[self.ree.index(re)]  # [g/L]/[L/hr]/[g/mol] = [mol/hr]
        elif k == 1:
            # scrub column: small recycle of the strip-group REEs only
            for re in self.ree:
                nre[self.ree.index(re)] = is_scrub[self.ree.index(re)] * self.x[self.combined_var_space['Recycle-' + num]] * self.ree_conc[self.ree.index(re)] * aqvols[k] / self.mwre[self.ree.index(re)]  # [1]*[g/L]/[L/hr]/[g/mol] = [mol/hr]
            # 0.05 makes it a small value
        else:
            # strip column: REE-free aqueous feed
            for re in self.ree:
                nre[self.ree.index(re)] = 0.0
        n_Cl = 3 * (sum(nre)) + n_Hp  # Cl- mole balance, all REEs come in as chlorides from leaching
        # species order must match self.canteranames
        n_specs = [n_H2O, n_Hp, 0, n_Cl] + [ii for ii in nre] + [n_HA, n_dodec] + [ij for ij in salts]
        # store in pandas dataframe
        self.nsp[self.column[k] + '-' + num] = n_specs
        self.nsp0[self.column[k] + '-' + num] = n_specs
def eval_column(self, num, col):
    """Evaluate one column of a module, solving for the stream
    compositions of all of its stages.

    Returns the root-solver result (or a default result_struct when the
    column has zero stages). Raises RuntimeError on solver failure.
    """
    stage_count = int(self.x[self.combined_var_space[col + '-' + num]])
    if stage_count == 0:
        # No stages: default result and a zeroed feed vector
        resy = rs.result_struct([0] * len(self.canteravars), None, 'No stages', 10000)
        self.nsp[col + '-' + num] = [0] * len(self.nsp0[col + '-' + num])
    else:
        y_initial = self.inity(col, num, stage_count)  # initialize y (stream vector)
        try:  # Solve design and check for convergence
            resy = root(eColOne, y_initial, args=(self, num, col, stage_count),
                        method='hybr', options=None)
        except (RuntimeError, EOFError):
            raise RuntimeError('Convergence failure in root function!')
    return resy
def update_nsp(self, resy, prev_col, num):
    """Propagate the organic-phase exit composition of `prev_col` into the
    organic feed of the next column in the same module (if any)."""
    next_index = self.column.index(prev_col) + 1
    if next_index <= 2:
        next_col = self.column[next_index]
        ha_idx = self.canteravars.index('(HA)2(org)')
        organic_feed = [resy.x[ha_idx]]
        # dodecane flow is unchanged from the column's current feed
        organic_feed.append(self.nsp[next_col + '-' + num][self.canteranames.index('dodecane')])
        organic_feed.extend(jk for jk in resy.x[ha_idx + 1:self.nsy])
        self.nsp[next_col + '-' + num][self.naq:] = organic_feed
def evaluate_open(self, x,): #
""" This is the simpler implementation of the process column design
it avoids the need to converge recycle streams. For now, | |
<filename>ramldocgen/generator.py
from xml.sax.saxutils import escape
import pyraml.parser as ramlparser
from .inlines import highlight_inline_js, api_doc_inline_css
from collections import OrderedDict
import re
import sys
import json
no_short_close = ['div', 'span', 'script']
def idfirst(od):
    """Return a copy of *od* with the 'id' key (if present) moved to the front.

    The relative order of all other keys is preserved.
    """
    reordered = OrderedDict()
    if 'id' in od:
        reordered['id'] = od['id']
    reordered.update((key, value) for key, value in od.items() if key != 'id')
    return reordered
class HTMLNode(object):
    """A minimal HTML element with attributes, children and pretty-printing.

    Attributes are sorted alphabetically with 'id' forced to the front (via
    idfirst).  An element with no children renders self-closing unless its tag
    is listed in no_short_close or may_short_close=False was passed.
    """

    def __init__(self, name, attributes=None, may_short_close=None):
        """Create a node.

        :param name: tag name, e.g. 'div'
        :param attributes: optional mapping of attribute name -> value.
            (Was a mutable default ``{}``; changed to None-sentinel per the
            no-mutable-default rule.  Passing None explicitly now also works.)
        :param may_short_close: force/forbid self-closing rendering; None
            derives the flag from the module-level no_short_close list.
        """
        if attributes is None:
            attributes = {}
        self.name = name
        self.attributes = idfirst(OrderedDict([(k, attributes[k]) for k in sorted(attributes)]))
        self.children = []
        if may_short_close is None:
            self.may_short_close = name not in no_short_close
        else:
            self.may_short_close = may_short_close
        self._indent = 0
        self._pretty = False
        self._debug = False

    def append(self, tag):
        """Append a child (plain values are wrapped in HTMLText); return it."""
        if not isinstance(tag, HTMLNode):
            tag = HTMLText(str(tag))
        self.children.append(tag)
        return tag

    def prepend(self, tag):
        """Insert a child at the front (wrapping plain values); return it."""
        if not isinstance(tag, HTMLNode):
            tag = HTMLText(str(tag))
        self.children.insert(0, tag)
        return tag

    def extend(self, tags):
        """Append every element of *tags* via append()."""
        for a in tags:
            self.append(a)

    def render(self, indent=None, pretty=None):
        """Render to a string, temporarily overriding indent/pretty state."""
        i = self._indent
        p = self._pretty
        if indent is not None:
            self._indent = indent
        if pretty is not None:
            self._pretty = pretty
        ret = str(self)
        self._indent = i
        self._pretty = p
        return ret

    def copy(self):
        """Deep-copy the node; render-state flags reset to defaults."""
        dest = HTMLNode(self.name)
        dest.attributes = self.attributes.copy()
        dest.children = [c.copy() for c in self.children]
        dest.may_short_close = self.may_short_close
        return dest

    def copy_contents(self):
        """Return deep copies of the children only."""
        return [c.copy() for c in self.children]

    def is_onlytext(self):
        """True when every child is an HTMLText (kept on one line when pretty)."""
        for el in self.children:
            if not isinstance(el, HTMLText):
                return False
        return True

    def __str__(self):
        indent = self._indent * '\t' if self._pretty else ''
        attrib = ''
        if len(self.attributes) > 0:
            attrib = ' {0}'.format(' '.join(["{0}=\"{1}\"".format(k, escape(self.attributes[k])) for k in self.attributes]))
        if len(self.children) == 0 and self.may_short_close:
            return "{2}<{0}{1} />".format(self.name, attrib, indent)
        cjoin = '\n' if self._pretty else ''
        br = '\n' if self._pretty and not self.is_onlytext() else ''
        # Render children once and reuse the result: the original rendered the
        # children a second time for the debug dump and passed stray, unused
        # arguments to the debug format string.
        inner = cjoin.join([c.render(self._indent + 1, self._pretty) for c in self.children])
        if self._debug:
            print("<{0}>{1}</{0}>".format(self.name, inner), file=sys.stderr)
        return "{3}<{0}{2}>{4}{1}{4}{5}</{0}>".format(self.name, inner, attrib, indent, br, indent if br != '' else '')
def urlify(text):
    """Wrap every http(s):// URL in *text* in an anchor opening a new tab."""
    def _to_anchor(match):
        url = match.group(0)
        return '<a href="{0}" target="_blank">{0}</a>'.format(url)
    return re.sub(r'http(?:s?)://\S+', _to_anchor, text)
class HTMLText(HTMLNode):
    """Leaf node whose text is HTML-escaped and URL-linkified on render."""

    def __init__(self, text):
        self.text = text
        self._indent = 0
        self._pretty = False

    def copy(self):
        return HTMLText(self.text)

    def __str__(self):
        rendered = urlify(escape(self.text))
        return rendered.replace('\n', '<br />')
class HTMLScript(HTMLNode):
    """Leaf node rendered verbatim (no escaping) -- for script/style payloads."""

    def __init__(self, text):
        self.text = text
        self._indent = 0
        self._pretty = False

    def __str__(self):
        return self.text

    def copy(self):
        return HTMLScript(self.text)
class inline(HTMLScript):
    """Inline script/style content; text becomes the tag body."""

    def js(self):
        wrapper = HTMLNode('script', {'type': 'text/javascript'})
        wrapper.append(self)
        return wrapper

    def css(self):
        wrapper = HTMLNode('style', {'type': 'text/css'})
        wrapper.append(self)
        return wrapper
class src(HTMLScript):
    """External resource reference; text is interpreted as the URL."""

    def js(self):
        return HTMLNode('script', {'type': 'text/javascript', 'src': self.text})

    def css(self):
        return HTMLNode('link', {'rel': 'stylesheet', 'href': self.text})
class meta(object):
    """Builder for a <meta> tag carrying either name= or http-equiv=."""

    def __init__(self, content, name=None, httpequiv=None):
        # Attribute insertion order matters for rendering: name/http-equiv
        # before content, matching the original output.
        node = HTMLNode('meta')
        if name is not None:
            node.attributes['name'] = name
        if httpequiv is not None:
            node.attributes['http-equiv'] = httpequiv
        node.attributes['content'] = content
        self.node = node

    def meta(self):
        return self.node

    def __str__(self):
        return str(self.node)
class HTMLTagHead(HTMLNode):
    """<head> element assembled from title, meta, css and js collections.

    The child list is rebuilt from those collections on every render, in the
    order: title, meta tags, stylesheets, scripts.
    """

    def __init__(self):
        super(HTMLTagHead, self).__init__("head")
        self.meta = []
        self.css = []
        self.title = None
        self.js = []

    def add_inline_js(self, data):
        self.js.append(inline(data))

    def add_inline_css(self, data):
        self.css.append(inline(data))

    def add_external_js(self, url):
        self.js.append(src(url))

    def add_external_css(self, url):
        self.css.append(src(url))

    def add_named_meta(self, name, content):
        self.meta.append(meta(content, name=name))

    def add_equiv_meta(self, httpequiv, content):
        self.meta.append(meta(content, httpequiv=httpequiv))

    def __str__(self):
        # Rebuild attributes/children from the collections before rendering.
        self.attributes = {}
        self.children = []
        if self.title is not None:
            title_tag = HTMLNode('title')
            title_tag.append(HTMLText(self.title))
            self.children.append(title_tag)
        for entry in self.meta:
            self.children.append(entry.meta())
        for sheet in self.css:
            self.children.append(sheet.css())
        for script in self.js:
            self.children.append(script.js())
        return super(HTMLTagHead, self).__str__()

    def copy(self):
        duplicate = HTMLTagHead()
        duplicate.meta = self.meta.copy()
        duplicate.css = self.css.copy()
        duplicate.title = self.title
        duplicate.js = self.js.copy()
        return duplicate
class HTML(HTMLNode):
    """Root document node with dedicated head and body sections."""

    def __init__(self):
        super(HTML, self).__init__('html')
        self.head = HTMLTagHead()
        self.body = HTMLNode('body')

    def __str__(self):
        # head/body are attached as the only children right before rendering.
        self.children = [self.head, self.body]
        return super(HTML, self).__str__()

    def copy(self):
        duplicate = HTML()
        duplicate.head = self.head.copy()
        duplicate.body = self.body.copy()
        return duplicate
def classify(name):
    """Turn a resource path into an HTML-id-safe identifier.

    Strips leading slashes and disallowed characters, then maps the
    structural characters (slash runs, braces) to underscores.
    """
    stripped = re.sub('[^a-zA-Z0-9/{}]', '', name.lstrip('/'))
    return re.sub(r'/+|\{|\}', '_', stripped)
def collect(root, base=''):
    """Flatten a RAML resource tree into a list, depth-first.

    Each visited node gets a ``_collect_path`` attribute holding its full
    path (base prefix plus the keys leading to it).
    """
    root._collect_path = base
    found = [root]
    if root.resources is not None:
        for part in root.resources:
            found.extend(collect(root.resources[part], base + part))
    return found
class Generator(object):
    def __init__(self, file):
        """Parse the RAML *file* once; all generation reads from self.raml."""
        self.raml = ramlparser.load(file)
def generate(self):
doc = HTML()
doc.head.title = self.raml.title
# Add meta tags, CSS and JS sources.
doc.head.add_equiv_meta('X-UA-Compatible', 'IE=edge')
doc.head.add_equiv_meta('Content-Type', 'text/html; charset=utf-8')
doc.head.add_external_css('https://netdna.bootstrapcdn.com/bootstrap/3.1.1/css/bootstrap.min.css')
doc.head.add_external_css('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.1/styles/default.min.css')
doc.head.add_inline_css(api_doc_inline_css)
doc.head.add_external_js('https://code.jquery.com/jquery-1.11.0.min.js')
doc.head.add_external_js('https://netdna.bootstrapcdn.com/bootstrap/3.1.1/js/bootstrap.min.js')
doc.head.add_external_js('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.1/highlight.min.js')
doc.head.add_inline_js(highlight_inline_js)
# Body container and layout.
doc.body.attributes['data-spy'] = 'scroll'
doc.body.attributes['data-target'] = '#sidebar'
container = doc.body.append(HTMLNode('div', {'class': 'container'}))
row = container.append(HTMLNode('div', {'class': 'row'}))
information_wrapper = row.append(HTMLNode('div', {'class': 'col-md-9', 'role': 'main'}))
legend = row.append(HTMLNode('div', {'class': 'col-md-3'})).append(HTMLNode('div', {'id': 'sidebar', 'class': 'hidden-print affix', 'role': 'complementary'})).append(HTMLNode('ul', {'class': 'nav nav-pills nav-stacked'}))
legend_api_wrapper = legend.append(HTMLNode('li'))
legend_api_wrapper.append(HTMLNode('a', {'href': '#_api_endpoints'})).append('API endpoints')
legend_api = legend_api_wrapper.append(HTMLNode('ul', {'class': 'nav nav-stacked nav-pills subnav'}))
# Page header
header = information_wrapper.append(HTMLNode('div', {'class': 'page-header'}))
header_text = header.append(HTMLNode('h1'))
header_text.append('{0} API documentation '.format(self.raml.title))
header_text.append(HTMLNode('small')).append('version {0}'.format(self.raml.version))
header.append(HTMLNode('p')).append(self.raml.baseUri)
# Endpoints container
routes_panel = information_wrapper.append(HTMLNode('div', {'class': 'panel panel-default'}))
routes_panel.append(HTMLNode('div', {'class': 'panel-heading'})).append(HTMLNode('h3', {'class': 'panel-title', 'id': '_api_endpoints'})).append('API Endpoints')
routes = routes_panel.append(HTMLNode('div', {'class': 'panel-body'}))
# The magic happens here.
for group in self.raml.resources:
# Create panel for each "group" (first path component)
panel = routes.append(HTMLNode('div', {'class': 'panel panel-default'}))
groupc = self.raml.resources[group]
panel.append(HTMLNode('div', {'class': 'panel-heading'})).append(HTMLNode('h3', {'id': classify(group), 'class': 'panel-title'})).append(group)
body = panel.append(HTMLNode('div', {'class': 'panel-body'})).append(HTMLNode('div', {'class': 'panel-group'}))
groupclass = classify(group)
# Append this to the legend (stacked pills on the right)
linode = legend_api.append(HTMLNode('li'))
linode.append(HTMLNode('a', {'href': '#{0}'.format(groupclass)})).append(group)
# Endpoints
for ep in collect(groupc, group):
# Insert each endpoint as a panel into the group panel.
path = ep._collect_path
classified = classify(path)
endpoint_wrapper = body.append(HTMLNode('div', {'class': 'panel panel-white'}))
endpoint_details = endpoint_wrapper.append(HTMLNode('div', {'class': 'panel-heading'})).append(HTMLNode('h4', {'class': 'panel-title'}))
endpoint_link = endpoint_details.append(HTMLNode('a', {'class': 'collapsed', 'data-toggle': 'collapse', 'href': '#panel_{0}'.format(classified)}))
# Path colors happen here.
parent, child = path.rsplit('/', 1)
if parent != '':
parent = '/{0}'.format(parent.lstrip('/'))
child = '/{0}'.format(child.lstrip('/'))
endpoint_link.append(HTMLNode('span', {'class': 'parent'})).append(parent)
endpoint_link.append(child)
# Method wrapper (buttons on the right) and the collapsible panel that contains the short descriptions of the methods.
methods = endpoint_details.append(HTMLNode('span', {'class': 'methods'}))
details_panel = endpoint_wrapper.append(HTMLNode('div', {'id': 'panel_{0}'.format(classified), 'class': 'panel-collapse collapse'})).append(HTMLNode('div', {'class': 'panel-body'})).append(HTMLNode('div', {'class': 'list-group'}))
for method in sorted(ep.methods):
# Insert each method into a myriad of places
methodc = ep.methods[method]
method_anchor = '{0}_{1}'.format(classified, method)
# Create the method badge
badge = methods.append(HTMLNode('a', {'href': '#{0}'.format(method_anchor)})).append(HTMLNode('span', {'class': 'badge badge_{0}'.format(method)}))
badge.append('{0} '.format(method))
# Append a lock if the endpoint is securedBy anything
if methodc.securedBy is not None and len(methodc.securedBy) > 0:
badge.append(HTMLNode('span', {'class': 'glyphicon glyphicon-lock', 'title': 'Authentication required'}))
methods.append(' ')
# Create the method panel. The method panel is an entry in the collapsible list of short descriptions.
method_panel = details_panel.append(HTMLNode('div', {'onclick': "window.location.href = '#{0}'".format(method_anchor), 'class': 'list-group-item'}))
method_panel.append(badge.copy())
method_panel.append(HTMLNode('div', {'class': 'method_description'})).append(HTMLNode('p')).append(methodc.description)
method_panel.append(HTMLNode('div', {'class': 'clearfix'}))
# Create the method dialog. The method modal is a dialog that shows up if the method badge is clicked anywhere.
method_dialog = endpoint_wrapper.append(HTMLNode('div', {'class': 'modal fade', 'tabindex': '0', 'id': method_anchor})).append(HTMLNode('div', {'class': 'modal-dialog'})).append(HTMLNode('div', {'class': 'modal-content'}))
# Method dialog header: badge and endpoint
dialog_header = method_dialog.append(HTMLNode('div', {'class': 'modal-header'}))
dialog_header.append(HTMLNode('button', {'class': 'close', 'data-dismiss': 'modal', 'aria-hidden': 'true'})).append('×')
dialog_title = dialog_header.append(HTMLNode('h4', {'class': 'modal-title', 'id': 'myModalLabel'}))
dialog_title.append(badge.copy())
dialog_title.append(' ')
dialog_title.extend(endpoint_link.copy_contents())
# Method dialog body: method description, authentication description, request and response details.
dialog_body = method_dialog.append(HTMLNode('div', {'class': 'modal-body'}))
dialog_body.append(HTMLNode('div', {'class': 'alert alert-info'})).append(HTMLNode('p')).append(methodc.description)
# Append a warning box for each security measure on this endpoint.
if methodc.securedBy is not None and len(methodc.securedBy) > 0:
security_box = dialog_body.append(HTMLNode('div', {'class': 'alert alert-warning'}))
if len(methodc.securedBy) > 1:
security_box.append(HTMLNode('div', {'class': 'authentication top'})).append(HTMLNode('strong')).append('This endpoint may be used with any of the following authentication schemes:')
else:
security_box.append(HTMLNode('div', {'class': 'authentication top'})).append(HTMLNode('strong')).append('This endpoint must be used with the following authentication scheme:')
for security in methodc.securedBy:
if security in self.raml.securitySchemes:
mydiv = security_box.append(HTMLNode('div', {'class': 'authentication'}))
mydiv.append(HTMLNode('span', {'class': 'glyphicon glyphicon-lock', 'title': 'Authentication required'}))
mydiv.append(' Secured by ')
mydiv.append(HTMLNode('a', {'href': '#panel__security_{0}'.format(classify(security))})).append(security)
mydiv.append(HTMLNode('p')).append(self.raml.securitySchemes[security].description)
# Create a tab display where the request and response details will live.
tabs = dialog_body.append(HTMLNode('ul', {'class': 'nav nav-tabs'}))
tab_contents = dialog_body.append(HTMLNode('div', {'class': 'tab-content'}))
first_tab = True
# The request tab is only inserted if list of headers, query parameters or a body example is present in the RAML.
if (methodc.headers is not None and len(methodc.headers) > 0) or (methodc.queryParameters is not None and len(methodc.queryParameters) > 0) or (methodc.body is not None and len(methodc.body) > 0):
first_tab = False
tabs.append(HTMLNode('li', {'class': 'active'})).append(HTMLNode('a', {'href': '#{0}_request'.format(method_anchor), 'data-toggle': 'tab'})).append('Request')
contents = tab_contents.append(HTMLNode('div', {'class': 'tab-pane active', 'id': '{0}_request'.format(method_anchor)}))
# List of headers inserted here.
if methodc.headers is not None and len(methodc.headers) | |
import json
import os
import pytest
import requests
from tests.acceptance.helpers import ENDPOINT_ACTIVATE
from tests.acceptance.helpers import ENDPOINT_CONFIG
from tests.acceptance.helpers import create_and_validate_request_and_response
from tests.acceptance.helpers import sort_response
expected_activate_ab = """[
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
}
]"""
expected_activate_ab_empty_experimentKey = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "",
"variationKey": "",
"type": "",
"enabled": false,
"error": "experimentKey not found"
}
]"""
expected_activate_ab_invalid_experimentKey = """[
{
"userId": "matjaz",
"experimentKey": "invalid exper key",
"featureKey": "",
"variationKey": "",
"type": "",
"enabled": false,
"error": "experimentKey not found"
}
]"""
@pytest.mark.parametrize("experiment_key, expected_response, expected_status_code", [
    ("ab_test1", expected_activate_ab, 200),
    ("", expected_activate_ab_empty_experimentKey, 200),
    ("invalid exper key", expected_activate_ab_invalid_experimentKey, 200),
], ids=["valid case", "empty exper key", "invalid exper key"])
def test_activate__experiment(session_obj, experiment_key, expected_response,
                              expected_status_code):
    """Validate the decision returned when activating an AB experiment.

    The whole JSON response is compared (not just variation/enabled fields)
    for extra robustness.

    :param session_obj: session object
    :param experiment_key: experiment key sent to the activate endpoint
    :param expected_response: expected JSON body, as a string
    :param expected_status_code: expected HTTP status code
    """
    payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
    response = create_and_validate_request_and_response(
        ENDPOINT_ACTIVATE, 'post', session_obj,
        payload=payload, params={"experimentKey": experiment_key})
    assert response.json() == json.loads(expected_response)
    assert response.status_code == expected_status_code, response.text
    response.raise_for_status()
expected_activate_feat = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
}
]"""
expected_activate_feat_empty_featureKey = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "",
"variationKey": "",
"type": "",
"enabled": false,
"error": "featureKey not found"
}
]"""
expected_activate_feat_invalid_featureKey = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "invalid feat key",
"variationKey": "",
"type": "",
"enabled": false,
"error": "featureKey not found"
}
]"""
@pytest.mark.parametrize("feature_key, expected_response, expected_status_code", [
    ("feature_1", expected_activate_feat, 200),
    ("", expected_activate_feat_empty_featureKey, 200),
    ("invalid feat key", expected_activate_feat_invalid_featureKey, 200),
], ids=["valid case", "empty feat key", "invalid feat key"])
def test_activate__feature(session_obj, feature_key, expected_response,
                           expected_status_code):
    """Validate that the feature is enabled in the decision for a feature test.

    The whole JSON response is compared (not just variation/enabled fields)
    for extra robustness.

    :param session_obj: session object
    :param feature_key: API request feature key
    :param expected_response: API expected response
    :param expected_status_code: API response expected status code
    """
    payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
    params = {"featureKey": feature_key}
    resp = create_and_validate_request_and_response(ENDPOINT_ACTIVATE, 'post', session_obj, payload=payload,
                                                    params=params)
    # NOTE(review): this branch expects raise_for_status() to raise even though
    # every parametrized case expects status 200, and execution falls through
    # to the unconditional asserts below regardless -- confirm the intent
    # (likely copied from an error-path test) before restructuring.
    if isinstance(resp.json(), dict) and resp.json()['error']:
        with pytest.raises(requests.exceptions.HTTPError):
            assert resp.json() == json.loads(expected_response)
            assert resp.status_code == expected_status_code, resp.text
            resp.raise_for_status()
    assert json.loads(expected_response) == resp.json()
    assert resp.status_code == expected_status_code, resp.text
expected_activate_type_exper = """[
{
"userId": "matjaz",
"experimentKey": "feature_2_test",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
}
]"""
expected_activate_type_feat = """[
{
"userId": "matjaz",
"experimentKey": "feature_2_test",
"featureKey": "feature_2",
"variationKey": "variation_1",
"type": "feature",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_3",
"variationKey": "",
"type": "feature",
"enabled": false
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_4",
"variationKey": "",
"type": "feature",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_5",
"variationKey": "",
"type": "feature",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
}
]"""
@pytest.mark.parametrize("decision_type, expected_response, expected_status_code, bypass_validation_request", [
    ("experiment", expected_activate_type_exper, 200, False),
    ("feature", expected_activate_type_feat, 200, False),
    ("invalid decision type", {'error': 'type "invalid decision type" not supported'}, 400, True),
    ("", {'error': 'type "" not supported'}, 400, True)
], ids=["experiment decision type", "feature decision type", "invalid decision type", "empty decision type"])
def test_activate__type(session_obj, decision_type, expected_response,
                        expected_status_code, bypass_validation_request):
    """Activate by decision type and validate the returned decision set.

    Cases: "experiment" type, "feature" type, and unsupported types which
    must come back as HTTP errors (bug OASIS-6031 regression coverage).

    :param session_obj: session object
    :param decision_type: parameterized decision type
    :param expected_response: expected response (JSON string or error dict)
    :param expected_status_code: expected HTTP status code
    :param bypass_validation_request: option to bypass schema validation
    """
    payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
    resp = create_and_validate_request_and_response(
        ENDPOINT_ACTIVATE, 'post', session_obj, bypass_validation_request,
        payload=payload, params={"type": decision_type})
    if decision_type in ('experiment', 'feature'):
        # Sort both sides: the service does not guarantee decision order.
        actual = sort_response(resp.json(), 'experimentKey', 'featureKey')
        expected = sort_response(json.loads(expected_response), 'experimentKey',
                                 'featureKey')
        assert actual == expected
    elif resp.json()['error']:
        with pytest.raises(requests.exceptions.HTTPError):
            assert resp.json() == expected_response
            resp.raise_for_status()
def test_activate_403(session_override_sdk_key):
    """Verify that an invalid SDK key yields 403 Forbidden on activation.

    :param session_override_sdk_key: session fixture carrying an invalid SDK key
    """
    payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
    with pytest.raises(requests.exceptions.HTTPError):
        resp = create_and_validate_request_and_response(
            ENDPOINT_ACTIVATE, 'post', session_override_sdk_key,
            payload=payload, params={"type": "experiment"})
        assert resp.status_code == 403
        assert resp.json()['error'] == 'unable to fetch fresh datafile (consider ' \
                                       'rechecking SDK key), status code: 403 Forbidden'
        resp.raise_for_status()
@pytest.mark.parametrize(
    "experiment, disableTracking, expected_status_code, bypass_validation_request", [
        ("ab_test1", "true", 200, False),
        ("ab_test1", "false", 200, False),
        ("feature_2_test", "true", 200, False),
        ("feature_2_test", "false", 200, False),
        ("ab_test1", "", 200, True),
        ("ab_test1", "invalid_boolean", 200, True),
    ], ids=["ab_experiment and decision_tr true", "ab_experiment and decision_tr false",
            "feature test and decision_tr true",
            "feature test and decision_tr false", "empty disableTracking",
            "invalid disableTracking"])
def test_activate__disable_tracking(session_obj, experiment, disableTracking,
                                    expected_status_code, bypass_validation_request):
    """disableTracking=true suppresses impression tracking (old "get_variation").

    Suppression itself cannot be observed from acceptance tests, so only the
    status code is checked; the FS compatibility suite verifies the missing
    event through a proxy event dispatcher.

    :param session_obj: session fixture
    :param experiment: ab experiment or feature test key
    :param disableTracking: "true", "false", or an empty/invalid string
    :param expected_status_code: expected HTTP status code
    :param bypass_validation_request: option to bypass schema validation
    """
    payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
    request_params = {
        "experimentKey": experiment,
        "disableTracking": disableTracking
    }
    resp = create_and_validate_request_and_response(
        ENDPOINT_ACTIVATE, 'post', session_obj, bypass_validation_request,
        payload=payload, params=request_params)
    resp.raise_for_status()
    assert resp.status_code == expected_status_code
expected_enabled_true_all_true = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
}
]"""
expected_enabled_true_feature_off = """[
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
}
]"""
expected_enabled_false_feature_on = """[]"""
expected_enabled_false_feature_off = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_3",
"variationKey": "",
"type": "feature",
"enabled": false
}
]"""
expected_enabled_empty = """[
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
}
]"""
expected_enabled_invalid = """[
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
}
]"""
@pytest.mark.parametrize(
    "enabled, experimentKey, featureKey, expected_response, expected_status_code, bypass_validation_request", [
        ("true", "ab_test1", "feature_1", expected_enabled_true_all_true, 200, False),
        ("true", "ab_test1", "feature_3", expected_enabled_true_feature_off, 200, False),
        ("false", "ab_test1", "feature_1", expected_enabled_false_feature_on, 200, False),
        ("false", "ab_test1", "feature_3", expected_enabled_false_feature_off, 200, False),
        ("", "ab_test1", "feature_1", expected_enabled_empty, 200, True),
        ("invalid for enabled", "ab_test1",
         "feature_1", expected_enabled_invalid, 200, True)
    ], ids=["enabled true, all true", "enabled true, feature off",
            "enabled false, feature on",
            "enabled false, feature off", "empty value for enabled",
            "invalid value for enabled"])
def test_activate__enabled(session_obj, enabled, experimentKey, featureKey,
                           expected_response, expected_status_code, bypass_validation_request):
    """Filter activation results on the "enabled" flag ("true"/"false" strings).

    feature_1 is enabled in the project (dropped when enabled=false);
    feature_3 is disabled (dropped when enabled=true).

    :param session_obj: session fixture
    :param enabled: "true"/"false" filter value (or empty/invalid string)
    :param experimentKey: experiment key
    :param featureKey: feature key
    :param expected_response: expected JSON body, as a string
    :param expected_status_code: expected HTTP status code
    :param bypass_validation_request: option to bypass schema validation
    """
    payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
    query = {
        "experimentKey": experimentKey,
        "featureKey": featureKey,
        "enabled": enabled
    }
    resp = create_and_validate_request_and_response(
        ENDPOINT_ACTIVATE, 'post', session_obj, bypass_validation_request,
        payload=payload, params=query)
    # Sort both sides: the service does not guarantee decision order.
    actual = sort_response(resp.json(), 'experimentKey', 'featureKey')
    expected = sort_response(json.loads(expected_response), 'experimentKey',
                             'featureKey')
    assert actual == expected
    assert resp.status_code == expected_status_code
    resp.raise_for_status()
# #######################################################
# MISCELANEOUS ALTERNATIVE TEST CASES
# #######################################################
expected_activate_with_config = """[
{
"userId": "matjaz",
| |
from __future__ import absolute_import, print_function, division
import threading
import time
import traceback
import h2.exceptions
import hyperframe
import six
from h2 import connection
from h2 import events
from six.moves import queue
import netlib.exceptions
from mitmproxy import exceptions
from mitmproxy import models
from mitmproxy.protocol import base
from mitmproxy.protocol import http
import netlib.http
from netlib import tcp
from netlib import basethread
from netlib.http import http2
class SafeH2Connection(connection.H2Connection):
    """Thread-safe wrapper around h2's H2Connection.

    State-mutating h2 calls are serialized through ``self.lock`` (an RLock)
    and the resulting wire bytes are flushed to ``self.conn`` while the lock
    is still held.
    """
    def __init__(self, conn, *args, **kwargs):
        super(SafeH2Connection, self).__init__(*args, **kwargs)
        self.conn = conn  # underlying connection the serialized frames are written to
        self.lock = threading.RLock()
    def safe_increment_flow_control(self, stream_id, length):
        """Return *length* bytes of flow-control credit, connection- then stream-level."""
        if length == 0:
            return
        # Connection-level window update.
        with self.lock:
            self.increment_flow_control_window(length)
            self.conn.send(self.data_to_send())
        # Stream-level window update, only while the stream is still open.
        with self.lock:
            if stream_id in self.streams and not self.streams[stream_id].closed:
                self.increment_flow_control_window(length, stream_id=stream_id)
                self.conn.send(self.data_to_send())
    def safe_reset_stream(self, stream_id, error_code):
        """Send RST_STREAM, ignoring the race where the stream already closed."""
        with self.lock:
            try:
                self.reset_stream(stream_id, error_code)
            except h2.exceptions.StreamClosedError: # pragma: no cover
                # stream is already closed - good
                pass
            self.conn.send(self.data_to_send())
    def safe_update_settings(self, new_settings):
        """Apply and transmit a SETTINGS update under the lock."""
        with self.lock:
            self.update_settings(new_settings)
            self.conn.send(self.data_to_send())
    def safe_send_headers(self, is_zombie, stream_id, headers):
        # make sure to have a lock
        if is_zombie(): # pragma: no cover
            raise exceptions.Http2ProtocolException("Zombie Stream")
        self.send_headers(stream_id, headers.fields)
        self.conn.send(self.data_to_send())
    def safe_send_body(self, is_zombie, stream_id, chunks):
        """Stream body chunks in frame-sized slices, honoring flow control.

        When the peer's window is too small for the next slice, the lock is
        dropped and the send retried after a short sleep.
        """
        for chunk in chunks:
            position = 0
            while position < len(chunk):
                self.lock.acquire()
                if is_zombie(): # pragma: no cover
                    self.lock.release()
                    raise exceptions.Http2ProtocolException("Zombie Stream")
                max_outbound_frame_size = self.max_outbound_frame_size
                frame_chunk = chunk[position:position + max_outbound_frame_size]
                if self.local_flow_control_window(stream_id) < len(frame_chunk):
                    # Window exhausted: back off briefly and retry this slice.
                    self.lock.release()
                    time.sleep(0.1)
                    continue
                # NOTE(review): send_data() sits outside the try/finally below,
                # so an exception here would leave the lock held; and the
                # `except ... raise e` clause only re-raises -- consider
                # widening the try block. Left as-is to preserve behavior.
                self.send_data(stream_id, frame_chunk)
                try:
                    self.conn.send(self.data_to_send())
                except Exception as e:
                    raise e
                finally:
                    self.lock.release()
                position += max_outbound_frame_size
        # Body fully sent: close our half of the stream (unless it died meanwhile).
        with self.lock:
            if is_zombie(): # pragma: no cover
                raise exceptions.Http2ProtocolException("Zombie Stream")
            self.end_stream(stream_id)
            self.conn.send(self.data_to_send())
class Http2Layer(base.Layer):
    def __init__(self, ctx, mode):
        """:param mode: proxy mode flag passed through from the parent layer."""
        super(Http2Layer, self).__init__(ctx)
        self.mode = mode
        # Maps stream ids (client-side numbering) to their stream-handler layers.
        self.streams = dict()
        # Maps server-side stream ids back to the originating client stream ids.
        self.server_to_client_stream_ids = dict([(0, 0)])
        self.client_conn.h2 = SafeH2Connection(self.client_conn, client_side=False, header_encoding=False)
        # make sure that we only pass actual SSL.Connection objects in here,
        # because otherwise ssl_read_select fails!
        self.active_conns = [self.client_conn.connection]
    def _initiate_server_conn(self):
        # Set up the h2 state machine for the upstream side and transmit the
        # HTTP/2 connection preface/initial SETTINGS.
        self.server_conn.h2 = SafeH2Connection(self.server_conn, client_side=True, header_encoding=False)
        self.server_conn.h2.initiate_connection()
        self.server_conn.send(self.server_conn.h2.data_to_send())
        self.active_conns.append(self.server_conn.connection)
    def connect(self):  # pragma: no cover
        # The connection is established before this layer runs; reconnecting is invalid.
        raise exceptions.Http2ProtocolException("HTTP2 layer should already have a connection.")
    def set_server(self):  # pragma: no cover
        # Switching upstream servers mid-connection is unsupported for HTTP/2.
        raise exceptions.Http2ProtocolException("Cannot change server for HTTP2 connections.")
    def disconnect(self):  # pragma: no cover
        # HTTP/2 multiplexes all streams on one connection; tearing it down is unsupported.
        raise exceptions.Http2ProtocolException("Cannot dis- or reconnect in HTTP2 connections.")
    def next_layer(self):  # pragma: no cover
        # WebSockets over HTTP/2?
        # CONNECT for proxying?
        raise NotImplementedError()
def _handle_event(self, event, source_conn, other_conn, is_server):
self.log(
"HTTP2 Event from {}".format("server" if is_server else "client"),
"debug",
[repr(event)]
)
if hasattr(event, 'stream_id'):
if is_server and event.stream_id % 2 == 1:
eid = self.server_to_client_stream_ids[event.stream_id]
else:
eid = event.stream_id
if isinstance(event, events.RequestReceived):
headers = netlib.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid] = Http2SingleStreamLayer(self, eid, headers)
self.streams[eid].timestamp_start = time.time()
self.streams[eid].start()
elif isinstance(event, events.ResponseReceived):
headers = netlib.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid].queued_data_length = 0
self.streams[eid].timestamp_start = time.time()
self.streams[eid].response_headers = headers
self.streams[eid].response_arrived.set()
elif isinstance(event, events.DataReceived):
if self.config.body_size_limit and self.streams[eid].queued_data_length > self.config.body_size_limit:
raise netlib.exceptions.HttpException("HTTP body too large. Limit is {}.".format(self.config.body_size_limit))
self.streams[eid].data_queue.put(event.data)
self.streams[eid].queued_data_length += len(event.data)
source_conn.h2.safe_increment_flow_control(event.stream_id, event.flow_controlled_length)
elif isinstance(event, events.StreamEnded):
self.streams[eid].timestamp_end = time.time()
self.streams[eid].data_finished.set()
elif isinstance(event, events.StreamReset):
self.streams[eid].zombie = time.time()
if eid in self.streams and event.error_code == 0x8:
if is_server:
other_stream_id = self.streams[eid].client_stream_id
else:
other_stream_id = self.streams[eid].server_stream_id
if other_stream_id is not None:
other_conn.h2.safe_reset_stream(other_stream_id, event.error_code)
elif isinstance(event, events.RemoteSettingsChanged):
new_settings = dict([(id, cs.new_value) for (id, cs) in six.iteritems(event.changed_settings)])
other_conn.h2.safe_update_settings(new_settings)
elif isinstance(event, events.ConnectionTerminated):
if event.error_code == h2.errors.NO_ERROR:
# Do not immediately terminate the other connection.
# Some streams might be still sending data to the client.
return False
else:
# Something terrible has happened - kill everything!
self.client_conn.h2.close_connection(
error_code=event.error_code,
last_stream_id=event.last_stream_id,
additional_data=event.additional_data
)
self.client_conn.send(self.client_conn.h2.data_to_send())
self._kill_all_streams()
return False
elif isinstance(event, events.PushedStreamReceived):
# pushed stream ids should be unique and not dependent on race conditions
# only the parent stream id must be looked up first
parent_eid = self.server_to_client_stream_ids[event.parent_stream_id]
with self.client_conn.h2.lock:
self.client_conn.h2.push_stream(parent_eid, event.pushed_stream_id, event.headers)
self.client_conn.send(self.client_conn.h2.data_to_send())
headers = netlib.http.Headers([[str(k), str(v)] for k, v in event.headers])
self.streams[event.pushed_stream_id] = Http2SingleStreamLayer(self, event.pushed_stream_id, headers)
self.streams[event.pushed_stream_id].timestamp_start = time.time()
self.streams[event.pushed_stream_id].pushed = True
self.streams[event.pushed_stream_id].parent_stream_id = parent_eid
self.streams[event.pushed_stream_id].timestamp_end = time.time()
self.streams[event.pushed_stream_id].request_data_finished.set()
self.streams[event.pushed_stream_id].start()
elif isinstance(event, events.PriorityUpdated):
stream_id = event.stream_id
if stream_id in self.streams.keys() and self.streams[stream_id].server_stream_id:
stream_id = self.streams[stream_id].server_stream_id
depends_on = event.depends_on
if depends_on in self.streams.keys() and self.streams[depends_on].server_stream_id:
depends_on = self.streams[depends_on].server_stream_id
# weight is between 1 and 256 (inclusive), but represented as uint8 (0 to 255)
frame = hyperframe.frame.PriorityFrame(stream_id, depends_on, event.weight - 1, event.exclusive)
self.server_conn.send(frame.serialize())
elif isinstance(event, events.TrailersReceived):
raise NotImplementedError()
return True
def _cleanup_streams(self):
death_time = time.time() - 10
for stream_id in self.streams.keys():
zombie = self.streams[stream_id].zombie
if zombie and zombie <= death_time:
self.streams.pop(stream_id, None)
def _kill_all_streams(self):
for stream in self.streams.values():
if not stream.zombie:
stream.zombie = time.time()
stream.request_data_finished.set()
stream.response_arrived.set()
stream.data_finished.set()
def __call__(self):
if self.server_conn:
self._initiate_server_conn()
preamble = self.client_conn.rfile.read(24)
self.client_conn.h2.initiate_connection()
self.client_conn.h2.receive_data(preamble)
self.client_conn.send(self.client_conn.h2.data_to_send())
try:
while True:
r = tcp.ssl_read_select(self.active_conns, 1)
for conn in r:
source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
is_server = (conn == self.server_conn.connection)
with source_conn.h2.lock:
try:
raw_frame = b''.join(http2.framereader.http2_read_raw_frame(source_conn.rfile))
except:
# read frame failed: connection closed
self._kill_all_streams()
return
incoming_events = source_conn.h2.receive_data(raw_frame)
source_conn.send(source_conn.h2.data_to_send())
for event in incoming_events:
if not self._handle_event(event, source_conn, other_conn, is_server):
# connection terminated: GoAway
self._kill_all_streams()
return
self._cleanup_streams()
except Exception as e:
self.log(repr(e), "info")
self.log(traceback.format_exc(), "debug")
self._kill_all_streams()
class Http2SingleStreamLayer(http._HttpTransmissionLayer, basethread.BaseThread):
def __init__(self, ctx, stream_id, request_headers):
    """One worker layer per HTTP/2 stream."""
    super(Http2SingleStreamLayer, self).__init__(
        ctx, name="Http2SingleStreamLayer-{}".format(stream_id)
    )
    # liveness / identity
    self.zombie = None  # timestamp once the stream is dead, else None
    self.client_stream_id = stream_id
    self.server_stream_id = None
    self.pushed = False
    # request side
    self.request_headers = request_headers
    self.request_data_queue = queue.Queue()
    self.request_queued_data_length = 0
    self.request_data_finished = threading.Event()
    # response side
    self.response_headers = None
    self.response_arrived = threading.Event()
    self.response_data_queue = queue.Queue()
    self.response_queued_data_length = 0
    self.response_data_finished = threading.Event()
@property
def data_queue(self):
    """Queue currently being filled: response data once headers arrived, else request data."""
    if self.response_arrived.is_set():
        return self.response_data_queue
    return self.request_data_queue
@property
def queued_data_length(self):
    """Accumulated byte count for the phase currently in flight."""
    return (
        self.response_queued_data_length
        if self.response_arrived.is_set()
        else self.request_queued_data_length
    )
@property
def data_finished(self):
    """Completion event for the phase currently in flight."""
    return (
        self.response_data_finished
        if self.response_arrived.is_set()
        else self.request_data_finished
    )
@queued_data_length.setter
def queued_data_length(self, v):
    # Mirror the getter: once the response has arrived, writes (including
    # the read-modify-write `+=` done in the DataReceived handler) must
    # target the response counter. The previous version always wrote the
    # request counter, so the getter read response_queued_data_length
    # while the setter updated request_queued_data_length -- response
    # body sizes never accumulated and the body_size_limit check could
    # not trigger for responses.
    if self.response_arrived.is_set():
        self.response_queued_data_length = v
    else:
        self.request_queued_data_length = v
def is_zombie(self):
    """Return True once this stream has been marked dead (zombie holds a timestamp)."""
    return self.zombie is not None
def read_request(self):
    """Block until the request is complete, then build an HTTPRequest model.

    Raises Http2ProtocolException if the stream was killed while waiting.
    """
    self.request_data_finished.wait()
    if self.zombie:  # pragma: no cover
        raise exceptions.Http2ProtocolException("Zombie Stream")
    # HTTP/2 pseudo-headers carry the request-line equivalents
    authority = self.request_headers.get(':authority', '')
    method = self.request_headers.get(':method', 'GET')
    scheme = self.request_headers.get(':scheme', 'https')
    path = self.request_headers.get(':path', '/')
    # strip pseudo-headers from the header set passed downstream
    # (NOTE(review): :authority is not removed here -- confirm intended)
    self.request_headers.clear(":method")
    self.request_headers.clear(":scheme")
    self.request_headers.clear(":path")
    host = None
    port = None
    if path == '*' or path.startswith("/"):
        first_line_format = "relative"
    elif method == 'CONNECT':  # pragma: no cover
        raise NotImplementedError("CONNECT over HTTP/2 is not implemented.")
    else:  # pragma: no cover
        first_line_format = "absolute"
        # FIXME: verify if path or :host contains what we need
        scheme, host, port, _ = netlib.http.url.parse(path)
    # :authority (if present) wins over anything parsed from the path
    if authority:
        host, _, port = authority.partition(':')
    if not host:
        host = 'localhost'
    if not port:
        port = 443 if scheme == 'https' else 80
    port = int(port)
    # drain whatever body data has been queued so far
    data = []
    while self.request_data_queue.qsize() > 0:
        data.append(self.request_data_queue.get())
    data = b"".join(data)
    return models.HTTPRequest(
        first_line_format,
        method,
        scheme,
        host,
        port,
        path,
        b"HTTP/2.0",
        self.request_headers,
        data,
        timestamp_start=self.timestamp_start,
        timestamp_end=self.timestamp_end,
    )
def read_request_body(self, request):  # pragma: no cover
    """Unused here: request body bytes are consumed inside read_request()."""
    raise NotImplementedError()
def send_request(self, message):
    """Forward the request onto the server connection.

    Waits for a free outbound stream slot, allocates the server-side
    stream id, then sends headers and body.
    """
    if self.pushed:
        # nothing to do here
        return
    # Wait for a free slot, then KEEP the h2 lock so the stream id we
    # allocate below cannot be raced away by another stream.
    while True:
        if self.zombie:  # pragma: no cover
            raise exceptions.Http2ProtocolException("Zombie Stream")
        self.server_conn.h2.lock.acquire()
        max_streams = self.server_conn.h2.remote_settings.max_concurrent_streams
        if self.server_conn.h2.open_outbound_streams + 1 >= max_streams:
            # wait until we get a free slot for a new outgoing stream
            self.server_conn.h2.lock.release()
            time.sleep(0.1)
            continue
        # keep the lock
        break
    # We must not assign a stream id if we are already a zombie.
    if self.zombie:  # pragma: no cover
        raise exceptions.Http2ProtocolException("Zombie Stream")
    # Allocate the server-side stream id exactly once (the previous
    # version redundantly repeated this assignment and the id-mapping
    # update a second time after building the headers).
    self.server_stream_id = self.server_conn.h2.get_next_available_stream_id()
    self.server_to_client_stream_ids[self.server_stream_id] = self.client_stream_id
    headers = message.headers.copy()
    headers.insert(0, ":path", message.path)
    headers.insert(0, ":method", message.method)
    headers.insert(0, ":scheme", message.scheme)
    try:
        self.server_conn.h2.safe_send_headers(
            self.is_zombie,
            self.server_stream_id,
            headers,
        )
    finally:
        # always release the lock acquired in the slot-wait loop above
        # (the old `except Exception as e: raise e` clause was a no-op)
        self.server_conn.h2.lock.release()
    self.server_conn.h2.safe_send_body(
        self.is_zombie,
        self.server_stream_id,
        message.body
    )
    if self.zombie:  # pragma: no cover
        raise exceptions.Http2ProtocolException("Zombie Stream")
def read_response_headers(self):
    """Block until the response headers arrived, then build an HTTPResponse model."""
    self.response_arrived.wait()
    if self.zombie:  # pragma: no cover
        raise exceptions.Http2ProtocolException("Zombie Stream")
    # :status defaults to 502 (bad gateway) when absent
    status_code = int(self.response_headers.get(':status', 502))
    headers = self.response_headers.copy()
    headers.clear(":status")
    return models.HTTPResponse(
        http_version=b"HTTP/2.0",
        status_code=status_code,
        reason='',
        headers=headers,
        content=None,
        timestamp_start=self.timestamp_start,
        timestamp_end=self.timestamp_end,
    )
def read_response_body(self, request, response):
    """Yield response body chunks until the stream signals completion.

    Polls the queue with a 1s timeout so zombie state is re-checked
    periodically instead of blocking forever.
    """
    while True:
        try:
            yield self.response_data_queue.get(timeout=1)
        except queue.Empty:
            pass
        if self.response_data_finished.is_set():
            if self.zombie:  # pragma: no cover
                raise exceptions.Http2ProtocolException("Zombie Stream")
            # drain anything still queued, then stop
            while self.response_data_queue.qsize() > 0:
                yield self.response_data_queue.get()
            break
        if self.zombie:  # pragma: no cover
            raise exceptions.Http2ProtocolException("Zombie Stream")
def send_response_headers(self, response):
    """Send the response headers to the client, prefixed with the :status pseudo-header."""
    h = response.headers.copy()
    h.insert(0, ":status", str(response.status_code))
    with self.client_conn.h2.lock:
        self.client_conn.h2.safe_send_headers(
            self.is_zombie,
            self.client_stream_id,
            h
        )
    if self.zombie:  # pragma: no cover
        raise exceptions.Http2ProtocolException("Zombie Stream")
def send_response_body(self, _response, chunks):
    """Forward response body chunks to the client over this stream."""
    self.client_conn.h2.safe_send_body(
        self.is_zombie,
        self.client_stream_id,
        chunks
    )
    if self.zombie:  # pragma: no cover
        raise exceptions.Http2ProtocolException("Zombie Stream")
def check_close_connection(self, flow):
    """Always report the exchange as complete for this layer."""
    # This layer only handles a single stream.
    # RFC 7540 8.1: An HTTP request/response exchange fully consumes a single stream.
    return True
def set_server(self, *args, **kwargs): # pragma: | |
get children sections of root
for s in sections:
txt += u"""<section>\n<sectionLabel> {0} </sectionLabel>\n""".format(s)
markups = self.getSectionMarkups(s)
for m in markups:
txt += u"<sentence>\n<sentenceNumber> %d </sentenceNumber>\n<sentenceOffset> %d </sentenceOffset></sentence>\n%s"%(
(m[0],sentenceOffsets[m[0]],m[1].getXML()))
txt += u"""</section>\n"""
return ConTextDocumentXMLSkel.format(txt)
def __unicode__(self):
    """Return the document's minimal text representation: a 42-char underscore rule."""
    return u'_' * 42 + "\n"
def __str__(self):
    """Delegate to the unicode representation."""
    return self.__unicode__()
def __repr__(self):
    """Same text as __str__ / __unicode__."""
    return self.__unicode__()  # .encode('utf-8')
def getConTextModeNodes(self, mode):
    """
    Deprecated. This functionality should be accessed via the ConTextMarkup object now returned from getDocumentGraph()
    """
    print("This function is deprecated and will be eliminated shortly.")
    print("The same functionality can be accessed through the ConTextMarkup object returned from getDocumentGraph()")
    # collect nodes of the requested category and return them sorted
    return sorted(
        node for node, attrs in self.__documentGraph.nodes(data=True)
        if attrs['category'] == mode
    )
def computeDocumentGraph(self, verbose=False):
    """Create a single document graph from the union of the graphs created
    for each sentence in the archive. Note that the algorithm in NetworkX
    is different based on whether the Python version is greater than or
    equal to 2.6

    Fix: the verbose format strings used "{0d}", which str.format rejects
    (field name "0d" raises KeyError); the correct spec is "{0:d}".
    The unused local `ic` was removed.
    """
    # Note that this as written does not include the currentGraph in the DocumentGraph
    # Maybe this should be changed
    self.__documentGraph = ConTextMarkup()
    if verbose:
        print("Document markup has {0:d} edges".format(self.__document.number_of_edges()))
    markups = [e[1] for e in self.__document.edges(data=True) if e[2].get('category') == 'markup']
    if verbose:
        print("Document markup has {0:d} conTextMarkup objects".format(len(markups)))
    for i in range(len(markups)):
        m = markups[i]
        if verbose:
            print("markup {0:d} has {1:d} total items including {2:d} targets".format(i, m.number_of_nodes(), m.getNumMarkedTargets()))
        self.__documentGraph = nx.union(m, self.__documentGraph)
        if verbose:
            print("documentGraph now has {0:d} nodes".format(self.__documentGraph.number_of_nodes()))
class ConTextMarkup(nx.DiGraph):
"""
base class for context document.
build around markedTargets a list of termObjects representing desired terms
found in text and markedModifiers, tagObjects found in the text
"""
def __init__(self, txt=u'', unicodeEncoding='utf-8'):
    """txt is the string to parse"""
    # __document captures the document level structure
    # for each sentence and then put in the archives when the next sentence
    # is processed
    super(ConTextMarkup, self).__init__()
    # Graph-level state lives in self.graph under string keys matching
    # what getText/getRawText/getScope/getScopeUpdated read. The previous
    # version passed these as keyword arguments (__txt=None, __rawtxt=txt,
    # ...), which (a) misspelled the "__rawTxt" key the getters use and
    # (b) was subject to private-name mangling inside the class body, so
    # the stored keys never matched the string literals used elsewhere
    # and the constructor's txt argument was unreachable.
    self.graph["__txt"] = None
    self.graph["__rawTxt"] = txt
    self.graph["__scope"] = None
    self.graph["__SCOPEUPDATED"] = False
    self.__document = nx.DiGraph()
    self.__document.add_node("top", category="document")
    self.__VERBOSE = False
    self.__tagID = 0
    self.__unicodeEncoding = unicodeEncoding
def getUnicodeEncoding(self):
    """Return the encoding name supplied at construction (default 'utf-8')."""
    return self.__unicodeEncoding
def getNextTagID(self):
    """Return a unique tag id (a uuid1-derived integer).

    The previous body contained an unreachable second `return`
    (u"%06d" % self.__tagID) left over from a counter-based scheme;
    it has been removed.
    """
    return uuid.uuid1().int
def toggleVerbose(self):
    """toggles the boolean value for verbose mode"""
    self.__VERBOSE = not self.__VERBOSE
def getVerbose(self):
    """Return the current verbose flag."""
    return self.__VERBOSE
def setRawText(self, txt=u''):
    """
    sets the current txt to txt and resets the current attributes to empty
    values, but does not modify the object archive
    """
    if self.getVerbose():
        print("Setting text to", txt)
    # store the new raw text and invalidate all derived graph state
    self.graph.update({
        "__rawTxt": txt,
        "__txt": None,
        "__scope": None,
        "__SCOPEUPDATED": False,
    })
def getText(self):
    """Return the cleaned text stored under the '__txt' graph key, or u''."""
    return self.graph.get("__txt",u'')
def getScope(self):
    """Return the scope stored by cleanText() (a (0, len) tuple), or u''."""
    return self.graph.get("__scope",u'')
def getScopeUpdated(self):
    """Return the '__SCOPEUPDATED' graph flag (None if the key is absent)."""
    return self.graph.get("__SCOPEUPDATED")
def getRawText(self):
    """Return the raw (uncleaned) text stored under '__rawTxt', or u''."""
    return self.graph.get("__rawTxt",u'')
def getNumberSentences(self): # !!! Need to rewrite this to match graph
    """Return len() of the internal document graph -- its node count, not
    a true sentence count (see the rewrite note above)."""
    return len(self.__document)
def cleanText(self, stripNonAlphaNumeric=False, stripNumbers=False):
    """Need to rename. applies the regular expression scrubbers to rawTxt"""
    # optionally scrub non-alphanumerics, then always collapse whitespace
    txt = r1.sub(" ", self.getRawText()) if stripNonAlphaNumeric else self.getRawText()
    # clean up white spaces
    txt = r2.sub(" ", txt)
    if stripNumbers:
        txt = r3.sub("", txt)
    # record the cleaned text and its full-span scope on the graph
    self.graph["__scope"] = (0, len(txt))
    self.graph["__txt"] = txt
    if self.getVerbose():
        print(u"cleaned text is now", self.getText())
    return txt
def getXML(self):
    """Serialize this markup to XML: one node entry per tag (attributes plus
    modifiedBy/modifies relationships) and one edge entry per relationship,
    wrapped in ConTextMarkupXMLSkel.

    NOTE(review): relies on self.nodes(...)/self.edges(...) returning
    sortable lists (NetworkX 1.x behavior) -- confirm before upgrading.
    """
    nodes = self.nodes(data=True)
    nodes.sort()
    nodeString = u''
    for n in nodes:
        # serialize the node's attribute dict in sorted key order
        attributeString = u''
        keys = list(n[1].keys())
        keys.sort()
        for k in keys:
            attributeString += """<{0}> {1} </{2}>\n""".format(k,n[1][k],k)
        # predecessors modify this node; successors are modified by it
        modificationString = u''
        modifiedBy = self.predecessors(n[0])
        if modifiedBy:
            for m in modifiedBy:
                modificationString += u"""<modifiedBy>\n"""
                modificationString += u"""<modifyingNode> {0} </modifyingNode>\n""".format(m.getTagID())
                modificationString += u"""<modifyingCategory> {0} </modifyingCategory>\n""".format(m.getCategory())
                modificationString += u"""</modifiedBy>\n"""
        modifies = self.successors(n[0])
        if modifies:
            for m in modifies:
                modificationString += u"""<modifies>\n"""
                modificationString += u"""<modifiedNode> {0} </modifiedNode>\n""".format(m.getTagID())
                modificationString += u"""</modifies>\n"""
        nodeString += nodeXMLSkel.format(attributeString+"{0}".format(n[0].getXML())+modificationString )
    edges = self.edges(data=True)
    edges.sort()
    edgeString = u''
    for e in edges:
        keys = list(e[2].keys())
        keys.sort()
        attributeString = u''
        for k in keys:
            attributeString += """<{0}> {1} </{2}>\n""".format(k,e[2][k],k)
        edgeString += "{0}".format(edgeXMLSkel.format(e[0].getTagID(),e[1].getTagID(),attributeString))
    return ConTextMarkupXMLSkel.format(xmlScrub(self.getRawText()),xmlScrub(self.getText()),
                                       nodeString,edgeString)
def __unicode__(self):
    """Human-readable dump: raw and cleaned text, then each marked target
    with its modifiers and modifiers-of-modifiers.

    NOTE(review): calls cleanText() as a side effect, which rewrites the
    '__txt' and '__scope' graph keys.
    """
    txt = u'_'*42+"\n"
    txt += 'rawText: {0}\n'.format(self.getRawText())
    txt += 'cleanedText: {0}\n'.format(self.cleanText())
    nodes = [n for n in self.nodes(data=True) if n[1].get('category','') == 'target']
    nodes.sort()
    for n in nodes:
        txt += "*"*32+"\n"
        txt += "TARGET: {0}\n".format(n[0].__unicode__())
        # predecessors of a target are the modifiers applied to it
        modifiers = self.predecessors(n[0])
        modifiers.sort()
        for m in modifiers:
            txt += "-"*4+"MODIFIED BY: {0}\n".format(m.__unicode__())
            mms = self.predecessors(m)
            if mms:
                for ms in mms:
                    txt += "-"*8+"MODIFIED BY: {0}\n".format(ms.__unicode__())
    txt += u"_"*42+"\n"
    return txt
def __str__(self):
    """Delegate to the unicode representation."""
    return self.__unicode__()  # .encode('utf-8')
def __repr__(self):
    """Same text as __str__ / __unicode__."""
    return self.__unicode__()  # .encode('utf-8')
def getConTextModeNodes(self, mode):
    """Return the marked nodes whose 'category' attribute equals *mode*, sorted."""
    return sorted(n[0] for n in self.nodes(data=True) if n[1]['category'] == mode)
def updateScopes(self):
    """
    update the scopes of all the marked modifiers in the txt. The scope
    of a modifier is limited by its own span, the span of modifiers in the
    same category marked in the text, and modifiers with rule 'terminate'.
    """
    if self.getVerbose():
        print(u"updating scopes")
    # Record the flag where getScopeUpdated() looks for it: it reads
    # self.graph["__SCOPEUPDATED"]. The previous attribute assignment
    # (self.__SCOPEUPDATED = True) created a name-mangled instance
    # attribute that the getter never saw.
    self.graph["__SCOPEUPDATED"] = True
    # make sure each tag has its own self-limited scope
    modifiers = self.getConTextModeNodes("modifier")
    for modifier in modifiers:
        if self.getVerbose():
            print(u"old scope for {0} is {1}".format(modifier.__str__(), modifier.getScope()))
        modifier.setScope()
        if self.getVerbose():
            print(u"new scope for {0} is {1}".format(modifier.__str__(), modifier.getScope()))
    # Now limit scope based on the domains of the spans of the other
    # modifier
    for i in range(len(modifiers) - 1):
        modifier = modifiers[i]
        for j in range(i + 1, len(modifiers)):
            modifier2 = modifiers[j]
            # a 'terminate' modifier that truncates another's scope is
            # linked to it with an edge
            if modifier.limitScope(modifier2) and \
               modifier2.getRule().lower() == 'terminate':
                self.add_edge(modifier2, modifier)
            if modifier2.limitScope(modifier) and \
               modifier.getRule().lower() == 'terminate':
                self.add_edge(modifier, modifier2)
def markItems(self, items, mode="target"):
    """tags the sentence for a list of items
    items: a list of contextItems"""
    # an empty/None item list simply marks nothing
    for item in items or []:
        self.add_nodes_from(self.markItem(item, ConTextMode=mode), category=mode)
def markItem(self, item, ConTextMode="target", ignoreCase=True):
    """
    markup the current text with the current item.
    If ignoreCase is True (default), the regular expression is compiled with
    IGNORECASE.

    Returns the list of tagObject instances created, one per regex match.
    (Fix: the loop variable `iter` shadowed the builtin of the same name;
    renamed to `match`.)
    """
    if not self.getText():
        self.cleanText()
    # See if we have already created a regular expression
    # NOTE(review): the cache is keyed on the item's literal only, so two
    # items sharing a literal but differing in getRE() collide -- confirm.
    if not item.getLiteral() in compiledRegExprs:
        if not item.getRE():
            regExp = r"\b{}\b".format(item.getLiteral())
            if self.getVerbose():
                print("generating regular expression", regExp)
        else:
            regExp = item.getRE()
            if self.getVerbose():
                print("using provided regular expression", regExp)
        if ignoreCase:
            r = re.compile(regExp, re.IGNORECASE | re.UNICODE)
        else:
            r = re.compile(regExp, re.UNICODE)
        compiledRegExprs[item.getLiteral()] = r
    else:
        r = compiledRegExprs[item.getLiteral()]
    terms = []
    for match in r.finditer(self.getText()):
        tO = tagObject(item, ConTextMode, tagid=self.getNextTagID(),
                       scope=self.getScope())
        tO.setSpan(match.span())
        tO.setPhrase(match.group())
        tO.setMatchedGroupDictionary(match.groupdict())
        if self.getVerbose():
            print(u"marked item", tO)
        terms.append(tO)
    return terms
def pruneMarks(self):
    """
    prune Marked objects by deleting any objects that lie within the span of
    another object. Currently modifiers and targets are treated separately
    """
    # delegate to the private pairwise pruner over all (node, attrs) pairs
    self.__prune_marks(self.nodes(data=True))
def dropInactiveModifiers(self):
# if self.getVerbose():
# print("### in dropInactiveModifiers.")
# print("Raw:", self.getRawText())
# print(" All modifiers:")
# for n in self.getConTextModeNodes("modifier") :
# print(n,self.degree(n))
# print("All targets ({}):".format(self.getNumMarkedTargets()))
# for n in self.getMarkedTargets() :
# print(n)
if self.getNumMarkedTargets() == 0:
if self.getVerbose():
print("No targets in this sentence; dropping ALL modifiers.")
mnodes = self.getConTextModeNodes("modifier")
else:
mnodes = [ n for n in self.getConTextModeNodes("modifier") if self.degree(n) == 0]
if self.getVerbose() and mnodes:
print(u"dropping the following inactive modifiers")
for mn in mnodes:
print(mn)
self.remove_nodes_from(mnodes)
def pruneModifierRelationships(self):
    """Initially modifiers may be applied to multiple targets. This function
    computes the text difference between the modifier and each modified
    target and keeps only the minimum distance relationship
    Finally, we make sure that there are no self modifying modifiers present (e.g. "free" in
    the phrase "free air" modifying the target "free air").
    """
    modifiers = self.getConTextModeNodes("modifier")
    for m in modifiers:
        # successors of a modifier are the targets it currently modifies
        modifiedBy = self.successors(m)
        if modifiedBy and len(modifiedBy) > 1:
            # closest target wins: min over (distance, target) tuples
            minm = min([ (m.dist(mb),mb) for mb in modifiedBy ])
            edgs = self.edges(m)
            # spare the edge to the closest target, delete the rest
            edgs.remove((m,minm[1]))
            if self.getVerbose():
                print(u"deleting relationship(s)",edgs)
            self.remove_edges_from(edgs)
def pruneSelfModifyingRelationships(self):
    """Remove modifiers whose modified node's span encompasses their own
    (e.g. "free" in the phrase "free air" modifying the target "free air")."""
    doomed = []
    for mod in self.getConTextModeNodes("modifier"):
        successors = self.successors(mod)
        if not successors:
            continue
        for succ in successors:
            if self.getVerbose():
                print(succ, mod, succ.encompasses(mod))
            if succ.encompasses(mod):
                doomed.append(mod)
    if self.getVerbose():
        print("removing the following self modifying nodes", doomed)
    self.remove_nodes_from(doomed)
def __prune_marks(self, marks):
    """Drop any mark whose span is encompassed by another mark of the same
    category, keeping the larger span. Pairwise O(n^2) comparison over the
    (node, attrs) pairs in *marks* (mutates the list by sorting it)."""
    if len(marks) < 2:
        return
    # this can surely be done faster
    marks.sort()
    nodesToRemove = []
    for i in range(len(marks)-1):
        t1 = marks[i]
        if t1[0] not in nodesToRemove:
            for j in range(i+1,len(marks)):
                t2 = marks[j]
                if t1[0].encompasses(t2[0]) and t1[1]['category'] == t2[1]['category']:
                    nodesToRemove.append(t2[0])
                elif t2[0].encompasses(t1[0]) and t2[1]['category'] == t1[1]['category']:
                    # t1 itself is swallowed; no point comparing it further
                    nodesToRemove.append(t1[0])
                    break
    if self.getVerbose():
        print(u"pruning the following nodes")
        for n in nodesToRemove:
            print(n)
    self.remove_nodes_from(nodesToRemove)
def dropMarks(self,category="exclusion"):
"""Drop any targets that have the category equal to category"""
if self.getVerbose():
print("in dropMarks")
for n in self.nodes():
print(n.getCategory(),n.isA(category.lower()))
dnodes = [n for n in self.nodes() if n.isA( category | |
# Test fixtures: captured `show` command outputs for BGP neighbor CLI tests.
# The string contents are expected verbatim output and must not be edited.

# Full `show bgp neighbor` output for an established IPv4 eBGP session.
bgp_v4_neighbors_output = \
"""
BGP neighbor is 10.0.0.57, remote AS 64600, local AS 65100, external link
Description: ARISTA01T1
Member of peer-group PEER_V4 for session parameters
BGP version 4, remote router ID 172.16.58.3, local router ID 10.1.0.32
BGP state = Established, up for 00:00:39
Last read 00:00:00, Last write 00:00:00
Hold time is 10, keepalive interval is 3 seconds
Configured hold time is 10, keepalive interval is 3 seconds
Neighbor capabilities:
4 Byte AS: advertised and received
AddPath:
IPv4 Unicast: RX advertised IPv4 Unicast and received
Route refresh: advertised and received(new)
Address Family IPv4 Unicast: advertised and received
Hostname Capability: advertised (name: vlab-01,domain name: n/a) not received
Graceful Restart Capability: advertised and received
Remote Restart timer is 300 seconds
Address families by peer:
none
Graceful restart information:
End-of-RIB send: IPv4 Unicast
End-of-RIB received: IPv4 Unicast
Local GR Mode: Restart*
Remote GR Mode: Helper
R bit: False
Timers:
Configured Restart Time(sec): 240
Received Restart Time(sec): 300
IPv4 Unicast:
F bit: False
End-of-RIB sent: Yes
End-of-RIB sent after update: No
End-of-RIB received: Yes
Timers:
Configured Stale Path Time(sec): 360
Configured Selection Deferral Time(sec): 360
Message statistics:
Inq depth is 0
Outq depth is 0
Sent Rcvd
Opens: 2 1
Notifications: 2 2
Updates: 3203 3202
Keepalives: 14 15
Route Refresh: 0 0
Capability: 0 0
Total: 3221 3220
Minimum time between advertisement runs is 0 seconds
For address family: IPv4 Unicast
PEER_V4 peer-group member
Update group 1, subgroup 1
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Inbound path policy configured
Outbound path policy configured
Route map for incoming advertisements is *FROM_BGP_PEER_V4
Route map for outgoing advertisements is *TO_BGP_PEER_V4
6400 accepted prefixes
Connections established 1; dropped 0
Last reset 00:01:01, No AFI/SAFI activated for peer
Local host: 10.0.0.56, Local port: 179
Foreign host: 10.0.0.57, Foreign port: 44731
Nexthop: 10.0.0.56
Nexthop global: fc00::71
Nexthop local: fe80::5054:ff:fea9:41c2
BGP connection: shared network
BGP Connect Retry Timer in Seconds: 10
Estimated round trip time: 20 ms
Read thread: on Write thread: on FD used: 28
"""
# Error message for a neighbor address that is not configured.
bgp_v4_neighbor_invalid = \
"""Error: Bgp neighbor 172.16.31.10 not configured"""
# Error message for a malformed IPv4 neighbor address argument.
bgp_v4_neighbor_invalid_address = \
"""Error: invalid_address is not valid ipv4 address"""
# `show bgp neighbor <ip> advertised-routes` output (IPv4).
bgp_v4_neighbor_output_adv_routes = \
"""
BGP table version is 6405, local router ID is 10.1.0.32, vrf id 0
Default local pref 100, local AS 65100
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 0.0.0.0/0 0.0.0.0 0 64600 65534 6666 6667 i
*> 10.1.0.32/32 0.0.0.0 0 32768 i
*> 172.16.58.3/32 0.0.0.0 0 64600 i
*> 172.16.17.32/32 0.0.0.0 0 64600 i
*> 172.16.58.3/32 0.0.0.0 0 64600 i
*> 172.16.31.10/32 0.0.0.0 0 64600 i
*> 192.168.0.0/21 0.0.0.0 0 32768 i
*> 192.168.8.0/25 0.0.0.0 0 64600 65501 i
*> 192.168.8.128/25 0.0.0.0 0 64600 65501 i
*> 192.168.16.0/25 0.0.0.0 0 64600 65502 i
*> 192.168.16.128/25
0.0.0.0 0 64600 65502 i
*> 192.168.24.0/25 0.0.0.0 0 64600 65503 i
*> 192.168.24.128/25
0.0.0.0 0 64600 65503 i
*> 192.168.32.0/25 0.0.0.0 0 64600 65504 i
*> 192.168.32.128/25
0.0.0.0 0 64600 65504 i
*> 192.168.40.0/25 0.0.0.0 0 64600 65505 i
*> 192.168.40.128/25
0.0.0.0 0 64600 65505 i
*> 192.168.48.0/25 0.0.0.0 0 64600 65506 i
*> 192.168.48.128/25
0.0.0.0 0 64600 65506 i
*> 192.168.56.0/25 0.0.0.0 0 64600 65507 i
*> 192.168.56.128/25
0.0.0.0 0 64600 65507 i
"""
# `show bgp neighbor <ip> received-routes` output (IPv4).
bgp_v4_neighbor_output_recv_routes = \
"""
BGP table version is 6405, local router ID is 10.1.0.32, vrf id 0
Default local pref 100, local AS 65100
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 0.0.0.0/0 10.0.0.57 0 64600 65534 6666 6667 i
*> 172.16.58.3/32 10.0.0.57 0 64600 i
*> 192.168.8.0/25 10.0.0.57 0 64600 65501 i
*> 192.168.8.128/25 10.0.0.57 0 64600 65501 i
*> 192.168.16.0/25 10.0.0.57 0 64600 65502 i
*> 192.168.16.128/25
10.0.0.57 0 64600 65502 i
*> 192.168.24.0/25 10.0.0.57 0 64600 65503 i
*> 192.168.24.128/25
10.0.0.57 0 64600 65503 i
*> 192.168.32.0/25 10.0.0.57 0 64600 65504 i
*> 192.168.32.128/25
10.0.0.57 0 64600 65504 i
*> 192.168.40.0/25 10.0.0.57 0 64600 65505 i
*> 192.168.40.128/25
10.0.0.57 0 64600 65505 i
*> 192.168.48.0/25 10.0.0.57 0 64600 65506 i
*> 192.168.48.128/25
10.0.0.57 0 64600 65506 i
*> 192.168.56.0/25 10.0.0.57 0 64600 65507 i
*> 192.168.56.128/25
10.0.0.57 0 64600 65507 i
"""
# Full `show bgp neighbor` output for an established IPv6 eBGP session.
bgp_v6_neighbors_output = \
"""
BGP neighbor is fc00::72, remote AS 64600, local AS 65100, external link
Description: ARISTA01T1
Member of peer-group PEER_V6 for session parameters
BGP version 4, remote router ID 172.16.58.3, local router ID 10.1.0.32
BGP state = Established, up for 01:06:23
Last read 00:00:02, Last write 00:00:00
Hold time is 10, keepalive interval is 3 seconds
Configured hold time is 10, keepalive interval is 3 seconds
Neighbor capabilities:
4 Byte AS: advertised and received
AddPath:
IPv6 Unicast: RX advertised IPv6 Unicast and received
Route refresh: advertised and received(new)
Address Family IPv6 Unicast: advertised and received
Hostname Capability: advertised (name: vlab-01,domain name: n/a) not received
Graceful Restart Capability: advertised and received
Remote Restart timer is 300 seconds
Address families by peer:
none
Graceful restart information:
End-of-RIB send: IPv6 Unicast
End-of-RIB received: IPv6 Unicast
Local GR Mode: Restart*
Remote GR Mode: Helper
R bit: False
Timers:
Configured Restart Time(sec): 240
Received Restart Time(sec): 300
IPv6 Unicast:
F bit: False
End-of-RIB sent: Yes
End-of-RIB sent after update: No
End-of-RIB received: Yes
Timers:
Configured Stale Path Time(sec): 360
Configured Selection Deferral Time(sec): 360
Message statistics:
Inq depth is 0
Outq depth is 0
Sent Rcvd
Opens: 1 1
Notifications: 0 0
Updates: 3206 3202
Keepalives: 1328 1329
Route Refresh: 0 0
Capability: 0 0
Total: 4535 4532
Minimum time between advertisement runs is 0 seconds
For address family: IPv6 Unicast
PEER_V6 peer-group member
Update group 2, subgroup 2
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Inbound path policy configured
Outbound path policy configured
Route map for incoming advertisements is *FROM_BGP_PEER_V6
Route map for outgoing advertisements is *TO_BGP_PEER_V6
6400 accepted prefixes
Connections established 1; dropped 0
Last reset 01:06:46, Waiting for peer OPEN
Local host: fc00::71, Local port: 59726
Foreign host: fc00::72, Foreign port: 179
Nexthop: 10.0.0.56
Nexthop global: fc00::71
Nexthop local: fe80::5054:ff:fea9:41c2
BGP connection: shared network
BGP Connect Retry Timer in Seconds: 10
Estimated round trip time: 4 ms
Read thread: on Write thread: on FD used: 30
"""
# `show bgp neighbor <ipv6> advertised-routes` output (IPv6).
bgp_v6_neighbor_output_adv_routes = \
"""
BGP table version is 6407, local router ID is 10.1.0.32, vrf id 0
Default local pref 100, local AS 65100
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> ::/0 :: 0 64600 65534 6666 6667 i
*> fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128 :: 0 64600 i
*> fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/128 :: 0 64600 i
*> fdf8:f53e:61e4::18/128 :: 0 64600 i
*> fdf8:f53e:61e4::18/128 :: 0 64600 i
*> 20c0:a808::/64 :: 0 64600 65501 i
*> 20c0:a808:0:80::/64
:: 0 64600 65501 i
*> 20c0:a810::/64 :: 0 64600 65502 i
*> 20c0:a810:0:80::/64
:: 0 64600 65502 i
*> 20c0:a818::/64 :: 0 64600 65503 i
*> 20c0:a818:0:80::/64
:: 0 64600 65503 i
*> 20c0:a820::/64 :: 0 64600 65504 i
*> 20c0:a820:0:80::/64
:: 0 64600 65504 i
*> 20c0:a828::/64 :: 0 64600 65505 i
*> 20c0:a828:0:80::/64
:: 0 64600 65505 i
*> 20c0:a830::/64 :: 0 64600 65506 i
*> 20c0:a830:0:80::/64
:: 0 64600 65506 i
*> 20c0:a838::/64 :: 0 64600 65507 i
*> 20c0:a838:0:80::/64
:: 0 64600 65507 i
*> 20c0:a840::/64 :: 0 64600 65508 i
*> 20c0:a840:0:80::/64
:: 0 64600 65508 i
*> 20c0:a848::/64 :: 0 64600 65509 i
*> 20c0:a848:0:80::/64
:: 0 64600 65509 i
*> 20c0:a850::/64 :: 0 64600 65510 i
*> 20c0:a850:0:80::/64
:: 0 64600 65510 i
*> 20c0:a858::/64 :: 0 64600 65511 i
*> 20c0:a858:0:80::/64
:: 0 64600 65511 i
*> 20c0:a860::/64 :: 0 64600 65512 i
*> 20c0:a860:0:80::/64
:: 0 64600 65512 i
*> 20c0:a868::/64 :: 0 64600 65513 i
*> 20c0:a868:0:80::/64
:: 0 64600 65513 i
"""
bgp_v6_neighbor_output_recv_routes = \
"""
BGP table version is 6407, local router ID is 10.1.0.32, vrf id 0
Default local pref 100, local AS 65100
Status codes: s | |
<filename>feedbag/day2/2.passwords.py
#original sample data
#policy = [
#"1-3 a: abcde",
#"1-3 b: cdefg",
#"2-9 c: ccccccccc"
#]
# Given puzzle data
policy = [
"3-5 f: fgfff",
"6-20 n: qlzsnnnndwnlhwnxhvjn",
"6-7 j: jjjjjwrj",
"8-10 g: gggggggggg",
"5-6 t: ttttttft",
"6-11 h: khmchszhmzm",
"4-6 q: qqbjqqqj",
"6-8 j: gtkwwjjj",
"13-14 w: lwgwrwczwwhkww",
"2-4 p: vcnptzdppvpdzp",
"4-6 n: pmlnzwrnbnxnpmwmhbnz",
"4-9 w: wwprdwwwx",
"13-20 b: rbbcbbbbbbbbsbbbbbmj",
"1-3 r: zwhrnvr",
"4-6 f: fmfgfcf",
"13-15 z: zxzzzzzzzzvxkplvzzz",
"12-18 r: phjqccgmgzmdwxgtrtb",
"5-19 s: ssssssssssssssssssss",
"6-7 d: pddqmdd",
"7-9 p: pwrlpvlhpfpppzp",
"4-5 m: mpmmmm",
"9-10 s: ssxsssssrj",
"6-9 p: lpjrvpppp",
"6-13 r: rrrzvtrgrhqxqrvrvwzr",
"1-3 p: dpjp",
"6-7 w: wkjnmww",
"1-5 r: cfqhhr",
"5-9 m: wmmlpgmqmmrm",
"12-13 x: sxgnwxxkjwrxr",
"6-8 n: lnnrnnnnn",
"10-11 q: kqqqzqqfqqqqq",
"12-17 k: kkkkkkkkkkpkkhgvnjhk",
"12-13 v: vvvvvvvvvvvvvv",
"12-15 h: hhhhhhhjkhhhhhth",
"6-7 l: hmqtlllnllnlmtqnn",
"6-9 m: mcmmmdnmh",
"3-5 k: kkhtn",
"2-4 n: tnfjmnnn",
"5-6 j: cjwlmn",
"4-11 b: bcbbkrlgcbbc",
"5-10 r: rcrcgrrrzwrrxgpzwrcm",
"3-4 n: znnn",
"11-14 s: sssjsssssssssss",
"6-7 d: rwdddddmdddddkdd",
"1-4 s: gssssskssss",
"4-5 j: jjjjzj",
"11-12 j: cjjjgpjxnjjk",
"3-6 z: zzkdzzz",
"7-9 d: mkddddqdhddgdd",
"1-9 d: rxwqcdrswdg",
"2-4 m: gmmmn",
"2-4 k: kkkmk",
"15-16 r: rrrrrrrrlrrrrrrr",
"1-11 g: grgggcgggcggp",
"7-8 q: qqqjqqgq",
"13-14 c: cxccccgccvcpccccc",
"2-5 j: txvwlls",
"3-13 w: plwqwhbwdgxcwfmwjl",
"9-11 g: ggggggggggggggggdgg",
"3-12 g: ggsggggggpgm",
"3-11 g: bcgqgxmbjpwm",
"8-9 z: zxvzrzzzzzdvzzgz",
"17-18 z: zzzxzzzzzzzzzzzzsw",
"9-10 t: ftrmtttktttttdtmdk",
"15-17 r: rjrrprrrrrrrrrrfrrrr",
"7-8 k: kkkkkkkkk",
"6-13 n: nnnvnfgntnnnjnnxh",
"17-19 l: llllflllllllllllqlrl",
"11-13 d: hdlddddddhxddddkd",
"13-17 h: hhhhhhhhhhhhnhhhjhhh",
"1-12 c: ncccczwcnctcwcc",
"4-5 w: wgwkl",
"12-13 w: wwwwwwwwwwwwfwww",
"3-12 s: scssgwshsspsss",
"15-17 w: wwwwwcwxmhwwwwwwwww",
"1-3 x: xxxxw",
"8-9 p: mpppppppzptp",
"11-12 n: nntnbnnnnnmx",
"2-10 x: xrspbmkcthqsdxrdxwx",
"13-14 l: lllllllpllllll",
"5-10 x: xxxxjxjdrx",
"7-8 k: kkkkkkjk",
"9-11 k: vkkkkkkvkkzddbzr",
"3-8 r: lbrrcwbdf",
"6-14 l: lgwllrgllllllgllll",
"6-9 q: nqqkhqqtqgqc",
"2-4 g: ggxggggggg",
"12-13 k: kkkkkjkkkkkvf",
"5-9 b: scvbbpzbbbzbfb",
"16-17 d: mdddddddddddddcttd",
"4-10 l: lllflllllllllllllll",
"3-6 q: qqqqqtqqqq",
"1-3 n: pblsghl",
"9-11 v: bvvvmvfvvgtvfpv",
"4-5 s: ssshq",
"5-8 g: ggwgnggp",
"1-4 f: ffmr",
"4-5 g: ggwzs",
"4-5 h: fhhqs",
"10-17 l: clhlllhslxpgljpvlrkl",
"7-9 m: pnswhtmvmsrmjwrbfz",
"6-16 v: vvvvqcvvvdzvjvldvv",
"5-6 c: cntcrl",
"12-13 n: nnnnnnnnnnnnln",
"2-4 w: wwww",
"2-13 w: swrqssmmwrxtw",
"15-16 z: zzzzzzzzzzzzzzgqzzz",
"6-8 c: cctvkbdcwcbvhc",
"1-2 w: wwzdwjtm",
"1-3 z: mzzh",
"6-13 t: fttttdwtttttkht",
"3-9 g: cggfjgqngwmj",
"1-5 k: pkkkkkkrk",
"1-5 q: qknqd",
"7-11 t: tttttckttkb",
"6-7 q: qqqqqbl",
"16-17 c: ccccccccccccccctc",
"2-5 v: vfcnvlvvvxvrnvvvvvv",
"1-5 w: nwtwnnwwhtwwdwwww",
"2-10 v: ftvxxbjzlqctp",
"5-7 d: nddddgddjdbk",
"8-11 s: wxqjwkcsflssm",
"17-20 v: vvvvvvvvpvgvvvvvvvvv",
"6-8 w: wwlwwwqfw",
"13-14 r: rrrrgrrrrrrrrr",
"5-6 t: ttttzq",
"13-14 c: cccccccccccccc",
"11-12 k: kwzkkkbdkjkk",
"10-11 t: ttttpttttrwwtttt",
"3-4 w: ljwwww",
"7-8 q: ggqzdvqqw",
"1-3 z: zzsg",
"1-2 c: crjvdhgwckszmzpcjmr",
"10-11 n: nngnntnnnqn",
"7-16 w: bwdgwwwgwwnnqtcwc",
"4-16 v: vfkvqvvsvnjhfvvv",
"10-11 p: fgbhpzqvkmn",
"10-11 w: wswgwqwszwwhwnwww",
"15-16 t: stwtfxttthptttttt",
"1-3 v: vlvsdmrv",
"5-8 z: zzzzfzzz",
"14-15 t: ttpvtrlqtlcdrlv",
"2-6 b: qwfnbkc",
"6-9 p: jppjpspplm",
"3-4 s: brgqssz",
"2-6 q: cqwqlq",
"13-14 d: rrdljqdddddcxzdc",
"8-13 s: tsnssssssssrcssrssq",
"7-8 d: dwddddgl",
"9-15 b: gspmhlgbbzbbbrbt",
"3-4 h: khhh",
"3-4 c: nkccpmcct",
"6-7 g: ggggggg",
"17-18 p: ppppppppppppppppjnp",
"2-6 f: rfjxtff",
"3-4 g: ggvg",
"7-9 k: kkkkkkkkkkk",
"11-16 f: fjdffrtfffjfdffcln",
"4-8 w: lhpdwbnfssswwfswwwrw",
"5-9 l: lvlllflll",
"5-6 t: wtrtlr",
"4-8 m: mnmmjgqtgs",
"5-11 n: nnrngbnntnk",
"5-8 s: ssssfssss",
"7-14 m: hrnbhsfcvdmxbmvzfvnq",
"1-5 x: xxxxxxxxwxxdg",
"1-10 v: vcjvvjvvvv",
"5-10 l: vvdmhskmprszklvvl",
"3-17 d: dsddkddddddqddddfjdd",
"9-13 p: pppppppptpppqp",
"12-13 h: hhhhhhhhhhjhh",
"1-4 z: mrvlzlzhzlp",
"3-4 h: hhmc",
"7-17 w: wwwwwwwwwwwwwwwwww",
"6-8 z: cgzxzznzzdhcvwh",
"4-5 g: mgggg",
"7-8 t: ttrthtdkjtgtts",
"8-9 w: wwwwwwwpw",
"9-15 k: kkkkkkkkkkkkkkk",
"8-14 q: qqqqqqqqnqqqqq",
"14-19 w: jkwbwwlwdcmwmmwwrwr",
"4-5 m: hsxgkdmz",
"2-4 v: vsvqvcv",
"4-9 h: grhnhhmhhxhlh",
"6-14 t: tmttrjbtjtfttc",
"1-4 d: pjdbrb",
"15-16 r: rrrrrblrrrrrrrxwr",
"16-18 p: pppjpppppptpplpppppp",
"4-7 x: mxtngxnpsx",
"9-10 b: bbbdbqvmrgbzbbbb",
"4-5 w: wwwwj",
"1-2 d: dgws",
"3-10 l: lglsphlbgfmgdtw",
"10-11 h: hhhhhfhhhpdhh",
"5-9 x: xxxxqgxxjf",
"11-19 m: bfmxmmfgzzhqwsmgwmlr",
"10-14 x: xxxrxkxxqrlxmpxbdx",
"3-8 l: lxlxvwlllznlptfv",
"5-9 h: hhthhhhshdq",
"1-5 q: sqqqqqqq",
"9-14 j: jxfcjpjfjjgjjjjjjjhj",
"1-7 g: gvdgjgsvvgnd",
"2-5 t: tmttpttttt",
"17-19 c: ccccclcccccccccccccc",
"10-13 d: mdvddtsddrxtx",
"11-12 g: gggggggwggzgp",
"3-5 k: hzxnpkq",
"9-11 q: qqqqqsqqqqq",
"8-9 x: btrkbxngx",
"3-5 g: jpkchcxcxhzfhsggqkg",
"7-13 s: ssssssssscssssn",
"13-15 p: pppppppnppppppp",
"5-15 f: ffffjfffffffffgf",
"2-4 p: zqphp",
"1-4 r: wrrr",
"3-9 b: jsjcbrkkczkzmjbg",
"1-2 k: kkkkl",
"2-3 n: dlnxjwzstsdxns",
"7-14 v: fvvvvzhhvpsvvckdvv",
"2-8 r: rrrrwrhrrrr",
"5-8 n: nnxdnnnnnn",
"5-12 c: scmcdscccccmcc",
"9-10 v: vvvvvvvvjm",
"14-15 w: lwppdwwwwdmrrww",
"6-7 c: qcgcvxdrcccpxchrnlq",
"8-10 x: hbrqsksxwxxttz",
"18-19 w: jnftgwpwmwfdgrcpkww",
"3-4 k: ffkj",
"5-9 f: flffsvffsff",
"3-4 k: kkkkk",
"1-3 l: dlglkll",
"10-16 f: bfffjzfffnfffffxhfff",
"11-20 d: ddbdqsddddddddddfddd",
"6-20 l: vmjltmkclbmqrflzgdzl",
"5-19 f: ffffffffffffffffffff",
"5-10 n: nnnnmnnfnz",
"4-13 f: kmrfrfkvjxdbftvhnsdm",
"10-11 k: kkkkkkkkskk",
"12-13 k: kkkkkkkkkkkhj",
"7-8 p: ppppppnhw",
"5-8 l: gtpllwklh",
"13-14 h: hhhhhhhhzhhwsrh",
"7-15 h: hhhhhhthhhhhthqgfh",
"1-3 f: mfdmhv",
"15-16 f: fffffflfffffffbqf",
"12-13 c: cccmcccccccxn",
"2-12 q: nqltxqfdcrxqvt",
"8-10 d: dddddddddvdddd",
"2-9 g: mdrwnvtsd",
"12-13 l: lllllllllllll",
"2-3 m: mnrgmdm",
"6-9 v: hvvvvvvvv",
"4-7 q: bqqqrqcqqqq",
"8-11 v: vvvvdvvvvvr",
"3-4 n: cnnw",
"2-12 r: rrrklfrrrksrr",
"2-12 p: pxppklhsppwdxwcpzvm",
"10-11 n: nnnnnnnnnnn",
"5-6 w: wfwwww",
"3-5 p: pppppp",
"3-8 g: ggggggggggggggg",
"1-4 x: xxxxx",
"11-16 d: ngcdkglddtppbddgdrd",
"11-12 k: kkkkkkkkkkhr",
"12-13 c: hgxxchcvxpdlsrt",
"1-3 m: vmmm",
"4-6 z: kvzgzzzp",
"11-12 d: dzddzlcbvdxk",
"10-11 g: ggsgggggggggdz",
"6-8 q: xfhgpqltbfbdzqg",
"4-7 v: vvvlvcxmvvxq",
"5-7 v: vkkgvgvnpvvlpgkv",
"6-7 b: bbkvnwbqblbvbb",
"2-4 l: wmll",
"6-9 j: djqjcxxljm",
"3-4 j: nmzcsnlnjjjdms",
"3-8 w: wnwwsvww",
"9-20 m: wbntxzztwmblxmsmltmg",
"7-8 r: rprrrtrf",
"1-3 r: lnrfxfswmhgvf",
"5-12 k: zlkdlktsrqjt",
"13-14 v: vvvvvvvvvvvvvbv",
"11-12 d: ddddddddddgc",
"9-11 c: mdwpjjcdcrc",
"5-6 m: mmmmnl",
"5-6 l: lblldn",
"13-14 c: sccfcxsnsclccc",
"4-5 q: qqkkq",
"5-8 m: mmmmmmmm",
"5-8 z: tzhzzttzvwfcv",
"12-15 b: bbbbbbbbbbbtdfb",
"7-10 b: bbnplbbxxbh",
"1-7 x: zxxxxxvx",
"6-17 b: kbnbbbbbptbbrbbbb",
"1-3 l: llllll",
"13-15 c: ccccccccccccqcz",
"1-8 s: sssssssb",
"12-19 x: xxxxxxxxpxxcxxxxxxq",
"12-13 x: xxxxxxxxxxxxxx",
"14-18 w: wwwwwwwwwwwwwlwmbk",
"7-8 q: mzwqblqqz",
"1-7 p: hrppppgdzp",
"2-7 n: qmvpmsjncgkgpbb",
"8-10 x: xxxxxxxwxx",
"2-4 v: rfcpmpvsswsrjkxpdrxh",
"14-16 k: kkkkkkkkkkkhkkskkt",
"14-15 d: ddddddkdddddrqdd",
"4-16 m: njmqmmmjfmmxhmwgdbc",
"12-13 h: hdmdfhhhzhkdhr",
"3-14 n: nbntrsmnmrcwbf",
"6-8 h: hhwhhlhnhh",
"18-19 v: vvvvvvvvvvvvjvvvvvp",
"11-20 l: ldllllbvllmqllmlllfl",
"2-3 d: dcddd",
"4-6 n: bsnncn",
"3-6 f: dwlfqfzgs",
"8-12 d: xdddddfhnddfddd",
"5-13 l: llllqlllllmllll",
"10-14 m: mnmzrmmmmwmmqm",
"3-12 c: qjchcclnbccccpc",
"7-16 j: jjjjjjwjjjjjjrjnjj",
"1-10 v: svvdvvgmgpkfkvhvv",
"2-4 b: gbpbczblbbv",
"7-9 w: drwwbwtgwswwww",
"2-8 h: fhkprhplz",
"1-6 m: mmmmwm",
"3-4 g: gggp",
"1-3 q: qqqqj",
"10-11 h: htfrdwrnkzntnfpdmkhq",
"5-6 x: nxxxxxxxj",
"7-8 w: wwwcwwww",
"1-5 h: lhhhhh",
"7-8 f: ffjfftff",
"3-5 g: kdgfgjgbghggp",
"2-11 p: flgpvfbhpzxmmntw",
"12-13 z: zzzzzzhzzzzgzzqz",
"12-13 j: ttmhmdfjpdpnj",
"5-7 h: hhhhvhh",
"9-13 x: xhxbxxsxxqwxxvxxxx",
"1-3 c: ccchcc",
"2-5 h: hhrsh",
"1-3 q: rqqq",
"2-8 x: sxrxbcxx",
"12-15 b: bbbbnbbrbcbtbbb",
"14-18 m: hmmmmmmmkmmmmmmmmrmm",
"1-4 v: vvvv",
"5-11 h: brnzvqhrrwh",
"4-10 f: dbtfdvlfnffqmwp",
"3-16 r: krrrvmrwrrrtrrrrkrwr",
"11-15 c: gxbccccccccccbh",
"13-18 z: zzrzzzzzzzzzzzzzzzz",
"1-3 l: lkvxtrthfvmdzn",
"7-9 p: vpppmpppppp",
"3-4 w: wwwwwwww",
"8-11 z: vlzzzlzvzzmzzz",
"2-4 v: vqfv",
"10-11 s: sssssslssgss",
"4-5 z: zzzww",
"11-19 p: zpbpdjfjpdfgnpphhpg",
"1-8 f: fftlxmfl",
"9-10 p: phlpqzppxpxp",
"5-7 q: kbqjqqq",
"3-6 m: jcblmv",
"9-10 l: llllllllll",
"13-14 w: wbwhwwwwhwqwmswwmjwf",
"6-11 j: jjjjvtjwjbdjj",
"4-8 j: jljjjvgj",
"3-4 r: lrhvrzrr",
"2-4 p: plpfkphpq",
"2-6 b: bbbbrbvwbbgh",
"2-11 t: mmjmcwcdcbtztxfbtst",
"7-15 r: grsrrjrrtrgvrtrrr",
"6-17 d: dddddgddpddzddddddd",
"3-10 t: qpnbrnmmjtjxtdg",
"6-7 v: vvvvvhb",
"5-20 j: vxmkjnssjdtldsdwltlg",
"7-10 r: rrrrrrrrrrr",
"2-4 r: rrpl",
"3-4 m: mmzg",
"1-8 h: fshhjrhm",
"5-8 m: mmmvmmvmmnmdxnjcjpn",
"8-10 m: mmmmmmmlmzmm",
"14-15 q: qqqqqqqqqqqqqqb",
"6-16 q: qhkdtqnzqqqjgjrqrkbq",
"3-9 b: gtbbfgfhbs",
"14-16 h: hhhhdhhhmhhhhmhhh",
"9-10 n: nnnndnnnvjnl",
"11-12 p: pqpppppppppppp",
"7-9 d: ddpwfdtdd",
"6-10 z: qzzzzjzzmz",
"9-14 q: qqdjqdqqrqqwqnqqp",
"6-7 k: kkkkkbkk",
"7-14 j: hjvjmzkhgltffljjvjj",
"4-5 z: znhzz",
"11-16 r: rrrrbxdrrqrrsrrb",
"5-8 v: vvhvvvvvwvlvhlqg",
"4-10 n: wvnnjnndcnk",
"5-6 g: ggkggk",
"8-9 w: wwwwwwwww",
"5-9 r: mrwrrrrrx",
"4-6 r: jcfrjrrrg",
"6-9 w: wkwwsxwgfcwqb",
"5-6 x: grxjxnrb",
"2-7 s: nsmldnsszdk",
"5-9 j: gjjzkjvjwjcjbj",
"6-11 r: jtrgcvwccrrrfmj",
"7-12 j: jfjjmjjjjxjqzdjjxq",
"1-2 n: nnwqcnr",
"5-16 l: lllpclllllllblllll",
"5-6 j: wjgdjcdpjxfjxnjj",
"3-16 s: klstwqksfclmcbbcn",
"17-18 t: ttttdtttttthtttttttt",
"9-13 p: ppppppvpvhhqp",
"1-2 n: tsnnn",
"3-4 p: jpqpjmpplpw",
"4-6 t: dtfthsxtfpd",
"2-6 m: zhszctkm",
"1-13 p: dppmkpbprgppxcpvhrpz",
"2-7 m: gvmmflp",
"9-16 c: rcdsppjbcwdxnfbc",
"5-15 p: pppxpppqnhpptpppz",
"10-13 q: xfhqhkqkjqjwqqcfrgl",
"14-15 t: tttttttttttttdvtttt",
"5-8 q: qqqbqvqzqqn",
"1-7 v: cvgvvbcgjv",
"6-9 r: rrrrrrrdr",
"2-11 j: wjjgxngswkj",
"11-13 d: dddddddddddddm",
"5-8 p: pzppppppkjp",
"15-16 c: cccccccccccccctc",
"1-7 c: vsvkccbc",
"2-5 h: hhhhchhhhhhhhhh",
"9-11 n: nnnnnnnnnnnnn",
"11-12 q: mdmfmkxcxzjq",
"6-11 m: mmkgnjdhfbc",
"1-4 d: dgrdwbdfdkdrmthsj",
"12-14 k: kkkkkkkkkrjkkkkk",
"2-4 d: hdrvdzd",
"2-3 s: sxmsssssssssssssssss",
"13-15 z: zzzzzzlzzlzztzzz",
"3-8 r: lxrrrnvrtrgnmkrr",
"6-7 w: wwlbrwwww",
"4-7 r: zlnmmkpnrkkcrrxrmfq",
"2-6 v: zgvvjjvvjhjv",
"8-11 l: llllllllllcl",
"8-14 m: mlmldmmgnqzmmmm",
"7-10 z: wzzzzhtzdqzznzz",
"4-7 k: mvgwkkk",
"5-9 p: pdpltvdpptpppg",
"4-15 q: qqqfqbzqlqqqqqpqfqs",
"5-7 q: qqqcxqqm",
"14-17 x: rltjxxbxxxllsxwcx",
"1-13 w: gwggwwwwwwbsbwkwwd",
"8-14 k: kkbkkkkhkkkxjzblgk",
"5-8 p: cppnpprp",
"6-7 k: jrnvhkkgkkb",
"12-14 h: hhhhhhhhhhhhhn",
"10-20 j: gjwjjjpjjljjjjjsjjjj",
"16-17 n: nnnnnnnnnnnnknnnnn",
"5-6 f: cdpppf",
"12-13 z: bvpzqzzlwzccfszt",
"1-9 k: wdkkvtrprltkkltz",
"9-16 x: xxxjxdxhtzjxgxsbx",
"17-18 j: jjjjjjjjjjjjjjjjlrxj",
"2-5 x: xkknmxxxn",
"3-9 f: tfffhfffffff",
"11-16 r: rrlgrrqmrrprcrrrvjr",
"9-12 m: nrlmgmxvgkmpmqvntx",
"4-7 r: hrrrzrrnm",
"3-15 g: stgjgbgggvdljcgdgfg",
"6-9 j: jjjzbpjjjjjj",
"2-3 p: ppjp",
"9-10 t: tttttttttt",
"10-12 w: wwwfwvwvnkwdwww",
"11-14 d: nvljddddftrndzx",
"6-7 r: rlrrrbqcrqr",
"2-4 c: xbdl",
"11-18 c: ccccccccccvcccccccc",
"5-6 j: gjjjjjj",
"3-7 w: xwhnqlhnpfrvlkqqrp",
"6-19 q: qqqfxdglhqqkqqqjnhdq",
"8-9 d: ddsdddqsv",
"14-16 g: gggggggggggggggt",
"3-5 b: bbbnh",
"11-14 t: mtttwttfttttltq",
"8-10 x: xxxcxxxfxq",
"8-11 l: lfgvnlzrfcllnxss",
"1-3 t: gtttttttwgtptt",
"1-20 j: jjgjlqjjcjcjjjsjgjlj",
"8-14 v: dqgmngvvqnswvvrrvc",
"2-8 m: gmpkmmsmmmmm",
"5-6 m: mmmmtm",
"6-7 z: zfzrxzxz",
"4-15 k: wmwkhhskgwzqpnk",
"2-6 d: hddddfzdsdtvg",
"9-10 g: gggsggggggrgggqg",
"11-12 m: mmmmmmmmmmmm",
"5-7 w: wwhqqhwwwwd",
"4-9 k: gtkrkkkdfk",
"1-4 g: kgtvg",
"7-11 r: rhdbzjrwrcr",
"9-18 f: fjffffffdffkfffffvf",
"3-5 t: ttttt",
"13-14 q: qqqqqqqqqqqqcqqq",
"1-8 n: vnnnnwnqlbln",
"3-5 s: ssmss",
"2-4 q: qqqbkvqxh",
"3-4 n: nshscq",
"10-18 s: sssssssssnsssssssgs",
"9-10 v: vvvvvglvzddvvjv",
"5-16 s: sspssssrssssslss",
"4-7 t: ttptxtt",
"2-4 r: rrrrrxdrr",
"8-9 b: xqvbbbbbh",
"3-4 b: bbbbc",
"1-13 v: qhvvvvlvvvvvdv",
"4-8 l: lhkllblwllll",
"3-4 q: qhjgq",
"8-9 p: rpbpppshdpp",
"13-18 k: kkkkkkkkkkkkkkkkkx",
"8-9 r: rrrrrrrrr",
"5-10 f: bjfwwqvjwffpnl",
"1-3 l: pbhl",
"4-6 g: mggfgv",
"1-4 t: tttttt",
"5-10 t: llqrtccxtttntxjcdczk",
"13-14 q: qqqqmqqqdqqfqgqqqq",
"6-10 w: wpwmwwrgwwk",
"13-16 q: qqshgkqkqvscqtqq",
"4-5 t: qttdt",
"2-12 h: fpbbxgpskzjpkvtr",
"10-12 z: zrszvzzzzrzkznpzw",
"1-4 z: tgzz",
"3-6 c: cdwccvgm",
"1-15 q: jqqxkqrqmtfqpvbpq",
"4-6 v: vjvlvvv",
"1-7 l: hjlllwj",
"6-13 s: lrspslfwsgjks",
"3-4 p: dpvk",
"4-8 h: hsjjhhff",
"1-18 k: pkkkkkkkkkkkkkkkkkk",
"6-8 n: nvcnxqnnrnnn",
"7-8 p: ppppppvzp",
"12-17 h: hhhhhzhhhhhkhhhhph",
"13-17 p: ppmpppnppppplpbvcppp",
"8-14 m: tmmcjvmmjgcfmmnj",
"1-3 t: tttth",
"3-4 x: zxxx",
"1-3 d: kpwhxpctcgdbdkb",
"5-6 z: knnlzzzzxjrghzb",
"9-10 w: wwmwwwwxgww",
"9-11 b: bqbbfbbwbbqbb",
"2-3 d: ddxp",
"10-12 g: gggvgsgtgggggbg",
"3-4 b: gbbb",
"8-20 c: jccqcxjcqfncfcbccgcc",
"4-9 z: zzzztzzzzzb",
"2-12 x: gmfmzzrxsqbx",
"10-12 x: xxxzbxxjxxxkxb",
"3-4 v: szpvbvlvr",
"2-5 r: grqqr",
"4-8 q: mqqnvqvqqq",
"6-10 v: vdbvnvvxvvvnvvvvv",
"4-10 d: dddtdddddjd",
"3-4 l: klmc",
"10-13 k: kkbkkkvkhkvkkkkkl",
"1-3 v: vvkz",
"13-14 h: pkgffgcszgsghbcdtpm",
"11-16 c: dgccccccctchxbfm",
"8-9 b: bbbbbbbbbtb",
"1-9 h: hrnhhlphhh",
"17-18 s: ssssssssssssssssst",
"5-10 h: hhhhhzhhhhhx",
"2-7 k: dlrncbkkwp",
"3-5 k: kfqkk",
"4-8 d: ddddddddd",
"10-15 m: mmmmrmmmlpmmmrgmkmx",
"2-4 g: gsgggkm",
"7-9 l: llsllllvrzlxlrgglk",
"2-9 f: fzfbvfkff",
"13-14 p: pxppppppppppvf",
"15-17 z: nbkxkjtszptcndzdl",
"3-6 z: zzxzzhz",
"12-13 t: tttttttttttttt",
"5-10 s: pssbfsbsssnssvsb",
"9-18 c: ccxthchscjjdccvcncl",
"3-5 f: fndfqffffff",
"2-4 m: clwvcdjmz",
"7-8 b: bbbpbjbb",
"2-3 r: rbxx",
"3-8 j: jjjjjjjjj",
"5-10 v: zrwnvbrvmm",
"4-6 h: hhghhhhh",
"6-8 f: fnffzfwfgffpfc",
"5-6 q: qvqqqqkqqq",
"3-5 v: vgwvrgqc",
"9-10 p: pppphbbpphppb",
"10-12 l: rdjrcxkgflll",
"9-14 h: qwwxnhfhnfhhbhhjr",
"9-10 d: ddddpddddd",
"4-13 w: wwwwwwwwwwwwwwww",
"8-16 x: xrxxxxxxxdlxxxxxxx",
"6-10 p: pplppkpzwb",
"5-7 j: wpjjjgjjvjqjrgj",
"4-13 w: cwwswwwwwjwwdvw",
"5-6 j: jjjsjjj",
"4-5 c: ccfccqdqctllbm",
"7-13 c: bcccccccccccccc",
"4-5 p: pgptw",
"1-4 p: qpwmp",
"3-5 f: fftfff",
"2-6 m: mmhbpmjlx",
"9-13 r: zrrczrlrzqxrszrrlrr",
"2-9 w: xrszwwwdkww",
"6-14 g: qlwswsgjdlgmzvwg",
"6-8 z: zzzzzzfz",
"6-7 w: xwzsjjr",
"2-13 f: fxmfdnfffkpggz",
"1-7 d: rdnlqdplfddjdd",
"11-14 j: kjjvjjjxpjjjjjzjjjz",
"12-14 p: gpppwvmqpfpptpfppkpc",
"6-11 k: kkkkkhkknkkkkkgk",
"2-6 m: swpgmf",
"1-3 x: sxzqnxxv",
"12-18 n: nnnnlnnnnnnnnnnjnxnn",
"3-4 p: pppq",
"8-9 v: vvvvvvvpv",
"6-9 l: ltsldwtlhll",
"11-14 x: xxxxlxxhdtxxxxxsxx",
"4-5 q: qsqvqvq",
"5-6 b: sbbbbt",
"10-13 l: lllllslllrllll",
"1-5 | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Vector Autoregression (VAR) processes
References
----------
Lütkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
from __future__ import division, print_function
from statsmodels.compat.python import (range, lrange, string_types,
StringIO, iteritems)
from collections import defaultdict
import numpy as np
import scipy.stats as stats
import scipy.linalg
from statsmodels.iolib.table import SimpleTable
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import OutputWarning
from statsmodels.tools.tools import chain_dot
from statsmodels.tools.linalg import logdet_symm
from statsmodels.tsa.tsatools import vec, unvec, duplication_matrix
from statsmodels.tsa.vector_ar.hypothesis_test_results import \
CausalityTestResults, NormalityTestResults, WhitenessTestResults
from statsmodels.tsa.vector_ar.irf import IRAnalysis
from statsmodels.tsa.vector_ar.output import VARSummary
import statsmodels.tsa.tsatools as tsa
from statsmodels.tsa.vector_ar import output, plotting, util
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
# -------------------------------------------------------------------------------
# VAR process routines
def ma_rep(coefs, maxn=10):
r"""
MA(\infty) representation of VAR(p) process
Parameters
----------
coefs : ndarray (p x k x k)
maxn : int
Number of MA matrices to compute
Notes
-----
VAR(p) process as
.. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
can be equivalently represented as
.. math:: y_t = \mu + \sum_{i=0}^\infty \Phi_i u_{t-i}
e.g. can recursively compute the \Phi_i matrices with \Phi_0 = I_k
Returns
-------
phis : ndarray (maxn + 1 x k x k)
"""
p, k, k = coefs.shape
phis = np.zeros((maxn+1, k, k))
phis[0] = np.eye(k)
# recursively compute Phi matrices
for i in range(1, maxn + 1):
for j in range(1, i+1):
if j > p:
break
phis[i] += np.dot(phis[i-j], coefs[j-1])
return phis
def is_stable(coefs, verbose=False):
"""
Determine stability of VAR(p) system by examining the eigenvalues of the
VAR(1) representation
Parameters
----------
coefs : ndarray (p x k x k)
Returns
-------
is_stable : bool
"""
A_var1 = util.comp_matrix(coefs)
eigs = np.linalg.eigvals(A_var1)
if verbose:
print('Eigenvalues of VAR(1) rep')
for val in np.abs(eigs):
print(val)
return (np.abs(eigs) <= 1).all()
def var_acf(coefs, sig_u, nlags=None):
"""
Compute autocovariance function ACF_y(h) up to nlags of stable VAR(p)
process
Parameters
----------
coefs : ndarray (p x k x k)
Coefficient matrices A_i
sig_u : ndarray (k x k)
Covariance of white noise process u_t
nlags : int, optional
Defaults to order p of system
Notes
-----
Ref: Lütkepohl p.28-29
Returns
-------
acf : ndarray, (p, k, k)
"""
p, k, _ = coefs.shape
if nlags is None:
nlags = p
# p x k x k, ACF for lags 0, ..., p-1
result = np.zeros((nlags + 1, k, k))
result[:p] = _var_acf(coefs, sig_u)
# yule-walker equations
for h in range(p, nlags + 1):
# compute ACF for lag=h
# G(h) = A_1 G(h-1) + ... + A_p G(h-p)
for j in range(p):
result[h] += np.dot(coefs[j], result[h-j-1])
return result
def _var_acf(coefs, sig_u):
"""
Compute autocovariance function ACF_y(h) for h=1,...,p
Notes
-----
Lütkepohl (2005) p.29
"""
p, k, k2 = coefs.shape
assert(k == k2)
A = util.comp_matrix(coefs)
# construct VAR(1) noise covariance
SigU = np.zeros((k*p, k*p))
SigU[:k, :k] = sig_u
# vec(ACF) = (I_(kp)^2 - kron(A, A))^-1 vec(Sigma_U)
vecACF = scipy.linalg.solve(np.eye((k*p)**2) - np.kron(A, A), vec(SigU))
acf = unvec(vecACF)
acf = acf[:k].T.reshape((p, k, k))
return acf
def forecast_cov(ma_coefs, sigma_u, steps):
"""
Compute theoretical forecast error variance matrices
Parameters
----------
steps : int
Number of steps ahead
Notes
-----
.. math:: \mathrm{MSE}(h) = \sum_{i=0}^{h-1} \Phi \Sigma_u \Phi^T
Returns
-------
forc_covs : ndarray (steps x neqs x neqs)
"""
neqs = len(sigma_u)
forc_covs = np.zeros((steps, neqs, neqs))
prior = np.zeros((neqs, neqs))
for h in range(steps):
# Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
phi = ma_coefs[h]
var = chain_dot(phi, sigma_u, phi.T)
forc_covs[h] = prior = prior + var
return forc_covs
mse = forecast_cov
def forecast(y, coefs, trend_coefs, steps, exog=None):
"""
Produce linear minimum MSE forecast
Parameters
----------
y : ndarray (k_ar x neqs)
coefs : ndarray (k_ar x neqs x neqs)
trend_coefs : ndarray (1 x neqs) or (neqs)
steps : int
exog : ndarray (trend_coefs.shape[1] x neqs)
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lütkepohl p. 37
Also used by DynamicVAR class
"""
p = len(coefs)
k = len(coefs[0])
# initial value
forcs = np.zeros((steps, k))
if exog is not None and trend_coefs is not None:
forcs += np.dot(exog, trend_coefs)
# to make existing code (with trend_coefs=intercept and without exog) work:
elif exog is None and trend_coefs is not None:
forcs += trend_coefs
# h=0 forecast should be latest observation
# forcs[0] = y[-1]
# make indices easier to think about
for h in range(1, steps + 1):
# y_t(h) = intercept + sum_1^p A_i y_t_(h-i)
f = forcs[h - 1]
for i in range(1, p + 1):
# slightly hackish
if h - i <= 0:
# e.g. when h=1, h-1 = 0, which is y[-1]
prior_y = y[h - i - 1]
else:
# e.g. when h=2, h-1=1, which is forcs[0]
prior_y = forcs[h - i - 1]
# i=1 is coefs[0]
f = f + np.dot(coefs[i - 1], prior_y)
forcs[h - 1] = f
return forcs
def _forecast_vars(steps, ma_coefs, sig_u):
"""_forecast_vars function used by VECMResults. Note that the definition
of the local variable covs is the same as in VARProcess and as such it
differs from the one in VARResults!
Parameters
----------
steps
ma_coefs
sig_u
Returns
-------
"""
covs = mse(ma_coefs, sig_u, steps)
# Take diagonal for each cov
neqs = len(sig_u)
inds = np.arange(neqs)
return covs[:, inds, inds]
def forecast_interval(y, coefs, trend_coefs, sig_u, steps=5, alpha=0.05,
exog=1):
assert(0 < alpha < 1)
q = util.norm_signif_level(alpha)
point_forecast = forecast(y, coefs, trend_coefs, steps, exog)
ma_coefs = ma_rep(coefs, steps)
sigma = np.sqrt(_forecast_vars(steps, ma_coefs, sig_u))
forc_lower = point_forecast - q * sigma
forc_upper = point_forecast + q * sigma
return point_forecast, forc_lower, forc_upper
def var_loglike(resid, omega, nobs):
r"""
Returns the value of the VAR(p) log-likelihood.
Parameters
----------
resid : ndarray (T x K)
omega : ndarray
Sigma hat matrix. Each element i,j is the average product of the
OLS residual for variable i and the OLS residual for variable j or
np.dot(resid.T,resid)/nobs. There should be no correction for the
degrees of freedom.
nobs : int
Returns
-------
llf : float
The value of the loglikelihood function for a VAR(p) model
Notes
-----
The loglikelihood function for the VAR(p) is
.. math::
-\left(\frac{T}{2}\right)
\left(\ln\left|\Omega\right|-K\ln\left(2\pi\right)-K\right)
"""
logdet = logdet_symm(np.asarray(omega))
neqs = len(omega)
part1 = - (nobs * neqs / 2) * np.log(2 * np.pi)
part2 = - (nobs / 2) * (logdet + neqs)
return part1 + part2
def _reordered(self, order):
# Create new arrays to hold rearranged results from .fit()
endog = self.endog
endog_lagged = self.endog_lagged
params = self.params
sigma_u = self.sigma_u
names = self.names
k_ar = self.k_ar
endog_new = np.zeros([np.size(endog, 0), np.size(endog, 1)])
endog_lagged_new = np.zeros([np.size(endog_lagged, 0), np.size(endog_lagged, 1)])
params_new_inc, params_new = [np.zeros([np.size(params, 0), np.size(params, 1)])
for i in range(2)]
sigma_u_new_inc, sigma_u_new = [np.zeros([np.size(sigma_u, 0), np.size(sigma_u, 1)])
for i in range(2)]
num_end = len(self.params[0])
names_new = []
# Rearrange elements and fill in new arrays
k = self.k_trend
for i, c in enumerate(order):
endog_new[:, i] = self.endog[:, c]
if k > 0:
params_new_inc[0, i] = params[0, i]
endog_lagged_new[:, 0] = endog_lagged[:, 0]
for j in range(k_ar):
params_new_inc[i+j*num_end+k, :] = self.params[c+j*num_end+k, :]
endog_lagged_new[:, i+j*num_end+k] = endog_lagged[:, c+j*num_end+k]
sigma_u_new_inc[i, :] = sigma_u[c, :]
names_new.append(names[c])
for i, c in enumerate(order):
params_new[:, i] = params_new_inc[:, c]
sigma_u_new[:, i] = sigma_u_new_inc[:, c]
return VARResults(endog=endog_new, endog_lagged=endog_lagged_new,
params=params_new, sigma_u=sigma_u_new,
lag_order=self.k_ar, model=self.model,
trend='c', names=names_new, dates=self.dates)
def orth_ma_rep(results, maxn=10, P=None):
r"""Compute Orthogonalized MA coefficient matrices using P matrix such
that :math:`\Sigma_u = PP^\prime`. P defaults to the Cholesky
decomposition of :math:`\Sigma_u`
Parameters
----------
results : VARResults or VECMResults
maxn : int
Number of coefficient matrices to compute
P : ndarray (neqs x neqs), optional
Matrix such that Sigma_u = PP', defaults to the Cholesky decomposition.
Returns
-------
coefs : ndarray (maxn x neqs x neqs)
"""
if P is None:
P = results._chol_sigma_u
ma_mats = results.ma_rep(maxn=maxn)
return np.array([np.dot(coefs, P) for coefs in ma_mats])
def test_normality(results, signif=0.05):
"""
Test assumption of normal-distributed errors using Jarque-Bera-style
omnibus Chi^2 test
Parameters
----------
results : VARResults or statsmodels.tsa.vecm.vecm.VECMResults
signif : float
The test's significance level.
Notes
-----
H0 (null) : data are generated by a Gaussian-distributed process
Returns
-------
result : NormalityTestResults
References
----------
.. | |
fraction
"""
fnames = []
for frac_d in xrange(self.num_dates_fracs):
frac_id = (frac_num, frac_d)
fnames.append(self.frac_fname(frac_id))
return fnames
    def load_frac_by_num(self, frac_num, t_from=None, t_to=None,
                         hdfs_client=None):
        """
        Load a fraction given its frac_num and a date range.

        The time axis is stored in chunks of self.frac_ndates dates; this
        stitches the requested [t_from, t_to) window together from the
        relevant chunks, leaving missing chunks at nodataval (or zero).

        Args:
            frac_num: Spatial fraction number.
            t_from, t_to: Time window (t_to excluded); default to the full
                time axis.
            hdfs_client: Optional client forwarded to load_frac.

        Returns:
            Array of shape (frac_height, frac_width, t_to - t_from).
        """
        if t_from is None:
            t_from = 0
        if t_to is None:
            t_to = self.shape[2]
        ndates = t_to - t_from
        data = np.zeros([self.frac_height, self.frac_width, ndates],
                        dtype=self.dtype)
        # Fill with nodata if we have
        if self.nodataval is not None:
            data[:] = self.nodataval
        # First and last time chunks touched by the requested window.
        d_from = t_from // self.frac_ndates
        d_to = t_to // self.frac_ndates + 1
        for d in range(d_from, d_to):
            frac_t_range = self.frac_time_range(d)
            # Compute the time slice we should take from the fraction
            frac_t_from = max(t_from - frac_t_range[0], 0)
            frac_t_to = min(t_to - frac_t_range[0],
                            frac_t_range[1] - frac_t_range[0])
            assert frac_t_to >= 0
            if frac_t_to - frac_t_from == 0:
                continue
            # Destination indices inside the output array.
            slice_t_from = frac_t_from + frac_t_range[0] - t_from
            # This correctly handles the truncated time axis case
            slice_t_to = frac_t_to + frac_t_range[0] - t_from
            assert slice_t_from >= 0
            assert slice_t_to >= 0
            # sanity check
            assert slice_t_to - slice_t_from == frac_t_to - frac_t_from
            frac_id = (frac_num, d)
            frac_data = self.load_frac(
                frac_id,
                return_none=True,
                slice=((0, self.frac_height),
                       (0, self.frac_width),
                       (frac_t_from, frac_t_to)),
                hdfs_client=hdfs_client
            )
            # Missing chunks (frac_data is None) keep the nodata fill.
            if frac_data is not None:
                data[:, :, slice_t_from:slice_t_to] = frac_data
        return data
def load_frac(self, frac_id, slice=None, hdfs_client=None,
return_none=False):
"""
Load a single fraction.
This returns data or None if the fraction is empty
Args:
slice: A tuple of tuple ((ymin, ymax), (xmin, xmax), (tmin, tmax))
specifying the slice to load (in FRACTION coords). If None,
defaults to the whole fraction
return_none: Wether to return None if the fraction is empty
Otherwise, returns an empty array
Returns:
An array of the shape of slice
"""
assert len(frac_id) == 2, "frac_id should be (frac_num, frac_t_chunk)"
if slice is None:
slice = (
(0, self.frac_height),
(0, self.frac_width),
(0, self.frac_ndates)
)
data = read_frac(self.frac_fname(frac_id), hdfs_client)
if data is not None:
return data[slice[0][0]:slice[0][1],
slice[1][0]:slice[1][1],
slice[2][0]:slice[2][1]]
else:
if return_none:
return None
else:
height = slice[1] - slice[0]
width = slice[3] - slice[2]
ndates = slice[5] - slice[4]
data = np.zeros(
[height, width, ndates] + list(self.shape[3:]),
dtype=self.dtype
)
return data
def write_frac_by_num(self, frac_num, data, hdfs_client=None):
"""
Write all the dates for a single fraction
"""
assert data.shape[2] == self.shape[2],\
"You must provide a fraction with all dates to write_frac_by_num"
assert np.dtype(data.dtype) == self.dtype
# Write each date slice frac
for frac_d in xrange(self.num_dates_fracs):
t1, t2 = self.frac_time_range(frac_d)
frac_id = (frac_num, frac_d)
d_data = data[:, :, t1:t2]
self.write_frac(frac_id, d_data)
def write_frac(self, frac_id, data, hdfs_client=None):
"""
Write a single fraction
"""
assert len(frac_id) == 2, "frac_id should be (frac_num, frac_t_chunk)"
assert np.dtype(data.dtype) == self.dtype
# Protect against fraction bigger than frac_ndates
assert data.shape[2] <= self.frac_ndates, \
'Corrupted fraction %s, shape[2] is %d, header frac_ndates=%d' % (
str(frac_id), data.shape[2], self.frac_ndates)
write_frac(self.frac_fname(frac_id), data, hdfs_client)
def write_all(self, data):
"""
Given an array representing the whole grid, write it to disk
"""
assert np.dtype(data.dtype) == self.dtype
assert data.shape[:3] == (self.height, self.width, self.shape[2])
self.save()
for frac_x in xrange(self.num_x_fracs):
for frac_y in xrange(self.num_y_fracs):
for frac_d in xrange(self.num_dates_fracs):
frac_num = self.frac_num(frac_x, frac_y)
x1, x2, y1, y2 = self.frac_xyranges(frac_num)
t1, t2 = self.frac_time_range(frac_d)
frac_id = (frac_num, frac_d)
self.write_frac(frac_id, data[y1:y2, x1:x2, t1:t2])
def frac_for_xy(self, x, y):
"""
Returns the fraction number that will contains the point (x, y)
"""
assert 0 <= x < self.width
assert 0 <= y < self.height
frac_y = int(np.floor(y / self.frac_height))
frac_x = int(np.floor(x / self.frac_width))
frac_num = frac_y * self.num_x_fracs + frac_x
return frac_num
def fracs_for_rect_xy(self, xy_from, xy_to):
"""
Returns the list of fraction covering the given area
(start is included, not end - like numpy)
"""
# We subtract 1 so that if, for example, frac_width is 50 and
# x_to is 150, we do not get the third fraction (this x_to is excluded)
# In all the other cases, this doesn't change anything
frac_min_x = int(np.floor(xy_from[0] / self.frac_width))
frac_max_x = int(np.floor((xy_to[0] - 1) / self.frac_width))
frac_min_y = int(np.floor(xy_from[1] / self.frac_height))
frac_max_y = int(np.floor((xy_to[1] - 1) / self.frac_height))
fracs = []
# Need to add +1 here because we want to be inclusive on fractions
for frac_x in xrange(frac_min_x, frac_max_x + 1):
for frac_y in xrange(frac_min_y, frac_max_y + 1):
frac_num = frac_y * self.num_x_fracs + frac_x
fracs.append(frac_num)
return list(set(fracs))
    def load_slice_xy(self, xy_from, xy_to, t_from=None, t_to=None,
                      progressbar=False):
        """
        Load a subset of the grid corresponding to the given rectangle
        (start is included, not end - like numpy) and time window.

        Iterates over every fraction intersecting the rectangle and every
        time chunk intersecting [t_from, t_to), copying each piece into the
        right place of the output array. Missing fractions keep the
        nodataval fill.

        Returns:
            data : the data for the requested subrect, shape
                (height, width, t_to - t_from)
        """
        if t_from is None:
            t_from = 0
        if t_to is None:
            t_to = self.shape[2]
        assert self.in_bounds_xy(xy_from)
        assert self.in_bounds_xy(xy_to)
        assert 0 <= t_from < self.shape[2]
        assert 0 <= t_to <= self.shape[2] and t_to > t_from
        fracs = self.fracs_for_rect_xy(xy_from, xy_to)
        sys.stdout.flush()
        slice_width = xy_to[0] - xy_from[0]
        slice_height = xy_to[1] - xy_from[1]
        slice_time = t_to - t_from
        # First and last time chunks touched by the requested window.
        d_from = t_from // self.frac_ndates
        d_to = t_to // self.frac_ndates + 1
        data = np.zeros([slice_height, slice_width, slice_time],
                        dtype=self.dtype)
        # Fill with nodata if we have
        if self.nodataval is not None:
            data[:] = self.nodataval
        nfracs = len(fracs)
        if progressbar:
            bar = pyprind.ProgBar(nfracs)
        for i, frac in enumerate(fracs):
            # Frac start/end in grid coords
            frac_start = (self.x_start(frac), self.y_start(frac))
            frac_end = (self.x_end(frac), self.y_end(frac))
            # Compute the slice of fraction we should take (in grid coords):
            # the intersection of this fraction with the requested rect.
            grid_fx1 = max(frac_start[0], xy_from[0])
            grid_fx2 = min(frac_end[0], xy_to[0])
            grid_fy1 = max(frac_start[1], xy_from[1])
            grid_fy2 = min(frac_end[1], xy_to[1])
            # Now, assign the slice of fraction to our grid slice
            # (destination indices, relative to xy_from).
            slice_fx1 = grid_fx1 - xy_from[0]
            slice_fx2 = grid_fx2 - xy_from[0]
            slice_fy1 = grid_fy1 - xy_from[1]
            slice_fy2 = grid_fy2 - xy_from[1]
            # Source indices, relative to the fraction origin.
            frac_fx1 = grid_fx1 - frac_start[0]
            frac_fx2 = grid_fx2 - frac_start[0]
            frac_fy1 = grid_fy1 - frac_start[1]
            frac_fy2 = grid_fy2 - frac_start[1]
            for d in range(d_from, d_to):
                frac_t_range = self.frac_time_range(d)
                # Compute the time slice we should take from the fraction
                frac_t_from = max(t_from - frac_t_range[0],
                                  0)
                frac_t_to = min(t_to - frac_t_range[0],
                                frac_t_range[1] - frac_t_range[0])
                assert frac_t_to >= 0
                if frac_t_to - frac_t_from == 0:
                    continue
                slice_t_from = frac_t_from + frac_t_range[0] - t_from
                # This correctly handles the truncated time axis case
                slice_t_to = frac_t_to + frac_t_range[0] - t_from
                assert slice_t_from >= 0
                assert slice_t_to >= 0
                # sanity check
                assert slice_t_to - slice_t_from == frac_t_to - frac_t_from
                frac_id = (frac, d)
                frac_data = self.load_frac(
                    frac_id,
                    return_none=True,
                    slice=((frac_fy1, frac_fy2),
                           (frac_fx1, frac_fx2),
                           (frac_t_from, frac_t_to))
                )
                # Missing chunks (frac_data is None) keep the nodata fill.
                if frac_data is not None:
                    data[slice_fy1:slice_fy2, slice_fx1:slice_fx2,
                         slice_t_from:slice_t_to] = frac_data
                if progressbar:
                    bar.update()
        return data
def load_slice_latlng(self, tl_latlng, br_latlng, t_from=None, t_to=None):
"""
Load a subset of the grid corresponding to the given rectangle
(start and end are inclusive) and a given timeslice
Returns:
data : the data for the requested subrect
xy_from : the position of data[0,0] in the grid
"""
assert tl_latlng[0] > br_latlng[0]
assert tl_latlng[1] < br_latlng[1]
xy_from = self.latlng2xy(tl_latlng)
xy_to = self.latlng2xy(br_latlng)
assert self.in_bounds_xy(xy_from)
assert self.in_bounds_xy(xy_to)
data = self.load_slice_xy(xy_from, xy_to, t_from, t_to)
return data, xy_from
def in_bounds_xy(self, xy):
return 0 <= xy[0] < self.width and 0 <= xy[1] < self.height
def list_available_fractions(self, hdfs_client=None):
"""
Returns the list of available (existing) fractions ids.
Returns:
a list of tuple (frac_num, time_chunk)
"""
data_dir = os.path.join(self.grid_root, 'jdata')
if not rasterio.fs_exists(data_dir, hdfs_client):
return []
else:
fractions = rasterio.fs_list(data_dir, hdfs_client)
# fractions is a list of fractions filenames (e.g. 14123.jdata)
fractions = [frac_id_from_fname(fname) for fname in fractions
if fname.endswith('jdata')]
return fractions
def list_available_fracnums(self, **kwargs):
"""
Returns a list of available frac nums
"""
fracs = self.list_available_fractions(**kwargs)
# extract the frac_num
return sorted(set(list([f[0] for f in fracs])))
def to_dict(self):
d = {
'width': self.width,
'height': self.height,
'fracWidth': self.frac_width,
'fracHeight': self.frac_height,
'fracNDates': self.frac_ndates,
'spatialRefWKT': self.spatialref.ExportToWkt(),
'dtype': self.dtype.str,
'geot': self.geot,
'shape': self.shape,
'meta': self.meta
}
return d
@staticmethod
def from_dict(grid_root, d):
return Header(
grid_root=grid_root,
width=d['width'],
height=d['height'],
frac_width=d['fracWidth'],
frac_height=d['fracHeight'],
frac_ndates=d['fracNDates'],
dtype=d['dtype'],
sr_wkt=d['spatialRefWKT'],
geot=d['geot'],
meta=d['meta'],
shape=d['shape'],
)
def save(self, hdfs_client=None):
fname = os.path.join(self.grid_root, 'header.jghdr3')
blob = json.dumps(self.to_dict())
rasterio.fs_write(fname, blob, hdfs_client)
@staticmethod
def exists(grid_root, hdfs_client=None):
fname = os.path.join(grid_root, 'header.jghdr3')
return rasterio.fs_exists(fname, hdfs_client)
@staticmethod
def load(grid_root, hdfs_client=None):
fname = os.path.join(grid_root, | |
"973": 15992,
"OTA": 15993,
"ï": 15994,
"38)": 15995,
"vent": 15996,
"▁Wij": 15997,
"228": 15998,
"ging": 15999,
"▁Aldri": 16000,
"▁Zvez": 16001,
"0.1": 16002,
"▁Hoy": 16003,
"▁cool": 16004,
"▁es": 16005,
"books": 16006,
"▁Date": 16007,
"kla": 16008,
"▁Fresh": 16009,
"▁Popular": 16010,
"▁flu": 16011,
"hri": 16012,
"Vir": 16013,
"kovi": 16014,
"91)": 16015,
"MAT": 16016,
"UD": 16017,
"acha": 16018,
"▁Lumi": 16019,
"▁Oliveira": 16020,
"Good": 16021,
"Roman": 16022,
"▁Praw": 16023,
'="': 16024,
"▁Wien": 16025,
"▁Camel": 16026,
"▁Gina": 16027,
"Hub": 16028,
"cale": 16029,
"pata": 16030,
"▁Nun": 16031,
"▁fluor": 16032,
"Mus": 16033,
"Tak": 16034,
"cris": 16035,
"ged": 16036,
"pier": 16037,
"▁Lun": 16038,
"▁15:00": 16039,
"▁Norte": 16040,
"李": 16041,
"▁Panorama": 16042,
"▁suc": 16043,
"Oh": 16044,
"riad": 16045,
"NAH": 16046,
"ify": 16047,
"▁Jelen": 16048,
"▁April": 16049,
"▁IG": 16050,
"▁OneDrive": 16051,
"kada": 16052,
"▁Cir": 16053,
"▁Circ": 16054,
"▁Maison": 16055,
"▁Mert": 16056,
"▁Letter": 16057,
"AFA": 16058,
"nych": 16059,
"▁Koma": 16060,
"ils": 16061,
"▁Heaven": 16062,
"▁Louboutin": 16063,
"eff": 16064,
"▁Estate": 16065,
"▁Svi": 16066,
"▁Journey": 16067,
"▁cou": 16068,
"▁kang": 16069,
"6.2": 16070,
"MAP": 16071,
"enda": 16072,
"Õ": 16073,
"▁Flora": 16074,
"▁Grin": 16075,
"▁Gujarat": 16076,
"▁Ima": 16077,
"▁Twilight": 16078,
"39)": 16079,
"wijk": 16080,
"Pass": 16081,
"owicz": 16082,
"▁VY": 16083,
"AVI": 16084,
"▁(11)": 16085,
"▁twitter": 16086,
"erne": 16087,
"laga": 16088,
"dun": 16089,
"tional": 16090,
"118": 16091,
"▁Jou": 16092,
"▁Pene": 16093,
"lad": 16094,
"pc": 16095,
"▁pas": 16096,
"PAS": 16097,
"hold": 16098,
"▁tém": 16099,
"▁Wester": 16100,
"#": 16101,
"Lab": 16102,
"ismo": 16103,
"▁Hau": 16104,
"▁Josep": 16105,
"▁Poker": 16106,
"▁est": 16107,
"▁intermedi": 16108,
"ando": 16109,
"▁Rudolf": 16110,
"▁Yana": 16111,
"yes": 16112,
"▁Blogger": 16113,
"▁Desc": 16114,
"cero": 16115,
"chant": 16116,
"▁Panel": 16117,
"Pol": 16118,
"▁Satan": 16119,
"itus": 16120,
"▁Wilt": 16121,
"sid": 16122,
"▁Button": 16123,
"▁Hospital": 16124,
"▁Graz": 16125,
"▁Mitte": 16126,
"▁Willem": 16127,
"lü": 16128,
"How": 16129,
"▁Nell": 16130,
"▁Rouge": 16131,
"kko": 16132,
"▁Aja": 16133,
"▁Regina": 16134,
"ür": 16135,
"ː": 16136,
"▁Addis": 16137,
"▁Banda": 16138,
"balu": 16139,
"▁Scout": 16140,
"▁When": 16141,
"LU": 16142,
"▁CU": 16143,
"▁Mest": 16144,
"2-1": 16145,
"dz": 16146,
"▁Abo": 16147,
"geri": 16148,
"omat": 16149,
"▁Faisal": 16150,
"▁Lok": 16151,
"▁Santander": 16152,
"▁Zeman": 16153,
"▁ebook": 16154,
".1.1": 16155,
"has": 16156,
"ugu": 16157,
"▁Chance": 16158,
"▁admin": 16159,
"45)": 16160,
"▁Door": 16161,
"▁cer": 16162,
"▁pus": 16163,
"▁text": 16164,
"▁Tere": 16165,
"5.2": 16166,
"rato": 16167,
"▁Mint": 16168,
"Tre": 16169,
"lach": 16170,
"lusi": 16171,
"▁München": 16172,
"▁Target": 16173,
"hei": 16174,
"ilin": 16175,
"ITS": 16176,
"spir": 16177,
"cl": 16178,
"cover": 16179,
"ped": 16180,
"▁NAS": 16181,
"maq": 16182,
"164": 16183,
"motiv": 16184,
"yat": 16185,
"▁2017/18": 16186,
"▁viet": 16187,
"öl": 16188,
"▁Gand": 16189,
"▁Abad": 16190,
"▁Gaming": 16191,
"▁Mene": 16192,
"▁pur": 16193,
"stry": 16194,
"▁Dak": 16195,
"into": 16196,
"▁Spo": 16197,
"▁poker": 16198,
"Met": 16199,
"esi": 16200,
"ées": 16201,
"▁AW": 16202,
"▁plug": 16203,
"cik": 16204,
"gumi": 16205,
"nicu": 16206,
"▁Forte": 16207,
"hom": 16208,
"Ẫ": 16209,
"▁Cabo": 16210,
"▁project": 16211,
"02)": 16212,
"ECT": 16213,
"1918": 16214,
"Facebook": 16215,
"IDI": 16216,
"ahu": 16217,
"▁Simona": 16218,
"▁Ska": 16219,
"Hor": 16220,
"riza": 16221,
"▁DAR": 16222,
"▁Eile": 16223,
"▁Vill": 16224,
"And": 16225,
"bino": 16226,
"▁Board": 16227,
"▁Bod": 16228,
"▁Sr": 16229,
"ave": 16230,
"eter": 16231,
"Mobil": 16232,
"rev": 16233,
"▁Arri": 16234,
"Tbilisi": 16235,
"▁Crist": 16236,
"Pra": 16237,
"udin": 16238,
"▁Violet": 16239,
"rist": 16240,
"▁2015/2016": 16241,
"Austr": 16242,
"▁06:00": 16243,
"▁1950-": 16244,
"▁Amen": 16245,
"▁Prop": 16246,
"87)": 16247,
"MCA": 16248,
"ako": 16249,
"hini": 16250,
"ying": 16251,
"mach": 16252,
"App": 16253,
"▁hyper": 16254,
"inen": 16255,
"▁16:00": 16256,
"wahi": 16257,
"▁Sankt": 16258,
"▁Terre": 16259,
"皇": 16260,
"dim": 16261,
"▁ASC": 16262,
"Brexit": 16263,
"oten": 16264,
"▁Saturn": 16265,
"arth": 16266,
"▁FS": 16267,
"▁snack": 16268,
"▁Jie": 16269,
"▁Other": 16270,
"Stra": 16271,
"kker": 16272,
"Team": 16273,
"bl": 16274,
"▁see": 16275,
"DER": 16276,
"Mul": 16277,
"know": 16278,
"967": 16279,
"Share": 16280,
"iyo": 16281,
"±": 16282,
"enter": 16283,
"▁Hamza": 16284,
"▁cardio": 16285,
"dico": 16286,
"garh": 16287,
"jing": 16288,
"found": 16289,
"ś": 16290,
"▁DHL": 16291,
"cian": 16292,
"zani": 16293,
"ın": 16294,
"brew": 16295,
"weil": 16296,
"▁Slavi": 16297,
"▁Rana": 16298,
"▁black": 16299,
"▁Transport": 16300,
"cah": 16301,
"como": 16302,
"▁Defender": 16303,
"▁JE": 16304,
"▁Orchestra": 16305,
"rias": 16306,
"▁Zim": 16307,
"▁argent": 16308,
"fli": 16309,
"hiri": 16310,
"▁Ola": 16311,
"▁ur": 16312,
"ppen": 16313,
"pris": 16314,
"▁40-": 16315,
"▁Claus": 16316,
"▁Runner": 16317,
"▁Uri": 16318,
"▁hum": 16319,
"server": 16320,
"▁Naz": 16321,
"▁Salle": 16322,
"Andre": 16323,
"▁fast": 16324,
"▁Sigur": 16325,
"Guard": 16326,
"mez": 16327,
"tensi": 16328,
"ugi": 16329,
"▁Fonte": 16330,
"▁President": 16331,
"▁della": 16332,
"Google": 16333,
"eti": 16334,
"spel": 16335,
"▁Mey": 16336,
"▁Stig": 16337,
"CTA": 16338,
"handler": 16339,
"olesterol": 16340,
"otta": 16341,
"▁Volk": 16342,
"Ten": 16343,
"liance": 16344,
"▁Crew": 16345,
"▁JO": 16346,
"▁Paro": 16347,
"▁Vier": 16348,
"gha": 16349,
"▁major": 16350,
"scope": 16351,
"▁Azure": 16352,
"sys": 16353,
"▁Assist": 16354,
"gali": 16355,
"▁Picture": 16356,
"ZE": 16357,
"hub": 16358,
"ior": 16359,
"▁Eid": 16360,
"▁Elias": 16361,
"Speak": 16362,
"deur": 16363,
"Ana": 16364,
"Bretagne": 16365,
"icul": 16366,
"Sky": 16367,
"heu": 16368,
"ove": 16369,
"▁Hussain": 16370,
"▁fin": 16371,
"FSC": 16372,
"gere": 16373,
"▁RED": 16374,
"god": 16375,
"▁Sit": 16376,
"WIN": 16377,
"nne": 16378,
"raw": 16379,
"vula": 16380,
"▁Where": 16381,
"▁proti": 16382,
"▁true": 16383,
"93)": 16384,
"Dr": 16385,
"High": 16386,
"nika": 16387,
"▁Francesc": 16388,
"▁Haz": 16389,
"▁Northern": 16390,
"▁451": 16391,
"▁Ira": 16392,
"Os": 16393,
"zbek": 16394,
"▁Sada": 16395,
"▁fal": 16396,
"▁pose": 16397,
"▁engine": 16398,
"▁mat": 16399,
"ejo": 16400,
"lv": 16401,
"rage": 16402,
"▁Lat": 16403,
"▁Minas": 16404,
"90)": 16405,
"arum": 16406,
"gach": 16407,
"▁Hind": 16408,
"▁benze": 16409,
"▁Lombok": 16410,
"CSI": 16411,
"alus": 16412,
"KO": 16413,
"profe": 16414,
"yad": 16415,
"▁profile": 16416,
"▁05.": 16417,
"▁Vent": 16418,
"▁broker": 16419,
"COS": 16420,
"▁ama": 16421,
"▁level": 16422,
"rig": 16423,
"Card": 16424,
"drop": 16425,
"ggio": 16426,
"taire": 16427,
"▁Aval": 16428,
"seb": 16429,
"▁Tomorrow": 16430,
"Def": 16431,
"FAR": 16432,
"cius": 16433,
"tip": 16434,
"venant": 16435,
"▁02.": 16436,
"▁Miroslav": 16437,
"enz": 16438,
"▁kaki": 16439,
"▁lit": 16440,
"gryp": 16441,
"▁Mannschaft": 16442,
"EK": 16443,
"avour": 16444,
"cali": 16445,
"▁Stick": 16446,
"▁Terri": 16447,
"RAS": 16448,
"oza": 16449,
"vika": 16450,
"yur": 16451,
"▁Hav": 16452,
"▁rac": 16453,
"YD": 16454,
"ching": 16455,
"mics": 16456,
"▁Ricci": 16457,
"▁Vali": 16458,
"yum": 16459,
"▁Source": 16460,
"▁Gwyn": 16461,
"▁Lager": 16462,
"▁[4]": 16463,
"take": 16464,
"▁Quattro": 16465,
"▁Scop": 16466,
"▁comp": 16467,
"bid": 16468,
"plat": 16469,
"▁Zer": 16470,
"▁indi": 16471,
"copi": 16472,
"schen": 16473,
"▁Dum": 16474,
"xas": 16475,
"▁BY": 16476,
"▁mala": 16477,
"YEN": 16478,
"▁Remote": 16479,
"▁Sura": 16480,
"▁voice": 16481,
"crit": 16482,
"Brit": 16483,
"kere": 16484,
"▁Dir": 16485,
"▁Riga": 16486,
"Louis": 16487,
"cir": 16488,
"tig": 16489,
"▁Police": 16490,
"▁Sign": 16491,
"▁Suf": 16492,
"Son": 16493,
"▁Baca": 16494,
"▁Grave": 16495,
"▁Mato": 16496,
"bhu": 16497,
"jana": 16498,
"rozen": 16499,
"vind": 16500,
"▁Properties": 16501,
"▁Tuan": 16502,
"1920": 16503,
"osis": 16504,
"▁Diet": 16505,
"▁Mayor": 16506,
"rici": 16507,
"▁Lauri": 16508,
"▁Loft": 16509,
"centr": 16510,
"sports": 16511,
"▁Holm": 16512,
"▁Innovation": 16513,
"▁painting": 16514,
"BRI": 16515,
"andri": 16516,
"plash": 16517,
"redo": 16518,
"6000": 16519,
"▁look": 16520,
"▁Safety": 16521,
"WP": 16522,
"▁Funk": 16523,
"▁Order": 16524,
"Tai": 16525,
"▁Application": 16526,
"▁Isla": 16527,
"▁Pai": 16528,
"▁Primo": 16529,
"▁toner": 16530,
"count": 16531,
"hira": 16532,
"rahman": 16533,
"▁Laut": 16534,
"▁Bela": 16535,
"▁Found": 16536,
"▁VAN": 16537,
"▁site": 16538,
"bron": 16539,
"▁Dock": 16540,
"▁Shab": 16541,
"clu": 16542,
"▁14-": 16543,
"▁2016/17": 16544,
"▁lup": 16545,
"Packard": 16546,
"str": 16547,
"Ẻ": 16548,
"▁Kina": 16549,
"▁Liza": 16550,
"▁Radeon": 16551,
"pé": 16552,
"▁Pik": 16553,
"block": 16554,
"ecto": 16555,
"gum": 16556,
"▁Kimi": 16557,
":40": 16558,
"voy": 16559,
"▁Damian": 16560,
"▁Equi": 16561,
"▁González": 16562,
"ngkok": 16563,
"talk": 16564,
"▁Ner": 16565,
"▁René": 16566,
"▁fili": 16567,
"hami": 16568,
"scape": 16569,
"▁Sector": 16570,
"Back": 16571,
"Chef": 16572,
"raman": 16573,
"Del": 16574,
"IGA": 16575,
"NAC": 16576,
"rib": 16577,
"▁Bihar": 16578,
"Bos": 16579,
"Mak": 16580,
"▁Eduard": 16581,
"▁Modul": 16582,
"ATO": 16583,
"▁Television": 16584,
"▁ann": 16585,
"Market": 16586,
"hah": 16587,
"▁LR": 16588,
"ló": 16589,
"mind": 16590,
"▁Rev": 16591,
"MEI": 16592,
"▁Tava": 16593,
"▁val": 16594,
"SIA": 16595,
"▁st": 16596,
"owie": 16597,
"steroid": 16598,
"ulum": 16599,
"▁Alla": 16600,
"▁Moment": 16601,
"WM": 16602,
"ool": 16603,
"▁Anwar": 16604,
"▁Contest": 16605,
"▁bike": 16606,
"jia": 16607,
"▁Poll": 16608,
"▁mens": 16609,
"▁sp": 16610,
"2.3": 16611,
"Alex": 16612,
"▁Happ": 16613,
"▁Majest": 16614,
"Ran": 16615,
"tala": 16616,
"▁Vasco": 16617,
"OV": 16618,
"▁Teng": 16619,
"▁Animation": 16620,
"▁Guard": 16621,
"▁Jaa": 16622,
"▁Rais": 16623,
"▁toe": 16624,
"IJ": 16625,
"▁Camilla": 16626,
"▁Haus": 16627,
"▁Hawa": 16628,
"▁Jis": 16629,
"Donald": 16630,
"Qui": 16631,
| |
# Repository: TimSweering/PolyReach
"""
This file contains functions and classes to compute the Polyflow operator and its error bound
"""
from typing import Type, Tuple
from typing import List
import json
import cvxpy as cp
import numpy as np
import numba as nb
from scipy.linalg import expm
from scipy import optimize
from sympy.printing.aesaracode import aesara_function
from sympy import Matrix, Poly, symbols
from sympy.core.symbol import Symbol
from sympy.polys.polymatrix import PolyMatrix
from scripts.misc_functions import get_carleman_to_poly
from scripts.dreal_error_bound import DrealErrorBound as De
# from scripts.dreal_error_bound import Z3ErrorBound as Ze
class Domain:
    """
    Describes the hyper-rectangular domain of the Polyflow.

    From a per-axis description ``[lower bound, upper bound, step size]`` it
    derives the mesh grid of sample points, the per-axis bounds, the center
    point and the half-length of every axis.
    """
    center: np.ndarray
    axis_length: np.ndarray
    dim_low: int

    def __init__(self, axes_desc: np.ndarray):
        """Build the domain from rows of ``[lower, upper, step]``."""
        self.grid_list = self.__generate_grid(axes_desc)
        self.dim_low = len(self.grid_list)
        self.bounds = axes_desc[:, :2]
        lower = axes_desc[:, 0].reshape((-1, 1))
        # Midpoint of every axis, kept as an (n, 1) column vector.
        self.center = np.sum(self.bounds, axis=1).reshape((-1, 1)) / 2
        # Half-length of every axis: distance from center to lower bound.
        self.axis_length = np.abs(lower - self.center)
        self.axes_desc = axes_desc

    def get_box(self, doi=None) -> List[List[float]]:
        """ Get the projected hyper rectangle in the specified plane.
        Parameters
        ----------
        doi : List[int]
            indices of the plane of interest (defaults to the first two axes)
        """
        if doi is None:
            doi = [0, 1]
        x_lo, x_hi = self.bounds[doi[0], 0], self.bounds[doi[0], 1]
        y_lo, y_hi = self.bounds[doi[1], 0], self.bounds[doi[1], 1]
        # Corner sequence traces the rectangle and closes the loop.
        return [[x_lo, x_hi, x_hi, x_lo, x_lo],
                [y_lo, y_lo, y_hi, y_hi, y_lo]]

    def get_bounds(self) -> np.ndarray:
        """ Returns the lower and upper bound of each element of the domain """
        return self.bounds

    def get_grid(self) -> tuple:
        """ Returns the mesh grid of the domain for each dimension """
        return self.grid_list

    def get_n_points(self) -> int:
        """ Returns the amount of points of the grid """
        return len(self.grid_list[0])

    def get_dim_low(self) -> int:
        """ Returns the dimension of the system """
        return self.dim_low

    def get_center(self) -> np.ndarray:
        """ Returns the center of the domain """
        return self.center

    def to_dict(self) -> dict:
        """ Converts the domain object to a dictionary """
        return {'domain': self.axes_desc.tolist()}

    def to_json(self) -> str:
        """ Converts the Domain object to a string in json format. """
        return json.dumps(self.to_dict())

    @staticmethod
    def __generate_grid(domain_description_in: np.ndarray) -> tuple:
        """
        Generate the coordinates of every point of the mesh grid.
        Parameters
        ----------
        domain_description_in:
            description of the grid where each row is defined as
            [left bound, right bound, stepsize]
        Returns
        -------
        Tuple with one array per dimension holding that dimension's
        coordinate of every grid point.
        """
        n_dims = domain_description_in.shape[0]
        # One value range per axis; upper bound is made inclusive by
        # extending the stop by one step.
        axes = [np.arange(row[0], row[1] + row[2], row[2])
                for row in domain_description_in]
        mesh = np.array(np.meshgrid(*axes))
        # Flatten the mesh into an n_dims x n_points matrix and split it
        # into one coordinate array per dimension.
        return tuple(mesh.T.reshape(-1, n_dims).T)
class PolyFlow:
"""
PolyFlow class is used to define the linear matrix
"""
operator = None
carl_to_poly_reduced = None
    @staticmethod
    def __evaluate_lie_derivatives(lie_list_in: List[aesara_function], domain_obj: Domain) -> \
            Tuple[np.ndarray, np.ndarray]:
        """
        Evaluates all Lie derivatives over all grid points.
        Parameters
        ----------
        lie_list_in
            compiled aesara functions of the Lie derivatives, indexed by
            derivative order (the last entry is the highest order)
        domain_obj
            domain object providing the grid values
        Returns
        -------
        known_lie : (n * grid_n, m - 1) array with one column per Lie
            derivative of order 0 .. m - 2 evaluated at every grid point
        to_be_estimated_lie : (n * grid_n, 1) column holding the highest
            order Lie derivative, the one Polyflow approximates
        """
        m = len(lie_list_in)  # amount of Lie functions
        n = len(domain_obj.get_grid())  # dimension of system,
        grid_n = len(domain_obj.get_grid()[0])  # Amount of grid points in domain
        # Allocate space for Matrix A
        known_lie = np.zeros((n * grid_n, m - 1))
        # Evaluate all {0, N-1} Lie derivatives at the grid points
        for i in range(0, m - 1):
            # The reshape/transpose chain flattens the function output into a
            # single column -- assumes each compiled function returns
            # n * grid_n values; confirm against the aesara_function setup.
            known_lie[:, i] = lie_list_in[i](*domain_obj.get_grid()).reshape((n, -1)).T.reshape((-1, 1)).ravel()
        # Get Nth Lie derivative which is to be approximated by polyflow
        to_be_estimated_lie = lie_list_in[-1](*domain_obj.get_grid()).reshape((n, -1)).T.reshape((-1, 1))
        return known_lie, to_be_estimated_lie
    def __get_all_lie_derivatives(self, diff_function_in: PolyMatrix, sym_list: Tuple[symbols],
                                  max_derivative: int) -> Tuple[list, List[PolyMatrix]]:
        """
        Calculates all Lie derivatives from 0 to max_derivative.
        Each next Lie derivative is obtained with __get_next_lie_derivative;
        every derivative is also compiled to an aesara function for fast
        numeric evaluation on the grid.
        Parameters
        ----------
        diff_function_in
            Matrix containing polynomial symbolic functions
        sym_list
            Matrix containing all symbolics of the differential equation.
            Order has to be the same as in the differential function
        max_derivative
            The amount of Lie derivatives used for the Polyflow
        Returns
        -------
        A list of compiled aesara functions and a list of symbolic
        PolyMatrix derivatives, both indexed by order (0 .. max_derivative).
        """
        # Pre-size both lists; every entry is overwritten below.
        lie_derivative_aesara_list = [aesara_function] * (max_derivative + 1)
        lie_derivative_sympy_list = [Type[PolyMatrix]] * (max_derivative + 1)
        # The 0th Lie derivative is the state vector itself.
        current_lie = PolyMatrix(sym_list)
        # aesara_function argument metadata: dimension 1 and float64 for
        # every symbol.
        dims_arg = dict((key_i, 1) for key_i in sym_list)
        dtype_arg = dict((key_i, 'float64') for key_i in sym_list)
        # Get Lie derivative function for 0th order
        lie_derivative_aesara_list[0] = aesara_function(sym_list, [current_lie], dims=dims_arg, dtypes=dtype_arg)
        lie_derivative_sympy_list[0] = PolyMatrix([Poly(current_lie[i], sym_list) for i in range(0, len(current_lie))])
        # Set first function as current function
        current_lie = lie_derivative_sympy_list[0]
        # Get higher order Lie derivatives and compile an aesara function for each
        for i in range(1, max_derivative + 1):
            current_lie = self.__get_next_lie_derivative(current_lie, diff_function_in, sym_list)
            # aesara_function needs plain expressions, not Poly objects.
            current_func_non_poly_obj = Matrix([current_lie[j].as_expr() for j in range(0, len(sym_list))])
            lie_derivative_aesara_list[i] = aesara_function(sym_list, [current_func_non_poly_obj], dims=dims_arg,
                                                            dtypes=dtype_arg)
            lie_derivative_sympy_list[i] = current_lie
        return lie_derivative_aesara_list, lie_derivative_sympy_list
@staticmethod
def __get_next_lie_derivative(current_function: PolyMatrix, f: PolyMatrix, diff_symbols: Tuple[symbols]) \
-> PolyMatrix:
"""
Calculates the next Lie Derivative of the input by taking the Jacobian of the function and multiplying it with
the differential equation.
Parameters
----------
current_function
k-1 th Lie derivative
f
Differential equation of the nonlinear system
diff_symbols
Symbolics of the differential equation
Returns
-------
k th Lie derivative
"""
m1 = current_function.jacobian(diff_symbols)
return m1 * f
def to_dict(self) -> dict:
""" Wraps the Polyflow object in a dictionary which is compatible with json format """
output_dict = {}
key_list = ['input_differential_eq', 'symbol_tuple', 'domain_obj', 'max_lie_order', 'time_step',
'scale_factor', 'extra_eig', 'bloat_scale', "polyflow_error"]
for key_i in key_list:
output_dict.update(to_json_el(self, key_i))
output_dict.update(self.get_overrides())
return output_dict
# Type hinting for PyCharm
lie_sympy_list: List[PolyMatrix]
symbol_tuple: Tuple[symbols]
domain_obj: Domain
polyflow_error: np.ndarray
continuous_matrix_list: List[np.ndarray]
scale_factor: float
from_dict_bool: bool
extra_eig: float
solver: str
time_step: float
smt_solver: str
polyflow_smt_tol: List[float]
operator_list = [None]
@staticmethod
def _create_scale_list(scale_factor, max_lie_order):
""" Create scale list for coordinate transformation """
return np.array([scale_factor ** -(max_lie_order - i - 1) for i in range(max_lie_order)])
    def _init_cvx_problems(self, input_differential_eq, symbol_tuple, max_lie_order,
                           domain_obj, dim_low, extra_eig, scale_factor):
        """ Create CVX object in order to optimize the Lambda values of the Polyflow.
        Builds the symbolic/compiled Lie derivatives, evaluates them on the
        domain grid, and wraps the results into optimization model objects
        via __get_cvx_obj (defined outside this excerpt -- presumably cvxpy
        problems given the cvxpy import; confirm).
        Returns
        -------
        (lie_list, lie_sympy_list, known_lie, to_be_estimated_lie,
         model_list, var_list)
        """
        lie_list, lie_sympy_list = self.__get_all_lie_derivatives(input_differential_eq, symbol_tuple,
                                                                  max_lie_order)
        # Evaluate Lie derivatives for optimization problem
        known_lie, to_be_estimated_lie = self.__evaluate_lie_derivatives(lie_list, domain_obj)
        # update constraints for optimization problem
        model_list, var_list = self.__get_cvx_obj(dim_low, known_lie, to_be_estimated_lie,
                                                  extra_eig, scale_factor)
        return lie_list, lie_sympy_list, known_lie, \
            to_be_estimated_lie, model_list, var_list
    def __init__(self, input_differential_eq: PolyMatrix, symbol_tuple: Tuple[symbols],
                 domain_description_in: np.ndarray, max_lie_order: int, time_step: float,
                 **kwargs):
        """
        Constructor of class object PolyFlow
        Parameters
        ----------
        input_differential_eq
            Differential equation of the nonlinear system
        symbol_tuple
            All symbolics of the differential equation
        domain_description_in
            Description of the domain
        max_lie_order
            The order of the Lie derivative that is to be estimated
        time_step
            Time step of the reachability algorithm
        kwargs
            from_dict_bool
            lie_sympy_list
            lambda_list
            polyflow_error_factors
            exponent_factors
            bloat_scale
            scale_factor
                Factor used for the coordinate transformation
            extra_eig
                Relaxation of the eigen value constraint. This variable decides how much the spectral radius may be above
                the scaling factor spectral_allowed = scale_factor*(1 + extra_eig)
        """
        # Defaults for every optional keyword argument; any keyword present
        # in kwargs overrides its default and is consumed from kwargs.
        prop_defaults = {'from_dict_bool': False,
                         'solver': 'SCS',
                         'smt_solver': 'dreal',
                         'map_matrix': None,
                         'lambda_variable_matrices': None,
                         'scale_factor': 1.0,
                         'extra_eig': 0.2,
                         'lambda_list': [Type[np.ndarray], Type[np.ndarray]],
                         'projection_matrix': np.empty((5, 10)),
                         'flowpipe_smt_tol': None,
                         'polyflow_smt_tol': None,
                         'model_list': [],
                         'operator_list': Type[list]
                         }
        # Set variables with default argument
        for prop, default in prop_defaults.items():
            setattr(self, prop, kwargs.get(prop, default))
            if prop in kwargs.keys():
                kwargs.pop(prop)
        # Any remaining keyword is attached to the instance as-is.
        for key_i, value in kwargs.items():
            setattr(self, key_i, value)
        # Set necessary defined variables
        self.time_step = time_step
        self.max_lie_order = max_lie_order
        self.input_differential_eq = input_differential_eq
        self.symbol_tuple = symbol_tuple
        self.scale_list = self._create_scale_list(self.scale_factor, max_lie_order)
        dim_low = len(symbol_tuple)
        # initial value of line search for the Polyflow error
        self.min_error = np.zeros(dim_low)
        # Domain variable (necessary coupling)
        self.domain_obj = Domain(domain_description_in)
        if not self.from_dict_bool:
            # Get all Lie derivatives aesara function/symbolic
            self.lie_list, self.lie_sympy_list = self.__get_all_lie_derivatives(input_differential_eq, symbol_tuple,
                                                                                max_lie_order)
            # Evaluate Lie derivatives for optimization problem
            self.known_lie, self.to_be_estimated_lie = self.__evaluate_lie_derivatives(self.lie_list, self.domain_obj)
            # update constraints for optimization problem
            self.model_list, self.var_list = self.__get_cvx_obj(dim_low,
                                                                self.known_lie,
                                                                self.to_be_estimated_lie,
                                                                self.extra_eig,
                                                                self.scale_factor)
            # NOTE(review): the call below recomputes everything the block
            # above just computed (_init_cvx_problems performs the same three
            # steps) -- this looks like a refactor leftover; confirm and keep
            # only one of the two paths.
            self.lie_list, self.lie_sympy_list, self.known_lie, \
                self.to_be_estimated_lie, self.model_list, self.var_list = \
                self._init_cvx_problems(input_differential_eq, symbol_tuple, max_lie_order,
                                        self.domain_obj, dim_low, self.extra_eig, self.scale_factor)
| |
assert_labels(Assert):
def wrap(self, testcase, *args, **kwargs): # pylint: disable=arguments-differ
# Strip off the "test_" from the function name
name = self.func.__name__[5:]
alias = self.translator.ALIASES_REVMAP.get(name)
labels = ["foo", "bar=baz", {"hello": "world"}]
expected = {"foo": "", "bar": "baz", "hello": "world"}
for item in (name, alias):
if item is None:
continue
testcase.assertEqual(
salt.utils.docker.translate_input(self.translator, **{item: labels}),
testcase.apply_defaults({name: expected}),
)
# Error case: Passed a mutli-element dict in dictlist
bad_labels = copy.deepcopy(labels)
bad_labels[-1]["bad"] = "input"
with testcase.assertRaisesRegex(
CommandExecutionError, r"Invalid label\(s\)"
):
salt.utils.docker.translate_input(self.translator, **{item: bad_labels})
return self.func(testcase, *args, **kwargs)
class assert_device_rates(Assert):
    """
    Tests for device_{read,write}_{bps,iops}. The bps values have a "Rate"
    value expressed in bytes/kb/mb/gb, while the iops values have a "Rate"
    expressed as a simple integer.
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        """
        Run the shared device-rate checks (path validation, string/list
        input, alias collisions) for the wrapped test method, then invoke
        the wrapped method itself.
        """
        # Strip off the "test_" from the function name
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        for item in (name, alias):
            if item is None:
                continue
            # Error case: Not an absolute path
            path = os.path.join("foo", "bar", "baz")
            with testcase.assertRaisesRegex(
                CommandExecutionError,
                "Path '{0}' is not absolute".format(path.replace("\\", "\\\\")),
            ):
                salt.utils.docker.translate_input(
                    self.translator, **{item: "{0}:1048576".format(path)}
                )
            if name.endswith("_bps"):
                # Both integer bytes and a string providing a shorthand for kb,
                # mb, or gb can be used, so we need to test for both.
                # (Removed a dead "expected = ({}, [])" assignment that was
                # never read.)
                vals = "/dev/sda:1048576,/dev/sdb:1048576"
                for val in (vals, vals.split(",")):
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, **{item: val}
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": 1048576},
                                    {"Path": "/dev/sdb", "Rate": 1048576},
                                ]
                            }
                        ),
                    )
                vals = "/dev/sda:1mb,/dev/sdb:5mb"
                for val in (vals, vals.split(",")):
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, **{item: val}
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": "1mb"},
                                    {"Path": "/dev/sdb", "Rate": "5mb"},
                                ]
                            }
                        ),
                    )
                if alias is not None:
                    # Test collision
                    test_kwargs = {
                        name: "/dev/sda:1048576,/dev/sdb:1048576",
                        alias: "/dev/sda:1mb,/dev/sdb:5mb",
                    }
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, ignore_collisions=True, **test_kwargs
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": 1048576},
                                    {"Path": "/dev/sdb", "Rate": 1048576},
                                ]
                            }
                        ),
                    )
                    with testcase.assertRaisesRegex(
                        CommandExecutionError, "is an alias for.+cannot both be used"
                    ):
                        salt.utils.docker.translate_input(
                            self.translator, ignore_collisions=False, **test_kwargs
                        )
            else:
                # The "Rate" value must be an integer
                vals = "/dev/sda:1000,/dev/sdb:500"
                for val in (vals, vals.split(",")):
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, **{item: val}
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": 1000},
                                    {"Path": "/dev/sdb", "Rate": 500},
                                ]
                            }
                        ),
                    )
                # Test non-integer input
                # (Removed a dead "expected = (...)" tuple that was never
                # read; the expected error is asserted via the regex below.)
                vals = "/dev/sda:1000,/dev/sdb:5mb"
                for val in (vals, vals.split(",")):
                    with testcase.assertRaisesRegex(
                        CommandExecutionError,
                        "Rate '5mb' for path '/dev/sdb' is non-numeric",
                    ):
                        salt.utils.docker.translate_input(
                            self.translator, **{item: val}
                        )
                if alias is not None:
                    # Test collision
                    test_kwargs = {
                        name: "/dev/sda:1000,/dev/sdb:500",
                        alias: "/dev/sda:888,/dev/sdb:999",
                    }
                    testcase.assertEqual(
                        salt.utils.docker.translate_input(
                            self.translator, ignore_collisions=True, **test_kwargs
                        ),
                        testcase.apply_defaults(
                            {
                                name: [
                                    {"Path": "/dev/sda", "Rate": 1000},
                                    {"Path": "/dev/sdb", "Rate": 500},
                                ]
                            }
                        ),
                    )
                    with testcase.assertRaisesRegex(
                        CommandExecutionError, "is an alias for.+cannot both be used"
                    ):
                        salt.utils.docker.translate_input(
                            self.translator, ignore_collisions=False, **test_kwargs
                        )
        return self.func(testcase, *args, **kwargs)
class assert_subnet(Assert):
    """
    Test an IPv4 or IPv6 subnet
    """

    def wrap(self, testcase, *args, **kwargs):  # pylint: disable=arguments-differ
        # The decorated test is named "test_<kwarg>"; recover the kwarg name.
        name = self.func.__name__[5:]
        alias = self.translator.ALIASES_REVMAP.get(name)
        valid_subnets = ("127.0.0.1/32", "::1/128")
        invalid_subnets = (
            "127.0.0.1",
            "999.999.999.999/24",
            "10.0.0.0/33",
            "::1",
            "feaz::1/128",
            "::1/129",
        )
        for keyword in (name, alias):
            if keyword is None:
                continue
            # Well-formed subnets must pass validation untouched.
            for candidate in valid_subnets:
                log.debug("Verifying '%s' is a valid subnet", candidate)
                testcase.assertEqual(
                    salt.utils.docker.translate_input(
                        self.translator, validate_ip_addrs=True, **{keyword: candidate}
                    ),
                    testcase.apply_defaults({name: candidate}),
                )
            # Error case: invalid subnet caught by validation
            for candidate in invalid_subnets:
                log.debug("Verifying '%s' is not a valid subnet", candidate)
                with testcase.assertRaisesRegex(
                    CommandExecutionError,
                    "'{0}' is not a valid subnet".format(candidate),
                ):
                    salt.utils.docker.translate_input(
                        self.translator, validate_ip_addrs=True, **{keyword: candidate}
                    )
            # This is not valid input but it will test whether or not subnet
            # validation happened
            junk = "foo"
            testcase.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, validate_ip_addrs=False, **{keyword: junk}
                ),
                testcase.apply_defaults({name: junk}),
            )
            if alias is not None:
                # Test collision
                collision_kwargs = {name: "10.0.0.0/24", alias: "192.168.50.128/25"}
                testcase.assertEqual(
                    salt.utils.docker.translate_input(
                        self.translator, ignore_collisions=True, **collision_kwargs
                    ),
                    testcase.apply_defaults({name: collision_kwargs[name]}),
                )
                with testcase.assertRaisesRegex(
                    CommandExecutionError, "is an alias for.+cannot both be used"
                ):
                    salt.utils.docker.translate_input(
                        self.translator, ignore_collisions=False, **collision_kwargs
                    )
        return self.func(testcase, *args, **kwargs)
class TranslateBase(TestCase):
    maxDiff = None
    translator = None  # Must be overridden in the subclass

    def apply_defaults(self, ret, skip_translate=None):
        """Fill ``ret`` with the translator's DEFAULTS unless translation
        was skipped entirely (skip_translate is True)."""
        if skip_translate is not True:
            defaults = getattr(self.translator, "DEFAULTS", {})
            for key, val in six.iteritems(defaults):
                ret.setdefault(key, val)
        return ret

    @staticmethod
    def normalize_ports(ret):
        """
        When we translate exposed ports, we can end up with a mixture of ints
        (representing TCP ports) and tuples (representing UDP ports). Python 2
        will sort an iterable containing these mixed types, but Python 3 will
        not. This helper is used to munge the ports in the return data so that
        the resulting list is sorted in a way that can reliably be compared to
        the expected results in the test.
        This helper should only be needed for port_bindings and ports.
        """
        if "ports" in ret[0]:
            ports = ret[0]["ports"]
            tcp_ports = sorted(p for p in ports if isinstance(p, six.integer_types))
            udp_ports = sorted(p for p in ports if not isinstance(p, six.integer_types))
            ret[0]["ports"] = tcp_ports + udp_ports
        return ret

    def tearDown(self):
        """
        Test skip_translate kwarg
        """
        name = self.id().split(".")[-1][5:]
        # The below is not valid input for the Docker API, but these
        # assertions confirm that we successfully skipped translation.
        for skip_val in (True, name, [name]):
            self.assertEqual(
                salt.utils.docker.translate_input(
                    self.translator, skip_translate=skip_val, **{name: "foo"}
                ),
                self.apply_defaults({name: "foo"}, skip_translate=skip_val),
            )
class TranslateContainerInputTestCase(TranslateBase):
"""
Tests for salt.utils.docker.translate_input(), invoked using
salt.utils.docker.translate.container as the translator module.
"""
translator = salt.utils.docker.translate.container
@staticmethod
def normalize_ports(ret):
"""
When we translate exposed ports, we can end up with a mixture of ints
(representing TCP ports) and tuples (representing UDP ports). Python 2
will sort an iterable containing these mixed types, but Python 3 will
not. This helper is used to munge the ports in the return data so that
the resulting list is sorted in a way that can reliably be compared to
the expected results in the test.
This helper should only be needed for port_bindings and ports.
"""
if "ports" in ret:
tcp_ports = []
udp_ports = []
for item in ret["ports"]:
if isinstance(item, six.integer_types):
tcp_ports.append(item)
else:
udp_ports.append(item)
ret["ports"] = sorted(tcp_ports) + sorted(udp_ports)
return ret
    @assert_bool(salt.utils.docker.translate.container)
    def test_auto_remove(self):
        """
        Should be a bool or converted to one
        """
        # Test body is generated entirely by the assert_bool decorator.
def test_binds(self):
"""
Test the "binds" kwarg. Any volumes not defined in the "volumes" kwarg
should be added to the results.
"""
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, binds="/srv/www:/var/www:ro", volumes="/testing"
),
{"binds": ["/srv/www:/var/www:ro"], "volumes": ["/testing", "/var/www"]},
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, binds=["/srv/www:/var/www:ro"], volumes="/testing"
),
{"binds": ["/srv/www:/var/www:ro"], "volumes": ["/testing", "/var/www"]},
)
self.assertEqual(
salt.utils.docker.translate_input(
self.translator,
binds={"/srv/www": {"bind": "/var/www", "mode": "ro"}},
volumes="/testing",
),
{
"binds": {"/srv/www": {"bind": "/var/www", "mode": "ro"}},
"volumes": ["/testing", "/var/www"],
},
)
    @assert_int(salt.utils.docker.translate.container)
    def test_blkio_weight(self):
        """
        Should be an int or converted to one
        """
        # Test body is generated entirely by the assert_int decorator.
def test_blkio_weight_device(self):
"""
Should translate a list of PATH:WEIGHT pairs to a list of dictionaries
with the following format: {'Path': PATH, 'Weight': WEIGHT}
"""
for val in ("/dev/sda:100,/dev/sdb:200", ["/dev/sda:100", "/dev/sdb:200"]):
self.assertEqual(
salt.utils.docker.translate_input(
self.translator, blkio_weight_device="/dev/sda:100,/dev/sdb:200"
),
{
"blkio_weight_device": [
{"Path": "/dev/sda", "Weight": 100},
{"Path": "/dev/sdb", "Weight": 200},
]
},
)
# Error cases
with self.assertRaisesRegex(
CommandExecutionError, r"'foo' contains 1 value\(s\) \(expected 2\)"
):
salt.utils.docker.translate_input(
self.translator, blkio_weight_device="foo"
)
with self.assertRaisesRegex(
CommandExecutionError, r"'foo:bar:baz' contains 3 value\(s\) \(expected 2\)"
):
salt.utils.docker.translate_input(
self.translator, blkio_weight_device="foo:bar:baz"
)
with self.assertRaisesRegex(
CommandExecutionError, r"Weight 'foo' for path '/dev/sdb' is not an integer"
):
salt.utils.docker.translate_input(
self.translator, blkio_weight_device=["/dev/sda:100", "/dev/sdb:foo"]
)
    @assert_stringlist(salt.utils.docker.translate.container)
    def test_cap_add(self):
        """
        Should be a list of strings or converted to one
        """
        # Test body is generated entirely by the assert_stringlist decorator.
    @assert_stringlist(salt.utils.docker.translate.container)
    def test_cap_drop(self):
        """
        Should be a list of strings or converted to one
        """
        # Test body is generated entirely by the assert_stringlist decorator.
    @assert_cmd(salt.utils.docker.translate.container)
    def test_command(self):
        """
        Can either be a string or a comma-separated or Python list of strings.
        """
        # Test body is generated entirely by the assert_cmd decorator.
    @assert_string(salt.utils.docker.translate.container)
    def test_cpuset_cpus(self):
        """
        Should be a string or converted to one
        """
        # Test body is generated entirely by the assert_string decorator.
    @assert_string(salt.utils.docker.translate.container)
    def test_cpuset_mems(self):
        """
        Should be a string or converted to one
        """
        # Test body is generated entirely by the assert_string decorator.
    @assert_int(salt.utils.docker.translate.container)
    def test_cpu_group(self):
        """
        Should be an int or converted to one
        """
        # Test body is generated entirely by the assert_int decorator.
    @assert_int(salt.utils.docker.translate.container)
    def test_cpu_period(self):
        """
        Should be an int or converted to one
        """
        # Test body is generated entirely by the assert_int decorator.
    @assert_int(salt.utils.docker.translate.container)
    def test_cpu_shares(self):
        """
        Should be an int or converted to one
        """
        # Test body is generated entirely by the assert_int decorator.
@assert_bool(salt.utils.docker.translate.container)
def test_detach(self):
"""
Should be | |
<reponame>pulumi/pulumi-libvirt
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
# Public output types exported by this generated module (one entry per
# nested Terraform block translated by tfgen).
__all__ = [
    'DomainBootDevice',
    'DomainConsole',
    'DomainCpu',
    'DomainDisk',
    'DomainFilesystem',
    'DomainGraphics',
    'DomainNetworkInterface',
    'DomainNvram',
    'DomainTpm',
    'DomainVideo',
    'DomainXml',
    'NetworkDhcp',
    'NetworkDns',
    'NetworkDnsForwarder',
    'NetworkDnsHost',
    'NetworkDnsSrv',
    'NetworkDnsmasqOptions',
    'NetworkDnsmasqOptionsOption',
    'NetworkRoute',
    'NetworkXml',
    'PoolXml',
    'VolumeXml',
]
@pulumi.output_type
class DomainBootDevice(dict):
    """Boot-device block of a libvirt domain (generated output type)."""
    def __init__(__self__, *,
                 devs: Optional[Sequence[str]] = None):
        if devs is not None:
            pulumi.set(__self__, "devs", devs)
    @property
    @pulumi.getter
    def devs(self) -> Optional[Sequence[str]]:
        """Return the ``devs`` value, if one was set."""
        return pulumi.get(self, "devs")
@pulumi.output_type
class DomainConsole(dict):
    """Console block of a libvirt domain (generated output type)."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is used instead of the snake_case property."""
        suggest = None
        if key == "targetPort":
            suggest = "target_port"
        elif key == "sourceHost":
            suggest = "source_host"
        elif key == "sourcePath":
            suggest = "source_path"
        elif key == "sourceService":
            suggest = "source_service"
        elif key == "targetType":
            suggest = "target_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DomainConsole. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        """Dict-style access; warns on camelCase keys before delegating."""
        DomainConsole.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        """Dict ``get``; warns on camelCase keys before delegating."""
        DomainConsole.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 target_port: str,
                 type: str,
                 source_host: Optional[str] = None,
                 source_path: Optional[str] = None,
                 source_service: Optional[str] = None,
                 target_type: Optional[str] = None):
        """
        :param str target_port: Target port
        :param str type: Console device type. Valid values are "pty" and "tcp".
        :param str source_host: IP address to listen on. Defaults to 127.0.0.1.
        :param str source_path: Source path
        :param str source_service: Port number or a service name. Defaults to a
               random port.
        :param str target_type: for the first console and defaults to `serial`.
               Subsequent `console` blocks must have a different type - usually `virtio`.
        """
        pulumi.set(__self__, "target_port", target_port)
        pulumi.set(__self__, "type", type)
        if source_host is not None:
            pulumi.set(__self__, "source_host", source_host)
        if source_path is not None:
            pulumi.set(__self__, "source_path", source_path)
        if source_service is not None:
            pulumi.set(__self__, "source_service", source_service)
        if target_type is not None:
            pulumi.set(__self__, "target_type", target_type)
    @property
    @pulumi.getter(name="targetPort")
    def target_port(self) -> str:
        """
        Target port
        """
        return pulumi.get(self, "target_port")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Console device type. Valid values are "pty" and "tcp".
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="sourceHost")
    def source_host(self) -> Optional[str]:
        """
        IP address to listen on. Defaults to 127.0.0.1.
        """
        return pulumi.get(self, "source_host")
    @property
    @pulumi.getter(name="sourcePath")
    def source_path(self) -> Optional[str]:
        """
        Source path
        """
        return pulumi.get(self, "source_path")
    @property
    @pulumi.getter(name="sourceService")
    def source_service(self) -> Optional[str]:
        """
        Port number or a service name. Defaults to a
        random port.
        """
        return pulumi.get(self, "source_service")
    @property
    @pulumi.getter(name="targetType")
    def target_type(self) -> Optional[str]:
        """
        for the first console and defaults to `serial`.
        Subsequent `console` blocks must have a different type - usually `virtio`.
        """
        return pulumi.get(self, "target_type")
@pulumi.output_type
class DomainCpu(dict):
    """CPU block of a libvirt domain (generated output type)."""
    def __init__(__self__, *,
                 mode: str):
        pulumi.set(__self__, "mode", mode)
    @property
    @pulumi.getter
    def mode(self) -> str:
        """Return the configured CPU ``mode`` value."""
        return pulumi.get(self, "mode")
@pulumi.output_type
class DomainDisk(dict):
    """Disk block of a libvirt domain (generated output type)."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is used instead of the snake_case property."""
        suggest = None
        if key == "blockDevice":
            suggest = "block_device"
        elif key == "volumeId":
            suggest = "volume_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DomainDisk. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        """Dict-style access; warns on camelCase keys before delegating."""
        DomainDisk.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        """Dict ``get``; warns on camelCase keys before delegating."""
        DomainDisk.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 block_device: Optional[str] = None,
                 file: Optional[str] = None,
                 scsi: Optional[bool] = None,
                 url: Optional[str] = None,
                 volume_id: Optional[str] = None,
                 wwn: Optional[str] = None):
        """
        :param str block_device: The path to the host device to use as the block device for this disk.
        :param str file: The filename to use as the block device for this disk (read-only)
        :param bool scsi: Use a scsi controller for this disk. The controller
               model is set to `virtio-scsi`
        :param str url: The http url to use as the block device for this disk (read-only)
        :param str volume_id: The volume id to use for this disk.
        :param str wwn: Specify a WWN to use for the disk if the disk is using
               a scsi controller, if not specified then a random wwn is generated for the disk
        """
        if block_device is not None:
            pulumi.set(__self__, "block_device", block_device)
        if file is not None:
            pulumi.set(__self__, "file", file)
        if scsi is not None:
            pulumi.set(__self__, "scsi", scsi)
        if url is not None:
            pulumi.set(__self__, "url", url)
        if volume_id is not None:
            pulumi.set(__self__, "volume_id", volume_id)
        if wwn is not None:
            pulumi.set(__self__, "wwn", wwn)
    @property
    @pulumi.getter(name="blockDevice")
    def block_device(self) -> Optional[str]:
        """
        The path to the host device to use as the block device for this disk.
        """
        return pulumi.get(self, "block_device")
    @property
    @pulumi.getter
    def file(self) -> Optional[str]:
        """
        The filename to use as the block device for this disk (read-only)
        """
        return pulumi.get(self, "file")
    @property
    @pulumi.getter
    def scsi(self) -> Optional[bool]:
        """
        Use a scsi controller for this disk. The controller
        model is set to `virtio-scsi`
        """
        return pulumi.get(self, "scsi")
    @property
    @pulumi.getter
    def url(self) -> Optional[str]:
        """
        The http url to use as the block device for this disk (read-only)
        """
        return pulumi.get(self, "url")
    @property
    @pulumi.getter(name="volumeId")
    def volume_id(self) -> Optional[str]:
        """
        The volume id to use for this disk.
        """
        return pulumi.get(self, "volume_id")
    @property
    @pulumi.getter
    def wwn(self) -> Optional[str]:
        """
        Specify a WWN to use for the disk if the disk is using
        a scsi controller, if not specified then a random wwn is generated for the disk
        """
        return pulumi.get(self, "wwn")
@pulumi.output_type
class DomainFilesystem(dict):
    """Filesystem block of a libvirt domain (generated output type)."""
    def __init__(__self__, *,
                 source: str,
                 target: str,
                 accessmode: Optional[str] = None,
                 readonly: Optional[bool] = None):
        pulumi.set(__self__, "source", source)
        pulumi.set(__self__, "target", target)
        if accessmode is not None:
            pulumi.set(__self__, "accessmode", accessmode)
        if readonly is not None:
            pulumi.set(__self__, "readonly", readonly)
    @property
    @pulumi.getter
    def source(self) -> str:
        """Return the ``source`` value."""
        return pulumi.get(self, "source")
    @property
    @pulumi.getter
    def target(self) -> str:
        """Return the ``target`` value."""
        return pulumi.get(self, "target")
    @property
    @pulumi.getter
    def accessmode(self) -> Optional[str]:
        """Return the ``accessmode`` value, if one was set."""
        return pulumi.get(self, "accessmode")
    @property
    @pulumi.getter
    def readonly(self) -> Optional[bool]:
        """Return the ``readonly`` value, if one was set."""
        return pulumi.get(self, "readonly")
@pulumi.output_type
class DomainGraphics(dict):
    """Graphics block of a libvirt domain (generated output type)."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is used instead of the snake_case property."""
        suggest = None
        if key == "listenAddress":
            suggest = "listen_address"
        elif key == "listenType":
            suggest = "listen_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DomainGraphics. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        """Dict-style access; warns on camelCase keys before delegating."""
        DomainGraphics.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        """Dict ``get``; warns on camelCase keys before delegating."""
        DomainGraphics.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 autoport: Optional[bool] = None,
                 listen_address: Optional[str] = None,
                 listen_type: Optional[str] = None,
                 type: Optional[str] = None,
                 websocket: Optional[int] = None):
        """
        :param bool autoport: defaults to "yes"
        :param str listen_address: IP Address where the VNC listener should be started if
               `listen_type` is set to `address`. Defaults to 127.0.0.1
        :param str listen_type: "listen type", defaults to "none"
        :param str type: Console device type. Valid values are "pty" and "tcp".
        :param int websocket: Port to listen on for VNC WebSocket functionality (-1 meaning auto-allocation)
        """
        if autoport is not None:
            pulumi.set(__self__, "autoport", autoport)
        if listen_address is not None:
            pulumi.set(__self__, "listen_address", listen_address)
        if listen_type is not None:
            pulumi.set(__self__, "listen_type", listen_type)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if websocket is not None:
            pulumi.set(__self__, "websocket", websocket)
    @property
    @pulumi.getter
    def autoport(self) -> Optional[bool]:
        """
        defaults to "yes"
        """
        return pulumi.get(self, "autoport")
    @property
    @pulumi.getter(name="listenAddress")
    def listen_address(self) -> Optional[str]:
        """
        IP Address where the VNC listener should be started if
        `listen_type` is set to `address`. Defaults to 127.0.0.1
        """
        return pulumi.get(self, "listen_address")
    @property
    @pulumi.getter(name="listenType")
    def listen_type(self) -> Optional[str]:
        """
        "listen type", defaults to "none"
        """
        return pulumi.get(self, "listen_type")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Console device type. Valid values are "pty" and "tcp".
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def websocket(self) -> Optional[int]:
        """
        Port to listen on for VNC WebSocket functionality (-1 meaning auto-allocation)
        """
        return pulumi.get(self, "websocket")
@pulumi.output_type
class DomainNetworkInterface(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "networkId":
suggest = "network_id"
elif key == "networkName":
suggest = "network_name"
elif key == "waitForLease":
suggest = "wait_for_lease"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DomainNetworkInterface. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DomainNetworkInterface.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DomainNetworkInterface.__key_warning(key)
return super().get(key, default)
def | |
"""------------------for AC"""
import enum
import logging
from typing import Optional
from .device import (
UNIT_TEMP_CELSIUS,
UNIT_TEMP_FAHRENHEIT,
Device,
DeviceStatus,
)
from . import (
FEAT_ENERGY_CURRENT,
FEAT_HUMIDITY,
FEAT_HOT_WATER_TEMP,
FEAT_IN_WATER_TEMP,
FEAT_OUT_WATER_TEMP,
)
from .core_exceptions import InvalidRequestError
# Display labels for the wind-direction (vane) features.
LABEL_VANE_HSTEP = "@AC_MAIN_WIND_DIRECTION_STEP_LEFT_RIGHT_W"
LABEL_VANE_VSTEP = "@AC_MAIN_WIND_DIRECTION_STEP_UP_DOWN_W"
LABEL_VANE_HSWING = "@AC_MAIN_WIND_DIRECTION_SWING_LEFT_RIGHT_W"
LABEL_VANE_VSWING = "@AC_MAIN_WIND_DIRECTION_SWING_UP_DOWN_W"
LABEL_VANE_SWIRL = "@AC_MAIN_WIND_DIRECTION_SWIRL_W"
# Control command categories. Two-element lists appear to hold the
# [ThinQ1, ThinQ2] API keywords; see the version handling in the
# device methods below.
AC_CTRL_BASIC = ["Control", "basicCtrl"]
AC_CTRL_WIND_DIRECTION = ["Control", "wDirCtrl"]
AC_CTRL_MISC = ["Control", "miscCtrl"]
# AC_CTRL_SETTING = "settingInfo"
# AC_CTRL_WIND_MODE = "wModeCtrl"
AC_DUCT_ZONE_V1 = "DuctZone"
AC_STATE_POWER_V1 = "InOutInstantPower"
# Capability lookup keys.
SUPPORT_AC_OPERATION_MODE = ["SupportOpMode", "support.airState.opMode"]
SUPPORT_AC_WIND_STRENGTH = ["SupportWindStrength", "support.airState.windStrength"]
SUPPORT_AC_RAC_SUBMODE = ["SupportRACSubMode", "support.racSubMode"]
# Device state keys.
AC_STATE_OPERATION = ["Operation", "airState.operation"]
AC_STATE_OPERATION_MODE = ["OpMode", "airState.opMode"]
AC_STATE_CURRENT_TEMP = ["TempCur", "airState.tempState.current"]
AC_STATE_HOT_WATER_TEMP = ["HotWaterTempCur", "airState.tempState.hotWaterCurrent"]
AC_STATE_IN_WATER_TEMP = ["WaterInTempCur", "airState.tempState.inWaterCurrent"]
AC_STATE_OUT_WATER_TEMP = ["WaterTempCur", "airState.tempState.outWaterCurrent"]
AC_STATE_TARGET_TEMP = ["TempCfg", "airState.tempState.target"]
AC_STATE_WIND_STRENGTH = ["WindStrength", "airState.windStrength"]
AC_STATE_WDIR_HSTEP = ["WDirHStep", "airState.wDir.hStep"]
AC_STATE_WDIR_VSTEP = ["WDirVStep", "airState.wDir.vStep"]
AC_STATE_WDIR_HSWING = ["WDirLeftRight", "airState.wDir.leftRight"]
AC_STATE_WDIR_VSWING = ["WDirUpDown", "airState.wDir.upDown"]
AC_STATE_POWER = [AC_STATE_POWER_V1, "airState.energy.onCurrent"]
AC_STATE_HUMIDITY = ["SensorHumidity", "airState.humidity.current"]
AC_STATE_DUCT_ZONE = ["DuctZoneType", "airState.ductZone.state"]
# Command descriptors: [control category, command, state key].
CMD_STATE_OPERATION = [AC_CTRL_BASIC, "Set", AC_STATE_OPERATION]
CMD_STATE_OP_MODE = [AC_CTRL_BASIC, "Set", AC_STATE_OPERATION_MODE]
CMD_STATE_TARGET_TEMP = [AC_CTRL_BASIC, "Set", AC_STATE_TARGET_TEMP]
CMD_STATE_WIND_STRENGTH = [AC_CTRL_BASIC, "Set", AC_STATE_WIND_STRENGTH]
CMD_STATE_WDIR_HSTEP = [AC_CTRL_WIND_DIRECTION, "Set", AC_STATE_WDIR_HSTEP]
CMD_STATE_WDIR_VSTEP = [AC_CTRL_WIND_DIRECTION, "Set", AC_STATE_WDIR_VSTEP]
CMD_STATE_WDIR_HSWING = [AC_CTRL_WIND_DIRECTION, "Set", AC_STATE_WDIR_HSWING]
CMD_STATE_WDIR_VSWING = [AC_CTRL_WIND_DIRECTION, "Set", AC_STATE_WDIR_VSWING]
CMD_STATE_DUCT_ZONES = [
    AC_CTRL_MISC, "Set", [AC_DUCT_ZONE_V1, "airState.ductZone.control"]
]
CMD_ENABLE_EVENT_V2 = ["allEventEnable", "Set", "airState.mon.timeout"]
# AC_STATE_CURRENT_HUMIDITY_V2 = "airState.humidity.current"
# AC_STATE_AUTODRY_MODE_V2 = "airState.miscFuncState.autoDry"
# AC_STATE_AIRCLEAN_MODE_V2 = "airState.wMode.airClean"
# AC_STATE_FILTER_MAX_TIME_V2 = "airState.filterMngStates.maxTime"
# AC_STATE_FILTER_REMAIN_TIME_V2 = "airState.filterMngStates.useTime"
# Temperature limits (degrees in the device's native unit).
DEFAULT_MIN_TEMP = 16
DEFAULT_MAX_TEMP = 30
MIN_AWHP_TEMP = 5
MAX_AWHP_TEMP = 80
TEMP_STEP_WHOLE = 1.0
TEMP_STEP_HALF = 0.5
ADD_FEAT_POLL_INTERVAL = 300 # 5 minutes
# Duct zone state encoding.
ZONE_OFF = "0"
ZONE_ON = "1"
ZONE_ST_CUR = "current"
ZONE_ST_NEW = "new"
_LOGGER = logging.getLogger(__name__)
class ACOp(enum.Enum):
    """Whether a device is on or off.

    Dual-fan models expose per-fan variants (RIGHT_ON / LEFT_ON / ALL_ON)
    in addition to, or instead of, the plain ON value.
    """
    OFF = "@AC_MAIN_OPERATION_OFF_W"
    ON = "@AC_MAIN_OPERATION_ON_W"
    RIGHT_ON = "@AC_MAIN_OPERATION_RIGHT_ON_W" # Right fan only.
    LEFT_ON = "@AC_MAIN_OPERATION_LEFT_ON_W" # Left fan only.
    ALL_ON = "@AC_MAIN_OPERATION_ALL_ON_W" # Both fans (or only fan) on.
class ACMode(enum.Enum):
    """The operation mode for an AC/HVAC device.

    Values are the device-side label strings used by the SmartThinQ API.
    """
    COOL = "@AC_MAIN_OPERATION_MODE_COOL_W"
    DRY = "@AC_MAIN_OPERATION_MODE_DRY_W"
    FAN = "@AC_MAIN_OPERATION_MODE_FAN_W"
    HEAT = "@AC_MAIN_OPERATION_MODE_HEAT_W"
    ACO = "@AC_MAIN_OPERATION_MODE_ACO_W"
    AI = "@AC_MAIN_OPERATION_MODE_AI_W"
    AIRCLEAN = "@AC_MAIN_OPERATION_MODE_AIRCLEAN_W"
    AROMA = "@AC_MAIN_OPERATION_MODE_AROMA_W"
    ENERGY_SAVING = "@AC_MAIN_OPERATION_MODE_ENERGY_SAVING_W"
    ENERGY_SAVER = "@AC_MAIN_OPERATION_MODE_ENERGY_SAVER_W"
class ACFanSpeed(enum.Enum):
    """The fan speed for an AC/HVAC device.

    R_* / L_* members are per-fan speeds for dual-fan models.
    """
    SLOW = "@AC_MAIN_WIND_STRENGTH_SLOW_W"
    SLOW_LOW = "@AC_MAIN_WIND_STRENGTH_SLOW_LOW_W"
    LOW = "@AC_MAIN_WIND_STRENGTH_LOW_W"
    LOW_MID = "@AC_MAIN_WIND_STRENGTH_LOW_MID_W"
    MID = "@AC_MAIN_WIND_STRENGTH_MID_W"
    MID_HIGH = "@AC_MAIN_WIND_STRENGTH_MID_HIGH_W"
    HIGH = "@AC_MAIN_WIND_STRENGTH_HIGH_W"
    POWER = "@AC_MAIN_WIND_STRENGTH_POWER_W"
    AUTO = "@AC_MAIN_WIND_STRENGTH_AUTO_W"
    NATURE = "@AC_MAIN_WIND_STRENGTH_NATURE_W"
    R_LOW = "@AC_MAIN_WIND_STRENGTH_LOW_RIGHT_W"
    R_MID = "@AC_MAIN_WIND_STRENGTH_MID_RIGHT_W"
    R_HIGH = "@AC_MAIN_WIND_STRENGTH_HIGH_RIGHT_W"
    L_LOW = "@AC_MAIN_WIND_STRENGTH_LOW_LEFT_W"
    L_MID = "@AC_MAIN_WIND_STRENGTH_MID_LEFT_W"
    L_HIGH = "@AC_MAIN_WIND_STRENGTH_HIGH_LEFT_W"
class ACVStepMode(enum.Enum):
    """The vertical step mode for an AC/HVAC device.

    Blades are numbered vertically from 1 (topmost) to 6 (bottom);
    the special value 100 means "all" (swing).
    """
    Off = "@OFF"
    Top = "@1"
    MiddleTop1 = "@2"
    MiddleTop2 = "@3"
    MiddleBottom2 = "@4"
    MiddleBottom1 = "@5"
    Bottom = "@6"
    Swing = "@100"
class ACHStepMode(enum.Enum):
    """The horizontal step mode for an AC/HVAC device.

    Blades are numbered horizontally from 1 (leftmost) to 5 (rightmost).
    The left half covers positions 1-3 (value 13), the right half 3-5
    (value 35), and 100 means "all" (swing).
    """
    Off = "@OFF"
    Left = "@1"
    MiddleLeft = "@2"
    Center = "@3"
    MiddleRight = "@4"
    Right = "@5"
    LeftHalf = "@13"
    RightHalf = "@35"
    Swing = "@100"
class ACSwingMode(enum.Enum):
    """The swing (continuous oscillation) mode for an AC/HVAC device."""
    SwingOff = "@OFF"
    SwingOn = "@ON"
class AirConditionerDevice(Device):
"""A higher-level interface for a AC."""
    def __init__(self, client, device, temp_unit=UNIT_TEMP_CELSIUS):
        """Initialize the AC device wrapper.

        :param client: the SmartThinQ client used for API access.
        :param device: the device descriptor to wrap.
        :param temp_unit: UNIT_TEMP_CELSIUS or UNIT_TEMP_FAHRENHEIT; any
            other value falls back to Celsius.
        """
        super().__init__(client, device, AirConditionerStatus(self, None))
        self._temperature_unit = (
            UNIT_TEMP_FAHRENHEIT if temp_unit == UNIT_TEMP_FAHRENHEIT else UNIT_TEMP_CELSIUS
        )
        # Lazily-populated capability caches: None until first queried.
        self._is_air_to_water = None
        self._supported_operation = None
        self._supported_op_modes = None
        self._supported_fan_speeds = None
        self._supported_horizontal_steps = None
        self._supported_horizontal_swings = None
        self._supported_vertical_steps = None
        self._supported_vertical_swings = None
        self._temperature_range = None
        self._temperature_step = TEMP_STEP_WHOLE
        # Duct zone cache keyed by zone id string.
        self._duct_zones = {}
        self._current_power = 0
        self._current_power_supported = True
        # Fahrenheit<->Celsius lookup tables, built on demand.
        self._f2c_map = None
        self._c2f_map = None
    def _f2c(self, value):
        """Convert a Fahrenheit value to Celsius via the device's own table.

        Unbelievably, SmartThinQ devices have their own lookup tables
        for mapping the two temperature scales. You can get *close* by
        using a real conversion between the two temperature scales, but
        precise control requires using the custom LUT.

        When the device is configured for Celsius, or the value has no
        table entry, *value* is returned unchanged.
        """
        if self._temperature_unit == UNIT_TEMP_CELSIUS:
            return value
        if self._f2c_map is None:
            mapping = self.model_info.value("TempFahToCel").options
            self._f2c_map = {int(f): c for f, c in mapping.items()}
        return self._f2c_map.get(value, value)
def conv_temp_unit(self, value):
"""Get an inverse mapping from Celsius to Fahrenheit.
Just as unbelievably, this is not exactly the inverse of the
`f2c` map. There are a few values in this reverse mapping that
are not in the other.
"""
if self._temperature_unit == UNIT_TEMP_CELSIUS:
return float(value)
if self._c2f_map is None:
mapping = self.model_info.value("TempCelToFah").options
out = {}
for c, f in mapping.items():
try:
c_num = int(c)
except ValueError:
c_num = float(c)
out[c_num] = f
self._c2f_map = out
return self._c2f_map.get(value, value)
def _adjust_temperature_step(self, target_temp):
if self._temperature_step != TEMP_STEP_WHOLE:
return
if target_temp is None:
return
if int(target_temp) != target_temp:
self._temperature_step = TEMP_STEP_HALF
def _get_supported_operations(self):
"""Get a list of the ACOp Operations the device supports."""
if not self._supported_operation:
key = self._get_state_key(AC_STATE_OPERATION)
mapping = self.model_info.value(key).options
self._supported_operation = [ACOp(o) for o in mapping.values()]
return self._supported_operation
    def _supported_on_operation(self):
        """Get the most correct "On" operation the device supports.

        :returns: an ACOp member.
        :raises ValueError: If ALL_ON is not supported, but there are
            multiple supported ON operations. If a model raises this,
            its behaviour needs to be determined so this function can
            make a better decision.
        """
        # Work on a copy so the cached support list is not mutated.
        operations = self._get_supported_operations().copy()
        operations.remove(ACOp.OFF)
        # This ON operation appears to be supported in newer AC models
        if ACOp.ALL_ON in operations:
            return ACOp.ALL_ON
        # This ON operation appears to be supported in V2 AC models, to check
        if ACOp.ON in operations:
            return ACOp.ON
        # Older models, or possibly just the LP1419IVSM, do not support ALL_ON,
        # instead advertising only a single operation of RIGHT_ON.
        # Thus, if there's only one ON operation, we use that.
        if len(operations) == 1:
            return operations[0]
        # Hypothetically, the API could return multiple ON operations, neither
        # of which are ALL_ON. This will raise in that case, as we don't know
        # what that model will expect us to do to turn everything on.
        # Or, this code will never actually be reached! We can only hope. :)
        raise ValueError(
            f"could not determine correct 'on' operation:"
            f" too many reported operations: '{str(operations)}'"
        )
    def _get_temperature_range(self):
        """Get valid temperature range for model.

        :returns: a two-element list ``[min, max]`` in the device's native
            unit, or None when model info is not yet available.
        """
        if not self._temperature_range:
            if not self.model_info:
                return None
            if self.is_air_to_water:
                # Air-to-water heat pumps use a fixed, wider range.
                min_temp = MIN_AWHP_TEMP
                max_temp = MAX_AWHP_TEMP
            else:
                key = self._get_state_key(AC_STATE_TARGET_TEMP)
                range_info = self.model_info.value(key)
                if not range_info:
                    min_temp = DEFAULT_MIN_TEMP
                    max_temp = DEFAULT_MAX_TEMP
                else:
                    # NOTE(review): this widens the range to at least the
                    # defaults (min of mins / max of maxes) rather than
                    # clamping to the model-reported limits -- confirm
                    # intentional.
                    min_temp = min(range_info.min, DEFAULT_MIN_TEMP)
                    max_temp = max(range_info.max, DEFAULT_MAX_TEMP)
            self._temperature_range = [min_temp, max_temp]
        return self._temperature_range
def _is_vane_mode_supported(self, mode):
"""Check if a specific vane mode is supported."""
supp_key = self._get_state_key(SUPPORT_AC_RAC_SUBMODE)
if not self.model_info.enum_value(supp_key, mode):
return False
return True
    def is_duct_zone_enabled(self, zone: str) -> bool:
        """Return True if the given zone id is present in the cached duct zones."""
        return zone in self._duct_zones
def get_duct_zone(self, zone: str) -> bool:
"""Get the status for a specific zone"""
if zone not in self._duct_zones:
return False
cur_zone = self._duct_zones[zone]
if ZONE_ST_NEW in cur_zone:
return cur_zone[ZONE_ST_NEW] == ZONE_ON
return cur_zone[ZONE_ST_CUR] == ZONE_ON
    def set_duct_zone(self, zone: str, status: bool):
        """Request a new ON/OFF status for a specific duct zone.

        The change is only recorded locally (under the "new" key); it is
        pushed to the device by the next update_duct_zones() call.
        Unknown zone ids are ignored.
        """
        if zone not in self._duct_zones:
            return
        self._duct_zones[zone][ZONE_ST_NEW] = ZONE_ON if status else ZONE_OFF
@property
def duct_zones(self) -> list:
"""Return a list of available duct zones"""
return [key for key in self._duct_zones]
    def update_duct_zones(self):
        """Update the current duct zones status.

        Merges any locally requested zone states ("new") into the cached
        view and, when at least one zone actually changed, sends the new
        configuration to the device.
        """
        states = self._get_duct_zones()
        if not states:
            return
        duct_zones = {}
        send_update = False
        for zone, state in states.items():
            cur_status = state[ZONE_ST_CUR]
            new_status = None
            if zone in self._duct_zones:
                new_status = self._duct_zones[zone].get(ZONE_ST_NEW)
                if new_status and new_status != cur_status:
                    send_update = True
            # Prefer the pending requested state over the reported one.
            duct_zones[zone] = {ZONE_ST_CUR: new_status or cur_status}
        self._duct_zones = duct_zones
        if send_update:
            self._set_duct_zones(duct_zones)
def _get_duct_zones(self) -> dict:
"""Get the status of the zones (for ThinQ1 only zone configured).
return value is a dict with this format:
- key: The zone index. A string containing a number
- value: another dict with:
- key: "current"
- value: "1" if zone is ON else "0"
"""
# first check if duct is supported
if not self._status:
return {}
duct_state = self._status.duct_zones_state
if not duct_state:
return {}
# get real duct zones states
"""
For ThinQ2 we transform the value in the status in binary
and than we create the result. We always have 8 duct zone.
"""
if not self._should_poll:
bin_arr = [x for x in reversed(f"{duct_state:08b}")]
return {
str(v+1): {ZONE_ST_CUR: k} for v, k in enumerate(bin_arr)
}
"""
For ThinQ1 devices result is a list of dicts with | |
<filename>appengine/flexible/hello_world/app/firestore_service.py
import datetime
import firebase_admin
from flask import session
from firebase_admin import firestore
# Module-level Firebase app and Firestore client, shared by all helpers below.
app = firebase_admin.initialize_app()
db = firestore.client()
# NOTE: document_ref.get() returns a single snapshot, while
# collection.stream() returns a generator of snapshots.
#
# RESERVATIONS
#
def put_reservation(reservation):
    """Create a new reservation document and return its reference.

    :param reservation: mapping with 'address', 'date', 'hour', 'group1'
        (products) and 'group2' (reference) keys.
    """
    new_ref = db.collection('reservations').document()
    payload = {
        'address': reservation['address'],
        'date': reservation['date'],
        'hour': reservation['hour'],
        'createDate': datetime.datetime.now(),
        'customer': "",
        'products': reservation['group1'],
        'reference': reservation['group2'],
        'state': 'new',
    }
    new_ref.set(payload)
    return new_ref
def put_customer_into_reservation(reservation, formData):
    """Attach customer details to an existing reservation document.

    :param reservation: id of the reservation document to update.
    :param formData: customer data stored under the 'customer' field.
    :returns: the reservation DocumentReference.
    """
    print( reservation, formData )
    reservation__ref = db.collection('reservations').document(reservation)
    # merge=True writes only the 'customer' field; a plain set() would
    # replace the whole document and destroy the address/date/products
    # fields created by put_reservation().
    reservation__ref.set({
        'customer' : formData,
    }, merge=True)
    return reservation__ref
def get_reservation(id):
    """Fetch a reservation document snapshot by id.

    NOTE: the parameter shadows the ``id`` builtin; kept for caller
    compatibility.
    """
    return db.collection('reservations').document(id).get()
#
# USERS
#
def get_all_users():
    """Stream every user document in the current session's tenant.

    Returns a generator of document snapshots.
    """
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    return tenant_ref.collection('users').stream()
def get_user(username):
    """Fetch a user document snapshot from the top-level 'users' collection."""
    return db.collection('users').document(username).get()
def get_user_with_tenant(username, tenant):
    """
    Return single user from DB using username as ID, scoped to *tenant*.
    """
    tenant_ref = db.collection(session['type__of__tenant']).document(tenant)
    return tenant_ref.collection('users').document(username).get()
def user_put(user__data):
    """Create or overwrite a user document in the current tenant.

    The document id is the username; stores password, admin flag, tenant,
    gender and full name.
    """
    user_ref = db.collection(session['type__of__tenant']).document(session['tenant']).collection('users').document(user__data.username)
    # The previous code contained a redacted "<PASSWORD>" placeholder here,
    # which is not valid Python; store the submitted password instead.
    # TODO(review): hash the password before persisting -- storing
    # plaintext credentials is unsafe.
    user_ref.set({'password': user__data.password, 'admin': False, 'tenant': session['tenant'], 'gender': user__data.gender, 'fullname': user__data.fullname})
def user_put_into_newsletter(email,tenant):
    """
    Add email to newsLetter list in BD
    """
    # Document id is the e-mail address; the tenant is stored as a field.
    user_ref = db.collection('newsletter').document(email)
    user_ref.set({'tenant':tenant})
#
# RECIPES
#
def get_recipes():
    """Stream every recipe document in the current tenant (generator)."""
    #return db.collection(u'collection').where(u'capital', u'==', True).stream()
    return db.collection(session['type__of__tenant']).document(session['tenant']).collection('recipes').stream()
def get_recipe(recipe):
    """Fetch a single recipe document snapshot by title.

    :returns: the DocumentSnapshot, or None if the lookup raised NotFound.
        Note that get() on a missing document normally returns a snapshot
        with ``exists == False`` rather than raising.
    """
    # Imported locally: this module never imported the ``google`` package,
    # so the previous ``except google.cloud.exceptions.NotFound`` clause
    # would itself have raised NameError had it ever been reached.
    from google.cloud import exceptions as gcloud_exceptions
    doc_ref = db.collection(session['type__of__tenant']).document(session['tenant']).collection(u'recipes').document(recipe)
    try:
        doc = doc_ref.get()
    except gcloud_exceptions.NotFound:
        print('No such document!')
        doc = None
    return doc
def get_recipe_ingredients(recipe):
    """Stream the 'ingredients' subcollection of the given recipe (generator)."""
    return db.collection(session['type__of__tenant']).document(session['tenant']).collection(u'recipes').document(recipe).collection('ingredients').stream()
def recipe_put(recipe):
    """Create or overwrite a recipe document and its ingredients subcollection.

    :param recipe: object with title, description, instructions, servings,
        imageURL, product and an optional ``ingredients`` mapping of
        {ingredient_name: ingredient_fields}.
    """
    # The tenant/recipe path is built once instead of being repeated, and
    # the large block of commented-out experimental code has been removed.
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    recipe_ref = tenant_ref.collection('recipes').document(recipe.title)
    recipe_ref.set({
        'description' : recipe.description,
        'instructions' : recipe.instructions,
        'servings' : recipe.servings,
        'imageURL' : recipe.imageURL,
        'product' : recipe.product,
    })
    if recipe.ingredients is not None:
        ingredients_ref = recipe_ref.collection('ingredients')
        for name, fields in recipe.ingredients.items():
            ingredients_ref.document(name).set(fields)
def recipe_update(recipe, old_recipe=None):
    """Replace an existing recipe document and rebuild its ingredients."""
    if old_recipe is None:
        # Overwrite the recipe document, wipe the ingredients subcollection,
        # then re-create it from the submitted data.
        tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
        recipe_ref = tenant_ref.collection('recipes').document(recipe.title)
        recipe_ref.set(
            {
                'description' : recipe.description,
                'instructions' : recipe.instructions,
                'servings' : recipe.servings,
                'imageURL' : recipe.imageURL,
                'product' : recipe.product,
            }
        )
        ingredients_ref = tenant_ref.collection('recipes').document(recipe.title).collection('ingredients')
        delete_collection(ingredients_ref, 100, 0)
        if recipe.ingredients is not None:
            for name, fields in recipe.ingredients.items():
                ingredients_ref.document(name).set(fields)
    else:
        ## TODO: delete old_recipe and call recipe_put(recipe):
        pass
def delete_collection(coll_ref, batch_size, counter):
    """Recursively delete every document in *coll_ref* in batches.

    :param coll_ref: the collection reference to empty.
    :param batch_size: number of deletes per batched commit (must be <= 500,
        the Firestore per-batch write limit).
    :param counter: running total of documents deleted so far (for logging).
    """
    batch = db.batch()
    init_counter=counter
    # Fetch only as many documents as this pass will delete. The previous
    # hard-coded limit(500) fetched up to 500 documents but committed and
    # recursed after batch_size deletions, discarding (and re-reading) the
    # rest on every pass.
    docs = coll_ref.limit(batch_size).get()
    deleted = 0
    for doc in docs:
        batch.delete(doc.reference)
        deleted = deleted + 1
        if deleted >= batch_size:
            new_counter= init_counter + deleted
            batch.commit()
            print("potentially deleted: " + str(new_counter))
            return delete_collection(coll_ref, batch_size, new_counter)
    # Fewer than batch_size documents remained: commit the tail and stop.
    batch.commit()
#
# INGREDIENTS
#
def get_list_ingredients():
    """Stream every ingredient document belonging to the active tenant."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    return tenant_ref.collection('ingredients').stream()
def get_ingredient(ingredient):
    """Return the snapshot of one ingredient document, or None on NotFound."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    ingredient_ref = tenant_ref.collection(u'ingredients').document(ingredient)
    try:
        return ingredient_ref.get()
    except google.cloud.exceptions.NotFound:
        print('No such document!')
        return None
def put_ingredient(ingredient):
    """Create or overwrite the ingredient document keyed by its title."""
    fields = {
        'price': ingredient.price,
        'quantity': ingredient.quantity,
        'unit': ingredient.unit,
        'is_gluten_free': ingredient.is_gluten_free,
    }
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    tenant_ref.collection('ingredients').document(ingredient.title).set(fields)
def update_ingredient(ingredient, old_ingredient=None):
    """Overwrite an ingredient document in place.

    Renaming (``old_ingredient`` supplied) is not implemented yet.
    """
    if old_ingredient is not None:
        ## TODO: delete old_ingredient and call put_ingredient(ingredient):
        return
    # Same write as creation: set() replaces the whole document.
    put_ingredient(ingredient)
#
#GUESTS
#
def get_guest(email):
    """Fetch the guest document snapshot keyed by *email*."""
    guest_ref = db.collection('guest').document(email)
    return guest_ref.get()
def guest_put(guest):
    """Create or overwrite a guest document keyed by the guest's email."""
    guest_ref = db.collection('guest').document(guest.email)
    guest_ref.set({'email': guest.email, 'name': guest.name, 'phone': guest.phone})
#
# ORDERS
#
def get_list_orders():
    """Stream every order document belonging to the active tenant."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    return tenant_ref.collection('orders').stream()
def get_order(id):
    """Return the snapshot of one order document, or None on NotFound.

    (Parameter name ``id`` shadows the builtin but is kept for caller
    compatibility.)
    """
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    order_ref = tenant_ref.collection(u'orders').document(id)
    try:
        return order_ref.get()
    except google.cloud.exceptions.NotFound:
        print('No such document!')
        return None
def get_order_products(orderID):
    """Stream the product documents attached to order *orderID*."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    order_ref = tenant_ref.collection(u'orders').document(orderID)
    return order_ref.collection('products').stream()
def put_order(order):
    """Create a new order with an auto-generated id and return that id.

    Products, when present, become documents of the order's ``products``
    subcollection.
    """
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    order_ref = tenant_ref.collection('orders').document()
    order_ref.set({
        'store': order.store,
        'createdDate': datetime.datetime.now(),
        'deliveryDate': order.deliveryDate,
    })
    if order.products is not None:
        products_ref = order_ref.collection('products')
        for product_id, product_fields in order.products.items():
            products_ref.document(product_id).set(product_fields)
    return order_ref.id
#
# STORES
#
def get_list_stores():
    """Stream every store document belonging to the active tenant."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    return tenant_ref.collection('stores').stream()
def get_store(id):
    """Return the snapshot of one store document, or None on NotFound."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    store_ref = tenant_ref.collection(u'stores').document(id)
    try:
        return store_ref.get()
    except google.cloud.exceptions.NotFound:
        print('No such document!')
        return None
def put_store(store):
    """Add a new store document with an auto-generated id."""
    contact_fields = {
        'name': store.name,
        'address': store.address,
        'contactNumber': store.contactNumber,
        'email': store.email,
        'telegram': store.telegram,
        'instagram': store.instagram,
    }
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    tenant_ref.collection('stores').add(contact_fields)
def update_store(store, old_store=None):
    """Overwrite the store document ``stores/<store.storeID>``.

    Re-keying (``old_store`` supplied) is not implemented yet.
    """
    if old_store is not None:
        ## TODO: delete old_store and call put_store(store):
        return
    contact_fields = {
        'name': store.name,
        'address': store.address,
        'contactNumber': store.contactNumber,
        'email': store.email,
        'telegram': store.telegram,
        'instagram': store.instagram,
    }
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    tenant_ref.collection('stores').document(store.storeID).set(contact_fields)
#
# VENDORS
#
def get_list_vendors():
    """Stream every vendor document belonging to the active tenant."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    return tenant_ref.collection('vendors').stream()
def get_vendor(id):
    """Return the snapshot of one vendor document, or None on NotFound."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    vendor_ref = tenant_ref.collection(u'vendors').document(id)
    try:
        return vendor_ref.get()
    except google.cloud.exceptions.NotFound:
        print('No such document!')
        return None
def put_vendor(vendor):
    """Add a new vendor document with an auto-generated id."""
    contact_fields = {
        'name': vendor.name,
        'address': vendor.address,
        'contactNumber': vendor.contactNumber,
        'email': vendor.email,
        'telegram': vendor.telegram,
        'instagram': vendor.instagram,
    }
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    tenant_ref.collection('vendors').add(contact_fields)
def update_vendor(vendor, old_vendor=None):
    """Overwrite the vendor document ``vendors/<vendor.vendorID>``.

    Re-keying (``old_vendor`` supplied) is not implemented yet.
    """
    if old_vendor is not None:
        ## TODO: delete old_vendor and call put_vendor(vendor):
        return
    contact_fields = {
        'name': vendor.name,
        'address': vendor.address,
        'contactNumber': vendor.contactNumber,
        'email': vendor.email,
        'telegram': vendor.telegram,
        'instagram': vendor.instagram,
    }
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    tenant_ref.collection('vendors').document(vendor.vendorID).set(contact_fields)
#
#INVENTORY
#
def get_inventory_products():
    """Stream inventory documents whose 'type' field equals 'product'."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    return tenant_ref.collection('inventory').where(u'type', u'==', u'product').stream()
def get_inventory_ingredients():
    """Stream inventory documents whose 'type' field equals 'ingredient'."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    return tenant_ref.collection('inventory').where(u'type', u'==', u'ingredient').stream()
def get_inventory_product_info(productID):
    """Return the inventory document snapshot for *productID*."""
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    return tenant_ref.collection('inventory').document(productID).get()
def add_inventory(inventory):
    """Create or overwrite an inventory document keyed by ``inventory.id``."""
    stock_fields = {
        'name': inventory.name,
        'quantity': inventory.quantity,
        'type': inventory.typeof,
    }
    tenant_ref = db.collection(session['type__of__tenant']).document(session['tenant'])
    tenant_ref.collection('inventory').document(inventory.id).set(stock_fields)
def delete_inventory(product):
    """Delete one inventory document by id.

    Best-effort: a missing document is reported, never raised.
    """
    try:
        doc_ref = db.collection(session['type__of__tenant']).document(session['tenant']).collection(u'inventory').document(product)
        doc_ref.delete()
    except google.cloud.exceptions.NotFound:
        # NOTE(review): current Firestore clients treat delete() of a
        # missing document as a no-op rather than raising NotFound; the
        # guard is kept for consistency with the rest of this module.
        # (The previous dead `doc = None` assignment was removed.)
        print('No such document!')
#
#TENANT
#
def create_demo_tenant_and_demo_user(email, password, typeTenant='VENDOR'):
    """
    Create new demo tenant in sandbox collection

    Copies the ``createTenant/<typeTenant>`` template document plus its
    ADMIN user template into ``sandbox/<email>``, customising both with
    the supplied email/password.  The demo tenant and its user are both
    keyed by *email*.
    """
    # SECURITY NOTE(review): the password is stored in plaintext in
    # Firestore -- confirm this is acceptable for demo accounts.
    template_ref = db.collection('createTenant').document(typeTenant).get()
    admin_template_ref = db.collection('createTenant').document(typeTenant).collection('users').document('ADMIN').get()
    tenant_data = template_ref.to_dict()
    user_data = admin_template_ref.to_dict()
    tenant_data['name'] = email
    user_data['fullname'] = email
    user_data['tenant'] = email
    user_data['password'] = password
    # set() returns a WriteResult we don't need (the old code bound it to
    # unused locals).
    db.collection('sandbox').document(email).set(tenant_data)
    db.collection('sandbox').document(email).collection('users').document(email).set(user_data)
def get_tenat_info(tenant):
    """Return the tenant document snapshot for *tenant*.

    (Function name keeps the historical 'tenat' typo for caller
    compatibility.)
    """
    tenant_ref = db.collection(session['type__of__tenant']).document(tenant)
    return tenant_ref.get()
#
# CAUTION: just for admin and sandbox trigger
#
def import__export_data():
    """Placeholder for an admin bulk copy between collections.

    Currently a no-op; the sketched implementation below would stream the
    'ADMIN' collection into ``tenant/ARIANI``.
    """
    pass
    # from_ref= db.collection('ADMIN').stream()
    # to_ref = db.collection('tenant').document('ARIANI')
    # for doc in from_ref:
    #     to_ref.document(doc.id).set(doc.to_dict())
def backend_only_create_tenant(newTenant, typeTenant='VENDOR'):
    """
    Create new Tenant

    Copies the ``createTenant/<typeTenant>`` template and its ADMIN user
    template into ``sandbox/<newTenant>``.  Always returns True.
    """
    # TODO: check the tenant does NOT already exist first (we must never
    # overwrite a whole customer's data!)
    # Read the VENDOR/STORE template plus the ADMIN user template from its
    # 'users' subcollection.
    template_ref = db.collection('createTenant').document(typeTenant).get()
    admin_template_ref = db.collection('createTenant').document(typeTenant).collection('users').document('ADMIN').get()
    tenant_data = template_ref.to_dict()
    user_data = admin_template_ref.to_dict()
    tenant_data['name'] = newTenant
    user_data['tenant'] = newTenant
    # set() returns a WriteResult we don't need (previously bound to
    # unused locals).
    db.collection('sandbox').document(newTenant).set(tenant_data)
    db.collection('sandbox').document(newTenant).collection('users').document('ADMIN').set(user_data)
    return True
def backend_only_sandbox_reset():
    """
    Restore sandbox state to NEW
    Is time to reboot all this mess
    Version 2

    For every tenant document in the 'sandbox' collection: purge each of
    its known subcollections, then purge the top-level 'sandbox'
    documents themselves.  Always returns True; any error is printed and
    swallowed.
    """
    # Subcollections a sandbox tenant may own.
    subcollection_names = ('recipes', 'ingredients', 'orders',
                           'users', 'inventory', 'stores')
    try:
        tenants = db.collection('sandbox').stream()
        for tenant in tenants:
            print(tenant.id)
            tenant_ref = db.collection('sandbox').document(tenant.id)
            # The original guarded each purge with
            # `if ref.collection(name):`, but a CollectionReference is
            # always truthy, so the guards were no-ops; purge directly.
            # (It also fetched the tenant document into unused locals.)
            for name in subcollection_names:
                delete_collection(tenant_ref.collection(name), 100, 0)
            # NOTE(review): preserved from the original -- this wipes the
            # top-level 'sandbox' documents on EVERY loop iteration, while
            # `tenants` is still being streamed.  Confirm it shouldn't run
            # once, after the loop.
            delete_collection(db.collection('sandbox'), 100, 0)
    except Exception as inst:
        print(type(inst))   # the exception class
        print(inst.args)    # arguments stored in .args
        print(inst)         # __str__ allows args to be printed directly,
        # but may be overridden in exception subclasses
    return True
def backend_only_sandbox_reset_v1():
"""
Restore sandbox state to NEW
Is time to reboot all this mess
"""
# buscar la referencia a sandbox
# obtener la lista de todos los tenants
# buscar la referencia del tenant
# buscar sus subcolecciones
# para cada subcoleccione, borrar contenido usando delete_collection(recipes_ingredients_ref, 100, 0)
# buscar su tipo de tenant
# buscar refencia al tenant original
# reemplazar tenant por la referencia original
# DONE
try:
tenants = db.collection('sandbox').stream()
for tenant in tenants:
print(tenant.id)
ref = db.collection('sandbox').document(tenant.id)
ref_obj = ref.get()
dicc = ref_obj.to_dict()
typeTenant = dicc['type']
if ref.collection('recipes'):
ref__recipes = ref.collection('recipes')
delete_collection(ref__recipes, 100, 0)
if ref.collection('ingredients'):
ref__ingredients= ref.collection('ingredients')
delete_collection(ref__ingredients, 100, 0)
if ref.collection('orders'):
ref__orders = ref.collection('orders')
delete_collection(ref__orders, 100, 0)
if ref.collection('users'):
ref__users = ref.collection('users')
delete_collection(ref__users, 100, 0)
if ref.collection('inventory'):
ref__inventory = ref.collection('inventory')
delete_collection(ref__inventory, 100, 0)
if ref.collection('stores'):
ref__stores = ref.collection('stores')
delete_collection(ref__stores, 100, 0)
original_ref = db.collection('createTenant').document(typeTenant).get()
admin__user__original_ref = db.collection('createTenant').document(typeTenant).collection('users').document('ADMIN').get()
dicc = original_ref.to_dict()
dicc__user = admin__user__original_ref.to_dict()
dicc['name']=tenant.id
dicc['type']=tenant.id
dicc__user['tenant']=tenant.id
db.collection('sandbox').document(tenant.id).set(dicc)
db.collection('sandbox').document(tenant.id).collection('users').document('ADMIN').set(dicc__user)
except Exception as inst:
print(type(inst)) # the exception | |
import unittest
from bfdpie import *
class Test(unittest.TestCase):
def test_large_vma(self):
b = Binary()
# 32-bit limit test
dis1 = b.disassemble(b"\x90", ARCH_I686, 0x80000000)
dis2 = b.disassemble(b"\x90", ARCH_X86_64, 0x80000000)
self.assertTrue(dis1[0].vma >= 0)
self.assertTrue(dis2[0].vma >= 0)
# 64-bit limit test
dis3 = b.disassemble(b"\x90", ARCH_I686, 0x8000000000000000)
dis4 = b.disassemble(b"\x90", ARCH_X86_64, 0x8000000000000000)
self.assertTrue(dis3[0].vma >= 0)
self.assertTrue(dis4[0].vma >= 0)
def test_arch_i686(self):
# 8048579: 89 e5 mov %esp,%ebp
# 804857b: 53 push %ebx
# 804857c: bb 4c 96 04 08 mov $0x804964c,%ebx
# 8048581: 52 push %edx
b = Binary()
dis = b.disassemble(
b"\x89\xe5" +
b"\x53" +
b"\xbb\x4c\x96\x04\x08" +
b"\x52"
,arch=ARCH_I686
)
self.assertTrue(str(dis[0]) == "mov ebp,esp")
self.assertTrue(str(dis[1]) == "push ebx")
self.assertTrue(str(dis[2]) == "mov ebx,0x804964c")
self.assertTrue(str(dis[3]) == "push edx")
def test_arch_x86_64(self):
# 4006aa: ba 00 04 00 00 mov $0x400,%edx
# 4006af: 48 89 c6 mov %rax,%rsi
# 4006b2: bf 00 00 00 00 mov $0x0,%edi
# 4006b7: b8 00 00 00 00 mov $0x0,%eax
b = Binary()
dis = b.disassemble(
b"\xba\x00\x04\x00\x00" +
b"\x48\x89\xc6" +
b"\xbf\x00\x00\x00\x00" +
b"\xb8\x00\x00\x00\x00",
ARCH_X86_64
)
self.assertTrue(str(dis[0]) == "mov edx,0x400")
self.assertTrue(str(dis[1]) == "mov rsi,rax")
self.assertTrue(str(dis[2]) == "mov edi,0x0")
self.assertTrue(str(dis[3]) == "mov eax,0x0")
def test_arch_armel(self):
# 84c0: e92d4800 push {fp, lr}
# 84c4: e28db004 add fp, sp, #4
# 84c8: e24dd020 sub sp, sp, #32
# 84cc: e24b3024 sub r3, fp, #36 ; 0x24
b = Binary()
dis = b.disassemble(
b"\x00\x48\x2d\xe9" +
b"\x04\xb0\x8d\xe2" +
b"\x20\xd0\x4d\xe2" +
b"\x24\x30\x4b\xe2",
ARCH_ARMEL
)
self.assertTrue(str(dis[0]) == "push {fp, lr}")
self.assertTrue(str(dis[1]) == "add fp, sp, #4")
self.assertTrue(str(dis[2]) == "sub sp, sp, #32")
self.assertTrue(str(dis[3]) == "sub r3, fp, #36 ; 0x24")
def test_arch_armel_thumb(self):
# 84ce: db00 lsls r3, r3, #3
# 84d0: 0020 movs r0, #0
# 84d2: 111c adds r1, r2, #0
# 84d4: 1a1c adds r2, r3, #0
b = Binary()
dis = b.disassemble(
b"\xdb\x00" +
b"\x00\x20" +
b"\x11\x1c" +
b"\x1a\x1c",
ARCH_ARMEL_THUMB
)
self.assertTrue(str(dis[0]) == "lsls r3, r3, #3")
self.assertTrue(str(dis[1]) == "movs r0, #0")
self.assertTrue(str(dis[2]) == "adds r1, r2, #0")
self.assertTrue(str(dis[3]) == "adds r2, r3, #0")
def test_arch_armeb(self):
# 84c0: e92d4800 push {fp, lr}
# 84c4: e28db004 add fp, sp, #4
# 84c8: e24dd020 sub sp, sp, #32
# 84cc: e24b3024 sub r3, fp, #36 ; 0x24
b = Binary()
dis = b.disassemble(
b"\xe9\x2d\x48\x00" +
b"\xe2\x8d\xb0\x04" +
b"\xe2\x4d\xd0\x20" +
b"\xe2\x4b\x30\x24",
ARCH_ARMEB
)
self.assertTrue(str(dis[0]) == "push {fp, lr}")
self.assertTrue(str(dis[1]) == "add fp, sp, #4")
self.assertTrue(str(dis[2]) == "sub sp, sp, #32")
self.assertTrue(str(dis[3]) == "sub r3, fp, #36 ; 0x24")
def test_arch_armeb_thumb(self):
# 84ce: 00db lsls r3, r3, #3
# 84d0: 2000 movs r0, #0
# 84d2: 1c11 adds r1, r2, #0
# 84d4: 1c1a adds r2, r3, #0
b = Binary()
dis = b.disassemble(
b"\x00\xdb" +
b"\x20\x00" +
b"\x1c\x11" +
b"\x1c\x1a",
ARCH_ARMEB_THUMB
)
self.assertTrue(str(dis[0]) == "lsls r3, r3, #3")
self.assertTrue(str(dis[1]) == "movs r0, #0")
self.assertTrue(str(dis[2]) == "adds r1, r2, #0")
self.assertTrue(str(dis[3]) == "adds r2, r3, #0")
def test_arch_mips(self):
# 4009d8: 8fbf001c lw ra,28(sp)
# 4009dc: 00000000 nop
# 4009e0: 03e00008 jr ra
# 4009e4: 27bd0020 addiu sp,sp,32
b = Binary()
dis = b.disassemble(
b"\x8f\xbf\x00\x1c" +
b"\x00\x00\x00\x00" +
b"\x03\xe0\x00\x08" +
b"\x27\xbd\x00\x20",
ARCH_MIPS
)
self.assertTrue(str(dis[0]) == "lw ra,28(sp)")
self.assertTrue(str(dis[1]) == "nop")
self.assertTrue(str(dis[2]) == "jr ra")
self.assertTrue(str(dis[3]) == "addiu sp,sp,32")
def test_arch_mipsel(self):
# 4009d8: 1c00bf8f lw ra,28(sp)
# 4009dc: 00000000 nop
# 4009e0: 0800e003 jr ra
# 4009e4: 2000bd27 addiu sp,sp,32
b = Binary()
dis = b.disassemble(
b"\x1c\x00\xbf\x8f" +
b"\x00\x00\x00\x00" +
b"\x08\x00\xe0\x03" +
b"\x20\x00\xbd\x27",
ARCH_MIPSEL
)
self.assertTrue(str(dis[0]) == "lw ra,28(sp)")
self.assertTrue(str(dis[1]) == "nop")
self.assertTrue(str(dis[2]) == "jr ra")
self.assertTrue(str(dis[3]) == "addiu sp,sp,32")
def test_arch_mips64(self):
# 120000918: 3c1c0002 lui gp,0x2
# 12000091c: 279c843c addiu gp,gp,-31684
# 120000920: 039fe02d daddu gp,gp,ra
# 120000924: df998068 ld t9,-32664(gp)
b = Binary()
dis = b.disassemble(
b"\x3c\x1c\x00\x02" +
b"\x27\x9c\x84\x3c" +
b"\x03\x9f\xe0\x2d" +
b"\xdf\x99\x80\x68",
ARCH_MIPS64
)
self.assertTrue(str(dis[0]) == "lui gp,0x2")
self.assertTrue(str(dis[1]) == "addiu gp,gp,-31684")
self.assertTrue(str(dis[2]) == "daddu gp,gp,ra")
self.assertTrue(str(dis[3]) == "ld t9,-32664(gp)")
def test_arch_mips64el(self):
# 120000918: 02001c3c lui gp,0x2
# 12000091c: 3c849c27 addiu gp,gp,-31684
# 120000920: 2de09f03 daddu gp,gp,ra
# 120000924: 688099df ld t9,-32664(gp)
b = Binary()
dis = b.disassemble(
b"\x02\x00\x1c\x3c" +
b"\x3c\x84\x9c\x27" +
b"\x2d\xe0\x9f\x03" +
b"\x68\x80\x99\xdf",
ARCH_MIPS64EL
)
self.assertTrue(str(dis[0]) == "lui gp,0x2")
self.assertTrue(str(dis[1]) == "addiu gp,gp,-31684")
self.assertTrue(str(dis[2]) == "daddu gp,gp,ra")
self.assertTrue(str(dis[3]) == "ld t9,-32664(gp)")
def test_arch_ppc32(self):
# 1000058c: 80 01 00 14 lwz r0,20(r1)
# 10000590: 38 21 00 10 addi r1,r1,16
# 10000594: 7c 08 03 a6 mtlr r0
# 10000598: 4e 80 00 20 blr
b = Binary()
dis = b.disassemble(
b"\x80\x01\x00\x14" +
b"\x38\x21\x00\x10" +
b"\x7c\x08\x03\xa6" +
b"\x4e\x80\x00\x20",
ARCH_PPC32
)
self.assertTrue(str(dis[0]) == "lwz r0,20(r1)")
self.assertTrue(str(dis[1]) == "addi r1,r1,16")
self.assertTrue(str(dis[2]) == "mtlr r0")
self.assertTrue(str(dis[3]) == "blr")
def test_arch_ppc64(self):
# 100007d4: 38 21 00 70 addi r1,r1,112
# 100007d8: e8 01 00 10 ld r0,16(r1)
# 100007dc: 7c 08 03 a6 mtlr r0
# 100007e0: 4e 80 00 20 blr
b = Binary()
dis = b.disassemble(
b"\x38\x21\x00\x70" +
b"\xe8\x01\x00\x10" +
b"\x7c\x08\x03\xa6" +
b"\x4e\x80\x00\x20",
ARCH_PPC64
)
self.assertTrue(str(dis[0]) == "addi r1,r1,112")
self.assertTrue(str(dis[1]) == "ld r0,16(r1)")
self.assertTrue(str(dis[2]) == "mtlr r0")
self.assertTrue(str(dis[3]) == "blr")
def test_arch_sparc(self):
# 105e4: 9d e3 bf 98 save %sp, -104, %sp
# 105ec: 01 00 00 00 nop
# 105f0: 81 c7 e0 08 ret
# 105f4: 81 e8 00 00 restore
b = Binary()
dis = b.disassemble(
b"\x9d\xe3\xbf\x98" +
b"\x01\x00\x00\x00" +
b"\x81\xc7\xe0\x08" +
b"\x81\xe8\x00\x00",
ARCH_SPARC
)
self.assertTrue(str(dis[0]) == "save %sp, -104, %sp")
self.assertTrue(str(dis[1]) == "nop")
self.assertTrue(str(dis[2]) == "ret")
self.assertTrue(str(dis[3]) == "restore")
def test_arch_sparc64(self):
# 1007a0: 9f c0 40 00 call %g1
# 1007a4: ba 07 7f f8 add %i5, -8, %i5
# 1007a8: c2 5f 40 00 ldx [ %i5 ], %g1
# 1007ac: 80 a0 7f ff cmp %g1, -1
b = Binary()
dis = b.disassemble(
b"\x9f\xc0\x40\x00" +
b"\xba\x07\x7f\xf8" +
b"\xc2\x5f\x40\x00" +
b"\x80\xa0\x7f\xff",
ARCH_SPARC64
)
self.assertTrue(str(dis[0]) == "call %g1")
self.assertTrue(str(dis[1]) == "add %i5, -8, %i5")
self.assertTrue(str(dis[2]) == "ldx [ %i5 ], %g1")
self.assertTrue(str(dis[3]) == "cmp %g1, -1")
def test_arch_sh4(self):
# 400618: 26 4f lds.l @r15+,pr
# 40061a: 0b 00 rts
# 40061c: f6 68 mov.l @r15+,r8
# 40061e: 09 00 nop
b = Binary()
dis = b.disassemble(
b"\x26\x4f" +
b"\x0b\x00" +
b"\xf6\x68" +
b"\x09\x00",
ARCH_SH4
)
self.assertTrue(str(dis[0]) == "lds.l @r15+,pr")
self.assertTrue(str(dis[1]) == "rts")
self.assertTrue(str(dis[2]) == "mov.l @r15+,r8")
self.assertTrue(str(dis[3]) == "nop")
def test_arch_sh4eb(self):
# 400618: 4f 26 lds.l @r15+,pr
# 40061a: 00 0b rts
# 40061c: 68 f6 mov.l @r15+,r8
# 40061e: 00 09 nop
b = Binary()
dis = b.disassemble(
b"\x4f\x26" +
b"\x00\x0b" +
b"\x68\xf6" +
b"\x00\x09",
ARCH_SH4EB
)
self.assertTrue(str(dis[0]) == "lds.l @r15+,pr")
self.assertTrue(str(dis[1]) == "rts")
self.assertTrue(str(dis[2]) == "mov.l @r15+,r8")
self.assertTrue(str(dis[3]) == "nop")
def test_arch_aarch64(self):
# 400624: a9bf7bfd stp x29, x30, [sp,#-16]!
# 400628: 910003fd mov x29, sp
# 40062c: a8c17bfd ldp x29, x30, [sp],#16
# 400630: d65f03c0 ret
b = Binary()
dis = b.disassemble(
b"\xfd\x7b\xbf\xa9" +
b"\xfd\x03\x00\x91" +
b"\xfd\x7b\xc1\xa8" +
b"\xc0\x03\x5f\xd6",
ARCH_AARCH64
)
self.assertTrue(str(dis[0]) == "stp x29, x30, [sp,#-16]!")
self.assertTrue(str(dis[1]) == "mov x29, sp")
self.assertTrue(str(dis[2]) == "ldp x29, x30, [sp],#16")
self.assertTrue(str(dis[3]) == "ret")
def test_arch_alpha(self):
# 1200007e8: 3e 15 c2 43 subq sp,0x10,sp
# 1200007ec: 00 00 5e b7 stq ra,0(sp)
# 1200007f0: 08 00 be b7 stq gp,8(sp)
# 1200007f4: 00 00 fe 2f unop
b = Binary()
dis = b.disassemble(
b"\x3e\x15\xc2\x43" +
b"\x00\x00\x5e\xb7" +
b"\x08\x00\xbe\xb7" +
b"\x00\x00\xfe\x2f",
ARCH_ALPHA
)
self.assertTrue(str(dis[0]) == "subq sp,0x10,sp")
self.assertTrue(str(dis[1]) == "stq ra,0(sp)")
self.assertTrue(str(dis[2]) == "stq gp,8(sp)")
self.assertTrue(str(dis[3]) == "unop")
def test_arch_crisv32(self):
# 80610: 6e0e move.d [$sp+],$r0
# 80612: 31b6 move $r1,$srp
# 80614: 6e1e move.d [$sp+],$r1
# 80616: f0b9 ret
b = Binary()
dis = b.disassemble(
b"\x6e\x0e" +
b"\x31\xb6" +
b"\x6e\x1e" +
b"\xf0\xb9",
ARCH_CRISV32
)
self.assertTrue(str(dis[0]) == "move.d [$sp+],$r0")
self.assertTrue(str(dis[1]) == "move $r1,$srp")
self.assertTrue(str(dis[2]) == "move.d [$sp+],$r1")
self.assertTrue(str(dis[3]) == "ret")
def test_arch_s390x(self):
# 80000724: e3 40 f1 10 00 04 lg %r4,272(%r15)
# 8000072a: eb cf f1 00 00 04 lmg %r12,%r15,256(%r15)
# 80000730: 07 f4 br %r4
# 80000732: 07 07 nopr %r7
b = Binary()
dis = b.disassemble(
b"\xe3\x40\xf1\x10\x00\x04" +
b"\xeb\xcf\xf1\x00\x00\x04" +
b"\x07\xf4" +
b"\x07\x07",
ARCH_S390X
)
self.assertTrue(str(dis[0]) == "lg %r4,272(%r15)")
self.assertTrue(str(dis[1]) == "lmg %r12,%r15,256(%r15)")
self.assertTrue(str(dis[2]) == "br %r4")
self.assertTrue(str(dis[3]) == "nopr %r7")
def test_arch_microblaze(self):
# 10000628: 3021ffe0 addik r1, r1, -32
# | |
# source repository: ratt-ru/codex-africanus
# -*- coding: utf-8 -*-
from collections import namedtuple
import numpy as np
import numba
from numba.experimental import jitclass
import numba.types
from africanus.constants import c as lightspeed
from africanus.util.numba import generated_jit, njit, is_numba_type_none
from africanus.averaging.support import unique_time, unique_baselines
class RowMapperError(Exception):
    """Error raised while constructing the BDA row map."""
    pass
@njit(nogil=True, cache=True)
def erf26(x):
    """Implements 7.1.26 erf approximation from Abramowitz and
    Stegun (1972), pg. 299. Accurate for abs(eps(x)) <= 1.5e-7.

    NOTE(review): for x < 0 the polynomial is evaluated at the raw
    negative x (not |x|) before negating -- callers in this module pass
    non-negative arguments; confirm before using with x < 0.
    """
    # Constants
    p = 0.3275911
    a1 = 0.254829592
    a2 = -0.284496736
    a3 = 1.421413741
    a4 = -1.453152027
    a5 = 1.061405429
    e = 2.718281828  # truncated Euler's number used as exp() base
    # t
    t = 1.0/(1.0 + (p * x))
    # Erf calculation
    erf = 1.0 - (((((a5 * t + a4) * t + a3) * t + a2) * t + a1) * t)
    erf *= e ** -(x ** 2)
    # BUG FIX: the non-negative branch previously used round(erf, 0),
    # truncating the result to 0.0 or 1.0 and destroying the quoted
    # 1.5e-7 accuracy.  Both branches now round to 9 decimal places,
    # preserving erf's odd symmetry.
    return -round(erf, 9) if x < 0 else round(erf, 9)
@njit(nogil=True, cache=True)
def time_decorrelation(u, v, w, max_lm, time_bin_secs, min_wavelength):
    """Estimate the time-smearing decorrelation factor for one baseline,
    given its (u, v, w), the maximum source offset ``max_lm``, the bin
    length in seconds and the band's minimum wavelength."""
    earth_rotation_rate = 7.292118516e-5  # rad/s
    baseline_length = np.sqrt(u**2 + v**2 + w**2)
    diffraction_limit = min_wavelength / baseline_length
    x = max_lm * time_bin_secs * earth_rotation_rate / diffraction_limit
    return 1.0 - 1.0645 * erf26(0.8326*x) / x
_SERIES_COEFFS = (1./40, 107./67200, 3197./24192000, 49513./3973939200)
@njit(nogil=True, cache=True, inline='always')
def inv_sinc(sinc_x, tol=1e-12):
    """Invert the unnormalised sinc: return x such that sin(x)/x == sinc_x
    (to within ``tol``).  Seeds Newton-Raphson with a truncated reverted
    Taylor series.  Raises ValueError when sinc_x > 1.0.
    """
    # Invalid input
    if sinc_x > 1.0:
        raise ValueError("sinc_x > 1.0")
    # Initial guess from reversion of Taylor series
    # https://math.stackexchange.com/questions/3189307/inverse-of-frac-sinxx
    x = t_pow = np.sqrt(6*np.abs((1 - sinc_x)))
    t_squared = t_pow*t_pow
    for coeff in numba.literal_unroll(_SERIES_COEFFS):
        t_pow *= t_squared
        x += coeff * t_pow
    # Use Newton Raphson to go the rest of the way
    # https://www.wolframalpha.com/input/?i=simplify+%28sinc%5Bx%5D+-+c%29+%2F+D%5Bsinc%5Bx%5D%2Cx%5D
    while True:
        # evaluate delta between this iteration sinc(x) and original
        sinx = np.sin(x)
        𝞓sinc_x = (1.0 if x == 0.0 else sinx/x) - sinc_x
        # Stop if converged
        if np.abs(𝞓sinc_x) < tol:
            break
        # Next iteration
        x -= (x*x * 𝞓sinc_x) / (x*np.cos(x) - sinx)
    return x
@njit(nogil=True, cache=True, inline='always')
def factors(n):
    """Return the sorted, unique divisors of the positive integer ``n``
    as a numpy array (trial division up to sqrt(n))."""
    assert n >= 1
    divisors = []
    d = 1
    while d*d <= n:
        quotient, remainder = divmod(n, d)
        if remainder == 0:
            divisors.append(d)
            if quotient != d:
                divisors.append(quotient)
        d += 1
    return np.unique(np.array(divisors))
@njit(nogil=True, cache=True, inline='always')
def max_chan_width(ref_freq, fractional_bandwidth):
    """
    Derive max_𝞓𝝼, the maximum change in bandwidth
    before decorrelation occurs in frequency.

    With fractional bandwidth fb = (fh - fl) / (fh + fl)
    (https://en.wikipedia.org/wiki/Bandwidth_(signal_processing))
    and fh = ref_freq + 𝞓𝝼/2, fl = ref_freq - 𝞓𝝼/2,
    solving for 𝞓𝝼 gives 𝞓𝝼 = 2 * ref_freq * fb.
    """
    max_bandwidth_delta = 2 * ref_freq * fractional_bandwidth
    return max_bandwidth_delta
# Per-bin summary produced by Binner.finalise_bin:
#   tbin     -- output time-bin index
#   time     -- centroid time of the bin
#   interval -- total time span of the bin
#   nchan    -- number of output channels for the bin
#   flag     -- True when every row in the bin was flagged
FinaliseOutput = namedtuple("FinaliseOutput",
                            ["tbin", "time", "interval",
                             "nchan", "flag"])
class Binner(object):
    """Accumulates consecutive rows of a baseline into BDA time bins.

    A bin is grown with :meth:`add_row` until the estimated decorrelation
    exceeds the configured tolerance or the bin spans more than
    ``time_bin_secs``; :meth:`finalise_bin` then emits the averaged bin
    description as a :class:`FinaliseOutput`.
    """
    def __init__(self, row_start, row_end,
                 max_lm, decorrelation, time_bin_secs,
                 max_chan_freq):
        # Index of the time bin to which all rows in the bin will contribute
        self.tbin = 0
        # Number of rows in the bin
        self.bin_count = 0
        # Number of flagged rows in the bin
        self.bin_flag_count = 0
        # Time sum
        self.time_sum = 0.0
        # Interval sum
        self.interval_sum = 0.0
        # Starting row of the bin
        self.rs = row_start
        # Ending row of the bin
        self.re = row_end
        # Sinc of half the baseline speed
        self.bin_half_Δψ = 0.0
        # Maximum band frequency
        self.max_chan_freq = max_chan_freq
        # Quantities cached to make Binner.method arguments smaller
        self.max_lm = max_lm
        # n = sqrt(1 - l^2 - m^2) - 1; -1.0 guards against max_lm > 1
        n = -1.0 if max_lm > 1.0 else np.sqrt(1.0 - max_lm**2) - 1.0
        self.n_max = np.abs(n)
        self.decorrelation = decorrelation
        self.time_bin_secs = time_bin_secs
    def reset(self):
        # Zero all per-bin state while retaining the configuration
        # (max_lm, decorrelation, time_bin_secs, max_chan_freq)
        self.__init__(0, 0, self.max_lm,
                      self.decorrelation,
                      self.time_bin_secs,
                      self.max_chan_freq)
    def start_bin(self, row, time, interval, flag_row):
        """
        Starts a new bin
        """
        self.rs = row
        self.re = row
        self.bin_count = 1
        # Count the starting row as flagged when flag_row marks it
        self.bin_flag_count = (1 if flag_row is not None and flag_row[row] != 0
                               else 0)
    def add_row(self, row, auto_corr, time, interval, uvw, flag_row):
        """
        Attempts to add ``row`` to the current bin.
        Returns
        -------
        success : bool
            True if the decorrelation tolerance was not exceeded
            and the row was added to the bin.
        """
        rs = self.rs
        re = self.re
        if re == row:
            raise ValueError("start_bin should be called to start a bin "
                             "before add_row is called.")
        if auto_corr:
            # Fast path for auto-correlated baseline.
            # By definition, duvw == (0, 0, 0) for these samples
            self.re = row
            self.bin_half_Δψ = self.decorrelation
            self.bin_count += 1
            if flag_row is not None and flag_row[row] != 0:
                self.bin_flag_count += 1
            return True
        # Candidate bin span: leading edge of the first sample to the
        # trailing edge of this one
        time_start = time[rs] - interval[rs] / 2.0
        time_end = time[row] + interval[row] / 2.0
        # Evaluate the degree of decorrelation
        # the sample would add to existing bin
        du = uvw[row, 0] - uvw[rs, 0]
        dv = uvw[row, 1] - uvw[rs, 1]
        dw = uvw[row, 2] - uvw[rs, 2]
        dt = time_end - time_start
        # Half phase delta; the 1.0e-8 avoids a zero division below
        half_𝞓𝞇 = (np.sqrt(du**2 + dv**2 + dw**2) *
                   self.max_chan_freq *
                   np.sin(np.abs(self.max_lm)) *
                   np.pi / lightspeed) + 1.0e-8
        bldecorr = np.sin(half_𝞓𝞇) / half_𝞓𝞇
        # fringe rate at the equator
        # du = uvw[row, 0] - uvw[rs, 0]
        # dv = uvw[row, 1] - uvw[rs, 1]
        # dw = uvw[row, 2] - uvw[rs, 2]
        # max delta phase occurs when duvw lines up with lmn-1.
        # So assume we have an lmn vector such
        # that ||(l,m)||=l_max, n_max=|sqrt(1-l_max^2)-1|;
        # the max phase change will be ||(du,dv)||*l_max+|dw|*n_max
        # duvw = np.sqrt(du**2 + dv**2)
        # half_𝞓𝞇 = (2 * np.pi * (self.max_chan_freq/lightspeed) *
        #            (duvw * self.max_lm + np.abs(dw) * self.n_max)) + 1.0e-8
        # bldecorr = np.sin(half_𝞓𝞇) / half_𝞓𝞇
        # Do not add the row to the bin as it
        # would exceed the decorrelation tolerance
        # or the required number of seconds in the bin
        if (bldecorr < np.sinc(self.decorrelation) or
                dt > self.time_bin_secs):
            return False
        # Add the row by making it the end of the bin
        # and keep a record of the half_𝞓𝞇
        self.re = row
        self.bin_half_Δψ = half_𝞓𝞇
        self.bin_count += 1
        if flag_row is not None and flag_row[row] != 0:
            self.bin_flag_count += 1
        return True
    @property
    def empty(self):
        # True when no rows have been added since the last reset/start_bin
        return self.bin_count == 0
    def finalise_bin(self, auto_corr, uvw, time, interval,
                     nchan_factors, chan_width, chan_freq):
        """ Finalise the contents of this bin """
        if self.bin_count == 0:
            raise ValueError("Attempted to finalise empty bin")
        elif self.bin_count == 1:
            # Single entry in the bin, no averaging occurs
            out = FinaliseOutput(self.tbin,
                                 time[self.rs],
                                 interval[self.rs],
                                 chan_width.size,
                                 self.bin_count == self.bin_flag_count)
            self.tbin += 1
            return out
        rs = self.rs
        re = self.re
        # Calculate the maximum change in frequency for the bin,
        # given the change in phase
        if auto_corr:
            # Auto-correlated baseline, average all channels
            # everything down to a single value
            nchan = 1
        else:
            # Central UVW coordinate of the bin
            cu = (uvw[rs, 0] + uvw[re, 0]) / 2
            cv = (uvw[rs, 1] + uvw[re, 1]) / 2
            cw = (uvw[rs, 2] + uvw[re, 2]) / 2
            cuv = np.sqrt(cu**2 + cv**2)
            max_abs_dist = np.sqrt(np.abs(cuv)*np.abs(self.max_lm) +
                                   np.abs(cw)*np.abs(self.n_max))
            if max_abs_dist == 0.0:
                raise ValueError("max_abs_dist == 0.0")
            # Given
            # (1) acceptable decorrelation
            # (2) change in (phase) baseline speed
            # derive the frequency phase difference
            # from Equation (40) in Atemkeng
            # If there's a single sample (rs == re)
            # we can't meaningfully calculate baseline speed.
            # In this case frequency phase difference
            # just becomes the decorrelation factor
            # The following is copied from DDFacet. Variables names could
            # be changed but wanted to keep the correspondence clear.
            # BH: I strongly suspect this is wrong: see eq. 18-19 in SI II
            delta_nu = (lightspeed / (2*np.pi)) * \
                (self.decorrelation / max_abs_dist)
            fracsizeChanBlock = delta_nu / chan_width
            fracsizeChanBlockMin = max(fracsizeChanBlock.min(), 1)
            assert fracsizeChanBlockMin >= 1
            nchan = np.ceil(chan_width.size/fracsizeChanBlockMin)
            # Now find the next highest integer factorisation
            # of the input number of channels
            s = np.searchsorted(nchan_factors, nchan, side='left')
            nchan = nchan_factors[min(nchan_factors.shape[0] - 1, s)]
        time_start = time[rs] - (interval[rs] / 2.0)
        time_end = time[re] + (interval[re] / 2.0)
        # Finalise bin values for return
        assert self.bin_count >= 1
        out = FinaliseOutput(self.tbin,
                             (time_start + time_end) / 2.0,
                             time_end - time_start,
                             nchan,
                             self.bin_count == self.bin_flag_count)
        self.tbin += 1
        return out
# Result bundle produced by the BDA row mapper (bda_mapper below).
# Field semantics are suggested by the names -- verify against bda_mapper:
#   map / offsets     -- input-to-output row mapping and bin offsets
#   decorr_chan_width -- decorrelated channel width per output row
#   time / interval   -- averaged time and interval per output row
#   chan_width        -- output channel widths
#   flag_row          -- output row flags
RowMapOutput = namedtuple("RowMapOutput",
                          ["map", "offsets", "decorr_chan_width",
                           "time", "interval", "chan_width", "flag_row"])
@generated_jit(nopython=True, nogil=True, cache=True)
def bda_mapper(time, interval, ant1, ant2, uvw,
chan_width, chan_freq,
| |
# coding: utf8
"""
Viewer classes
Original author: <NAME>
"""
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter.filedialog import *
import PIL
from PIL import ImageTk, Image, ImageOps
from PIL.Image import *
from PIL.Image import BOX, LINEAR, NEAREST, EXTENT, fromarray
import numpy
class ResizableCanvas(Canvas):
    """
    A tkinter Canvas that tracks its own size across window resizes.
    """
    def __init__(self, parent, **kwargs):
        """Create the canvas and register the <Configure> resize handler."""
        Canvas.__init__(self, parent, **kwargs)
        self.bind("<Configure>", self.on_resize)
        # Last known dimensions, refreshed on every resize event.
        self.height = self.winfo_reqheight()
        self.width = self.winfo_reqwidth()

    def on_resize(self, event):
        """Record the new dimensions and apply them to the canvas.

        NOTE(review): the previous version computed old/new width and
        height ratios but never used them -- the canvas *contents* are not
        rescaled here (no ``self.scale`` call); callers are responsible
        for redrawing.  The dead locals were removed.
        """
        self.width = event.width
        self.height = event.height
        # resize the canvas
        self.config(width=self.width, height=self.height)
class ViewerTab:
    """
    A simple Viewer class

    Displays a slide image (and optionally a superposed colour map) in a
    resizable canvas, with pan / zoom / rotate / flip controls on the
    left and a per-cluster label panel on the right.
    """
    def __init__(self, master, model, dim=800):
        # master: parent tkinter container.
        # model: supplies images and view state (initImage, zoomIn/zoomOut,
        #        translateImage, angle, flip, level, ...).
        # dim: initial square canvas size in pixels.
        self.master = master
        self.model = model
        self.image_x_abs = 0.
        self.image_y_abs = 0.
        # view-state flags
        self.isSlideOn = False
        self.isSuperposed = False
        self.isFISH = False
        self.image = None
        self.cmap = None
        self.photoimage = None
        self.tool = "slide"
        # last mouse-down position, used while panning
        self.xref = 0
        self.yref = 0
        # creation of a frame on the left of the Canvas
        # just to put some buttons and informations
        self.sideFrame = ttk.Frame(self.master, width=100)
        self.sideFrame.pack(side=LEFT, fill=BOTH)
        self.zoomPanel = ttk.LabelFrame(self.sideFrame, width=90,
                                        text="Control Panel")
        self.zoomPanel.pack(side=TOP)
        # creation of a frame on the right of the canvas
        # It will hold the labels for the colormap
        self.rsideFrame = ttk.Frame(self.master, width=100)
        self.rsideFrame.pack(side=RIGHT, fill=BOTH)
        self.labelPanel = ttk.LabelFrame(self.rsideFrame, width=90,
                                         text="Labels")
        self.labelPanel.pack(side=TOP)
        # image container
        self.canvas = ResizableCanvas(self.master,
                                      width=dim,
                                      height=dim,
                                      highlightthickness=0,
                                      bg="black")
        self.canvas.pack(fill=BOTH, expand=YES)
        # canvas bind events
        self.canvas.bind("<Button-1>", self.dirbutton)
        self.canvas.bind("<B1-Motion>", self.move)
        self.canvas.bind("<ButtonRelease-1>", self.nomove)
        self.canvas.bind("<Button-2>", self.get_position)
        self.buttonzoom = ttk.Button(self.zoomPanel, text="Zoom",
                                     command=self.zoom)
        self.buttondezoom = ttk.Button(self.zoomPanel, text="Dezoom",
                                       command=self.dezoom)
        self.buttonrotate = ttk.Button(self.zoomPanel, text="Rotate",
                                       command=self.rotate)
        self.buttonflip = ttk.Button(self.zoomPanel, text="Flip",
                                     command=self.flip)
        self.buttonzoom.pack()
        self.buttondezoom.pack()
        self.buttonrotate.pack()
        self.buttonflip.pack()
        # per-cluster checkbutton bookkeeping (populated by set_labels)
        self.vars = []
        self.values = []
        self.buttons = []
        self.label_dict = {}
        # NOTE(review): selectall/unselectall are referenced here but are
        # defined on a subclass -- confirm callers only instantiate subclasses.
        self.select_var = IntVar()
        self.select_var.set(1)
        self.buttonselect = Checkbutton(self.labelPanel, text='Select All',
                                        var=self.select_var, onvalue=1,
                                        offvalue=0, command=self.selectall)
        self.buttonselect.pack()
        self.unselect_var = IntVar()
        self.unselect_var.set(0)
        self.buttonunselect = Checkbutton(self.labelPanel, text='Unselect All',
                                          var=self.unselect_var, onvalue=1,
                                          offvalue=0, command=self.unselectall)
        self.buttonunselect.pack()
        self.changelabels = ttk.Button(self.labelPanel, text="Change labels",
                                       command=self.popup_labels)
        self.changelabels.pack()
    def initView(self):
        # done
        """
        A function that create the image in the canvas
        and initialize several variables
        """
        # if there is an image in the canvas, delete it
        self.canvas.delete('all')
        # image creation
        self.image = self.model.initImage()
        self.image.putalpha(255)
        self.model.angle = 0
        self.redraw()
        self.isSlideOn = True
    def initViewSuperposed(self):
        # done
        """
        A function that adds the color map image to the canvas
        """
        # if there is an image in the canvas, delete it
        self.canvas.delete('all')
        # image creation
        self.image = self.model.initImage()
        self.cmap = self.model.initImagePng()
        # model returns float data in [0, 1]; scale to 8-bit RGB(A)
        self.cmap = PIL.Image.fromarray((self.cmap * 255).astype(numpy.uint8))
        self.isSuperposed = True
        self.isSlideOn = True
        self.set_labels()
        self.redrawSuperposed()
    def redraw(self):
        """Re-render the plain slide image (flip/rotate applied) onto the canvas."""
        self.image.putalpha(255)
        if self.model.flip:
            self.image = ImageOps.mirror(self.image)
        self.photoimage = ImageTk.PhotoImage(self.image.rotate(self.model.angle))
        self.canvas.delete("image")
        # anchored one canvas-size up/left so the visible area is the image centre
        self.canvas.create_image(-self.canvas.width,
                                 -self.canvas.height,
                                 anchor=NW,
                                 image=self.photoimage,
                                 tags="image")
        self.canvas.pack()
    def my_resize(self, size): #Not better than PIL.Image.transform or resize method
        """Hand-rolled resize of the colour map for FISH mode: paints one
        pixel_size x pixel_size block per patch position at the current
        zoom level.

        NOTE(review): the 598 scale factor and the per-level pixel_size
        table appear to encode a fixed patch size -- confirm against the
        model's patch geometry.
        """
        needed_y , needed_x = size
        size_new_image = max([needed_x,needed_y])
        new_image = PIL.Image.new('RGBA',(size_new_image,size_new_image))
        n = numpy.array(new_image)
        factor = (2**self.model.level)
        pixel_size = 1
        for key in self.model.positions.keys():
            if key != 'size_x' and key != 'size_y':
                xo = int( (key[0] *598) / factor)
                yo = int( (key[1] *598) /factor)
                if self.model.level > 7:
                    pixel_size = 1
                if self.model.level == 7:
                    pixel_size = 4
                if self.model.level == 6:
                    pixel_size = 9
                if self.model.level == 5:
                    pixel_size = 18
                if self.model.level == 4:
                    pixel_size = 37
                if self.model.level == 3:
                    pixel_size = 74
                elif self.model.level == 2:
                    pixel_size = 149
                elif self.model.level == 1:
                    pixel_size = 299
                elif self.model.level == 0:
                    pixel_size = 598
                for i in range(pixel_size):
                    for j in range(pixel_size):
                        x_good = xo + i
                        y_good = yo + j
                        # clamp out-of-bounds writes back to origin
                        # NOTE(review): wrapping to 0 rather than skipping
                        # looks intentional but overwrites pixel (0, 0) -- verify
                        if x_good > needed_x:
                            x_good = 0
                        if y_good > needed_y:
                            y_good = 0
                        n[x_good,y_good] = self.model.positions[key]
        new_image = PIL.Image.fromarray(n)
        print("cmap size : ",new_image.size,"| slide size :", needed_x,needed_y, "| zoom lvl :", self.model.level, "| pixel size :", pixel_size)
        return new_image
    def redrawSuperposed(self):
        """Re-render the slide with the colour map pasted on top.

        In FISH mode the cmap is rebuilt to the bounding box of the
        non-black slide pixels; otherwise the cached cmap is resized to
        the slide level dimensions and pasted with its transparency.
        """
        self.image.putalpha(255)
        if self.isFISH:
            n = numpy.array(self.image)
            # bounding box of non-black pixels (red channel > 0)
            x, y = numpy.where(n[:, :, 0] > 0)
            #print(x,y)
            min_x = int(min(x))
            min_y = int(min(y))
            max_x = int(max(x))
            max_y = int(max(y))
            dx = max_x - min_x
            dy = max_y - min_y
            size = (dy,dx)
            #print("Size cmap ",self.cmap.size,"Zoom factor ",self.model.level)
            #self.cmap = self.cmap.transform(size,EXTENT,(0,0)+self.cmap.size)
            self.cmap = self.my_resize((dx,dy))
            self.image.paste(self.cmap,(min_y,min_x),self.cmap)
        else:
            self.cmap.putalpha(self.model.tcmap)
            self.cmap_resize = self.cmap.resize(self.model.slide.level_dimensions[self.model.level], resample=NEAREST)
            # small vertical offset correcting for the resize ratio
            mod = int(round(self.cmap_resize.size[0]/(self.cmap.size[0]*3)))
            image = self.image.copy()
            image.paste(self.cmap_resize, (self.model.cmapx, self.model.cmapy+mod), mask=self.cmap_resize)
        if self.model.flip:
            image = ImageOps.mirror(image)
        self.photoimage = ImageTk.PhotoImage(image.rotate(self.model.angle))
        self.canvas.delete("image")
        self.canvas.create_image(-self.canvas.width,
                                 -self.canvas.height,
                                 anchor=NW,
                                 image=self.photoimage,
                                 tags="image")
        self.canvas.pack()
    def dirbutton(self, event):
        # done
        # Record the pan anchor on mouse-down.
        if self.isSlideOn:
            if self.tool == "slide":
                self.xref = event.x
                self.yref = event.y
    def move(self, event):
        # done
        # While dragging, translate only the cached canvas image (cheap);
        # the real image translation happens on release in nomove().
        if self.isSlideOn:
            if self.tool == "slide":
                dpx = (event.x - self.xref)
                dpy = (event.y - self.yref)
                self.canvas.delete("image")
                self.canvas.create_image(-self.canvas.width + dpx,
                                         -self.canvas.height + dpy, anchor=NW,
                                         image=self.photoimage, tags="image")
    def nomove(self, event):
        # done
        # On release, ask the model for the translated image and repaint.
        if self.isSuperposed:
            if self.tool == "slide":
                self.image = self.model.translateImage(self.xref,
                                                       self.yref,
                                                       event)
                self.redrawSuperposed()
        if self.isSlideOn and self.isSuperposed == False:
            if self.tool == "slide":
                self.image = self.model.translateImage(self.xref,
                                                       self.yref,
                                                       event)
                self.redraw()
    def zoom(self):
        """Zoom in one level and repaint (superposed or plain view)."""
        if self.isSuperposed:
            self.image = self.model.zoomIn()
            self.redrawSuperposed()
        if self.isSlideOn and self.isSuperposed == False:
            # reset level
            self.image = self.model.zoomIn()
            self.redraw()
    def dezoom(self):
        """Zoom out one level and repaint (superposed or plain view)."""
        if self.isSuperposed:
            self.image = self.model.zoomOut()
            self.redrawSuperposed()
        if self.isSlideOn and self.isSuperposed == False:
            self.image = self.model.zoomOut()
            self.redraw()
    def rotate(self):
        """Rotate the view by 90 degrees (wraps at 360) and repaint."""
        if self.isSuperposed:
            self.model.angle += 90
            if self.model.angle == 360:
                self.model.angle = 0
            self.redrawSuperposed()
        if self.isSlideOn and self.isSuperposed == False:
            self.model.angle += 90
            self.redraw()
    def flip(self):
        """Toggle horizontal mirroring and repaint."""
        if self.isSuperposed:
            if self.model.flip:
                self.model.flip = False
            else:
                self.model.flip = True
            self.redrawSuperposed()
        if self.isSlideOn and self.isSuperposed == False:
            if self.model.flip:
                self.model.flip = False
            else:
                self.model.flip = True
            self.redraw()
    def get_position(self, event):
        """Translate a middle-click canvas position into colour-map patch
        indices and show them in a message box.

        The factorx/factory terms undo the current rotation; the flip
        branch mirrors the x coordinate first.
        NOTE(review): the coordinate algebra assumes angle is a multiple
        of 90 and the canvas anchor used in redraw() -- verify if either changes.
        """
        factory = (-1)*int(numpy.sin(numpy.radians(self.model.angle))) + int(numpy.cos(numpy.radians(self.model.angle)))
        factorx = int(numpy.sin(numpy.radians(self.model.angle))) + int(numpy.cos(numpy.radians(self.model.angle)))*(-1)**(self.model.angle/90)
        if self.model.flip:
            event.x = self.canvas.width - event.x
        if self.model.angle % 180 == 0:
            abs_x = factorx*event.x + self.canvas.width*2**(self.model.angle/180) - self.model.cmapx
            abs_y = factory*event.y + self.canvas.height*2**(self.model.angle/180) - self.model.cmapy
        else:
            abs_x = factory*event.y + (3*self.canvas.width+self.canvas.height*(factorx))/2 - self.model.cmapx
            abs_y = factorx*event.x + (3*self.canvas.height+self.canvas.width*(factory))/2 - self.model.cmapy
        factor_resize_x = self.cmap_resize.size[0]/self.model.cmap_png.shape[0]
        factor_resize_y = self.cmap_resize.size[1]/self.model.cmap_png.shape[1]
        index_x = int(abs_x/factor_resize_x)
        index_y = int(abs_y/factor_resize_y)
        messagebox.showinfo('Patch coordinates', 'X: % d \n Y: % d' % (index_x, index_y))
class ViewerTabV2(ViewerTab):
    def __init__(self, master, model, dim=800):
        """Extend ViewerTab with a detection-threshold panel and a
        colour-map transparency panel (each a slider + spinbox pair)."""
        ViewerTab.__init__(self, master, model, dim)
        # variable for spinbox
        self.spinval = IntVar()
        self.cmap_trans = IntVar()
        # add a slider
        self.thresholdPanel = ttk.LabelFrame(self.sideFrame, width=90,
                                             text="Threshold Panel")
        self.thresholdPanel.pack(side=TOP)
        # slider snaps to whole numbers; annotations refresh on release only
        self.scale = ttk.Scale(master=self.thresholdPanel, command=self.accept_whole_number_only, orient=VERTICAL, from_=51, to=255)
        self.scale.bind("<ButtonRelease-1>", self.update_annotations)
        self.scale.pack(side=LEFT)
        self.threshspinbox = Spinbox(master=self.thresholdPanel, from_=51, to=255, textvariable=self.spinval, command=self.update, width=10)
        self.threshspinbox.pack(side=LEFT)
        # add a slider
        self.CmapTransparency = ttk.LabelFrame(self.sideFrame, width=90,
                                               text="Transparency Cmap")
        self.CmapTransparency.pack(side=TOP)
        self.scale_cmap = ttk.Scale(master=self.CmapTransparency, command=self.accept_whole_number_only_cmap, orient=VERTICAL, from_=0, to=255)
        self.scale_cmap.pack(side=LEFT)
        self.cmapspinbox = Spinbox(master=self.CmapTransparency, from_=0, to=255, textvariable=self.cmap_trans, command=self.update_cmap, width=10)
        self.cmapspinbox.pack(side=LEFT)
def accept_whole_number_only(self, e=None):
value = self.scale.get()
if int(value) != value:
self.scale.set(round(value))
self.spinval.set(int(round(value)))
self.model.thresh = self.spinval.get()
    def update(self, e=None):
        """Updates the scale and spinbox"""
        # Order matters: setting the scale fires accept_whole_number_only,
        # which snaps the value and updates spinval before we read it back.
        self.scale.set(self.threshspinbox.get())
        self.model.thresh = self.spinval.get()
def update_annotations(self, event):
# can call any function that update annotations in the model
self.image = self.model.updateImage()
self.redraw()
def accept_whole_number_only_cmap(self, e=None):
value = self.scale_cmap.get()
if int(value) != value:
self.scale_cmap.set(round(value))
#self.cmap_trans.set(int(round(value)))
#self.model.tcmap = self.cmap_trans.get()
self.model.tcmap = int(self.scale_cmap.get())
self.cmap_trans.set(int(self.scale_cmap.get()))
if self.isSuperposed:
self.redrawSuperposed()
    def update_cmap(self, e=None):
        """Updates the scale and spinbox"""
        # Setting the scale fires accept_whole_number_only_cmap, which may
        # rewrite cmap_trans (the spinbox variable); the second read below
        # therefore deliberately re-reads the (possibly snapped) value.
        self.scale_cmap.set(self.cmapspinbox.get())
        #self.model.tcmap = self.cmap_trans.get()
        self.model.tcmap = int(self.cmapspinbox.get())
    def change_dict(self):
        """Rebuild the colour map after cluster checkbuttons toggle:
        unchecked clusters are recoloured with a single background
        colour, then the PNG colour map is regenerated and repainted."""
        n = 0
        # start from a fresh copy of the original per-cluster colours
        temp_dict = {(int(c[0])): (float(c[1]), float(c[2]), float(c[3])) for c in self.model.original_color_dict}
        for i in range(len(self.vars)):
            value = self.vars[i].get()
            if not value:
                # unchecked: paint this cluster in the neutral background colour
                temp_dict[self.values[i]] = (0.3216,0.3294,0.6392)
                self.select_var.set(0)
                n += 1
        self.model.color_dict = temp_dict
        # map every cluster id in the PNG to its RGB triple
        image = numpy.array([[self.model.color_dict[x] for x in row] for row in self.model.cmap_png.astype(int)])
        self.cmap = numpy.transpose(image, (1, 0, 2))
        self.cmap = PIL.Image.fromarray((self.cmap * 255).astype(numpy.uint8))
        self.redrawSuperposed()
        # NOTE(review): n <= len(self.vars) is always true, so 'Unselect
        # All' is always cleared here; possibly n < len(self.vars) was meant.
        if n <= len(self.vars): self.unselect_var.set(0)
        return
def popup_labels(self):
top = Toplevel()
top.title("Dictionary of labels")
Options = ['Select cluster']
Options.extend(self.values)
variable = StringVar()
variable.set(Options[0])
msg = Message(top, text='Select class to rename')
msg.pack()
w = OptionMenu(top, variable, *Options)
w.pack()
msg = Message(top, text='Introduce the name of the new class')
msg.pack()
text = Entry(top)
text.pack()
button = Button(top, text="Accept", command=lambda: [self.change_label(text.get(), variable.get()), top.destroy()])
button.pack()
def change_label(self, name, cluster):
self.buttons[int(cluster)].config(text='{}: {}'.format(cluster, name))
self.label_dict[cluster] = name
return
def set_labels(self):
for i in range(self.model.max_cluster + 1):
value = i
self.values.append(value)
var = StringVar(value=value)
self.vars.append(var)
colors = self.model.color_dict[i]
r = int(colors[0]*255)
g = int(colors[1]*255)
b = int(colors[2]*255)
cb = Checkbutton(self.labelPanel, var=var, text=value,
onvalue=value, offvalue="",
command=lambda: self.change_dict(),
bg='#%02x%02x%02x' % (r,g,b))
cb.pack(side="top", fill="x", anchor="w")
self.buttons.append(cb)
def selectall(self):
for var in | |
was not initially created by from_db,
# assume content has been modified.
content_modified = True
if content_modified:
# If content has been modified, then save normally.
return super().save(**kwargs)
else:
# If content has not been modified, then exclude all of the
# Metadata fields as well as modified_datetime.
fields = ({f.name for f in self._meta.fields}
- {f.name for f in Metadata._meta.fields}
- {'id', 'modified_datetime'})
return super().save(update_fields=fields, **kwargs)
class ArchivedProject(Metadata, UnpublishedProject, SubmissionInfo):
    """
    An archived project. Created when (maps to archive_reason):
    1. A user chooses to 'delete' their ActiveProject.
    2. An ActiveProject is not submitted for too long.
    3. An ActiveProject is submitted and rejected.
    4. An ActiveProject is submitted and times out.
    """
    # When the project was archived, and the reason code listed above
    archive_datetime = models.DateTimeField(auto_now_add=True)
    archive_reason = models.PositiveSmallIntegerField()
    # Where all the archived project files are kept
    FILE_ROOT = os.path.join(settings.MEDIA_ROOT, 'archived-projects')
    def __str__(self):
        return ('{0} v{1}'.format(self.title, self.version))
class ActiveProject(Metadata, UnpublishedProject, SubmissionInfo):
    """
    The project used for submitting

    The submission_status field:
    - 0 : Not submitted
    - 10 : Submitting author submits. Awaiting editor assignment.
    - 20 : Editor assigned. Awaiting editor decision.
    - 30 : Revisions requested. Waiting for resubmission. Loops back
          to 20 when author resubmits.
    - 40 : Accepted. In copyedit stage. Awaiting editor to copyedit.
    - 50 : Editor completes copyedit. Awaiting authors to approve.
    - 60 : Authors approve copyedit. Ready for editor to publish
    """
    submission_status = models.PositiveSmallIntegerField(default=0)
    # Max number of active submitting projects a user is allowed to have
    MAX_SUBMITTING_PROJECTS = 10
    # Largest single uploaded file, in bytes (10 GiB)
    INDIVIDUAL_FILE_SIZE_LIMIT = 10 * 1024**3
    # Where all the active project files are kept
    FILE_ROOT = os.path.join(settings.MEDIA_ROOT, 'active-projects')
    # Fields that must be non-empty before submission, indexed by
    # resource_type id (see check_integrity)
    REQUIRED_FIELDS = (
        # 0: Database
        ('title', 'abstract', 'background', 'methods', 'content_description',
         'usage_notes', 'conflicts_of_interest', 'version', 'license',
         'short_description'),
        # 1: Software
        ('title', 'abstract', 'background', 'content_description',
         'usage_notes', 'installation', 'conflicts_of_interest', 'version',
         'license', 'short_description'),
        # 2: Challenge
        ('title', 'abstract', 'background', 'methods', 'content_description',
         'usage_notes', 'conflicts_of_interest', 'version', 'license',
         'short_description'),
        # 3: Model
        ('title', 'abstract', 'background', 'methods', 'content_description',
         'usage_notes', 'installation', 'conflicts_of_interest', 'version',
         'license', 'short_description'),
    )
    # Custom labels that don't match model field names
    LABELS = (
        # 0: Database
        {'content_description': 'Data Description'},
        # 1: Software
        {'content_description': 'Software Description',
         'methods': 'Technical Implementation',
         'installation': 'Installation and Requirements'},
        # 2: Challenge
        {'background': 'Objective',
         'methods': 'Participation',
         'content_description': 'Data Description',
         'usage_notes': 'Evaluation'},
        # 3: Model
        {'content_description': 'Model Description',
         'methods': 'Technical Implementation',
         'installation': 'Installation and Requirements'},
    )
    # Human-readable text for each submission_status code
    SUBMISSION_STATUS_LABELS = {
        0: 'Not submitted.',
        10: 'Awaiting editor assignment.',
        20: 'Awaiting editor decision.',
        30: 'Revisions requested.',
        40: 'Submission accepted; awaiting editor copyedits.',
        50: 'Awaiting authors to approve publication.',
        60: 'Awaiting editor to publish.',
    }
def storage_used(self):
"""
Total storage used in bytes.
This includes the total size of new files uploaded to this
project, as well as the total size of files published in past
versions of this CoreProject. (The QuotaManager should ensure
that the same file is not counted twice in this total.)
"""
current = self.quota_manager().bytes_used
published = self.core_project.total_published_size
return current + published
def storage_allowance(self):
"""
Storage allowed in bytes
"""
return self.core_project.storage_allowance
def get_inspect_dir(self, subdir):
"""
Return the folder to inspect if valid. subdir joined onto
the file root of this project.
"""
# Sanitize subdir for illegal characters
validate_subdir(subdir)
# Folder must be a subfolder of the file root
# (but not necessarily exist or be a directory)
inspect_dir = os.path.join(self.file_root(), subdir)
if inspect_dir.startswith(self.file_root()):
return inspect_dir
else:
raise Exception('Invalid directory request')
def file_url(self, subdir, file):
"""
Url of a file to download in this project
"""
return reverse('serve_active_project_file',
args=(self.slug, os.path.join(subdir, file)))
def file_display_url(self, subdir, file):
"""
URL of a file to display in this project
"""
return reverse('display_active_project_file',
args=(self.slug, os.path.join(subdir, file)))
def under_submission(self):
"""
Whether the project is under submission
"""
return bool(self.submission_status)
def submission_deadline(self):
return self.creation_datetime + timedelta(days=180)
def submission_days_remaining(self):
return (self.submission_deadline() - timezone.now()).days
def submission_status_label(self):
return ActiveProject.SUBMISSION_STATUS_LABELS[self.submission_status]
def author_editable(self):
"""
Whether the project can be edited by its authors
"""
if self.submission_status in [0, 30]:
return True
def copyeditable(self):
"""
Whether the project can be copyedited
"""
if self.submission_status == 40:
return True
    def archive(self, archive_reason):
        """
        Archive the project. Create an ArchivedProject object, copy over
        the fields, and delete this object.

        archive_reason follows the codes documented on ArchivedProject
        (1 = voluntary delete, 2 = timeout, 3 = rejected, 4 = timed out
        after submission). Returns the result of deleting this project.
        """
        archived_project = ArchivedProject(archive_reason=archive_reason,
                                           slug=self.slug)
        # capture before any save() bumps the auto_now field
        modified_datetime = self.modified_datetime
        # Direct copy over fields
        for attr in [f.name for f in Metadata._meta.fields] + [f.name for f in SubmissionInfo._meta.fields]:
            setattr(archived_project, attr, getattr(self, attr))
        archived_project.save()
        # Redirect the related objects
        for reference in self.references.all():
            reference.project = archived_project
            reference.save()
        for publication in self.publications.all():
            publication.project = archived_project
            publication.save()
        for topic in self.topics.all():
            topic.project = archived_project
            topic.save()
        for author in self.authors.all():
            author.project = archived_project
            author.save()
        for edit_log in self.edit_logs.all():
            edit_log.project = archived_project
            edit_log.save()
        for copyedit_log in self.copyedit_logs.all():
            copyedit_log.project = archived_project
            copyedit_log.save()
        for parent_project in self.parent_projects.all():
            archived_project.parent_projects.add(parent_project)
        # NOTE(review): resource_type id 1 appears to be 'software'
        # (the only type with programming languages) -- confirm.
        if self.resource_type.id == 1:
            languages = self.programming_languages.all()
            if languages:
                archived_project.programming_languages.add(*list(languages))
        # Voluntary delete
        if archive_reason == 1:
            self.clear_files()
        else:
            # Move over files
            os.rename(self.file_root(), archived_project.file_root())
        # Copy the ActiveProject timestamp to the ArchivedProject.
        # Since this is an auto_now field, save() doesn't allow
        # setting an arbitrary value.
        queryset = ArchivedProject.objects.filter(id=archived_project.id)
        queryset.update(modified_datetime=modified_datetime)
        return self.delete()
def fake_delete(self):
"""
Appear to delete this project. Actually archive it.
"""
self.archive(archive_reason=1)
def check_integrity(self):
"""
Run integrity tests on metadata fields and return whether the
project passes the checks
"""
self.integrity_errors = ErrorList()
# Invitations
for invitation in self.authorinvitations.filter(is_active=True):
self.integrity_errors.append(
'Outstanding author invitation to {0}'.format(invitation.email))
# Storage requests
for storage_request in self.storagerequests.filter(
is_active=True):
self.integrity_errors.append('Outstanding storage request')
# Authors
for author in self.authors.all().order_by('display_order'):
if not author.get_full_name():
self.integrity_errors.append('Author {0} has not fill in name'.format(author.user.username))
if not author.affiliations.all():
self.integrity_errors.append('Author {0} has not filled in affiliations'.format(author.user.username))
# Metadata
for attr in ActiveProject.REQUIRED_FIELDS[self.resource_type.id]:
value = getattr(self, attr)
text = unescape(strip_tags(str(value)))
if value is None or not text or text.isspace():
l = self.LABELS[self.resource_type.id][attr] if attr in self.LABELS[self.resource_type.id] else attr.title().replace('_', ' ')
self.integrity_errors.append('Missing required field: {0}'.format(l))
published_projects = self.core_project.publishedprojects.all()
if published_projects:
published_versions = [p.version for p in published_projects]
if self.version in published_versions:
self.integrity_errors.append('The version matches a previously published version.')
self.version_clash = True
else:
self.version_clash = False
if self.integrity_errors:
return False
else:
return True
def is_submittable(self):
"""
Whether the project can be submitted
"""
return (not self.under_submission() and self.check_integrity())
def submit(self, author_comments):
"""
Submit the project for review.
"""
if not self.is_submittable():
raise Exception('ActiveProject is not submittable')
self.submission_status = 10
self.submission_datetime = timezone.now()
self.author_comments = author_comments
self.save()
# Create the first edit log
EditLog.objects.create(project=self, author_comments=author_comments)
    def set_submitting_author(self):
        """
        Used to save query time in templates

        Replaces the bound `submitting_author` method on this instance
        with its computed value, so templates can read it as a plain
        attribute without re-querying.
        """
        self.submitting_author = self.submitting_author()
def assign_editor(self, editor):
"""
Assign an editor to the project and set the submission status to the
edit stage.
"""
self.editor = editor
self.submission_status = 20
self.editor_assignment_datetime = timezone.now()
self.save()
def reassign_editor(self, editor):
"""
Reassign the current project editor with new editor
"""
self.editor = editor
self.save()
def reject(self):
"""
Reject a project under submission
"""
self.archive(archive_reason=3)
    def is_resubmittable(self):
        """
        Whether the project can be resubmitted: revisions must have been
        requested (status 30) and the integrity checks must pass.
        """
        return (self.submission_status == 30 and self.check_integrity())
    def resubmit(self, author_comments):
        """
        Resubmit the project after revisions were requested.

        Atomically moves the status back to the editor-decision stage
        (20), stamps the resubmission time, and records a new edit log
        carrying the author's comments.
        """
        if not self.is_resubmittable():
            raise Exception('ActiveProject is not resubmittable')
        with transaction.atomic():
            self.submission_status = 20
            self.resubmission_datetime = timezone.now()
            self.save()
            # Create a new edit log
            EditLog.objects.create(project=self, is_resubmission=True,
                                   author_comments=author_comments)
def reopen_copyedit(self):
"""
Reopen the project for copyediting
"""
if self.submission_status == 50:
self.submission_status = 40
self.copyedit_completion_datetime = None
self.save()
CopyeditLog.objects.create(project=self, is_reedit=True)
self.authors.all().update(approval_datetime=None)
def approve_author(self, author):
""""
Approve an author. Move the project into the next state if the
author is the final outstanding one. Return whether the
process was successful.
"""
if self.submission_status == 50 and not author.approval_datetime:
now = timezone.now()
author.approval_datetime = now
author.save()
if self.all_authors_approved():
self.author_approval_datetime = now
self.submission_status = 60
self.save()
return True
def all_authors_approved(self):
"""
Whether all authors have approved the publication
"""
authors = self.authors.all()
return len(authors) == len(authors.filter(
approval_datetime__isnull=False))
def is_publishable(self):
"""
Check whether a project may be published
"""
if self.submission_status == 60 and self.check_integrity() and self.all_authors_approved():
return True
return False
    def clear_files(self):
        """
        Delete the project file directory

        Irreversibly removes the whole tree under file_root().
        """
        shutil.rmtree(self.file_root())
def publish(self, slug=None, make_zip=True, title=None):
"""
Create a published version of this project and update the
submission status.
Parameters
----------
slug : the | |
time.time() + last_time)
last_time = time.time()
else:
print("malformed rhythm pattern: " + rhythm_substr)
break
# print("AUDIO THREAD: Beginning post_ems display: time since start: " + str(time.time()-time_naught))
if post_ems_test_flag:
for i in range(post_ems_repeats): # present the rhythm with appropriate number of repeats
for j in range(len(rhythm_substr)): # go through each eighthnote in the pattern
if (rhythm_substr[j] == '1'): # this is a note
audio_onset_times.append(time.time() - time_naught)
eighteighty_tone.play()
time.sleep((milliseconds_per_eighthnote/1000) - time.time() + last_time)
last_time = time.time()
eighteighty_tone.stop()
elif(rhythm_substr[j] == '0'): # rest
eighteighty_tone.stop()
time.sleep((milliseconds_per_eighthnote/1000) - time.time() + last_time)
last_time = time.time()
else:
print("malformed rhythm pattern: " + rhythm_substr)
break
def metronome_tone(milliseconds_per_eighthnote, total_str_len):
# plays tone on the beat repeatedly
AUDIO_DELAY = 0.0023
time.sleep(AUDIO_DELAY) # sleep for 2 ms to let audio catch up
last_time = time.time()
counter = 0
for i in range(total_str_len):
counter = counter + 1
if counter == 1:
fourfourty_tone.play()
time.sleep((milliseconds_per_eighthnote/1000) - time.time() + last_time)
last_time = time.time()
fourfourty_tone.stop()
else:
if counter == 8:
counter = 0
time.sleep((milliseconds_per_eighthnote/1000) - time.time() + last_time)
last_time = time.time()
def read_contact_trace(ser, len_rhythm_presentation_ms, samp_period_ms, readings_list, x_values_list, time_naught_contact_trace):
    """Poll the contact-detection serial port for a fixed duration.

    Appends each integer reading to readings_list and its timestamp in
    milliseconds since time_naught_contact_trace to x_values_list
    (mutating the caller's lists in place), and also returns both lists.

    Parameters
    ----------
    ser : serial object exposing in_waiting and readline()
    len_rhythm_presentation_ms : total polling duration in ms
    samp_period_ms : kept for interface compatibility; the previous
        implementation derived an unused repeat count from it
    readings_list, x_values_list : output lists, mutated in place
    time_naught_contact_trace : reference start time (time.time())
    """
    print("read thread begun")
    while (time.time() - time_naught_contact_trace) * 1000 < len_rhythm_presentation_ms:
        if ser.in_waiting:
            out = ser.readline().decode('utf-8')
            time_measured = time.time()
            # strip the trailing '\r\n' before parsing the integer reading
            readings_list.append(int(out[:-2]))
            # record timestamps in milliseconds
            x_values_list.append(1000 * (time_measured - time_naught_contact_trace))
    print("done reading trace")
    return readings_list, x_values_list
def rhythm_string_to_stim_trace_and_audio_trace(count_in_substr, rhythm_substr, actual_stim_length, bpm, repeats, \
                                                samp_period, delay, audio_repeats, post_ems_repeats):
    # takes in the count-in string, the actual rhythm string, the length of stimulation in ms, beats per minute,
    # stim repeats number, requested sample period of resulting trace (in ms). Returns stim_trace numpy array
    # with 0 values for time points of no stim and 1000 values for stim. This is offset /delay/ amount in ms
    # from audio stimulus (also returned in same size array). Final value returned is a time array, steps in
    # samp_period.
    # NOTE(review): the traces are written with 1 (not 1000) -- the
    # comment above appears stale; confirm against plotting code.
    milliseconds_per_eighthnote = 30000/bpm
    # array cells per eighth note / per delay / per stim pulse
    array_len_per_eighthnote = int(np.floor(milliseconds_per_eighthnote/samp_period))
    delay_array_len = int(np.floor(delay/samp_period))
    actual_stim_len_array_indices = int(np.floor(actual_stim_length/samp_period))
    # total eighth notes across count-in + audio-only + stim + post-EMS passes
    eighthnotes_pres = len(count_in_substr) + (audio_repeats+repeats+post_ems_repeats) * len(rhythm_substr)
    trace_array_len = array_len_per_eighthnote * eighthnotes_pres + delay_array_len
    stim_trace = np.zeros((trace_array_len,))
    audio_trace = np.zeros((trace_array_len,))
    x_array = np.arange(0, trace_array_len) * samp_period
    for i in range(len(count_in_substr)): # write in count-in traces.
        if count_in_substr[i] == '1':
            stim_begin_ind = i * array_len_per_eighthnote
            stim_end_ind = stim_begin_ind + actual_stim_len_array_indices
            stim_trace[stim_begin_ind:stim_end_ind] = 1
            # audio lags the stim by the calibrated delay
            audio_begin_ind = stim_begin_ind+delay_array_len
            audio_end_ind = audio_begin_ind + array_len_per_eighthnote
            audio_trace[audio_begin_ind:audio_end_ind] = 1
    start_index_audio = len(count_in_substr) * array_len_per_eighthnote + delay_array_len
    if audio_repeats > 0:
        for i in range(audio_repeats): # write the audio trace for any audio pre-stim presentation
            for j in range(len(rhythm_substr)):
                if rhythm_substr[j] == '1':
                    audio_begin_ind = start_index_audio + (j * array_len_per_eighthnote)
                    audio_end_ind = audio_begin_ind + array_len_per_eighthnote
                    audio_trace[audio_begin_ind:audio_end_ind] = 1
            start_index_audio = start_index_audio + (array_len_per_eighthnote * len(rhythm_substr))
    start_index_stim = array_len_per_eighthnote * (len(count_in_substr) + (audio_repeats * len(rhythm_substr)))
    for i in range(repeats): # now writing for actual rhythm display and actuation
        for j in range(len(rhythm_substr)):
            if rhythm_substr[j] == '1':
                stim_begin_ind = start_index_stim + (j * array_len_per_eighthnote)
                stim_end_ind = stim_begin_ind + actual_stim_len_array_indices
                stim_trace[stim_begin_ind:stim_end_ind] = 1
                audio_begin_ind = stim_begin_ind+delay_array_len
                audio_end_ind = audio_begin_ind + array_len_per_eighthnote
                # drop the trailing sample so back-to-back notes stay
                # visually distinct in the plotted trace
                audio_trace[audio_end_ind] = 0
        start_index_stim = start_index_stim + (array_len_per_eighthnote * len(rhythm_substr))
    return stim_trace, audio_trace, x_array
def plot_contact_trace_and_rhythm(reading_list, contact_x_values, stim_trace, audio_trace, x_array, samp_period, legend_labels):
    """Overlay the measured contact trace with the stim and audio traces
    (both scaled to the contact trace's peak) in a non-blocking figure."""
    fig, axes = plt.subplots()
    axes.plot(contact_x_values, reading_list)
    axes.set_yticks(np.arange(0, 500, 100))
    axes.set_xticks(np.arange(0, (len(reading_list) * samp_period), 10000))
    peak = np.max(reading_list)
    axes.plot(x_array, stim_trace * peak)
    axes.plot(x_array, audio_trace * peak)
    axes.legend(legend_labels)
    # interactive, non-blocking display so the experiment loop continues
    plt.ion()
    plt.show()
    plt.draw()
    plt.pause(0.01)
def onset_times_to_traces(audio_onset_times, audio_hold_ms, stim_onset_times, stim_hold_ms, samp_period):
    """Convert audio and stim onset timestamps (ms) into plottable
    square-wave traces sampled every samp_period ms.

    The time axis runs from 0 to the last audio onset plus its hold
    time. Returns (x_vec, audio_trace, stim_trace).
    """
    audio_hold_n = int(np.floor(audio_hold_ms / samp_period))
    stim_hold_n = int(np.floor(stim_hold_ms / samp_period))
    final_ms = int(np.floor(np.max(audio_onset_times) + audio_hold_ms))
    x_vec = np.arange(0, final_ms, samp_period)
    audio_trace = np.zeros_like(x_vec)
    stim_trace = np.zeros_like(x_vec)
    def _mark(trace, onsets, hold_n):
        # raise the trace to 1 for hold_n samples after each onset
        for onset_ms in onsets:
            start = int(np.floor(onset_ms / samp_period))
            trace[start:start + hold_n] = 1
    _mark(audio_trace, audio_onset_times, audio_hold_n)
    _mark(stim_trace, stim_onset_times, stim_hold_n)
    return x_vec, audio_trace, stim_trace
def spike_times_to_traces(onset_times, hold_length, x_vector, samp_period):
    """Convert onset timestamps (ms) into a square-wave trace over the
    supplied x_vector, holding each pulse high for hold_length ms."""
    hold_samples = int(np.floor(hold_length / samp_period))
    trace = np.zeros_like(x_vector)
    for onset_ms in onset_times:
        start = int(np.floor(onset_ms / samp_period))
        trace[start:start + hold_samples] = 1
    return trace
def trace_to_spike_times(baseline_mean, baseline_sd, reading_results_list, x_values, sd_more_than_multiplier, baseline_subtractor):
    """Threshold a contact trace and return the time points whose
    (baseline-floored) readings exceed mean + multiplier * sd."""
    readings = np.array(reading_results_list)
    timestamps = np.array(x_values)
    # zero out anything below the floor before thresholding
    readings = np.where(readings < baseline_subtractor, 0, readings)
    threshold = baseline_mean + baseline_sd * sd_more_than_multiplier
    return timestamps[readings > threshold]
def zero_sensor(contact_ser, sleep_len_ms, samp_period_ms):
    """Record a short untouched baseline from the contact sensor.

    Blocks for sleep_len_ms while read_contact_trace fills a buffer, then
    returns (baseline_mean, baseline_sd) of the recorded readings.
    """
    print("DON't TOUCH - zeroing")
    time.sleep(0.5)
    baseline_readings = []
    baseline_x = []
    start_stamp = time.time()
    read_contact_trace(contact_ser, sleep_len_ms, samp_period_ms, baseline_readings, baseline_x, \
        start_stamp)
    readings_arr = np.array(baseline_readings)
    baseline_mean = np.mean(readings_arr)
    baseline_sd = np.std(readings_arr)
    print("Mean basline was " + str(baseline_mean) + " +/- " + str(baseline_sd))
    print("DONE ZEROING")
    return baseline_mean, baseline_sd
def measure_delay(ems_serial, contact_ser, actual_stim_length, trial_num, sleep_len, samp_period_ms, sd_more_than_mult, baseline_subtractor, baseline_mean, baseline_sd):
    """Run randomly-jittered stim trials and determine the average delay from EMS
    command to contact registration.

    A background thread records the contact trace while this thread issues
    trial_num EMS commands; the trace is then thresholded (trace_to_spike_times)
    and each stim is matched to the earliest later threshold crossing.

    Returns (mean_delay, std_delay, first_responses_post_stim, times_stimmed_ms,
    reading_results, x_value_results); delay values are in milliseconds.
    """
    times_stimmed = []
    reading_results = []
    x_value_results = []
    rand_values = np.divide(np.random.rand(trial_num), 2) #between 0 and 0.5 second random delay
    # recording length: all trials + jitter (seconds -> ms) plus padding
    len_pres = 3000 + (trial_num * sleep_len + np.sum(rand_values)) * 1000 # ms
    time_naught_delay = time.time()
    print("time naught delay: " + str(time_naught_delay))
    # reader thread fills reading_results/x_value_results while we stim below
    read_thread = threading.Thread(target=read_contact_trace, args= (contact_ser, len_pres, \
        samp_period_ms, reading_results, x_value_results, time_naught_delay))
    time_naught_main = time.time()
    print("time naught main thread: " + str(time_naught_main))
    read_thread.start()
    # time.sleep(1)
    # print("time since start: " + str(time.time() - time_naught_main))
    print("calibrating delay in 3")
    time.sleep(1)
    print("calibrating delay in 2")
    time.sleep(1)
    print("calibrating delay in 1")
    time.sleep(1)
    for i in range(trial_num):
        # NOTE(review): "xC1I100T<len>G" presumably encodes channel/intensity/
        # pulse-length for the EMS device -- confirm against the device protocol
        command_bytes = "xC1I100T" + str(actual_stim_length) + "G \n" # metronome intro
        byt_com = bytes(command_bytes, encoding='utf8')
        ems_serial.write(byt_com)
        times_stimmed.append(time.time()-time_naught_main)
        print("STIM " + str(i))
        time.sleep(sleep_len)
        time.sleep(rand_values[i])
    read_thread.join()
    times_responded_ms = trace_to_spike_times(baseline_mean, baseline_sd, reading_results, x_value_results, sd_more_than_mult, baseline_subtractor)
    times_stimmed_ms = 1000*np.array(times_stimmed)
    first_responses_post_stim = []
    diffs = []
    for i in range(len(times_stimmed_ms)):
        # get earliest response threshold crossing
        temp = np.copy(times_responded_ms)
        before_bool = np.subtract(times_responded_ms, times_stimmed_ms[i]) < 0 # subtract stimmed time from response times to find
        # only responses after stim. then get bools above 0.
        temp[before_bool] = np.max(times_responded_ms) # set befores to maximum to avoid finding a close one before stim
        first_threshold_cross_post_stim = np.argmin(temp)
        first_responses_post_stim.append(times_responded_ms[first_threshold_cross_post_stim])
        diffs.append(times_responded_ms[first_threshold_cross_post_stim] - times_stimmed_ms[i])
    first_responses_post_stim = np.array(first_responses_post_stim)
    mean_delay = np.mean(diffs)
    std_delay = np.std(diffs)
    return mean_delay, std_delay, first_responses_post_stim, times_stimmed_ms, reading_results, x_value_results
def test_double_stroke(ems_serial, actual_stim_length, bpm, double_stroke_rhythm):
    """Play the double-stroke rhythm once through run_rhythm_ems so the wearer can
    judge double/triple-stroke sensation (depends on stim length and bpm).

    NOTE(review): shadowed later in this file by a 1-argument definition of the
    same name, so module-level calls to test_double_stroke resolve to that one;
    rename one of the two.
    """
    # tests sensation of double and triple strokes. This depends on stim length, stim intensity, and bpm.
    temp = 0
    reps = 1
    metronome = 0
    milliseconds_per_eighthnote = 30000/bpm
    milliseconds_wait = milliseconds_per_eighthnote - actual_stim_length  # NOTE(review): unused
    rhythm_display_flag = 1
    audio_pre_display_flag = 0
    pre_repeats = 0
    run_rhythm_ems(rhythm_display_flag, ems_serial, temp, [], reps, double_stroke_rhythm, actual_stim_length, \
        milliseconds_per_eighthnote, metronome, [], audio_pre_display_flag, pre_repeats)
def process_contact_trace_to_hit_times(contact_trace_array, x_values_array, threshold, surpression_window):
    """Extract hit times from a contact trace, keeping only the first crossing in
    each surpression_window-long span.

    Samples above threshold become candidate hits; any candidate that falls
    within surpression_window after an earlier kept hit is suppressed.
    """
    crossing_times = x_values_array[contact_trace_array > threshold]
    # nan in this working copy marks a suppressed candidate
    marker = np.copy(crossing_times)
    for idx in range(len(crossing_times)):
        if np.isnan(marker[idx]):
            continue  # already suppressed by an earlier hit
        window_end = crossing_times[idx] + surpression_window
        in_window = np.logical_and(crossing_times > crossing_times[idx],
                                   crossing_times <= window_end)
        marker[in_window] = np.nan
    keep = np.logical_not(np.isnan(marker))
    return crossing_times[keep]
def test_double_stroke(ems_serial):
    """Interactive loop asking the user to evaluate double-stroke sensation.

    NOTE(review): this redefinition shadows the earlier 4-argument
    test_double_stroke, so the 4-argument call below resolves to THIS function
    and raises TypeError at runtime; one of the two must be renamed.
    """
    out = input("test double stroke sensation?")
    if out == 'y':
        contin = True
        while contin:
            test_double_stroke(ems_serial, ems_constants.actual_stim_length, ems_constants.bpm, ems_constants.double_stroke_rhythm)
            out = input("adjust? a / continue? c")
            if out == 'c':
                contin = False
# example: command_str = "C0I100T750G \n"
if __name__ == '__main__':
tic = time.time()
### load sound ##
global fourfourty_tone
fourfourty_tone = vlc.MediaPlayer("440Hz_44100Hz_16bit_05sec.mp3");
global eighteighty_tone
eighteighty_tone = vlc.MediaPlayer("880hz.mp3")
fourfourty_tone.play()
eighteighty_tone.play()
time.sleep(0.3)
time_before = time.time()
fourfourty_tone.stop()
eighteighty_tone.stop()
time_to_stop_tones = time.time() - time_before
print("time to stop tones: " + str(time_to_stop_tones))
#### read and write to arduino ###
import serial
import time
# port = '/dev/ttys000' for bluetooth
port = '/dev/tty.usbserial-18DNB483'
ems_serial = | |
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '14:30'
, end = '16:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:15'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '14:30'
, work_location = '1'
, wp = '40'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-10')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '16:00'
, end = '16:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:30'
, end = '12:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '14:30'
, end = '16:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '14:00'
, end = '14:30'
, work_location = '1'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '13:00'
, end = '14:00'
, work_location = '1'
, wp = '40'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-11')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '17:30'
, end = '18:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '12:00'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '14:00'
, end = '15:00'
, work_location = '1'
, wp = '7'
)
db.time_record.create \
( daily_record = dr
, start = '15:00'
, end = '16:00'
, work_location = '1'
, wp = '7'
)
db.time_record.create \
( daily_record = dr
, start = '16:00'
, end = '17:00'
, work_location = '1'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '14:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:30'
, end = '10:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '10:00'
, end = '11:00'
, work_location = '1'
, wp = '11'
)
db.time_record.create \
( daily_record = dr
, start = '11:00'
, end = '12:00'
, work_location = '1'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '21:00'
, end = '22:00'
, work_location = '2'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '17:00'
, end = '17:30'
, work_location = '1'
, wp = '40'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-12')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '14:00'
, end = '16:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:30'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '14:00'
, work_location = '1'
, wp = '40'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-13')
, weekend_allowed = 0
, required_overtime = 0
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-14')
, weekend_allowed = 0
, required_overtime = 0
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-15')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '17:30'
, end = '17:45'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '11:00'
, end = '13:00'
, work_location = '1'
, wp = '11'
)
db.time_record.create \
( daily_record = dr
, start = '08:15'
, end = '09:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '09:00'
, end = '10:00'
, work_location = '1'
, wp = '11'
)
db.time_record.create \
( daily_record = dr
, start = '10:00'
, end = '11:00'
, work_location = '1'
, wp = '5'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '16:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '16:30'
, end = '17:30'
, work_location = '1'
, wp = '5'
)
db.time_record.create \
( daily_record = dr
, start = '21:30'
, end = '22:30'
, work_location = '1'
, wp = '40'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-16')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '18:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '10:00'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:30'
, end = '10:00'
, work_location = '1'
, wp = '40'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-17')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '18:30'
, time_activity = '7'
, work_location = '3'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '12:00'
, end = '13:00'
, time_activity = '7'
, work_location = '3'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '05:45'
, end = '12:00'
, time_activity = '10'
, work_location = '3'
, wp = '40'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-18')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '16:30'
, end = '23:45'
, time_activity = '10'
, work_location = '1'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '08:00'
, end = '13:00'
, time_activity = '7'
, work_location = '3'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '16:30'
, time_activity = '10'
, work_location = '3'
, wp = '40'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-09-19')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '00:00'
, end = '00:45'
, time_activity = '10'
, work_location = '3'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '18:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '11:00'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '09:30'
, end = '10:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '10:00'
, end = '11:00'
, work_location = '1'
, wp = '11'
)
dr = db.daily_record.create \
( user = user
, date = | |
# -*- coding: utf-8 -*-
from __future__ import print_function
import bitso
import requests
from pprint import pformat as pf
import datetime
import csv
import time
import sys
# from ..helpers import ottoHelpers # Need to update to Python3
class Bitso(object):
    """ Class to perform Bitso trades over crypto-currencies
        and store prices in local CSV dumps for future analysis.

        Bitso Class attrs:
        - api : (bitso.Api) Bitso API object with authentification
        - acc_status : (bitso.AccountStatus) Bitso Account Status object
        - books : (list) Dictionaries with trading limits of each Currency-Pair
        - books_avail : (list) Keys of all available Currency-Pairs in Bitso
        - fees : (dict) Trading Fee in percentage indexed by Currency-Pair key
        - balances : (dict) Total and Available balances indexed by Currency key
        - currencies : (list) Available currencies
    """

    def __init__(self, api_key=None, secret=None):
        """ Constructor

            Params:
            -----
            - api_key : (str) API KEY provided by Bitso
            - secret : (str) API SECRET provided by Bitso
        """
        # Authenticated client when both credentials are given, public otherwise
        if api_key is not None and secret is not None:
            self.api = bitso.Api(api_key, secret)
        else:
            self.api = bitso.Api()
        # NOTE: order matters -- get_fees() filters on self.books_avail,
        # which get_books() populates.
        self.get_books()  # pass _show=True to display limits
        self.get_fees()
        self.get_limits()
        self.get_balances()

    def get_limits(self):
        """ Method to retrieve and show account status (daily trading limits).
        """
        self.acc_status = self.api.account_status()
        # wait for a sec. to respect API rate limits
        time.sleep(1)
        print("Daily Limit: $", self.acc_status.daily_limit)
        print("Daily Remaining: $", self.acc_status.daily_remaining, '\n')

    def get_fees(self):
        """ Method to retrieve and show trading fees per Currency-Pair.

            Requires self.books_avail to be set (see get_books).
        """
        _fees = self.api.fees()
        # wait for a sec. to respect API rate limits
        time.sleep(1)
        # Keep only the fee entries that correspond to available books
        self.fees = {_f: float(_fees.__dict__[_f].fee_percent)
                     for _f in _fees.__dict__
                     if _f in self.books_avail}
        print('Fees (%):', pf(self.fees), '\n')

    def get_balances(self):
        """ Method to retrieve and show total/available balances per currency.
        """
        _balances = self.api.balances()
        # wait for a sec. to respect API rate limits
        time.sleep(1)
        self.currencies = _balances.currencies
        # Index total/available amounts by currency key
        self.balances = {_b: {'available': float(_bv.__dict__['available']),
                              'total': float(_bv.__dict__['total'])}
                         for _b, _bv in _balances.__dict__.items()
                         if _b != 'currencies'}
        print('Currencies: ', pf(self.currencies), '\n')
        print('Balances: ', pf(self.balances), '\n')

    def get_books(self, _show=False):
        """ Method to fetch available books (Currency-Pairs) in Bitso.

            Params:
            -----
            - _show : (bool) Show minimum and maximum order values in Bitso

            Sets self.books and self.books_avail; returns None on request failure.
        """
        try:
            _av_books = requests.get("https://api.bitso.com/v3/available_books/")
            # wait for a sec. to respect API rate limits
            time.sleep(1)
        except requests.exceptions.RequestException as _rexc:
            print(_rexc)
            return None
        # Success verification
        if _av_books.json()['success']:
            self.books = _av_books.json()['payload']
        else:
            print('Request has not been successful!')
            return None
        # Results' display
        if _show:
            print(pf(self.books))
        self.books_avail = [_x['book'] for _x in self.books]
        print('Available books:', pf(self.books_avail), '\n')

    def price(self, _book):
        """ Method to verify Value of defined Pair of currencies

            Params:
            -----
            - _book : (str) Book or Pair of currencies to verify

            Returns:
            -----
            - (dict) Pair exchange values, or None on failure
            >>> {
                "book": "btc_mxn",
                "volume": "22.31349615",
                "high": "5750.00",
                "last": "5633.98",
                "low": "5450.00",
                "vwap": "5393.45",
                "ask": "5632.24",
                "bid": "5520.01",
                "created_at": "2016-04-08T17:52:31.000+00:00"
            }
        """
        try:
            _p = requests.get('https://api.bitso.com/v3/ticker/?book={}'.format(_book)).json()
            # wait a moment to respect API rate limits
            time.sleep(1.5)
        except Exception as e:
            print(e)
            return None
        # Success verification
        if not _p['success']:
            print('Request has not been successful!')
            return None
        # Persist the tick for later analysis (best effort)
        if not self.save_csv(_p['payload'], _p['payload']['book']):
            print('Could not save data into file')
        return _p['payload']

    def all_prices(self, valid='all'):
        """ Method to retrieve all prices from valid currencies

            Params:
            -----
            valid: (str | list) 'all' if wants to perform over each currency, otherwise send list of Currency-Pairs
        """
        # Validate currencies
        if valid == 'all':
            _pairs = self.books_avail
        else:
            _pairs = [_v for _v in valid if _v in self.books_avail]
        curr_prices = {}
        # Loop over each currency-pair, retrying when price() returned None
        for _c in _pairs:
            max_tries = 3
            for _try in range(max_tries):
                try:
                    curr_prices[_c] = float(self.price(_c)['last'])
                    break
                except TypeError:
                    # price() gave None -> subscripting failed; retry
                    print('Could not fetch price, retrying...')
                    time.sleep(2)
                    if _try == (max_tries-1):
                        print('Exceeded trials, shutting down!')
                        sys.exit()
            # Wait between pairs to avoid being blocked
            time.sleep(0.5)
        print('Current Currency-Pair prices: \n', pf(curr_prices), '\n')
        return curr_prices

    def save_csv(self, _dict, f_name):
        """ Method to convert JSON exchange values and save it into CSV dumps

            Params:
            - _dict: (dict) Data Values
            - f_name: (str) File Name

            Returns:
            - (bool) Saving Status
        """
        try:
            # Verify if file existed ('with' closes the probe handle)
            with open('data/{}.csv'.format(f_name), 'r'):
                print('File existed, appending...')
        except IOError:
            # If new file, write headers
            with open('data/{}.csv'.format(f_name), 'w') as f:
                print('Creating file with headers')
                writer = csv.DictWriter(f, fieldnames=list(_dict.keys()))
                writer.writeheader()
                print('File created, appending...')
        try:
            # Append data value into File; 'with' guarantees the handle is
            # closed even on error (the old code leaked it here)
            with open('data/{}.csv'.format(f_name), 'a') as f:
                writer = csv.DictWriter(f, fieldnames=list(_dict.keys()))
                writer.writerow(_dict)
            print('Saved {} data!'.format(f_name))
        except Exception as e:
            print(e)
            return False
        return True
class BitsoTrade(Bitso):
""" Class to perform trades over Bitso exchange, which inheritates
all methods from Bitso class.
BitsoTrade attrs:
- trade_prices: (dict) Dictionary of last prices indexed by Currency-pairs
- base_lines: (dict) Dictionary of base_line indexed by Currency_pairs
"""
def __init__(self, api_key, secret):
""" Constructor
"""
# Initialize Bitso Parent Class
super(BitsoTrade, self).__init__(api_key, secret)
self.trade_prices = {}
self.base_lines = {}
def in_bounds(self, amount, _pair):
""" Method to check if transaction is within trading bounds in Bitso
For Book Limits:
- minimum_amount: Minimum amount of major when placing an order.
- maximum_amount: Maximum amount of major when placing an order.
Params:
-----
- amount : (float) Amount of Major currency to Trade
- _pair: (str) Currency-Pair key
Returns:
-----
- (bool) : Valid or not.
"""
# Verify if is valid currency-pair
if _pair not in self.books_avail:
print('{} is not a valid Currency-Pair'.format(_pair))
return False
# Fetch book limit info
_tbook = [_b for _b in self.books if _pair == _b['book']][0]
# Compare if above minimum amount
if float(amount) < float(_tbook['minimum_amount']):
print('{} is too small to perform transaction'.format(amount))
return False
# Compare if below maximum amount
if float(amount) > float(_tbook['maximum_amount']):
print('{} is too big to perform transaction'.format(amount))
return False
return True
def fetch_currency(self, _pair, _side):
""" Method to return the correct currency definition to verify in limits
Params:
-----
- _pair: (str) Currency-Pair to Trade (Major_minor)
- _side: (str, 'buy' | 'sell') Trading Position
Returns:
-----
- (dict) Corresponding currency to buy and sell
"""
if _side == 'buy':
return {
"buying": _pair.split('_')[0],
"selling": _pair.split('_')[1]
}
else:
return {
"buying": _pair.split('_')[1],
"selling": _pair.split('_')[0]
}
def enough_balance(self, amount, _pair, _selling):
""" Method to verify there is enough balance in the selling currency
to proceed with the transaction.
Params:
-----
- amount: (str) Major amount to Trade
- _pair: (str) Currency-pair
- _selling: (float) Selling currency
Returns:
-----
- (float) Current balance of the selling currency
- (NoneType) In case there is not enough money to execute transaction
"""
# Update Balances
self.get_balances()
# If selling major compute balance directly
if _selling == _pair.split('_')[0]:
# If not enough balance
print('Selling, so checking Major to verify balance')
if amount > self.balances[_selling]['available']:
print('Balance {} in {} is not enough to perform transaction for {}'
.format(self.balances[_selling]['available'],
_selling,
amount))
return None
print('Balance {} in {} enough to sell {}'
.format(self.balances[_selling]['available'],
_selling,
amount))
return self.balances[_selling]['available']
# If selling minor, get last price of exchange between currencies
exc_price = self.price(_pair)
tmp_balance = self.balances[_selling]['available']
# Converting minor into Major currency equivalence to validate correct balance
print('Buying, so converting minor into Major to verify balance')
if (amount * float(exc_price['last'])) > tmp_balance:
print('{} is not enough balance in {} to perform transaction for {}'
.format(tmp_balance,
_selling,
amount * float(exc_price['last'])))
return None
print('Balance {} in {} enough to sell {}'
.format(tmp_balance,
_selling,
amount * float(exc_price['last'])))
return tmp_balance
def verify_trade(self, _pair, | |
is None
D = {1:2}
def f():
g(D)
return g()
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.knowntype == bool
assert not s.is_constant()
    def test_issubtype_and_const(self):
        """With a constant int argument, issubclass(type(x), A) annotates to a constant bool."""
        class A(object):
            pass
        class B(object):
            pass
        class C(A):
            pass
        b = B()
        c = C()
        def g(f):
            if f == 1:
                x = b
            elif f == 2:
                x = c
            else:
                x = C()
            t = type(x)
            return issubclass(t, A)
        a = self.RPythonAnnotator()
        x = annmodel.SomeInteger()
        x.const = 1
        s = a.build_types(g, [x])
        assert s.const == False
        a = self.RPythonAnnotator()
        x = annmodel.SomeInteger()
        x.const = 2
        s = a.build_types(g, [x])
        assert s.const == True
    def test_reading_also_generalizes(self):
        """Reading a dict/list with a general string key/item generalizes the
        container's key/item annotation to SomeString."""
        def f1(i):
            d = {'c': i}
            return d['not-a-char'], d
        a = self.RPythonAnnotator()
        s = a.build_types(f1, [int])
        assert dictkey(s.items[1]).__class__ == annmodel.SomeString
        def f2(i):
            d = {'c': i}
            return d.get('not-a-char', i+1), d
        a = self.RPythonAnnotator()
        s = a.build_types(f2, [int])
        assert dictkey(s.items[1]).__class__ == annmodel.SomeString
        def f3(i):
            d = {'c': i}
            return 'not-a-char' in d, d
        a = self.RPythonAnnotator()
        s = a.build_types(f3, [int])
        assert dictkey(s.items[1]).__class__ == annmodel.SomeString
        def f4():
            lst = ['a', 'b', 'c']
            return 'not-a-char' in lst, lst
        a = self.RPythonAnnotator()
        s = a.build_types(f4, [])
        assert listitem(s.items[1]).__class__ == annmodel.SomeString
        def f5():
            lst = ['a', 'b', 'c']
            return lst.index('not-a-char'), lst
        a = self.RPythonAnnotator()
        s = a.build_types(f5, [])
        assert listitem(s.items[1]).__class__ == annmodel.SomeString
    def test_true_str_is_not_none(self):
        """The union of f(None) and f('') still annotates as a str that cannot be None,
        because the truth test inside f filters the None path."""
        def f(s):
            if s:
                return s
            else:
                return ''
        def g(i):
            if i:
                return f(None)
            else:
                return f('')
        a = self.RPythonAnnotator()
        s = a.build_types(g, [int])
        assert s.knowntype == str
        assert not s.can_be_None
    def test_true_func_is_not_none(self):
        """Same as the str case but for function objects: the truth test inside f
        removes None, so the result annotation cannot be None."""
        def a1():
            pass
        def a2():
            pass
        def f(a):
            if a:
                return a
            else:
                return a2
        def g(i):
            if i:
                return f(None)
            else:
                return f(a1)
        a = self.RPythonAnnotator()
        s = a.build_types(g, [int])
        assert not s.can_be_None
    def test_string_noNUL_canbeNone(self):
        """A function returning either "abc" or None annotates as can_be_None with no_nul."""
        def f(a):
            if a:
                return "abc"
            else:
                return None
        a = self.RPythonAnnotator()
        s = a.build_types(f, [int])
        assert s.can_be_None
        assert s.no_nul
    def test_unicode_noNUL_canbeNone(self):
        """Unicode variant: u"abc" or None annotates as can_be_None with no_nul."""
        def f(a):
            if a:
                return u"abc"
            else:
                return None
        a = self.RPythonAnnotator()
        s = a.build_types(f, [int])
        assert s.can_be_None
        assert s.no_nul
    def test_str_or_None(self):
        """f returning "abc" or None keeps can_be_None and no_nul on its annotation."""
        def f(a):
            if a:
                return "abc"
            else:
                return None
        def g(a):
            x = f(a)
            if x is None:
                return "abcd"
            return x
        a = self.RPythonAnnotator()
        s = a.build_types(f, [int])
        assert s.can_be_None
        assert s.no_nul
    def test_unicode_or_None(self):
        """Unicode variant: f returning u"abc" or None keeps can_be_None and no_nul."""
        def f(a):
            if a:
                return u"abc"
            else:
                return None
        def g(a):
            x = f(a)
            if x is None:
                return u"abcd"
            return x
        a = self.RPythonAnnotator()
        s = a.build_types(f, [int])
        assert s.can_be_None
        assert s.no_nul
    def test_emulated_pbc_call_simple(self):
        """emulate_pbc_call annotates f's return as int and records the int,int->int
        signature on the function description."""
        def f(a,b):
            return a + b
        from rpython.annotator import annrpython
        a = annrpython.RPythonAnnotator()
        from rpython.annotator import model as annmodel
        s_f = a.bookkeeper.immutablevalue(f)
        a.bookkeeper.emulate_pbc_call('f', s_f, [annmodel.SomeInteger(), annmodel.SomeInteger()])
        a.complete()
        a.simplify()
        assert a.binding(graphof(a, f).getreturnvar()).knowntype == int
        fdesc = a.bookkeeper.getdesc(f)
        someint = annmodel.SomeInteger()
        assert (fdesc.get_s_signatures((2, (), False))
                == [([someint,someint],someint)])
    def test_emulated_pbc_call_callback(self):
        """The emulate_pbc_call callback runs (at least once) and sees the graph's
        final int binding; the emulated call itself yields SomeImpossibleValue."""
        def f(a,b):
            return a + b
        from rpython.annotator import annrpython
        a = annrpython.RPythonAnnotator()
        from rpython.annotator import model as annmodel
        memo = []
        def callb(ann, graph):
            memo.append(annmodel.SomeInteger() == ann.binding(graph.getreturnvar()))
        s_f = a.bookkeeper.immutablevalue(f)
        s = a.bookkeeper.emulate_pbc_call('f', s_f, [annmodel.SomeInteger(), annmodel.SomeInteger()],
                                          callback=callb)
        assert s == annmodel.SomeImpossibleValue()
        a.complete()
        assert a.binding(graphof(a, f).getreturnvar()).knowntype == int
        assert len(memo) >= 1
        for t in memo:
            assert t
    def test_iterator_union(self):
        """The union of two dict iteritems() iterators is a SomeIterator with
        variant ('items',)."""
        def it(d):
            return d.iteritems()
        d0 = {1:2}
        def f():
            it(d0)
            return it({1:2})
        a = self.RPythonAnnotator()
        s = a.build_types(f, [])
        assert isinstance(s, annmodel.SomeIterator)
        assert s.variant == ('items',)
    def test_iteritems_str0(self):
        """%-formatting of NUL-free dict items yields a SomeString with no_nul."""
        def it(d):
            return d.iteritems()
        def f():
            d0 = {'1a': '2a', '3': '4'}
            for item in it(d0):
                return "%s=%s" % item
            raise ValueError
        a = self.RPythonAnnotator()
        s = a.build_types(f, [])
        assert isinstance(s, annmodel.SomeString)
        assert s.no_nul
    def test_iteritems_unicode0(self):
        """Unicode variant: %-formatting of NUL-free dict items keeps no_nul."""
        def it(d):
            return d.iteritems()
        def f():
            d0 = {u'1a': u'2a', u'3': u'4'}
            for item in it(d0):
                return u"%s=%s" % item
            raise ValueError
        a = self.RPythonAnnotator()
        s = a.build_types(f, [])
        assert isinstance(s, annmodel.SomeUnicodeString)
        assert s.no_nul
    def test_no_nul_mod(self):
        """"%d" formatting of an int yields a SomeString annotated no_nul."""
        def f(x):
            s = "%d" % x
            return s
        a = self.RPythonAnnotator()
        s = a.build_types(f, [int])
        assert isinstance(s, annmodel.SomeString)
        assert s.no_nul
    def test_no_nul_mod_unicode(self):
        """Unicode variant: u"%d" formatting yields SomeUnicodeString with no_nul."""
        def f(x):
            s = u"%d" % x
            return s
        a = self.RPythonAnnotator()
        s = a.build_types(f, [int])
        assert isinstance(s, annmodel.SomeUnicodeString)
        assert s.no_nul
    def test_mul_str0(self):
        """String repetition (s*10) preserves the no_nul flag for str and unicode."""
        def f(s):
            return s*10
        a = self.RPythonAnnotator()
        s = a.build_types(f, [annmodel.SomeString(no_nul=True)])
        assert isinstance(s, annmodel.SomeString)
        assert s.no_nul
        a = self.RPythonAnnotator()
        s = a.build_types(f, [annmodel.SomeUnicodeString(no_nul=True)])
        assert isinstance(s, annmodel.SomeUnicodeString)
        assert s.no_nul
    def test_reverse_mul_str0(self):
        """Reflected repetition (10*s) also preserves no_nul for str and unicode."""
        def f(s):
            return 10*s
        a = self.RPythonAnnotator()
        s = a.build_types(f, [annmodel.SomeString(no_nul=True)])
        assert isinstance(s, annmodel.SomeString)
        assert s.no_nul
        a = self.RPythonAnnotator()
        s = a.build_types(f, [annmodel.SomeUnicodeString(no_nul=True)])
        assert isinstance(s, annmodel.SomeUnicodeString)
        assert s.no_nul
    def test_getitem_str0(self):
        """Indexing and slicing preserve no_nul when check_str_without_nul is enabled,
        for both str and unicode inputs."""
        def f(s, n):
            if n == 1:
                return s[0]
            elif n == 2:
                return s[1]
            elif n == 3:
                return s[1:]
            return s
        a = self.RPythonAnnotator()
        a.translator.config.translation.check_str_without_nul = True
        s = a.build_types(f, [annmodel.SomeString(no_nul=True),
                              annmodel.SomeInteger()])
        assert isinstance(s, annmodel.SomeString)
        assert s.no_nul
        a = self.RPythonAnnotator()
        a.translator.config.translation.check_str_without_nul = True
        s = a.build_types(f, [annmodel.SomeUnicodeString(no_nul=True),
                              annmodel.SomeInteger()])
        assert isinstance(s, annmodel.SomeUnicodeString)
        assert s.no_nul
    def test_non_none_and_none_with_isinstance(self):
        """Union of g(B()) and g(None) with an isinstance guard annotates as a
        SomeInstance of B's classdef."""
        class A(object):
            pass
        class B(A):
            pass
        def g(x):
            if isinstance(x, A):
                return x
            return None
        def f():
            g(B())
            return g(None)
        a = self.RPythonAnnotator()
        s = a.build_types(f, [])
        assert isinstance(s, annmodel.SomeInstance)
        assert s.classdef == a.bookkeeper.getuniqueclassdef(B)
    def test_type_is_no_improvement(self):
        """'type(x) is C' can never hold for a D argument, so the return annotates
        as SomeImpossibleValue."""
        class B(object):
            pass
        class C(B):
            pass
        class D(B):
            pass
        def f(x):
            if type(x) is C:
                return x
            raise Exception
        a = self.RPythonAnnotator()
        s = a.build_types(f, [D])
        assert s == annmodel.SomeImpossibleValue()
    def test_is_constant_instance(self):
        """An identity check against a prebuilt instance makes the return a constant
        bound to that very instance."""
        class A(object):
            pass
        prebuilt_instance = A()
        def f(x):
            if x is prebuilt_instance:
                return x
            raise Exception
        a = self.RPythonAnnotator()
        s = a.build_types(f, [A])
        assert s.is_constant()
        assert s.const is prebuilt_instance
    def test_call_memoized_function(self):
        """A specialize:memo function called with frozen keys annotates as int."""
        fr1 = Freezing()
        fr2 = Freezing()
        def getorbuild(key):
            a = 1
            if key is fr1:
                result = eval("a+2")
            else:
                result = eval("a+6")
            return result
        getorbuild._annspecialcase_ = "specialize:memo"
        def f1(i):
            if i > 0:
                fr = fr1
            else:
                fr = fr2
            return getorbuild(fr)
        a = self.RPythonAnnotator()
        s = a.build_types(f1, [int])
        assert s.knowntype == int
    def test_call_memoized_function_with_bools(self):
        """specialize:memo also works when extra bool arguments join the frozen key."""
        fr1 = Freezing()
        fr2 = Freezing()
        def getorbuild(key, flag1, flag2):
            a = 1
            if key is fr1:
                result = eval("a+2")
            else:
                result = eval("a+6")
            if flag1:
                result += 100
            if flag2:
                result += 1000
            return result
        getorbuild._annspecialcase_ = "specialize:memo"
        def f1(i):
            if i > 0:
                fr = fr1
            else:
                fr = fr2
            return getorbuild(fr, i % 2 == 0, i % 3 == 0)
        a = self.RPythonAnnotator()
        s = a.build_types(f1, [int])
        assert s.knowntype == int
    def test_stored_bound_method(self):
        """A bound method stored as an attribute and called later annotates to the
        constant 42 (issue 129)."""
        class H:
            def h(self):
                return 42
        class C:
            def __init__(self, func):
                self.f = func
            def do(self):
                return self.f()
        def g():
            h = H()
            c = C(h.h)
            return c.do()
        a = self.RPythonAnnotator()
        s = a.build_types(g, [])
        assert s.is_constant()
        assert s.const == 42
    def test_stored_bound_method_2(self):
        """Storing bound methods of two subclasses yields a non-constant int
        annotation (issue 129)."""
        class H:
            pass
        class H1(H):
            def h(self):
                return 42
        class H2(H):
            def h(self):
                return 17
        class C:
            def __init__(self, func):
                self.f = func
            def do(self):
                return self.f()
        def g(flag):
            if flag:
                h = H1()
            else:
                h = H2()
            c = C(h.h)
            return c.do()
        a = self.RPythonAnnotator()
        s = a.build_types(g, [int])
        assert s.knowntype == int
        assert not s.is_constant()
    def test_getorbuild_as_attr(self):
        """Cache.getorbuild used through an attribute resolves to distinct per-cache
        result types (int for CacheX, str for CacheY)."""
        from rpython.rlib.cache import Cache
        class SpaceCache(Cache):
            def _build(self, callable):
                return callable()
        class CacheX(Cache):
            def _build(self, key):
                return key.x
        class CacheY(Cache):
            def _build(self, key):
                return key.y
        class X:
            def __init__(self, x):
                self.x = x
            def _freeze_(self):
                return True
        class Y:
            def __init__(self, y):
                self.y = y
            def _freeze_(self):
                return True
        X1 = X(1)
        Y2 = Y("hello")
        fromcache = SpaceCache().getorbuild
        def f():
            return (fromcache(CacheX).getorbuild(X1),
                    fromcache(CacheY).getorbuild(Y2))
        a = self.RPythonAnnotator()
        s = a.build_types(f, [])
        assert s.items[0].knowntype == int
        assert s.items[1].knowntype == str
    def test_constant_bound_method(self):
        """Calling a prebuilt bound method stored at module level annotates as int."""
        class C:
            def __init__(self, value):
                self.value = value
            def meth(self):
                return self.value
        meth = C(1).meth
        def f():
            return meth()
        a = self.RPythonAnnotator()
        s = a.build_types(f, [])
        assert s.knowntype == int
    def test_annotate__del__(self):
        """__del__ gets its own annotated graph even though it is only called
        implicitly by the destructor machinery."""
        class A(object):
            def __init__(self):
                self.a = 2
            def __del__(self):
                self.a = 1
        def f():
            return A().a
        a = self.RPythonAnnotator()
        t = a.translator
        s = a.build_types(f, [])
        assert s.knowntype == int
        graph = tgraphof(t, A.__del__.im_func)
        assert graph.startblock in a.annotated
def test_annotate__del__baseclass(self):
class A(object):
def __init__(self):
self.a = 2
def __del__(self):
self.a = 1
class B(A):
def __init__(self):
self.a = 3
def f():
return B().a
| |
# <gh_stars>0  (scraper artifact, not valid Python)
import os
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal
import resqpy.derived_model as rqdm
import resqpy.fault as rqf
import resqpy.grid as grr
import resqpy.lines as rql
import resqpy.model as rq
import resqpy.olio.uuid as bu
import resqpy.olio.xml_et as rqet
import resqpy.property as rqp
# yapf: disable
@pytest.mark.parametrize('inc_list,tmult_dict,expected_mult',
[(['fault_1.inc'], {}, {'fault_1': 1}),
(['fault_1.inc'], {'fault_1': 2}, {'fault_1': 2}),
(['fault_1.inc', 'fault_2.inc'], {'fault_1': 2}, {'fault_1': 2, 'fault_2': 2})])
# yapf: enable
def test_add_connection_set_and_tmults(example_model_with_properties, test_data_path, inc_list, tmult_dict,
expected_mult):
model = example_model_with_properties
inc_list = [os.path.join(test_data_path, inc) for inc in inc_list]
gcs_uuid = rqf.add_connection_set_and_tmults(model, inc_list, tmult_dict)
assert gcs_uuid is not None, 'Grid connection set not generated'
reload_model = rq.Model(epc_file = model.epc_file)
faults = reload_model.parts_list_of_type('obj_FaultInterpretation')
assert len(faults) == len(expected_mult.keys()), \
f'Expected a {len(expected_mult.keys())} faults, found {len(faults)}'
for fault in faults:
metadata = rqet.load_metadata_from_xml(reload_model.root_for_part(fault))
title = reload_model.citation_title_for_part(fault)
expected_str = str(float(expected_mult[title]))
assert metadata["Transmissibility multiplier"] == expected_str, \
f'Expected mult for fault {title} to be {expected_str}, found {metadata["Transmissibility multiplier"]}'
# check that a transmissibility multiplier property has been created
gcs = rqf.GridConnectionSet(reload_model, uuid = gcs_uuid, find_properties = True)
assert gcs is not None
pc = gcs.property_collection
assert pc is not None and pc.number_of_parts() > 0
part = pc.singleton(property_kind = 'transmissibility multiplier')
assert part is not None
# check property values are in expected set
a = pc.cached_part_array_ref(part)
assert a is not None and a.ndim == 1
expect = [x for x in expected_mult.values()]
assert all([v in expect for v in a])
# see if a local property kind has been set up correctly
pku = pc.local_property_kind_uuid(part)
assert pku is not None
pk = rqp.PropertyKind(reload_model, uuid = pku)
assert pk is not None
assert pk.title == 'transmissibility multiplier'
def test_gcs_property_inheritance(tmp_path):
    """Filter a grid connection set by layer range and check property inheritance.

    Builds an L-shaped fault on a regular grid, attaches a transmissibility
    multiplier property, derives a layer-filtered connection set and checks
    that the property values are inherited for the selected indices.
    """
    epc = os.path.join(tmp_path, 'gcs_prop_inherit.epc')
    model = rq.Model(epc, new_epc = True, create_basics = True, create_hdf5_ext = True)
    # create a grid
    g = grr.RegularGrid(model, extent_kji = (5, 3, 3), dxyz = (10.0, 10.0, 1.0))
    g.write_hdf5()
    g.create_xml(title = 'unsplit grid')
    # define an L shaped (in plan view) fault
    j_faces = np.zeros((g.nk, g.nj - 1, g.ni), dtype = bool)
    j_faces[:, 0, 1:] = True
    i_faces = np.zeros((g.nk, g.nj, g.ni - 1), dtype = bool)
    i_faces[:, 1:, 0] = True
    gcs = rqf.GridConnectionSet(model,
                                grid = g,
                                j_faces = j_faces,
                                i_faces = i_faces,
                                feature_name = 'L fault',
                                create_organizing_objects_where_needed = True,
                                create_transmissibility_multiplier_property = False)
    # check that connection set has the right number of cell face pairs
    assert gcs.count == g.nk * ((g.nj - 1) + (g.ni - 1))
    # create a transmissibility multiplier property (distinct value per face pair)
    tm = np.arange(gcs.count).astype(float)
    if gcs.property_collection is None:
        gcs.property_collection = rqp.PropertyCollection()
        gcs.property_collection.set_support(support = gcs)
    pc = gcs.property_collection
    pc.add_cached_array_to_imported_list(
        tm,
        'unit test',
        'TMULT',
        uom = 'Euc',  # actually a ratio of transmissibilities
        property_kind = 'transmissibility multiplier',
        local_property_kind_uuid = None,
        realization = None,
        indexable_element = 'faces')
    # write gcs which should also write property collection and create a local property kind
    gcs.write_hdf5()
    gcs.create_xml(write_new_properties = True)
    # check that a local property kind has materialised
    pk_uuid = model.uuid(obj_type = 'PropertyKind', title = 'transmissibility multiplier')
    assert pk_uuid is not None
    # create a derived grid connection set using a layer range
    thin_gcs, thin_indices = gcs.filtered_by_layer_range(min_k0 = 1, max_k0 = 3, return_indices = True)
    assert thin_gcs is not None and thin_indices is not None
    assert thin_gcs.count == 3 * ((g.nj - 1) + (g.ni - 1))
    # inherit the transmissibility multiplier property
    thin_gcs.inherit_properties_for_selected_indices(gcs, thin_indices)
    thin_gcs.write_hdf5()
    thin_gcs.create_xml()  # by default will include write of new properties
    # check that the inheritance has worked
    assert thin_gcs.property_collection is not None and thin_gcs.property_collection.number_of_parts() > 0
    thin_pc = thin_gcs.property_collection
    tm_part = thin_pc.singleton(property_kind = 'transmissibility multiplier')
    assert tm_part is not None
    thin_tm = thin_pc.cached_part_array_ref(tm_part)
    assert thin_tm is not None and thin_tm.ndim == 1
    assert thin_tm.size == thin_gcs.count
    assert_array_almost_equal(thin_tm, tm[thin_indices])
    # check that get_combined...() method can execute using property collection
    b_a, i_a, f_a = gcs.get_combined_fault_mask_index_value_arrays(min_k = 1,
                                                                   max_k = 3,
                                                                   property_name = 'Transmissibility multiplier',
                                                                   ref_k = 2)
    assert b_a is not None and i_a is not None and f_a is not None
    # check that transmissibility multiplier values have been sampled correctly from property array
    assert f_a.shape == (g.nj, g.ni, 2, 2)
    assert np.count_nonzero(np.isnan(f_a)) == 4 * g.nj * g.ni - 2 * ((g.nj - 1) + (g.ni - 1))
    assert np.nanmax(f_a) > np.nanmin(f_a)
    # silence numpy float warnings while comparing arrays containing NaN
    restore = np.seterr(all = 'ignore')
    assert np.all(np.logical_or(np.isnan(f_a), f_a >= np.nanmin(thin_tm)))
    assert np.all(np.logical_or(np.isnan(f_a), f_a <= np.nanmax(thin_tm)))
    np.seterr(**restore)
def test_pinchout_and_k_gap_gcs(tmp_path):
    """Build a grid with a pinchout and a K gap; check the derived connection sets."""
    epc = os.path.join(tmp_path, 'gcs_pinchout_k_gap.epc')
    model = rq.new_model(epc)
    # create a grid
    g = grr.RegularGrid(model, extent_kji = (5, 5, 5), dxyz = (100.0, 100.0, 10.0), as_irregular_grid = True)
    # patch points to generate a pinchout
    p = g.points_cached
    assert p.shape == (6, 6, 6, 3)
    p[2, :3, :3] = p[1, :3, :3]
    # convert one layer to a K gap with pinchout
    p[4, 3:, 3:] = p[3, 3:, 3:]
    g.nk -= 1
    g.extent_kji = np.array((g.nk, g.nj, g.ni), dtype = int)
    g.k_gaps = 1
    g.k_gap_after_array = np.zeros(g.nk - 1, dtype = bool)
    g.k_gap_after_array[2] = True
    g._set_k_raw_index_array()
    g.write_hdf5()
    g.create_xml(title = 'pinchout k gap grid')
    model.store_epc()
    # reload the grid
    model = rq.Model(epc)
    grid = model.grid()
    assert grid is not None
    assert grid.k_gaps == 1
    assert tuple(grid.extent_kji) == (4, 5, 5)
    # create a pinchout connection set
    po_gcs = rqf.pinchout_connection_set(grid)
    assert po_gcs is not None
    po_gcs.write_hdf5()
    po_gcs.create_xml()
    po_uuid = po_gcs.uuid
    # create a K gap connection set
    kg_gcs = rqf.k_gap_connection_set(grid)
    assert kg_gcs is not None
    kg_gcs.write_hdf5()
    kg_gcs.create_xml()
    kg_uuid = kg_gcs.uuid
    model.store_epc()
    # re-open the model and load the connection sets
    model = rq.Model(epc)
    po_gcs = rqf.GridConnectionSet(model, uuid = po_uuid)
    assert po_gcs is not None
    po_gcs.cache_arrays()
    kg_gcs = rqf.GridConnectionSet(model, uuid = kg_uuid)
    assert kg_gcs is not None
    kg_gcs.cache_arrays()
    # check face pairs in the pinchout connection set
    assert po_gcs.count == 4
    assert po_gcs.cell_index_pairs.shape == (4, 2)
    assert po_gcs.face_index_pairs.shape == (4, 2)
    assert np.all(po_gcs.cell_index_pairs[:, 0] != po_gcs.cell_index_pairs[:, 1])
    # NOTE(review): compares face indices to cell indices; possibly intended
    # to be face_index_pairs[:, 1] on the right hand side -- confirm
    assert np.all(po_gcs.face_index_pairs[:, 0] != po_gcs.cell_index_pairs[:, 1])
    assert np.all(np.logical_or(po_gcs.face_index_pairs == 0, po_gcs.face_index_pairs == 1))
    for cell in po_gcs.cell_index_pairs.flatten():
        assert cell in [0, 1, 5, 6, 50, 51, 55, 56]
    assert np.all(np.abs(po_gcs.cell_index_pairs[:, 1] - po_gcs.cell_index_pairs[:, 0]) == 50)
    # check face pairs in K gap connection set
    assert kg_gcs.count == 4
    assert kg_gcs.cell_index_pairs.shape == (4, 2)
    assert kg_gcs.face_index_pairs.shape == (4, 2)
    assert np.all(kg_gcs.cell_index_pairs[:, 0] != kg_gcs.cell_index_pairs[:, 1])
    # NOTE(review): same face-vs-cell comparison as above -- confirm intent
    assert np.all(kg_gcs.face_index_pairs[:, 0] != kg_gcs.cell_index_pairs[:, 1])
    assert np.all(np.logical_or(kg_gcs.face_index_pairs == 0, kg_gcs.face_index_pairs == 1))
    for cell in kg_gcs.cell_index_pairs.flatten():
        assert cell in [74, 73, 69, 68, 99, 98, 94, 93]
    assert np.all(np.abs(kg_gcs.cell_index_pairs[:, 1] - kg_gcs.cell_index_pairs[:, 0]) == 25)
    # test compact indices method
    ci = po_gcs.compact_indices()
    assert ci.shape == (4, 2)
    for cf in ci.flatten():
        assert cf in [1, 7, 31, 37, 300, 306, 330, 336]
    assert np.all(np.abs(ci[:, 1] - ci[:, 0]) == 299)
    # test write simulator method with each include/minus option combination
    files = ('fault_ff.dat', 'fault_tf.dat', 'fault_ft.dat')
    both_sides = (False, True, False)
    minus = (False, False, True)
    for filename, inc_both_sides, use_minus in zip(files, both_sides, minus):
        dat_path = os.path.join(tmp_path, filename)
        po_gcs.write_simulator(dat_path, include_both_sides = inc_both_sides, use_minus = use_minus)
        assert os.path.exists(dat_path)
def test_two_fault_gcs(tmp_path):
    """Reload a two-fault connection set and exercise the feature query API."""
    epc = make_epc_with_gcs(tmp_path)
    # re-open the model and check the gcs
    model = rq.Model(epc)
    gcs_uuid = model.uuid(obj_type = 'GridConnectionSetRepresentation')
    assert gcs_uuid is not None
    gcs = rqf.GridConnectionSet(model, uuid = gcs_uuid)
    assert gcs is not None
    assert gcs.number_of_features() == 2
    feature_names = gcs.list_of_feature_names()
    assert len(feature_names) == 2
    assert 'F1' in feature_names and 'F2' in feature_names
    fault_names = gcs.list_of_fault_names()
    assert fault_names == feature_names
    # feature index <-> name mappings must be consistent and distinct
    for fi in (0, 1):
        assert gcs.feature_name_for_feature_index(fi) in ('F1', 'F2')
    assert gcs.feature_name_for_feature_index(0) != gcs.feature_name_for_feature_index(1)
    fi, f_uuid = gcs.feature_index_and_uuid_for_fault_name('F1')
    assert fi is not None and fi in (0, 1)
    assert f_uuid is not None
    assert gcs.fault_name_for_feature_index(fi) == 'F1'
    # cell (1, 1, 1) has no fault on axis 0, but one fault on each of axes 1 and 2
    assert gcs.feature_index_for_cell_face((1, 1, 1), 0, 1) is None
    fi_a = gcs.feature_index_for_cell_face((1, 1, 1), 2, 1)
    assert fi_a in (0, 1)
    fi_b = gcs.feature_index_for_cell_face((1, 1, 1), 1, 1)
    assert fi_b in (0, 1)
    assert fi_a != fi_b
    gcs.rework_face_pairs()
def test_feature_inheritance(tmp_path):
epc = make_epc_with_gcs(tmp_path)
# introduce a split version of the grid
model = rq.Model(epc)
simple_gcs_uuid = model.uuid(obj_type = 'GridConnectionSetRepresentation')
assert simple_gcs_uuid is not None
crs_uuid = model.uuid(obj_type = 'LocalDepth3dCrs')
assert crs_uuid is not None
line_a = rql.Polyline(model,
set_bool = False,
set_crs = crs_uuid,
title = 'line a',
set_coord = | |
<filename>pldm_bej_encoder_decoder.py
"""
PLDM BEJ Encoder/Decoder
File : pldm_bej_encoder_decoder.py
Brief : This file allows encoding a JSON file to PLDM Binary encoded JSON (BEJ) and
decoding a PLDM BEJ file back into JSON.
"""
import argparse
import json
import io
import sys
import os
import re
import string
# BEJ value format codes; packed into the upper nibble of the SFLV format
# byte (see bej_pack_sfl / bej_unpack_sfl below)
BEJ_FORMAT_SET = 0x00
BEJ_FORMAT_ARRAY = 0x01
BEJ_FORMAT_NULL = 0x02
BEJ_FORMAT_INTEGER = 0x03
BEJ_FORMAT_ENUM = 0x04
BEJ_FORMAT_STRING = 0x05
BEJ_FORMAT_REAL = 0x06
BEJ_FORMAT_BOOLEAN = 0x07
BEJ_FORMAT_BYTE_STRING = 0x08
BEJ_FORMAT_CHOICE = 0x09
BEJ_FORMAT_PROPERTY_ANNOTATION = 0x0A
BEJ_FORMAT_RESOURCE_LINK = 0x0E
BEJ_FORMAT_RESOURCE_LINK_EXPANSION = 0x0F
BEJ_FORMAT_UNKNOWN = 0xFF
# dictionary selector: carried in the low bit of a packed sequence number
# (see bej_decode_sequence_number)
BEJ_DICTIONARY_SELECTOR_MAJOR_SCHEMA = 0x00
BEJ_DICTIONARY_SELECTOR_ANNOTATION = 0x01
# working width (in bytes) used when trimming 2's-complement integer encodings
NUM_BYTES_FOR_INTEGER = 8
VALID_ASCII_PRINT_CHARS = string.ascii_letters + string.hexdigits + string.punctuation
def print_hex(byte_buf, max_size=None, add_line_number=True, show_ascii=True):
    """
    Prints a byte array as a hex dump, 16 bytes per row, with an optional
    offset column at the start and an optional ASCII rendering at the end
    of each row.

    Args:
        byte_buf: byte array to be printed as a hex dump
        max_size: Number of bytes to print, None indicates to print all bytes
        add_line_number: Set to True to show line numbers
        show_ascii: Set to True to print ASCII
    """
    ascii_print = ''
    limit_size = True if max_size else False
    for ii, byte in enumerate(byte_buf):
        if limit_size and ii >= max_size:
            break
        mod = ii % 16
        next_mod = (ii + 1) % 16
        if add_line_number and mod == 0:
            print(format(ii, '#08X')+': ', end="")
        print(format(byte, '02X'), end=" ")
        byte_char = format(byte, 'c')
        if show_ascii:
            # non-printable bytes are rendered as '.'
            ascii_print += (byte_char if byte_char in VALID_ASCII_PRINT_CHARS else '.')
        if next_mod == 0:
            # Print the ascii line
            if show_ascii:
                print(ascii_print, end="")
                ascii_print = ''
            print('')
    # Bug fix: the ASCII rendering of a final partial row used to be
    # accumulated but never printed; flush it here, padded so it lines up
    # with the ASCII column of full rows (3 chars per missing hex byte).
    if show_ascii and ascii_print:
        print(' ' * (3 * (16 - len(ascii_print))) + ascii_print, end="")
    # Add a newline to separate
    print('')
def twos_complement(value, nbits):
    """Return the unsigned two's-complement representation of *value* in *nbits* bits."""
    modulus = 1 << nbits
    return (value + modulus) % modulus
def find_num_bytes_and_msb(value):
    """Return (byte_count, most_significant_byte) for the minimal 2's-complement
    encoding of *value*, which must fit in 64 bits.

    Leading 0x00 bytes (positive values) and leading 0xFF bytes (negative
    values) are treated as padding and stripped.
    """
    if value == 0:
        return 1, 0x00
    if value == -1:
        return 1, 0xff
    # big-endian puts the MSB first, which makes the padding easy to skip
    big_endian = twos_complement(value, 64).to_bytes(NUM_BYTES_FOR_INTEGER, 'big')
    padding = 0x00 if value > 0 else 0xff
    for index, byte in enumerate(big_endian):
        if byte != padding:
            return NUM_BYTES_FOR_INTEGER - index, byte
def num_bytes_for_unsigned_integer(value):
    """Return the minimal number of bytes needed to hold non-negative *value* (at least 1)."""
    return max(1, (value.bit_length() + 7) // 8)
def bej_pack_nnint(stream, value, num_bytes):
    """
    Pack *value* as a BEJ nnint: one byte giving the value's width, then the
    value itself in little-endian order. Examples: 65 -> 0x01 0x41,
    130 -> 0x01 0x82, 1337 -> 0x02 0x39 0x05.

    Args:
        stream: writable binary stream
        value: non-negative integer to encode
        num_bytes: explicit width for the value; 0 selects the minimal width

    Return: -1 if error or no bytes written, >= 0 indicates number of bytes packed
    """
    width = num_bytes_for_unsigned_integer(value)
    if num_bytes:
        if num_bytes < width:
            # the requested fixed width cannot hold the value
            return -1
        width = num_bytes
    written = stream.write(width.to_bytes(1, 'little'))
    written += stream.write(value.to_bytes(width, 'little'))
    return written
def bej_unpack_nnint(stream):
    """Read a BEJ nnint (width byte followed by little-endian value) from *stream*."""
    width = int.from_bytes(stream.read(1), 'little')
    return int.from_bytes(stream.read(width), 'little')
def bej_pack_sfl(stream, seq_num, format, length):
    """Pack the sequence/format/length prefix of a BEJ SFLV tuple."""
    # sequence number as nnint
    written = bej_pack_nnint(stream, seq_num, 0)
    # the format code occupies the upper nibble of the format byte
    written += stream.write((format << 4).to_bytes(1, 'little'))
    # length as nnint
    written += bej_pack_nnint(stream, length, 0)
    return written
def bej_unpack_sfl(stream):
    """Unpack a BEJ sequence/format/length prefix; returns (seq, format, length)."""
    seq = bej_unpack_nnint(stream)
    # the format code lives in the upper nibble of the format byte
    format_byte = int.from_bytes(stream.read(1), 'little')
    length = bej_unpack_nnint(stream)
    return seq, format_byte >> 4, length
def bej_pack_sflv_string(stream, seq_num, str):
    """Pack a BEJ SFLV tuple for a null-terminated string value.

    The length field counts the encoded bytes plus the null terminator.
    (Note: parameter name `str` shadows the builtin; kept for interface
    compatibility.)
    """
    # Bug fix: encode first and use the byte length. The previous code used
    # len(str) + 1 (character count), which is wrong for non-ASCII text where
    # the UTF-8 byte length exceeds the character count.
    encoded = str.encode()
    num_bytes_packed = bej_pack_sfl(stream, seq_num, BEJ_FORMAT_STRING, len(encoded) + 1)
    # pack str
    null = 0
    num_bytes_packed += stream.write(encoded)
    num_bytes_packed += stream.write(null.to_bytes(1, 'little'))  # null termination
    return num_bytes_packed
def bej_decode_sequence_number(seq):
    """
    Split a packed sequence number into (sequence_number, dictionary_selector).
    The selector is carried in the low bit; the remaining bits are the number.
    """
    return seq >> 1, seq % 2
def bej_unpack_sflv_string(stream):
    """Unpack a string SFLV; returns ((seq, selector), value minus its null terminator)."""
    seq, format, length = bej_unpack_sfl(stream)
    decoded = stream.read(length).decode()
    # drop the trailing null terminator before returning
    return bej_decode_sequence_number(seq), decoded[:length - 1]
def bej_pack_sflv_boolean(stream, seq_num, val):
    """Pack a boolean SFLV: a single byte, 0x01 for true and 0x00 for false."""
    written = bej_pack_sfl(stream, seq_num, BEJ_FORMAT_BOOLEAN, 1)
    # equality (not truthiness) comparison preserved from the original behaviour
    payload = 0x01 if val == True else 0x00
    written += stream.write(payload.to_bytes(1, 'little'))
    return written
def bej_unpack_sflv_boolean(stream):
    """Unpack a boolean SFLV; returns ((seq, selector), 'true' or 'false')."""
    seq, format, length = bej_unpack_sfl(stream)
    payload = stream.read(length)
    # only a leading 0x01 byte counts as true; anything else reads as false
    result = 'true' if payload[0] == 0x01 else 'false'
    return bej_decode_sequence_number(seq), result
def bej_pack_sflv_integer(stream, seq_num, value):
    """Pack a signed-integer SFLV as minimal little-endian 2's complement.

    A padding byte is appended when the top byte's sign bit would otherwise
    misrepresent the value's sign.
    """
    num_bytes_for_value, msb = find_num_bytes_and_msb(value)
    # determine if padding is required to guarantee 2's complement
    is_padding_required = False
    pad = 0
    if value > 0 and (msb & 0x80):
        # positive value whose top byte has the high bit set: add a 0x00 byte
        # so the encoding does not read back as negative
        is_padding_required = True
    elif value < 0 and not (msb & 0x80):
        # Bug fix: negative value whose top byte has the high bit clear
        # (e.g. -256 trims to a single 0x00 byte) previously lost its sign;
        # append a 0xFF byte so the encoding reads back as negative.
        is_padding_required = True
        pad = 0xff
    num_bytes_packed = bej_pack_sfl(stream, seq_num, BEJ_FORMAT_INTEGER,
                                    num_bytes_for_value+1 if is_padding_required else num_bytes_for_value)
    # pack the value
    num_bytes_packed += stream.write(twos_complement(value, 64).to_bytes(8, 'little')[:num_bytes_for_value])
    # add padding if needed (little-endian: padding is the most significant byte)
    if is_padding_required:
        num_bytes_packed += stream.write(pad.to_bytes(1, 'little'))
    return num_bytes_packed
def bej_unpack_sflv_integer(stream):
    """Unpack an integer SFLV; the payload is little-endian 2's complement."""
    seq, format, length = bej_unpack_sfl(stream)
    payload = stream.read(length)
    return bej_decode_sequence_number(seq), int.from_bytes(payload, 'little', signed=True)
def bej_pack_sflv_enum(stream, seq_num, value):
    """Pack an enum SFLV; the value is the option's index encoded as an nnint."""
    # Bug fix: the length must cover the full nnint encoding of the value
    # (1 width byte + value bytes), matching bej_pack_sflv_resource_link;
    # it was previously hard-coded to 1.
    num_bytes_packed = bej_pack_sfl(stream, seq_num, BEJ_FORMAT_ENUM,
                                    num_bytes_for_unsigned_integer(value) + 1)
    num_bytes_packed += bej_pack_nnint(stream, value, 0)
    return num_bytes_packed
def bej_unpack_sflv_enum(stream):
    """Unpack an enum SFLV; the value is an nnint-encoded option index."""
    seq, format, length = bej_unpack_sfl(stream)
    option_index = bej_unpack_nnint(stream)
    return bej_decode_sequence_number(seq), option_index
def bej_pack_sflv_resource_link(stream, seq_num, pdr):
    """Pack a resource-link SFLV; the value is the PDR number as an nnint."""
    # the length covers the nnint width byte plus the value bytes
    payload_len = num_bytes_for_unsigned_integer(pdr) + 1
    written = bej_pack_sfl(stream, seq_num, BEJ_FORMAT_RESOURCE_LINK, payload_len)
    written += bej_pack_nnint(stream, pdr, 0)
    return written
def bej_unpack_sflv_resource_link(stream):
    """Unpack a resource-link SFLV; the value is an nnint-encoded PDR number."""
    seq, format, length = bej_unpack_sfl(stream)
    pdr = bej_unpack_nnint(stream)
    return bej_decode_sequence_number(seq), pdr
# Stack of parent output streams used while packing nested BEJ containers
# (set / array / property annotation): each *_start pushes the caller's
# stream and returns a temporary buffer; the matching *_done pops the parent
# and splices the buffered body back in. Warning! not thread safe
bej_set_stream_stack = []
def bej_pack_set_start(stream, count):
    """Begin packing a BEJ set: park *stream* on the stack and return a fresh
    buffer pre-loaded with the member count (nnint)."""
    bej_set_stream_stack.append(stream)
    inner = io.BytesIO()
    bej_pack_nnint(inner, count, 0)
    return inner
def bej_pack_set_done(stream, seq_num):
    """Finish a BEJ set: write the SFL prefix (length now known) to the parked
    parent stream, then splice the buffered set body after it.

    Returns the total number of bytes written to the parent stream.
    """
    body = stream.getvalue()
    parent = bej_set_stream_stack.pop()
    written = bej_pack_sfl(parent, seq_num, BEJ_FORMAT_SET, len(body))
    parent.write(body)
    return written + len(body)
def bej_pack_array_start(stream, count):
    """Begin packing a BEJ array: park *stream* on the stack and return a fresh
    buffer pre-loaded with the element count (nnint)."""
    bej_set_stream_stack.append(stream)
    inner = io.BytesIO()
    bej_pack_nnint(inner, count, 0)
    return inner
def bej_pack_array_done(stream, seq_num):
    """Finish a BEJ array: write the SFL prefix (length now known) to the parked
    parent stream, then splice the buffered array body after it.

    Returns the total number of bytes written to the parent stream.
    """
    body = stream.getvalue()
    parent = bej_set_stream_stack.pop()
    written = bej_pack_sfl(parent, seq_num, BEJ_FORMAT_ARRAY, len(body))
    parent.write(body)
    return written + len(body)
def bej_pack_property_annotation_start(stream):
    """Begin packing a property annotation: park *stream* on the stack and
    return a fresh buffer (annotations carry no leading count)."""
    bej_set_stream_stack.append(stream)
    return io.BytesIO()
def bej_pack_property_annotation_done(stream, annotation_seq):
    """Finish a property annotation: write the SFL prefix (length now known)
    to the parked parent stream, then splice the buffered body after it.

    Returns the total number of bytes written to the parent stream.
    """
    body = stream.getvalue()
    parent = bej_set_stream_stack.pop()
    written = bej_pack_sfl(parent, annotation_seq, BEJ_FORMAT_PROPERTY_ANNOTATION, len(body))
    parent.write(body)
    return written + len(body)
def bej_unpack_set_start(stream):
    """Consume a set's SFL prefix and member count, leaving the stream at the
    first member.

    :param stream:
    :return: sequence_num, count
    """
    seq, format, length = bej_unpack_sfl(stream)
    member_count = bej_unpack_nnint(stream)
    return bej_decode_sequence_number(seq), member_count
def bej_unpack_array_start(stream):
    """Consume an array's SFL prefix and element count, leaving the stream at
    the first element.

    :param stream:
    :return: sequence_num, count
    """
    seq, format, length = bej_unpack_sfl(stream)
    element_count = bej_unpack_nnint(stream)
    return bej_decode_sequence_number(seq), element_count
def bej_unpack_property_annotation_start(stream):
    '''
    Consume a property annotation's SFL prefix and return
    (annotation_sequence_number, property_sequence_number).

    :param stream:
    :return:
    '''
    # move the stream to point to the first element in the set
    seq, format, length = bej_unpack_sfl(stream)
    prop_seq, selector = bej_decode_sequence_number(seq)
    # NOTE(review): bej_sequenceof is not defined anywhere visible in this
    # file -- confirm it exists elsewhere, or whether bej_unpack_nnint was
    # intended here
    annot_seq, selector = bej_decode_sequence_number(bej_sequenceof(stream))
    return annot_seq, prop_seq
    pass  # NOTE(review): unreachable statement after return
def bej_unpack_array_done():
    # placeholder kept for symmetry with the bej_pack_* API; no state to clean up
    pass
def bej_unpack_property_annotation_done():
    # placeholder kept for symmetry with the bej_pack_* API; no state to clean up
    pass
# Positional indices into a dictionary entry record -- presumably the list
# built by DictionaryByteArrayStream.get_next_entry() (definition truncated
# below; confirm)
DICTIONARY_ENTRY_FORMAT = 0
DICTIONARY_ENTRY_SEQUENCE_NUMBER = 1
DICTIONARY_ENTRY_OFFSET = 2
DICTIONARY_ENTRY_CHILD_COUNT = 3
DICTIONARY_ENTRY_NAME = 4
class DictionaryByteArrayStream:
    def __init__(self, byte_array, offset=0, child_count=-1):
        """Sequential little-endian reader over a BEJ dictionary byte array.

        Args:
            byte_array: raw dictionary bytes
            offset: byte index of the first entry to read; 0 means the start
                of the dictionary, in which case the fixed header is consumed
                here and the stream is left positioned at the root entry
            child_count: number of entries readable from offset (ignored when
                offset is 0, where the header implies a single root entry)
        """
        self._byte_array = byte_array
        self._current_index = offset
        self._child_count = child_count
        self._current_entry = 0
        if self._current_index == 0:
            # skip thru the header (field sizes are fixed; read order matters)
            self.get_int(1)  # VersionTag
            self.get_int(1)  # DictionaryFlags
            self.get_int(4)  # SchemaVersion
            self._total_entries = self.get_int(2)  # EntryCount
            self.get_int(4)  # DictionarySize
            # the dictionary root is a single entry
            self._child_count = 1
    def get_offset(self):
        """Return the current byte offset of the read cursor."""
        return self._current_index
    def get_child_count(self):
        """Return the number of child entries this stream is expected to yield."""
        return self._child_count
def get_int(self, size):
value = int.from_bytes(self._byte_array[self._current_index:self._current_index+size], 'little')
self._current_index += size
return value
    def has_entry(self):
        # True while sequential reads have not exhausted the expected child entries
        return self._current_entry < self._child_count
def get_next_entry(self):
entry = []
current_entry = 0
if self._current_entry < self._child_count or self._child_count == | |
from contextlib import ExitStack
from queue import Queue
from tempfile import gettempdir
from threading import Event
from unittest import TestCase
from unittest.mock import MagicMock, call, patch
from packaging.version import Version
from path import Path
from dakara_player.media_player.base import (
InvalidStateError,
MediaPlayerNotAvailableError,
VersionNotFoundError,
)
from dakara_player.media_player.mpv import (
MediaPlayerMpv,
MediaPlayerMpvOld,
MediaPlayerMpvPost0330,
)
class MediaPlayerMpvTestCase(TestCase):
    """Test the static methods of the abstract MediaPlayerMpv class."""
    @patch("dakara_player.media_player.mpv.mpv.MPV")
    def test_get_version_postrelease(self, mocked_mpv_class):
        """Test to get the mpv post release version."""
        # mock the version of mpv
        mocked_mpv_class.return_value.mpv_version = (
            "mpv 0.32.0+git.20200402T120653.5824ac7d36"
        )
        # call the method
        version = MediaPlayerMpvOld.get_version()
        # assert the result: the git suffix must parse as a post-release marker
        self.assertEqual(version.base_version, "0.32.0")
        self.assertTrue(version.is_postrelease)
    @patch("dakara_player.media_player.mpv.mpv.MPV")
    def test_get_version(self, mocked_mpv_class):
        """Test to get the mpv stable version."""
        # mock the version of mpv
        mocked_mpv_class.return_value.mpv_version = "mpv 0.32.0"
        # call the method
        version = MediaPlayerMpvOld.get_version()
        # assert the result
        self.assertEqual(version.base_version, "0.32.0")
        self.assertFalse(version.is_postrelease)
    @patch("dakara_player.media_player.mpv.mpv.MPV")
    def test_get_version_not_found(self, mocked_mpv_class):
        """Test to get the mpv version when it is not available."""
        # mock the version of mpv with an unparseable string
        mocked_mpv_class.return_value.mpv_version = "none"
        # call the method
        with self.assertRaisesRegex(VersionNotFoundError, "Unable to get mpv version"):
            MediaPlayerMpvOld.get_version()
    @patch.object(MediaPlayerMpv, "get_version")
    def test_get_old(self, mocked_get_version):
        """Test to get media player for old version of mpv."""
        mocked_get_version.return_value = Version("0.27.0")
        self.assertIs(MediaPlayerMpv.get_class_from_version(), MediaPlayerMpvOld)
    @patch.object(MediaPlayerMpv, "get_version")
    def test_get_post_0330(self, mocked_get_version):
        """Test to get media player for version of mpv newer than 0.33.0."""
        # 0.33.0 itself selects the post-0.33.0 implementation
        mocked_get_version.return_value = Version("0.33.0")
        self.assertIs(MediaPlayerMpv.get_class_from_version(), MediaPlayerMpvPost0330)
    @patch.object(MediaPlayerMpv, "get_class_from_version")
    def test_instanciate(self, mocked_get_class_from_version):
        """Test to instanciate media player mpv class."""
        class Dummy:
            def __init__(self, *args, **kwargs):
                self.args = args
                self.kwargs = kwargs
        mocked_get_class_from_version.return_value = Dummy
        # from_version must forward all arguments to the selected class
        instance = MediaPlayerMpv.from_version(1, 2, v3=3, v4=4)
        self.assertIsInstance(instance, Dummy)
        self.assertEqual(instance.args, (1, 2))
        self.assertEqual(instance.kwargs, {"v3": 3, "v4": 4})
    @patch("dakara_player.media_player.mpv.mpv.MPV")
    def test_is_available_ok_direct(self, mocked_mpv_class):
        """Test to get availability directly."""
        self.assertTrue(MediaPlayerMpv.is_available())
    @patch("dakara_player.media_player.mpv.mpv.MPV")
    def test_is_available_ok_indirect(self, mocked_mpv_class):
        """Test to get availability indirectly."""
        # first instantiation attempt fails, the fallback attempt succeeds
        mocked_mpv_class.side_effect = [FileNotFoundError(), MagicMock()]
        self.assertTrue(MediaPlayerMpv.is_available())
    @patch("dakara_player.media_player.mpv.mpv", None)
    def test_is_available_ng_no_module(self):
        """Test to get inavailability if mpv module cannot be loaded."""
        self.assertFalse(MediaPlayerMpv.is_available())
    @patch("dakara_player.media_player.mpv.mpv.MPV")
    def test_is_available_ng(self, mocked_mpv_class):
        """Test to get inavailability."""
        # every instantiation attempt fails
        mocked_mpv_class.side_effect = FileNotFoundError()
        self.assertFalse(MediaPlayerMpv.is_available())
class MediaPlayerMpvModelTestCase(TestCase):
    """Test the mpv player class unitary."""
    # set by concrete subclasses to the MediaPlayerMpv subclass under test
    mpv_player_class = None
    def setUp(self):
        """Create common playlist entry fixture data."""
        # create playlist entry ID
        self.id = 42
        # create playlist entry file path
        self.song_file_path = Path("file")
        self.subtitle_file_path = Path("file_sub")
        # create playlist entry
        self.playlist_entry = {
            "id": self.id,
            "song": {"title": "Song title", "file_path": self.song_file_path},
            "owner": "me",
        }
    def get_instance(
        self,
        config=None,
    ):
        """Get a heavily mocked instance of the desired subclass of MediaPlayerMpv.
        Args:
            config (dict): Configuration passed to the constructor.
        Returns:
            tuple: Contains the following elements:
                MediaPlayerMpv: Instance;
                tuple: Contains the mocked objects:
                    unittest.mock.MagicMock: MPV object.
                    unittest.mock.MagicMock: BackgroundLoader object.
                    unittest.mock.MagicMock: TextGenerator object.
                tuple: Contains the mocked classes:
                    unittest.mock.MagicMock: MPV class.
                    unittest.mock.MagicMock: BackgroundLoader class.
                    unittest.mock.MagicMock: TextGenerator class.
        """
        config = config or {"kara_folder": gettempdir()}
        # patches are only needed during construction, hence the ExitStack scope
        with ExitStack() as stack:
            mocked_instance_class = stack.enter_context(
                patch("dakara_player.media_player.mpv.mpv.MPV")
            )
            mocked_background_loader_class = stack.enter_context(
                patch("dakara_player.media_player.base.BackgroundLoader")
            )
            mocked_text_generator_class = stack.enter_context(
                patch("dakara_player.media_player.base.TextGenerator")
            )
            return (
                self.mpv_player_class(Event(), Queue(), config, Path("temp")),
                (
                    mocked_instance_class.return_value,
                    mocked_background_loader_class.return_value,
                    mocked_text_generator_class.return_value,
                ),
                (
                    mocked_instance_class,
                    mocked_background_loader_class,
                    mocked_text_generator_class,
                ),
            )
    def set_playlist_entry(self, mpv_player, started=True):
        """Set a playlist entry and make the player play it.
        Args:
            mpv_player (MediaPlayerMpv): Instance of the mpv player.
            started (bool): If True, make the player play the song.
        """
        mpv_player.playlist_entry = self.playlist_entry
        # create mocked transition
        mpv_player.playlist_entry_data["transition"].path = (
            Path(gettempdir()) / "transition.png"
        )
        # create mocked song
        mpv_player.playlist_entry_data["song"].path = (
            mpv_player.kara_folder_path / self.song_file_path
        )
        mpv_player.playlist_entry_data["song"].path_subtitle = (
            mpv_player.kara_folder_path / self.subtitle_file_path
        )
        # set media has started
        if started:
            mpv_player.player.path = mpv_player.playlist_entry_data["song"].path
            mpv_player.player.sub_files = [
                mpv_player.playlist_entry_data["song"].path_subtitle
            ]
            mpv_player.player.playlist = [
                {
                    "id": 1,
                    "filename": mpv_player.player.path,
                    "current": True,
                    "playing": True,
                }
            ]
            mpv_player.player.pause = False
class MediaPlayerMpvOldTestCase(MediaPlayerMpvModelTestCase):
"""Test the old mpv player class unitary."""
mpv_player_class = MediaPlayerMpvOld
    @patch.object(MediaPlayerMpvOld, "is_available")
    def test_init_unavailable(self, mocked_is_available):
        """Test when mpv is not available."""
        mocked_is_available.return_value = False
        # construction must fail early with a dedicated error
        with self.assertRaisesRegex(
            MediaPlayerNotAvailableError, "mpv is not available"
        ):
            MediaPlayerMpvOld(Event(), Queue(), {}, Path("temp"))
    @patch.object(MediaPlayerMpvOld, "is_playing_this")
    def test_get_timing(self, mocked_is_playing_this):
        """Test to get timing."""
        mpv_player, (mocked_player, _, _), _ = self.get_instance()
        mocked_player.time_pos = 42.42
        mocked_is_playing_this.return_value = False
        # the position is truncated to whole seconds
        self.assertEqual(mpv_player.get_timing(), 42)
        # idle and transition states are both checked before reading the position
        mocked_is_playing_this.assert_has_calls([call("idle"), call("transition")])
    @patch.object(MediaPlayerMpvOld, "is_playing_this")
    def test_get_timing_idle(self, mocked_is_playing_this):
        """Test to get timing when idle."""
        mpv_player, (mocked_player, _, _), _ = self.get_instance()
        mocked_player.time_pos = 42.42
        # first state check ("idle") returns True
        mocked_is_playing_this.side_effect = [True, False]
        self.assertEqual(mpv_player.get_timing(), 0)
    @patch.object(MediaPlayerMpvOld, "is_playing_this")
    def test_get_timing_transition(self, mocked_is_playing_this):
        """Test to get timing when in transition."""
        mpv_player, (mocked_player, _, _), _ = self.get_instance()
        mocked_player.time_pos = 42.42
        # second state check ("transition") returns True
        mocked_is_playing_this.side_effect = [False, True]
        self.assertEqual(mpv_player.get_timing(), 0)
    @patch.object(MediaPlayerMpvOld, "get_version")
    def test_load_player(self, mocked_get_version):
        """Test to load the instance."""
        # create mock
        mocked_get_version.return_value = "0.32.0"
        # create instance
        mpv_player, _, _ = self.get_instance()
        # call the method
        with self.assertLogs("dakara_player.media_player.mpv", "DEBUG") as logger:
            mpv_player.load_player()
        # assert the calls
        mocked_get_version.assert_called_with()
        # assert the logs: the detected version is logged at INFO level
        self.assertListEqual(
            logger.output, ["INFO:dakara_player.media_player.mpv:mpv 0.32.0"]
        )
    @patch.object(MediaPlayerMpvOld, "clear_playlist_entry")
    @patch.object(MediaPlayerMpvOld, "play")
    def test_handle_end_file_transition(self, mocked_play, mocked_clear_playlist_entry):
        """Test end file callback for after a transition."""
        # create instance
        mpv_player, (mocked_player, _, _), _ = self.get_instance()
        mpv_player.set_callback("finished", MagicMock())
        self.set_playlist_entry(mpv_player)
        # pretend the file that just ended was the transition screen
        mocked_player.playlist[0]["filename"] = Path(gettempdir()) / "transition.png"
        # call the method
        with self.assertLogs("dakara_player.media_player.mpv", "DEBUG") as logger:
            mpv_player.handle_end_file({"event": "end-file"})
        # assert effect on logs
        self.assertListEqual(
            logger.output,
            [
                "DEBUG:dakara_player.media_player.mpv:File end callback called",
                "DEBUG:dakara_player.media_player.mpv:Will play '{}'".format(
                    Path(gettempdir()) / self.song_file_path
                ),
            ],
        )
        # assert the call: the song plays next and nothing is cleared yet
        mocked_play.assert_called_with("song")
        mocked_clear_playlist_entry.assert_not_called()
        mpv_player.callbacks["finished"].assert_not_called()
    @patch.object(MediaPlayerMpvOld, "clear_playlist_entry")
    @patch.object(MediaPlayerMpvOld, "play")
    def test_handle_end_file_song(self, mocked_play, mocked_clear_playlist_entry):
        """Test end file callback for after a song."""
        # create instance; set_playlist_entry leaves the song as the current file
        mpv_player, (mocked_player, _, _), _ = self.get_instance()
        mpv_player.set_callback("finished", MagicMock())
        self.set_playlist_entry(mpv_player)
        # call the method
        with self.assertLogs("dakara_player.media_player.mpv", "DEBUG") as logger:
            mpv_player.handle_end_file({"event": "end-file"})
        # assert effect on logs
        self.assertListEqual(
            logger.output,
            ["DEBUG:dakara_player.media_player.mpv:File end callback called"],
        )
        # assert the call: the entry is cleared and the finished callback fires
        mocked_play.assert_not_called()
        mocked_clear_playlist_entry.assert_called_with()
        mpv_player.callbacks["finished"].assert_called_with(self.playlist_entry["id"])
@patch.object(MediaPlayerMpvOld, "clear_playlist_entry")
@patch.object(MediaPlayerMpvOld, "play")
def test_handle_end_file_other(self, mocked_play, mocked_clear_playlist_entry):
"""Test end file callback for unknown state."""
# create instance
mpv_player, (mocked_player, _, _), _ = self.get_instance()
mpv_player.set_callback("finished", MagicMock())
self.set_playlist_entry(mpv_player)
mocked_player.playlist[0]["filename"] = Path(gettempdir()) / "other"
self.assertFalse(mpv_player.stop.is_set())
# call the method
with self.assertLogs("dakara_player.media_player.mpv", "DEBUG"):
mpv_player.handle_end_file({"event": "end-file"})
self.assertTrue(mpv_player.stop.is_set())
exception_class, exception, _ = mpv_player.errors.get()
self.assertIs(exception_class, InvalidStateError)
self.assertIn("End file on an undeterminated state", str(exception))
# assert the call
mocked_play.assert_not_called()
mocked_clear_playlist_entry.assert_not_called()
mpv_player.callbacks["finished"].assert_not_called()
@patch.object(MediaPlayerMpvOld, "skip")
def test_handle_log_message(self, mocked_skip):
"""Test log message callback."""
# create instance
mpv_player, _, _ = self.get_instance()
self.set_playlist_entry(mpv_player)
# mock the call
mpv_player.set_callback("error", MagicMock())
# call the method
with self.assertLogs(
"dakara_player.media_player.mpv", "DEBUG"
) as logger, self.assertLogs("mpv", "DEBUG") as logger_mpv:
mpv_player.handle_log_messages("fatal", "mpv.component", "error message")
# assert effect on logs
self.assertListEqual(
logger.output,
[
"DEBUG:dakara_player.media_player.mpv:Log message callback called",
"ERROR:dakara_player.media_player.mpv:Unable to play '{}'".format(
Path(gettempdir()) / self.song_file_path
),
],
)
self.assertListEqual(
logger_mpv.output, ["CRITICAL:mpv:mpv.component: error message"]
)
# assert the call
mpv_player.callbacks["error"].assert_called_with(
self.playlist_entry["id"], "Unable to play current song: error message"
)
mocked_skip.assert_called_with()
@patch.object(MediaPlayerMpvOld, "is_playing_this")
def test_handle_start_file_transition(self, mocked_is_playing_this):
"""Test start file callback for a transition."""
# create instance
mpv_player, (mocked_player, _, _), _ = self.get_instance()
mpv_player.set_callback("started_transition", MagicMock())
mpv_player.set_callback("started_song", MagicMock())
self.set_playlist_entry(mpv_player)
# create mocks
mocked_is_playing_this.return_value = True
# call the method
with self.assertLogs("dakara_player.media_player.mpv", "DEBUG") as logger:
mpv_player.handle_start_file({})
# assert effect on logs
self.assertListEqual(
logger.output,
[
"DEBUG:dakara_player.media_player.mpv:Start file callback called",
"INFO:dakara_player.media_player.mpv:Playing transition for "
"'Song title'",
],
)
# assert the call
mpv_player.callbacks["started_transition"].assert_called_with(
self.playlist_entry["id"]
)
mpv_player.callbacks["started_song"].assert_not_called()
mocked_is_playing_this.assert_called_with("transition")
@patch.object(MediaPlayerMpvOld, "is_playing_this")
def test_handle_start_file_song(self, mocked_is_playing_this):
"""Test start file callback for a song."""
# create instance
mpv_player, (mocked_player, _, _), _ = self.get_instance()
mpv_player.set_callback("started_transition", MagicMock())
mpv_player.set_callback("started_song", MagicMock())
self.set_playlist_entry(mpv_player)
# create mocks
mocked_is_playing_this.side_effect = [False, True]
# call the method
with self.assertLogs("dakara_player.media_player.mpv", "DEBUG") as logger:
mpv_player.handle_start_file({})
# assert effect on logs
self.assertListEqual(
logger.output,
[
"DEBUG:dakara_player.media_player.mpv:Start file callback called",
"INFO:dakara_player.media_player.mpv:Now playing 'Song title' "
"('{}')".format(Path(gettempdir()) / self.song_file_path),
],
)
# assert the call
mpv_player.callbacks["started_transition"].assert_not_called()
mpv_player.callbacks["started_song"].assert_called_with(
self.playlist_entry["id"]
)
# assert the call
mpv_player.callbacks["started_transition"].assert_not_called()
mpv_player.callbacks["started_song"].assert_called_with(
self.playlist_entry["id"]
)
mocked_is_playing_this.assert_has_calls([call("transition"), call("song")])
@patch.object(MediaPlayerMpvOld, "is_playing_this")
def test_handle_start_file_idle(self, mocked_is_playing_this):
"""Test start file callback for a idle."""
# create instance
mpv_player, (mocked_player, _, _), _ = self.get_instance()
mpv_player.set_callback("started_transition", MagicMock())
mpv_player.set_callback("started_song", MagicMock())
self.set_playlist_entry(mpv_player)
# create mocks
mocked_is_playing_this.side_effect = [False, False, True]
# call the method
with self.assertLogs("dakara_player.media_player.mpv", "DEBUG") as logger:
mpv_player.handle_start_file({})
# assert effect on logs
self.assertListEqual(
logger.output,
[
"DEBUG:dakara_player.media_player.mpv:Start file callback called",
"DEBUG:dakara_player.media_player.mpv:Playing idle screen",
],
)
# assert the call
mpv_player.callbacks["started_transition"].assert_not_called()
mpv_player.callbacks["started_song"].assert_not_called()
mocked_is_playing_this.assert_has_calls(
[call("transition"), call("song"), call("idle")]
)
@patch.object(MediaPlayerMpvOld, "is_playing_this")
def test_handle_start_file_unknown(self, mocked_is_playing_this):
"""Test start file callback for an unknown state."""
# create instance
mpv_player, (mocked_player, _, _), _ = self.get_instance()
mpv_player.set_callback("started_transition", MagicMock())
mpv_player.set_callback("started_song", MagicMock())
self.set_playlist_entry(mpv_player)
# create mocks
mocked_is_playing_this.return_value = False
self.assertFalse(mpv_player.stop.is_set())
# call the method
mpv_player.handle_start_file({})
self.assertTrue(mpv_player.stop.is_set())
exception_class, exception, _ = mpv_player.errors.get()
self.assertIs(exception_class, InvalidStateError)
self.assertIn("Start file on an undeterminated state", str(exception))
# assert the | |
#!/usr/bin/env python
# The piwheels project
# Copyright (c) 2017 <NAME> <https://github.com/bennuttall>
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Defines the :class:`PiWheelsSlave` class. An instance of this is the
entry-point for the :program:`piw-slave` script.
.. autoclass:: PiWheelsSlave
:members:
.. autofunction:: duration
"""
import os
import sys
import signal
import logging
import socket
from datetime import datetime, timezone
from time import time, sleep
from random import randint
import dateutil.parser
from wheel import pep425tags
from .. import __version__, terminal, transport, protocols
from ..systemd import get_systemd
from .builder import PiWheelsBuilder, PiWheelsPackage
UTC = timezone.utc
class MasterTimeout(IOError):
    """
    Raised when the master fails to respond within the configured timeout;
    the slave reacts by resetting its connection.
    """
class PiWheelsSlave:
"""
This is the main class for the :program:`piw-slave` script. It connects
(over 0MQ sockets) to a master (see :program:`piw-master`) then loops
around the slave protocol (see the :doc:`slaves` chapter). It retrieves
source packages directly from `PyPI`_, attempts to build a wheel in a
sandbox directory and, if successful, transmits the results to the master.
.. _PyPI: https://pypi.python.org/
"""
def __init__(self):
self.logger = logging.getLogger('slave')
self.label = socket.gethostname()
self.config = None
self.slave_id = None
self.builder = None
self.pypi_url = None
self.systemd = None
def __call__(self, args=None):
sys.excepthook = terminal.error_handler
parser = terminal.configure_parser("""
The piw-slave script is intended to be run on a standalone machine to build
packages on behalf of the piw-master script. It is intended to be run as an
unprivileged user with a clean home-directory. Any build dependencies you wish
to use must already be installed. The script will run until it is explicitly
terminated, either by Ctrl+C, SIGTERM, or by the remote piw-master script.
""")
parser.add_argument(
'--debug', action='store_true', help="Set logging to debug level")
parser.add_argument(
'-m', '--master', env_var='PIW_MASTER', metavar='HOST',
default='localhost',
help="The IP address or hostname of the master server "
"(default: %(default)s)")
parser.add_argument(
'-t', '--timeout', env_var='PIW_TIMEOUT', metavar='DURATION',
default='3h', type=duration,
help="The time to wait before assuming a build has failed "
"(default: %(default)s)")
self.config = parser.parse_args(args)
if self.config.debug:
self.config.log_level = logging.DEBUG
terminal.configure_logging(self.config.log_level,
self.config.log_file)
self.logger.info('PiWheels Slave version %s', __version__)
if os.geteuid() == 0:
self.logger.fatal('Slave must not be run as root')
return 1
if datetime.now(tz=UTC) < datetime(2019, 1, 1, tzinfo=UTC):
self.logger.fatal('System clock is far in the past')
return 1
self.systemd = get_systemd()
signal.signal(signal.SIGTERM, sig_term)
ctx = transport.Context()
queue = None
try:
while True:
queue = ctx.socket(
transport.REQ, protocol=reversed(protocols.slave_driver),
logger=self.logger)
queue.hwm = 10
queue.connect('tcp://{master}:5555'.format(
master=self.config.master))
self.systemd.ready()
try:
self.slave_id = None
self.main_loop(queue)
except MasterTimeout:
print('except MasterTimeout')
self.systemd.reloading()
self.logger.warning('Resetting connection')
queue.close(linger=1)
finally:
print('finally1')
if self.builder:
self.logger.warning('Discarding current build')
self.builder.clean()
self.builder = None
except SystemExit:
self.logger.warning('Shutting down on SIGTERM')
finally:
self.systemd.stopping()
queue.send_msg('BYE')
queue.close()
ctx.close()
# A general note about the design of the slave: the build slave is
# deliberately designed to be "brittle". In other words to fall over and
# die loudly in the event anything happens to go wrong (other than utterly
# expected failures like wheels occasionally failing to build and file
# transfers occasionally needing a retry). Hence all the apparently silly
# asserts littering the functions below.
    # This is in stark contrast to the master which is expected to stay up and
# carry on running even if a build slave goes bat-shit crazy and starts
# sending nonsense (in which case it should calmly ignore it and/or attempt
# to kill said slave with a "BYE" message).
def main_loop(self, queue, timeout=300):
"""
The main messaging loop. Sends the initial request, and dispatches
replies via :meth:`handle_reply`. Implements a *timeout* for responses
from the master and raises :exc:`MasterTimeout` if *timeout* seconds
are exceeded.
"""
msg, data = 'HELLO', [
self.config.timeout,
pep425tags.get_impl_ver(), pep425tags.get_abi_tag(),
pep425tags.get_platform(), self.label
]
while True:
queue.send_msg(msg, data)
start = time()
while True:
self.systemd.watchdog_ping()
if queue.poll(1):
msg, data = queue.recv_msg()
msg, data = self.handle_reply(msg, data)
break
elif time() - start > timeout:
self.logger.warning('Timed out waiting for master')
raise MasterTimeout()
def handle_reply(self, msg, data):
"""
Dispatch a message from the master to an appropriate handler method.
"""
handler = {
'ACK': lambda: self.do_ack(*data),
'SLEEP': self.do_sleep,
'BUILD': lambda: self.do_build(*data),
'SEND': lambda: self.do_send(data),
'DONE': self.do_done,
'DIE': self.do_die,
}[msg]
return handler()
def do_ack(self, new_id, pypi_url):
"""
In response to our initial "HELLO" (detailing our various :pep:`425`
tags), the master is expected to send "ACK" back with an integer
identifier and the URL of the PyPI repository to download from. We use
the identifier in all future log messages for the ease of the
administrator.
We reply with "IDLE" to indicate we're ready to accept a build job.
"""
assert self.slave_id is None, 'Duplicate ACK'
self.slave_id = int(new_id)
self.pypi_url = pypi_url
self.logger = logging.getLogger('slave-%d' % self.slave_id)
self.logger.info('Connected to master')
return 'IDLE', protocols.NoData
def do_sleep(self):
"""
If, in response to an "IDLE" message we receive "SLEEP" this indicates
the master has nothing for us to do currently. Sleep for a little while
then try "IDLE" again.
"""
assert self.slave_id is not None, 'SLEEP before ACK'
self.logger.info('No available jobs; sleeping')
sleep(randint(5, 15))
return 'IDLE', protocols.NoData
def do_build(self, package, version):
"""
Alternatively, in response to "IDLE", the master may send "BUILD"
*package* *version*. We should then attempt to build the specified
wheel and send back a "BUILT" message with a full report of the
outcome.
"""
assert self.slave_id is not None, 'BUILD before ACK'
assert not self.builder, 'Last build still exists'
self.logger.warning('Building package %s version %s', package, version)
self.builder = PiWheelsBuilder(package, version)
if self.builder.build(self.config.timeout, self.pypi_url):
self.logger.info('Build succeeded')
else:
self.logger.warning('Build failed')
return 'BUILT', self.builder.as_message()[2:]
def do_send(self, filename):
"""
If a build succeeds and generates files (detailed in a "BUILT"
message), the master will reply with "SEND" *filename* indicating we
should transfer the specified file (this is done on a separate socket
with a different protocol; see :meth:`builder.PiWheelsPackage.transfer`
for more details). Once the transfers concludes, reply to the master
with "SENT".
"""
assert self.slave_id is not None, 'SEND before ACK'
assert self.builder, 'Send before build / after failed build'
assert self.builder.status, 'Send after failed build'
pkg = [f for f in self.builder.files if f.filename == filename][0]
self.logger.info(
'Sending %s to master on %s', pkg.filename, self.config.master)
ctx = transport.Context()
queue = ctx.socket(transport.DEALER, logger=self.logger)
queue.hwm = 10
queue.connect('tcp://{master}:5556'.format(master=self.config.master))
try:
pkg.transfer(queue, self.slave_id)
finally:
queue.close()
return 'SENT', protocols.NoData
def do_done(self):
"""
After all files have been sent (and successfully verified), the master
will reply with "DONE" indicating we can remove all associated build
artifacts. We respond with "IDLE".
"""
assert self.slave_id is not None, 'DONE before ACK'
assert self.builder, 'Done before build'
self.logger.info('Removing temporary build directories')
self.builder.clean()
self.builder = None
return 'IDLE', protocols.NoData
def do_die(self):
"""
The master may respond with "DIE" at any time indicating we should
immediately terminate (first cleaning up any extant build). We raise
:exc:`SystemExit` to cause :meth:`main_loop` to exit.
"""
self.logger.warning('Master requested termination')
if self.builder is not None:
self.logger.info('Removing temporary build directories')
self.builder.clean()
raise SystemExit(0)
def | |
#!/usr/bin/env python
#
# This file is part of snmpsim software.
#
# Copyright (c) 2010-2018, <NAME> <<EMAIL>>
# License: http://snmplabs.com/snmpsim/license.html
#
# SNMP Simulator MIB to data file converter
#
import getopt
import sys
import os
import time
import socket
import struct
import bisect
import traceback
try:
import pcap
except ImportError:
pcap = None
from pyasn1.type import univ
from pyasn1.codec.ber import decoder
from pyasn1.error import PyAsn1Error
from pysnmp.proto import api, rfc1905
from pysnmp.smi import builder, rfc1902, view, compiler
from pysnmp.carrier.asynsock.dgram import udp
from pyasn1 import debug as pyasn1_debug
from pysnmp import debug as pysnmp_debug
from snmpsim.record import snmprec
from snmpsim import confdir, error, log
# Defaults
verboseFlag = True
mibSources = []
defaultMibSources = ['http://mibs.snmplabs.com/asn1/@mib@']
startOID = univ.ObjectIdentifier('1.3.6')
stopOID = None
promiscuousMode = False
outputDir = '.'
transportIdOffset = 0
variationModuleOptions = ""
variationModuleName = variationModule = None
listenInterface = captureFile = None
packetFilter = 'udp and src port 161'
endpoints = {}
contexts = {}
stats = {
'UDP packets': 0,
'IP packets': 0,
'bad packets': 0,
'empty packets': 0,
'unknown L2 protocol': 0,
'SNMP errors': 0,
'SNMP exceptions': 0,
'agents seen': 0,
'contexts seen': 0,
'snapshots taken': 0,
'Response PDUs seen': 0,
'OIDs seen': 0
}
helpMessage = """Usage: %s [--help]
[--version]
[--debug=<%s>]
[--debug-asn1=<%s>]
[--quiet]
[--logging-method=<%s[:args]>]
[--mib-source=<url>]
[--start-object=<MIB-NAME::[symbol-name]|OID>]
[--stop-object=<MIB-NAME::[symbol-name]|OID>]
[--output-dir=<directory>]
[--transport-id-offset=<number>]
[--capture-file=<filename.pcap>]
[--listen-interface=<device>]
[--promiscuous-mode]
[--packet-filter=<ruleset>]
[--variation-modules-dir=<dir>]
[--variation-module=<module>]
[--variation-module-options=<args>]""" % (
sys.argv[0],
'|'.join([x for x in pysnmp_debug.flagMap.keys() if x != 'mibview']),
'|'.join([x for x in pyasn1_debug.flagMap.keys()]),
'|'.join(log.gMap.keys())
)
# Parse the command line; any getopt failure aborts with the usage text
try:
    opts, params = getopt.getopt(sys.argv[1:], 'hv', [
        'help', 'version', 'debug=', 'debug-snmp=', 'debug-asn1=',
        'quiet', 'logging-method=', 'start-oid=', 'stop-oid=',
        'start-object=', 'stop-object=', 'mib-source=',
        'output-dir=', 'transport-id-offset=',
        'capture-file=', 'listen-interface=', 'promiscuous-mode',
        'packet-filter=',
        'variation-modules-dir=', 'variation-module=',
        'variation-module-options='
    ])
except Exception:
    sys.stderr.write(
        'ERROR: %s\r\n%s\r\n' % (sys.exc_info()[1], helpMessage))
    sys.exit(-1)
# Reject stray positional arguments up front.
# NOTE(review): this same check is repeated after the option loop below;
# the later occurrence is unreachable — confirm and remove it.
if params:
    sys.stderr.write('ERROR: extra arguments supplied %s\r\n%s\r\n' % (params, helpMessage))
    sys.exit(-1)
# Apply each recognised option.
# NOTE(review): the chain below mixes fresh `if` statements with `elif`
# branches (e.g. --quiet and --mib-source start new chains), so some elifs
# attach to an earlier condition than their indentation suggests; it works
# only because option strings are mutually exclusive — confirm before
# restructuring.
for opt in opts:
    if opt[0] == '-h' or opt[0] == '--help':
        sys.stderr.write("""\
Synopsis:
  Snoops network traffic for SNMP responses, builds SNMP Simulator
  data files.
  Can read capture files or listen live network interface.
Documentation:
  http://snmplabs.com/snmpsim/
%s
""" % helpMessage)
        sys.exit(-1)
    if opt[0] == '-v' or opt[0] == '--version':
        import snmpsim
        import pysmi
        import pysnmp
        import pyasn1
        sys.stderr.write("""\
SNMP Simulator version %s, written by <NAME> <<EMAIL>>
Using foundation libraries: pysmi %s, pysnmp %s, pyasn1 %s.
Python interpreter: %s
Software documentation and support at http://snmplabs.com/snmpsim
%s
""" % (snmpsim.__version__,
       hasattr(pysmi, '__version__') and pysmi.__version__ or 'unknown',
       hasattr(pysnmp, '__version__') and pysnmp.__version__ or 'unknown',
       hasattr(pyasn1, '__version__') and pyasn1.__version__ or 'unknown',
       sys.version, helpMessage))
        sys.exit(-1)
    elif opt[0] in ('--debug', '--debug-snmp'):
        pysnmp_debug.setLogger(pysnmp_debug.Debug(*opt[1].split(','), **dict(
            loggerName='pcap2dev.pysnmp')))
    elif opt[0] == '--debug-asn1':
        pyasn1_debug.setLogger(pyasn1_debug.Debug(*opt[1].split(','), **dict(
            loggerName='pcap2dev.pyasn1')))
    elif opt[0] == '--logging-method':
        try:
            log.setLogger('pcap2dev', *opt[1].split(':'), **dict(force=True))
        except error.SnmpsimError:
            sys.stderr.write(
                '%s\r\n%s\r\n' % (sys.exc_info()[1], helpMessage))
            sys.exit(-1)
    if opt[0] == '--quiet':
        verboseFlag = False
    # obsolete begin
    elif opt[0] == '--start-oid':
        startOID = univ.ObjectIdentifier(opt[1])
    elif opt[0] == '--stop-oid':
        stopOID = univ.ObjectIdentifier(opt[1])
    # obsolete end
    if opt[0] == '--mib-source':
        mibSources.append(opt[1])
    if opt[0] == '--start-object':
        startOID = rfc1902.ObjectIdentity(*opt[1].split('::', 1))
    if opt[0] == '--stop-object':
        stopOID = rfc1902.ObjectIdentity(*opt[1].split('::', 1),
                                         **dict(last=True))
    elif opt[0] == '--output-dir':
        outputDir = opt[1]
    elif opt[0] == '--transport-id-offset':
        try:
            transportIdOffset = max(0, int(opt[1]))
        except:
            sys.stderr.write(
                'ERROR: %s\r\n%s\r\n' % (sys.exc_info()[1], helpMessage))
            sys.exit(-1)
    elif opt[0] == '--listen-interface':
        listenInterface = opt[1]
    elif opt[0] == '--promiscuous-mode':
        promiscuousMode = True
    elif opt[0] == '--capture-file':
        captureFile = opt[1]
    elif opt[0] == '--packet-filter':
        packetFilter = opt[1]
    elif opt[0] == '--variation-modules-dir':
        confdir.variation.insert(0, opt[1])
    elif opt[0] == '--variation-module':
        variationModuleName = opt[1]
    elif opt[0] == '--variation-module-options':
        variationModuleOptions = opt[1]
# NOTE(review): dead code — `params` was already checked (and the process
# exited) before the option loop above.
if params:
    sys.stderr.write('ERROR: extra arguments supplied %s\r\n%s\r\n' % (
        params, helpMessage))
    sys.exit(-1)
# pylibpcap is an optional import (see top of file); it is mandatory here
if not pcap:
    sys.stderr.write('ERROR: pylibpcap package is missing!\r\nGet it from http://sourceforge.net/projects/pylibpcap/\r\n%s\r\n' % helpMessage)
    sys.exit(-1)
log.setLogger('pcap2dev', 'stdout')
# Resolve symbolic MIB names given via --start-object/--stop-object into
# numeric OIDs, compiling MIBs on demand
if isinstance(startOID, rfc1902.ObjectIdentity) or \
        isinstance(stopOID, rfc1902.ObjectIdentity):
    mibBuilder = builder.MibBuilder()
    mibViewController = view.MibViewController(mibBuilder)
    compiler.addMibCompiler(
        mibBuilder, sources=mibSources or defaultMibSources
    )
    if isinstance(startOID, rfc1902.ObjectIdentity):
        startOID.resolveWithMib(mibViewController)
    if isinstance(stopOID, rfc1902.ObjectIdentity):
        stopOID.resolveWithMib(mibViewController)
# Load variation module: search each configured directory for
# <variationModuleName>.py and execute it into a fresh namespace dict
if variationModuleName:
    for variationModulesDir in confdir.variation:
        log.msg('Scanning "%s" directory for variation modules...' % variationModulesDir)
        if not os.path.exists(variationModulesDir):
            log.msg('Directory "%s" does not exist' % variationModulesDir)
            continue
        mod = os.path.join(variationModulesDir, variationModuleName + '.py')
        if not os.path.exists(mod):
            log.msg('Variation module "%s" not found' % mod)
            continue
        # the executed module sees (and may mutate) this dict as globals
        ctx = {'path': mod, 'moduleContext': {}}
        try:
            if sys.version_info[0] > 2:
                exec(compile(open(mod).read(), mod, 'exec'), ctx)
            else:
                execfile(mod, ctx)
        except Exception:
            log.msg('Variation module "%s" execution failure: %s' % (mod, sys.exc_info()[1]))
            sys.exit(-1)
        else:
            variationModule = ctx
            log.msg('Variation module "%s" loaded' % variationModuleName)
            break
    else:
        # for/else: no directory yielded the module
        log.msg('ERROR: variation module "%s" not found' % variationModuleName)
        sys.exit(-1)
# Variation module initialization: the module must export init/record/
# shutdown handlers; init failure is logged but (deliberately?) not fatal
if variationModule:
    log.msg('Initializing variation module...')
    for x in ('init', 'record', 'shutdown'):
        if x not in variationModule:
            log.msg('ERROR: missing "%s" handler at variation module "%s"' % (x, variationModuleName))
            sys.exit(-1)
    try:
        variationModule['init'](options=variationModuleOptions,
                                mode='recording',
                                startOID=startOID,
                                stopOID=stopOID)
    except Exception:
        log.msg('Variation module "%s" initialization FAILED: %s' % (variationModuleName, sys.exc_info()[1]))
    else:
        log.msg('Variation module "%s" initialization OK' % variationModuleName)
# Data file builder
class SnmprecRecord(snmprec.SnmprecRecord):
    """Record formatter that lets a variation module post-process values."""

    def formatValue(self, oid, value, **context):
        """
        Format one OID/value pair for the output .snmprec file.

        When a variation module is active (``context['variationModule']``),
        the value is re-rendered without hex-forcing where possible and the
        triple is handed to the module's 'record' hook, which may rewrite
        it. NOTE(review): the stopFlag check sits on the *elif* branch, so
        it is only honoured when no variation module is configured —
        confirm this is intentional.
        """
        textOid, textTag, textValue = snmprec.SnmprecRecord.formatValue(
            self, oid, value
        )
        # invoke variation module
        if context['variationModule']:
            # re-render without hexification; if the plain rendering uses a
            # different tag, remember the hex form in the context instead
            plainOid, plainTag, plainValue = snmprec.SnmprecRecord.formatValue(
                self, oid, value, nohex=True
            )
            if plainTag != textTag:
                context['hextag'], context['hexvalue'] = textTag, textValue
            else:
                textTag, textValue = plainTag, plainValue
            textOid, textTag, textValue = context['variationModule'][
                'record'](
                textOid, textTag, textValue, **context
            )
        elif 'stopFlag' in context and context['stopFlag']:
            raise error.NoDataNotification()
        return textOid, textTag, textValue
# Open the capture source: a live interface or a pre-recorded pcap file
pcapObj = pcap.pcapObject()
if listenInterface:
    if verboseFlag:
        log.msg('Listening on interface %s in %spromiscuous mode' % (listenInterface, promiscuousMode is False and 'non-' or ''))
    try:
        # snaplen 65536 captures whole packets; 1000 ms read timeout
        pcapObj.open_live(listenInterface, 65536, promiscuousMode, 1000)
    except Exception:
        log.msg('Error opening interface %s for snooping: %s' % (listenInterface, sys.exc_info()[1]))
        sys.exit(-1)
elif captureFile:
    if verboseFlag:
        log.msg('Opening capture file %s' % captureFile)
    try:
        pcapObj.open_offline(captureFile)
    except Exception:
        log.msg('Error opening capture file %s for reading: %s' % (captureFile, sys.exc_info()[1]))
        sys.exit(-1)
else:
    sys.stderr.write('ERROR: no capture file or live interface specified\r\n%s\r\n' % helpMessage)
    sys.exit(-1)
# Narrow the capture with the BPF filter (default: SNMP agent responses)
if packetFilter:
    if verboseFlag:
        log.msg('Applying packet filter \"%s\"' % packetFilter)
    pcapObj.setfilter(packetFilter, 0, 0)
if verboseFlag:
    log.msg('Processing records from %s till %s' % (startOID or 'the beginning', stopOID or 'the end'))
def parsePacket(s):
    """
    Parse a raw layer-2 frame into a dict of IPv4/UDP header fields.

    The link-layer header is stripped according to the capture's datalink
    type, then the IPv4 header is decoded field by field; for UDP
    (protocol 17) the ports are extracted and the remaining payload is
    left in d['data'] for SNMP decoding. NOTE(review): the ord() calls
    assume Python 2 str input — confirm before running under Python 3.
    """
    d = {}
    # Link-layer header lengths per http://www.tcpdump.org/linktypes.html
    llHeaders = {
        0: 4,    # LINKTYPE_NULL (BSD loopback)
        1: 14,   # LINKTYPE_ETHERNET
        108: 4,  # LINKTYPE_LOOP
        228: 0   # LINKTYPE_IPV4 (raw)
    }
    if pcapObj.datalink() in llHeaders:
        s = s[llHeaders[pcapObj.datalink()]:]
    else:
        stats['unknown L2 protocol'] += 1
    # IPv4 header (RFC 791)
    d['version'] = (ord(s[0]) & 0xf0) >> 4
    d['header_len'] = ord(s[0]) & 0x0f
    d['tos'] = ord(s[1])
    d['total_len'] = socket.ntohs(struct.unpack('H', s[2:4])[0])
    d['id'] = socket.ntohs(struct.unpack('H', s[4:6])[0])
    d['flags'] = (ord(s[6]) & 0xe0) >> 5
    # BUG FIX: the 13-bit fragment offset must be masked with 0x1fff
    # *after* converting the 16-bit field to host order; the old code
    # masked the raw wire-order word with 0x1f, yielding garbage offsets.
    d['fragment_offset'] = socket.ntohs(struct.unpack('H', s[6:8])[0]) & 0x1fff
    d['ttl'] = ord(s[8])
    d['protocol'] = ord(s[9])
    d['checksum'] = socket.ntohs(struct.unpack('H', s[10:12])[0])
    d['source_address'] = pcap.ntoa(struct.unpack('i', s[12:16])[0])
    d['destination_address'] = pcap.ntoa(struct.unpack('i', s[16:20])[0])
    if d['header_len'] > 5:
        # BUG FIX: IP options run from byte 20 up to the end of the header
        # (4 * IHL). The old upper bound 4 * (IHL - 5) was the options
        # *length*, not the end offset, so options always came out empty.
        d['options'] = s[20:4 * d['header_len']]
    else:
        d['options'] = None
    s = s[4 * d['header_len']:]
    # UDP header
    if d['protocol'] == 17:
        d['source_port'] = socket.ntohs(struct.unpack('H', s[0:2])[0])
        d['destination_port'] = socket.ntohs(struct.unpack('H', s[2:4])[0])
        s = s[8:]
        stats['UDP packets'] += 1
    d['data'] = s
    stats['IP packets'] += 1
    return d
def handleSnmpMessage(d, t, private={}):
    """
    Decode one parsed UDP datagram as an SNMP message and, for successful
    GET response PDUs, record each varbind (with its capture-time offset)
    into the per-agent/per-community `contexts` map.

    *private* is a deliberate mutable-default cache holding the capture
    base time across calls.
    """
    msgVer = api.decodeMessageVersion(d['data'])
    if msgVer in api.protoModules:
        pMod = api.protoModules[msgVer]
    else:
        stats['bad packets'] += 1
        return
    try:
        rspMsg, wholeMsg = decoder.decode(
            d['data'], asn1Spec=pMod.Message(),
        )
    except PyAsn1Error:
        stats['bad packets'] += 1
        return
    if rspMsg['data'].getName() == 'response':
        rspPDU = pMod.apiMessage.getPDU(rspMsg)
        errorStatus = pMod.apiPDU.getErrorStatus(rspPDU)
        if errorStatus:
            stats['SNMP errors'] += 1
        else:
            # each (agent address, port) pair gets its own transport ID
            endpoint = d['source_address'], d['source_port']
            if endpoint not in endpoints:
                endpoints[endpoint] = udp.domainName + (transportIdOffset + len(endpoints),)
                stats['agents seen'] += 1
            # simulation context key: '<transport-oid>/<community>'
            # (BUG FIX: a redundant identical recomputation of this key
            # further down was removed)
            context = '%s/%s' % (pMod.ObjectIdentifier(endpoints[endpoint]),
                                 pMod.apiMessage.getCommunity(rspMsg))
            if context not in contexts:
                contexts[context] = {}
                stats['contexts seen'] += 1
            stats['Response PDUs seen'] += 1
            if 'basetime' not in private:
                private['basetime'] = t
            for oid, value in pMod.apiPDU.getVarBinds(rspPDU):
                # honour the requested [startOID, stopOID) window
                if oid < startOID:
                    continue
                if stopOID and oid >= stopOID:
                    continue
                if oid in contexts[context]:
                    # NOTE(review): this compares the new value against the
                    # stored (times, values) tuple, so it is always unequal
                    # — likely meant to compare the last recorded value;
                    # behaviour preserved pending confirmation.
                    if value != contexts[context][oid]:
                        stats['snapshots taken'] += 1
                else:
                    contexts[context][oid] = [], []
                contexts[context][oid][0].append(t - private['basetime'])
                contexts[context][oid][1].append(value)
                stats['OIDs seen'] += 1
def handlePacket(pktlen, data, timestamp):
    """pcap dispatch callback: count empty frames, decode everything else."""
    if not data:
        stats['empty packets'] += 1
        return
    handleSnmpMessage(parsePacket(data), timestamp)
# Main capture loop; any unexpected error is stashed in exc_info so the
# collected data can still be written out afterwards
exc_info = None
try:
    if listenInterface:
        log.msg(
            'Listening on interface "%s", kill me when you are done.' % listenInterface)
        while True:
            pcapObj.dispatch(1, handlePacket)
    elif captureFile:
        log.msg('Processing capture file "%s"....' % captureFile)
        args = pcapObj.next()
        while args:
            handlePacket(*args)
            args = pcapObj.next()
# TypeError is treated like Ctrl+C: presumably pcap.next() returning None
# at end-of-capture makes the `*args` unpacking fail — TODO confirm
except (TypeError, KeyboardInterrupt):
    log.msg('Shutting down process...')
except Exception:
    exc_info = sys.exc_info()
dataFileHandler = SnmprecRecord()
for context in contexts:
filename = os.path.join(outputDir,
context + os.path.extsep + SnmprecRecord.ext)
if verboseFlag:
log.msg('Creating simulation context %s at %s' % (context, filename))
try:
os.mkdir(os.path.dirname(filename))
except OSError:
pass
try:
outputFile = open(filename, 'wb')
except IOError:
log.msg('ERROR: writing %s: %s' % (filename, sys.exc_info()[1]))
sys.exit(-1)
count = total = iteration = 0
timeOffset = 0
reqTime = time.time()
oids = list(contexts[context].keys())
oids.sort()
oids.append(oids[-1]) # duplicate last OID to trigger stopFlag
while True:
for | |
# repo: andyruddh/irl-maxent
"""
Maximum Entropy Inverse Reinforcement Learning and Maximum Causal Entropy
Inverse Reinforcement Learning.
Based on the corresponding paper by <NAME> et al. (2008) and the Thesis
by Ziebart (2010).
"""
import numpy as np
from itertools import product
# -- common functions ----------------------------------------------------------
def feature_expectation_from_trajectories(features, trajectories):
    """
    Average the state-feature counts over a set of trajectories.

    Every state visited by a trajectory contributes its feature vector
    once; the accumulated sum is normalized by the number of trajectories.

    Args:
        features: The feature-matrix (e.g. as numpy array), mapping states
            to features, i.e. a matrix of shape (n_states x n_features).
        trajectories: A list or iterator of `Trajectory` instances.

    Returns:
        The empirical feature expectation as an array of shape
        (n_features,).
    """
    _, n_features = features.shape

    total = np.zeros(n_features)
    for trajectory in trajectories:
        for state in trajectory.states():
            total += features[state, :]

    return total / len(trajectories)
def initial_probabilities_from_trajectories(n_states, trajectories):
    """
    Estimate the start-state distribution from demonstrated trajectories.

    Counts how often each state appears as the origin of a trajectory's
    first transition, normalized by the number of trajectories.

    Args:
        n_states: The number of states.
        trajectories: A list or iterator of `Trajectory` instances.

    Returns:
        Array of shape (n_states,) with the empirical probability of each
        state being a starting state.
    """
    counts = np.zeros(n_states)
    for trajectory in trajectories:
        first_state = trajectory.transitions()[0][0]
        counts[first_state] += 1.0

    return counts / len(trajectories)
def expected_svf_from_policy(p_transition, p_initial, terminal, p_action, eps=1e-5):
    """
    Forward pass: compute the expected state visitation frequencies
    induced by the given local action probabilities.

    Implements the forward part of Algorithm 1 of Ziebart et al. (2008)
    (Algorithm 9.3 in Ziebart's 2010 thesis), modified so that terminal
    states absorb all probability mass, which guarantees convergence.

    Args:
        p_transition: The transition probabilities of the MDP as table
            `[from: Integer, to: Integer, action: Integer] -> probability`.
        p_initial: The probability of a state being an initial state as
            map `[state: Integer] -> probability: Float`.
        terminal: A list of terminal states.
        p_action: Local action probabilities as map
            `[state: Integer, action: Integer] -> probability: Float`
            as returned by `local_action_probabilities`.
        eps: Convergence threshold on the largest per-state change.

    Returns:
        The expected state visitation frequencies as an array of shape
        (n_states,).
    """
    n_states, _, n_actions = p_transition.shape

    # Zero all transitions out of terminal states so a fixed point exists.
    p_transition = np.copy(p_transition)
    p_transition[terminal, :, :] = 0.0

    # One (from x to) matrix per action for fast matrix-vector products.
    per_action = [np.array(p_transition[:, :, a]) for a in range(n_actions)]

    # Iterate d <- p_initial + sum_a P_a^T (pi(., a) * d) until stable.
    d = np.zeros(n_states)
    while True:
        flow = [per_action[a].T.dot(p_action[:, a] * d)
                for a in range(n_actions)]
        d_next = p_initial + np.array(flow).sum(axis=0)
        converged = np.max(np.abs(d_next - d)) <= eps
        d = d_next
        if converged:
            return d
# -- plain maximum entropy (Ziebart et al. 2008) -------------------------------
def local_action_probabilities(p_transition, terminal, reward):
    """
    Compute the local action probabilities (policy) required for the edge
    frequency calculation in maximum entropy reinforcement learning.

    This is the backward pass of Algorithm 1 of the Maximum Entropy IRL
    paper by Ziebart et al. (2008).

    Args:
        p_transition: Transition model as an array of shape
            `(n_states, n_states, n_actions)`, where entry
            `[from, to, action]` is the probability of reaching state
            `to` from state `from` via `action`.
        terminal: A set/list of terminal state indices.
        reward: Per-state reward, shape `(n_states,)`.

    Returns:
        The policy as an array `[state, action] -> probability`.
    """
    n_states, _, n_actions = p_transition.shape

    er = np.exp(reward)
    per_action = [np.array(p_transition[:, :, a]) for a in range(n_actions)]

    # Initialize the state partition function at the terminal states.
    zs = np.zeros(n_states)
    zs[terminal] = 1.0

    # Backward pass. This recursion does not converge in general, so a
    # fixed number of steps is used instead: 2 * n_states reflects the
    # maximum steps required to propagate from any state to any other
    # state and back in an arbitrary MDP defined by p_transition.
    for _ in range(2 * n_states):
        za = np.array([er * per_action[a].dot(zs)
                       for a in range(n_actions)]).T
        zs = za.sum(axis=1)

    # Normalize the per-action partition function into probabilities.
    return za / zs[:, None]
def compute_expected_svf(p_transition, p_initial, terminal, reward, eps=1e-5):
    """
    Compute the expected state visitation frequency for maximum entropy IRL.

    Implements Algorithm 1 of the Maximum Entropy IRL paper by Ziebart et
    al. (2008) by chaining the backward pass
    (`local_action_probabilities`) with the forward pass
    (`expected_svf_from_policy`).

    Args:
        p_transition: Transition model as an array of shape
            `(n_states, n_states, n_actions)`, where entry
            `[from, to, action]` is the probability of reaching state
            `to` from state `from` via `action`.
        p_initial: Initial-state distribution, shape `(n_states,)`.
        terminal: A list of terminal state indices.
        reward: Per-state reward, shape `(n_states,)`.
        eps: Convergence threshold for the forward pass; iteration stops
            once no state's visitation frequency changes by more than
            this value.

    Returns:
        The expected state visitation frequencies, shape `(n_states,)`.
    """
    policy = local_action_probabilities(p_transition, terminal, reward)
    return expected_svf_from_policy(p_transition, p_initial, terminal,
                                    policy, eps)
def irl(p_transition, features, terminal, trajectories, optim, init, eps=1e-4, eps_esvf=1e-5):
    """
    Compute the reward signal given the demonstration trajectories using the
    maximum entropy inverse reinforcement learning algorithm proposed in the
    corresponding paper by Ziebart et al. (2008).
    Args:
        p_transition: The transition probabilities of the MDP as table
            `[from: Integer, to: Integer, action: Integer] -> probability: Float`
            specifying the probability of a transition from state `from` to
            state `to` via action `action` to succeed.
        features: The feature-matrix (e.g. as numpy array), mapping states
            to features, i.e. a matrix of shape (n_states x n_features).
        terminal: A list of terminal states.
        trajectories: A list of `Trajectory` instances representing the
            expert demonstrations.
        optim: The `Optimizer` instance to use for gradient-based
            optimization.
        init: The `Initializer` to use for initialization of the reward
            function parameters.
        eps: The threshold to be used as convergence criterion for the
            reward parameters. Convergence is assumed if all changes in the
            scalar parameters are less than the threshold in a single
            iteration.
        eps_esvf: The threshold to be used as convergence criterion for the
            expected state-visitation frequency. Convergence is assumed if
            the expected state visitation frequency changes less than the
            threshold on all states in a single iteration.
    Returns:
        The reward per state as table `[state: Integer] -> reward: Float`.
    """
    n_states, _, n_actions = p_transition.shape
    _, n_features = features.shape
    # compute static properties from trajectories: empirical feature
    # expectation and the empirical initial-state distribution
    e_features = feature_expectation_from_trajectories(features, trajectories)
    p_initial = initial_probabilities_from_trajectories(n_states, trajectories)
    # basic gradient descent on the reward parameters theta
    theta = init(n_features)
    delta = np.inf
    optim.reset(theta)
    # NOTE(review): this loop relies on `optim.step` updating `theta` in
    # place (theta was registered via `optim.reset(theta)`); otherwise
    # `delta` would never change -- confirm against the Optimizer API.
    while delta > eps:
        theta_old = theta.copy()
        # compute per-state reward: linear in the features
        reward = features.dot(theta)
        # compute the gradient: empirical feature expectation minus the
        # feature expectation under the current reward's visitation freq.
        e_svf = compute_expected_svf(p_transition, p_initial, terminal, reward, eps_esvf)
        grad = e_features - features.T.dot(e_svf)
        # perform optimization step and compute delta for convergence
        optim.step(grad)
        delta = np.max(np.abs(theta_old - theta))
    # re-compute per-state reward and return
    return features.dot(theta)
# -- maximum causal entropy (Ziebart 2010) -------------------------------------
def softmax(x1, x2):
    """
    Compute the soft maximum log(exp(x1) + exp(x2)) of both arguments.

    For array inputs the soft maximum is computed element-wise. The larger
    argument is factored out for numerical stability.

    Args:
        x1: Scalar or ndarray.
        x2: Scalar or ndarray.

    Returns:
        The soft maximum of the given arguments, either scalar or ndarray,
        depending on the input.
    """
    larger = np.maximum(x1, x2)
    smaller = np.minimum(x1, x2)
    return larger + np.log(1.0 + np.exp(smaller - larger))
def local_causal_action_probabilities(p_transition, terminal, reward, discount, eps=1e-5):
"""
Compute the local action probabilities (policy) required for the edge
frequency calculation for maximum causal entropy reinfocement learning.
This is Algorithm 9.1 from Ziebart's thesis (2010) combined with
discounting for convergence reasons as proposed in the | |
The last global batch only contains data for one replica.
if drop_remainder:
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
else:
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution)
  @combinations.generate(
      combinations.combine(
          mode=["eager"],
          input_type=["input_fn", "dataset"],
          api_type=["wrap_into_dataset"],
          iteration_type=["get_next", "for_loop"],
          drop_remainder=[True, False],
          distribution=[
              strategy_combinations.multi_worker_mirrored_2x1_cpu,
              strategy_combinations.multi_worker_mirrored_2x1_gpu,
          ]))
  def testUnevenDatasetBatchesMultiWorker(self, input_type, api_type,
                                          iteration_type, drop_remainder,
                                          distribution):
    """A 9-element dataset over 2 workers surfaces the uneven final batch."""
    # Actual devices don't matter in this test as long as the number of global
    # replicas is 2.
    worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
    cr = distribution.cluster_resolver
    self.assertIsNotNone(cr)
    worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
                                                  cr.task_type)
    id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
                                                    cr.task_type, cr.task_id)
    def dataset_fn(_):
      dataset = dataset_ops.Dataset.range(9)
      if input_type == "input_fn":
        # When input_fn is used, there is no automatic rebatching and sharding,
        # so we add them here.
        return dataset.shard(worker_count, id_in_cluster).batch(1)
      else:
        return dataset.batch(2, drop_remainder=drop_remainder)
    dataset_or_input_fn = self._create_dataset_or_input_fn(
        input_type, dataset_fn)
    if drop_remainder and input_type == "dataset":
      # Remainder dropped: each worker sees four size-1 per-replica batches.
      if id_in_cluster == 0:
        expected_values = [[[0]], [[2]], [[4]], [[6]]]
      else:
        expected_values = [[[1]], [[3]], [[5]], [[7]]]
    else:
      # The last global batch only contains data for one replica.
      if id_in_cluster == 0:
        expected_values = [[[0]], [[2]], [[4]], [[6]], [[8]]]
      else:
        expected_values = [[[1]], [[3]], [[5]], [[7]], [[]]]
    # Partial batches require get_next_as_optional semantics.
    distribution.extended.experimental_enable_get_next_as_optional = True
    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset_or_input_fn,
        worker_device_pairs,
        expected_values,
        distribution,
        num_replicas_in_sync=distribution.num_replicas_in_sync,
        input_context=distribution.extended._make_input_context())
  @combinations.generate(
      combinations.combine(
          mode=["eager"],
          input_type=["input_fn", "dataset"],
          api_type=["wrap_into_dataset"],
          iteration_type=["get_next", "for_loop"],
          drop_remainder=[True, False],
          distribution=[
              strategy_combinations.multi_worker_mirrored_2x2_gpu,
          ]))
  def testUnevenDatasetBatchesMultiWorkerFourReplicas(self, input_type,
                                                      api_type, iteration_type,
                                                      drop_remainder,
                                                      distribution):
    """A 15-element dataset over 4 replicas surfaces the uneven final batch."""
    # Actual devices don't matter in this test as long as the number of global
    # replicas is 4 (2 workers x 2 GPUs each).
    worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                              "/device:GPU:1"])]
    cr = distribution.cluster_resolver
    self.assertIsNotNone(cr)
    worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
                                                  cr.task_type)
    id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
                                                    cr.task_type, cr.task_id)
    def dataset_fn(_):
      dataset = dataset_ops.Dataset.range(15)
      if input_type == "input_fn":
        # When input_fn is used, there is no automatic rebatching and sharding,
        # so we add them here.
        return dataset.shard(worker_count, id_in_cluster).batch(1)
      else:
        return dataset.batch(4, drop_remainder=drop_remainder)
    dataset_or_input_fn = self._create_dataset_or_input_fn(
        input_type, dataset_fn)
    # The last global batch only contains data for one replica.
    if drop_remainder and input_type == "dataset":
      if id_in_cluster == 0:
        expected_values = [[[0], [2]], [[4], [6]], [[8], [10]]]
      else:
        expected_values = [[[1], [3]], [[5], [7]], [[9], [11]]]
    else:
      if id_in_cluster == 0:
        expected_values = [[[0], [2]], [[4], [6]], [[8], [10]], [[12], [14]]]
      else:
        expected_values = [[[1], [3]], [[5], [7]], [[9], [11]], [[13], []]]
    # Partial batches require get_next_as_optional semantics.
    distribution.extended.experimental_enable_get_next_as_optional = True
    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset_or_input_fn,
        worker_device_pairs,
        expected_values,
        distribution,
        num_replicas_in_sync=distribution.num_replicas_in_sync,
        input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
num_replicas_in_sync=[None, 2],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
],
enable_get_next_as_optional=[True, False]))
def testBatchSplitting(self, input_type, api_type, iteration_type,
num_replicas_in_sync, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
batch_size = 10
dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
updated_batch_size = (
batch_size //
num_replicas_in_sync if num_replicas_in_sync else batch_size)
expected_values = [[range(i, i+updated_batch_size),
range(i+updated_batch_size, i+2*updated_batch_size)]
for i in range(0, 100, updated_batch_size*2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
sess=None,
num_replicas_in_sync=num_replicas_in_sync)
  @combinations.generate(
      combinations.combine(
          mode=["eager"],
          input_type=["dataset"],
          api_type=["wrap_into_dataset"],
          iteration_type=["get_next", "for_loop"],
          num_replicas_in_sync=[None, 2],
          distribution=[
              strategy_combinations.multi_worker_mirrored_2x2_gpu,
          ],
          enable_get_next_as_optional=[True, False]))
  def testBatchSplittingMultiWorker(self, input_type, api_type, iteration_type,
                                    num_replicas_in_sync, distribution,
                                    enable_get_next_as_optional):
    """Global batches of 10 are rebatched per replica on a multi-worker setup."""
    worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                              "/device:GPU:1"])]
    batch_size = 10
    cr = distribution.cluster_resolver
    self.assertIsNotNone(cr)
    def dataset_fn(_):
      dataset = dataset_ops.Dataset.range(100).batch(batch_size)
      return dataset
    dataset_or_input_fn = self._create_dataset_or_input_fn(
        input_type, dataset_fn)
    # Per-replica batch size after splitting; unchanged when
    # num_replicas_in_sync is None.
    updated_batch_size = (
        batch_size //
        num_replicas_in_sync if num_replicas_in_sync else batch_size)
    # Two local replicas (GPUs) per worker, so each entry pairs two ranges.
    expected_values = [
        [  # pylint: disable=g-complex-comprehension
            range(i, i + updated_batch_size),
            range(i + updated_batch_size, i + 2 * updated_batch_size)
        ] for i in range(0, 100, updated_batch_size * 2)
    ]
    distribution.extended.experimental_enable_get_next_as_optional = (
        enable_get_next_as_optional)
    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset_or_input_fn,
        worker_device_pairs,
        expected_values,
        distribution,
        sess=None,
        num_replicas_in_sync=num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
))
def testCacheAcrossIteration(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
dataset = dataset_ops.Dataset.range(16).shuffle(16).cache().batch(4)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
first_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
second_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
self.assertAllEqual(first_epoch, second_epoch)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
reshuffle=[True, False]))
def testShuffleAcrossIterations(self, distribution, reshuffle):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
if not reshuffle and not compat.forward_compatible(2020, 5, 22):
self.skipTest("Functionality currently not supported.")
dataset = dataset_ops.Dataset.range(12).shuffle(
12, reshuffle_each_iteration=reshuffle).batch(4)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
first_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
second_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
if reshuffle:
self.assertNotAllEqual(first_epoch, second_epoch)
else:
self.assertAllEqual(first_epoch, second_epoch)
  @combinations.generate(
      combinations.combine(
          mode=["eager"],
          distribution=[
              strategy_combinations.one_device_strategy,
              strategy_combinations.mirrored_strategy_with_one_cpu,
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
              strategy_combinations.tpu_strategy,
              strategy_combinations.central_storage_strategy_with_two_gpus,
              strategy_combinations.multi_worker_mirrored_2x2_gpu,
              strategy_combinations.multi_worker_mirrored_2x1_cpu,
          ]))
  def testGetNextOptionalShape(self, distribution):
    """With drop_remainder=True, per-replica shapes stay fully static."""
    batch_size = 8
    dataset = dataset_ops.DatasetV2.from_tensor_slices({
        "feature": array_ops.ones([batch_size, 10]),
        "label": array_ops.ones([batch_size]),
    })
    # drop_remainder=True lets the runtime infer a static batch dimension.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dist_dataset = distribution.experimental_distribute_dataset(dataset)
    per_replica_batch_size = batch_size // distribution.num_replicas_in_sync
    @def_function.function
    def train_fn():
      for data in dist_dataset:
        data = nest.map_structure(distribution.experimental_local_results, data)
        feature = data["feature"]
        label = data["label"]
        # Assert the shapes are still static from all replicas.
        for replica_id in range(len(distribution.extended.worker_devices)):
          self.assertEqual([per_replica_batch_size, 10],
                           feature[replica_id].shape)
          self.assertEqual([per_replica_batch_size], label[replica_id].shape)
    train_fn()
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.OFF]))
def testAutoshardingOption(self, distribution, input_type, api_type,
iteration_type, auto_shard_policy):
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
ds_option = dataset_ops.Options()
ds_option.experimental_distribute.auto_shard_policy = auto_shard_policy
dataset_fn = (
lambda _: dataset_ops.Dataset.range(4).with_options(ds_option))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
if auto_shard_policy == AutoShardPolicy.AUTO:
if id_in_cluster == 0:
expected_values = [[0], [2]]
else:
expected_values = [[1], [3]]
else:
expected_values = [[0], [1], [2], [3]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
input_type=["input_fn"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"]))
def testDifferentDatasetsMultiWorker(self, distribution, input_type, api_type,
iteration_type):
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(ctx):
if ctx.input_pipeline_id == 0:
return dataset_ops.Dataset.range(8).batch(2)
else:
return dataset_ops.Dataset.range(9).batch(2)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
if id_in_cluster == 0:
expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[]]]
else:
expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[8]]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
  @combinations.generate(
      combinations.combine(
          strategy=[
              strategy_combinations.multi_worker_mirrored_2x1_cpu,
              strategy_combinations.multi_worker_mirrored_2x1_gpu,
          ],
          mode=["eager"]))
  def testLoopOverDatasetInTFFunction(self, strategy):
    """Iterating a distributed dataset inside tf.function accumulates a SUM."""
    dataset = dataset_ops.Dataset.range(10).map(lambda x: {  # pylint: disable=g-long-lambda
        "y": math_ops.cast(x, dtypes.float32) ** 2,
    }).batch(4)
    dist_dataset = strategy.experimental_distribute_dataset(dataset)
    with strategy.scope():
      v = variables.Variable(0.0, aggregation=variables.VariableAggregation.SUM)
    @def_function.function
    def iterator_fn(dist_dataset):
      def assign_add_fn(data):
        v.assign_add(math_ops.reduce_sum(data["y"]))
      for data in dist_dataset:
        strategy.run(assign_add_fn, args=(data,))
    iterator_fn(dist_dataset)
    # 0^2 + 1^2 + ... + 9^2 = 285; SUM aggregation totals across replicas.
    self.assertEqual(v.numpy(), 285.0)
class DistributedIteratorTensorTypeTest(DistributedIteratorTestBase,
parameterized.TestCase):
"""Tests for DistributedDataset with non-dense tensors."""
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
defun_type=["lambda", "tf_function"],
))
def testRaggedSparse(self, distribution, input_type, drop_remainder,
defun_type):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
defun = {"lambda": lambda f: f,
"tf_function": def_function.function}[defun_type]
distribution.extended.experimental_enable_get_next_as_optional = True
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
return dataset.batch(batch_size, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
dataset = self._wrap_dataset(input_type, dataset_or_input_fn,
distribution.extended._input_workers,
len(distribution.extended.worker_devices),
distribution)
# Assert that the tensors are rebatched and sparsity is preserved.
per_replica_batch = defun(lambda x: next(iter(x)))(dataset)
self.assertAllEqual(
distribute_utils.select_replica(0, per_replica_batch["dense"]),
[[0., 0., 0.], [1., 0., 0.], [2., 2., 0.], [3., 3., 3.]])
self.assertAllEqual(
distribute_utils.select_replica(1, per_replica_batch["dense"]),
[[0., 0., 0.], [5., 0., 0.], [6., 6., 0.], [7., 7., 7.]])
# Transitively check the ragged and sparse tensors by densification.
for i in range(2):
self.assertLen(
distribute_utils.select_replica(i,
per_replica_batch["ragged"]).values,
6)
self.assertAllEqual(
distribute_utils.select_replica(
i, per_replica_batch["ragged"]).to_tensor(),
distribute_utils.select_replica(i, per_replica_batch["dense"]))
self.assertLen(
distribute_utils.select_replica(i,
per_replica_batch["sparse"]).indices,
6)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(
distribute_utils.select_replica(i, per_replica_batch["sparse"])),
distribute_utils.select_replica(i, per_replica_batch["dense"]))
# Iterate through all the batches and sum them up.
def sum_batch(per_replica_features):
"""Sums the `PerReplica` values in the `per_replica_features` map."""
def map_fn(per_replica_values):
per_replica_sums = distribution.run(
(lambda x: math_ops.reduce_sum(x.values)) if all(
map(sparse_tensor.is_sparse, per_replica_values.values)) else
math_ops.reduce_sum, (per_replica_values,))
return distribution.reduce(
reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)
return nest.map_structure(map_fn, per_replica_features)
def _reduce(state, batch):
sums = sum_batch(batch)
return {name: value + sums[name] for name, value in state.items()}
def sum_for_loop(dataset):
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
for batch in dataset:
sums = _reduce(sums, batch)
return sums
def sum_while_loop(iterator, reduce_fn):
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
while True:
try:
sums = reduce_fn(sums, iterator)
except (StopIteration, errors.OutOfRangeError):
return sums
while_sums = sum_while_loop(
iter(dataset),
defun(lambda state, iterator: _reduce(state, next(iterator))))
self.assertAllEqual(
nest.flatten(while_sums),
# When there's no partial batch, | |
cnn=False,
leakage_model=LeakageModelType.AES_MULTI,
input_type=AIInputType.SIGNAL,
augment_shuffle=True,
n_hidden_layers=1,
n_hidden_nodes=256,
activation='leakyrelu',
metric_freq=100,
regularizer=None,
reglambda=0.001,
model_suffix=None,
use_bias=True,
batch_norm=True,
hamming=False,
key_low=1,
key_high=3,
loss_type='correlation',
lr=0.001,
epochs=5000,
batch_size=512,
norank=False,
)
it_dummy = AICorrSignalIterator([], conf, batch_size=10000, request_id=None, stream_server=None)
x, y = it_dummy._preprocess_trace_set(trace_set)
# ------------------------------
# Train and obtain encodings
# ------------------------------
model = models.AICorrNet(conf, input_dim=4, name="test")
print(model.info())
rank_cb = rankcallbacks.CorrRankCallback(conf, '/tmp/deleteme/', save_best=False, save_path=None)
rank_cb.set_trace_set(trace_set)
if model.using_regularization:
print("Warning: cant do correlation loss test because regularizer will influence loss function")
return
# Find optimal weights
print("The x (EM samples) and y (leakage model values) are:")
print(x)
print(y)
print("When feeding x through the model without training, the encodings become:")
print(model.predict(x))
print("Training now")
model.train_set(x, y, save=False, epochs=conf.epochs, extra_callbacks=[rank_cb])
print("Done training")
# Get the encodings of the input data using the same approach used in ops.py corrtest (iterate over rows)
result = []
for i in range(0, x.shape[0]):
result.append(model.predict(np.array([x[i,:]], dtype=float))[0]) # Result contains sum of points such that corr with y[key_index] is maximal for all key indices. Shape = [trace, 16]
result = np.array(result)
print("When feeding x through the model after training, the encodings for key bytes %d to %d become:\n %s" % (conf.key_low, conf.key_high, str(result)))
# ------------------------------
# Check loss function
# ------------------------------
# Evaluate the model to get the loss for the encodings
predicted_loss = model.model.evaluate(x, y, verbose=0)
# Manually calculate the loss using numpy to verify that we are learning a correct correlation
calculated_loss = 0
num_keys = (conf.key_high - conf.key_low)
num_outputs = LeakageModel.get_num_outputs(conf) // num_keys
for i in range(0, num_keys):
subkey_hws = y[:, i*num_outputs:(i+1)*num_outputs]
subkey_encodings = result[:, i*num_outputs:(i+1)*num_outputs]
print("Subkey %d HWs : %s" % (i + conf.key_low, str(subkey_hws)))
print("Subkey %d encodings: %s" % (i + conf.key_low, str(subkey_encodings)))
y_key = subkey_hws.reshape([-1, 1])
y_pred = subkey_encodings.reshape([-1, 1])
print("Flattened subkey %d HWs : %s" % (i + conf.key_low, str(y_key)))
print("Flattened subkey %d encodings: %s" % (i + conf.key_low, str(y_pred)))
# Calculate correlation (numpy approach)
corr_key_i = np.corrcoef(y_pred[:, 0], y_key[:, 0], rowvar=False)[1,0]
print("corr_num: %s" % corr_key_i)
calculated_loss += 1.0 - corr_key_i
print("These values should be close:")
print("Predicted loss: %s" % str(predicted_loss))
print("Calculated loss: %s" % str(calculated_loss))
self.assertAlmostEqual(predicted_loss, calculated_loss, places=2)
    @unittest.skipIf(UnitTestSettings.TEST_FAST, "fast testing enabled")
    def test_autoenctrain(self):
        """
        Artificial example to test AutoEncoder.

        Trains an autoencoder on five 4-point traces and checks that the
        reconstructed output, rounded to the nearest integer, equals the
        original input for every trace.
        """
        # ------------------------------
        # Generate data
        # ------------------------------
        traces = [  # Contains abs(trace). Shape = [trace, point]
            [1, 1, 1, -15],
            [-4, 1, 2, -12],
            [10, 1, 3, 8],
            [8, 1, 1, -14],
            [9, 1, -3, 8],
        ]
        plaintexts = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        keys = [
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        # Convert to numpy
        traces = np.array(traces)
        plaintexts = np.array(plaintexts)
        keys = np.array(keys)
        trace_set = TraceSet(name='test', traces=traces, plaintexts=plaintexts, keys=keys)
        # ------------------------------
        # Preprocess data
        # ------------------------------
        conf = Namespace(
            max_cache=0,
            augment_roll=False,
            augment_noise=False,
            normalize=False,
            traces_per_set=4,
            online=False,
            dataset_id='qa',
            cnn=False,
            leakage_model=LeakageModelType.HAMMING_WEIGHT_SBOX,
            input_type=AIInputType.SIGNAL,
            augment_shuffle=True,
            n_hidden_layers=1,
            n_hidden_nodes=256,
            activation='leakyrelu',
            metric_freq=100,
            regularizer=None,
            reglambda=0.001,
            model_suffix=None,
            use_bias=True,
            batch_norm=True,
            hamming=False,
            key_low=2,
            key_high=3,
            loss_type='correlation',
            lr=0.0001,
            epochs=2000,
            batch_size=512,
            norank=False,
        )
        it_dummy = AutoEncoderSignalIterator([], conf, batch_size=10000, request_id=None, stream_server=None)
        # NOTE(review): for an autoencoder the iterator presumably yields the
        # signal as both input and target (y == x) -- confirm in
        # AutoEncoderSignalIterator._preprocess_trace_set.
        x, y = it_dummy._preprocess_trace_set(trace_set)
        # ------------------------------
        # Train and obtain encodings
        # ------------------------------
        model = models.AutoEncoder(conf, input_dim=4, name="test")
        print(model.info())
        # Find optimal weights
        print("X, Y")
        print(x)
        print(y)
        print("When feeding x through the model without training, the encodings become:")
        print(model.predict(x))
        print("Training now")
        model.train_set(x, y, epochs=conf.epochs)
        print("Done training")
        # Get the encodings of the input data using the same approach used in ops.py corrtest (iterate over rows)
        result = []
        for i in range(0, x.shape[0]):
            result.append(model.predict(np.array([x[i, :]], dtype=float))[0])  # Result contains sum of points such that corr with y[key_index] is maximal for all key indices. Shape = [trace, 16]
        result = np.array(result)
        # The autoencoder should reconstruct its input up to rounding error.
        for i in range(result.shape[0]):
            rounded_result = np.round(result[i])
            print("Original x : %s" % x[i])
            print("Rounded result: %s" % rounded_result)
            self.assertListEqual(list(rounded_result), list(x[i]))
def test_softmax(self):
test = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
a = models.softmax(test)
b = models.softmax_np(test)
self.assertEqual(len(a), len(b))
for i in range(0, len(a)):
self.assertAlmostEqual(a[i], b[i], places=6)
class TestRank(unittest.TestCase):
    """Tests for the rank/confidence helpers in `rankcallbacks`."""

    def test_calculate_ranks(self):
        """Ascending scores 1..256 must yield descending ranks 255..0."""
        scores = np.array(list(range(1, 257)))
        descending_ranks = list(range(255, -1, -1))
        self.assertListEqual(list(rankcallbacks.calculate_ranks(scores)),
                             descending_ranks)

    def test_get_rank_and_confidence(self):
        """The best-scoring key guess has rank 0 with full confidence."""
        scores = np.array(list(range(1, 257)))
        ranks = rankcallbacks.calculate_ranks(scores)
        rank_value, confidence = rankcallbacks.get_rank_and_confidence(
            ranks, scores, 255)
        self.assertEqual(confidence, 1)
        self.assertEqual(rank_value, 0)
        # Lower-scoring key guesses move down the ranking accordingly.
        for key_guess, expected_rank in ((254, 1), (154, 101)):
            rank_value, _ = rankcallbacks.get_rank_and_confidence(
                ranks, scores, key_guess)
            self.assertEqual(rank_value, expected_rank)
class TestOps(unittest.TestCase):
    """Tests for the trace-set operations in `ops`.

    Ragged trace collections are built with `dtype=object`: NumPy >= 1.24
    raises a ValueError when creating a regular array from nested
    sequences of unequal length, while older versions only emitted a
    deprecation warning and built an object array implicitly.
    """

    def test_align_trace_set(self):
        """Aligning shifts each trace so it starts at the reference pattern."""
        # dtype=object because the rows have different lengths (ragged).
        traces = np.array([[0, 1, 0, 8, 10, 8, 0, 1, 0], [8, 8, 11, 8], [8, 10, 8, 0]],
                          dtype=object)
        expected = np.array([[8, 10, 8, 0, 1, 0], [8, 11, 8], [8, 10, 8, 0]],
                            dtype=object)
        reference_signal = np.array([8, 10, 8])
        conf = Namespace(reference_signal=reference_signal, butter_cutoff=0.1, butter_order=1)
        ts = TraceSet(traces=traces, name='test')
        ops.align_trace_set(ts, None, conf, params=[0, len(reference_signal)])
        for i in range(0, len(ts.traces)):
            self.assertListEqual(list(ts.traces[i].signal), expected[i])

    def test_select_trace_set(self):
        """Selection keeps only the sample points flagged True in the pickle."""
        test_path = "/tmp/selection.p"
        traces = np.array([[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]])
        expected = np.array([[3, 4], [3, 4]])
        conf = Namespace(windowing_method='rectangular')
        with open(test_path, "wb") as f:
            pickle.dump(np.array([False, False, True, True, False, False]), f)
        ts = TraceSet(traces=traces, name='test')
        ops.window_trace_set(ts, None, conf, params=[0, 6])
        ops.select_trace_set(ts, None, None, params=[test_path])
        for i in range(0, len(ts.traces)):
            self.assertListEqual(list(ts.traces[i].signal), list(expected[i]))

    def test_filterkey_trace_set(self):
        """Filtering by key '01' keeps only the trace whose key is 1."""
        traces = np.array([[0], [1], [2]])
        keys = np.array([[0], [1], [2]])
        ts = TraceSet(traces=traces, keys=keys)
        conf = Namespace()
        ops.filterkey_trace_set(ts, None, conf, params=['01'])
        self.assertEqual(len(ts.traces), 1)
        self.assertListEqual(list(ts.traces[0].signal), list(traces[1]))

    def test_spectogram_trace_set(self):
        """Spectrogram of [0, 1, 2] is |FFT|^2 = [9, 3, 3] (cf. test_fft below)."""
        traces = np.array([[0, 1, 2]])
        ts = TraceSet(traces=traces)
        conf = Namespace(reference_signal=None)
        ops.spectogram_trace_set(ts, None, conf, None)
        self.assertListEqual([round(x, 8) for x in list(ts.traces[0].signal)], [9., 3., 3.])

    def test_normalize_trace_set(self):
        """Normalization subtracts the per-trace mean (mean of [10,16,19] is 15)."""
        traces = np.array([[10, 16, 19],])
        expected = np.array([[-5, 1, 4],])
        ts = TraceSet(traces=traces)
        ops.normalize_trace_set(ts, None, None, None)
        for i in range(0, len(traces)):
            self.assertListEqual(list(ts.traces[i].signal), list(expected[i]))

    def test_fft_trace_set(self):
        """FFT of [0, 1, 2] matches the analytic 3-point DFT values."""
        traces = np.array([[0, 1, 2]])
        ts = TraceSet(traces=traces)
        conf = Namespace(reference_signal=None)
        ops.fft_trace_set(ts, None, conf, None)
        self.assertListEqual([round(x, 8) for x in list(ts.traces[0].signal)], [3.+0.j, -1.5+0.8660254j, -1.5-0.8660254j])

    def test_window_trace_set(self):
        """Rectangular windowing crops to [1, 5) and zero-pads short traces."""
        # dtype=object because the rows have different lengths (ragged).
        traces = np.array([[1], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4]],
                          dtype=object)
        params = [1, 5]
        expected = np.array([[0, 0, 0, 0], [2, 3, 4, 5], [2, 3, 4, 0]])
        ts = TraceSet(traces=traces)
        conf = Namespace(windowing_method="rectangular")
        ops.window_trace_set(ts, None, conf, params=params)
        for i in range(0, len(traces)):
            self.assertListEqual(list(ts.traces[i].signal), list(expected[i]))
class TestUtils(unittest.TestCase):
    """Tests for small helpers in ``emma.utils``."""

    def test_int_to_one_hot(self):
        from emma.utils.utils import int_to_one_hot

        # A one-hot vector has exactly one 1, at the encoded index.
        self.assertListEqual(list(int_to_one_hot(0, 256)), [1] + [0] * 255)
        for value in range(3):
            vector = [0, 0, 0]
            vector[value] = 1
            self.assertListEqual(list(int_to_one_hot(value, 3)), vector)
class TestIterator(unittest.TestCase):
def test_iterator_wrapping(self):
    """A batch larger than one trace file must wrap into the next file."""
    conf = Namespace(
        input_type=AIInputType.SIGNAL,
        leakage_model=LeakageModelType.SBOX_OH,
        max_cache=None,
        augment_roll=False,
        augment_noise=False,
        augment_shuffle=False,
        normalize=False,
        traces_per_set=32,
        online=False,
        dataset_id='test',
        format='cw',
        reference_signal=np.array([0] * 128),
        actions=[],
        cnn=False,
        key_low=2,
        key_high=3,
        norank=False,
    )
    batch_size = 48
    iterator = AICorrSignalIterator(
        ["./datasets/unit-test/test_traces.npy", "./datasets/unit-test/test2_traces.npy"],
        conf,
        batch_size=batch_size
    )

    # First batch: 48 traces drawn across two 32-trace files.
    inputs, labels = next(iterator)
    self.assertEqual(inputs.shape, (batch_size, 128))
    self.assertEqual(labels.shape, (batch_size, 256))
    expected_label = list(to_categorical(sbox[1 ^ 0], num_classes=256))
    for index in range(batch_size):
        self.assertListEqual(list(inputs[index]), [index] * 128)
        self.assertListEqual(list(labels[index]), expected_label)

    # Second batch: the iterator wraps around the 64 available traces.
    inputs, labels = next(iterator)
    self.assertEqual(inputs.shape, (batch_size, 128))
    self.assertEqual(labels.shape, (batch_size, 256))
    for index in range(batch_size):
        self.assertListEqual(list(inputs[index]), [(index + 48) % 64] * 128)
@unittest.skipIf(UnitTestSettings.TEST_FAST, "fast testing enabled")
def test_ascad_iterator(self):
"""
Check whether the AICorrSignalIterator returns the same output as load_ascad
:return:
"""
from ascad.ASCAD_train_models import load_ascad
conf = Namespace(
input_type=AIInputType.SIGNAL,
leakage_model=LeakageModelType.SBOX_OH,
max_cache=None,
augment_roll=False,
augment_noise=False,
augment_shuffle=False,
normalize=False,
traces_per_set=50000,
online=False,
dataset_id='test',
format='ascad',
reference_signal=np.array([0]*700),
actions=[Action('window[0,700]')],
cnn=False,
key_low=2,
key_high=3,
windowing_method='rectangular',
norank=False,
)
ascad_root = "./datasets/ASCAD/ASCAD_data/ASCAD_databases/ASCAD.h5"
ascad_paths = [
"%s#Profiling_traces[0:256]" % ascad_root,
"%s#Profiling_traces[256:512]" % ascad_root
]
iterator | |
# baidubce/services/bes/bes_client.py
# Copyright 2014 Baidu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provides a client class for BES.
"""
import copy
import json
import logging
import sys
from baidubce import bce_base_client
from baidubce import compat
from baidubce.auth import bce_v1_signer
from baidubce.http import bce_http_client
from baidubce.http import handler
from baidubce.http import http_methods
from baidubce.services.bes import bes_model
from baidubce.utils import required
_logger = logging.getLogger(__name__)
# Text-like types accepted by the @required validators: str/unicode on
# legacy Python 2 interpreters, str/bytes on Python 3.
value_type = (str, unicode) if sys.version_info[0] == 2 else (str, bytes)
class BesClient(bce_base_client.BceBaseClient):
"""
Bes sdk client
"""
prefix = b'/api/bes/cluster'
def __init__(self, config=None):
    """Initialize the client; ``config`` is an optional BCE client configuration."""
    bce_base_client.BceBaseClient.__init__(self, config)
@required(name=value_type,
          password=value_type,
          modules=list,
          version=value_type,
          slot_type=value_type,
          available_zone=value_type,
          security_group_id=value_type,
          subnet_uuid=value_type,
          vpc_id=value_type,
          billing=bes_model.Billing)
def create_cluster(self,
                   name,
                   password,
                   modules,
                   version,
                   slot_type,
                   available_zone,
                   security_group_id,
                   subnet_uuid,
                   vpc_id,
                   billing,
                   client_token=None):
    """
    Create an ES cluster.

    :param name: es cluster name
    :type name: string
    :param password: password used to manage the cluster
    :type password: string
    :param modules: module descriptions for the cluster nodes
    :type modules: list
    :param version: es cluster version
    :type version: string
    :param slot_type: node resource type
    :type slot_type: string
    :param available_zone: availability zone
    :type available_zone: string
    :param security_group_id: id of the security group
    :type security_group_id: string
    :param subnet_uuid: id of the subnet
    :type subnet_uuid: string
    :param vpc_id: id of the vpc
    :type vpc_id: string
    :param billing: billing information
    :type billing: bes_model.Billing
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    params = None if client_token is None else {'clientToken': client_token}
    body = {
        'name': name,
        'password': password,
        'modules': [module.__dict__ for module in modules],
        'version': version,
        'slotType': slot_type,
        'availableZone': available_zone,
        'securityGroupId': security_group_id,
        'subnetUuid': subnet_uuid,
        'vpcId': vpc_id,
        'billing': billing.__dict__
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, b'/create', params=params,
                              body=json.dumps(body), headers=headers)
@required(name=value_type,
          payment_type=value_type,
          cluster_id=value_type,
          region=value_type,
          modules=list)
def resize_cluster(self,
                   name,
                   payment_type,
                   cluster_id,
                   region,
                   modules,
                   client_token=None):
    """
    Resize an ES cluster.

    :param name: es cluster name
    :type name: string
    :param payment_type: mode of payment
    :type payment_type: string
    :param cluster_id: cluster id
    :type cluster_id: string
    :param region: region the cluster lives in (sent in the request body)
    :type region: string
    :param modules: node-resource modules describing the new size
    :type modules: list
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    path = b'/resize'
    params = {
        'orderType': "RESIZE"
    }
    # Fix: client_token was accepted but silently dropped here (dead
    # commented-out code), unlike every sibling method. Honor it so
    # resize requests can be retried idempotently.
    if client_token is not None:
        params['clientToken'] = client_token
    module_json_array = [module.__dict__ for module in modules]
    body = {
        'name': name,
        'paymentType': payment_type,
        'modules': module_json_array,
        'clusterId': cluster_id,
        'region': region
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, path, params=params,
                              body=json.dumps(body), headers=headers)
@required(page_no=int,
          page_size=int)
def get_cluster_list(self,
                     page_no,
                     page_size,
                     client_token=None):
    """
    List ES clusters, one page at a time.

    :param page_no: page number to fetch
    :type page_no: int
    :param page_size: number of clusters per page
    :type page_size: int
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    params = None if client_token is None else {'clientToken': client_token}
    body = {
        'pageNo': page_no,
        'pageSize': page_size
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, b'/list', params=params,
                              body=json.dumps(body), headers=headers)
@required(cluster_id=value_type)
def get_cluster_detail(self,
                       cluster_id,
                       client_token=None):
    """
    Fetch detailed information about one ES cluster.

    :param cluster_id: cluster id
    :type cluster_id: string
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    params = None if client_token is None else {'clientToken': client_token}
    body = {
        'clusterId': cluster_id
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, b'/detail', params=params,
                              body=json.dumps(body), headers=headers)
@required(cluster_id=value_type)
def start_cluster(self,
                  cluster_id,
                  client_token=None):
    """
    Start an ES cluster.

    :param cluster_id: cluster id
    :type cluster_id: string
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    params = None if client_token is None else {'clientToken': client_token}
    body = {
        'clusterId': cluster_id
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, b'/start', params=params,
                              body=json.dumps(body), headers=headers)
@required(cluster_id=value_type)
def stop_cluster(self,
                 cluster_id,
                 client_token=None):
    """
    Stop an ES cluster.

    :param cluster_id: cluster id
    :type cluster_id: string
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    params = None if client_token is None else {'clientToken': client_token}
    body = {
        'clusterId': cluster_id
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, b'/stop', params=params,
                              body=json.dumps(body), headers=headers)
@required(cluster_id=value_type)
def delete_cluster(self,
                   cluster_id,
                   client_token=None):
    """
    Delete an ES cluster.

    :param cluster_id: cluster id
    :type cluster_id: string
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    params = None if client_token is None else {'clientToken': client_token}
    body = {
        'clusterId': cluster_id
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, b'/delete', params=params,
                              body=json.dumps(body), headers=headers)
@required(cluster_id=value_type,
          instance_id=value_type)
def start_instance(self,
                   cluster_id,
                   instance_id,
                   client_token=None):
    """
    Start a single instance of an ES cluster.

    :param cluster_id: cluster id
    :type cluster_id: string
    :param instance_id: id of the instance to start
    :type instance_id: string
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    params = None if client_token is None else {'clientToken': client_token}
    body = {
        'clusterId': cluster_id,
        'instanceId': instance_id,
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, b'/instance/start', params=params,
                              body=json.dumps(body), headers=headers)
@required(cluster_id=value_type,
          instance_id=value_type)
def stop_instance(self,
                  cluster_id,
                  instance_id,
                  client_token=None):
    """
    Stop a single instance of an ES cluster.

    :param cluster_id: cluster id
    :type cluster_id: string
    :param instance_id: id of the instance to stop
    :type instance_id: string
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    params = None if client_token is None else {'clientToken': client_token}
    body = {
        'clusterId': cluster_id,
        'instanceId': instance_id,
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, b'/instance/stop', params=params,
                              body=json.dumps(body), headers=headers)
@required(order=value_type,
          order_by=value_type,
          page_no=int,
          page_size=int,
          days_to_expiration=int)
def get_renew_list(self,
                   order,
                   order_by,
                   page_no,
                   page_size,
                   days_to_expiration,
                   client_token=None):
    """
    List ES clusters that are candidates for renewal.

    :param order: sort direction
    :type order: string
    :param order_by: field to sort by
    :type order_by: string
    :param page_no: page number to fetch
    :type page_no: int
    :param page_size: number of clusters per page
    :type page_size: int
    :param days_to_expiration: look-ahead window, in days, before expiry
    :type days_to_expiration: int
    :param client_token: optional idempotency token
    :type client_token: string
    :return:
    :rtype baidubce.bce_response.BceResponse
    """
    params = None if client_token is None else {'clientToken': client_token}
    body = {
        'pageNo': page_no,
        'pageSize': page_size,
        'daysToExpiration': days_to_expiration,
        'order': order,
        'orderBy': order_by
    }
    headers = {b'x-Region': self.config.region,
               b'content-type': b'application/json;charset=UTF-8'}
    return self._send_request(http_methods.POST, b'/renew/list', params=params,
                              body=json.dumps(body), headers=headers)
@required(order=value_type,
cluster_id=value_type,
time=int)
def renew_cluster(self,
cluster_id,
time,
client_token=None):
"""
renew es cluster
:param cluster_id: The parameter to specify order | |
import os
import os.path as op
import ast
from werkzeug import secure_filename
from werkzeug.datastructures import FileStorage
from wtforms import ValidationError, fields
from wtforms.widgets import HTMLString, html_params
try:
from wtforms.fields.core import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from flask_admin.babel import gettext
from flask_admin.helpers import get_url
from flask_admin._compat import string_types, urljoin
try:
from PIL import Image, ImageOps
except ImportError:
Image = None
ImageOps = None
__all__ = ['FileUploadInput', 'MultipleFileUploadInput', 'FileUploadField', 'MultipleFileUploadField',
'ImageUploadInput', 'MultipleImageUploadInput', 'ImageUploadField', 'MultipleImageUploadField',
'namegen_filename', 'thumbgen_filename']
# Widgets
class FileUploadInput(object):
    """
    Widget that renders a single-file chooser.

    Override ``empty_template`` / ``data_template`` to customize the markup.
    """
    empty_template = ('<input %(file)s>')
    data_template = ('<div>'
                     ' <input %(text)s>'
                     ' <input type="checkbox" name="%(marker)s">Delete</input>'
                     '</div>'
                     '<input %(file)s>')

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        kwargs.setdefault('name', field.name)

        # Fall back to the bare chooser when there is no stored value or
        # when validation failed.
        if field.errors or not field.data:
            template = self.empty_template
        else:
            template = self.data_template

        if field.data and isinstance(field.data, FileStorage):
            current = field.data.filename
        else:
            current = field.data or ''

        return HTMLString(template % {
            'text': html_params(type='text',
                                readonly='readonly',
                                value=current,
                                name=field.name),
            'file': html_params(type='file',
                                value=current,
                                **kwargs),
            'marker': '_%s-delete' % field.name
        })
class MultipleFileUploadInput(object):
    """
    Widget that renders a multi-file chooser.

    Override ``empty_template`` / ``data_template`` to customize the markup.
    """
    empty_template = ('<input %(file)s multiple>')
    data_template = ('<div>'
                     ' %(files)s'
                     '</div>'
                     '<input %(file)s multiple>')

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        kwargs.setdefault('name', field.name)

        template = self.empty_template if (field.errors or not field.data) else self.data_template

        if field.data and isinstance(field.data, string_types):
            row = ("<input type='text', readonly='readonly', value='{0}', name='{0}' />"
                   "<input type='checkbox' name='_{0}-delete'>Delete</input>")
            rendered = " ".join(row.format(name) for name in self.get_filenames(field))
        else:
            rendered = ""

        return HTMLString(template % {
            "files": rendered,
            "file": html_params(type="file", **kwargs)
        })

    def get_filenames(self, field):
        """Deserialize the stored list-repr string into individual names."""
        for name in ast.literal_eval(field.data):
            yield name
class ImageUploadInput(object):
    """
    Widget that renders an image chooser with a thumbnail preview.

    Override ``empty_template`` / ``data_template`` to customize the markup.
    """
    empty_template = ('<input %(file)s>')
    data_template = ('<div class="image-thumbnail">'
                     ' <img %(image)s>'
                     ' <input type="checkbox" name="%(marker)s">Delete</input>'
                     ' <input %(text)s>'
                     '</div>'
                     '<input %(file)s>')

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        kwargs.setdefault('name', field.name)

        render_args = {
            'text': html_params(type='hidden',
                                value=field.data,
                                name=field.name),
            'file': html_params(type='file',
                                **kwargs),
            'marker': '_%s-delete' % field.name
        }

        # A string value means a previously stored image: show its thumbnail.
        if field.data and isinstance(field.data, string_types):
            render_args['image'] = html_params(src=self.get_url(field))
            return HTMLString(self.data_template % render_args)
        return HTMLString(self.empty_template % render_args)

    def get_url(self, field):
        """Build the public URL for the stored image (thumbnail if configured)."""
        filename = field.thumbnail_fn(field.data) if field.thumbnail_size else field.data
        if field.url_relative_path:
            filename = urljoin(field.url_relative_path, filename)
        return get_url(field.endpoint, filename=filename)
class MultipleImageUploadInput(object):
    """
    Widget that renders a multi-image chooser with thumbnail previews.

    Override ``empty_template`` / ``data_template`` to customize the markup.
    """
    empty_template = ('<input %(file)s multiple>')
    data_template = ('<div class="image-thumbnail">'
                     ' %(images)s'
                     '</div>'
                     '<input %(file)s multiple>')

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        kwargs.setdefault('name', field.name)

        render_args = {"file": html_params(type="file", **kwargs)}

        if field.data and isinstance(field.data, string_types):
            thumbs = ["<img src='{}' /><input type='checkbox' name='_{}-delete'>Delete</input>"
                      .format(src, name) for src, name in self.get_attributes(field)]
            render_args["images"] = " ".join(thumbs)
            return HTMLString(self.data_template % render_args)
        return HTMLString(self.empty_template % render_args)

    def get_attributes(self, field):
        """Yield ``(thumbnail_url, stored_filename)`` pairs for every image."""
        for stored in ast.literal_eval(field.data):
            filename = field.thumbnail_fn(stored) if field.thumbnail_size else stored
            if field.url_relative_path:
                filename = urljoin(field.url_relative_path, filename)
            yield get_url(field.endpoint, filename=filename), stored
# Fields
class FileUploadField(fields.StringField):
    """
    Customizable file-upload field.
    Saves file to configured path, handles updates and deletions. Inherits from `StringField`,
    resulting filename will be stored as string.
    """
    widget = FileUploadInput()
    def __init__(self, label=None, validators=None,
                 base_path=None, relative_path=None,
                 namegen=None, allowed_extensions=None,
                 permission=0o666, allow_overwrite=True,
                 **kwargs):
        """
        Constructor.
        :param label:
            Display label
        :param validators:
            Validators
        :param base_path:
            Absolute path to the directory which will store files
        :param relative_path:
            Relative path from the directory. Will be prepended to the file name for uploaded files.
            Flask-Admin uses `urlparse.urljoin` to generate resulting filename, so make sure you have
            trailing slash.
        :param namegen:
            Function that will generate filename from the model and uploaded file object.
            Please note, that model is "dirty" model object, before it was committed to database.
            For example::
                import os.path as op
                def prefix_name(obj, file_data):
                    parts = op.splitext(file_data.filename)
                    return secure_filename('file-%s%s' % parts)
                class MyForm(BaseForm):
                    upload = FileUploadField('File', namegen=prefix_name)
        :param allowed_extensions:
            List of allowed extensions. If not provided, will allow any file.
        :param permission:
            Mode bits passed to `os.makedirs` (with execute bits added) when a
            missing upload directory has to be created.
        :param allow_overwrite:
            Whether to overwrite existing files in upload directory. Defaults to `True`.
        .. versionadded:: 1.1.1
            The `allow_overwrite` parameter was added.
        """
        self.base_path = base_path
        self.relative_path = relative_path
        self.namegen = namegen or namegen_filename
        self.allowed_extensions = allowed_extensions
        self.permission = permission
        self._allow_overwrite = allow_overwrite
        # Set when the form submission carries the field's delete marker.
        self._should_delete = False
        super(FileUploadField, self).__init__(label, validators, **kwargs)
    def is_file_allowed(self, filename):
        """
        Check if file extension is allowed.
        :param filename:
            File name to check
        """
        if not self.allowed_extensions:
            return True
        # Compare the final extension case-insensitively against the allow-list.
        return ('.' in filename and
                filename.rsplit('.', 1)[1].lower() in
                map(lambda x: x.lower(), self.allowed_extensions))
    def _is_uploaded_file(self, data):
        # Truthy only for an actual browser upload (a FileStorage with a name).
        return (data and isinstance(data, FileStorage) and data.filename)
    def pre_validate(self, form):
        """Reject disallowed extensions and, if configured, existing filenames."""
        if self._is_uploaded_file(self.data) and not self.is_file_allowed(self.data.filename):
            raise ValidationError(gettext('Invalid file extension'))
        # Handle overwriting existing content
        if not self._is_uploaded_file(self.data):
            return
        if not self._allow_overwrite and os.path.exists(self._get_path(self.data.filename)):
            raise ValidationError(gettext('File "%s" already exists.' % self.data.filename))
    def process(self, formdata, data=unset_value):
        """Record whether the delete checkbox was submitted, then process normally."""
        if formdata:
            marker = '_%s-delete' % self.name
            if marker in formdata:
                self._should_delete = True
        return super(FileUploadField, self).process(formdata, data)
    def process_formdata(self, valuelist):
        """Pick the first genuine upload from the submitted values (or clear on delete)."""
        if self._should_delete:
            self.data = None
        elif valuelist:
            for data in valuelist:
                if self._is_uploaded_file(data):
                    self.data = data
                    break
    def populate_obj(self, obj, name):
        """Apply a deletion or save the new upload, then store the filename on *obj*."""
        field = getattr(obj, name, None)
        if field:
            # If field should be deleted, clean it up
            if self._should_delete:
                self._delete_file(field)
                setattr(obj, name, None)
                return
        if self._is_uploaded_file(self.data):
            if field:
                # Replacing an existing file: remove the old one first.
                self._delete_file(field)
            filename = self.generate_name(obj, self.data)
            filename = self._save_file(self.data, filename)
            # update filename of FileStorage to our validated name
            self.data.filename = filename
            setattr(obj, name, filename)
    def generate_name(self, obj, file_data):
        """Generate the stored filename, prefixed with relative_path when set."""
        filename = self.namegen(obj, file_data)
        if not self.relative_path:
            return filename
        return urljoin(self.relative_path, filename)
    def _get_path(self, filename):
        """Resolve *filename* against base_path (which may be a callable)."""
        if not self.base_path:
            raise ValueError('FileUploadField field requires base_path to be set.')
        if callable(self.base_path):
            return op.join(self.base_path(), filename)
        return op.join(self.base_path, filename)
    def _delete_file(self, filename):
        """Remove the stored file from disk, ignoring missing files."""
        path = self._get_path(filename)
        if op.exists(path):
            os.remove(path)
    def _save_file(self, data, filename):
        """Write the upload to disk, creating directories as needed; returns *filename*."""
        path = self._get_path(filename)
        if not op.exists(op.dirname(path)):
            # Directories additionally need the execute bits to be traversable.
            os.makedirs(os.path.dirname(path), self.permission | 0o111)
        if (self._allow_overwrite is False) and os.path.exists(path):
            raise ValueError(gettext('File "%s" already exists.' % path))
        data.save(path)
        return filename
class MultipleFileUploadField(FileUploadField):
    """
    Customizable multiple file-upload field.

    Saves files to the configured path and handles updates and deletions.
    The resulting filenames are stored as the ``str()`` of a Python list and
    parsed back with :func:`ast.literal_eval`.
    """
    widget = MultipleFileUploadInput()

    def process(self, formdata, data=unset_value):
        """Keep the raw form data so per-file delete markers can be checked later."""
        self.formdata = formdata
        return super(MultipleFileUploadField, self).process(formdata, data)

    def process_formdata(self, valuelist):
        """Collect every genuine upload from the submitted values."""
        self.data = [value for value in valuelist if self._is_uploaded_file(value)]

    def populate_obj(self, obj, name):
        """Apply deletions, save new uploads, then store the filename list on *obj*."""
        current = getattr(obj, name, None)
        kept = []
        if current:
            for stored_name in ast.literal_eval(current):
                if "_{}-delete".format(stored_name) in self.formdata:
                    # Marked for deletion: remove from disk and drop from the list.
                    self._delete_file(stored_name)
                else:
                    kept.append(stored_name)
        for upload in self.data:
            if self._is_uploaded_file(upload):
                saved_name = self.generate_name(obj, upload)
                saved_name = self._save_file(upload, saved_name)
                upload.filename = saved_name
                kept.append(saved_name)
        setattr(obj, name, str(kept))
class ImageUploadField(FileUploadField):
"""
Image upload field.
Does image validation, thumbnail generation, updating and deleting images.
Requires PIL (or Pillow) to be installed.
"""
widget = ImageUploadInput()
keep_image_formats = ('PNG',)
"""
If field detects that uploaded image is not in this list, it will save image
as PNG.
"""
def __init__(self, label=None, validators=None,
base_path=None, relative_path=None,
namegen=None, allowed_extensions=None,
max_size=None,
thumbgen=None, thumbnail_size=None,
permission=0o666,
url_relative_path=None, endpoint='static',
**kwargs):
"""
Constructor.
:param label:
Display label
:param validators:
Validators
:param base_path:
Absolute path to the directory which will store files
:param relative_path:
Relative path from the directory. Will be prepended to the file name for uploaded files.
Flask-Admin uses `urlparse.urljoin` to generate resulting filename, so make sure you have
trailing slash.
:param namegen:
Function that will generate filename from the model and uploaded file object.
Please note, that model is "dirty" model object, before it was committed to database.
For example::
import os.path as op
def prefix_name(obj, file_data):
parts = op.splitext(file_data.filename)
return secure_filename('file-%s%s' % parts)
class MyForm(BaseForm):
upload = FileUploadField('File', namegen=prefix_name)
:param allowed_extensions:
List of allowed extensions. If not provided, then gif, jpg, jpeg, png and tiff will be allowed.
:param max_size:
Tuple of (width, height, force) or None. If provided, Flask-Admin will
resize image to the desired size.
Width and height is in pixels. If `force` is set to `True`, will try to fit image into dimensions and
keep aspect ratio, otherwise will just resize to target size.
:param thumbgen:
Thumbnail filename | |
대한민국 전라북도 전주시 완산구에서 아버지 김종구, 어머니 김희자 사이의 1남 2녀 중 둘째로 태어났다. 가족으로는 오빠 김지웅, 여동생 김하연이 있다. 어릴 적부터 춤을 좋아했고 특히 명절 때는 친척들이 춤을 시키면 곧잘 추었다던 태연은 TV에서 보아를 보고 가수의 꿈을 갖게 되었다고 한다. 전주양지초등학교를 졸업하였고 전주양지중학교 2학년이던 2003년 SM아카데미 스타라이트 메인지방보컬과 4기에 들어가게 되면서 아버지와 함께 주말마다 전주에서 서울로 이동하며 가수의 꿈을 키웠다. 2004년에 당시 보컬 트레이너였던 더 원의 정규 2집 수록곡 〈You Bring Me Joy (Part 2)〉에 피처링으로 참여했다. 당시 만 15세였던 태연은 현재 활동하는 소속사 SM 엔터테인먼트에 들어가기 전이었다. 이후 태연은 2004년 8월에 열린 제8회 SM 청소년 베스트 선발 대회에서 노래짱 부문에 출전해 1위(대상)를 수상하였고 SM 엔터테인먼트에 정식 캐스팅되어 연습생 생활을 시작하게 되었다. 2005년 청담고등학교에 입학하였으나, 학교 측에서 연예계 활동을 용인하지 않아 전주예술고등학교 방송문화예술과로 전학하였고 2008년 졸업하면서 학교를 빛낸 공로로 공로상을 수상했다. 태연은 연습생 생활이 힘들어 숙소에서 몰래 뛰쳐나갔다가 하루 만에 다시 돌아오기도 했다고 이야기하기도 했다. 이후 SM엔터테인먼트에서 3년여의 연습생 기간을 거쳐 걸 그룹 소녀시대의 멤버로 정식 데뷔하게 되었다."
... ])
[['21일 저녁 목성과 토성 1623년 이후 397년 만에 가까워져', ' 2080년 3월 15일 대근접 예측', ' 크리스마스 즈음 남서쪽 하늘 올려보면 관측 가능'],
['태연, 2004년 청소년 베스트 선발 대회에서 노래짱 대상 수상', ' 태연, SM엔터테인먼트에서 3년여의 연습생 기간 거쳐 걸 그룹 소녀시대의 멤버로 정식 데뷔']]
>>> summ = Pororo(task="summarization", model="extractive", lang="ko")
>>> summ([
... "목성과 토성이 약 400년 만에 가장 가까이 만났습니다. 국립과천과학관 등 천문학계에 따르면 21일 저녁 목성과 토성은 1623년 이후 397년 만에 가장 가까워졌는데요. 크리스마스 즈음까지 남서쪽 하늘을 올려다보면 목성과 토성이 가까워지는 현상을 관측할 수 있습니다. 목성의 공전주기는 11.9년, 토성의 공전주기는 29.5년인데요. 공전주기의 차이로 두 행성은 약 19.9년에 한 번 가까워집니다. 이번 근접 때 목성과 토성 사이 거리는 보름달 지름의 5분의 1 정도로 가까워졌습니다. 맨눈으로 보면 두 행성이 겹쳐져 하나의 별처럼 보이는데요. 지난 21일 이후 목성과 토성의 대근접은 2080년 3월 15일로 예측됩니다. 과천과학관 측은 우리가 대근접을 볼 수 있는 기회는 이번이 처음이자 마지막이 될 가능성이 크다라고 설명했 습니다.",
... "가수 김태연은 걸 그룹 소녀시대, 소녀시대-태티서 및 소녀시대-Oh!GG의 리더이자 메인보컬이다. 2004년 SM에서 주최한 청소년 베스트 선발 대회에서 노래짱 대상을 수상하며 SM 엔터테인먼트에 캐스팅되었다. 이후 3년간의 연습생을 거쳐 2007년 소녀시대의 멤버로 데뷔했다. 태연은 1989년 3월 9일 대한민국 전라북도 전주시 완산구에서 아버지 김종구, 어머니 김희자 사이의 1남 2녀 중 둘째로 태어났다. 가족으로는 오빠 김지웅, 여동생 김하연이 있다. 어릴 적부터 춤을 좋아했고 특히 명절 때는 친척들이 춤을 시키면 곧잘 추었다던 태연은 TV에서 보아를 보고 가수의 꿈을 갖게 되었다고 한다. 전주양지초등학교를 졸업하였고 전주양지중학교 2학년이던 2003년 SM아카데미 스타라이트 메인지방보컬과 4기에 들어가게 되면서 아버지와 함께 주말마다 전주에서 서울로 이동하며 가수의 꿈을 키웠다. 2004년에 당시 보컬 트레이너였던 더 원의 정규 2집 수록곡 〈You Bring Me Joy (Part 2)〉에 피처링으로 참여했다. 당시 만 15세였던 태연은 현재 활동하는 소속사 SM 엔터테인먼트에 들어가기 전이었다. 이후 태연은 2004년 8월에 열린 제8회 SM 청소년 베스트 선발 대회에서 노래짱 부문에 출전해 1위(대상)를 수상하였고 SM 엔터테인먼트에 정식 캐스팅되어 연습생 생활을 시작하게 되었다. 2005년 청담고등학교에 입학하였으나, 학교 측에서 연예계 활동을 용인하지 않아 전주예술고등학교 방송문화예술과로 전학하였고 2008년 졸업하면서 학교를 빛낸 공로로 공로상을 수상했다. 태연은 연습생 생활이 힘들어 숙소에서 몰래 뛰쳐나갔다가 하루 만에 다시 돌아오기도 했다고 이야기하기도 했다. 이후 SM엔터테인먼트에서 3년여의 연습생 기간을 거쳐 걸 그룹 소녀시대의 멤버로 정식 데뷔하게 되었다."
... ])
['국립과천과학관 등 천문학계에 따르면 21일 저녁 목성과 토성은 1623년 이후 397년 만에 가장 가까워졌는데요. 크리스마스 즈음까지 남서쪽 하늘을 올려다보면 목성과 토성이 가까워지는 현상을 관측할 수 있습니다. 지난 21일 이후 목성과 토성의 대근접은 2080년 3월 15일로 예측됩니다.',
'2004년 SM에서 주최한 청소년 베스트 선발 대회에서 노래짱 대상을 수상하며 SM 엔터테인먼트에 캐스팅되었다. 이후 태연은 2004년 8월에 열린 제8회 SM 청소년 베스트 선발 대회에서 노래짱 부문에 출전해 1위(대상)를 수상하였고 SM 엔터테인먼트에 정식 캐스팅되어 연습생 생활을 시작하게 되었다. 이후 SM엔터테인먼트에서 3년여의 연습생 기간을 거쳐 걸 그룹 소녀시대의 멤버로 정식 데뷔하게 되었다.']
"""
def __init__(self, task: str, lang: str, model: Optional[str]):
    """Delegate to the base factory; ``model`` may be None to use the default."""
    super().__init__(task, lang, model)
@staticmethod
def get_available_langs():
    """Return the languages supported by the summarization task."""
    return ["ko"]
@staticmethod
def get_available_models():
    """Return, per language, the model names (and aliases) users may select."""
    return {
        "ko": [
            "abstractive",
            "bullet",
            "extractive",
            "kobart.base.ko.summary",
            "kobart.base.ko.bullet",
            "brainbert.base.ko.summary",
        ],
    }
def load(self, device: str):
    """
    Load user-selected task-specific model

    Args:
        device (str): device information

    Returns:
        object: User-selected task-specific model
    """
    from pororo.tasks.tokenization import PororoTokenizationFactory

    # Resolve the user-friendly aliases to concrete model names
    # (note: this intentionally mutates self.config.n_model).
    aliases = {
        "abstractive": "kobart.base.ko.summary",
        "bullet": "kobart.base.ko.bullet",
        "extractive": "brainbert.base.ko.summary",
    }
    self.config.n_model = aliases.get(self.config.n_model, self.config.n_model)

    def tokenize_sentences(text):
        # Sentence splitter shared by the extractive models.
        return PororoTokenizationFactory(
            task="tokenization",
            lang=self.config.lang,
            model=f"sent_{self.config.lang}",
        ).load(device).predict(text)

    if "kobart" in self.config.n_model:
        from pororo.models.bart.KoBART import KoBartModel

        model_path = download_or_load(
            f"bart/{self.config.n_model}",
            self.config.lang,
        )
        model = KoBartModel.from_pretrained(
            device=device,
            model_path=model_path,
        )

        if "bullet" in self.config.n_model:
            # Bullet summaries first extract salient sentences, then abstract them.
            ext_summary = PororoRobertaSummary(
                tokenize_sentences,
                device,
                "brainbert.base.ko.summary",
                self.config,
            )
            return PororoKoBartBulletSummary(
                model=model,
                config=self.config,
                ext_summary=ext_summary,
            )
        return PororoKoBartSummary(model=model, config=self.config)

    if "brainbert" in self.config.n_model:
        return PororoRobertaSummary(
            tokenize_sentences,
            device,
            self.config.n_model,
            self.config,
        )
class PororoKoBartSummary(PororoGenerationBase):
    """Abstractive summarizer backed by a fine-tuned KoBART model."""

    def __init__(self, model, config):
        super(PororoKoBartSummary, self).__init__(config)
        self._model = model

    @torch.no_grad()
    def predict(
        self,
        text: Union[str, List[str]],
        beam: int = 5,
        temperature: float = 1.0,
        top_k: int = -1,
        top_p: float = -1,
        no_repeat_ngram_size: int = 4,
        len_penalty: float = 1.0,
        **kwargs,
    ):
        """
        Conduct abstractive summarization

        Args:
            text (Union[str, List[str]]): input text to be extracted
            beam (int): beam search size
            temperature (float): temperature scale
            top_k (int): top-K sampling vocabulary size
            top_p (float): top-p sampling ratio
            no_repeat_ngram_size (int): no repeat ngram size
            len_penalty (float): length penalty ratio

        Returns:
            (str) summarized text
        """
        # Sampling kicks in as soon as either top-k or top-p is requested.
        use_sampling = (top_k != -1) or (top_p != -1)
        return self._model.translate(
            text,
            beam=beam,
            sampling=use_sampling,
            temperature=temperature,
            sampling_topk=top_k,
            sampling_topp=top_p,
            max_len_a=1,
            max_len_b=50,
            no_repeat_ngram_size=no_repeat_ngram_size,
            length_penalty=len_penalty,
        )

    def __call__(
        self,
        text: Union[str, List[str]],
        beam: int = 5,
        temperature: float = 1.0,
        top_k: int = -1,
        top_p: float = -1,
        no_repeat_ngram_size: int = 4,
        len_penalty: float = 1.0,
        **kwargs,
    ):
        """Convenience wrapper that simply forwards to :meth:`predict`."""
        return self.predict(
            text,
            beam=beam,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            no_repeat_ngram_size=no_repeat_ngram_size,
            len_penalty=len_penalty,
        )
class PororoKoBartBulletSummary(PororoGenerationBase):
    """Bullet-point summarizer: extractive pre-pass followed by KoBART generation."""

    def __init__(self, model, config, ext_summary):
        super().__init__(config)
        self._model = model
        self._ext_summary = ext_summary

    def _postprocess(self, output: Union[str, List[str]]):
        """
        Clean up generated tokens and split them into bullet items.

        Args:
            output (Union[str, List[str]]): raw token sequence from the model

        Returns:
            List[str]: bullet strings, split on the "<unused0>" separator
        """
        joined = "".join(output).replace("▁", " ")
        for special in ("<s>", "</s>", "<pad>"):
            joined = joined.replace(special, "")
        return joined.strip().split("<unused0>")

    @torch.no_grad()
    def predict(
        self,
        text: Union[str, List[str]],
        beam: int = 12,
        temperature: float = 1.0,
        top_k: int = -1,
        top_p: float = -1,
        no_repeat_ngram_size: int = 4,
        len_penalty: float = 1.0,
    ):
        """
        Conduct bullet-point summarization.

        Args:
            text (Union[str, List[str]]): input text(s) to summarize
            beam (int): beam search size
            temperature (float): temperature scale
            top_k (int): top-K sampling vocabulary size (-1 disables)
            top_p (float): top-p sampling ratio (-1 disables)
            no_repeat_ngram_size (int): no repeat ngram size
            len_penalty (float): length penalty ratio

        Returns:
            (str) summarized text
        """
        use_sampling = (top_k != -1) or (top_p != -1)

        # Condense the input with the extractive model before generation.
        if isinstance(text, str):
            condensed = self._ext_summary(text)
        else:
            condensed = [self._ext_summary(item) for item in text]

        # Ban bracket tokens from the generated output.
        to_id = self._model.tokenizer.convert_tokens_to_ids
        banned = [[to_id(tok)] for tok in ("[", "]", "▁[", "▁]", "】", "【")]

        output = self._model.translate(
            condensed,
            beam=beam,
            sampling=use_sampling,
            temperature=temperature,
            sampling_topk=top_k,
            sampling_topp=top_p,
            max_len_a=1,
            max_len_b=50,
            no_repeat_ngram_size=no_repeat_ngram_size,
            length_penalty=len_penalty,
            return_tokens=True,
            bad_words_ids=banned,
        )
        if isinstance(text, str):
            return self._postprocess(output)
        return [self._postprocess(item) for item in output]

    def __call__(
        self,
        text: Union[str, List[str]],
        beam: int = 12,
        temperature: float = 1.0,
        top_k: int = -1,
        top_p: float = -1,
        no_repeat_ngram_size: int = 4,
        len_penalty: float = 1.0,
    ):
        """Delegate to :meth:`predict`."""
        return self.predict(
            text,
            beam=beam,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            no_repeat_ngram_size=no_repeat_ngram_size,
            len_penalty=len_penalty,
        )
class PororoRobertaSummary(PororoSimpleBase):
def __init__(
    self,
    sent_tokenizer,
    device: str,
    ext_model_name: str,
    config,
):
    """
    Build the RoBERTa-based extractive summarizer.

    Args:
        sent_tokenizer: callable that splits raw text into sentences
        device (str): device identifier, e.g. "cuda" or "cpu"
            (a torch.device also works)
        ext_model_name (str): checkpoint name of the extractive model
        config: task configuration object (provides `lang` and `n_model`)
    """
    super().__init__(config)
    ckpt_dir = download_or_load(f"bert/{ext_model_name}", config.lang)
    tok_path = download_or_load(
        f"tokenizers/bpe32k.{config.lang}.zip",
        config.lang,
    )
    x = hub_utils.from_pretrained(
        ckpt_dir,
        "model.pt",
        load_checkpoint_heads=True,
    )
    wrapper = BrainRobertaHubInterface(
        x["args"],
        x["task"],
        x["models"][0],
        tok_path,
    )
    clf_dict = torch.load(
        f"{ckpt_dir}/classifier.pt",
        map_location=device,
    )
    # Encoder hidden size: 768 for "base" checkpoints, else 1024.
    # NOTE(review): this keys off config.n_model rather than ext_model_name --
    # both are "base" checkpoints here, but confirm if "large" models are added.
    classifier_size = 768 if "base" in config.n_model else 1024
    self._device = device
    self._classifier = nn.Linear(classifier_size, 1).to(device).eval()
    self._classifier.load_state_dict(clf_dict)
    self._model = wrapper.model.encoder.sentence_encoder.to(device).eval()
    # BUGFIX: `device` is annotated as str, and str has no `.type` attribute,
    # so the original `"cuda" in device.type` raised AttributeError for plain
    # string devices.  str(device) handles both str ("cuda") and
    # torch.device ("cuda:0") inputs.
    if "cuda" in str(device):
        # Use half precision on GPU for speed/memory.
        self._model = self._model.half()
        self._classifier = self._classifier.half()
    self._tokenizer = BertSumTokenizer(
        bpe=wrapper.bpe,
        dictionary=wrapper.task.source_dictionary,
        sent_tokenizer=sent_tokenizer,
    )
@torch.no_grad()
def predict(self, text: str, return_list: bool = False):
"""
Conduct extractive summarization
Args:
text (str): input text
return_list (bool): whether to return as list
Returns:
(str) summarized text
(List[str]) list of text if return_list is True
"""
encoded = self._tokenizer.encode_batch(text, max_length=512)
input_ids = encoded["input_ids"].to(self._device)
segment_ids = encoded["segment_ids"].to(self._device)
sentences = encoded["sentences"][0] | |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0906411,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.273882,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.568838,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.336469,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.582643,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.334162,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.25327,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.245375,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.32463,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.107466,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0121973,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.11891,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0902063,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.226376,
'Execution Unit/Register Files/Runtime Dynamic': 0.102404,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.312167,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.805067,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.84,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00148775,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00148775,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00130448,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000509718,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00129582,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0055758,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0139553,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0867176,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.51598,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.253829,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.294532,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.0063,
'Instruction Fetch Unit/Runtime Dynamic': 0.65461,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0768637,
'L2/Runtime Dynamic': 0.0204211,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.39234,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.54398,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.102079,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.102079,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.87634,
'Load Store Unit/Runtime Dynamic': 2.14948,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.251709,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.503418,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0893323,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0903967,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.342963,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0418778,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.6559,
'Memory Management Unit/Runtime Dynamic': 0.132275,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.5017,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.374924,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0217167,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.168049,
'Renaming Unit/Int Front End RAT/Subthreshold | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# BibleBooksCodes.py
#
# Module handling BibleBooksCodes functions
#
# Copyright (C) 2010-2022 <NAME>
# Author: <NAME> <<EMAIL>>
# License: See gpl-3.0.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Module handling BibleBooksCodes functions.
BibleOrgSys uses a three-character book code to identify books.
These referenceAbbreviations are nearly always represented as BBB in the program code
(although formally named referenceAbbreviation
and possibly still represented as that in some of the older code),
and in a sense, this is the centre of the BibleOrgSys.
The referenceAbbreviation/BBB always starts with a letter, and letters are always UPPERCASE
so 2 Corinthians is 'CO2' not '2Co' or anything.
This was because early versions of HTML ID fields used to need
to start with a letter (not a digit),
(and most identifiers in computer languages still require that).
"""
from gettext import gettext as _
from typing import Dict, List, Tuple
import os
import logging
# if __name__ == '__main__':
# import sys
# aboveAboveFolderpath = os.path.dirname( os.path.dirname( os.path.dirname( os.path.abspath( __file__ ) ) ) )
# if aboveAboveFolderpath not in sys.path:
# sys.path.insert( 0, aboveAboveFolderpath )
from singleton import singleton
import BibleOrgSysGlobals
from BibleOrgSysGlobals import fnPrint, vPrint, dPrint
LAST_MODIFIED_DATE = '2022-03-25' # by RJH
SHORT_PROGRAM_NAME = "BibleBooksCodes"
PROGRAM_NAME = "Bible Books Codes handler"
PROGRAM_VERSION = '0.87'
programNameVersion = f'{SHORT_PROGRAM_NAME} v{PROGRAM_VERSION}'
debuggingThisModule = False
@singleton # Can only ever have one instance
class BibleBooksCodes:
"""
Class for handling BibleBooksCodes.
This class doesn't deal at all with XML, only with Python dictionaries, etc.
Note: BBB is used in this class to represent the three-character referenceAbbreviation.
"""
def __init__( self ) -> None: # We can't give this parameters because of the singleton
    """
    Constructor:

    Only initialises the internal cache -- the actual lookup tables
    are loaded lazily by loadData().
    """
    self.__DataDicts = None # We'll import into this in loadData
# end of BibleBooksCodes.__init__
def loadData( self, XMLFileOrFilepath=None ):
    """
    Loads the JSON or pickle or XML data file (in that order unless the parameter is given)
    and imports it to dictionary format (if not done already).

    Tries the derived pickle first (fastest), then the derived JSON,
    and finally falls back to converting the source XML (slowest).
    Returns self so the call can be chained after object creation.
    """
    if not self.__DataDicts: # We need to load them once -- don't do this unnecessarily
        if XMLFileOrFilepath is None:
            # See if we can load from the pickle file (faster than loading from the XML)
            standardXMLFileOrFilepath = BibleOrgSysGlobals.BOS_DATAFILES_FOLDERPATH.joinpath( 'BibleBooksCodes.xml' )
            standardPickleFilepath = BibleOrgSysGlobals.BOS_DERIVED_DATAFILES_FOLDERPATH.joinpath( 'BibleBooksCodes_Tables.pickle' )
            try:
                # Only trust the pickle if it's newer than the XML it was derived from
                pickleIsNewer = os.stat(standardPickleFilepath).st_mtime > os.stat(standardXMLFileOrFilepath).st_mtime \
                            and os.stat(standardPickleFilepath).st_ctime > os.stat(standardXMLFileOrFilepath).st_ctime
            except FileNotFoundError as e:
                pickleIsNewer = 'xml' in str(e) # Couldn't find xml file -- these aren't included in PyPI package
            if pickleIsNewer:
                import pickle
                vPrint( 'Info', debuggingThisModule, f"Loading pickle file {standardPickleFilepath}…" )
                with open( standardPickleFilepath, 'rb') as pickleFile:
                    self.__DataDicts = pickle.load( pickleFile ) # The protocol version used is detected automatically, so we do not have to specify it
                return self # So this command can be chained after the object creation
            elif debuggingThisModule:
                vPrint( 'Quiet', debuggingThisModule, "BibleBooksCodes pickle file can't be loaded!" )
            # Next best: a derived JSON file, again only if newer than the XML
            standardJsonFilepath = BibleOrgSysGlobals.BOS_DERIVED_DATAFILES_FOLDERPATH.joinpath( 'BibleBooksCodes_Tables.json' )
            if os.access( standardJsonFilepath, os.R_OK ) \
            and os.stat(standardJsonFilepath).st_mtime > os.stat(standardXMLFileOrFilepath).st_mtime \
            and os.stat(standardJsonFilepath).st_ctime > os.stat(standardXMLFileOrFilepath).st_ctime: # There's a newer JSON file
                import json
                vPrint( 'Info', debuggingThisModule, f"Loading json file {standardJsonFilepath}…" )
                with open( standardJsonFilepath, 'rb') as JsonFile:
                    self.__DataDicts = json.load( JsonFile )
                # NOTE: We have to convert str referenceNumber keys back to ints
                # (JSON object keys are always strings)
                self.__DataDicts['referenceNumberDict'] = { int(key):value \
                                for key,value in self.__DataDicts['referenceNumberDict'].items() }
                return self # So this command can be chained after the object creation
            elif debuggingThisModule:
                vPrint( 'Quiet', debuggingThisModule, "BibleBooksCodes JSON file can't be loaded!" )
        # else: # We have to load the XML (much slower)
        from BibleBooksCodesConverter import BibleBooksCodesConverter
        if XMLFileOrFilepath is not None:
            # NOTE(review): this warning claims the filepath was ignored, yet it IS
            # passed to loadAndValidate below -- the message looks misleading; verify.
            logging.warning( _("Bible books codes are already loaded -- your given filepath of {!r} was ignored").format(XMLFileOrFilepath) )
        bbcc = BibleBooksCodesConverter()
        bbcc.loadAndValidate( XMLFileOrFilepath ) # Load the XML (if not done already)
        self.__DataDicts = bbcc.importDataToPython() # Get the various dictionaries organised for quick lookup
    return self # So this command can be chained after the object creation
# end of BibleBooksCodes.loadData
def __str__( self ) -> str:
    """
    Return a short multi-line summary of this BibleBooksCodes object.

    @return: the name of a Bible object formatted as a string
    @rtype: string
    """
    indentStr = ' ' * 2
    summaryLines = ["BibleBooksCodes object"]
    summaryLines.append( indentStr + _("Number of entries = {:,}").format( len(self.__DataDicts['referenceAbbreviationDict']) ) )
    return '\n'.join( summaryLines )
# end of BibleBooksCodes.__str__
def __len__( self ):
    """
    Return the number of available book codes.
    """
    byAbbreviation = self.__DataDicts['referenceAbbreviationDict']
    byNumber = self.__DataDicts['referenceNumberDict']
    assert len(byAbbreviation) == len(byNumber) # The two lookup tables must stay in sync
    return len(byAbbreviation)
# end of BibleBooksCodes.__len__
def __contains__( self, BBB:str ) -> bool:
    """ Returns True or False. """
    # Membership test against the main lookup table keyed by BBB code
    return BBB in self.__DataDicts['referenceAbbreviationDict']
def __iter__( self ) -> str:
    """ Yields each BBB (three-character book code) in turn. """
    yield from self.__DataDicts['referenceAbbreviationDict']
# end of BibleBooksCodes.__iter__
def isValidBBB( self, BBB:str ) -> bool:
    """
    Returns True or False.

    Same semantics as `BBB in self` (see __contains__).
    """
    return BBB in self.__DataDicts['referenceAbbreviationDict']
def getBBBFromReferenceNumber( self, referenceNumber ) -> str:
    """
    Return the referenceAbbreviation for the given book number (referenceNumber).

    This is probably only useful in the range 1..66 (GEN..REV).
    (After that, it specifies our arbitrary order.)

    Accepts an int or a numeric string.
    Raises ValueError if the number is outside 1..999
    (or if a string argument isn't numeric),
    and KeyError if no book has that reference number.
    """
    if isinstance( referenceNumber, str ):
        referenceNumber = int( referenceNumber ) # Convert str to int if necessary
    if not 1 <= referenceNumber <= 999:
        # Give a helpful message rather than the original bare ValueError
        raise ValueError( f"Bible book referenceNumber {referenceNumber!r} is out of range 1..999" )
    return self.__DataDicts['referenceNumberDict'][referenceNumber]['referenceAbbreviation']
# end of BibleBooksCodes.getBBBFromReferenceNumber
def getAllReferenceAbbreviations( self ) -> List[str]:
    """ Returns a list of all possible BBB codes (dictionary key order). """
    return list( self.__DataDicts['referenceAbbreviationDict'] )
# end of BibleBooksCodes.getAllReferenceAbbreviations
def getReferenceNumber( self, BBB:str ) -> int:
    """ Return the referenceNumber 1..999 for the given book code (referenceAbbreviation). """
    # Raises KeyError if BBB is not a known book code
    return self.__DataDicts['referenceAbbreviationDict'][BBB]['referenceNumber']
def getSequenceList( self, myList=None ) -> List[str]:
    """
    Return a list of BBB codes in a sequence that could be used for the print order
    if no further information is available.

    With no argument, the full default sequence is returned.
    Given a list of books (plain BBB strings, or tuples whose first element
    is the BBB), the SAME items are returned reordered into the default sequence.
    """
    if myList is None:
        return self.__DataDicts['sequenceList']
    # They must have given us their list of books
    assert isinstance( myList, list )
    if not myList:
        return [] # Return an empty list if that's what they gave

    def extractBBB( entry ):
        # Entries may be plain BBB strings or tuples starting with the BBB
        return entry if isinstance( entry, str ) else entry[0]

    for entry in myList:
        assert self.isValidBBB( extractBBB(entry) ) # Check the supplied list
    orderedResult = []
    for seqBBB in self.__DataDicts['sequenceList']:
        # Keep the first caller-supplied entry matching this sequence position
        matched = next( (entry for entry in myList if extractBBB(entry)==seqBBB), None )
        if matched is not None:
            orderedResult.append( matched )
    assert len(orderedResult) == len(myList)
    return orderedResult
# end of BibleBooksCodes.getSequenceList
def _getFullEntry( self, BBB:str ) -> dict:
    """
    Return the full dictionary for the given book (code).

    Internal helper -- raises KeyError for an unknown BBB.
    """
    return self.__DataDicts['referenceAbbreviationDict'][BBB]
def getCCELNumber( self, BBB:str ) -> str:
    """
    Return the CCEL number string for the given book code (referenceAbbreviation).

    Note: the return annotation was corrected from int to str -- the stored
    value is 'CCELNumberString', a string, as the name and docstring indicate.
    """
    return self.__DataDicts['referenceAbbreviationDict'][BBB]['CCELNumberString']
def getShortAbbreviation( self, BBB:str ) -> str:
""" Return the short abbreviation string | |
<filename>tests/labhub_test.py
import queue
import textwrap
from unittest.mock import Mock, MagicMock, create_autospec, PropertyMock, patch
import github3
import IGitt
from IGitt.GitHub.GitHubMergeRequest import GitHubMergeRequest
from IGitt.GitLab.GitLabMergeRequest import GitLabMergeRequest
from IGitt.GitHub.GitHubIssue import GitHubIssue
from errbot.backends.test import TestBot
from errbot.backends.base import Message
import plugins.labhub
from plugins.labhub import LabHub
from tests.corobo_test_case import CoroboTestCase
class TestLabHub(CoroboTestCase):
def setUp(self):
    """
    Create autospec'd GitHub/GitLab doubles and load the LabHub plugin.

    Patches the github3 module and the IGitt wrapper classes referenced by
    plugins.labhub so no real network objects are constructed, then injects
    the shared mocks (repos, teams) into the loaded plugin instance.
    """
    super().setUp((plugins.labhub.LabHub,))
    plugins.labhub.github3 = create_autospec(github3)
    self.mock_org = create_autospec(github3.orgs.Organization)
    self.mock_gh = create_autospec(github3.GitHub)
    self.mock_team = create_autospec(github3.orgs.Team)
    # BUGFIX: the original assigned `PropertyMock()` to `name` and then
    # immediately overwrote it with the literal below, so that first
    # assignment was dead code and has been removed.  (Mocking `name` as a
    # real property would require setting it on type(self.mock_team).)
    self.mock_team.name = 'mocked team'
    self.teams = {
        'coala newcomers': self.mock_team,
        'coala developers': self.mock_team,
        'coala maintainers': self.mock_team,
    }
    self.mock_repo = create_autospec(IGitt.GitHub.GitHub.GitHubRepository)
    plugins.labhub.github3.login.return_value = self.mock_gh
    self.mock_gh.organization.return_value = self.mock_org
    self.mock_org.teams.return_value = [self.mock_team]
    plugins.labhub.github3.organization.return_value = self.mock_org
    # Patch the IGitt entry points so plugin code can't hit the network.
    plugins.labhub.GitHub = create_autospec(IGitt.GitHub.GitHub.GitHub)
    plugins.labhub.GitLab = create_autospec(IGitt.GitLab.GitLab.GitLab)
    plugins.labhub.GitHubToken = create_autospec(IGitt.GitHub.GitHubToken)
    plugins.labhub.GitLabPrivateToken = create_autospec(
        IGitt.GitLab.GitLabPrivateToken)
    self.global_mocks = {
        'REPOS': {
            'repository': self.mock_repo,
            'repository.github.io': self.mock_repo,
        },
        '_teams': self.teams,
    }
    self.labhub = self.load_plugin('LabHub', self.global_mocks)
def test_invite_cmd(self):
mock_team_newcomers = create_autospec(github3.orgs.Team)
mock_team_developers = create_autospec(github3.orgs.Team)
mock_team_maintainers = create_autospec(github3.orgs.Team)
self.teams['coala newcomers'] = mock_team_newcomers
self.teams['coala developers'] = mock_team_developers
self.teams['coala maintainers'] = mock_team_maintainers
mock_dict = {
'TEAMS': self.teams,
'is_room_member': MagicMock(),
}
self.inject_mocks('LabHub', mock_dict)
testbot = self
plugins.labhub.os.environ['GH_TOKEN'] = 'patched?'
self.assertEqual(self.labhub.TEAMS, self.teams)
mock_dict['is_room_member'].return_value = False
testbot.assertCommand('!invite meet to newcomers',
'@meet is not a member of this room.')
mock_dict['is_room_member'].return_value = True
# invite by maintainer
mock_team_newcomers.is_member.return_value = True
mock_team_developers.is_member.return_value = True
mock_team_maintainers.is_member.return_value = True
testbot.assertCommand(
'!invite meet to newcomers',
'To get started, please follow our [newcomers guide]')
testbot.assertCommand('!invite meet to developers',
'@meet, you are a part of developers')
testbot.assertCommand('!invite meet to maintainers',
'@meet you seem to be awesome!')
# invite by developer
mock_team_maintainers.is_member.return_value = False
mock_dict['is_room_member'].return_value = True
testbot.assertCommand(
'!invite meet to newcomers',
'To get started, please follow our [newcomers guide]')
testbot.assertCommand('!invite meet to developers',
':poop:')
testbot.assertCommand('!invite meet to maintainers',
':poop:')
# invite by newcomer
mock_team_developers.is_member.return_value = False
testbot.assertCommand('!invite meet to newcomers',
':poop')
testbot.assertCommand('!invite meet to developers',
':poop:')
testbot.assertCommand('!invite meet to maintainers',
':poop:')
# invalid team
testbot.assertCommand('!invite meet to something',
'select from one of the valid')
# invalid command
testbot.assertCommand('!invite meetto newcomers',
'Command "invite" / "invite meetto" not found.')
# not a member of org
mock_team_newcomers.is_member.return_value = False
mock_team_developers.is_member.return_value = False
mock_team_maintainers.is_member.return_value = False
testbot.assertCommand(
'!invite meet to newcomers',
'You need to be a member of this organization to use this command')
def test_is_room_member(self):
msg = create_autospec(Message)
msg.frm.room.occupants = PropertyMock()
msg.frm.room.occupants = ['batman', 'superman']
self.assertTrue(LabHub.is_room_member('batman', msg))
def test_hello_world_callback(self):
self.mock_team.is_member.return_value = False
testbot = self
testbot.assertCommand('hello, world', 'newcomer')
# Since the user won't be invited again, it'll timeout waiting for a
# response.
with self.assertRaises(queue.Empty):
testbot.assertCommand('helloworld', 'newcomer')
def test_create_issue_cmd(self):
plugins.labhub.GitHubToken.assert_called_with(None)
plugins.labhub.GitLabPrivateToken.assert_called_with(None)
# Start ignoring PycodestyleBear, LineLengthBear
# TODO
# Ignoring assertion to prevent build failure for time being
# Creating issue in private chat
# testbot_private.assertCommand('!new issue repository this is the title\nbo\ndy',
# 'You\'re not allowed')
# Stop ignoring
# Creating issue in public chat
self.mock_team.is_member.return_value = True
testbot_public = self
testbot_public.assertCommand(
textwrap.dedent('''\
!new issue repository this is the title
first line of body
second line of body
'''),
'Here you go')
self.global_mocks['REPOS']['repository'].create_issue \
.assert_called_once_with(
'this is the title',
textwrap.dedent('''\
first line of body
second line of body
Opened by @None at [text]()''')
)
testbot_public.assertCommand(
textwrap.dedent('''\
!new issue repository.github.io another title
body
'''),
'Here you go')
self.global_mocks['REPOS']['repository.github.io'].create_issue \
.assert_called_with(
'another title',
textwrap.dedent('''\
body
Opened by @None at [text]()''')
)
testbot_public.assertCommand(
'!new issue coala title',
'repository that does not exist')
# not a member of org
self.mock_team.is_member.return_value = False
testbot_public.assertCommand(
textwrap.dedent('''\
!new issue repository this is the title
body
'''),
'You need to be a member of this organization to use this command.'
)
def test_is_newcomer_issue(self):
mock_iss = create_autospec(IGitt.GitHub.GitHubIssue)
mock_iss.labels = PropertyMock()
mock_iss.labels = ('difficulty/newcomer',)
self.assertTrue(LabHub.is_newcomer_issue(mock_iss))
mock_iss.labels = ('difficulty/medium',)
self.assertFalse(LabHub.is_newcomer_issue(mock_iss))
def test_unassign_cmd(self):
self.inject_mocks('LabHub', {'REPOS': {'example': self.mock_repo}})
mock_iss = create_autospec(IGitt.GitHub.GitHubIssue)
self.mock_repo.get_issue.return_value = mock_iss
mock_iss.assignees = PropertyMock()
mock_iss.assignees = (None, )
mock_iss.unassign = MagicMock()
self.mock_team.is_member.return_value = True
testbot = self
testbot.assertCommand(
'!unassign https://github.com/coala/example/issues/999',
'you are unassigned now',
timeout=10000)
self.mock_repo.get_issue.assert_called_with(999)
mock_iss.unassign.assert_called_once_with(None)
mock_iss.assignees = ('meetmangukiya', )
testbot.assertCommand(
'!unassign https://github.com/coala/example/issues/999',
'not an assignee on the issue')
testbot.assertCommand(
'!unassign https://github.com/coala/example2/issues/999',
'Repository doesn\'t exist.')
testbot.assertCommand(
'!unassign https://gitlab.com/example/test/issues/999',
'Repository not owned by our org.')
# not a member of org
self.mock_team.is_member.return_value = False
testbot.assertCommand(
'!unassign https://github.com/coala/test/issues/23',
'You need to be a member of this organization '
'to use this command.')
def test_assign_cmd(self):
mock_issue = create_autospec(GitHubIssue)
self.mock_repo.get_issue.return_value = mock_issue
mock_dev_team = create_autospec(github3.orgs.Team)
mock_maint_team = create_autospec(github3.orgs.Team)
mock_dev_team.is_member.return_value = False
mock_maint_team.is_member.return_value = False
self.teams['coala developers'] = mock_dev_team
self.teams['coala maintainers'] = mock_maint_team
mock_dict = {
'REPOS': {'a': self.mock_repo},
'TEAMS': self.teams,
}
self.inject_mocks('LabHub', mock_dict)
testbot = self
cmd = '!assign https://github.com/{}/{}/issues/{}'
# no assignee, not newcomer
mock_issue.assignees = tuple()
self.mock_team.is_member.return_value = False
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'You need to be a member of this organization '
'to use this command.')
# no assignee, newcomer, initiatives/gci
self.mock_team.is_member.return_value = True
mock_maint_team.is_member.return_value = False
mock_dev_team.is_member.return_value = False
mock_issue.labels = 'initiatives/gci',
mock_issue.assignees = tuple()
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'You are not eligible to be assigned'
' to this issue')
testbot.pop_message()
# no assignee, developer, initiatives/gci
mock_maint_team.is_member.return_value = False
mock_dev_team.is_member.return_value = True
mock_issue.labels = 'initiatives/gci',
mock_issue.assignees = tuple()
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'You are not eligible to be assigned'
' to this issue')
testbot.pop_message()
mock_dev_team.is_member.return_value = False
# no assignee, newcomer, difficulty/low
mock_issue.labels = PropertyMock()
mock_issue.labels = ('difficulty/low', )
mock_issue.assignees = tuple()
self.mock_team.is_member.return_value = True
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'You\'ve been assigned to the issue')
# no assignee, newcomer, no labels
self.mock_team.is_member.return_value = True
mock_issue.labels = tuple()
mock_issue.assignees = tuple()
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'not eligible to be assigned to this issue')
testbot.pop_message()
# no assignee, newcomer, difficulty medium
mock_issue.labels = ('difficulty/medium', )
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'not eligible to be assigned to this issue')
testbot.pop_message()
# no assignee, newcomer, difficulty medium
mock_dict = {
'GH_ORG_NAME': 'not-coala',
'TEAMS': {
'not-coala newcomers': self.mock_team,
'not-coala developers': mock_dev_team,
'not-coala maintainers': mock_maint_team,
},
}
self.inject_mocks('LabHub', mock_dict)
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'assigned')
mock_dict['GH_ORG_NAME'] = 'coala'
mock_dict['TEAMS'] = self.teams
self.inject_mocks('LabHub', mock_dict)
# newcomer, developer, difficulty/medium
mock_dev_team.is_member.return_value = True
mock_maint_team.is_member.return_value = False
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'assigned')
# has assignee
mock_issue.assignees = ('somebody', )
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'already assigned to someone')
# has assignee same as user
mock_issue.assignees = (None, )
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'already assigned to you')
# non-existent repository
testbot.assertCommand(cmd.format('coala', 'c', '23'),
'Repository doesn\'t exist.')
# unknown org
testbot.assertCommand(cmd.format('coa', 'a', '23'),
'Repository not owned by our org.')
# no assignee, newcomer, difficulty/newcomer, second newcomer issue
mock_issue.assignees = tuple()
mock_dev_team.is_member.return_value = False
mock_issue.labels = ('difficulty/newcomer', )
with patch('plugins.labhub.GitHub') as mock_gh:
mock_gh.raw_search = Mock()
mock_gh.raw_search.return_value = ['mocked?']
testbot.assertCommand(cmd.format('coala', 'a', '23'),
'not eligible to be assigned to this issue')
def test_mark_cmd(self):
self.inject_mocks('LabHub', {'REPOS': {'test': self.mock_repo}})
testbot = self
mock_github_mr = create_autospec(GitHubMergeRequest)
mock_gitlab_mr = create_autospec(GitLabMergeRequest)
mock_github_mr.labels = PropertyMock()
mock_gitlab_mr.labels = PropertyMock()
mock_github_mr.author = 'johndoe'
mock_gitlab_mr.author = 'johndoe'
cmd_github = '!mark {} https://github.com/{}/{}/pull/{}'
cmd_gitlab = '!mark {} https://gitlab.com/{}/{}/merge_requests/{}'
self.mock_repo.get_mr.return_value = mock_github_mr
self.mock_team.is_member.return_value = True
# Non-eistent repo
testbot.assertCommand(cmd_github.format('wip', 'a', 'b', '23'),
'Repository doesn\'t exist.')
testbot.assertCommand(
'!mark wip https://gitlab.com/a/b/merge_requests/2',
'Repository doesn\'t exist.')
mock_github_mr.web_url = 'https://github.com/coala/test/pull/23'
mock_gitlab_mr.web_url = (
'https://gitlab.com/coala/test/merge_requests/23')
# mark wip
mock_github_mr.labels = ['process/pending review']
mock_gitlab_mr.labels = ['process/pending review']
testbot.assertCommand(cmd_github.format('wip', 'coala', 'test', '23'),
'marked work in progress')
testbot.assertCommand(cmd_github.format('wip', 'coala', 'test', '23'),
'@johndoe, please check your pull request')
testbot.assertCommand(cmd_github.format('wip', 'coala', 'test', '23'),
'https://github.com/coala/test/pull/23')
self.mock_repo.get_mr.return_value = mock_gitlab_mr
testbot.assertCommand(cmd_gitlab.format('wip', 'coala', 'test', '23'),
'@johndoe, please check your pull request')
testbot.assertCommand(
cmd_gitlab.format('wip', 'coala', 'test', '23'),
'https://gitlab.com/coala/test/merge_requests/23')
self.mock_repo.get_mr.return_value = mock_github_mr
# mark pending
mock_github_mr.labels = ['process/wip']
mock_gitlab_mr.labels = ['process/wip']
testbot.assertCommand(
cmd_github.format('pending', 'coala', 'test', '23'),
'marked pending review')
testbot.assertCommand(
cmd_github.format('pending-review', 'coala', 'test', '23'),
'marked pending review')
testbot.assertCommand(
cmd_github.format('pending review', 'coala', 'test', '23'),
'marked pending review')
# not a member of org
self.mock_team.is_member.return_value = False
testbot.assertCommand(
cmd_github.format('pending review', 'coala', 'a', '23'),
'You need to be a member of this organization to use this command')
def test_alive(self):
with patch('plugins.labhub.time.sleep') as mock_sleep:
gh_repos_mock = {
'coala':
create_autospec(IGitt.GitHub.GitHub.GitHubRepository),
'coala-bears':
create_autospec(IGitt.GitHub.GitHub.GitHubRepository),
'coala-utils':
create_autospec(IGitt.GitHub.GitHub.GitHubRepository),
}
# for the branch where program sleeps
gh_repos_mock.update({str(i):
create_autospec(
IGitt.GitHub.GitHub.GitHubRepository)
for i in range(30)})
gl_repos_mock = {
'test': create_autospec(IGitt.GitLab.GitLab.GitLabRepository),
}
self.mock_team.is_member.return_value = True
mock_dict = {
'gh_repos': gh_repos_mock,
'gl_repos': gl_repos_mock,
}
self.inject_mocks('LabHub', mock_dict)
testbot = self
mock_dict['gh_repos']['coala'].search_mrs.return_value = [1, 2]
mock_dict['gh_repos']['coala-bears'].search_mrs.return_value = []
mock_dict['gh_repos']['coala-utils'].search_mrs.return_value = []
testbot.assertCommand('!pr stats 10hours',
'2 PRs opened in last 10 hours\n'
'The community is alive', timeout=100)
| |
stack.
See also: AWS API Documentation
:example: response = client.describe_layers(
StackId='string',
LayerIds=[
'string',
]
)
:type StackId: string
:param StackId: The stack ID.
:type LayerIds: list
:param LayerIds: An array of layer IDs that specify the layers to be described. If you omit this parameter, DescribeLayers returns a description of every layer in the specified stack.
(string) --
:rtype: dict
:return: {
'Layers': [
{
'StackId': 'string',
'LayerId': 'string',
'Type': 'aws-flow-ruby'|'ecs-cluster'|'java-app'|'lb'|'web'|'php-app'|'rails-app'|'nodejs-app'|'memcached'|'db-master'|'monitoring-master'|'custom',
'Name': 'string',
'Shortname': 'string',
'Attributes': {
'string': 'string'
},
'CloudWatchLogsConfiguration': {
'Enabled': True|False,
'LogStreams': [
{
'LogGroupName': 'string',
'DatetimeFormat': 'string',
'TimeZone': 'LOCAL'|'UTC',
'File': 'string',
'FileFingerprintLines': 'string',
'MultiLineStartPattern': 'string',
'InitialPosition': 'start_of_file'|'end_of_file',
'Encoding': 'ascii'|'big5'|'big5hkscs'|'cp037'|'cp424'|'cp437'|'cp500'|'cp720'|'cp737'|'cp775'|'cp850'|'cp852'|'cp855'|'cp856'|'cp857'|'cp858'|'cp860'|'cp861'|'cp862'|'cp863'|'cp864'|'cp865'|'cp866'|'cp869'|'cp874'|'cp875'|'cp932'|'cp949'|'cp950'|'cp1006'|'cp1026'|'cp1140'|'cp1250'|'cp1251'|'cp1252'|'cp1253'|'cp1254'|'cp1255'|'cp1256'|'cp1257'|'cp1258'|'euc_jp'|'euc_jis_2004'|'euc_jisx0213'|'euc_kr'|'gb2312'|'gbk'|'gb18030'|'hz'|'iso2022_jp'|'iso2022_jp_1'|'iso2022_jp_2'|'iso2022_jp_2004'|'iso2022_jp_3'|'iso2022_jp_ext'|'iso2022_kr'|'latin_1'|'iso8859_2'|'iso8859_3'|'iso8859_4'|'iso8859_5'|'iso8859_6'|'iso8859_7'|'iso8859_8'|'iso8859_9'|'iso8859_10'|'iso8859_13'|'iso8859_14'|'iso8859_15'|'iso8859_16'|'johab'|'koi8_r'|'koi8_u'|'mac_cyrillic'|'mac_greek'|'mac_iceland'|'mac_latin2'|'mac_roman'|'mac_turkish'|'ptcp154'|'shift_jis'|'shift_jis_2004'|'shift_jisx0213'|'utf_32'|'utf_32_be'|'utf_32_le'|'utf_16'|'utf_16_be'|'utf_16_le'|'utf_7'|'utf_8'|'utf_8_sig',
'BufferDuration': 123,
'BatchCount': 123,
'BatchSize': 123
},
]
},
'CustomInstanceProfileArn': 'string',
'CustomJson': 'string',
'CustomSecurityGroupIds': [
'string',
],
'DefaultSecurityGroupNames': [
'string',
],
'Packages': [
'string',
],
'VolumeConfigurations': [
{
'MountPoint': 'string',
'RaidLevel': 123,
'NumberOfDisks': 123,
'Size': 123,
'VolumeType': 'string',
'Iops': 123
},
],
'EnableAutoHealing': True|False,
'AutoAssignElasticIps': True|False,
'AutoAssignPublicIps': True|False,
'DefaultRecipes': {
'Setup': [
'string',
],
'Configure': [
'string',
],
'Deploy': [
'string',
],
'Undeploy': [
'string',
],
'Shutdown': [
'string',
]
},
'CustomRecipes': {
'Setup': [
'string',
],
'Configure': [
'string',
],
'Deploy': [
'string',
],
'Undeploy': [
'string',
],
'Shutdown': [
'string',
]
},
'CreatedAt': 'string',
'InstallUpdatesOnBoot': True|False,
'UseEbsOptimizedInstances': True|False,
'LifecycleEventConfiguration': {
'Shutdown': {
'ExecutionTimeout': 123,
'DelayUntilElbConnectionsDrained': True|False
}
}
},
]
}
:returns:
(string) --
(string) --
"""
pass
def describe_load_based_auto_scaling(LayerIds=None):
    """
    Describes load-based auto scaling configurations for specified layers.
    See also: AWS API Documentation
    :example: response = client.describe_load_based_auto_scaling(
        LayerIds=[
            'string',
        ]
    )
    :type LayerIds: list
    :param LayerIds: [REQUIRED]
    An array of layer IDs.
    (string) --
    :rtype: dict
    :return: {
        'LoadBasedAutoScalingConfigurations': [
            {
                'LayerId': 'string',
                'Enable': True|False,
                'UpScaling': {
                    'InstanceCount': 123,
                    'ThresholdsWaitTime': 123,
                    'IgnoreMetricsTime': 123,
                    'CpuThreshold': 123.0,
                    'MemoryThreshold': 123.0,
                    'LoadThreshold': 123.0,
                    'Alarms': [
                        'string',
                    ]
                },
                'DownScaling': {
                    'InstanceCount': 123,
                    'ThresholdsWaitTime': 123,
                    'IgnoreMetricsTime': 123,
                    'CpuThreshold': 123.0,
                    'MemoryThreshold': 123.0,
                    'LoadThreshold': 123.0,
                    'Alarms': [
                        'string',
                    ]
                }
            },
        ]
    }
    :returns:
    (string) --
    """
    # NOTE(review): generated documentation stub — body is intentionally
    # empty; the real call is presumably dispatched by the AWS client.
    pass
def describe_my_user_profile():
    """
    Describes a user's SSH information.
    See also: AWS API Documentation
    :example: response = client.describe_my_user_profile()
    :rtype: dict
    :return: {
        'UserProfile': {
            'IamUserArn': 'string',
            'Name': 'string',
            'SshUsername': 'string',
            'SshPublicKey': 'string'
        }
    }
    """
    # NOTE(review): generated documentation stub — body is intentionally
    # empty; the real call is presumably dispatched by the AWS client.
    pass
def describe_permissions(IamUserArn=None, StackId=None):
    """
    Describes the permissions for a specified stack.
    See also: AWS API Documentation
    :example: response = client.describe_permissions(
        IamUserArn='string',
        StackId='string'
    )
    :type IamUserArn: string
    :param IamUserArn: The user's IAM ARN. This can also be a federated user's ARN. For more information about IAM ARNs, see Using Identifiers .
    :type StackId: string
    :param StackId: The stack ID.
    :rtype: dict
    :return: {
        'Permissions': [
            {
                'StackId': 'string',
                'IamUserArn': 'string',
                'AllowSsh': True|False,
                'AllowSudo': True|False,
                'Level': 'string'
            },
        ]
    }
    :returns:
    If the request object contains only a stack ID, the array contains a Permission object with permissions for each of the stack IAM ARNs.
    If the request object contains only an IAM ARN, the array contains a Permission object with permissions for each of the user's stack IDs.
    If the request contains a stack ID and an IAM ARN, the array contains a single Permission object with permissions for the specified stack and IAM ARN.
    """
    # NOTE(review): generated documentation stub — body is intentionally
    # empty; the real call is presumably dispatched by the AWS client.
    pass
def describe_raid_arrays(InstanceId=None, StackId=None, RaidArrayIds=None):
    """
    Describe an instance's RAID arrays.
    See also: AWS API Documentation
    :example: response = client.describe_raid_arrays(
        InstanceId='string',
        StackId='string',
        RaidArrayIds=[
            'string',
        ]
    )
    :type InstanceId: string
    :param InstanceId: The instance ID. If you use this parameter, DescribeRaidArrays returns descriptions of the RAID arrays associated with the specified instance.
    :type StackId: string
    :param StackId: The stack ID.
    :type RaidArrayIds: list
    :param RaidArrayIds: An array of RAID array IDs. If you use this parameter, DescribeRaidArrays returns descriptions of the specified arrays. Otherwise, it returns a description of every array.
    (string) --
    :rtype: dict
    :return: {
        'RaidArrays': [
            {
                'RaidArrayId': 'string',
                'InstanceId': 'string',
                'Name': 'string',
                'RaidLevel': 123,
                'NumberOfDisks': 123,
                'Size': 123,
                'Device': 'string',
                'MountPoint': 'string',
                'AvailabilityZone': 'string',
                'CreatedAt': 'string',
                'StackId': 'string',
                'VolumeType': 'string',
                'Iops': 123
            },
        ]
    }
    """
    # NOTE(review): generated documentation stub — body is intentionally
    # empty; the real call is presumably dispatched by the AWS client.
    pass
def describe_rds_db_instances(StackId=None, RdsDbInstanceArns=None):
    """
    Describes Amazon RDS instances.
    This call accepts only one resource-identifying parameter.
    See also: AWS API Documentation
    :example: response = client.describe_rds_db_instances(
        StackId='string',
        RdsDbInstanceArns=[
            'string',
        ]
    )
    :type StackId: string
    :param StackId: [REQUIRED]
    The stack ID that the instances are registered with. The operation returns descriptions of all registered Amazon RDS instances.
    :type RdsDbInstanceArns: list
    :param RdsDbInstanceArns: An array containing the ARNs of the instances to be described.
    (string) --
    :rtype: dict
    :return: {
        'RdsDbInstances': [
            {
                'RdsDbInstanceArn': 'string',
                'DbInstanceIdentifier': 'string',
                'DbUser': 'string',
                'DbPassword': 'string',
                'Region': 'string',
                'Address': 'string',
                'Engine': 'string',
                'StackId': 'string',
                'MissingOnRds': True|False
            },
        ]
    }
    """
    # NOTE(review): restored 'DbPassword' placeholder to 'string' (was an
    # anonymization artifact '<PASSWORD>'); every other field uses 'string'.
    # Generated documentation stub — body is intentionally empty.
    pass
def describe_service_errors(StackId=None, InstanceId=None, ServiceErrorIds=None):
    """
    Describes AWS OpsWorks Stacks service errors.
    This call accepts only one resource-identifying parameter.
    See also: AWS API Documentation
    :example: response = client.describe_service_errors(
        StackId='string',
        InstanceId='string',
        ServiceErrorIds=[
            'string',
        ]
    )
    :type StackId: string
    :param StackId: The stack ID. If you use this parameter, DescribeServiceErrors returns descriptions of the errors associated with the specified stack.
    :type InstanceId: string
    :param InstanceId: The instance ID. If you use this parameter, DescribeServiceErrors returns descriptions of the errors associated with the specified instance.
    :type ServiceErrorIds: list
    :param ServiceErrorIds: An array of service error IDs. If you use this parameter, DescribeServiceErrors returns descriptions of the specified errors. Otherwise, it returns a description of every error.
    (string) --
    :rtype: dict
    :return: {
        'ServiceErrors': [
            {
                'ServiceErrorId': 'string',
                'StackId': 'string',
                'InstanceId': 'string',
                'Type': 'string',
                'Message': 'string',
                'CreatedAt': 'string'
            },
        ]
    }
    """
    # NOTE(review): generated documentation stub — body is intentionally
    # empty; the real call is presumably dispatched by the AWS client.
    pass
def describe_stack_provisioning_parameters(StackId=None):
    """
    Requests a description of a stack's provisioning parameters.
    See also: AWS API Documentation
    :example: response = client.describe_stack_provisioning_parameters(
        StackId='string'
    )
    :type StackId: string
    :param StackId: [REQUIRED]
    The stack ID
    :rtype: dict
    :return: {
        'AgentInstallerUrl': 'string',
        'Parameters': {
            'string': 'string'
        }
    }
    """
    # NOTE(review): generated documentation stub — body is intentionally
    # empty; the real call is presumably dispatched by the AWS client.
    pass
def describe_stack_summary(StackId=None):
    """
    Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online .
    See also: AWS API Documentation
    :example: response = client.describe_stack_summary(
        StackId='string'
    )
    :type StackId: string
    :param StackId: [REQUIRED]
    The stack ID.
    :rtype: dict
    :return: {
        'StackSummary': {
            'StackId': 'string',
            'Name': 'string',
            'Arn': 'string',
            'LayersCount': 123,
            'AppsCount': 123,
            'InstancesCount': {
                'Assigning': 123,
                'Booting': 123,
                'ConnectionLost': 123,
                'Deregistering': 123,
                'Online': 123,
                'Pending': 123,
                'Rebooting': 123,
                'Registered': 123,
                'Registering': 123,
                'Requested': 123,
                'RunningSetup': 123,
                'SetupFailed': 123,
                'ShuttingDown': 123,
                'StartFailed': 123,
                'Stopped': 123,
                'Stopping': 123,
                'Terminated': 123,
                'Terminating': 123,
                'Unassigning': 123
            }
        }
    }
    """
    # NOTE(review): generated documentation stub — body is intentionally
    # empty; the real call is presumably dispatched by the AWS client.
    pass
def describe_stacks(StackIds=None):
    """
    Requests a description of one or more stacks.
    See also: AWS API Documentation
    :example: response = client.describe_stacks(
        StackIds=[
            'string',
        ]
    )
    :type StackIds: list
    :param StackIds: An array of stack IDs that specify the stacks to be described. If you omit this parameter, DescribeStacks returns a description of every stack.
    (string) --
    :rtype: dict
    :return: {
        'Stacks': [
            {
                'StackId': 'string',
                'Name': 'string',
                'Arn': 'string',
                'Region': 'string',
                'VpcId': 'string',
                'Attributes': {
                    'string': 'string'
                },
                'ServiceRoleArn': 'string',
                'DefaultInstanceProfileArn': 'string',
                'DefaultOs': 'string',
                'HostnameTheme': 'string',
                'DefaultAvailabilityZone': 'string',
                'DefaultSubnetId': 'string',
                'CustomJson': 'string',
                'ConfigurationManager': {
                    'Name': 'string',
                    'Version': 'string'
                },
                'ChefConfiguration': {
                    'ManageBerkshelf': True|False,
                    'BerkshelfVersion': 'string'
                },
                'UseCustomCookbooks': True|False,
                'UseOpsworksSecurityGroups': True|False,
                'CustomCookbooksSource': {
                    'Type': 'git'|'svn'|'archive'|'s3',
                    'Url': 'string',
                    'Username': 'string',
                    'Password': 'string',
                    'SshKey': 'string',
                    'Revision': 'string'
                },
                'DefaultSshKeyName': 'string',
                'CreatedAt': 'string',
                'DefaultRootDeviceType': 'ebs'|'instance-store',
                'AgentVersion': 'string'
            },
        ]
    }
    :returns:
    (string) --
    (string) --
    """
    # NOTE(review): restored 'Password' placeholder to 'string' (was an
    # anonymization artifact '<PASSWORD>'); every other field uses 'string'.
    # Generated documentation stub — body is intentionally empty.
    pass
def describe_time_based_auto_scaling(InstanceIds=None):
"""
Describes time-based auto scaling configurations for specified instances.
See also: AWS API Documentation
:example: response = client.describe_time_based_auto_scaling(
InstanceIds=[
'string',
]
)
:type InstanceIds: list
:param InstanceIds: [REQUIRED]
An array | |
<filename>tests.py<gh_stars>0
__author__ = "<NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
# MODULE EXPOSURE
# Public API of this helper module; ``assert_error`` is defined and used
# alongside the other helpers, so it is exported for consistency.
__all__ = [
    "assert_eq",
    "assert_error",
    "assert_identical",
    "assert_is",
    "assert_not_identical",
    "none",
]
# IMPORTS
from pyrat.base import identical, vector
# FUNCTIONS
def assert_eq(a, b):
    """Assert plain equality of *a* and *b*; vector operands are rejected
    because ``==`` on vectors is elementwise (use assert_identical)."""
    outcome = a == b
    if isinstance(outcome, vector):
        raise ValueError("assert_eq is incorrect for vectors")
    assert outcome, "%r != %r" % (a, b)
def assert_is(a, b):
    """Assert that *a* and *b* are the very same object (``is`` identity)."""
    assert a is b, "%r is not %r" % (a, b)
def assert_error(f, err):
    """Assert that calling *f* with no arguments raises *err*.

    FIX: the original silently succeeded when *f* returned without
    raising, so it asserted nothing in the no-exception case.  Now an
    AssertionError is raised when *err* does not occur.  Exceptions of
    other types propagate unchanged, as before.
    """
    try:
        f()
    except err:
        return
    raise AssertionError("%r did not raise %r" % (f, err))
def assert_identical(a, b):
    """Assert that *a* and *b* compare equal under identical()."""
    assert identical(a, b), "%r != %r" % (a, b)
def assert_not_identical(a, b):
    """Assert that *a* and *b* differ under identical()."""
    assert not identical(a, b), "%r == %r" % (a, b)
def none(x):
    """Return True when *x* contains no truthy element (complement of any)."""
    return all(not item for item in x)
# SCRIPT
if __name__ == "__main__":
import operator
from pyrat.base import *
from pyrat.closure import *
from pyrat.stats import *
# VARIABLES
t12 = (1, 2)
t123 = (1, 2, 3)
v12 = c(1, 2)
v123 = c(1, 2, 3)
v213 = c(2, 1, 3)
ptrn = r"[Aa]"
vstr = c("python", "a rat", "pirate")
# VECTOR TESTS
# empty calls to identical
assert_error(lambda: identical(), TypeError)
assert_error(lambda: identical(None), TypeError)
assert identical(None, None)
# identical ensures that tuples are truly the same
assert_identical(t123, t123)
assert_not_identical(t123, t12)
# identical ensures that vectors are truly the same
assert_identical(v123, v123)
assert_not_identical(v123, v12)
# vector is a class...
assert_identical(c(1, 2, 3), v123)
assert_identical(vector([1, 2, 3]), v123)
assert_identical(vector((1, 2, 3)), v123)
# ...with vectorized operators...
# math operators
assert_identical(-v123, c(-1, -2, -3))
assert_identical(abs(-v123), v123)
assert_identical(v123 + v123, c(2, 4, 6))
assert_identical(v123 * v123, c(1, 4, 9))
assert_identical(v123 - v123, rep(0, 3))
assert_identical(v123 / v123, rep(1.0, 3))
assert_identical(v123 // v123, rep(1, 3))
assert_identical(v123 ** v123, c(1, 4, 27))
# equivalence operators
assert all(v123 == v123)
assert all(v123 <= v123)
assert all(v123 >= v123)
assert none(v123 != v123)
assert none(v123 < v123)
assert none(v123 > v123)
# __invert__ has a new usage (like ! from R)
assert none(~(v123 == v123))
assert all(~(v123 != v123))
# __eq__ recycling weirdness (gives warning in R)
assert_identical(v123 == v12, c(True, True, False))
assert_identical(c(1, 2, 1) == v12, c(True, True, True))
# __getitem__
assert_identical(v123[0], 1)
assert_identical(v123[0, 1], v12)
assert_identical(v123[(0, 1)], v12)
assert_identical(v123[[0, 1]], v12)
assert_identical(v123[c(0, 1)], v12)
assert_identical(v123[1, 0, 1], c(2, 1, 2))
assert_identical(v123[1, NA, 1], c(2, NA, 2))
assert_identical(v123[:2], v12)
# __getitem__ with casting
assert_identical(v123.astype(str)[0], "1")
assert_identical(v123.astype(str)[c(0, 1)], v12.astype(str))
# no named vectors! :-(
# tuple is immutable, must override __new__,
# but cannot set attributes on tuples anyways
# ...some tuple methods...
assert_eq(v123.index(1), 0)
assert_eq(v123.count(1), 1)
# ...and some cool methods
assert_identical((v123 + 0.1).round(), v123)
assert_identical(v123.reduce(operator.add), sum(v123))
assert_identical(v123.accumulate(operator.add), c(1, 3, 6))
assert_identical(v123.filter(lambda x: x < 3), v12)
assert_identical(v123.filter(lambda x: x < 3, invert=True), c(3))
assert_eq(v123.tapply(v123 % 2 == 0, c), {False: c(1, 3), True: c(2)})
assert_identical(v123.astype(str), c("1", "2", "3"))
# transform works on the whole vector
assert_identical(v213.transform(sort), v123)
assert_identical(v213.sort(), v123)
# different kinds of apply methods,
# which can take multiple inputs
assert_identical(v123.apply_map(int.__mul__, v123), v123 * v123)
assert all(v123.astype(str).apply(str.isdigit))
assert all(v123.astype(str).apply_map(str.isdigit))
assert all(v123.astype(str).thread(str.isdigit))
assert all(v123.astype(str).thread_map(str.isdigit))
# assert all(v123.astype(str).proc_map(str.isdigit))
# apply partial function
assert_identical(
vstr.apply(str.split, " "),
vector((["python"], ["a", "rat"], ["pirate"]))
)
# a ridiculuous example
tmp = (
v123
.astype(str)
.apply_map(str.replace, v123.astype(str), v213.astype(str))
.astype(int)
)
assert_identical(tmp, v213)
# pipe method can take multiple functions,
# to continuously transform the results
assert_identical(
v123.pipe(str, ord, part(int.__add__, 1), chr, int),
c(2, 3, 4)
)
# BASE TESTS
# na_safe makes a function safe to use with NAs
assert_eq(na_safe(round)(NA), NA)
# c(ombine) will make new vectors,
# or flatten vectors for concatenation
assert_identical(c(), vector())
assert_identical(c(1, c(2, 3)), c(1, 2, 3))
# isiter: is the object iterable?
assert all(map(isiter, (dict(), list(), set(), str(), tuple())))
assert none(map(isiter, (bool(), complex(), float(), int())))
# isnonstriter: is it a non-str iterable?
assert all(map(isnonstriter, (dict(), list(), set(), tuple())))
assert none(map(isnonstriter, (bool(), complex(), float(), int(), str())))
# is_na is both singular and multiple,
# NA is not None
assert_error(is_na, TypeError)
assert_eq(is_na(NA), True)
assert_eq(is_na(None), False)
assert_identical(is_na(c(NA, None)), c(True, False))
# any_na is both singular and multiple,
assert_error(is_na, TypeError)
assert_eq(any_na(NA), True)
assert_eq(any_na(None), False)
assert_eq(any_na(c(NA, None)), True)
# is_none is both singular and multiple,
# None is not NA,
# this is a little different than is.null in R
assert_error(is_none, TypeError)
assert_eq(is_none(None), True)
assert_eq(is_none(NA), False)
assert_identical(is_none(c(None, NA)), c(True, False))
# rep can repeat a vector
assert_is(rep(), None)
assert_identical(rep(0, 2), c(0, 0))
assert_identical(rep("hi", 2), c("hi", "hi"))
assert_identical(rep(v12), v12)
assert_identical(rep(v12, times=2), c(v12, v12))
assert_identical(rep(v12, times=2), rep(v12, 4, 4))
assert_identical(rep(v12, each=2), c(1, 1, 2, 2))
assert_identical(rep(v12, each=3, length_out=4), c(1, 1, 1, 2))
# seq is used for making ranges,
# it is inclusive, unlike Python's range
vec = c(0.00, 0.25, 0.50, 0.75, 1.00)
assert_identical(seq(3), v123)
assert_identical(seq(0), c(1, 0))
assert_identical(seq(0, 9), c(range(10)))
assert_identical(seq(0, 1, 0.25), vec)
assert_identical(seq(1, 0, 0.25), vec[::-1])
assert_identical(seq(0, 1, length_out=5), vec)
assert_identical(seq(3, 1), c(3, 2, 1))
# sort returns a sorted vector
assert_error(sort, TypeError)
assert_identical(sort(v213), v123)
# order returns the sorted indices based on the data
assert_is(order(), None)
assert_identical(order(v213), c(1, 0, 2))
assert_identical(v213[order(v213)], sort(v213))
# paste is useful for joining str vectors...
assert_identical(paste(), vector())
assert_identical(paste(0), c("0"))
assert_identical(paste(0, 1), c("0 1"))
assert_identical(paste(c(0, 1)), c("0", "1"))
assert_identical(paste(0, c(1, 1)), c("0 1", "0 1"))
# ...use the collapse argument to return a str
assert_identical(paste(collapse="."), "")
assert_identical(paste(0, 1, collapse="."), "0 1")
assert_identical(paste(c(0, 1), collapse="."), "0.1")
assert_identical(paste(0, c(1, 1), collapse="."), "0 1.0 1")
# ifelse works along a vector
assert_error(ifelse, TypeError)
assert_identical(v123 > 1, c(False, True, True))
assert_identical(ifelse(v123 > 1, v123, NA), c(NA, 2, 3))
assert_identical(ifelse(v123 > 1, 1, 0), c(0, 1, 1))
# match up a vector with an index,
# NA if not found in index
assert_error(match, TypeError)
assert_error(lambda: match(v123), TypeError)
assert_identical(match(v123, 0), rep(NA, 3))
assert_identical(match(v123, 0, "hi"), rep("hi", 3))
assert_identical(match(v123, 1), c(0, NA, NA))
assert_identical(match(1, seq(3, 1)), c(2))
assert_identical(match(v12, v123), c(0, 1))
assert_identical(match(v123, v12), c(0, 1, NA))
assert_identical(match(c(1, 2, NA), c(1, NA)), c(0, NA, 1))
# which gives the indices of True values
assert_error(which, TypeError)
assert_identical(which(True), c(0))
assert_identical(which(v123 > 1), c(1, 2))
assert_identical(which(c(True, NA, False, True)), c(0, 3))
# unique dedupes a vector, preserving first-seen order (NA is kept once)
# Fixed copy-paste error: this section previously re-tested `which`
# instead of `unique` (every section opens by asserting its own function
# raises TypeError when called with no arguments).
assert_error(unique, TypeError)
assert_identical(unique(rep(v213, times=2)), v213)
assert_identical(unique(rep(v213, each=2)), v213)
assert_identical(unique(c(1, NA, 1, NA, 2)), c(1, NA, 2))
# grepl
assert_error(grepl, TypeError)
assert_identical(grepl(ptrn, vstr), c(False, True, True))
# grep
assert_error(grep, TypeError)
assert_identical(grep(ptrn, vstr), c(1, 2))
# gsub
assert_error(gsub, TypeError)
assert_identical(gsub(ptrn, "", vstr), c("python", " rt", "pirte"))
assert_identical(gsub(ptrn, "", vstr, 1), c("python", " rat", "pirte"))
# gextr
assert_error(gextr, TypeError)
assert_identical(gextr(ptrn, vstr), c(NA, "a", "a"))
# gextrall
assert_error(gextrall, TypeError)
assert_identical(gextrall(ptrn, vstr), vector((c(), rep("a", 2), c("a"))))
# sqrt is a vectorized sqrt function
assert_error(sqrt, TypeError)
assert_eq(sqrt(4), 2)
assert_identical(sqrt(c(1, NA, 4)).round(), c(1, NA, 2))
assert_identical(sqrt(v123).round(6), (v123 ** (1 / 2)).round(6))
# mean (you know this one)
assert_error(mean, TypeError)
assert_identical(mean(c()), NA)
assert_identical(mean(c(1, 2)), 1.5)
assert_identical(mean(c(1, 2, NA)), NA)
assert_identical(mean(c(1, 2, NA), na_rm=True), 1.5)
# rmin
assert_eq(rmin(), Inf)
assert_eq(rmin(1, c(NA, 2)), NA)
assert_eq(rmin(1, NA, 2, na_rm=True), 1)
# rmax
assert_eq(rmax(), -Inf)
assert_eq(rmax(1, c(NA, 2)), NA)
assert_eq(rmax(1, NA, 2, na_rm=True), 2)
# exp
assert_error(exp, TypeError)
assert_eq(exp(0), 1)
assert_identical(exp(c(0, NA)).astype(int), c(1, NA))
# log
assert_error(log, TypeError)
assert_eq(log(1), 0)
assert_identical(log(c(1, NA)).astype(int), c(0, NA))
# sin
assert_error(sin, TypeError)
assert_eq(sin(0), 0)
assert_identical(sin(c(0, NA)), c(0.0, NA))
# tan
assert_error(tan, TypeError)
assert_eq(tan(0), 0)
assert_identical(tan(c(0, NA)), c(0.0, NA))
# cos
assert_error(cos, TypeError)
assert_eq(cos(0), 1)
assert_identical(cos(c(0, NA)), c(1.0, NA))
# acos
assert_error(acos, TypeError)
assert_eq(acos(1), 0)
assert_identical(acos(c(1, NA)), c(0.0, NA))
# asin
assert_error(asin, TypeError)
assert_eq(asin(0), 0)
assert_identical(asin(c(0, NA)), c(0.0, NA))
# atan
assert_error(atan, TypeError)
assert_eq(atan(0), 0)
assert_identical(atan(c(0, NA)), c(0.0, NA))
# STATS TESTS
# na_omit
assert_error(na_omit, TypeError)
assert_identical(na_omit(0), c(0))
assert_identical(na_omit(NA), c())
assert_identical(na_omit(c(1, 2, NA)), v12)
# median
assert_error(median, TypeError)
assert_identical(median(0), 0)
assert_identical(median(NA), NA)
assert_identical(median(v12), 1.5)
assert_identical(median(v123), 2)
assert_identical(median(c(1, 2, NA)), NA)
assert_identical(median(c(1, 2, NA), na_rm=True), 1.5)
# ss, sum of squares
assert_error(ss, TypeError)
assert_eq(ss(0), 0)
assert_eq(ss(c(0)), 0)
assert_eq(ss(v123), 14)
assert_eq(ss(c(1, 2, NA)), NA)
assert_eq(ss(c(1, 2, NA), na_rm=True), 5)
# dev(iance)
assert_error(dev, TypeError)
assert_identical(dev(v123).astype(int), c(-1, 0, 1))
assert_identical(dev(v123, median).astype(int), c(-1, 0, 1))
assert_identical(dev(c(1, 2, 3, NA)), NA)
assert_identical(dev(c(1, 2, 3, NA), na_rm=True), c(-1.0, 0.0, 1.0))
# var(iance)
assert_error(var, TypeError)
assert_eq(var(0), NA)
assert_eq(var(c(0)), NA)
assert_eq(var(v123), 1)
| |
servers
elv_ctx = context.elevated()
if port_id:
relay_servers = self._get_port_relay_servers(elv_ctx, port_id)
else:
relay_servers = []
filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router_id]}
ports = self.get_ports(elv_ctx, filters=filters)
for port in ports:
port_relay_servers = self._get_port_relay_servers(
elv_ctx, port['id'], network_id=port['network_id'])
if port_relay_servers:
relay_servers.extend(port_relay_servers)
# Add rules to allow dhcp traffic relay servers
if relay_servers:
# if it is a single port, the source/dest is this logical switch
if port_id:
nsx_ls_id, _nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
context.session, port_id)
port_target = [{'target_type': 'LogicalSwitch',
'target_id': nsx_ls_id}]
else:
port_target = None
# translate the relay server ips to the firewall format
relay_target = []
if self.fwaas_callbacks:
relay_target = (self.fwaas_callbacks.fwaas_driver.
translate_addresses_to_target(set(relay_servers),
self.plugin_type()))
dhcp_services = self._get_port_relay_services()
# ingress rule
extra_rules.append({
'display_name': "DHCP Relay ingress traffic",
'action': nsxlib_consts.FW_ACTION_ALLOW,
'sources': relay_target,
'destinations': port_target,
'services': dhcp_services,
'direction': 'IN'})
# egress rule
extra_rules.append({
'display_name': "DHCP Relay egress traffic",
'action': nsxlib_consts.FW_ACTION_ALLOW,
'destinations': relay_target,
'sources': port_target,
'services': dhcp_services,
'direction': 'OUT'})
# VPN rules:
vpn_plugin = directory.get_plugin(plugin_const.VPN)
if vpn_plugin:
vpn_driver = vpn_plugin.drivers[vpn_plugin.default_provider]
vpn_rules = (
vpn_driver._generate_ipsecvpn_firewall_rules(
self.plugin_type(), context, router_id=router_id))
if vpn_rules:
extra_rules.extend(vpn_rules)
return extra_rules
def _get_ports_and_address_groups(self, context, router_id, network_id,
                                  exclude_sub_ids=None):
    """Return (ports, address_groups) for a router's interfaces on a net.

    Ports whose fixed IPs all belong to subnets in *exclude_sub_ids* are
    filtered out.  For every fixed IP of each remaining port an address
    group dict with 'ip_addresses' and 'prefix_length' keys is built from
    the owning subnet's CIDR.
    """
    exclude_sub_ids = exclude_sub_ids or []
    candidates = self._get_router_interface_ports_by_network(
        context, router_id, network_id)
    # Keep only ports with at least one fixed IP outside the excluded
    # subnets (ports with no fixed IPs are dropped as well).
    ports = []
    for candidate in candidates:
        if any(ip['subnet_id'] not in exclude_sub_ids
               for ip in candidate['fixed_ips']):
            ports.append(candidate)
    address_groups = []
    for port in ports:
        for fixed_ip in port['fixed_ips']:
            subnet = self.get_subnet(context, fixed_ip['subnet_id'])
            prefixlen = str(netaddr.IPNetwork(subnet['cidr']).prefixlen)
            address_groups.append({
                'ip_addresses': [fixed_ip['ip_address']],
                'prefix_length': prefixlen,
            })
    return (ports, address_groups)
@nsx_plugin_common.api_replay_mode_wrapper
def add_router_interface(self, context, router_id, interface_info):
    """Attach an interface (subnet or port) to a neutron router.

    Validates the request (no external networks; VLAN-backed networks
    require a gateway; no overlap with the GW subnet), creates the
    neutron-side interface, then mirrors it on the NSX backend: logical
    router port, optional DHCP relay, SNAT/NO_DNAT rules and a firewall
    update.  If any backend step fails, the neutron interface is rolled
    back before re-raising.
    """
    # In case on dual stack, neutron creates a separate interface per
    # IP version
    subnet = self._get_interface_subnet(context, interface_info)
    network_id = self._get_interface_network_id(context, interface_info,
                                                subnet=subnet)
    extern_net = self._network_is_external(context, network_id)
    overlay_net = self._is_overlay_network(context, network_id)
    router_db = self._get_router(context, router_id)
    gw_network_id = (router_db.gw_port.network_id if router_db.gw_port
                     else None)
    # Lock per network so concurrent attachments to the same network are
    # validated serially.
    with locking.LockManager.get_lock(str(network_id)):
        # disallow more than one subnets belong to same network being
        # attached to routers
        self._validate_multiple_subnets_routers(
            context, router_id, network_id, subnet)
        # A router interface cannot be an external network
        if extern_net:
            msg = _("An external network cannot be attached as "
                    "an interface to a router")
            raise n_exc.InvalidInput(error_message=msg)
        # Non overlay networks should be configured with a centralized
        # router, which is allowed only if GW network is attached
        if not overlay_net and not gw_network_id:
            msg = _("A router attached to a VLAN backed network "
                    "must have an external network assigned")
            raise n_exc.InvalidInput(error_message=msg)
        # Interface subnets cannot overlap with the GW external subnet
        self._validate_gw_overlap_interfaces(context, gw_network_id,
                                             [network_id])
    # Update the interface of the neutron router
    info = super(NsxV3Plugin, self).add_router_interface(
        context, router_id, interface_info)
    # Everything below touches the NSX backend; failures roll back the
    # neutron interface created above.
    try:
        nsx_net_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
            context.session, info['port_id'])
        # If it is a no-snat router, interface address scope must be the
        # same as the gateways
        self._validate_interface_address_scope(context, router_db, subnet)
        nsx_router_id = nsx_db.get_nsx_router_id(context.session,
                                                 router_id)
        _ports, address_groups = self._get_ports_and_address_groups(
            context, router_id, network_id)
        display_name = utils.get_name_and_uuid(
            subnet['name'] or 'subnet', subnet['id'])
        tags = self.nsxlib.build_v3_tags_payload(
            {'id': info['port_id'], 'project_id': context.project_id},
            resource_type='os-neutron-rport-id',
            project_name=context.tenant_name)
        tags.append({'scope': 'os-subnet-id', 'tag': subnet['id']})
        # Add the dhcp relay service to the NSX interface
        relay_service = None
        if subnet['enable_dhcp']:
            net_az = self.get_network_az_by_net_id(context, network_id)
            relay_service = net_az.dhcp_relay_service
        resource_type = (None if overlay_net else
                         nsxlib_consts.LROUTERPORT_CENTRALIZED)
        # Centralized router port require a service router
        if resource_type == nsxlib_consts.LROUTERPORT_CENTRALIZED:
            if not self.verify_sr_at_backend(
                    context, router_id):
                self.create_service_router(
                    context, router_id, router=router_db)
        # Validate the TZ of the new subnet match the one of the router
        tier0_uuid = self._get_tier0_uuid_by_router(context.elevated(),
                                                    router_db)
        self._validate_router_tz(context.elevated(), tier0_uuid, [subnet])
        # create the interface ports on the NSX
        self.nsxlib.router.create_logical_router_intf_port_by_ls_id(
            logical_router_id=nsx_router_id,
            display_name=display_name,
            tags=tags,
            ls_id=nsx_net_id,
            logical_switch_port_id=nsx_port_id,
            address_groups=address_groups,
            relay_service_uuid=relay_service,
            resource_type=resource_type)
        if router_db.gw_port and not router_db.enable_snat:
            # TODO(berlin): Announce the subnet on tier0 if enable_snat
            # is False
            pass
        if not cfg.CONF.nsx_v3.native_dhcp_metadata:
            # Ensure the NSX logical router has a connection to a
            # 'metadata access' network (with a proxy listening on
            # its DHCP port), by creating it if needed.
            nsx_rpc.handle_router_metadata_access(self, context, router_id,
                                                  interface=info)
        # add the SNAT/NO_DNAT rules for this interface
        if router_db.enable_snat and gw_network_id:
            if router_db.gw_port.get('fixed_ips'):
                gw_address_scope = self._get_network_address_scope(
                    context, gw_network_id)
                for fip in router_db.gw_port['fixed_ips']:
                    gw_ip = fip['ip_address']
                    self._add_subnet_snat_rule(
                        context, router_id, nsx_router_id,
                        subnet, gw_address_scope, gw_ip)
            self._add_subnet_no_dnat_rule(context, nsx_router_id, subnet)
        # update firewall rules
        self.update_router_firewall(context, router_id)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error("Neutron failed to add_router_interface on "
                      "router %s, and would try to rollback.",
                      router_id)
            try:
                self.remove_router_interface(
                    context, router_id, interface_info)
            except Exception:
                # rollback also failed
                LOG.error("Neutron rollback failed to remove router "
                          "interface on router %s.", router_id)
    return info
def remove_router_interface(self, context, router_id, interface_info):
    """Detach an interface (identified by port or by subnet) from a router.

    Resolves the interface port/subnet, updates or deletes the matching
    NSX logical router port, removes the subnet's SNAT/NO_DNAT rules,
    then removes the neutron-side interface and refreshes metadata
    access and firewall state.
    """
    self._validate_interface_info(interface_info, for_removal=True)
    # Get the interface port & subnet
    subnet = None
    subnet_id = None
    port_id = None
    network_id = None
    if 'port_id' in interface_info:
        port_id = interface_info['port_id']
        # Find subnet_id which is needed for removing the SNAT rule
        port = self._get_port(context, port_id)
        network_id = port['network_id']
        if port.get('fixed_ips'):
            for fip in port['fixed_ips']:
                subnet_id = fip['subnet_id']
                subnet_obj = self._get_subnet_object(context, subnet_id)
                subnet = self._make_subnet_dict(subnet_obj, fields=None,
                                                context=context)
                self._confirm_router_interface_not_in_use(
                    context, router_id, subnet)
        # The port must really be an interface of this router.
        if not (port['device_owner'] in const.ROUTER_INTERFACE_OWNERS and
                port['device_id'] == router_id):
            raise l3_exc.RouterInterfaceNotFound(
                router_id=router_id, port_id=port_id)
    elif 'subnet_id' in interface_info:
        subnet_id = interface_info['subnet_id']
        subnet_obj = self._get_subnet_object(context, subnet_id)
        subnet = self._make_subnet_dict(subnet_obj, fields=None,
                                        context=context)
        self._confirm_router_interface_not_in_use(
            context, router_id, subnet)
        network_id = subnet['network_id']
        # Locate the router port carrying an IP on this subnet.
        ports = self._get_router_interface_ports_by_network(
            context, router_id, network_id)
        for p in ports:
            fip_subnet_ids = [fixed_ip['subnet_id']
                              for fixed_ip in p['fixed_ips']]
            if subnet_id in fip_subnet_ids:
                port_id = p['id']
                break
        else:
            raise l3_exc.RouterInterfaceNotFoundForSubnet(
                router_id=router_id, subnet_id=subnet_id)
    try:
        # TODO(berlin): Revocate announce the subnet on tier0 if
        # enable_snat is False
        router_db = self._get_router(context, router_id)
        if router_db.gw_port and not router_db.enable_snat:
            pass
        nsx_net_id, _nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
            context.session, port_id)
        if not subnet:
            subnet_obj = self._get_subnet_object(context, subnet_id)
            subnet = self._make_subnet_dict(subnet_obj, fields=None,
                                            context=context)
        # Remaining interface ports on this network (excluding the one
        # being removed) decide whether the NSX router port is updated
        # or deleted outright.
        ports, address_groups = self._get_ports_and_address_groups(
            context, router_id, network_id,
            exclude_sub_ids=[subnet_id])
        nsx_router_id = nsx_db.get_nsx_router_id(
            context.session, router_id)
        if len(ports) >= 1:
            new_using_port_id = ports[0]['id']
            _net_id, new_nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
                context.session, new_using_port_id)
            self.nsxlib.logical_router_port.update_by_lswitch_id(
                nsx_router_id, nsx_net_id,
                linked_logical_switch_port_id={
                    'target_id': new_nsx_port_id},
                subnets=address_groups)
        else:
            self.nsxlib.logical_router_port.delete_by_lswitch_id(
                nsx_net_id)
        # try to delete the SNAT/NO_DNAT rules of this subnet
        if router_db.gw_port and router_db.enable_snat:
            if router_db.gw_port.get('fixed_ips'):
                for fixed_ip in router_db.gw_port['fixed_ips']:
                    gw_ip = fixed_ip['ip_address']
                    self.nsxlib.router.delete_gw_snat_rule_by_source(
                        nsx_router_id, gw_ip, subnet['cidr'],
                        skip_not_found=True)
            self._del_subnet_no_dnat_rule(context, nsx_router_id, subnet)
    except nsx_lib_exc.ResourceNotFound:
        # Backend object already gone: log and continue with the neutron
        # side of the removal.
        LOG.error("router port on router %(router_id)s for net "
                  "%(net_id)s not found at the backend",
                  {'router_id': router_id,
                   'net_id': network_id})
    # inform the FWaaS that interface port was removed
    if self.fwaas_callbacks:
        self.fwaas_callbacks.delete_port(context, port_id)
    info = super(NsxV3Plugin, self).remove_router_interface(
        context, router_id, interface_info)
    if not cfg.CONF.nsx_v3.native_dhcp_metadata:
        # Ensure the connection to the 'metadata access network' is removed
        # (with the network) if this is the last DHCP-disabled subnet on
        # the router.
        nsx_rpc.handle_router_metadata_access(self, context, router_id)
    # update firewall rules
    self.update_router_firewall(context, router_id)
    return info
def _update_lb_vip(self, port, vip_address):
    """Point the LB virtual server(s) owned by this port at *vip_address*.

    Only the virtual server VIP is rewritten on the NSX backend; no NAT
    rules are added here.
    """
    lb_id = port['device_id']
    # Octavia prefixes the LB id inside device_id; strip it for the tag.
    prefix = oct_const.DEVICE_ID_PREFIX
    if lb_id.startswith(prefix):
        lb_id = lb_id[len(prefix):]
    search = self.nsxlib.search_by_tags(
        tags=[{'scope': 'os-lbaas-lb-id', 'tag': lb_id}],
        resource_type='LbVirtualServer')
    results = search['results']
    if not results:
        return
    vs_client = self.nsxlib.load_balancer.virtual_server
    for vs in results:
        vs_client.update_virtual_server_with_vip(vs['id'], vip_address)
def create_floatingip(self, context, floatingip):
    """Create a floating IP and wire it up on the NSX backend.

    For LB VIP ports the virtual server VIP is updated instead of adding
    NAT rules; for regular ports FIP NAT rules are created.  On a backend
    failure the just-created neutron floating IP is deleted before
    re-raising.
    """
    # First do some validations
    fip_data = floatingip['floatingip']
    port_id = fip_data.get('port_id')
    if port_id:
        port_data = self.get_port(context, port_id)
        self._assert_on_assoc_floatingip_to_special_ports(
            fip_data, port_data)
    # create the neutron fip
    new_fip = self._create_floating_ip_wrapper(context, floatingip)
    router_id = new_fip['router_id']
    if not router_id:
        # Unassociated FIP: nothing to configure on the backend yet.
        return new_fip
    if port_id:
        device_owner = port_data.get('device_owner')
        fip_address = new_fip['floating_ip_address']
        if (device_owner == const.DEVICE_OWNER_LOADBALANCERV2 or
            device_owner == oct_const.DEVICE_OWNER_OCTAVIA or
            device_owner == lb_const.VMWARE_LB_VIP_OWNER):
            # LB VIP port: rewrite the virtual server VIP, skip NAT rules.
            try:
                self._update_lb_vip(port_data, fip_address)
            except nsx_lib_exc.ManagerError:
                with excutils.save_and_reraise_exception():
                    super(NsxV3Plugin, self).delete_floatingip(
                        context, new_fip['id'])
            return new_fip
    try:
        nsx_router_id = nsx_db.get_nsx_router_id(context.session,
                                                 router_id)
        self.nsxlib.router.add_fip_nat_rules(
            nsx_router_id, new_fip['floating_ip_address'],
            new_fip['fixed_ip_address'],
            bypass_firewall=False)
    except nsx_lib_exc.ManagerError:
        with excutils.save_and_reraise_exception():
            self.delete_floatingip(context, new_fip['id'])
    return new_fip
def delete_floatingip(self, context, fip_id):
fip = self.get_floatingip(context, fip_id)
router_id = fip['router_id']
port_id = fip['port_id']
is_lb_port = False
if port_id:
port_data = self.get_port(context, port_id)
device_owner = port_data.get('device_owner')
fixed_ip_address = fip['fixed_ip_address']
if (device_owner == const.DEVICE_OWNER_LOADBALANCERV2 or
device_owner == oct_const.DEVICE_OWNER_OCTAVIA or
device_owner == lb_const.VMWARE_LB_VIP_OWNER):
# If the port is LB VIP port, after deleting the FIP,
# update the virtual server VIP back to fixed IP.
is_lb_port = True
try:
self._update_lb_vip(port_data, fixed_ip_address)
except nsx_lib_exc.ManagerError as e:
LOG.error("Exception when updating vip ip_address"
"on vip_port %(port)s: %(err)s",
{'port': port_id, 'err': e})
if router_id and not is_lb_port:
try:
nsx_router_id = nsx_db.get_nsx_router_id(context.session,
router_id)
self.nsxlib.router.delete_fip_nat_rules(
nsx_router_id, fip['floating_ip_address'],
fip['fixed_ip_address'])
except nsx_lib_exc.ResourceNotFound:
LOG.warning("Backend NAT rules for | |
# mapper.py
# <NAME>
# Lines to sentences code partially taken/modified from project with Dr. Harrison
import re
import sys
from unidecode import unidecode
import zipimport
importer = zipimport.zipimporter('nltk.mod')
nltk = importer.load_module('nltk')
try:
from nltk.corpus import stopwords
except Exception as e:
print('stopwords import failure: [{}]'.format(repr(e)))
exit()
from nltk import word_tokenize
nltk.data.path+=["."]
try:
stops = set(stopwords.words('english'))
except Exception as e:
print('stopwords failure: [{}]'.format(repr(e)))
exit()
# Pre-defined contractions to keep
CONTRACTIONS = [
'n\'t', '\'ll', '\'s', '\'re', '\'m', '\'d', '\'ve'
]
# Sentence parsing methods
# Removes unneeded punctuation in the data:
# hyphens between words, ex: A -- B -> A B
# commas, double quotes, hyphens, colons, semi-colons
def normalizeString(s):
    """Lower-case *s* and blank out punctuation the pipeline ignores.

    Hyphen runs touching a single/double quote are collapsed into the
    quote itself (so the quote survives without a stray space); the
    remaining parentheses, asterisks, underscores, slashes, backslashes,
    hyphens, commas and colons each become a single space.
    """
    s = s.strip().lower()
    # Hyphen run against a quote: keep the quote, drop the hyphens.
    if re.search(r'((\'|\")\-+|\-+(\'|\"))', s):
        s = re.sub(r'(\'\-+\s*|\s*\-+\')', '\'', s)
        s = re.sub(r'(\"\-+\s*|\s*\-+\")', '\"', s)
    # Everything else on the blacklist is replaced with a space.
    return re.sub(r'[\(\)\*\_\\\/\-,:]', ' ', s)
# Handle multiple periods by determining if they end a sentence or not
# If they end a sentence, replace with a single period
# If they do not, replace with whitespace
def replace_multiple_periods(lines):
    """Resolve runs of two or more periods ('..', '...') in *lines*.

    A run becomes a single period when the following word appears to
    start a new sentence (capitalised and not "I"/"I'..."), and is
    removed otherwise; the next word, the next line, or end-of-input is
    used as evidence.  *lines* is rewritten in place and returned.
    """
    # Helper function
    def replace_periods(word, replace_text=''):
        # Collapse any run of 2+ periods in *word* to *replace_text*.
        return re.sub(r'[\.]{2,}', replace_text, word)
    for i, line in enumerate(lines):
        words = line.strip().split()
        for j, word in enumerate(words):
            # Separate sentences if multiple periods exist based on the following conditions:
            # First char after periods is capital, and not "I" or "I'*".
            if re.search(r'[\.]{2,}', word):  # Checks [word]..[.*]
                # Check if this is multiple periods separating two words, no spaces
                if re.search(r'[\.]{2,}\w', word):
                    word_split = re.sub(r'[\.]{2,}', ' ', word).split()
                    word_builder = word_split[0]
                    # Combine the words together, deciding if there should be a period or space
                    for k in range(len(word_split)):
                        if k+1 < len(word_split):
                            # If the word following the periods is capital, and is not "I", "I'*", etc, add a period
                            if re.search(r'^[A-Z]', word_split[k+1]) and not re.search(r'^(I\'.+|I[.!?]*$)', word_split[k+1]):
                                word_builder += '. ' + word_split[k+1]
                            else:
                                word_builder += ' ' + word_split[k+1]
                    # Replace our current word
                    word = word_builder
                # Check the next word
                elif j+1 < len(words):
                    # First char is capital, and not "I", "I'*", etc
                    if re.search(r"^[A-Z]", words[j+1]) and not re.search(r"^(I'.+|I[.!?]*$)", words[j+1]):
                        # Replace "..+" with "."
                        word = replace_periods(word, '.')
                    else:
                        # Replace "..+" with " "
                        word = replace_periods(word)
                else:
                    # Check the next line
                    if i+1 < len(lines):
                        next_words = lines[i+1].strip().split()
                        if len(next_words) > 0:
                            # First char is capital, or begins with dialogue (double quotes)
                            if re.search(r'^[A-Z\"]', next_words[0]):
                                # Replace "..+" with "."
                                word = replace_periods(word, '.')
                            else:
                                # Next sentence begins with a lower case letter, let's assume the sentence continues
                                word = replace_periods(word)
                        else:
                            # Empty line next, assume the sentence ended
                            word = replace_periods(word, '.')
                    else:
                        # EOL, and EOF, replace "..+" with " "
                        word = replace_periods(word)
            elif re.search(r'^(\'|\")?[\.]{2,}\w', word):
                # NOTE(review): this branch looks unreachable -- any word it
                # matches is already caught by the broader `[\.]{2,}` search
                # above; confirm before removing.
                word = replace_periods(word)
            words[j] = word
        lines[i] = ' '.join(words)
    return lines
# Splits character dialogue and "inner" dialogue into separate sentences
def parse_dialogue(lines):
    """Split quoted dialogue out of *lines* into separate sentences.

    Tracks an *inDialogue* flag while scanning word-by-word; opening and
    closing double quotes terminate the sentence being accumulated so
    that dialogue and narration never share a sentence.  Quote characters
    themselves are stripped from the output.  Lines without any double
    quote pass through unchanged.
    """
    sentences = []
    currSentence = []
    inDialogue = False
    # NOTE(review): inInnerDialogue is assigned but never used below.
    inInnerDialogue = False
    for i, line in enumerate(lines):
        # Correction for inDialogue bool
        if inDialogue and len(line) == 0:
            inDialogue = False
        if '"' in line:
            line_words = line.split()
            for j, word in enumerate(line_words):
                # Search for dialogue to find double quotes
                if '"' in word:
                    # Special case where double quote is separated as its own token
                    if word == '"':
                        if inDialogue:
                            if len(currSentence) > 0:
                                sentences.append(' '.join(currSentence))
                                currSentence = []
                            inDialogue = False
                        else:
                            # We are not in dialogue, will need to do a few checks
                            # Check if the previous word contained a double quote
                            prev_line_words = []
                            if i > 0:
                                prev_line_words = lines[i-1].split()
                            if (j > 0 and '"' in line_words[j-1]) or \
                                    (j == 0 and len(prev_line_words) > 0 and '"' in prev_line_words[-1]):
                                # Current sentence should be empty, but if not, let's empty it
                                if len(currSentence) > 0:
                                    sentences.append(' '.join(currSentence))
                                    currSentence = []
                                inDialogue = True  # Set dialogue for next sentence
                            #elif (j+1 < len(words) and '"' in words[j+1]) or (i+1 < len(words) and '"' in line[i+1].split()[0]):
                            else:
                                # Previous word did not have double quotes, next does.
                                # End current sentence, inDialogue already set to False
                                if len(currSentence) > 0:
                                    sentences.append(' '.join(currSentence))
                                    currSentence = []
                    elif re.match(r'^\".+\"$', word):
                        # A fully-quoted single word, e.g. "word"
                        if not inDialogue:
                            # Remove double quotes from word
                            word = re.sub('"', '', word)
                            # End current sentence if it exists
                            if len(currSentence) > 0:
                                sentences.append(' '.join(currSentence))
                                currSentence = []
                            # Add the single word (with possible terminator split)
                            sentences.append(' '.join(separate_terminator(word)))
                        else:
                            inDialogue = False  # Correct dialogue errors?
                    elif re.match(r'^\".+$', word):
                        # Starting dialogue
                        inDialogue = True
                        # An existing sentence was not terminated before entering dialogue
                        if len(currSentence) > 0:
                            sentences.append(' '.join(currSentence))  # Add the sentence
                        word = re.sub('"', '', word)
                        currSentence = [word]  # Start a new sentence with the dialogue
                    elif re.match(r'^.+\"$', word):
                        # End of dialogue
                        inDialogue = False
                        # Check if the last word ended in one of our sentence terminators
                        word = re.sub('"', '', word)
                        words = separate_terminator(word)
                        currSentence += words
                        sentences.append(' '.join(currSentence))  # end the current sentence
                        currSentence = []
                    else:
                        # Remove double quote and add word
                        word = re.sub('"', '', word)
                        currSentence.append(word)
                else:
                    currSentence.append(word)
        else:
            sentences.append(line)
    return sentences
# Separates a terminator with a space to be its own token in the sentence
def separate_terminator(text):
    """Split a trailing sentence terminator off *text* as a token list.

    '!' and '?' are kept as their own token; '.' and ';' are dropped
    entirely.  Text without a trailing terminator (or empty text) comes
    back unchanged as a one-item list.
    """
    if not text or text[-1] not in '.!?;':
        return [text]
    body, mark = text[:-1], text[-1]
    return [body, mark] if mark in '!?' else [body]
# Removes all punctuation from the given lines
def remove_punctuation(lines, replace_str=''):
    """Strip residual punctuation from every line, in place.

    Single quotes are deliberately not covered here: they are a special
    case handled earlier in the pipeline.  The input list is mutated and
    also returned for convenience.
    """
    punct = re.compile(r'[\"\(\)\-\_\+\=\&\^\$\%\#\@\~\`\.\;\:\\\/\<\>]')
    for idx in range(len(lines)):
        lines[idx] = punct.sub(replace_str, lines[idx])
    return lines
def convert_lines_to_sentences(filename, chunk_index, lines):
    """Clean a chunk of raw text into sentences and emit them for the
    reducer as 'filename<TAB>chunk_index<TAB>sent1\\sent2\\...'.

    Pipeline: ASCII-fold -> punkt sentence split -> multi-period repair ->
    normalization -> nltk word tokenization -> contraction preservation ->
    dialogue splitting -> final punctuation removal.
    """
    # Convert unicode characters to their nearest ASCII equivalent
    lines = [unidecode(line) for line in lines]
    # Use nltk to parse into sentences
    sentences = []
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    # Backslash is used as a line separator because it cannot appear in
    # the cleaned text; it is split back out immediately after.
    for sentence in tokenizer.tokenize('\\'.join(lines)):
        # Replace backslashes with spaces
        sentences.append(' '.join(sentence.split('\\')))
    # Parse multiple periods to determine if they end a sentence
    sentences = replace_multiple_periods(sentences)
    # Replace double single quotes with double quotes
    sentences = [re.sub(r'\'\'', '"', sentence) for sentence in sentences]
    # Lower and normalize the text
    sentences = [normalizeString(line) for line in sentences]
    # Tokenize the sentences
    for i, sentence in enumerate(sentences):
        sentence = ' '.join(word_tokenize(sentence))
        sentence = re.sub(r'(\'\'|\`\`)', '"', sentence)
        sentence = re.sub(r'\s\'\s', ' " ', sentence)
        # Append backslashes on terminators to split on later
        sentence = re.sub(r'\s\!\s', ' !\\ ', sentence)
        sentence = re.sub(r'\s\?\s', ' ?\\ ', sentence)
        sentence = re.sub(r'\s[\.\;]\s', ' \\ ', sentence)
        sentences[i] = sentence
    # Replace known contractions with asterisks for placeholders
    for i, sentence in enumerate(sentences):
        if '\'' in sentence:
            words = sentence.split()
            for j, word in enumerate(words):
                if '\'' in word:
                    # Check if we have a known contraction, otherwise remove the single quote
                    if word in CONTRACTIONS:
                        words[j] = re.sub(r'\'', '*', word)
                    else:
                        words[j] = re.sub(r'\'', '', word)
            sentences[i] = ' '.join(words)
    # Separate dialogue into its own sentences
    sentences = parse_dialogue(sentences)
    # Remove any leftover single quotes
    #sentences = [re.sub(r'\'', '', sentence) for sentence in sentences]
    # Need one final pass to split on backslashes gathered in terminator section
    final_sentences = []
    for sentence in sentences:
        final_sentences += [s.strip() for s in sentence.split('\\') if len(s) > 0]
    # Restore placeholder asterisks to single quotes, then a final pass
    # to remove all remaining punctuation
    final_sentences = remove_punctuation([re.sub(r'\*', '\'', sentence) for sentence in final_sentences])
    # Output to reducer
    print('{}\t{}\t{}'.format(filename, chunk_index, '\\'.join(final_sentences)))
def main(argv):
local = False
if len(argv) > 0 and argv[0] == '--local':
local = True
#lines = []
#filename = None
line_chunk = []
chunk_count = 0
curr_file = None
for line in sys.stdin:
line = line.strip()
if len(line) > 0:
try:
# Check if this is being ran outside of Hadoop
line_split = None
if not local:
# We're on Hadoop
line_split = line.split()
| |
#! /usr/bin/env python
"""Experimental module for Python 2 compatibility.
The purpose of this module is to enable Pyslet to be gradually converted
to Python3 while retaining support for Python 2.7 and 2.6. This fills a
similar role to the six module but the idea is to minimise the number of
required fixes by making the Pyslet code as Python3 native as
possible."""
import io
import sys
import types
py2 = sys.hexversion < 0x03000000
"""Unfortunately, sometimes you just need to know if you are running
under Python 2, this flag provides a common way for version specific
code to check. (There are multiple ways of checking, this flag just
makes it easier to find places in Pyslet where we care.)"""
_sys_codec = sys.getdefaultencoding()
if py2:
suffix = ''
def u8(arg):
if isinstance(arg, types.UnicodeType):
try:
arg.encode('ascii')
except UnicodeEncodeError:
raise ValueError("u8: use binary literal for non-ASCII data")
return arg
try:
return arg.decode('utf-8')
except UnicodeDecodeError:
raise ValueError("u8: invalid utf-8 string, did you mean ul?")
def ul(arg):
if isinstance(arg, types.UnicodeType):
try:
arg.encode('latin-1')
except UnicodeEncodeError:
raise ValueError("ul: cannot be used with non-latin data")
return arg
return arg.decode('latin-1')
def is_string(arg):
return isinstance(arg, types.StringTypes)
is_text = is_string
def force_text(arg):
if isinstance(arg, str):
return unicode(arg)
elif isinstance(arg, unicode):
return arg
else:
raise TypeError("Expected str or unicode: %s" % repr(arg))
def is_ascii(arg):
return isinstance(arg, str)
def force_ascii(arg):
if isinstance(arg, unicode):
return arg.encode('ascii')
elif isinstance(arg, str):
return arg
else:
raise TypeError("Expected str or unicode: %s" % repr(arg))
to_text = unicode
def is_unicode(arg):
return isinstance(arg, unicode)
def character(arg):
if isinstance(arg, str):
if len(arg) == 1:
return unichr(ord(arg[0]))
else:
raise ValueError('Expected single character')
else:
return unichr(arg)
join_characters = unicode('').join
uempty = unicode('')
uspace = unicode(' ')
def force_bytes(arg):
if isinstance(arg, unicode):
return arg.encode('ascii')
return arg
to_bytes = str
def is_byte(arg):
return isinstance(arg, bytes) and len(arg) == 1
def byte(arg):
if isinstance(arg, str):
if len(arg) == 1:
return arg
else:
raise ValueError('Expected single character')
elif isinstance(arg, types.UnicodeType):
if len(arg) == 1:
arg = ord(arg)
# fall through to int tests
else:
raise ValueError('Expected single character')
elif isinstance(arg, bytearray):
if len(arg) == 1:
return chr(arg[0])
else:
raise ValueError('Expected single byte')
if isinstance(arg, (int, long)):
if arg >= 0 and arg <= 255:
return chr(arg)
else:
raise ValueError("Value out of range 0..255")
else:
raise TypeError('Expectected character or int')
byte_value = ord
join_bytes = b''.join
def byte_to_bstr(arg):
return arg
buffer2 = types.BufferType
long2 = long
range3 = xrange
def dict_keys(d):
return d.iterkeys()
def dict_values(d):
return d.itervalues()
def dict_items(d):
return d.iteritems()
import __builtin__ as builtins
input3 = raw_input
from urllib import ( # noqa : unused import
urlencode,
urlopen,
quote as urlquote
)
from urlparse import parse_qs # noqa : unused import
else:
suffix = '3'
def u8(arg):
    """Emulate a u8 (UTF-8) literal: return *arg* as text.

    bytes are decoded as UTF-8; str is accepted only when pure ASCII
    (non-ASCII text must be written as a binary literal so that the
    source-file encoding cannot corrupt it).
    """
    if isinstance(arg, bytes):
        return arg.decode('utf-8')
    if not isinstance(arg, str):
        raise TypeError
    # only works for ascii
    try:
        arg.encode('ascii')
    except UnicodeEncodeError:
        raise ValueError("u8: use binary literal for non-ASCII data")
    return arg
def ul(arg):
    """Emulate a latin-1 literal: return *arg* as text.

    bytes are decoded as latin-1; str must itself be representable in
    latin-1, otherwise a binary literal is required.
    """
    if isinstance(arg, bytes):
        return arg.decode('latin-1')
    if not isinstance(arg, str):
        raise TypeError
    try:
        arg.encode('latin-1')
    except UnicodeEncodeError:
        raise ValueError("ul: cannot be used with non-latin data")
    return arg
def is_string(arg):
return isinstance(arg, (str, bytes))
def is_text(arg):
return isinstance(arg, str)
def force_text(arg):
if not isinstance(arg, str):
raise TypeError("Expected str: %s" % repr(arg))
return arg
def is_ascii(arg):
if isinstance(arg, str):
arg.encode('ascii')
return True
else:
return False
def force_ascii(arg):
if isinstance(arg, bytes):
return arg.decode('ascii')
elif isinstance(arg, str):
return arg
else:
raise TypeError("Expected str: %s" % repr(arg))
def to_text(arg):
    """Coerce *arg* to text: str passes through, bytes must decode as
    ASCII, anything else is converted with str()."""
    if isinstance(arg, str):
        return arg
    return arg.decode('ascii') if isinstance(arg, bytes) else str(arg)
def is_unicode(arg):
return isinstance(arg, str)
character = chr
join_characters = ''.join
uempty = ''
uspace = ' '
def force_bytes(arg):
    """Return *arg* as bytes, encoding str as ASCII; anything else
    passes through untouched."""
    return arg.encode('ascii') if isinstance(arg, str) else arg
def to_bytes(arg):
if hasattr(arg, '__bytes__'):
return arg.__bytes__()
else:
return str(arg).encode('ascii')
def is_byte(arg):
return isinstance(arg, int) and 0 <= arg <= 255
def byte(arg):
    """Return the byte value (int in 0..255) represented by *arg*.

    Accepts a single-character str (via ord), a length-1 bytes or
    bytearray, or an int already in range.  Raises ValueError for wrong
    lengths or out-of-range values and TypeError for unsupported types.

    Fix: corrected the misspelled error message ('Expectected' ->
    'Expected').
    """
    if isinstance(arg, str):
        if len(arg) == 1:
            arg = ord(arg)
        else:
            raise ValueError('Expected single character')
    elif isinstance(arg, (bytes, bytearray)):
        if len(arg) == 1:
            arg = arg[0]
        else:
            raise ValueError('Expected single byte')
    if isinstance(arg, int):
        if arg >= 0 and arg <= 255:
            return arg
        else:
            raise ValueError("Value out of range 0..255")
    else:
        raise TypeError('Expected character or int')
byte_value = int
join_bytes = bytes
def byte_to_bstr(arg):
    """Return a length-1 bytes string holding byte value *arg*."""
    return bytes((arg,))
buffer2 = bytes  # Python 2 buffer() stand-in
long2 = int  # Python 2 long is unified with int in Python 3
range3 = range  # Python 3's lazy range (xrange in Python 2)
def dict_keys(d):
    """Return an iterable of d's keys (a dict view in Python 3)."""
    return d.keys()
def dict_values(d):
    """Return an iterable of d's values (a dict view in Python 3)."""
    return d.values()
def dict_items(d):
    """Return an iterable of d's (key, value) pairs (a dict view in Python 3)."""
    return d.items()
# Python-3 names for modules/functions that were renamed from Python 2.
import builtins  # noqa : unused import
input3 = input  # Python 3 input() already behaves like py2 raw_input()
from urllib.request import urlopen  # noqa : unused import
from urllib.parse import (  # noqa : unused import
    parse_qs,
    quote as urlquote,
    urlencode
)
class UnicodeMixin(object):
    """Mixin class to handle string formatting
    For classes that need to define a __unicode__ method of their own
    this class is used to ensure that the correct behaviour exists
    in Python versions 2 and 3.
    The mixin class implements __str__ based on your existing (required)
    __unicode__ or (optional) __bytes__ implementation. In python 2,
    the output of __unicode__ is encoded using the default system
    encoding if no __bytes__ implementation is provided. This may well
    generate errors but that seems more appropriate as it will catch
    cases where the *str* function has been used instead of
    :py:func:`to_text`."""
    # __str__ is selected once, at class-creation time, based on the
    # module-level `py2` flag (defined earlier in this file).
    if py2:
        def __str__(self):  # noqa
            # Python 2 __str__ must return bytes: prefer an explicit
            # __bytes__ hook, otherwise encode __unicode__ output with
            # the module-level default codec `_sys_codec`.
            if hasattr(self, '__bytes__'):
                return self.__bytes__()
            else:
                return self.__unicode__().encode(_sys_codec)
    else:
        def __str__(self):  # noqa
            # Python 3 __str__ returns text directly.
            return self.__unicode__()
class SortableMixin(object):
    """Mixin providing rich comparisons from a single sort key.

    Works identically in Python 2 and Python 3.  Subclasses must define
    :meth:`sortkey`, returning a sortable key representing the instance.
    They may also override the :meth:`otherkey` hook to support ordering
    against other object types.

    The mixin then supplies __eq__, __ne__, __lt__, __le__, __gt__ and
    __ge__ in terms of those two keys."""

    def sortkey(self):
        """Return the key used to sort this instance.

        The default returns NotImplemented, which makes every comparison
        function return NotImplemented too."""
        return NotImplemented

    def otherkey(self, other):
        """Return the key used when comparing against *other*.

        Unlike :meth:`sortkey` this receives an arbitrary object and
        returns either the key to compare against or NotImplemented when
        ordering with that type is unsupported.  The default accepts only
        instances of the same class as *self* (returning their
        sortkey())."""
        if not isinstance(other, self.__class__):
            return NotImplemented
        return other.sortkey()

    def __eq__(self, other):
        a, b = self.sortkey(), self.otherkey(other)
        if NotImplemented in (a, b):
            return NotImplemented
        return a == b

    def __ne__(self, other):
        a, b = self.sortkey(), self.otherkey(other)
        if NotImplemented in (a, b):
            return NotImplemented
        return a != b

    def __lt__(self, other):
        a, b = self.sortkey(), self.otherkey(other)
        if NotImplemented in (a, b):
            if py2:
                # Python 2 would silently order unrelated types; raise
                # to mimic Python 3 behaviour.
                raise TypeError("unorderable types: %s < %s" %
                                (repr(self), repr(other)))
            return NotImplemented
        return a < b

    def __le__(self, other):
        a, b = self.sortkey(), self.otherkey(other)
        if NotImplemented in (a, b):
            if py2:
                raise TypeError("unorderable types: %s <= %s" %
                                (repr(self), repr(other)))
            return NotImplemented
        return a <= b

    def __gt__(self, other):
        a, b = self.sortkey(), self.otherkey(other)
        if NotImplemented in (a, b):
            if py2:
                raise TypeError("unorderable types: %s > %s" %
                                (repr(self), repr(other)))
            return NotImplemented
        return a > b

    def __ge__(self, other):
        a, b = self.sortkey(), self.otherkey(other)
        if NotImplemented in (a, b):
            if py2:
                raise TypeError("unorderable types: %s >= %s" %
                                (repr(self), repr(other)))
            return NotImplemented
        return a >= b
class CmpMixin(object):
"""Mixin class for handling comparisons
For compatibility with Python 2's __cmp__ method this class defines
an implementation of __eq__, __lt__, __le__, __gt__, __ge__ that are
redirected to __cmp__. These are the minimum methods required for
Python's rich comparisons.
In Python 2 it also provides an implementation of __ne__ that simply
inverts the result of __eq__. (This is not required in Python 3.)"""
def __eq__(self, other):
return self.__cmp__(other) == 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
| |
self).__init__(**kwargs)
self.id = kwargs['id']
self.entities = kwargs['entities']
self.warnings = kwargs['warnings']
self.statistics = kwargs.get('statistics', None)
class DocumentError(msrest.serialization.Model):
    """A per-document error entry returned by the service.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Document Id.
    :type id: str
    :param error: Required. Document Error.
    :type error: ~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError
    """

    # Fields msrest requires before serialization.
    _validation = {
        'id': {'required': True},
        'error': {'required': True},
    }

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'error': {'key': 'error', 'type': 'TextAnalyticsError'},
    }

    def __init__(self, **kwargs):
        super(DocumentError, self).__init__(**kwargs)
        self.id = kwargs['id']
        self.error = kwargs['error']
class DocumentHealthcareEntities(msrest.serialization.Model):
    """Healthcare entities and relations recognized in one document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Healthcare entities.
    :type entities: list[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareEntity]
    :param relations: Required. Healthcare entity relations.
    :type relations: list[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareRelation]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings: list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Only present when showStats=true was specified in the request.
    :type statistics: ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    # Fields msrest requires before serialization.
    _validation = {
        'id': {'required': True},
        'entities': {'required': True},
        'relations': {'required': True},
        'warnings': {'required': True},
    }

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'entities': {'key': 'entities', 'type': '[HealthcareEntity]'},
        'relations': {'key': 'relations', 'type': '[HealthcareRelation]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, **kwargs):
        super(DocumentHealthcareEntities, self).__init__(**kwargs)
        self.id = kwargs['id']
        self.entities = kwargs['entities']
        self.relations = kwargs['relations']
        self.warnings = kwargs['warnings']
        self.statistics = kwargs.get('statistics')  # optional
class DocumentKeyPhrases(msrest.serialization.Model):
    """Key phrases extracted from one document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param key_phrases: Required. A list of representative words or phrases. The number of key
     phrases returned is proportional to the number of words in the input document.
    :type key_phrases: list[str]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings: list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Only present when showStats=true was specified in the request.
    :type statistics: ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    # Fields msrest requires before serialization.
    _validation = {
        'id': {'required': True},
        'key_phrases': {'required': True},
        'warnings': {'required': True},
    }

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'key_phrases': {'key': 'keyPhrases', 'type': '[str]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, **kwargs):
        super(DocumentKeyPhrases, self).__init__(**kwargs)
        self.id = kwargs['id']
        self.key_phrases = kwargs['key_phrases']
        self.warnings = kwargs['warnings']
        self.statistics = kwargs.get('statistics')  # optional
class DocumentLanguage(msrest.serialization.Model):
    """Language detection result for one document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param detected_language: Required. Detected Language.
    :type detected_language: ~azure.ai.textanalytics.v3_1_preview_3.models.DetectedLanguage
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings: list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Only present when showStats=true was specified in the request.
    :type statistics: ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    # Fields msrest requires before serialization.
    _validation = {
        'id': {'required': True},
        'detected_language': {'required': True},
        'warnings': {'required': True},
    }

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'detected_language': {'key': 'detectedLanguage', 'type': 'DetectedLanguage'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, **kwargs):
        super(DocumentLanguage, self).__init__(**kwargs)
        self.id = kwargs['id']
        self.detected_language = kwargs['detected_language']
        self.warnings = kwargs['warnings']
        self.statistics = kwargs.get('statistics')  # optional
class DocumentLinkedEntities(msrest.serialization.Model):
    """Linked (well-known) entities recognized in one document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Recognized well-known entities in the document.
    :type entities: list[~azure.ai.textanalytics.v3_1_preview_3.models.LinkedEntity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings: list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Only present when showStats=true was specified in the request.
    :type statistics: ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    # Fields msrest requires before serialization.
    _validation = {
        'id': {'required': True},
        'entities': {'required': True},
        'warnings': {'required': True},
    }

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'entities': {'key': 'entities', 'type': '[LinkedEntity]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, **kwargs):
        super(DocumentLinkedEntities, self).__init__(**kwargs)
        self.id = kwargs['id']
        self.entities = kwargs['entities']
        self.warnings = kwargs['warnings']
        self.statistics = kwargs.get('statistics')  # optional
class DocumentSentiment(msrest.serialization.Model):
    """Document-level sentiment analysis result.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param sentiment: Required. Predicted sentiment for document (Negative, Neutral, Positive, or
     Mixed). Possible values include: "positive", "neutral", "negative", "mixed".
    :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentSentimentValue
    :param statistics: Only present when showStats=true was specified in the request.
    :type statistics: ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    :param confidence_scores: Required. Document level sentiment confidence scores between 0 and 1
     for each sentiment class.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_3.models.SentimentConfidenceScorePerLabel
    :param sentences: Required. Sentence level sentiment analysis.
    :type sentences: list[~azure.ai.textanalytics.v3_1_preview_3.models.SentenceSentiment]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings: list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    """

    # Fields msrest requires before serialization.
    _validation = {
        'id': {'required': True},
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'sentences': {'required': True},
        'warnings': {'required': True},
    }

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'sentiment': {'key': 'sentiment', 'type': 'str'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
        'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
    }

    def __init__(self, **kwargs):
        super(DocumentSentiment, self).__init__(**kwargs)
        self.id = kwargs['id']
        self.sentiment = kwargs['sentiment']
        self.statistics = kwargs.get('statistics')  # optional
        self.confidence_scores = kwargs['confidence_scores']
        self.sentences = kwargs['sentences']
        self.warnings = kwargs['warnings']
class DocumentStatistics(msrest.serialization.Model):
    """Statistics about one document's payload (present when showStats=true).

    All required parameters must be populated in order to send to Azure.

    :param characters_count: Required. Number of text elements recognized in the document.
    :type characters_count: int
    :param transactions_count: Required. Number of transactions for the document.
    :type transactions_count: int
    """

    # Fields msrest requires before serialization.
    _validation = {
        'characters_count': {'required': True},
        'transactions_count': {'required': True},
    }

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'characters_count': {'key': 'charactersCount', 'type': 'int'},
        'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(DocumentStatistics, self).__init__(**kwargs)
        self.characters_count = kwargs['characters_count']
        self.transactions_count = kwargs['transactions_count']
class EntitiesResult(msrest.serialization.Model):
    """Top-level entity-recognition response.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document.
    :type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentEntities]
    :param errors: Required. Errors by document id.
    :type errors: list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Only present when showStats=true was specified in the request.
    :type statistics: ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used for scoring.
    :type model_version: str
    """

    # Fields msrest requires before serialization.
    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentEntities]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EntitiesResult, self).__init__(**kwargs)
        self.documents = kwargs['documents']
        self.errors = kwargs['errors']
        self.statistics = kwargs.get('statistics')  # optional
        self.model_version = kwargs['model_version']
class EntitiesTask(msrest.serialization.Model):
    """An entity-recognition task request.

    :param parameters: Optional task parameters.
    :type parameters: ~azure.ai.textanalytics.v3_1_preview_3.models.EntitiesTaskParameters
    """

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'parameters': {'key': 'parameters', 'type': 'EntitiesTaskParameters'},
    }

    def __init__(self, **kwargs):
        super(EntitiesTask, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')  # optional
class EntitiesTaskParameters(msrest.serialization.Model):
    """Parameters for an entity-recognition task.

    :param model_version: Model version to score with. Default value: "latest".
    :type model_version: str
    :param string_index_type: Possible values include: "TextElements_v8", "UnicodeCodePoint",
     "Utf16CodeUnit". Default value: "TextElements_v8".
    :type string_index_type: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexTypeResponse
    """

    # Python attribute -> wire key and (de)serialization type.
    _attribute_map = {
        'model_version': {'key': 'model-version', 'type': 'str'},
        'string_index_type': {'key': 'stringIndexType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EntitiesTaskParameters, self).__init__(**kwargs)
        self.model_version = kwargs.get('model_version', "latest")
        self.string_index_type = kwargs.get('string_index_type', "TextElements_v8")
class Entity(msrest.serialization.Model):
"""Entity.
All required parameters must be populated in order to send to Azure.
:param text: Required. Entity text as appears in the request.
:type text: str
:param category: Required. Entity type.
:type category: str
:param subcategory: (Optional) Entity sub type.
:type subcategory: str
:param offset: Required. Start position for the entity text. Use of different 'stringIndexType'
values can affect the offset returned.
:type offset: int
:param length: Required. Length for the entity text. Use of different 'stringIndexType' values
can affect the length returned.
:type length: int
:param confidence_score: Required. Confidence score between 0 and 1 of the extracted entity.
:type confidence_score: float
"""
_validation = {
'text': {'required': True},
'category': {'required': True},
'offset': {'required': True},
'length': {'required': True},
'confidence_score': {'required': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'subcategory': {'key': 'subcategory', 'type': 'str'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(Entity, self).__init__(**kwargs)
self.text = kwargs['text']
self.category = kwargs['category']
self.subcategory = kwargs.get('subcategory', None)
| |
data by before
performing the comparison.
source_by (list): A list of field names to sort the source data by before
performing the comparison (uses ``by`` if not given).
target_by (list): A list of field names to sort the target data by before
performing the comparison (uses ``by`` if not given).
Examples:
Asserts that the value of the source field ``name`` is copied to the
target field ``slayer_name`` and ``num`` is copied to ``vampires_slain``::
case.then(
then.fields_are_copied(
scenario.sources['main'],
scenario.targets['main'],
[
('name', 'slayer_name'),
('num', 'vampires_slain')
],
by=['id']
)
)
'''
source_fields = list({m[0] for m in mapping})
target_fields = list({m[1] for m in mapping})
source_by = source_by or by
target_by = target_by or by or source_by
def _then(case):
if source_by:
expected = source[case].data.sort_values(source_by)\
.reset_index(drop=True)[source_fields]
actual = target[case].data.sort_values(target_by)\
.reset_index(drop=True)[target_fields]
else:
expected = source[case].data[source_fields]
actual = target[case].data[target_fields]
for source_field, target_field in mapping:
try:
assert_series_equal(actual[target_field], expected[source_field],
check_names=False, check_dtype=False,
check_datetimelike_compat=True)
except AssertionError as err:
raise AssertionError(
'Source field {} not copied to target field {}: {}'.format(
source_field, target_field, err
)
)
return _then
@staticmethod
def target_does_not_have_fields(target, fields):
    '''
    Asserts that none of the given fields appear on the target.

    Args:
        target (scenario.targets[]): The scenario target data subject.
        fields (list): Field names that must be absent from the target.

    Examples:
        Asserts that the scenario target ``main`` does not have the fields
        ``sparkle_factor`` or ``is_werewolf``::

            case.then(
                then.target_does_not_have_fields(
                    scenario.targets['main'],
                    ['sparkle_factor', 'is_werewolf']
                )
            )
    '''
    def _then(case):
        # Intersection of forbidden fields with the columns actually present
        present = set(fields) & set(target[case].data.columns)
        if present:
            raise AssertionError(
                "The fields '{}' were not expected to be found in the target".format(
                    present
                )
            )
    return _then
@staticmethod
def target_has_fields(target, fields, only=False):
    '''
    Asserts that the target has certain fields.

    Args:
        target (scenario.targets[]): The scenario target data subject.
        fields (list): Field names that must be present on the target.
        only (bool): When True, also fail if the target carries any field
            beyond those listed.

    Examples:
        Asserts that the scenario target ``main`` only has the fields
        ``name`` and ``vampires_slain``::

            case.then(
                then.target_has_fields(
                    scenario.targets['main'],
                    ['name', 'vampires_slain'],
                    only=True
                )
            )
    '''
    def _then(case):
        actual = set(target[case].data.columns)
        wanted = set(fields)
        missing_fields = wanted - actual
        extra_fields = actual - wanted
        if missing_fields:
            raise AssertionError(
                "The fields '{}' were expected to be found in the target".format(
                    missing_fields
                )
            )
        if only and extra_fields:
            raise AssertionError(
                "The fields '{}' were not expected to be found on the target".format(
                    extra_fields
                )
            )
    return _then
@staticmethod
def target_is_empty(target):
    '''
    Asserts that the target has no records.

    Args:
        target (scenario.targets[]): The scenario target data subject.

    Examples:
        Asserts that the scenario target ``errors`` does not have any records::

            case.then(then.target_is_empty(scenario.targets['errors'])
    '''
    def _then(case):
        count = len(target[case].data)
        if count:
            raise AssertionError(
                'Expecting target to be empty, found {} records'.format(
                    count
                )
            )
    return _then
@staticmethod
def target_has_n_records(target, expected_n):
    '''
    Asserts that the target has a specific number of records.

    Args:
        target (scenario.targets[]): The scenario target data subject.
        expected_n (int): The number of records expected.

    Examples:
        Asserts that the scenario target ``main`` has 3 records::

            case.then(then.target_has_n_records(scenario.targets['main'], 3))
    '''
    def _then(case):
        nrecords = len(target[case].data)
        if nrecords != expected_n:
            # Failure message typo fixed ("Excpecting" -> "Expecting")
            raise AssertionError(
                'Expecting target to have {} records, found {} records'.format(
                    expected_n, nrecords
                )
            )
    return _then
class SubscriptableLambda: #pylint: disable=too-few-public-methods
    '''
    Wraps a callable so it can be invoked with subscript syntax.

    Args:
        func (func): Any Python function you want to access as subscriptable.

    Examples:
        In the simplest form::

            sl = SubscriptableLambda(lambda v: v + 10)
            sl[3] #=> 13

        Handy in tests for embedding computed values into example data
        tables via ``str.format`` field access::

            payload = pt.SubscriptableLambda(lambda ref: json.dumps({
                'external_id': scenario.factories['students']['external_id'][ref]
            }))

            ex_create_response = pemi.data.Table(
                """
                | payload             |
                | -                   |
                | {payload[created1]} |
                | {payload[created2]} |
                """.format(payload=payload),
                schema=pemi.Schema(payload=JsonField())
            )
    '''

    def __init__(self, func):
        # The callable invoked on every subscript access.
        self.func = func

    def __getitem__(self, key=None):
        # Delegate item access straight to the wrapped callable.
        return self.func(key)
# Maps a field on a scenario target back to the factory (and factory field)
# that generated it, so target rows can be grouped by originating case.
CaseCollector = namedtuple('CaseCollector', ['subject_field', 'factory', 'factory_field'])

class DuplicateScenarioError(Exception): pass  # scenario name reused within a module

class DuplicateCaseError(Exception): pass  # case name reused within a scenario
class Scenario: #pylint: disable=too-many-instance-attributes, too-many-arguments
    '''
    A **Scenario** describes the transformation that is being tested
    (a Pemi pipe), and the data sources and targets that are the
    subject of the test.  Scenarios are composed of one more **Cases**.

    Args:
        name (str): The name of a scenario.  Multiple scenarios may be present in a file,
          but the names of each scenario must be unique.
        pipe (pemi.Pipe): The Pemi pipe that is the main subject of the test.  Test
          data will be provided to the sources of the pipe (defined below), and the pipe
          will be executed.  Note that the pipe is only executed once per scenario.
        flow (str): The name of the method used to execute the pipe (default: `flow`).
        factories(dict): A dictionary where the keys are the names of factories and
          the values are FactoryBoy factories that will be used to generate unique keys.
        sources (dict): A dictionary where the keys are the names of sources that will
          be the subjects of testing.  The values are methods that accept the pipe
          referenced in the **pipe** argument above and return the data subject that
          will be used as a source.
        targets (dict): A dictionary where the keys are the names of targets that will
          be the subjects of testing.  The values are methods that accept the pipe
          referenced in the **pipe** argument above and return the data subject that
          will be used as a target.
        target_case_collectors (dict): A dictionary where the keys are the names of the
          targets that will be the subjects of testing.  The values are ``CaseCollector``
          objects that tie a field in the scenario's target to the field in a given factory.
          Every named target needs to have a case collector.
        selector (str): A string representing a regular expression.  Any case names that
          **do not** match this regex will be excluded from testing.
        usefixtures (str): Name of a Pytest fixture to use for the scenario.  Often used
          for database setup/teardown options.
    '''
    def __init__(self, name, pipe, factories, sources, targets, target_case_collectors,
                 flow='flow', selector=None, usefixtures=None):
        self.name = name
        self.pipe = pipe
        self.flow = flow
        # Wrap the user-supplied factories/subject builders in the pemi
        # test-harness types (KeyFactory / TestSubject).
        self.factories = self._setup_factories(factories)
        self.sources = self._setup_subjects(sources)
        self.targets = self._setup_subjects(targets)
        self.target_case_collectors = target_case_collectors
        self.selector = selector
        self.usefixtures = usefixtures or []
        self.cases = OrderedDict()
        self.has_run = False

    def _register_test(self, module_name):
        # Dynamically create a pytest test function and attach it to the
        # calling module so pytest's collector discovers it.
        @pytest.mark.usefixtures(*self.usefixtures)
        @pytest.mark.scenario(self, self.selector)
        def test_scenario(case):
            case.assert_case()

        test_attr = 'testScenario:{}'.format(self.name)
        if hasattr(sys.modules[module_name], test_attr):
            raise DuplicateScenarioError(
                'Scenario names must be unique to a module. '
                'Duplicate detected: {}'.format(test_attr)
            )
        setattr(sys.modules[module_name], test_attr, test_scenario)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        # Inspect the caller's frame to find the module that used this
        # scenario as a context manager, and register the test there.
        current_frame = inspect.currentframe()
        calling_module = inspect.getouterframes(current_frame)[1].frame.f_locals['__name__']
        self._register_test(calling_module)

    @staticmethod
    def _setup_factories(factories):
        return {name: KeyFactory(factory) for name, factory in factories.items()}

    def _setup_subjects(self, subjects):
        # Each subject builder receives the pipe and returns the data subject.
        return {name: TestSubject(subject(self.pipe), name) for name, subject in subjects.items()}

    def case(self, name):
        """Create (and register) a new named Case for this scenario."""
        if name in self.cases:
            raise DuplicateCaseError(
                'Case names must be unique to a scenario. '
                'Duplicate case detected in scenario "{}": "{}"'.format(
                    self.cases[name].scenario.name, name
                )
            )
        case = Case(name, self)
        # Advance every key factory so the new case gets distinct keys.
        for factory in self.factories.values():
            factory.next_case(case)
        self.cases[name] = case
        return case

    def run(self):
        """Set up all cases, execute the pipe once, and collect results."""
        if self.has_run:
            return
        self.setup_cases()
        getattr(self.pipe, self.flow)()
        self.collect_results()
        self.has_run = True

    def setup_cases(self):
        for case in self.cases.values():
            case.setup()
        self.load_test_data()

    def load_test_data(self):
        # Concatenate each source's per-case frames into one frame and
        # hand it to the underlying pipe subject.
        for _, source in self.sources.items():
            if len(source.data.values()) > 0:
                all_case_data = pd.concat(
                    [cd.data for cd in source.data.values()],
                    ignore_index=True,
                    sort=False
                )
                source.subject.from_pd(all_case_data)

    def collect_results(self):
        # Split each target's output back into per-case frames using the
        # configured case collector.
        for target_name, target in self.targets.items():
            all_target_data = target.subject.to_pd()
            for case in self.cases.values():
                # Default every case to an empty frame with the right columns.
                target[case].data = pd.DataFrame(columns=all_target_data.columns)

            try:
                collector = self.target_case_collectors[target_name]
            except KeyError:
                raise NoTargetCaseCollectorError(
                    'No case collector defined for target {}'.format(target_name)
                )

            if len(all_target_data) > 0:
                # Map each output row back to the case whose factory key produced it.
                all_target_data['__pemi_case__'] = all_target_data[collector.subject_field].apply(
                    self.factories[collector.factory].case_lookup(collector)
                )
                # NOTE(review): grouping by a list key yields 1-tuples as the
                # group name in newer pandas versions — confirm the pinned
                # pandas version before upgrading.
                for case, df in all_target_data.groupby(['__pemi_case__'], sort=False):
                    del df['__pemi_case__']
                    target[case].data = df
class Case:
'''
A **Case** is a set of **Conditions** and **Expectations** that describe
how the pipe is supposed to function.
Args:
name (str): The name of the case. The names of cases within a scenario must be unique.
scenario (pemi.testing.Scenario): The scenario object that this | |
#
# Utility functions for loading and creating and solving circuits defined by
# netlists
#
import numpy as np
import codecs
import pandas as pd
import liionpack as lp
import os
import pybamm
import scipy as sp
from lcapy import Circuit
def read_netlist(
    filepath,
    Ri=None,
    Rc=None,
    Rb=None,
    Rt=None,
    I=None,
    V=None,
):
    r"""
    Load a netlist saved by LTSpice with format: Descriptor Node1 Node2 Value.

    Any lines starting with * are comments and . are commands so they are
    ignored.  Nodes begin with N so that prefix is removed.  Open ended
    components are not allowed and their nodes start with NC (no-connection).

    Args:
        filepath (str): Path to netlist circuit file '.cir' or '.txt'.
        Ri (float): Internal resistance ($\Omega$).
        Rc (float): Connection resistance ($\Omega$).
        Rb (float): Busbar resistance ($\Omega$).
        Rt (float): Terminal connection resistance ($\Omega$).
        I (float): Current (A).
        V (float): Initial battery voltage (V).

    Returns:
        pandas.DataFrame:
            A netlist of circuit elements with format desc, node1, node2, value.

    Raises:
        FileNotFoundError: If the file extension is neither ".cir" nor ".txt".
    """
    # Default to LTSpice's ".cir" extension when none is supplied
    if "." not in filepath:
        filepath += ".cir"
    # Fall back to the bundled circuit directory when the path is not found
    if not os.path.isfile(filepath):
        candidate = os.path.join(lp.CIRCUIT_DIR, filepath)
        if os.path.isfile(candidate):
            filepath = candidate
    if ".cir" in filepath:
        # LTSpice saves netlists as UTF-16LE text
        with codecs.open(filepath, "r", "utf-16LE") as fd:
            Lines = fd.readlines()
    elif ".txt" in filepath:
        with open(filepath, "r") as f:
            Lines = f.readlines()
    else:
        raise FileNotFoundError(
            'Please supply a valid file with extension ".cir" or ".txt"'
        )
    # Drop comment (*) and command (.) lines, then tokenize on spaces
    Lines = [line.strip("\n").split(" ") for line in Lines if line[0] not in ["*", "."]]
    # NOTE: fields longer than 16 characters are truncated by this dtype
    Lines = np.array(Lines, dtype="<U16")
    # Columns are: desc | node1 | node2 | value
    desc = Lines[:, 0]
    node1 = Lines[:, 1]
    node2 = Lines[:, 2]
    value = Lines[:, 3]
    try:
        value = value.astype(float)
    except ValueError:
        # Keep values as strings when any entry is non-numeric
        pass
    # Strip the leading "N" from node labels and convert to ints
    node1 = np.array([x.strip("N") for x in node1], dtype=int)
    node2 = np.array([x.strip("N") for x in node2], dtype=int)
    netlist = pd.DataFrame(
        {"desc": desc, "node1": node1, "node2": node2, "value": value}
    )

    # Populate the values based on the descriptions (element types).
    # "Rl" elements deliberately share the busbar resistance Rb.
    for name, val in [
        ("Ri", Ri),
        ("Rc", Rc),
        ("Rb", Rb),
        ("Rl", Rb),
        ("Rt", Rt),
        ("I", I),
        ("V", V),
    ]:
        if val is not None:
            # netlist["desc"] consists of entries like 'Ri13'; this mask
            # matches descriptors containing the element-type prefix
            name_map = netlist["desc"].str.find(name) > -1
            # then allocates the value to the corresponding indices
            netlist.loc[name_map, ("value")] = val
    lp.logger.notice("netlist " + filepath + " loaded")
    return netlist
def setup_circuit(
    Np=1,
    Ns=1,
    Ri=1e-2,
    Rc=1e-2,
    Rb=1e-4,
    Rt=1e-5,
    I=80.0,
    V=4.2,
    plot=False,
    terminals="left",
):
    """
    Define a netlist from a number of batteries in parallel and series

    Args:
        Np (int): Number of batteries in parallel.
        Ns (int): Number of batteries in series.
        Ri (float): Internal resistance ($\Omega$).
        Rc (float): Connection resistance ($\Omega$).
        Rb (float): Busbar resistance ($\Omega$).
        Rt (float): Terminal connection resistance ($\Omega$).
        I (float): Current (A).
        V (float): Initial battery voltage (V).
        plot (bool): Plot the circuit.
        terminals (string): The location of the terminals. Can be "left", "right",
            "left-right", "right-left" or a list or array of node integers.

    Returns:
        pandas.DataFrame:
            A netlist of circuit elements with format desc, node1, node2, value.
    """
    # One grid column per parallel string; each series cell occupies 3 rows
    # (interconnect resistor, voltage source, internal resistor), plus one
    # extra row so that both busbars are included.
    Nc = Np
    Nr = Ns * 3 + 1
    grid = np.arange(Nc * Nr).reshape([Nr, Nc])
    coords = np.indices(grid.shape)
    y = coords[0, :, :]  # row coordinate of every grid node
    x = coords[1, :, :]  # column coordinate of every grid node
    # make contiguous now instead of later when netlist is done as very slow
    mask = np.ones([Nr, Nc], dtype=bool)
    # This is no longer needed as terminals connect directly to battery
    # Guess could also add a terminal connection resistor though
    # mask[1:-1, 0] = False
    # Node numbering starts at 1 (node 0 is reserved for the current-source
    # return below), hence the "- 1" whenever nodes index into x/y.
    grid[mask] = np.arange(np.sum(mask)) + 1
    x = x[mask].flatten()
    y = y[mask].flatten()
    grid[~mask] = -2  # These should never be used
    # grid is a Nr x Nc matrix
    # 1st column is terminals only
    # 1st and last rows are busbars
    # Other rows alternate between series resistor and voltage source
    # For example if Np=1 and Nc=2,
    # grid = array([[ 0, 1], # busbar
    #               # Rs
    #               [ 2, 3],
    #               # V
    #               [ 4, 5],
    #               # Ri
    #               [ 6, 7],
    #               # Rs
    #               [ 8, 9],
    #               # V
    #               [10, 11],
    #               # Ri
    #               [12, 13]] # busbar)
    # Connections are across busbars in first and last rows, and down each column
    # See "01 Getting Started.ipynb"
    # Build data with ['element type', node1, node2, value]
    netlist = []
    num_Rb = 0
    num_V = 0
    desc = []
    node1 = []
    node2 = []
    value = []
    # -ve busbars (bottom row of the grid)
    bus_nodes = [grid[0, :]]
    for nodes in bus_nodes:
        for i in range(len(nodes) - 1):
            # netline = []
            desc.append("Rbn" + str(num_Rb))
            num_Rb += 1
            node1.append(nodes[i])
            node2.append(nodes[i + 1])
            value.append(Rb)
    num_Rs = 0
    num_Ri = 0
    # Series resistors and voltage sources
    cols = np.arange(Nc)
    rows = np.arange(Nr)[:-1]
    # Repeating element pattern going down a column: weld/interconnect,
    # voltage source, internal resistance -- once per series cell.
    rtype = ["Rc", "V", "Ri"] * Ns
    for col in cols:
        # Go down the column alternating Rs, V, Ri connections between nodes
        nodes = grid[:, col]
        for row in rows:
            if rtype[row] == "Rc":
                # Inter(c)onnection / weld
                desc.append(rtype[row] + str(num_Rs))
                num_Rs += 1
                val = Rc
            elif rtype[row] == "Ri":
                # Internal resistor
                desc.append(rtype[row] + str(num_Ri))
                num_Ri += 1
                val = Ri
            else:
                # Voltage source
                desc.append("V" + str(num_V))
                num_V += 1
                val = V
            node1.append(nodes[row + 1])
            node2.append(nodes[row])
            value.append(val)
            # netlist.append(netline)
    # +ve busbar (top row of the grid)
    bus_nodes = [grid[-1, :]]
    for nodes in bus_nodes:
        for i in range(len(nodes) - 1):
            # netline = []
            desc.append("Rbp" + str(num_Rb))
            num_Rb += 1
            node1.append(nodes[i])
            node2.append(nodes[i + 1])
            value.append(Rb)
    desc = np.asarray(desc)
    node1 = np.asarray(node1)
    node2 = np.asarray(node2)
    value = np.asarray(value)
    main_grid = {
        "desc": desc,
        "node1": node1,
        "node2": node2,
        "value": value,
        # node numbering is 1-based, hence the -1 when indexing coordinates
        "node1_x": x[node1 - 1],
        "node1_y": y[node1 - 1],
        "node2_x": x[node2 - 1],
        "node2_y": y[node2 - 1],
    }
    # Current source - spans the entire pack
    if (terminals == "left") or (terminals is None):
        t_nodes = [0, 0]
    elif terminals == "right":
        t_nodes = [-1, -1]
    elif terminals == "left-right":
        t_nodes = [0, -1]
    elif terminals == "right-left":
        t_nodes = [-1, 0]
    elif isinstance(terminals, (list, np.ndarray)):
        t_nodes = terminals
    else:
        raise ValueError(
            'Please specify a valid terminals argument: "left", '
            + '"right", "left-right" or "right-left" or a list or '
            + "array of nodes"
        )
    # terminal nodes: positive terminal on the top busbar, negative on the
    # bottom busbar
    t1 = grid[-1, t_nodes[0]]
    t2 = grid[0, t_nodes[1]]
    # terminal coords
    x1 = x[t1 - 1]
    x2 = x[t2 - 1]
    y1 = y[t1 - 1]
    y2 = y[t2 - 1]
    nn = grid.max() + 1  # next node
    # coords of nodes forming current source loop; when both terminals are on
    # the same side the loop is drawn one column outside the grid (dy == 0),
    # otherwise it is routed around the pack (dy == 1).
    if terminals == "left" or (
        isinstance(terminals, (list, np.ndarray)) and np.all(np.array(terminals) == 0)
    ):
        ix = x1 - 1
        dy = 0
    elif terminals == "right" or (
        isinstance(terminals, (list, np.ndarray)) and np.all(np.array(terminals) == -1)
    ):
        ix = x1 + 1
        dy = 0
    else:
        ix = -1
        dy = 1
    if dy == 0:
        # 3 elements: terminal resistor, current source, terminal resistor
        desc = ["Rtp1", "I0", "Rtn1"]
        xs = np.array([x1, ix, ix, x2])
        ys = np.array([y1, y1, y2, y2])
        node1 = [t1, nn, 0]
        node2 = [nn, 0, t2]
        value = [Rt, I, Rt]
        num_elem = 3
    else:
        # 5 elements: the terminal resistance is split in half on each side
        desc = ["Rtp0", "Rtp1", "I0", "Rtn1", "Rtn0"]
        xs = np.array([x1, x1, ix, ix, x2, x2])
        ys = np.array([y1, y1 + dy, y1 + dy, 0 - dy, 0 - dy, y2])
        node1 = [t1, nn, nn + 1, 0, nn + 2]
        node2 = [nn, nn + 1, 0, nn + 2, t2]
        hRt = Rt / 2
        value = [hRt, hRt, I, hRt, hRt]
        num_elem = 5
    desc = np.asarray(desc)
    node1 = np.asarray(node1)
    node2 = np.asarray(node2)
    value = np.asarray(value)
    current_loop = {
        "desc": desc,
        "node1": node1,
        "node2": node2,
        "value": value,
        "node1_x": xs[:num_elem],
        "node1_y": ys[:num_elem],
        "node2_x": xs[1:],
        "node2_y": ys[1:],
    }
    # Append the current-source loop to the main grid elements
    for key in main_grid.keys():
        main_grid[key] = np.concatenate((main_grid[key], current_loop[key]))
    netlist = pd.DataFrame(main_grid)
    if plot:
        lp.simple_netlist_plot(netlist)
    lp.logger.notice("Circuit created")
    return netlist
def solve_circuit(netlist):
| |
import numpy as np
import collections
from gensim import matutils
import os
import codecs
import sys
import glob
import gensim
# NOTE that you can give a dModels as input (this allows for
# adding to/replacing models already loaded).
def loadAllModels(sGlobPattern, dModels={}, bReplace=True, bBinary=True):
for sModelFile in glob.glob(sGlobPattern):
# Chop off the path and the extension
sModelName = os.path.splitext(os.path.basename(sModelFile))[0]
if sModelName.startswith('all_preprocessed_files'):
sModelName = sModelName[23:]
if (sModelName in dModels) and not bReplace:
print "[%s]: already in" % sModelName
else:
print "[%s]: %s" % (sModelName, sModelFile)
dModels[sModelName] = gensim.models.word2vec.Word2Vec.load_word2vec_format(sModelFile, binary=bBinary)
return dModels
def cosineSimilarities(npaWrdEmbds1, npaWrdEmbds2):
    """Return the matrix of pairwise cosine similarities between the rows
    of two embedding matrices; [[0.0]] when either matrix is empty."""
    if npaWrdEmbds1.size == 0 or npaWrdEmbds2.size == 0:
        return np.array([[0.0]])
    # Row-wise L2 norms of each matrix
    npaRowNorms1 = np.sqrt((npaWrdEmbds1 ** 2).sum(axis=1))
    npaRowNorms2 = np.sqrt((npaWrdEmbds2 ** 2).sum(axis=1))
    # Pairwise dot products divided by the outer product of the norms
    npaDots = npaWrdEmbds1.dot(npaWrdEmbds2.T)
    return npaDots / np.outer(npaRowNorms1, npaRowNorms2)
# Input: two matrices of word embeddings
def euclidean_distances(npaWrdEmbds1, npaWrdEmbds2):
    """Return the matrix of pairwise Euclidean distances between the rows
    of two embedding matrices (shape: n1 x n2).

    Improvement: replaces the O(n1*n2) pure-Python double loop (one call to
    euclidean_distance_vec per pair) with a single broadcast expression --
    same values, computed in C by numpy.
    """
    # (n1, 1, d) - (1, n2, d) -> (n1, n2, d) differences
    npaDiffs = npaWrdEmbds1[:, None, :] - npaWrdEmbds2[None, :, :]
    return np.sqrt(np.power(npaDiffs, 2).sum(axis=2))
# Input: two word embedding vectors
def euclidean_distance_vec(npaWrdEmbd1, npaWrdEmbd2):
    """Return the Euclidean distance between two embedding vectors;
    0.0 when either vector is empty."""
    if npaWrdEmbd1.size == 0 or npaWrdEmbd2.size == 0:
        return 0.0
    return np.sqrt(((npaWrdEmbd1 - npaWrdEmbd2) ** 2).sum())
# Input: one word embedding vector, one matrix of word embeddings
def euclidean_distance_matrix(npaWrdEmbds, npaWrdEmbd):
    """Return the Euclidean distance from each row of npaWrdEmbds to
    npaWrdEmbd; 0.0 when either input is empty.

    Bug fix: the empty-input guard referenced the undefined names
    npaWrdEmbd1/npaWrdEmbd2 (copy-paste from euclidean_distance_vec), so
    every call raised NameError. It now checks the actual parameters.
    """
    if npaWrdEmbds.size == 0 or npaWrdEmbd.size == 0:
        return 0.0
    return np.sqrt(np.power(npaWrdEmbds - npaWrdEmbd, 2).sum(axis=1))
# Find the most similar words in terms of Euclidean distance.
# NOTE(review): empirically this ranks words the same as most_similar
# (cosine-based) -- presumably the model vectors are normalised; verify.
def most_similar_eucl(oModel, sWord, iTopN=10):
    """Return up to iTopN (word, distance) pairs closest to sWord by
    Euclidean distance, excluding sWord itself."""
    npaDeltas = oModel.syn0 - oModel[sWord]
    npaDistances = np.sqrt((npaDeltas ** 2).sum(axis=1))
    iSelfIndex = oModel.vocab[sWord].index
    # Take one extra candidate so the result is still iTopN long after the
    # input word itself is dropped.
    aResult = []
    for iIndex in npaDistances.argsort()[:iTopN + 1]:
        if iIndex != iSelfIndex:
            aResult.append((oModel.index2word[iIndex], npaDistances[iIndex]))
    return aResult[:iTopN]
# - Get the related terms for all terms
# - See, for each term, how many times it is mentioned in a related term list
#   of another word
# - Keep the most related-to terms
# So we sort by >>in-degree<<
def trackCloud3_inlink(oModel, aTerms, iMaxNrOfTerms=10,
                       iMaxNrOfRelatedTerms=10,
                       fMinDist=0.0, fSeedWordBoost=1.0,
                       bSumOfDistances=False, bDebug=False):
    """Rank terms by how often (count mode) or how strongly (distance-sum
    mode) they appear in the related-term lists of the seed terms."""
    if bDebug:
        import pdb
        pdb.set_trace()
    aRelatedTerms = []
    dRelatedTerms = {}
    for sTerm in aTerms:
        if bSumOfDistances:
            try:
                aSimTerms = oModel.most_similar(sTerm, topn=iMaxNrOfRelatedTerms)
            except KeyError:
                # Word not present in this era's model: skip it
                continue
            # The terms are always related to themselves
            dRelatedTerms[sTerm] = dRelatedTerms.get(sTerm, 0.0) + fSeedWordBoost
            for sSimWord, fSimilarity in aSimTerms:
                # most_similar returns results sorted by similarity, so we
                # can stop at the first one below the threshold
                if fSimilarity < fMinDist:
                    break
                fDistance = 1.0 - fSimilarity  # Similarity to distance
                dRelatedTerms[sSimWord] = dRelatedTerms.get(sSimWord, 0.0) + fDistance
        else:
            try:
                aSimTerms = oModel.most_similar(sTerm, topn=iMaxNrOfRelatedTerms)
            except KeyError:
                # Word not present in this era's model: skip it
                continue
            aRelatedTerms += [sSimWord for sSimWord, fSimilarity in aSimTerms
                              if fSimilarity >= fMinDist]
            # The terms are always related to themselves
            dRelatedTerms[sTerm] = dRelatedTerms.get(sTerm, 0.0) + fSeedWordBoost
    if bSumOfDistances:
        oCounter = collections.Counter(dRelatedTerms)
    else:
        # The terms are always related to themselves (even the ones that
        # were absent from the model)
        oCounter = collections.Counter(aRelatedTerms + aTerms)
    return oCounter.most_common(iMaxNrOfTerms)
# - Expand the seed term list with all similar terms (within distance)
# - See, for all terms in this expanded list, how many related terms they have
#   (within distance) in this expanded list
# - Keep the terms which have most of these
# So we sort by >>out-degree<<
def trackCloud3_outlink_oud(oModel, aTerms, iMaxNrOfTerms=10,
                            iMaxNrOfRelatedTerms=10, fMinDist=0.0,
                            fSeedWordBoost=1.00,
                            bSumOfDistances=False):
    """Old out-degree variant (fSeedWordBoost / bSumOfDistances are accepted
    but unused). Raises KeyError if a seed term is missing from the model."""
    # Expand: all sufficiently similar terms of all seed terms
    aSimilarTerms = []
    for sTerm in aTerms:
        for sSimWord, fSimilarity in oModel.most_similar(sTerm, topn=iMaxNrOfRelatedTerms):
            if fSimilarity >= fMinDist:
                aSimilarTerms.append(sSimWord)
    # The terms are always related to themselves
    setSimilarTerms = set(aTerms) | set(aSimilarTerms)
    # Out-degree: how many of each term's related words fall inside the set
    dOutlinks = {}
    for sSimTerm in setSimilarTerms:
        setRelated = {sSimWord for sSimWord, fSimilarity in
                      oModel.most_similar(sSimTerm, topn=iMaxNrOfRelatedTerms)
                      if fSimilarity >= fMinDist}
        dOutlinks[sSimTerm] = len(setRelated & setSimilarTerms)
    return collections.Counter(dOutlinks).most_common(iMaxNrOfTerms)
# - Expand the seed term list with all similar terms (within distance)
# - See, for all terms in this expanded list, how many related terms they have
#   (within distance) in this expanded list
# - Keep the terms which have most of these
# So we sort by >>out-degree<<
def trackCloud3_outlink(oModel, aTerms, iMaxNrOfTerms=10,
                        iMaxNrOfRelatedTerms=10, fMinDist=0.0,
                        fSeedWordBoost=1.00,
                        bSumOfDistances=False, bDebug=False):
    """Rank the first-tier terms by out-degree into the first-tier set
    (optionally weighting edges by 1 - similarity)."""
    aFirstTierTerms = []
    dOutlinks = {}
    # Get the first tier related terms
    for sTerm in aTerms:
        try:
            aSimTerms = oModel.most_similar(sTerm, topn=iMaxNrOfRelatedTerms)
        except KeyError:
            # Seed word absent from this model: skip it
            continue
        aFirstTierTerms.extend(sSimWord for sSimWord, fSimilarity in aSimTerms
                               if fSimilarity >= fMinDist)
        aFirstTierTerms.append(sTerm)
        # Every word is related to itself
        dOutlinks[sTerm] = fSeedWordBoost
    setFirstTierTerms = set(aFirstTierTerms)
    for sFirstTierTerm in setFirstTierTerms:
        for sSecondTierTerm, fSimilarity in oModel.most_similar(
                sFirstTierTerm, topn=iMaxNrOfRelatedTerms):
            if fSimilarity < fMinDist:
                continue
            if sSecondTierTerm in setFirstTierTerms:
                # Edge weight: 1 in count mode, (1 - similarity) in
                # distance-sum mode
                fAdd = (1.0 - fSimilarity) if bSumOfDistances else 1.0
                dOutlinks[sFirstTierTerm] = dOutlinks.get(sFirstTierTerm, 0.0) + fAdd
    if bDebug:
        import pdb
        pdb.set_trace()
    return collections.Counter(dOutlinks).most_common(iMaxNrOfTerms)
def trackClouds3(dModels, aSeedTerms, sOutputFile=None,
                 iMaxNrOfTerms=10,
                 iMaxNrOfRelatedTerms=10, sStartKey=None, sEndKey=None,
                 fMinDist=0.0, fSeedWordBoost=1.00, sDirection='forwards',
                 sDescription='', bSumOfDistances=False, bOutlinks=False,
                 bDebug=False):
    """Track a term cloud through the models in key order, re-seeding with
    each iteration's top terms, and write one line per model to sOutputFile
    (or stdout).

    Output format: line 1 = seed terms, line 2 = direction, line 3 =
    description, then one "<key>\\t<term (score)> ..." line per model.
    NOTE: Python 2 module (print statements).
    """
    fh = sys.stdout
    if sOutputFile is not None:
        fh = codecs.open(sOutputFile, mode='w', encoding='utf8')
    bBackwards = True if (sDirection == 'backwards') else False
    # First line always contains the seed terms
    print >>fh, ",".join(aSeedTerms)
    # Second line is always the direction
    print >>fh, sDirection
    # Third line is always the description
    print >>fh, sDescription
    aSeedSet = aSeedTerms
    dResult = {}
    aSortedKeys = sorted(dModels.keys())
    if bBackwards:
        aSortedKeys = aSortedKeys[::-1]
    for sKey in aSortedKeys:
        # sEndKey is exclusive; sStartKey skips keys until it is seen
        if (sEndKey is not None) and (sKey == sEndKey):
            break
        if (sStartKey is not None):
            if sKey != sStartKey:
                continue
            else:
                sStartKey = None
        if bOutlinks:
            dResult[sKey] = \
                trackCloud3_outlink(dModels[sKey], aSeedSet,
                                    iMaxNrOfTerms=iMaxNrOfTerms,
                                    iMaxNrOfRelatedTerms=iMaxNrOfRelatedTerms,
                                    fMinDist=fMinDist,
                                    fSeedWordBoost=fSeedWordBoost,
                                    bSumOfDistances=bSumOfDistances,
                                    bDebug=bDebug)
        else:
            dResult[sKey] = \
                trackCloud3_inlink(dModels[sKey], aSeedSet,
                                   iMaxNrOfTerms=iMaxNrOfTerms,
                                   iMaxNrOfRelatedTerms=iMaxNrOfRelatedTerms,
                                   fMinDist=fMinDist,
                                   fSeedWordBoost=fSeedWordBoost,
                                   bSumOfDistances=bSumOfDistances,
                                   bDebug=bDebug)
        # Distance sums are floats, in-degree counts are ints
        if bSumOfDistances:
            print >>fh, "%s\t%s" % (sKey, ' '.join(["%s (%.2f)" % (x[0], x[1]) for x in dResult[sKey]]))
        else:
            print >>fh, "%s\t%s" % (sKey, ' '.join(["%s (%d)" % (x[0], x[1]) for x in dResult[sKey]]))
        # Make a new seed set
        aSeedSet = [x[0] for x in dResult[sKey]]
    if sOutputFile is not None:
        fh.close()
    # return dResult
def addRelatedWord(dRelatedWords, sWord, fWeight):
    """Accumulate fWeight onto dRelatedWords[sWord] in place."""
    if sWord in dRelatedWords:
        dRelatedWords[sWord] += fWeight
    else:
        dRelatedWords[sWord] = fWeight
def expandRelatedWords(oModel, aSeedSet, iMaxNrOfTerms=None, fMinDist=None):
    """Expand a weighted seed set [(word, weight), ...] with each seed's
    related words, weighting a related word by seed_weight * similarity.
    Returns the expanded set as a list of (word, weight) pairs.
    NOTE: Python 2 (`iteritems`); raises KeyError for seeds not in oModel.
    """
    dRelatedWords = {}
    for (sSeedWord, fSeedWordWeight) in aSeedSet:
        # Always add the seed words themselves
        addRelatedWord(dRelatedWords, sSeedWord, fSeedWordWeight)
        for (sRelatedWord, fRelatedWordWeight) in \
                oModel.most_similar(sSeedWord, topn=iMaxNrOfTerms):
            # most_similar results are similarity-sorted, so stop at the
            # first one below the threshold
            if fRelatedWordWeight < fMinDist:
                break
            else:
                fTmpWeight = fSeedWordWeight * fRelatedWordWeight
                addRelatedWord(dRelatedWords, sRelatedWord, fTmpWeight)
    return [(sWord, fWeight) for sWord, fWeight in dRelatedWords.iteritems()]
def trackVocab(dModels, aSeedTerms, iMaxNrOfTerms=10, iMaxNrOfRelatedTerms=10,
sStartKey=None, sEndKey=None, fMinDist=0.0, bBackwards=False,
bSumOfDistances=False, bOutlinks=False):
import pdb
pdb.set_trace()
aSortedKeys = sorted(dModels.keys())
if bBackwards:
aSortedKeys = aSortedKeys[::-1]
# Start with initial set, all the weights are identical
aSeedSet = [(x, 1.0) for x in aSeedTerms]
for sKey in aSortedKeys:
if (sEndKey is not None) and (sKey == sEndKey):
break
if (sStartKey is not None):
if sKey != sStartKey:
continue
else:
sStartKey = None
# NOTE that we change the seed set here with every iteration!
aSeedSet = expandRelatedWords(dModels[sKey], aSeedSet,
iMaxNrOfTerms=iMaxNrOfTerms,
fMinDist=fMinDist)
print "%s: %s" % (sKey, aSeedSet)
def trackWord(dModels, sTerm, iMaxNrOfRelatedTerms=10, fMinDist=0.0):
    """Print sTerm's nearest neighbours "word (sim)" for every model, in
    sorted key order; prints "[]" for models that do not contain sTerm.
    NOTE: Python 2 module (print statements).
    """
    aSortedKeys = sorted(dModels.keys())
    for sKey in aSortedKeys:
        try:
            print "%s: %s" % \
                (sKey,
                 ", ".join(["%s (%.2f)" % (x[0], x[1]) for x in \
                            dModels[sKey].most_similar(sTerm,
                                                       topn=iMaxNrOfRelatedTerms)\
                            if x[1] > fMinDist]))
        except KeyError:
            # sTerm is not in this model's vocabulary
            print "%s: []" % sKey
def trackWords(dModels, aTerms, sDirection, sDescription, sOutputFile=None,
               iMaxNrOfRelatedTerms=10, fMinDist=0.0):
    """For each model (in sorted key order) print the words surrounding the
    mean vector of aTerms, one tab-separated line per model, to sOutputFile
    (or stdout). Relies on initialVectors/surroundingWords defined elsewhere
    in this module. NOTE: Python 2 module (print statements).
    """
    fh = sys.stdout
    if sOutputFile is not None:
        fh = codecs.open(sOutputFile, mode='w', encoding='utf8')
    # First line always contains the seed terms
    print >>fh, ",".join(aTerms)
    # Actually the direction doesn't mean anything here. We just always have it
    # on the second line, so the rest of the code can rely on this...
    print >>fh, sDirection
    # And third line is always the description
    print >>fh, sDescription
    aSortedKeys = sorted(dModels.keys())
    for sKey in aSortedKeys:
        try:
            aWordEmbeddings = initialVectors(dModels[sKey], sKey, aTerms)
            if len(aWordEmbeddings) == 0:
                # No embeddings for any seed term: emit the key only
                print >>fh, "%s\t" % sKey
            else:
                npaMeanVector = np.mean(aWordEmbeddings, axis=0)
                print >>fh, "%s\t%s" % \
                    (sKey,
                     " ".join(surroundingWords(dModels[sKey], npaMeanVector,
                                               fMinDist, iMaxNrOfRelatedTerms)))
        except KeyError:
            print >>fh, "%s\t" % sKey
    if sOutputFile is not None:
        fh.close()
# Input: the model of the new | |
<reponame>nwukie/ChiDG
from __future__ import division
import sys
import os
import time
import numpy
import pickle
from sympy import *
from sympy.tensor.array import MutableSparseNDimArray
def update_progress(job_title, progress):
    """Render an in-place console progress bar for *progress* in [0, 1];
    appends ' DONE' and a newline once progress reaches 1."""
    bar_len = 20  # modify this to change the length
    filled = int(round(bar_len * progress))
    bar = "#" * filled + "-" * (bar_len - filled)
    pct = round(progress * 100, 2)
    msg = "\r{0}: [{1}] {2}%".format(job_title, bar, pct)
    if progress >= 1:
        msg += " DONE\r\n"
    sys.stdout.write(msg)
    sys.stdout.flush()
def cls():
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere)."""
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
cls()
# Python 2 script: `print` statement (the module imports division from
# __future__, so `/` below is true division).
print "WARNING: This script is very slow, it might run for hours. It is strongly recommended to watch Netflix in the meanwhile."
################################################################################################################
# Define symbols for each coordinate for support node
x1,y1,z1 = symbols('x1 y1 z1')
x2,y2,z2 = symbols('x2 y2 z2')
x3,y3,z3 = symbols('x3 y3 z3')
x4,y4,z4 = symbols('x4 y4 z4')
x5,y5,z5 = symbols('x5 y5 z5')
x6,y6,z6 = symbols('x6 y6 z6')
x7,y7,z7 = symbols('x7 y7 z7')
x8,y8,z8 = symbols('x8 y8 z8')
# Symbolic coordinates of the 8 support nodes of a (trilinear) hex element;
# all matrices below are differentiated with respect to these.
coords_ = Matrix( [[x1,y1,z1],
                   [x2,y2,z2],
                   [x3,y3,z3],
                   [x4,y4,z4],
                   [x5,y5,z5],
                   [x6,y6,z6],
                   [x7,y7,z7],
                   [x8,y8,z8],
                   ] )
nnodes_r = coords_.shape[0]   # number of support (reference) nodes
nnodes_ie = 8                 # number of element interpolation (quadrature) nodes
nnodes_if = 4                 # number of face interpolation nodes
nterms_s = 8                  # number of polynomial basis terms
ndirs = 3                     # number of coordinate directions (x, y, z)
# Define coordinate values at support nodes
# (a 5 x 1 x 1 hex element used to evaluate the symbolic expressions)
coords = Matrix( [[0.0,0.0,0.0],
                  [5.0,0.0,0.0],
                  [0.0,1.0,0.0],
                  [5.0,1.0,0.0],
                  [0.0,0.0,1.0],
                  [5.0,0.0,1.0],
                  [0.0,1.0,1.0],
                  [5.0,1.0,1.0],
                  ] )
# Define matrix of polynomial basis terms at support nodes
# (rows = support nodes, columns = the 8 trilinear basis terms
#  evaluated at the reference corners +/-1)
val_r = Matrix( [[ 1.0,-1.0,-1.0,-1.0, 1.0, 1.0, 1.0,-1.0],
                 [ 1.0,-1.0,-1.0, 1.0,-1.0,-1.0, 1.0, 1.0],
                 [ 1.0, 1.0,-1.0,-1.0,-1.0, 1.0,-1.0, 1.0],
                 [ 1.0, 1.0,-1.0, 1.0, 1.0,-1.0,-1.0,-1.0],
                 [ 1.0,-1.0, 1.0,-1.0, 1.0,-1.0,-1.0, 1.0],
                 [ 1.0,-1.0, 1.0, 1.0,-1.0, 1.0,-1.0,-1.0],
                 [ 1.0, 1.0, 1.0,-1.0,-1.0,-1.0, 1.0,-1.0],
                 [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                 ] )
# Define matrices at interpolation nodes (quadrature, level = 1)
# (basis terms evaluated at the 2x2x2 Gauss points +/-sqrt(1/3))
val_i = Matrix( [[ 1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0, 1.0/3.0,-1.0/3.0*sqrt(1.0/3.0)],
                 [ 1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0, 1.0/3.0, 1.0/3.0*sqrt(1.0/3.0)],
                 [ 1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0,-1.0/3.0, 1.0/3.0*sqrt(1.0/3.0)],
                 [ 1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0,-1.0/3.0,-1.0/3.0*sqrt(1.0/3.0)],
                 [ 1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0,-1.0/3.0, 1.0/3.0*sqrt(1.0/3.0)],
                 [ 1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0,-1.0/3.0,-1.0/3.0*sqrt(1.0/3.0)],
                 [ 1.0, sqrt(1.0/3.0), sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0, 1.0/3.0,-1.0/3.0*sqrt(1.0/3.0)],
                 [ 1.0, sqrt(1.0/3.0), sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0, 1.0/3.0, 1.0/3.0*sqrt(1.0/3.0)],
                 ] )
# Basis-term derivatives w.r.t. xi at the interpolation nodes
ddxi_i = Matrix( [[ 0.0,0.0,0.0,1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0),0.0, 1.0/3.0],
                  [ 0.0,0.0,0.0,1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0),0.0, 1.0/3.0],
                  [ 0.0,0.0,0.0,1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),0.0,-1.0/3.0],
                  [ 0.0,0.0,0.0,1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),0.0,-1.0/3.0],
                  [ 0.0,0.0,0.0,1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),0.0,-1.0/3.0],
                  [ 0.0,0.0,0.0,1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),0.0,-1.0/3.0],
                  [ 0.0,0.0,0.0,1.0, sqrt(1.0/3.0), sqrt(1.0/3.0),0.0, 1.0/3.0],
                  [ 0.0,0.0,0.0,1.0, sqrt(1.0/3.0), sqrt(1.0/3.0),0.0, 1.0/3.0],
                  ] )
# Basis-term derivatives w.r.t. eta at the interpolation nodes
ddeta_i = Matrix( [[ 0.0,1.0,0.0,0.0,-sqrt(1.0/3.0),0.0,-sqrt(1.0/3.0), 1.0/3.0],
                   [ 0.0,1.0,0.0,0.0, sqrt(1.0/3.0),0.0,-sqrt(1.0/3.0),-1.0/3.0],
                   [ 0.0,1.0,0.0,0.0,-sqrt(1.0/3.0),0.0,-sqrt(1.0/3.0), 1.0/3.0],
                   [ 0.0,1.0,0.0,0.0, sqrt(1.0/3.0),0.0,-sqrt(1.0/3.0),-1.0/3.0],
                   [ 0.0,1.0,0.0,0.0,-sqrt(1.0/3.0),0.0, sqrt(1.0/3.0),-1.0/3.0],
                   [ 0.0,1.0,0.0,0.0, sqrt(1.0/3.0),0.0, sqrt(1.0/3.0), 1.0/3.0],
                   [ 0.0,1.0,0.0,0.0,-sqrt(1.0/3.0),0.0, sqrt(1.0/3.0),-1.0/3.0],
                   [ 0.0,1.0,0.0,0.0, sqrt(1.0/3.0),0.0, sqrt(1.0/3.0), 1.0/3.0],
                   ] )
# Basis-term derivatives w.r.t. zeta at the interpolation nodes
ddzeta_i= Matrix( [[ 0.0,0.0,1.0,0.0,0.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0],
                   [ 0.0,0.0,1.0,0.0,0.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0],
                   [ 0.0,0.0,1.0,0.0,0.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0],
                   [ 0.0,0.0,1.0,0.0,0.0, sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0],
                   [ 0.0,0.0,1.0,0.0,0.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0],
                   [ 0.0,0.0,1.0,0.0,0.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0],
                   [ 0.0,0.0,1.0,0.0,0.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0],
                   [ 0.0,0.0,1.0,0.0,0.0, sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0],
                   ] )
# Define element interpolation nodes weights for linear element
# (2x2x2 Gauss quadrature: all weights are 1)
weights_e = Matrix( [1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0] )
# Define val_f for each face: the 8 basis terms evaluated at the 2x2 face
# quadrature nodes, with the face-normal reference coordinate pinned to -1/+1.
# Face 1, XI_MIN
val_1 = Matrix( [[ 1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0, sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0],
                 [ 1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0],
                 [ 1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0],
                 [ 1.0, sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0],
                 ] )
# Face 2, XI_MAX
val_2 = Matrix( [[ 1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0),1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0],
                 [ 1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0],
                 [ 1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0],
                 [ 1.0, sqrt(1.0/3.0), sqrt(1.0/3.0),1.0, sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0],
                 ] )
# Face 3, ETA_MIN
val_3 = Matrix( [[ 1.0,-1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0, sqrt(1.0/3.0),-1.0/3.0],
                 [ 1.0,-1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0, sqrt(1.0/3.0), 1.0/3.0],
                 [ 1.0,-1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0,-sqrt(1.0/3.0), 1.0/3.0],
                 [ 1.0,-1.0, sqrt(1.0/3.0), sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0,-sqrt(1.0/3.0),-1.0/3.0],
                 ] )
# Face 4, ETA_MAX
val_4 = Matrix( [[ 1.0,1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0,-sqrt(1.0/3.0), 1.0/3.0],
                 [ 1.0,1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0,-sqrt(1.0/3.0),-1.0/3.0],
                 [ 1.0,1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0, sqrt(1.0/3.0),-1.0/3.0],
                 [ 1.0,1.0, sqrt(1.0/3.0), sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0, sqrt(1.0/3.0), 1.0/3.0],
                 ] )
# Face 5, ZETA_MIN
val_5 = Matrix( [[ 1.0,-sqrt(1.0/3.0),-1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0, sqrt(1.0/3.0),-1.0/3.0],
                 [ 1.0,-sqrt(1.0/3.0),-1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0, sqrt(1.0/3.0), 1.0/3.0],
                 [ 1.0, sqrt(1.0/3.0),-1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0,-sqrt(1.0/3.0), 1.0/3.0],
                 [ 1.0, sqrt(1.0/3.0),-1.0, sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0,-sqrt(1.0/3.0),-1.0/3.0],
                 ] )
# Face 6, ZETA_MAX
val_6 = Matrix( [[ 1.0,-sqrt(1.0/3.0),1.0,-sqrt(1.0/3.0), sqrt(1.0/3.0),-1.0/3.0,-sqrt(1.0/3.0), 1.0/3.0],
                 [ 1.0,-sqrt(1.0/3.0),1.0, sqrt(1.0/3.0),-sqrt(1.0/3.0), 1.0/3.0,-sqrt(1.0/3.0),-1.0/3.0],
                 [ 1.0, sqrt(1.0/3.0),1.0,-sqrt(1.0/3.0),-sqrt(1.0/3.0),-1.0/3.0, sqrt(1.0/3.0),-1.0/3.0],
                 [ 1.0, sqrt(1.0/3.0),1.0, sqrt(1.0/3.0), sqrt(1.0/3.0), 1.0/3.0, sqrt(1.0/3.0), 1.0/3.0],
                 ] )
#--------------------------------------------------------------------
# Matrix modes_to_nodes
val_r_inv = val_r**(-1)
# Compute coordinate modes (modal coefficients of the element mapping)
coords_modes_ = val_r_inv * coords_
coords_modes = lambdify(coords_,coords_modes_,"numpy")
# Initialize coordinates at the interpolation nodes
interp_coords_ = MutableSparseNDimArray.zeros(nnodes_ie,3)
for inode in range(0,nnodes_ie):
    for idir in range(0,3):
        interp_coords_[inode,idir] = val_i[inode,:] * coords_modes_[:,idir]
# Initialize jacobian: d(x,y,z)/d(xi,eta,zeta) at each interpolation node
jacobian_ = MutableSparseNDimArray.zeros(3, 3, nnodes_ie)
for inode in range(0,nnodes_ie):
    jacobian_[0,0,inode] = ddxi_i[inode,:]   * coords_modes_[:,0]
    jacobian_[0,1,inode] = ddeta_i[inode,:]  * coords_modes_[:,0]
    jacobian_[0,2,inode] = ddzeta_i[inode,:] * coords_modes_[:,0]
    jacobian_[1,0,inode] = ddxi_i[inode,:]   * coords_modes_[:,1]
    jacobian_[1,1,inode] = ddeta_i[inode,:]  * coords_modes_[:,1]
    jacobian_[1,2,inode] = ddzeta_i[inode,:] * coords_modes_[:,1]
    jacobian_[2,0,inode] = ddxi_i[inode,:]   * coords_modes_[:,2]
    jacobian_[2,1,inode] = ddeta_i[inode,:]  * coords_modes_[:,2]
    jacobian_[2,2,inode] = ddzeta_i[inode,:] * coords_modes_[:,2]
    update_progress("Computing Jacobian ", inode/(nnodes_ie-1))
# Metrics and Determinant
# (NOTE: sympy's zeros(n) is n x n; jinv_ is indexed with a flat index)
metrics_ = MutableSparseNDimArray.zeros(3, 3, nnodes_ie)
jinv_ = zeros(nnodes_ie)
for inode in range(0,nnodes_ie):
    # Gather this node's 3x3 jacobian into a dense matrix
    ijacobian = zeros(3,3)
    for irow in range(0,3):
        for icol in range(0,3):
            ijacobian[irow,icol] = jacobian_[irow,icol,inode]
    # Compute jacobian determinant and inverse (metric terms) for the node
    update_progress("Computing Jinv and Metric ", inode/(nnodes_ie-1))
    jinv_[inode] = ijacobian.det()
    imetric = ijacobian**(-1)
    for irow in range(0,3):
        for icol in range(0,3):
            metrics_[irow,icol,inode] = imetric[irow,icol]
# Compute inverse Mass matrix: M = val^T * diag(weights * jinv) * val
invmass_ = zeros(nterms_s,nterms_s)
mass_ = zeros(nterms_s,nterms_s)
i = 1
# BUG FIX: this used to read `val_tmp = val_i`, which merely aliases the
# (mutable) sympy Matrix. Scaling val_tmp in the loop below therefore also
# mutated val_i itself, so mass_ became (W*J*val)^T * (W*J*val) -- the
# quadrature weights and jacobian applied twice -- and every later use of
# val_i (e.g. the BR2 volume terms) silently saw the scaled matrix.
# Working on an independent copy restores M = val^T * (W*J) * val.
val_tmp = val_i.copy()
for iterm in range(0,nterms_s):
    for inode in range(0,nnodes_ie):
        val_tmp[inode,iterm] = val_tmp[inode,iterm] * weights_e[inode] * jinv_[inode]
        update_progress("Computing invmass ", i/(nterms_s*nnodes_ie))
        i += 1
mass_ = transpose(val_tmp)*val_i
invmass_ = (mass_)**(-1)
# Compute BR2_VOL for each face: val_i * invmass * val_f^T
# (the zeros() initializations below are immediately overwritten; kept as-is)
br2_vol_face1_ = zeros(nnodes_ie,nnodes_if)
br2_vol_face2_ = zeros(nnodes_ie,nnodes_if)
br2_vol_face3_ = zeros(nnodes_ie,nnodes_if)
br2_vol_face4_ = zeros(nnodes_ie,nnodes_if)
br2_vol_face5_ = zeros(nnodes_ie,nnodes_if)
br2_vol_face6_ = zeros(nnodes_ie,nnodes_if)
br2_vol_face1_ = val_i*(invmass_*transpose(val_1))
br2_vol_face2_ = val_i*(invmass_*transpose(val_2))
br2_vol_face3_ = val_i*(invmass_*transpose(val_3))
br2_vol_face4_ = val_i*(invmass_*transpose(val_4))
br2_vol_face5_ = val_i*(invmass_*transpose(val_5))
br2_vol_face6_ = val_i*(invmass_*transpose(val_6))
update_progress("Computing br2_vol ", 1)
# Compute BR2_FACE for each face: val_f * invmass * val_f^T
br2_face_face1_ = zeros(nnodes_if,nnodes_if)
br2_face_face2_ = zeros(nnodes_if,nnodes_if)
br2_face_face3_ = zeros(nnodes_if,nnodes_if)
br2_face_face4_ = zeros(nnodes_if,nnodes_if)
br2_face_face5_ = zeros(nnodes_if,nnodes_if)
br2_face_face6_ = zeros(nnodes_if,nnodes_if)
br2_face_face1_ = val_1*(invmass_*transpose(val_1))
br2_face_face2_ = val_2*(invmass_*transpose(val_2))
br2_face_face3_ = val_3*(invmass_*transpose(val_3))
br2_face_face4_ = val_4*(invmass_*transpose(val_4))
br2_face_face5_ = val_5*(invmass_*transpose(val_5))
br2_face_face6_ = val_6*(invmass_*transpose(val_6))
update_progress("Computing br2_face ", 1)
## Grad1, Grad2, and Grad3
#grad1_ = zeros(nnodes_ie,nterms_s)
#grad2_ = zeros(nnodes_ie,nterms_s)
#grad3_ = zeros(nnodes_ie,nterms_s)
#i = 1
#for iterm in range(0,nterms_s):
# for inode in range(0,nnodes_ie):
# grad1_[inode,iterm] = metrics_[0,0,inode] * ddxi_i[inode,iterm] + metrics_[1,0,inode] * ddeta_i[inode,iterm] + metrics_[2,0,inode] * ddzeta_i[inode,iterm]
# grad2_[inode,iterm] = metrics_[0,1,inode] * ddxi_i[inode,iterm] + metrics_[1,1,inode] * ddeta_i[inode,iterm] + metrics_[2,1,inode] * ddzeta_i[inode,iterm]
# grad3_[inode,iterm] = metrics_[0,2,inode] * ddxi_i[inode,iterm] + metrics_[1,2,inode] * ddeta_i[inode,iterm] + metrics_[2,2,inode] * ddzeta_i[inode,iterm]
# update_progress("Computing grad1, grad2, grad3 ", i/(nnodes_ie*nterms_s))
# i += 1
# Differentiate coordinates at interpolation points with respect to every
# support-node coordinate (sensitivities for mesh-motion / adjoint use)
interp_coords_dx_ = MutableSparseNDimArray.zeros(nnodes_ie, 3, nnodes_r, ndirs)
i = 1
for inode in range(0,nnodes_ie):
    for direct in range(0,3):
        for inode_diff in range(0,nnodes_r):
            for idir in range(0,ndirs):
                interp_coords_dx_[inode,direct,inode_diff,idir] = interp_coords_[inode,direct].diff(coords_[inode_diff,idir])
                update_progress("Computing interp_coords_dx ", i/(nnodes_ie*nnodes_r*ndirs*3))
                i += 1
# Differentiate determinant w.r.t. every support-node coordinate
djinv_dx_ = MutableSparseNDimArray.zeros(nnodes_ie, nnodes_r, ndirs)
i = 1
for inode in range(0,nnodes_ie):
    for inode_diff in range(0,nnodes_r):
        for idir in range(0,ndirs):
            djinv_dx_[inode,inode_diff,idir] = jinv_[inode].diff(coords_[inode_diff,idir])
            update_progress("Computing djinv_dx ", i/(nnodes_ie*nnodes_r*ndirs))
            i += 1
# Differentiate metrics w.r.t. every support-node coordinate
dmetric_dx_ = MutableSparseNDimArray.zeros(3,3,nnodes_ie,nnodes_r,ndirs)
i = 1
for inode in range(0,nnodes_ie):
    for inode_diff in range(0,nnodes_r):
        for idir in range(0,ndirs):
            for irow in range(0,3):
                for icol in range(0,3):
                    dmetric_dx_[irow,icol,inode,inode_diff,idir] = metrics_[irow,icol,inode].diff(coords_[inode_diff,idir])
                    update_progress("Computing dmetric_dx ", i/(nnodes_ie*nnodes_r*ndirs*9))
                    i += 1
# Differentiate invmass w.r.t. every support-node coordinate
dinvmass_dx_ = MutableSparseNDimArray.zeros(nterms_s,nterms_s,nnodes_r,ndirs)
i = 1
for inode_diff in range(0,nnodes_r):
    for idir in range(0,ndirs):
        for irow in range(0,nterms_s):
            for icol in range(0,nterms_s):
                dinvmass_dx_[irow,icol,inode_diff,idir] = invmass_[irow,icol].diff(coords_[inode_diff,idir])
                update_progress("Computing dinvmass_dx ", i/(nnodes_r*ndirs*nterms_s*nterms_s))
                i += 1
# Differentiate BR2_vol (all six faces in one loop nest)
dbr2_vol_face1_dx_ = MutableSparseNDimArray.zeros(nnodes_ie,nnodes_if,nnodes_r,ndirs)
dbr2_vol_face2_dx_ = MutableSparseNDimArray.zeros(nnodes_ie,nnodes_if,nnodes_r,ndirs)
dbr2_vol_face3_dx_ = MutableSparseNDimArray.zeros(nnodes_ie,nnodes_if,nnodes_r,ndirs)
dbr2_vol_face4_dx_ = MutableSparseNDimArray.zeros(nnodes_ie,nnodes_if,nnodes_r,ndirs)
dbr2_vol_face5_dx_ = MutableSparseNDimArray.zeros(nnodes_ie,nnodes_if,nnodes_r,ndirs)
dbr2_vol_face6_dx_ = MutableSparseNDimArray.zeros(nnodes_ie,nnodes_if,nnodes_r,ndirs)
i = 1
for inode_diff in range(0,nnodes_r):
    for idir in range(0,ndirs):
        for irow in range(0,nnodes_ie):
            for icol in range(0,nnodes_if):
                dbr2_vol_face1_dx_[irow,icol,inode_diff,idir] = br2_vol_face1_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_vol_face2_dx_[irow,icol,inode_diff,idir] = br2_vol_face2_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_vol_face3_dx_[irow,icol,inode_diff,idir] = br2_vol_face3_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_vol_face4_dx_[irow,icol,inode_diff,idir] = br2_vol_face4_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_vol_face5_dx_[irow,icol,inode_diff,idir] = br2_vol_face5_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_vol_face6_dx_[irow,icol,inode_diff,idir] = br2_vol_face6_[irow,icol].diff(coords_[inode_diff,idir])
                update_progress("Computing dbr2_vol_faces_dx ", i/(nnodes_r*ndirs*nnodes_ie*nnodes_if))
                i += 1
# Differentiate BR2_face (all six faces in one loop nest)
dbr2_face_face1_dx_ = MutableSparseNDimArray.zeros(nnodes_if,nnodes_if,nnodes_r,ndirs)
dbr2_face_face2_dx_ = MutableSparseNDimArray.zeros(nnodes_if,nnodes_if,nnodes_r,ndirs)
dbr2_face_face3_dx_ = MutableSparseNDimArray.zeros(nnodes_if,nnodes_if,nnodes_r,ndirs)
dbr2_face_face4_dx_ = MutableSparseNDimArray.zeros(nnodes_if,nnodes_if,nnodes_r,ndirs)
dbr2_face_face5_dx_ = MutableSparseNDimArray.zeros(nnodes_if,nnodes_if,nnodes_r,ndirs)
dbr2_face_face6_dx_ = MutableSparseNDimArray.zeros(nnodes_if,nnodes_if,nnodes_r,ndirs)
i = 1
for inode_diff in range(0,nnodes_r):
    for idir in range(0,ndirs):
        for irow in range(0,nnodes_if):
            for icol in range(0,nnodes_if):
                dbr2_face_face1_dx_[irow,icol,inode_diff,idir] = br2_face_face1_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_face_face2_dx_[irow,icol,inode_diff,idir] = br2_face_face2_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_face_face3_dx_[irow,icol,inode_diff,idir] = br2_face_face3_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_face_face4_dx_[irow,icol,inode_diff,idir] = br2_face_face4_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_face_face5_dx_[irow,icol,inode_diff,idir] = br2_face_face5_[irow,icol].diff(coords_[inode_diff,idir])
                dbr2_face_face6_dx_[irow,icol,inode_diff,idir] = br2_face_face6_[irow,icol].diff(coords_[inode_diff,idir])
                update_progress("Computing dbr2_face_faces_dx ", i/(nnodes_r*ndirs*nnodes_if*nnodes_if))
                i += 1
## Differentaite Gradients
#dgrad1_dx_ = MutableSparseNDimArray.zeros(nnodes_ie,nterms_s,nnodes_r,ndirs)
#dgrad2_dx_ = MutableSparseNDimArray.zeros(nnodes_ie,nterms_s,nnodes_r,ndirs)
#dgrad3_dx_ = MutableSparseNDimArray.zeros(nnodes_ie,nterms_s,nnodes_r,ndirs)
#i = 1
#for inode in range(0,nnodes_ie):
# for inode_diff in range(0,nnodes_r):
# for idir in range(0,ndirs):
# for inode in range(0,nnodes_ie):
# for iterm in range(0,nterms_s):
# dgrad1_dx_[inode,iterm,inode_diff,idir] = grad1_[inode,iterm].diff(coords_[inode_diff,idir])
# dgrad2_dx_[inode,iterm,inode_diff,idir] = grad2_[inode,iterm].diff(coords_[inode_diff,idir])
# dgrad3_dx_[inode,iterm,inode_diff,idir] = grad3_[inode,iterm].diff(coords_[inode_diff,idir])
# update_progress("Computing dgrad1_dx, dgrad2_dx, .. ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_ie*nterms_s))
# i += 1
#WRITE_____________________
##
## Metrics
##
#f = open("metrics.txt","w")
#i = 1
#for inode in range (0,nnodes_ie):
# f.write("Metric interpolation node %d \n" % (inode+1))
# array = numpy.zeros([3, 3])
# for irow in range(0,3):
# for icol in range(0,3):
# data_sym = lambdify(coords_,metrics_[irow,icol,inode],"numpy")
# data_val = data_sym(*flatten(coords))
# array[irow,icol] = data_val
# update_progress("Writing metrics to file ", i/(nnodes_ie*9))
# i += 1
# numpy.savetxt(f,array)
#f.close()
#
##
## jinv
##
#f = open("jinv.txt","w")
#array = numpy.zeros([1])
#i = 1
#for inode in range (0,nnodes_ie):
# f.write("Jinv interpolation node %d \n" % (inode+1))
# data_sym = lambdify(coords_,jinv_[inode],"numpy")
# data_val = data_sym(*flatten(coords))
# array[0] = data_val
# numpy.savetxt(f,array)
# update_progress("Writing jinv to file ", i/(nnodes_ie))
# i += 1
#f.close()
##
## Grad1
##
#f = open("grad1.txt","w")
#f.write("Grad1 \n")
#array = numpy.zeros([nnodes_ie,nterms_s])
#i = 1
#for inode in range (0,nnodes_ie):
# for iterm in range(0,nterms_s):
# data_sym | |
<gh_stars>10-100
from __future__ import absolute_import, division, print_function, with_statement
from __future__ import unicode_literals
from deepstreampy.constants import topic as topic_constants
from deepstreampy.constants import actions as action_constants
from deepstreampy.constants import event as event_constants
from deepstreampy.constants import connection_state
from deepstreampy.message import message_parser, message_builder
from deepstreampy.utils import ResubscribeNotifier, SingleNotifier, Listener
from deepstreampy.utils import str_types
from deepstreampy.constants import merge_strategies
from deepstreampy import jsonpath
from pyee import EventEmitter
from tornado import gen, concurrent
import json
from functools import partial
from copy import deepcopy
# Event names used on the records' internal change emitter.
ALL_EVENT = 'ALL_EVENT'  # fired on any change when subscribed without a path
# NOTE(review): the three entry events below are presumably emitted by the
# List subclass; their emit sites are not visible in this chunk — confirm.
ENTRY_ADDED_EVENT = 'ENTRY_ADDED_EVENT'
ENTRY_REMOVED_EVENT = 'ENTRY_REMOVED_EVENT'
ENTRY_MOVED_EVENT = 'ENTRY_MOVED_EVENT'
class Record(EventEmitter, object):
    """Client-side handle for a single deepstream record.

    Keeps a local copy of the record's JSON data (``self._data``), applies
    remote UPDATE/PATCH messages to it, and sends local ``set()`` calls back
    to the server.  Data-change subscriptions are dispatched through a
    separate internal emitter (``self._emitter``), keyed either by JSON path
    or by the special ``ALL_EVENT`` name.
    """

    def __init__(self, name, connection, options, client):
        super(Record, self).__init__()
        self.name = name
        # Reference count maintained by the record handler; discard()
        # decrements it and only unsubscribes when it drops to zero.
        self.usages = 0
        self._connection = connection
        self._client = client
        self._options = options
        self._has_provider = False
        self._is_ready = False
        self._is_destroyed = False
        self._data = {}
        # None until the first READ/UPDATE assigns a server version.
        self._version = None
        # Snapshots taken by _begin_change() for diffing in _complete_change().
        self._old_value = None
        self._old_path_values = None
        # set() calls made before the initial READ response are queued here
        # and replayed in _set_ready().
        self._queued_method_calls = list()
        # version number -> callback awaiting a WRITE_ACKNOWLEDGEMENT.
        self._write_callbacks = {}
        self.merge_strategy = merge_strategies.remote_wins
        # Emitter for data-change subscriptions (per-path and ALL_EVENT).
        self._emitter = EventEmitter()
        if 'merge_strategy' in options:
            self.merge_strategy = options['merge_strategy']
        # Re-issues the READ when a dropped connection is re-established.
        self._resubscribe_notifier = ResubscribeNotifier(
            client, self._send_read)
        record_read_ack_timeout = options.get("recordReadAckTimeout", 15)
        self._read_ack_timeout = client.io_loop.call_later(
            record_read_ack_timeout,
            partial(self._on_timeout, event_constants.ACK_TIMEOUT))
        record_read_timeout = options.get("recordReadTimeout", 15)
        self._read_timeout = client.io_loop.call_later(
            record_read_timeout,
            partial(self._on_timeout, event_constants.RESPONSE_TIMEOUT))
        self._record_delete_timeout = options.get("recordDeleteTimeout", 15)
        self._delete_ack_timeout = None
        self._discard_timeout = None

    def get(self, path=None):
        """
        Returns a copy of either the entire dataset of the record or, if called
        with a path - the value of that path within the record's dataset.
        Returning a copy rather than the actual value helps to prevent the
        record getting out of sync due to unintentional changes to its data.

        Args:
            path (str, optional): a JSON path
        """
        return self._get_path(path)

    def set(self, data, path=None, callback=None):
        """
        Sets the value of either the entire dataset or of a specific path within
        the record and submits the changes to the server.
        If the new data is equal to the current data, nothing happens.

        Args:
            data: the new value of the data
            path (str, optional): a JSON path
            callback (callable): invoked with an error string or None once the
                write is acknowledged
        """
        config = {}
        if callback:
            state = self._client.connection_state
            if state in (connection_state.CLOSED,
                         connection_state.RECONNECTING):
                callback('Connection error: error updating record as '
                         'connection was closed')
                return
            else:
                # Request a server-side write acknowledgement and remember
                # the callback under the next version number.
                config['writeSuccess'] = True
                self._set_up_callback(self.version, callback)
        if path is None and not isinstance(data, (dict, list)):
            # NOTE(review): the "{0}" placeholder is never filled in — the
            # message is missing a .format(data) call.
            raise ValueError(
                "Invalid record data {0}: Record data must be a dict or list.")
        if self._check_destroyed('set'):
            return
        if not self._is_ready:
            # Initial READ not received yet; replay this call once ready.
            self._queued_method_calls.append(partial(self.set, data, path))
            return
        old_value = self._data
        deep_copy = self._options.get('recordDeepCopy', True)
        new_value = jsonpath.set(old_value, path, data, deep_copy)
        if new_value == old_value:
            # No-op write: report success without contacting the server.
            if callback:
                callback(None)
            return
        self._send_update(path, data, config)
        self._apply_change(new_value)

    def subscribe(self, callback, path=None, trigger_now=False):
        """
        Subscribe to changes to the record's dataset.
        When called with a path it will only subscribe to updates to that path,
        rather than the entire record.

        Args:
            callback (callable)
            path (str, optional): a JSON path to subscribe for
            trigger_now (bool): specifies whether the callback should be invoked
                immediately with the current value
        """
        if self._check_destroyed('subscribe'):
            return
        if path is None:
            event = ALL_EVENT
        else:
            event = path
        self._emitter.on(event, callback)
        if trigger_now and self._is_ready:
            if path:
                callback(jsonpath.get(self._data, path, True))
            else:
                callback(self._data)

    def unsubscribe(self, callback, path=None):
        """
        Remove a subscription that was previously made.

        Args:
            callback (callable)
            path (str, optional): the JSON path to unsubscribe for
        """
        if self._check_destroyed('unsubscribe'):
            return
        if path is None:
            event = ALL_EVENT
        else:
            event = path
        self._emitter.remove_listener(event, callback)

    def discard(self):
        """
        Remove all change listeners and notify the server that the client is no
        longer interested in updates for this record.

        Returns a Future that resolves once the UNSUBSCRIBE message is sent.
        """
        future = concurrent.Future()
        if self._check_destroyed('discard'):
            return

        def ready_callback(record):
            self.usages -= 1
            # Only actually unsubscribe once no user of the record remains.
            if self.usages <= 0:
                self.emit('destroyPending')
                self._discard_timeout = self._client.io_loop.call_later(
                    1, partial(self._on_timeout, event_constants.ACK_TIMEOUT))
                send_future = self._connection.send_message(
                    topic_constants.RECORD, action_constants.UNSUBSCRIBE,
                    [self.name])
                send_future.add_done_callback(
                    lambda f: future.set_result(f.result()))
        self.when_ready(ready_callback)
        return future

    def delete(self):
        """
        Delete the record on the server.

        Returns a Future that resolves once the DELETE message is sent.
        """
        future = concurrent.Future()
        if self._check_destroyed('delete'):
            return

        def ready_callback(record):
            self.emit('destroyPending')
            self._delete_ack_timeout = self._client.io_loop.call_later(
                self._record_delete_timeout,
                partial(self._on_timeout, event_constants.DELETE_TIMEOUT))
            send_future = self._connection.send_message(
                topic_constants.RECORD, action_constants.DELETE, [self.name])
            send_future.add_done_callback(
                lambda f: future.set_result(f.result()))
        self.when_ready(ready_callback)
        return future

    def when_ready(self, callback):
        """Invoke *callback(self)* now if ready, otherwise once 'ready' fires."""
        if self._is_ready:
            callback(self)
        else:
            self.once('ready', partial(callback, self))

    def _set_up_callback(self, current_version, callback):
        """Store *callback* under the version the pending write will produce."""
        new_version = (current_version or 0) + 1
        self._write_callbacks[new_version] = callback

    def _on_message(self, message):
        """Dispatch an incoming server message for this record by action."""
        action = message['action']
        if action == action_constants.READ:
            if self.version is None:
                # First response: cancel the read timeout and load the data.
                self._client.io_loop.remove_timeout(self._read_timeout)
                self._on_read(message)
            else:
                self._apply_update(message)
        elif action == action_constants.ACK:
            self._process_ack_message(message)
        elif action in (action_constants.UPDATE, action_constants.PATCH):
            self._apply_update(message)
        elif action == action_constants.WRITE_ACKNOWLEDGEMENT:
            # Fire and drop the callbacks registered for each acked version.
            versions = json.loads(message['data'][1])
            for version in versions:
                if version in self._write_callbacks:
                    callback = self._write_callbacks[version]
                    callback(
                        message_parser.convert_typed(message['data'][2],
                                                     self._client))
                    del self._write_callbacks[version]
        elif message['data'][0] == event_constants.VERSION_EXISTS:
            # Version conflict: attempt a merge via the configured strategy.
            self._recover_record(message['data'][2],
                                 json.loads(message['data'][3]), message)
        elif action == event_constants.MESSAGE_DENIED:
            self._clear_timeouts()
        elif action == action_constants.SUBSCRIPTION_HAS_PROVIDER:
            has_provider = message_parser.convert_typed(
                message['data'][1], self._client)
            self._has_provider = has_provider
            self.emit('hasProviderChanged', has_provider)

    def _recover_record(self, remote_version, remote_data, message):
        """Resolve a version conflict with the merge strategy, or emit an error."""
        if self.merge_strategy:
            self.merge_strategy(self, remote_data, remote_version,
                                partial(self._on_record_recovered,
                                        remote_version, remote_data, message))
        else:
            self.emit('error', event_constants.VERSION_EXISTS,
                      'received update for {0} but version is {1}'.format(
                          remote_version, self.version))

    def _on_record_recovered(self, remote_version, remote_data, message, error,
                             data):
        """Apply the merge strategy's result; re-send when it differs from remote."""
        if not error:
            old_version = self.version
            self._version = int(remote_version)
            old_value = self._data
            new_value = jsonpath.set(old_value, None, data, True)
            if data == remote_data:
                # Merge chose the remote state: apply locally, ack any
                # pending write callback, and stop.
                self._apply_change(data)
                callback = self._write_callbacks.get(self.version, None)
                if callback:
                    callback(None)
                if remote_version in self._write_callbacks.keys():
                    del self._write_callbacks[remote_version]
                return
            # Merge produced different data: push it as a new update, moving
            # any pending write callback to the new version.
            config = message['data'][4] if len(message['data']) >= 5 else None
            if config and json.loads(config)['writeSuccess']:
                callback = self._write_callbacks[old_version]
                del self._write_callbacks[old_version]
                self._set_up_callback(self.version, callback)
            self._send_update(None, data, config)
            self._apply_change(new_value)
        else:
            self.emit('error', event_constants.VERSION_EXISTS,
                      'received update for {0} but version is {1}'.format(
                          remote_version, self.version))

    def _process_ack_message(self, message):
        """Handle SUBSCRIBE/DELETE/UNSUBSCRIBE acknowledgements."""
        acknowledge_action = message['data'][0]
        if acknowledge_action == action_constants.SUBSCRIBE:
            self._client.io_loop.remove_timeout(self._read_ack_timeout)
        elif acknowledge_action == action_constants.DELETE:
            self.emit('delete')
            self._destroy()
        elif acknowledge_action == action_constants.UNSUBSCRIBE:
            self.emit('discard')
            self._destroy()

    def _apply_update(self, message):
        """Apply a remote UPDATE (full data) or PATCH (single path) message."""
        version = int(message['data'][1])
        if message['action'] == action_constants.PATCH:
            data = message_parser.convert_typed(message['data'][3],
                                                self._client)
        else:
            data = json.loads(message['data'][2])
        if self.version is None:
            self._version = version
        elif self.version + 1 != version:
            # Gap in the version sequence: request a fresh SNAPSHOT for a
            # patch, or try to merge for a full update.
            if message['action'] == action_constants.PATCH:
                self._connection.send_message(topic_constants.RECORD,
                                              action_constants.SNAPSHOT,
                                              [self.name])
            else:
                self._recover_record(version, data, message)
            return
        self._begin_change()
        self._version = version
        if message['action'] == action_constants.PATCH:
            jsonpath.set(self._data, message['data'][2], data, False)
        else:
            self._data = data
        self._complete_change()

    def _send_update(self, path, data, config):
        """Increment the version and send UPDATE (no path) or PATCH (path)."""
        self._version += 1
        if not path:
            if config:
                msg_data = [self.name, self.version, data, config]
            else:
                msg_data = [self.name, self.version, data]
            self._connection.send_message(topic_constants.RECORD,
                                          action_constants.UPDATE, msg_data)
        else:
            if config:
                msg_data = [
                    self.name, self.version, path,
                    message_builder.typed(data), config
                ]
            else:
                msg_data = [
                    self.name, self.version, path,
                    message_builder.typed(data)
                ]
            self._connection.send_message(topic_constants.RECORD,
                                          action_constants.PATCH, msg_data)

    def _apply_change(self, new_data):
        """Replace the local data and emit change events for affected paths."""
        if self.is_destroyed:
            return
        old_data = self._data
        self._data = new_data
        if not self._emitter._events:
            return
        paths = self._emitter._events.keys()
        for path in paths:
            # 'new_listener' is pyee's own bookkeeping event — skip it.
            if path == 'new_listener':
                continue
            if path == 'ALL_EVENT' and new_data != old_data:
                self._emitter.emit(ALL_EVENT, new_data)
                continue
            new_value = jsonpath.get(new_data, path, False)
            old_value = jsonpath.get(old_data, path, False)
            if new_value != old_value:
                self._emitter.emit(path, self._get_path(path))

    def _on_read(self, message):
        """Load the initial data/version from the READ response and go ready."""
        self._begin_change()
        self._version = int(message['data'][1])
        self._data = json.loads(message['data'][2])
        self._complete_change()
        self._set_ready()

    def _set_ready(self):
        """Mark the record ready, replay queued set() calls, emit 'ready'."""
        self._is_ready = True
        for call in self._queued_method_calls:
            call()
        self._queued_method_calls = []
        self.emit('ready')

    def _send_read(self):
        """
        Sends the read message, either initially at record creation or after a
        lost connection has been re-established.
        """
        return self._connection.send_message(
            topic_constants.RECORD, action_constants.CREATEORREAD, [self.name])

    def _get_path(self, path=None):
        """Return the data at *path* (or all data), copying if configured."""
        deep_copy = self._options.get('recordDeepCopy', False)
        return jsonpath.get(self._data, path, deep_copy)

    def _begin_change(self):
        """Snapshot current values for every subscribed path before a change."""
        if not self._emitter._events:
            return
        # Hacky way of getting active listeners, except a special one
        paths = [
            event for event in self._emitter._events.keys()
            if event != 'new_listener'
        ]
        self._old_path_values = dict()
        if self._emitter.listeners(ALL_EVENT):
            self._old_value = deepcopy(self._data)
        for path in paths:
            if path != ALL_EVENT:
                self._old_path_values[path] = jsonpath.get(
                    self._data, path, True)

    def _complete_change(self):
        """Emit events for paths whose value changed since _begin_change()."""
        if (self._emitter.listeners(ALL_EVENT)
                and self._old_value != self._data):
            self._emitter.emit(ALL_EVENT, self._data)
        self._old_value = None
        if not self._old_path_values:
            return
        for path in self._old_path_values:
            current_value = self._get_path(
                path)  #jsonpath.get(self._data, path, True)
            if current_value != self._old_path_values[path]:
                self._emitter.emit(path, current_value)
        self._old_path_values = None

    def _clear_timeouts(self):
        """Cancel any outstanding read/discard/delete timeouts."""
        if self._read_ack_timeout:
            self._client.io_loop.remove_timeout(self._read_ack_timeout)
        if self._discard_timeout:
            self._client.io_loop.remove_timeout(self._discard_timeout)
        if self._delete_ack_timeout:
            self._client.io_loop.remove_timeout(self._delete_ack_timeout)

    def _check_destroyed(self, method_name):
        """Emit an error and return True if the record is already destroyed."""
        if self._is_destroyed:
            self.emit(
                'error',
                "Can't invoke {0}. Record {1} is already destroyed".format(
                    method_name, self.name))
            return True
        return False

    def _on_timeout(self, timeout_type):
        self._clear_timeouts()
        self.emit('error', timeout_type)

    def _destroy(self):
        """Tear down listeners, timers, and references after delete/discard."""
        self._clear_timeouts()
        self._emitter.remove_all_listeners()
        self._resubscribe_notifier.destroy()
        self._is_destroyed = True
        self._is_ready = False
        self._client = None
        self._connection = None

    @property
    def has_provider(self):
        """bool: True while the server reports an active provider."""
        return self._has_provider

    @property
    def is_destroyed(self):
        """bool: True once the record has been deleted or discarded."""
        return self._is_destroyed

    @property
    def is_ready(self):
        """bool: True once the initial READ response has been applied."""
        return self._is_ready

    @property
    def version(self):
        """int or None: latest record version known to this client."""
        return self._version
class List(Record):
def __init__(self, name, connection, options, client):
super(List, self).__init__(name, connection, options, client)
self._before_structure = None
self._has_add_listener = None
self._has_remove_listener = None
self._has_move_listener = None
def get(self):
"""
Return the list of entries or an empty array if the list hasn't been
populated yet.
"""
entries = super(List, self).get()
if not | |
# (c) Copyright IBM Corp. 2020. All Rights Reserved.
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
import json
import csv
import re
import sys
import time
if sys.version_info.major < 3:
from StringIO import StringIO
from io import BytesIO
else:
from io import StringIO, BytesIO
from collections import OrderedDict
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, \
FunctionResult, FunctionError
from fn_datatable_utils.util.helper import RESDatatable, get_function_input
from resilient_lib import ResultPayload, get_file_attachment, get_file_attachment_name
# Name of this integration package; also the section key used in opts/app.config.
PACKAGE_NAME = "fn_datatable_utils"
LOG = logging.getLogger(__name__)
# Date-format helpers: TZ_FORMAT matches a %z/%Z directive inside a
# strptime-style format string; TZ_VALUE matches a numeric UTC offset such
# as "+0100". NOTE(review): their consumers (presumably date-to-timestamp
# conversion) are outside this chunk — confirm before relying on this.
TZ_FORMAT = re.compile(r"%[zZ]")
TZ_VALUE = re.compile(r"[-+]\d{4}")
class FunctionPayload(object):
    """Payload sent back to the UI and available in the post-processing script.

    Attributes mirror the function call: ``success`` flags the outcome,
    ``inputs`` echoes the supplied arguments, ``rows`` holds row results.
    """

    def __init__(self, inputs):
        # Assume success until an error flips this flag.
        self.success = True
        self.inputs = inputs
        self.rows = None

    def as_dict(self):
        """Return the payload's attributes as a plain dictionary."""
        return vars(self)
class FunctionComponent(ResilientComponent):
    """Component that implements Resilient function 'dt_utils_create_csv_table'"""

    def __init__(self, opts):
        """constructor provides access to the configuration options"""
        super(FunctionComponent, self).__init__(opts)
        self.options = opts.get(PACKAGE_NAME, {})

    @handler("reload")
    def _reload(self, event, opts):
        """Configuration options have changed, save new values"""
        self.options = opts.get(PACKAGE_NAME, {})

    @function("dt_utils_create_csv_table")
    def _dt_utils_create_csv_table_function(self, event, *args, **kwargs):
        """Function: Create a utility function to take csv data and add the
        results to a named datatable.

        Yields StatusMessage progress updates and finally a FunctionResult with
        the number of rows added / in error; any exception is converted to a
        FunctionError.
        """
        try:
            # Instantiate new Resilient API object
            res_client = self.rest_client()
            inputs = {
                "incident_id": get_function_input(kwargs, "incident_id", optional=False),  # number (required)
                "attachment_id": get_function_input(kwargs, "attachment_id", optional=True),  # number (optional)
                "has_headers": get_function_input(kwargs, "dt_has_headers", optional=False),  # boolean (optional)
                "csv_data": get_function_input(kwargs, "dt_csv_data", optional=True),  # text (optional)
                "datable_name": get_function_input(kwargs, "dt_datable_name", optional=False),  # text (required)
                "mapping_table": get_function_input(kwargs, "dt_mapping_table", optional=False),  # text (optional)
                "date_time_format": get_function_input(kwargs, "dt_date_time_format", optional=True),  # text (optional)
                "start_row": get_function_input(kwargs, "dt_start_row", optional=True),  # number (optional)
                "max_rows": get_function_input(kwargs, "dt_max_rows", optional=True),  # number (optional)
            }
            LOG.info(inputs)
            yield StatusMessage("Starting ...")
            mapping_table = convert_json(inputs['mapping_table'])
            if not mapping_table:
                # NOTE(review): the "%s" here is never interpolated — ValueError
                # receives two positional args, not a formatted message.
                raise ValueError(u"Unable to convert mapping_table to json: %s", inputs['mapping_table'])
            # Create payload dict with inputs
            rp = ResultPayload(PACKAGE_NAME, **kwargs)
            # Exactly one of attachment_id / csv_data must be supplied.
            if (inputs["attachment_id"] and inputs["csv_data"]) or \
                    not (inputs["attachment_id"] or inputs["csv_data"]):
                raise ValueError("Specify either attachment_id or csv_data")
            # Either an attachment ID or CSV Data is needed to be able to add rows
            if inputs["attachment_id"]:
                attachment_name = get_file_attachment_name(res_client, inputs['incident_id'],
                                                           attachment_id=inputs["attachment_id"])
                b_csv_data = get_file_attachment(res_client, inputs['incident_id'],
                                                 attachment_id=inputs["attachment_id"])
                csv_data = b_csv_data.decode("utf-8")
                # py2's csv module wants bytes; py3's wants text.
                if sys.version_info.major < 3:
                    inline_data = BytesIO(b_csv_data)
                else:
                    inline_data = StringIO(csv_data)
            else:
                attachment_name = None
                csv_data = inputs["csv_data"]
                if sys.version_info.major < 3:
                    inline_data = StringIO(csv_data.encode("utf-8"))
                else:
                    inline_data = StringIO(csv_data)
            datatable = RESDatatable(res_client, inputs["incident_id"], inputs["datable_name"])
            # Retrieve the column names for the datatable, and their data_types,
            # to compare against what the user provides, and attempt data conversion, if necessary
            fields = datatable.get_dt_headers()
            dt_ordered_columns = {fields[field]['order']: (fields[field]['name'], fields[field]['input_type']) for field in fields}
            # ordered column names if we need to assign the headers to the columns in column order
            dt_column_names = OrderedDict([dt_ordered_columns[field] for field in sorted(dt_ordered_columns.keys())])
            # different readers if we have headers or not
            dialect = csv.Sniffer().sniff(csv_data[0:csv_data.find('\n')])  # limit analysis to first row
            # py2 needs changes to dialect to avoid unicode attributes
            if sys.version_info.major < 3:
                for attr in dir(dialect):
                    a = getattr(dialect, attr)
                    if type(a) == unicode:
                        setattr(dialect, attr, bytes(a))
            LOG.debug(dialect.__dict__)
            if inputs["has_headers"]:
                reader = csv.DictReader(inline_data, dialect=dialect)  # each row is a dictionary keyed by the column name
                csv_headers = reader.fieldnames  # just the headers
            else:
                reader = csv.reader(inline_data, dialect=dialect)  # each row is a list of values
                csv_headers = []
            mapping_table = build_mapping_table(mapping_table, csv_headers, dt_column_names)
            LOG.debug("csv headers to datatable columns: %s", mapping_table)
            # perform the api calls to the datatable
            number_of_added_rows, number_of_rows_with_errors = self.add_to_datatable(reader, datatable,
                                                                                    mapping_table, dt_column_names,
                                                                                    inputs['date_time_format'],
                                                                                    inputs['start_row'],
                                                                                    inputs['max_rows'])
            LOG.info("Number of rows added: %s ", number_of_added_rows)
            LOG.info("Number of rows that could not be added: %s", number_of_rows_with_errors)
            row_data = {
                "data_source": attachment_name if attachment_name else "CSV data",
                "rows_added": number_of_added_rows,
                "rows_with_errors": number_of_rows_with_errors
            }
            results = rp.done(True, row_data)
            yield StatusMessage("Ending ...")
            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception as err:
            yield FunctionError(err)

    def add_to_datatable(self, reader, datatable, mapping_table, dt_column_names, date_format,
                         start_row, max_rows):
        """add the csv data to the named datatable. Filter out fields which don't map to the datatable columns
        and convert the data to the column's format, as necessary

        Args:
            reader (csv reader): reader for csv data
            datatable (object): object to handle datatable API calls
            mapping_table (dict): provided by the function
            dt_column_names (OrderedDict): column_name: column_type
            date_format (str): '%Y-%m-%dT%H:%M:%S%z'
            start_row (int): Number of row to start adding or None
            max_rows (int): None or max number to add

        Returns:
            [int, int]: number_of_added_rows, number_of_rows_with_errors
        """
        number_of_added_rows = 0
        number_of_rows_with_errors = 0
        # 1-based row counter, compared against the user-supplied start_row.
        indx = 1
        for row in reader:
            LOG.debug("%s: %s", indx, row)
            if not start_row or (start_row and indx >= start_row):
                cells_data = build_row(row, mapping_table, dt_column_names, date_format)
                LOG.debug("cells: %s", cells_data)
                new_row = datatable.dt_add_rows(cells_data)
                # The API reports per-row failures inside the response body.
                if "error" in new_row:
                    number_of_rows_with_errors += 1
                else:
                    number_of_added_rows += 1
                if max_rows and number_of_added_rows >= max_rows:
                    break
            indx += 1
        return number_of_added_rows, number_of_rows_with_errors
def build_mapping_table(input_mapping_table, csv_headers, dt_column_names):
    """Build a mapping table of the columns and datatable columns which are possible to map.

    Args:
        input_mapping_table (dict or list): provided to the function
        csv_headers (list): list of csv_headers if they present
        dt_column_names (OrderedDict): column_name: column_type

    Returns:
        [dict]: valid mapping table with incorrect datatable columns filtered out. if input_mapping_table is None,
          keys in returned dict are positional values: 0, 1, 2, 3
    """
    # List form: entries are datatable column names in csv-column order.
    if isinstance(input_mapping_table, list):
        result = {}
        for indx, col_name in enumerate(input_mapping_table):
            if col_name not in dt_column_names:
                LOG.warning("Skipping datatable column not found. Entry: %s, column name: %s", indx, col_name)
                continue
            if not csv_headers:
                # No header row: key by positional index.
                result[indx] = col_name
            elif indx < len(csv_headers):
                result[csv_headers[indx]] = col_name
            else:
                LOG.warning("csv header index: %s larger than list of headers: %s", indx, csv_headers)
        return result
    # Dict form: keep only entries whose target column exists in the datatable.
    result = dict(input_mapping_table)
    for csv_header, dt_column_name in input_mapping_table.items():
        if dt_column_name not in dt_column_names:
            LOG.warning(u"Column '%s' not found in datatable. Ignoring.", dt_column_name)
            del result[csv_header]
    return result
def build_row(csv_row, matching_table, dt_column_names, date_format):
    """
    Build the json structure needed to import a datatable row into Resilient.
    The matching_table is used to identify the datatable column. If matching_table keys are integers,
    refer to the dt_column_names by index location.

    Args:
        csv_row (dict or list): column data to add to the datatable. A dict
            (keyed by header name) when the csv was read with headers via
            csv.DictReader, a plain list of values otherwise.
        matching_table (dict): "csv_hdr_name":"column_name" (or index:"column_name")
        dt_column_names (OrderedDict): column_name: column_type
        date_format (str): For converting string-based date fields. Ex. '%Y-%m-%dT%H:%M:%S%z'

    Returns:
        [dict]: set of columns and values to import into the datatable
    """
    row = {}
    # Bug fix: the previous hand-maintained counter was not incremented on the
    # `continue` path below, so positional (no-header) mappings drifted after
    # the first unmapped column. enumerate() keeps the index accurate.
    for indx, csv_column in enumerate(csv_row):
        # excel spreadsheets can have BOM ("ByteOrder Mark")
        try:
            if csv_column.startswith(u'\ufeff'):
                csv_column_encoded = csv_column[1:]
            else:
                csv_column_encoded = csv_column
        except UnicodeDecodeError:
            # py2
            if csv_column.startswith('\xef\xbb\xbf'):
                csv_column_encoded = csv_column[3:]
            else:
                csv_column_encoded = csv_column
        try:
            # header-based mapping: csv_row is a dict, csv_column a header name
            if csv_column_encoded in matching_table:
                dt_col_name = matching_table[csv_column_encoded]
                csv_value = csv_row[csv_column]
            # index-based mapping (no headers): csv_column is the value itself
            elif indx in matching_table:
                dt_col_name = matching_table[indx]
                csv_value = csv_column
            else:
                LOG.debug(u"Unable to find mapping entry for csv column: %s", csv_column_encoded)
                continue
            converted_value = convert_field(csv_value,
                                            dt_column_names[dt_col_name],
                                            date_format)
            if dt_col_name in row:
                LOG.warning("Replacing value: '%s' with '%s' for column: '%s'",
                            row.get(dt_col_name),
                            converted_value,
                            dt_col_name)
            row[dt_col_name] = {"value": converted_value}
        except Exception as err:
            # Best-effort per-cell conversion: log and move to the next column.
            LOG.error(u"%s, indx: %s, column: %s, mapping_table: %s",
                      err, indx, csv_column, matching_table)
    return row
def convert_json(str_json):
    """convert string-encoded json

    Args:
        str_json (string): JSON document as a string

    Returns:
        dictionary: converted json or None if an error occurred
    """
    try:
        return json.loads(str_json)
    except (ValueError, TypeError):
        # ValueError covers json.JSONDecodeError (malformed JSON); TypeError
        # covers non-string input such as None. The previous bare `except:`
        # also swallowed SystemExit/KeyboardInterrupt.
        return None
def convert_field(value, column_type, date_format):
"""convert values based on the datatable column type
Args:
value (str, int, bool, list): value to convert. No conversion returns value unchanged
column_type (str): column type: text, number, boolean, datetimepicker, select, etc.
date_format (str): when string-based date values exist, the format of the string:
ex. "%d/%m/%YT%H:%M:%S%Z"
Returns:
multiple: converted value, if needed or None if a conversion error occurred
"""
if not value:
return None
if column_type in ["text", "textarea"]:
return str(value)
if column_type.startswith("date") and not isinstance(value, int):
return date_to_timestamp(value, date_format)
if column_type == "number" and not isinstance(value, int):
try:
return int(value)
except:
LOG.error(u"Unable to convert value to int: %s", value)
return None
if column_type == "boolean" and not isinstance(value, (bool, int)):
return value.lower() in ['true', '1', 'yes', 'y']
if column_type == "multiselect":
return [a.strip() for a | |
"""
SPADE is the combination of a mining technique and multiple statistical tests
to detect and assess the statistical significance of repeated occurrences of
spike sequences (spatio-temporal patterns, STP).
Given a list of Neo Spiketrain objects, assumed to be recorded in parallel, the
SPADE analysis can be applied as demonstrated in this short toy example of 10
artificial spike trains exhibiting fully synchronous events of order 10.
This modules relies on the implementation of the fp-growth algorithm contained
in the file fim.so which can be found here (http://www.borgelt.net/pyfim.html)
and should be available in the spade_src folder (elephant/spade_src/).
If the fim.so module is not present in the correct location or cannot be
imported (only available for linux OS) SPADE will make use of a python
implementation of the fast fca algorithm contained in
elephant/spade_src/fast_fca.py, which is about 10 times slower.
import elephant.spade
import elephant.spike_train_generation
import quantities as pq
# Generate correlated data
sts = elephant.spike_train_generation.cpp(
rate=5*pq.Hz, A=[0]+[0.99]+[0]*9+[0.01], t_stop=10*pq.s)
# Mining patterns with SPADE using a binsize of 1 ms and a window length of 1
# bin (i.e., detecting only synchronous patterns).
patterns = spade.spade(
data=sts, binsize=1*pq.ms, winlen=1, dither=5*pq.ms,
min_spikes=10, n_surr=10, psr_param=[0,0,3],
output_format='patterns')['patterns'][0]
# Plotting
plt.figure()
for neu in patterns['neurons']:
if neu == 0:
plt.plot(
patterns['times'], [neu]*len(patterns['times']), 'ro',
label='pattern')
else:
plt.plot(
patterns['times'], [neu] * len(patterns['times']), 'ro')
# Raster plot of the data
for st_idx, st in enumerate(sts):
if st_idx == 0:
plt.plot(st.rescale(pq.ms), [st_idx] * len(st), 'k.', label='spikes')
else:
plt.plot(st.rescale(pq.ms), [st_idx] * len(st), 'k.')
plt.ylim([-1, len(sts)])
plt.xlabel('time (ms)')
plt.ylabel('neurons ids')
plt.legend()
plt.show()
:copyright: Copyright 2017 by the Elephant team, see `doc/authors.rst`.
:license: BSD, see LICENSE.txt for details.
"""
import time
import warnings
import operator
from itertools import chain, combinations
from functools import reduce
from collections import defaultdict
import numpy as np
import neo
import quantities as pq
from scipy import sparse
import elephant.spike_train_surrogates as surr
import elephant.conversion as conv
from elephant.spade_src import fast_fca
warnings.simplefilter('once', UserWarning)
try:
from mpi4py import MPI # for parallelized routines
HAVE_MPI = True
except ImportError: # pragma: no cover
HAVE_MPI = False
try:
from elephant.spade_src import fim
HAVE_FIM = True
except ImportError: # pragma: no cover
HAVE_FIM = False
def spade(data, binsize, winlen, min_spikes=2, min_occ=2, max_spikes=None,
max_occ=None, min_neu=1, n_subsets=0, delta=0, epsilon=0,
stability_thresh=None, n_surr=0, dither=15 * pq.ms, spectrum='#',
alpha=1, stat_corr='fdr', psr_param=None, output_format='concepts'):
r"""
Perform the SPADE [1,2] analysis for the parallel spike trains given in the
input. The data are discretized with a temporal resolution equal binsize
in a sliding window of winlen*binsize milliseconds.
First, spike patterns are mined from the data using a technique termed
frequent itemset mining (FIM) or formal concept analysis (FCA). In this
framework, a particular spatio-temporal spike pattern is termed a
"concept". It is then possible to compute the stability and the signature
significance of all pattern candidates. In a final step, it is possible to
select a stability threshold and the significance level to select only
stable/significant concepts.
Parameters
----------
data: list of neo.SpikeTrains
List containing the parallel spike trains to analyze
binsize: Quantity
The time precision used to discretize the data (binning).
winlen: int (positive)
The size (number of bins) of the sliding window used for the analysis.
The maximal length of a pattern (delay between first and last spike) is
then given by winlen*binsize
min_spikes: int (positive)
Minimum number of spikes of a sequence to be considered a pattern.
Default: 2
min_occ: int (positive)
Minimum number of occurrences of a sequence to be considered as a
pattern.
Default: 2
max_spikes: int (positive)
Maximum number of spikes of a sequence to be considered a pattern. If
None no maximal number of spikes is considered.
Default: None
max_occ: int (positive)
Maximum number of occurrences of a sequence to be considered as a
pattern. If None, no maximal number of occurrences is considered.
Default: None
min_neu: int (positive)
        Minimum number of neurons in a sequence to be considered a pattern.
Default: 1
n_subsets: int
Number of subsets of a concept used to approximate its stability. If
n_subset is set to 0 the stability is not computed. If, however,
for parameters delta and epsilon (see below) delta + epsilon == 0,
then an optimal n_subsets is calculated according to the formula given
in Babin, Kuznetsov (2012), proposition 6:
        ..math::
            n_{subset} = \frac{1}{2\epsilon^2} \ln\left(\frac{2}{\delta}\right) + 1
Default:0
delta: float
        delta: the approximation holds with probability of at least :math:`1 - \delta`
Default: 0
epsilon: float
epsilon: absolute error
Default: 0
stability_thresh: None or list of float
List containing the stability thresholds used to filter the concepts.
If stab_thr is None, then the concepts are not filtered. Otherwise,
only concepts with intensional stability > stab_thr[0] or extensional
stability > stab_thr[1] are returned and used for further analysis
within SPADE.
Default: None
n_surr: int
Number of surrogates to generate to compute the p-value spectrum.
This number should be large (n_surr>=1000 is recommended for 100
spike trains in *sts*). If n_surr is 0, then the p-value spectrum is
not computed.
Default: 0
dither: Quantity
Amount of spike time dithering for creating the surrogates for
filtering the pattern spectrum. A spike at time t is placed randomly
within ]t-dither, t+dither[ (see also
elephant.spike_train_surrogates.dither_spikes).
Default: 15*pq.ms
spectrum: str
Define the signature of the patterns, it can assume values:
'#': pattern spectrum using the as signature the pair:
(number of spikes, number of occurrences)
'3d#': pattern spectrum using the as signature the triplets:
(number of spikes, number of occurrence, difference between last
and first spike of the pattern)
Default: '#'
alpha: float
The significance level of the hypothesis tests performed. If alpha=1
all the concepts are returned. If 0<alpha<1 the concepts
are filtered according to their signature in the p-value spectrum.
Default: 1
stat_corr: str
Statistical correction to be applied:
'', 'no' : no statistical correction
'f', 'fdr' : false discovery rate
'b', 'bonf': Bonferroni correction
'hb', 'holm_bonf': Holm-Bonferroni correction
Default: 'fdr'
psr_param: None or list of int
This list contains parameters used in the pattern spectrum filtering:
psr_param[0]: correction parameter for subset filtering
(see h_subset_filtering in pattern_set_reduction()).
psr_param[1]: correction parameter for superset filtering
(see k_superset_filtering in pattern_set_reduction()).
psr_param[2]: correction parameter for covered-spikes criterion
(see l_covered_spikes in pattern_set_reduction()).
output_format: str
distinguish the format of the output (see Returns). Can assume values
'concepts' and 'patterns'.
Returns
-------
The output depends on the value of the parameter output_format.
If output_format is 'concepts':
output: dict
Dictionary containing the following keys:
patterns: tuple
Each element of the tuple corresponds to a pattern and is
itself a tuple consisting of:
(spikes in the pattern, occurrences of the patterns)
For details see function concepts_mining().
If n_subsets>0:
(spikes in the pattern, occurrences of the patterns,
(intensional stability, extensional stability))
corresponding pvalue
The patterns are filtered depending on the parameters in input:
If stability_thresh==None and alpha==1:
output['patterns'] contains all the candidates patterns
(all concepts mined with the fca algorithm)
If stability_thresh!=None and alpha==1:
output contains only patterns candidates with:
intensional stability>stability_thresh[0] or
extensional stability>stability_thresh[1]
If stability_thresh==None and alpha!=1:
output contains only pattern candidates with a signature
significant in respect the significance level alpha corrected
If stability_thresh!=None and alpha!=1:
output['patterns'] contains only pattern candidates with a
signature significant in respect the significance level alpha
corrected and such that:
intensional stability>stability_thresh[0] or
extensional stability>stability_thresh[1]
In addition, output['non_sgnf_sgnt'] contains the list of
non-significant signature for the significance level alpha.
If n_surr>0:
output['pvalue_spectrum'] contains a tuple of signatures and
the corresponding p-value.
If output_format is 'patterns':
output: list
List of dictionaries. Each dictionary corresponds to a patterns and
has the following keys:
neurons: array containing the indices of the neurons of the
pattern.
lags: array containing the lags (integers corresponding to the
number of bins) between the spikes of the patterns. The
first lag is always assumed to be 0 and corresponds to the
first spike ['times'] array containing the times.
(integers corresponding to the bin idx) of the occurrences of the
patterns
signature: tuple containing two integers:
(number of spikes of the patterns,
number of occurrences of the pattern)
pvalue: the p-value corresponding to the pattern. If n_surr==0 the
p-values are set to 0.0.
Notes
-----
If detected, this function will utilize MPI to parallelize the analysis.
Example
-------
The following applies SPADE | |
tick_delta,
tikz_str=""):
x, y = start_cs
return [
vertical_line_segment([x + i * tick_spacing, y], tick_delta, tikz_str)
for i in range(num_ticks)
]
def vertical_ticks(start_cs, num_ticks, tick_spacing, tick_delta, tikz_str=""):
    """Return `num_ticks` tick-mark segments stacked vertically from `start_cs`."""
    x, y = start_cs
    return [
        # NOTE(review): ticks along a vertical axis are conventionally drawn as
        # horizontal marks, yet this calls vertical_line_segment exactly like
        # the horizontal-ticks variant above — confirm this is intentional and
        # not a copy-paste of the horizontal version.
        vertical_line_segment([x, y + i * tick_spacing], tick_delta, tikz_str)
        for i in range(num_ticks)
    ]
def arrow(shaft_width,
          shaft_height,
          head_width,
          head_height,
          angle,
          tikz_str=""):
    """Build a closed-path arrow (rectangular shaft plus triangular head).

    The outline is laid out pointing right from the origin and then rotated
    by `angle` degrees about the arrow's midpoint.
    """
    half_shaft = shaft_height / 2.0
    half_head = head_height / 2.0
    tip_x = shaft_width + head_width
    outline = [
        [0.0, half_shaft],
        [shaft_width, half_shaft],
        [shaft_width, half_head],
        [tip_x, 0.0],
        [shaft_width, -half_head],
        [shaft_width, -half_shaft],
        [0.0, -half_shaft],
    ]
    # Rotate around the geometric midpoint of the arrow's full length.
    pivot_cs = [tip_x / 2.0, 0.0]
    rotated = [rotate_coords(cs, pivot_cs, angle) for cs in outline]
    return closed_path(rotated, tikz_str)
### helper functions for placing coords
def coords_on_circle(center_cs, radius, angle):
    """Return the point at `angle` degrees on the circle around `center_cs`."""
    on_axis_cs = translate_coords_horizontally(center_cs, radius)
    return rotate_coords(on_axis_cs, center_cs, angle)
def antipodal_coords(cs, radius, angle):
    """Return the two diametrically opposite circle points at `angle` and `angle + 180`."""
    return [
        coords_on_circle(cs, radius, angle + offset)
        for offset in (0.0, 180.0)
    ]
def equispaced_coords_on_circle(center_cs, radius, n):
    """Return `n` evenly spaced points on the circle, starting at angle 0."""
    step = 360.0 / n
    return [coords_on_circle(center_cs, radius, k * step) for k in range(n)]
def coords_on_ellipse(center_cs, horizontal_radius, vertical_radius, angle):
    """Return the point at `angle` on the given ellipse (not yet implemented)."""
    raise NotImplementedError
def are_coords_inside_rectangle(cs, top_left_cs, bottom_right_cs):
    """Return True iff `cs` lies inside or on the border of the axis-aligned rectangle."""
    x, y = cs
    return (top_left_cs[0] <= x <= bottom_right_cs[0] and
            bottom_right_cs[1] <= y <= top_left_cs[1])
def coords_on_rectangle(top_left_cs, bottom_right_cs, angle):
    """Return the point where a ray from the rectangle's center at `angle` meets its border.

    The border side is picked by comparing `angle` against the angle of the
    diagonal from the center to the top-right corner; the four branches below
    cover the right, top, left and bottom edges respectively.
    """
    angle = normalize_angle_to_standard_interval(angle)
    center_cs = midway_coords(top_left_cs, bottom_right_cs)
    # A point at distance 1 along the ray; only fixes the ray's direction.
    end_cs = coords_on_circle(center_cs, 1.0, angle)
    top_right_cs = top_right_coords(top_left_cs, bottom_right_cs)
    delta_angle = vector_to_angle([center_cs, top_right_cs])
    # return coords depending on the side it falls in.
    if (angle >= 0 and angle <= delta_angle) or (angle >= 360.0 - delta_angle):
        # Right edge.
        cs = coords_on_line_with_x_value(center_cs, end_cs, bottom_right_cs[0])
    elif angle > delta_angle and angle <= 180.0 - delta_angle:
        # Top edge.
        cs = coords_on_line_with_y_value(center_cs, end_cs, top_left_cs[1])
    elif angle > 180.0 - delta_angle and angle <= 180 + delta_angle:
        # Left edge.
        cs = coords_on_line_with_x_value(center_cs, end_cs, top_left_cs[0])
    elif angle > 180.0 + delta_angle and angle <= 360.0 - delta_angle:
        # Bottom edge.
        cs = coords_on_line_with_y_value(center_cs, end_cs, bottom_right_cs[1])
    return cs
def coords_on_top_edge(top_left_cs, bottom_right_cs, alpha):
    """Interpolate along the top edge: alpha=0 -> top-left, alpha=1 -> top-right."""
    return convex_combination_coords(
        top_left_cs, top_right_coords(top_left_cs, bottom_right_cs), alpha)
def coords_on_bottom_edge(top_left_cs, bottom_right_cs, alpha):
    """Interpolate along the bottom edge: alpha=0 -> bottom-left, alpha=1 -> bottom-right."""
    return convex_combination_coords(
        bottom_left_coords(top_left_cs, bottom_right_cs), bottom_right_cs, alpha)
def coords_on_left_edge(top_left_cs, bottom_right_cs, alpha):
    """Interpolate along the left edge: alpha=0 -> bottom-left, alpha=1 -> top-left."""
    return convex_combination_coords(
        bottom_left_coords(top_left_cs, bottom_right_cs), top_left_cs, alpha)
def coords_on_right_edge(top_left_cs, bottom_right_cs, alpha):
    """Interpolate along the right edge: alpha=0 -> bottom-right, alpha=1 -> top-right."""
    return convex_combination_coords(
        bottom_right_cs, top_right_coords(top_left_cs, bottom_right_cs), alpha)
# t in [0, 1]. for symmetric curves, it should be 0.5 for the middle
def coords_on_bezier(from_cs, to_cs, c1_cs, c2_cs, t):
    """Return the point at parameter t in [0, 1] on the cubic Bezier curve
    through `from_cs`/`to_cs` with control points `c1_cs`/`c2_cs`
    (not yet implemented)."""
    raise NotImplementedError
def coords_on_line_segment(start_cs, end_cs, t):
    """Linearly interpolate between the endpoints: t=0 -> start, t=1 -> end."""
    s = 1.0 - t
    return [s * start_cs[0] + t * end_cs[0], s * start_cs[1] + t * end_cs[1]]
def coords_on_line_with_x_value(start_cs, end_cs, x):
    """Return the point with abscissa `x` on the line through the two coords.

    Solves y = m*x + b with the slope and intercept fixed by `start_cs`
    and `end_cs`.

    Raises
    ------
    ValueError
        If the line is (numerically) vertical, i.e. its x-span is below
        1e-6, so no unique point with the requested x exists.
        (Previously raised with no message, which made failures hard to
        diagnose.)
    """
    x1, y1 = start_cs
    x2, y2 = end_cs
    if abs(x1 - x2) < 1.0e-6:
        # Guard clause: a vertical line has no single y for a given x.
        raise ValueError(
            "line through (%r, %r) and (%r, %r) is vertical; cannot solve "
            "for x=%r" % (x1, y1, x2, y2, x))
    m = (y1 - y2) / float(x1 - x2)
    b = y1 - m * x1
    return [x, m * x + b]
def coords_on_line_with_y_value(start_cs, end_cs, y):
    """Return the point with ordinate `y` on the line through the two coords.

    Mirror image of coords_on_line_with_x_value: solves x = m*y + b.

    Raises
    ------
    ValueError
        If the line is (numerically) horizontal, i.e. its y-span is below
        1e-6, so no unique point with the requested y exists.
        (Previously raised with no message, which made failures hard to
        diagnose.)
    """
    x1, y1 = start_cs
    x2, y2 = end_cs
    if abs(y1 - y2) < 1.0e-6:
        # Guard clause: a horizontal line has no single x for a given y.
        raise ValueError(
            "line through (%r, %r) and (%r, %r) is horizontal; cannot solve "
            "for y=%r" % (x1, y1, x2, y2, y))
    m = (x1 - x2) / float(y1 - y2)
    b = x1 - m * y1
    return [m * y + b, y]
def coords_from_deltas(start_cs, deltas_lst):
    """Accumulate coords from `start_cs`, applying each (dx, dy) step in turn.

    Returns len(deltas_lst) + 1 coords, the first being `start_cs` itself.
    """
    cs_lst = [start_cs]
    for dx, dy in deltas_lst:
        cs_lst.append(translate_coords(cs_lst[-1], dx, dy))
    return cs_lst
def coords_from_horizontal_deltas(start_cs, delta_lst):
    """Accumulate coords from `start_cs`, shifting horizontally by each delta.

    Returns len(delta_lst) + 1 coords, the first being `start_cs` itself.
    """
    cs_lst = [start_cs]
    for dx in delta_lst:
        cs_lst.append(translate_coords_horizontally(cs_lst[-1], dx))
    return cs_lst
def coords_from_vertical_deltas(start_cs, delta_lst):
    """Accumulate coords from `start_cs`, shifting vertically by each delta.

    Returns len(delta_lst) + 1 coords, the first being `start_cs` itself.
    """
    cs_lst = [start_cs]
    for dy in delta_lst:
        cs_lst.append(translate_coords_vertically(cs_lst[-1], dy))
    return cs_lst
def coords_on_grid(top_left_cs, num_rows, num_columns, cell_width, cell_height):
    """Return a (num_rows+1) x (num_columns+1) matrix of grid corner coords.

    Rows extend downwards from `top_left_cs`; columns extend to the right.
    """
    return [
        [
            translate_coords(top_left_cs, col * cell_width, -row * cell_height)
            for col in range(num_columns + 1)
        ]
        for row in range(num_rows + 1)
    ]
def coords_on_irregular_grid(top_left_cs, column_width_lst, row_height_lst):
    """Return grid corner coords where each column/row has its own size.

    Produces len(row_height_lst) + 1 rows of len(column_width_lst) + 1
    coords each, starting at `top_left_cs`.
    """
    cs = top_left_cs
    grid_cs = [coords_from_horizontal_deltas(cs, column_width_lst)]
    for x in row_height_lst:
        # Drop the row anchor down by this row's height, then lay out columns.
        cs = translate_coords_vertically(cs, -x)
        row_cs = coords_from_horizontal_deltas(cs, column_width_lst)
        grid_cs.append(row_cs)
    return grid_cs
# for canvas aligned axis; transform the data first with log if log coords necessary.
def axis_value_to_canvas_value(canvas_v, axis_v, other_canvas_v, other_axis_v,
                               queried_axis_v):
    """Map an axis-space value to canvas space via two (canvas, axis) reference pairs."""
    slope = (other_canvas_v - canvas_v) / float(other_axis_v - axis_v)
    intercept = canvas_v - slope * axis_v
    return slope * queried_axis_v + intercept
def canvas_value_to_axis_value(canvas_v, axis_v, other_canvas_v, other_axis_v,
                               queried_canvas_v):
    """Map a canvas-space value back to axis space via two (canvas, axis) reference pairs."""
    slope = float(other_axis_v - axis_v) / (other_canvas_v - canvas_v)
    intercept = axis_v - slope * canvas_v
    return slope * queried_canvas_v + intercept
def draw_to_tikz(e):
    """Translate a drawing element (or nested list of elements) to TikZ commands.

    Parameters
    ----------
    e: dict or list
        An element dict carrying a "type" key ('open_path', 'closed_path',
        'circle', 'ellipse', 'bezier', 'circular_arc', 'elliptical_arc',
        'latex', 'image'), or an arbitrarily nested non-empty list of such
        dicts.

    Returns
    -------
    list of str
        One TikZ \\draw/\\node command per primitive element, in order.

    Raises
    ------
    ValueError
        If an element's "type" is not one of the supported kinds.
    """
    cmd_lst = []
    if isinstance(e, list):
        assert len(e) > 0
        for e_i in e:
            cmd_lst.extend(draw_to_tikz(e_i))
    elif e["type"] == 'open_path':
        cmd_lst.append(
            "\\draw[%s] " % e["tikz_str"] +
            " -- ".join(["(%f, %f)" % tuple(cs) for cs in e["cs_lst"]]) + ";")
    elif e["type"] == 'closed_path':
        cmd_lst.append(
            "\\draw[%s] " % e["tikz_str"] +
            " -- ".join(["(%f, %f)" % (cs[0], cs[1]) for cs in e["cs_lst"]]) +
            " -- cycle;")
    elif e["type"] == "circle":
        cmd_lst.append(
            "\\draw[%s] (%f, %f) circle (%f);" %
            (e["tikz_str"], e["center_cs"][0], e["center_cs"][1], e["radius"]))
    elif e["type"] == 'ellipse':
        cmd_lst.append("\\draw[%s] (%f, %f) ellipse (%f and %f);" %
                       (e["tikz_str"], e["center_cs"][0], e["center_cs"][1],
                        e["horizontal_radius"], e["vertical_radius"]))
    elif e["type"] == "bezier":
        cmd_lst.append(
            "\\draw[%s] (%f, %f) .. controls (%f, %f) and (%f, %f) .. (%f, %f);"
            % tuple([e["tikz_str"]] + e["from_cs"] + e["c1_cs"] + e["c2_cs"] +
                    e["to_cs"]))
    elif e["type"] == "circular_arc":
        cmd_lst.append("\\draw[%s] (%f,%f) arc (%f:%f:%f);" %
                       (e["tikz_str"], e["center_cs"][0], e["center_cs"][1],
                        e["start_angle"], e["end_angle"], e["radius"]))
    elif e["type"] == "elliptical_arc":
        cmd_lst.append("\\draw[%s] (%f,%f) arc (%f:%f:%f and %f);" %
                       (e["tikz_str"], e["center_cs"][0], e["center_cs"][1],
                        e["start_angle"], e["end_angle"],
                        e["horizontal_radius"], e["vertical_radius"]))
    elif e["type"] == "latex":
        cmd_lst.append("\\node[%s] at (%f,%f) {%s};" %
                       (e["tikz_str"], e["cs"][0], e["cs"][1], e["expr"]))
    elif e["type"] == "image":
        # Anchor the node at the image's center, derived from its top-left.
        center_cs = translate_coords(e["top_left_cs"], e["width"] / 2.0,
                                     -e["height"] / 2.0)
        cmd_lst.append(
            "\\node[inner sep=0pt, %s] at (%f,%f) {\\includegraphics[height=%f, width=%f]{%s}};"
            % (e["tikz_str"], center_cs[0], center_cs[1], e["height"],
               e["width"], e["filepath"]))
    else:
        raise ValueError("draw not implemented for element: %s" % e["type"])
    return cmd_lst
def write_textfile(filepath, lines):
    """Write `lines` to `filepath`, terminating each entry with a newline."""
    with open(filepath, 'w') as f:
        f.writelines(line + "\n" for line in lines)
# TODO: have to define colors by hand.
def draw_to_tikz_standalone(e, filepath, name2color_in_rgb=None):
    """Render element(s) `e` into a standalone LaTeX/TikZ document at `filepath`.

    Parameters
    ----------
    e: dict or list
        Drawing element(s) accepted by draw_to_tikz.
    filepath: str
        Destination path of the generated .tex file.
    name2color_in_rgb: dict or None
        Optional mapping of color name -> (r, g, b) with components in
        0..255; each entry is emitted as a \\definecolor line.
    """
    tikz_lines = []
    tikz_lines.extend([
        '\\documentclass{standalone}',
        # BUG FIX: a missing trailing comma previously concatenated the
        # fontenc and tikz \usepackage strings into a single list entry.
        "\\usepackage[T1]{fontenc}",
        '\\usepackage{tikz}',
        '\\usepackage{amsmath, amsfonts}',
        '\\usetikzlibrary{arrows.meta}',
        '\\begin{document}',
        '\\begin{tikzpicture}',
    ])
    # define the colors used.
    if name2color_in_rgb is not None:
        tikz_lines.extend([
            '\\definecolor{%s}{RGB}{%d,%d,%d}' % (name, rgb[0], rgb[1], rgb[2])
            for (name, rgb) in name2color_in_rgb.items()
        ])
    tikz_lines.extend(draw_to_tikz(e))
    tikz_lines.extend([
        '\\end{tikzpicture}',
        '\\end{document}',
    ])
    write_textfile(filepath, tikz_lines)
#### tikz reference
#===> linewidth
# \tikzset{
# ultra thin/.style= {line width=0.1pt},
# very thin/.style= {line width=0.2pt},
# thin/.style= {line width=0.4pt},% (*** DEFAULT ***; close to 1/64cm)
# semithick/.style= {line width=0.6pt},
# thick/.style= {line width=0.8pt},
# very thick/.style= {line width=1.2pt},
# ultra thick/.style={line width=1.6pt}
# }
#===> linestyle
# \tikzstyle{solid}= [dash pattern=]
# \tikzstyle{dotted}= [dash pattern=on \pgflinewidth off 2pt]
# \tikzstyle{densely dotted}= [dash pattern=on \pgflinewidth off 1pt]
# \tikzstyle{loosely dotted}= [dash pattern=on \pgflinewidth off 4pt]
# \tikzstyle{dashed}= [dash pattern=on 3pt off 3pt]
# \tikzstyle{densely dashed}= [dash pattern=on 3pt off 2pt]
# \tikzstyle{loosely dashed}= [dash pattern=on 3pt off 6pt]
# \tikzstyle{dashdotted}= [dash pattern=on 3pt off 2pt on \the\pgflinewidth off 2pt]
# \tikzstyle{densely dashdotted}= [dash pattern=on 3pt off 1pt on \the\pgflinewidth off 1pt]
# \tikzstyle{loosely dashdotted}= [dash pattern=on 3pt off 4pt on \the\pgflinewidth off 4pt]
#===> fontsize (TODO: add the size in pts)
# \tiny
# \scriptsize
# \footnotesize
# \small
# \normalsize
# \large
# \Large
# \LARGE
# \huge
# \Huge
#===> font type
# https://www.overleaf.com/learn/latex/Font_typefaces
# https://tug.org/FontCatalogue/
# https://fonts.google.com/
# http://mirror.las.iastate.edu/tex-archive/fonts/
# https://tex.stackexchange.com/questions/84533/font-installation-woes-texshop-on-a-mac?rq=1
#===> external links
# https://tex.stackexchange.com/questions/45275/tikz-get-values-for-predefined-dash-patterns
# https://tex.stackexchange.com/questions/255234/how-does-one-pick-control-points-to-control-b%C3%A9zier-curves-in-tikz
# https://tex.stackexchange.com/questions/20885/draw-curly-braces-in-tikz
# https://cremeronline.com/LaTeX/minimaltikz.pdf
# https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003833 ; for tips on scientific figures.
#===> colors
# https://www.kennethmoreland.com/color-advice/
#### TODO
# - continuous compilation with file changes (useful for finding the right dimensions).
# - additional print options for generating tikz code.
# - easier handling of tikz styling options
# - easier handling of sizing options.
# - better defaults for colors.
# - better defaults for sizes. (powers (both positive and negative) of 2)
# - better default aspect ratios.
# - ability to draw auxiliary controls for bezier curves.
# - better interactivity (e.g., multiple drawings with binary search)
# - computation of the convex hull of a set of points.
# - computation of contour sets given a function.
# - basic scaling options
# - few basic default styles.
# - antipodal points for reference.
# - dealing with text appropriately (currently it is somewhat of a 2nd class citizen)
# - dealing with styling options appropriately (these are not made super easy; require styling strings).
# --> maybe manage the creation of these strings.
# - support for simple plotting.
# - good | |
# filename: support.py
"""
All necessary utilities (take and evaluate a picture, estimate position, drive towards position,...)
are implemented in this module
"""
import configparser
import anki_vector
import time
import threading
from anki_vector.util import degrees, distance_mm, speed_mmps, Pose, Angle
from anki_vector import behavior
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry, Region
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
import cv2
from anki_vector.connection import ControlPriorityLevel
import io
import navigation
from navigation import BALLOON_SIZE_MM
import tensorflow as tf
from PIL import Image
from offline_predict import TFObjectDetection
INITIALIZED = False
class img_prediction(object):
    """
    A class to perform the necessary steps to evaluate a picture:
    Initialize a connection to the cloud model
    1. Take a picture (in online or offline format)
    2. Evaluate the picture online
    and filter results
    """

    def __init__(self, config_file_path=r'azure_config.txt'):
        """
        Instantiate an img_prediction class,
        by setting up a predictor that is connected to Azure Custom Vision
        Parameters
        ----------
        config_file_path: str
            Path to a textfile with the azure credentials
        """
        config_parser = configparser.RawConfigParser()
        config_parser.read(config_file_path)
        self.ENDPOINT = config_parser.get('CustomVision', 'endpoint')
        self.prediction_key = config_parser.get(
            'CustomVision', 'prediction_key')
        self.prediction_resource_id = config_parser.get(
            'CustomVision', 'ressource_id')
        self.publish_iteration_name = config_parser.get(
            'CustomVision', 'publish_iteration_name')
        self.project_id = config_parser.get('CustomVision', 'project_id')
        self.predictor = CustomVisionPredictionClient(
            self.prediction_key, self.ENDPOINT)

    def take_picture(self, robot):
        """
        Takes a picture with the given robot and process it binary
        Parameters
        ----------
        robot: anki_vector.Robot
            The robot instance that should take the picture
        Returns
        -------
        bytes
            Binary image data (BMP) that can be handed to the cloud predictor
        """
        image = robot.camera.capture_single_image().raw_image
        with io.BytesIO() as output:
            image.save(output, 'BMP')
            image_to_predict = output.getvalue()
        return image_to_predict

    def take_picture_offline(self, robot):
        """
        Takes a picture with the given robot and returns it unprocessed
        Parameters
        ----------
        robot: anki_vector.Robot
            The robot instance that should take the picture
        Returns
        -------
        PIL
            PIL image that can be handed to the tensorflow model (respective the preprocessing)
        """
        image = robot.camera.capture_single_image().raw_image
        return image

    def predict_picture(self, binary_image):
        """
        Evaluate a given image. This is done using an Azure Custom Vision predictor.
        Looping through the results of the JSON answer.
        Only considering results with a probability >0.5.
        Saving only the instance of each (balloon, robot) with highest probability.
        Parameters
        ----------
        binary_image:
            The picture that should be evaluated
        Returns
        -------
        dict
            Dictionary with the bounding boxes for balloon / robot
        """
        # BUG FIX: the project id was hard-coded as a GUID although it is read
        # from the config file into self.project_id in __init__; use the
        # configured value so a different Custom Vision project works.
        results = self.predictor.detect_image(
            self.project_id, self.publish_iteration_name, binary_image)
        probability_b = 0.5
        probability_r = 0.5
        tag_dict = dict()
        for prediction in results.predictions:
            print("\t ----ONLINE PREDICTION---" + prediction.tag_name +
                  ": {0:.2f}% bbox.left = {1:.2f}, bbox.top = {2:.2f}, bbox.width = {3:.2f}, bbox.height = {4:.2f}".format(prediction.probability * 100,
                                                                                                                           prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height))
            # Keep only the highest-probability box per tag, above the
            # 0.5 threshold.
            if prediction.tag_name == 'balloon':
                if prediction.probability > probability_b:
                    probability_b = prediction.probability
                    tag_dict[prediction.tag_name] = (
                        prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height)
            if prediction.tag_name == 'robot':
                if prediction.probability > probability_r:
                    probability_r = prediction.probability
                    tag_dict[prediction.tag_name] = (
                        prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height)
        return tag_dict
class offline_img_prediction(object):
    """
    A class that is necessary if the offline prediction is used.
    Initializes the TensorFlow model by the graph definition.
    """
    # NOTE: the statements below run at class-definition (import) time and
    # require 'model9.pb' and 'labels.txt' to exist in the working directory.
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile('model9.pb', 'rb') as f:
        graph_def.ParseFromString(f.read())
    with open('labels.txt', 'r') as f:
        labels = [l.strip() for l in f.readlines()]
        print('opened labels: ', labels)
    # Shared detector instance used by all calls to offline_predict.
    od_model = TFObjectDetection(graph_def, labels)

    @staticmethod
    def offline_predict(image):
        """
        Evaluates the given picture using the initialized offline model.
        Calling the class that includes pre&postpreoccesing as well
        Parameters
        ----------
        image: PIL
            The image that should be evaluated
        Returns
        -------
        dict
            Dictionary with the bounding boxes for ballon / robot
        """
        tag_dict = dict()
        predictions = offline_img_prediction.od_model.predict_image(image)
        print('---OFFLINE RESULTS---\n', predictions)
        for prediction in predictions:
            # Last prediction per tag wins; boxes are (left, top, width, height).
            tag_dict[prediction['tagName']] = (prediction['boundingBox']['left'], prediction['boundingBox']
                                               ['top'], prediction['boundingBox']['width'], prediction['boundingBox']['height'])
        return tag_dict
def draw_bounding_boxes(im_path, result_dict):
    """
    NOT IN USE
    Can be used to draw bounding boxes to a given image
    Parameters
    ----------
    im_path: str
        Path to the image that should be used in the background
    result_dict: dict
        Dictionary with bounding boxes (left, top, width, height) as values,
        given relative to the image size.
    """
    img = cv2.imread(im_path, cv2.IMREAD_COLOR)
    # Boxes are relative values; scale them to pixel coordinates.
    # (The channel count was previously bound to an unused variable.)
    height, width, _ = img.shape
    for result in result_dict.values():
        left = result[0] * width
        top = result[1] * height
        w = result[2] * width
        h = result[3] * height
        cv2.rectangle(img, (int(left), int(top)), (int(left + w), int(top + h)),
                      (0, 0, 255), 5)
    # The window handle returned by namedWindow is not needed.
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('image', 900, 900)
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def robot_initiate(robot):
    """
    Sets the robot in a starting position
    Parameters
    ----------
    robot: anki_vector.Robot
        The robot instance that should be used
    """
    # Look straight ahead, raise the lift fully (out of the camera's view)
    # and leave the charger so the robot is free to drive.
    robot.behavior.set_head_angle(degrees(0.0))
    robot.behavior.set_lift_height(1.0)
    robot.behavior.drive_off_charger()
def return_from_cliff(robot):
    """
    Brings the robot back from a cliff by turning around and driving away
    Parameters
    ----------
    robot: anki_vector.Robot
        The robot instance that should be used
    """
    # BUG FIX: every other call in this module wraps these arguments with
    # degrees()/distance_mm()/speed_mmps() (see drive_towards_pose); the
    # previous bare ints do not match the SDK's expected Angle/Distance
    # types. Speed of 100 mm/s is a conservative choice -- TODO confirm.
    robot.behavior.turn_in_place(degrees(180))
    robot.behavior.drive_straight(distance_mm(100), speed_mmps(100))
def drive_towards_baloon(robot, data, MAX_DRIVING_DISTANCE=600):
    """
    Drive the robot straight towards the given position,
    directly setting the motor speed (for parallelization).
    Capturing the time to stop the motors when the distance is reached
    Optional: Uncomment 274-276 to check for cliff while driving --> causes control loss if cliff is detected
    Optional: Comment 271-273 if the robot should not try to shutdown the other robot
    Parameters
    ----------
    robot: anki_vector.Robot
        The robot instance that should be used
    data: tuple
        Tuple consisting of the degree and distance to the estimated position
    MAX_DRIVING_DISTANCE: int
        Setting a maximum driving distance, since it is likely that the setup changes when driving for too long.
    """
    robot.behavior.turn_in_place(degrees(data[0]))
    v_0 = 200
    # Deceleration derived from v_0 and the distance so the drive time
    # (v_0 / a) corresponds to covering data[1] mm.
    a = 3/2 * (v_0**2 / data[1])
    robot.motors.set_wheel_motors(v_0, v_0, 0, 0)
    t = time.time()
    spoken = False
    while (time.time() < t + (v_0/a)):  # (data[1]/65)):
        print(time.time()-t)
        if not spoken:
            if data[1] > 400:
                # BUG FIX: 'threading.Thread(target=shutdown(robot))' called
                # shutdown() synchronously and handed its bool result to
                # Thread (which was then never started). Pass the callable
                # with args and start it so the sound plays while driving.
                shutdown_thread = threading.Thread(target=shutdown,
                                                   args=(robot,))
                shutdown_thread.start()
                spoken = True
        # if (robot.status.is_cliff_detected):
        #     robot.motors.set_wheel_motors(-10,-10)
        #     return_from_cliff(robot)
    robot.motors.stop_all_motors()
def shutdown(robot):
    """
    Playing the soundfile "Hey Vector, shutdown!"
    Outsourced to a function to enable threading
    Parameters
    ----------
    robot: anki_vector.Robot
        The robot instance that should be used
    Returns
    -------
    bool
        True if sound was played successfully, False otherwise.
    """
    try:
        robot.audio.stream_wav_file("vector_shutdown.wav", 100)
        return True
    except Exception:
        # BUG FIX: a bare 'except:' also swallowed KeyboardInterrupt and
        # SystemExit; catch only genuine errors from the audio stream.
        return False
def drive_towards_pose(robot, data, MAX_DRIVING_DISTANCE=600):
    """
    Using a simplified path planning, in case the robots are right in front of each other.
    In this case the robot moves out of the way a little and attacks the other robot from behind.
    For every other relation the robot drives straight towards the estimated position.
    Optional: Uncomment 351-369 to enable path planning for all possible relations of robot and balloon
    Parameters
    ----------
    robot: anki_vector.Robot
        The robot instance that should be used
    data: tuple
        Tuple consisting of the degree, distance to the estimated position and the relation of the other robot and balloon
    MAX_DRIVING_DISTANCE: int
        Setting a maximum driving distance, since it is likely that the setup changes when driving for too long.
    """
    direct = data[0]
    # Cap the distance so the robot re-evaluates before driving too far.
    dist = min(data[1], MAX_DRIVING_DISTANCE)
    relation = data[2]
    print(relation)
    if relation == 'front':
        # Dog-leg around the opponent: turn 45 deg off the direct bearing,
        # drive past, cut back -90 deg, then face backwards (-135 deg).
        robot.behavior.turn_in_place(degrees(data[0]))
        robot.behavior.turn_in_place(degrees(45))
        robot.behavior.drive_straight(distance_mm(dist/2+100), speed_mmps(250))
        robot.behavior.turn_in_place(degrees(-90))
        robot.behavior.drive_straight(distance_mm(dist/2+100), speed_mmps(250))
        robot.behavior.turn_in_place(degrees(-135))
    else:
        # All other relations: straight-line approach.
        drive_towards_baloon(robot, (direct, dist), MAX_DRIVING_DISTANCE)
    # if relation == 'back':
    #     pose = Pose(x = dist, y = 0, z = 0, angle_z = Angle(degrees = 0))
    #     robot.behavior.go_to_pose(pose, relative_to_robot=True)
    #     robot.behavior.set_lift_height(1.0)
    # else:
    #     if relation == 'front':
    #         pose1 = Pose(x = dist/2, y = max(dist/4,50), z = 0, angle_z=anki_vector.util.Angle(degrees=0))
    #         pose2 = Pose(x = dist/2, y = -max(dist/4,50), z = 0, angle_z = Angle(degrees = 30))
    #     elif relation == 'to the left':
    #         pose1 = Pose(x = dist/2, y = -max(dist/4,50), z = 0, angle_z=anki_vector.util.Angle(degrees=0))
    #         pose2 = Pose(x = dist/2, y = max(dist/4,50), z = 0, angle_z = Angle(degrees = 80))
    #     elif relation == 'to the right':
    #         pose1 = Pose(x = dist/2, y = max(dist/4,50), z = 0, angle_z=anki_vector.util.Angle(degrees=0))
    #         pose2 = Pose(x = dist/2, y = -max(dist/4,50), z = 0, angle_z = Angle(degrees = 260))
    #     robot.behavior.go_to_pose(pose1, relative_to_robot=True)
    #     robot.behavior.go_to_pose(pose2, relative_to_robot=True)
    #     robot.behavior.set_lift_height(1.0)
def evaluate_picture(robot, img_prediction, balloon_size=BALLOON_SIZE_MM):
"""
Fundamental function that does the entire picture evaluation process
this includes:
1. Taking a picture
2. Getting information about the content
3. Dynamically setting the balloon size
4. Calculating the turn degree and distance
5. Evaluating the relation of robot and balloon
Optional: Uncomment for online image prediction
Optional: Uncomment 438 to prevent constant recalculation of the ballon size
Parameters
----------
robot: anki_vector.Robot
The robot | |
"""
Mininet: A simple networking testbed for OpenFlow/SDN!
author: <NAME> (<EMAIL>)
author: <NAME> (<EMAIL>)
Mininet creates scalable OpenFlow test networks by using
process-based virtualization and network namespaces.
Simulated hosts are created as processes in separate network
namespaces. This allows a complete OpenFlow network to be simulated on
top of a single Linux kernel.
Each host has:
A virtual console (pipes to a shell)
A virtual interfaces (half of a veth pair)
A parent shell (and possibly some child processes) in a namespace
Hosts have a network interface which is configured via ifconfig/ip
link/etc.
This version supports both the kernel and user space datapaths
from the OpenFlow reference implementation (openflowswitch.org)
as well as OpenVSwitch (openvswitch.org.)
In kernel datapath mode, the controller and switches are simply
processes in the root namespace.
Kernel OpenFlow datapaths are instantiated using dpctl(8), and are
attached to the one side of a veth pair; the other side resides in the
host namespace. In this mode, switch processes can simply connect to the
controller via the loopback interface.
In user datapath mode, the controller and switches can be full-service
nodes that live in their own network namespaces and have management
interfaces and IP addresses on a control network (e.g. 192.168.123.1,
currently routed although it could be bridged.)
In addition to a management interface, user mode switches also have
several switch interfaces, halves of veth pairs whose other halves
reside in the host nodes that the switches are connected to.
Consistent, straightforward naming is important in order to easily
identify hosts, switches and controllers, both from the CLI and
from program code. Interfaces are named to make it easy to identify
which interfaces belong to which node.
The basic naming scheme is as follows:
Host nodes are named h1-hN
Switch nodes are named s1-sN
Controller nodes are named c0-cN
Interfaces are named {nodename}-eth0 .. {nodename}-ethN
Note: If the network topology is created using mininet.topo, then
node numbers are unique among hosts and switches (e.g. we have
h1..hN and SN..SN+M) and also correspond to their default IP addresses
of 10.x.y.z/8 where x.y.z is the base-256 representation of N for
hN. This mapping allows easy determination of a node's IP
address from its name, e.g. h1 -> 10.0.0.1, h257 -> 10.0.1.1.
Note also that 10.0.0.1 can often be written as 10.1 for short, e.g.
"ping 10.1" is equivalent to "ping 10.0.0.1".
Currently we wrap the entire network in a 'mininet' object, which
constructs a simulated network based on a network topology created
using a topology object (e.g. LinearTopo) from mininet.topo or
mininet.topolib, and a Controller which the switches will connect
to. Several configuration options are provided for functions such as
automatically setting MAC addresses, populating the ARP table, or
even running a set of terminals to allow direct interaction with nodes.
After the network is created, it can be started using start(), and a
variety of useful tasks may be performed, including basic connectivity
and bandwidth tests and running the mininet CLI.
Once the network is up and running, test code can easily get access
to host and switch objects which can then be used for arbitrary
experiments, typically involving running a series of commands on the
hosts.
After all desired tests or activities have been completed, the stop()
method may be called to shut down the network.
"""
import os
import re
import select
import signal
import random
from time import sleep
from itertools import chain, groupby
from math import ceil
from mininet.cli import CLI
from mininet.log import info, error, debug, output, warn
from mininet.node import ( Node, Host, OVSKernelSwitch, DefaultController,
Controller )
from mininet.nodelib import NAT
#from mininet.link import Link, Intf
from mininet.util import ( quietRun, fixLimits, numCores, ensureRoot,
macColonHex, ipStr, ipParse, netParse, ipAdd,
waitListening, BaseString, encode )
from mininet.term import cleanUpScreens, makeTerms
from mininet.link import (Intf, TCIntf)
# DSA ########################
from mininet.dutil import _info
from mininet.cloudlink import (CloudLink)
from mininet.lxc_container import (LxcNode)
from mininet.cloudswitch import (LxcSwitch)
from mininet.cloudcontroller import (LxcRemoteController)
import asyncio
import time
from threading import Thread
from mininet.assh import ASsh
##############################
# Mininet version: should be consistent with README and LICENSE
from mininet.net import VERSION as MININET_VERSION
# Distrinet version
VERSION = "2.0 (Mininet {})".format(MININET_VERSION)
from mininet.net import Mininet
class Distrinet( Mininet ):
"Network emulation with hosts spawned in network namespaces."
    def __init__( self, topo=None, switch=LxcSwitch, host=LxcNode,
                  controller=LxcRemoteController, link=CloudLink, intf=TCIntf,
                  mapper=None,
                  build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
                  adminIpBase='192.168.0.1/8',
                  autoSetMacs=False, autoPinCpus=False,
                  listenPort=None, waitConnected=False, waitConnectionTimeout=5,
                  jump=None, user="root", client_keys=None, master=None, pub_id=None,
                  **kwargs):
        """Create Mininet object.
           topo: Topo (topology) object or None
           switch: default Switch class
           host: default Host class/constructor
           controller: default Controller class/constructor
           link: default Link class/constructor
           intf: default Intf class/constructor
           ipBase: base IP address for hosts,
           adminIpBase: base IP address for the admin/management network
           mapper: mapper to map virtual topology onto physical topology
           build: build now from topo?
           xterms: if build now, spawn xterms?
           cleanup: if build now, cleanup before creating?
           inNamespace: spawn switches and controller in net namespaces?
           autoSetMacs: set MAC addrs automatically like IP addresses?
           autoStaticArp: set all-pairs static MAC addrs?
           autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
           listenPort: base listening port to open; will be incremented for
               each additional switch in the net if inNamespace=False
           waitConnected: wait for the switches to be connected to their controller
           waitConnectionTimeout: timeout to wait to decide if a switch is connected to its controller
           jump: SSH jump host (used as bastion for all SSH connections)
           user: SSH username used to reach the physical nodes
           client_keys: SSH private key files used for authentication
           master: master node (hostname/address the master SSH session connects to)
           pub_id: public key material; forwarded to controllers via addController"""
        self.topo = topo
        self.switch = switch
        self.host = host
        self.controller = controller
        self.link = link
        self.intf = intf
        self.ipBase = ipBase
        self.ipBaseNum, self.prefixLen = netParse( self.ipBase )
        # Host portion of the base address; it seeds sequential IP allocation.
        hostIP = ( 0xffffffff >> self.prefixLen ) & self.ipBaseNum
        # Start for address allocation
        self.nextIP = hostIP if hostIP > 0 else 1
        # The admin (management) network gets its own, independently
        # incremented allocator, mirroring the data-network one above.
        self.adminIpBase = adminIpBase
        self.adminIpBaseNum, self.adminPrefixLen = netParse( self.adminIpBase )
        adminIP = ( 0xffffffff >> self.adminPrefixLen ) & self.adminIpBaseNum
        # Start for address allocation
        self.adminNextIP = adminIP if adminIP > 0 else 1
        # self.inNamespace = inNamespace
        self.xterms = xterms
        self.cleanup = cleanup
        self.autoSetMacs = autoSetMacs
        # self.autoStaticArp = autoStaticArp
        self.autoPinCpus = autoPinCpus
        # self.numCores = numCores()
        # self.nextCore = 0  # next core for pinning hosts to CPUs
        self.listenPort = listenPort
        self.waitConn = waitConnected
        self.waitConnectionTimeout = waitConnectionTimeout
        self.mapper = mapper
        #
        self.hosts = []
        self.switches = []
        self.controllers = []
        self.links = []
        # Run the asyncio event loop on a dedicated background thread; the
        # SSH sessions (ASsh) are scheduled on this loop.
        self.loop = asyncio.get_event_loop()
        def runforever(loop):
            # NOTE(review): tiny sleep before run_forever() — presumably lets
            # the constructor win a startup race; TODO confirm still needed.
            time.sleep(0.001)
            loop.run_forever()
        self.thread = Thread(target=runforever, args=(self.loop,))
        self.thread.start()
        self.jump = jump
        self.user = user
        self.pub_id = pub_id
        self.client_keys = client_keys
        self.masterhost = master
        # Open the SSH connection to the master node and block until it is up.
        _info ("Connecting to master node\n")
        self.masterSsh = ASsh(loop=self.loop, host=self.masterhost, username=self.user, bastion=self.jump, client_keys=self.client_keys)
        self.masterSsh.connect()
        self.masterSsh.waitConnected()
        _info ("connected to master node\n")
        self.nameToNode = {}  # name to Node (Host/Switch) objects
        self.terms = []  # list of spawned xterm processes
        self.init()  # Initialize Mininet if necessary
        self.built = False
        if topo and build:
            self.build()
# DSA - OK
def addHost( self, name, cls=None, **params ):
"""Add host.
name: name of host to add
cls: custom host class/constructor (optional)
params: parameters for host
returns: added host"""
# Default IP and MAC addresses
defaults = { 'ip': ipAdd( self.nextIP,
ipBaseNum=self.ipBaseNum,
prefixLen=self.prefixLen ) +
'/%s' % self.prefixLen}
if "image" in self.topo.nodeInfo(name):
defaults.update({"image":self.topo.nodeInfo(name)["image"]})
# XXX DSA - doesn't make sense to generate MAC automatically here, we
# keep for compatibility prurpose but never use it...
if self.autoSetMacs:
defaults[ 'mac' ] = macColonHex( self.nextIP )
if self.autoPinCpus:
raise Exception("to be implemented")
# defaults[ 'cores' ] = self.nextCore
# self.nextCore = ( self.nextCore + 1 ) % self.numCores
self.nextIP += 1
defaults.update( params )
if not cls:
cls = self.host
if self.mapper:
defaults.update({"target":self.mapper.place(name)})
h = cls(name=name, **defaults )
self.hosts.append( h )
self.nameToNode[ name ] = h
return h
# DSA - OK
def addSwitch( self, name, cls=None, **params ):
"""Add switch.
name: name of switch to add
cls: custom switch class/constructor (optional)
returns: added switch
side effect: increments listenPort ivar ."""
defaults = { 'listenPort': self.listenPort}
if "image" in self.topo.nodeInfo(name):
defaults.update({"image":self.topo.nodeInfo(name)})
else:
error ("we are missing an image for {} \n".format(name))
exit()
defaults.update( params )
if not cls:
cls = self.switch
if self.mapper:
defaults.update({"target":self.mapper.place(name)})
sw = cls(name=name, **defaults )
self.switches.append( sw )
self.nameToNode[ name ] = sw
return sw
    def delSwitch( self, switch ):
        "Delete a switch"
        # Delegates to Mininet.delNode, passing the switch list so the
        # node is removed from self.switches as well.
        self.delNode( switch, nodes=self.switches )
# DSA - OK
def addController( self, name='c0', controller=None, **params ):
"""Add controller.
controller: Controller class
params: Parameters for the controller"""
# Get controller class
params.update({'pub_id':self.pub_id})
if not controller:
controller = self.controller
controller_new = controller(name=name,
loop=self.loop,
master=self.masterSsh,
username=self.user,
bastion=self.jump,
client_keys=self.client_keys,
**params)
self.controllers.append(controller_new)
self.nameToNode[ name ] = | |
list_sk_pvalue
del H
Comp_Nuba_350_df = pd.DataFrame(Composites_Nuba_350, index = c)
Sk_Nuba_stat_350_df = pd.DataFrame(Sk_Nuba_stat_350, index = c)
Sk_Nuba_pvalue_350_df = pd.DataFrame(Sk_Nuba_pvalue_350, index = c)
# Composite (mean diurnal cycle) and KS statistics for cloudy hours at P348.
Sk_Nuba_stat_348 = {}
Sk_Nuba_pvalue_348 = {}
Composites_Nuba_348 = {}
for i in df_FH_nuba_348_groupH.index:
    H = str(i)
    dates = df_FH_nuba_348_groupH.loc[i]
    if len(dates) == 1:
        # Single day for this hour: the composite is that day's series and
        # no KS test is meaningful (NaN placeholders).
        comp = df_P348_h[df_P348_h.index.date == dates[0]]['radiacion'].values
        sk_stat = np.ones(12) * np.nan
        sk_pvalue = np.ones(12) * np.nan
    elif len(dates) > 1:
        # Concatenate the radiation series of every matching day.
        # (DataFrame.append was removed in pandas 2.0; use pd.concat.)
        temporal = pd.concat(
            [pd.DataFrame(df_P348_h[df_P348_h.index.date == d]['radiacion'])
             for d in dates])
        stat_348 = []
        pvalue_348 = []
        for k in c:
            # Two-sample KS test: hour-k sample of the selected days vs the
            # full hour-k climatology.
            temporal_sk = temporal[temporal.index.hour == k].radiacion.values
            Rad_sk = df_P348_h['radiacion'][df_P348_h.index.hour == k].values
            try:
                SK = ks_2samp(temporal_sk, Rad_sk)
                stat_348.append(SK[0])
                pvalue_348.append(SK[1])
            except ValueError:
                # Raised when one of the samples is empty.
                stat_348.append(np.nan)
                pvalue_348.append(np.nan)
        # Mean diurnal cycle over the selected days.
        temporal_CD = temporal.groupby(by=[temporal.index.hour]).mean()
        comp = temporal_CD['radiacion'].values
        sk_stat = stat_348
        sk_pvalue = pvalue_348
    Composites_Nuba_348[H] = comp
    Sk_Nuba_stat_348[H] = sk_stat
    Sk_Nuba_pvalue_348[H] = sk_pvalue
del H
Comp_Nuba_348_df = pd.DataFrame(Composites_Nuba_348, index=c)
Sk_Nuba_stat_348_df = pd.DataFrame(Sk_Nuba_stat_348, index=c)
Sk_Nuba_pvalue_348_df = pd.DataFrame(Sk_Nuba_pvalue_348, index=c)
##----------ENCONTRANDO LAS RADIACIONES CORRESPONDIENTES A LAS HORAS DESPEJADAS----------##
# Pair the dates and hours flagged as clear ("despejado") at each site.
df_FH_desp_348 = pd.DataFrame({'Fechas': Fecha_desp_348, 'Horas': Hora_desp_348})
df_FH_desp_350 = pd.DataFrame({'Fechas': Fecha_desp_350, 'Horas': Hora_desp_350})
df_FH_desp_975 = pd.DataFrame({'Fechas': Fecha_desp_975, 'Horas': Hora_desp_975})
# For each hour, the unique dates on which that hour was clear; the second
# frame keeps only hours observed on more than one date.
df_FH_desp_348_groupH = df_FH_desp_348.groupby('Horas')['Fechas'].unique()
df_desp_348_groupH = pd.DataFrame(df_FH_desp_348_groupH[df_FH_desp_348_groupH.apply(lambda x: len(x) > 1)])
df_FH_desp_350_groupH = df_FH_desp_350.groupby('Horas')['Fechas'].unique()
df_desp_350_groupH = pd.DataFrame(df_FH_desp_350_groupH[df_FH_desp_350_groupH.apply(lambda x: len(x) > 1)])
df_FH_desp_975_groupH = df_FH_desp_975.groupby('Horas')['Fechas'].unique()
df_desp_975_groupH = pd.DataFrame(df_FH_desp_975_groupH[df_FH_desp_975_groupH.apply(lambda x: len(x) > 1)])
# Composite (mean diurnal cycle) and KS statistics for clear hours at P975.
Sk_Desp_stat_975 = {}
Sk_Desp_pvalue_975 = {}
Composites_Desp_975 = {}
for i in df_FH_desp_975_groupH.index:
    H = str(i)
    dates = df_FH_desp_975_groupH.loc[i]
    if len(dates) == 1:
        # Single day for this hour: no distribution to test (NaN placeholders).
        comp = df_P975_h[df_P975_h.index.date == dates[0]]['radiacion'].values
        sk_stat = np.ones(12) * np.nan
        sk_pvalue = np.ones(12) * np.nan
    elif len(dates) > 1:
        # Concatenate the radiation series of every matching day.
        # (DataFrame.append was removed in pandas 2.0; use pd.concat.)
        temporal = pd.concat(
            [pd.DataFrame(df_P975_h[df_P975_h.index.date == d]['radiacion'])
             for d in dates])
        stat_975 = []
        pvalue_975 = []
        for k in c:
            # Two-sample KS test against the full hour-k climatology.
            temporal_sk = temporal[temporal.index.hour == k].radiacion.values
            Rad_sk = df_P975_h['radiacion'][df_P975_h.index.hour == k].values
            try:
                SK = ks_2samp(temporal_sk, Rad_sk)
                stat_975.append(SK[0])
                pvalue_975.append(SK[1])
            except ValueError:
                # Raised when one of the samples is empty.
                stat_975.append(np.nan)
                pvalue_975.append(np.nan)
        # Mean diurnal cycle over the selected days.
        temporal_CD = temporal.groupby(by=[temporal.index.hour]).mean()
        comp = temporal_CD['radiacion'].values
        sk_stat = stat_975
        sk_pvalue = pvalue_975
    Composites_Desp_975[H] = comp
    Sk_Desp_stat_975[H] = sk_stat
    Sk_Desp_pvalue_975[H] = sk_pvalue
del H
Comp_Desp_975_df = pd.DataFrame(Composites_Desp_975, index=c)
Sk_Desp_stat_975_df = pd.DataFrame(Sk_Desp_stat_975, index=c)
Sk_Desp_pvalue_975_df = pd.DataFrame(Sk_Desp_pvalue_975, index=c)
# Composite (mean diurnal cycle) and KS statistics for clear hours at P350.
Sk_Desp_stat_350 = {}
Sk_Desp_pvalue_350 = {}
Composites_Desp_350 = {}
for i in df_FH_desp_350_groupH.index:
    H = str(i)
    dates = df_FH_desp_350_groupH.loc[i]
    if len(dates) == 1:
        # Single day for this hour: no distribution to test (NaN placeholders).
        comp = df_P350_h[df_P350_h.index.date == dates[0]]['radiacion'].values
        sk_stat = np.ones(12) * np.nan
        sk_pvalue = np.ones(12) * np.nan
    elif len(dates) > 1:
        # Concatenate the radiation series of every matching day.
        # (DataFrame.append was removed in pandas 2.0; use pd.concat.)
        temporal = pd.concat(
            [pd.DataFrame(df_P350_h[df_P350_h.index.date == d]['radiacion'])
             for d in dates])
        stat_350 = []
        pvalue_350 = []
        for k in c:
            # Two-sample KS test against the full hour-k climatology.
            temporal_sk = temporal[temporal.index.hour == k].radiacion.values
            Rad_sk = df_P350_h['radiacion'][df_P350_h.index.hour == k].values
            try:
                SK = ks_2samp(temporal_sk, Rad_sk)
                stat_350.append(SK[0])
                pvalue_350.append(SK[1])
            except ValueError:
                # Raised when one of the samples is empty.
                stat_350.append(np.nan)
                pvalue_350.append(np.nan)
        # Mean diurnal cycle over the selected days.
        temporal_CD = temporal.groupby(by=[temporal.index.hour]).mean()
        comp = temporal_CD['radiacion'].values
        sk_stat = stat_350
        sk_pvalue = pvalue_350
    Composites_Desp_350[H] = comp
    Sk_Desp_stat_350[H] = sk_stat
    Sk_Desp_pvalue_350[H] = sk_pvalue
del H
Comp_Desp_350_df = pd.DataFrame(Composites_Desp_350, index=c)
Sk_Desp_stat_350_df = pd.DataFrame(Sk_Desp_stat_350, index=c)
Sk_Desp_pvalue_350_df = pd.DataFrame(Sk_Desp_pvalue_350, index=c)
# Composite (mean diurnal cycle) and KS statistics for clear hours at P348.
Sk_Desp_stat_348 = {}
Sk_Desp_pvalue_348 = {}
Composites_Desp_348 = {}
for i in df_FH_desp_348_groupH.index:
    H = str(i)
    dates = df_FH_desp_348_groupH.loc[i]
    if len(dates) == 1:
        # Single day for this hour: no distribution to test (NaN placeholders).
        comp = df_P348_h[df_P348_h.index.date == dates[0]]['radiacion'].values
        sk_stat = np.ones(12) * np.nan
        sk_pvalue = np.ones(12) * np.nan
    elif len(dates) > 1:
        # Concatenate the radiation series of every matching day.
        # (DataFrame.append was removed in pandas 2.0; use pd.concat.)
        temporal = pd.concat(
            [pd.DataFrame(df_P348_h[df_P348_h.index.date == d]['radiacion'])
             for d in dates])
        stat_348 = []
        pvalue_348 = []
        for k in c:
            # Two-sample KS test against the full hour-k climatology.
            temporal_sk = temporal[temporal.index.hour == k].radiacion.values
            Rad_sk = df_P348_h['radiacion'][df_P348_h.index.hour == k].values
            try:
                SK = ks_2samp(temporal_sk, Rad_sk)
                stat_348.append(SK[0])
                pvalue_348.append(SK[1])
            except ValueError:
                # Raised when one of the samples is empty.
                stat_348.append(np.nan)
                pvalue_348.append(np.nan)
        # Mean diurnal cycle over the selected days.
        temporal_CD = temporal.groupby(by=[temporal.index.hour]).mean()
        comp = temporal_CD['radiacion'].values
        sk_stat = stat_348
        sk_pvalue = pvalue_348
    Composites_Desp_348[H] = comp
    Sk_Desp_stat_348[H] = sk_stat
    Sk_Desp_pvalue_348[H] = sk_pvalue
del H
Comp_Desp_348_df = pd.DataFrame(Composites_Desp_348, index=c)
Sk_Desp_stat_348_df = pd.DataFrame(Sk_Desp_stat_348, index=c)
Sk_Desp_pvalue_348_df = pd.DataFrame(Sk_Desp_pvalue_348, index=c)
##-------------------ESTANDARIZANDO LAS FORMAS DE LOS DATAFRAMES A LAS HORAS CASO DESPEJADO----------------##
# Restrict the clear-sky composites to daylight hours [6, 18).
Comp_Desp_348_df = Comp_Desp_348_df[(Comp_Desp_348_df.index >= 6) & (Comp_Desp_348_df.index < 18)]
Comp_Desp_350_df = Comp_Desp_350_df[(Comp_Desp_350_df.index >= 6) & (Comp_Desp_350_df.index < 18)]
Comp_Desp_975_df = Comp_Desp_975_df[(Comp_Desp_975_df.index >= 6) & (Comp_Desp_975_df.index < 18)]
# Reference hour labels (strings) taken from the cloudy composite index.
s = [str(i) for i in Comp_Nuba_348_df.index.values]
nan_col = np.full(len(Comp_Desp_348_df), np.nan)
# Insert an all-NaN column for every hour absent from each composite so the
# three DataFrames share an identical column layout.  (Replaces the previous
# `convert(set)` helper, which shadowed two builtins.)
for _df in (Comp_Desp_348_df, Comp_Desp_350_df, Comp_Desp_975_df):
    missing = sorted(set(s).difference(_df.columns.values), key=int)
    if missing:
        positions = [i for i, x in enumerate(s) if x in missing]
        for pos, col in zip(positions, missing):
            _df.insert(loc=pos, column=col, value=nan_col)
# Reorder the columns to the common hour ordering.
s = [str(i) for i in Comp_Desp_348_df.index.values]
Comp_Desp_348_df = Comp_Desp_348_df[s]
Comp_Desp_350_df = Comp_Desp_350_df[s]
Comp_Desp_975_df = Comp_Desp_975_df[s]
##-------------------ESTANDARIZANDO LAS FORMAS DE LOS DATAFRAMES A LAS HORAS CASO NUBADO----------------##
# Restrict the cloudy composites to daylight hours [6, 18).
Comp_Nuba_348_df = Comp_Nuba_348_df[(Comp_Nuba_348_df.index >= 6) & (Comp_Nuba_348_df.index < 18)]
Comp_Nuba_350_df = Comp_Nuba_350_df[(Comp_Nuba_350_df.index >= 6) & (Comp_Nuba_350_df.index < 18)]
Comp_Nuba_975_df = Comp_Nuba_975_df[(Comp_Nuba_975_df.index >= 6) & (Comp_Nuba_975_df.index < 18)]
s = [str(i) for i in Comp_Nuba_348_df.index.values]
nan_col = np.full(len(Comp_Nuba_348_df), np.nan)
# Insert an all-NaN column for every hour absent from each composite so the
# three DataFrames share an identical column layout.  (Replaces the previous
# `convert(set)` helper, which shadowed two builtins.)
for _df in (Comp_Nuba_348_df, Comp_Nuba_350_df, Comp_Nuba_975_df):
    missing = sorted(set(s).difference(_df.columns.values), key=int)
    if missing:
        positions = [i for i, x in enumerate(s) if x in missing]
        for pos, col in zip(positions, missing):
            _df.insert(loc=pos, column=col, value=nan_col)
# Reorder the columns to the common hour ordering.
Comp_Nuba_348_df = Comp_Nuba_348_df[s]
Comp_Nuba_350_df = Comp_Nuba_350_df[s]
Comp_Nuba_975_df = Comp_Nuba_975_df[s]
##-------------------CONTEO DE LA CANTIDAD DE DÍAS CONSIDERADOS NUBADOS Y DESPEJADOS----------------##
def _count_days(grouped, hours):
    """Number of dates recorded for each hour label; 0 when the hour is absent.

    grouped: per-hour Series whose values are arrays of unique dates.
    hours: hour labels as strings (e.g. '6'..'17').
    """
    counts = []
    for h in hours:
        try:
            counts.append(len(grouped[grouped.index == int(h)].values[0]))
        except IndexError:
            # Hour not present in this group: count it as zero days.
            counts.append(0)
    return counts

# Same computation for the six (site, sky-condition) combinations; this
# replaces six copies of the identical try/except loop.
Cant_Days_Nuba_348 = _count_days(df_FH_nuba_348_groupH, s)
Cant_Days_Nuba_350 = _count_days(df_FH_nuba_350_groupH, s)
Cant_Days_Nuba_975 = _count_days(df_FH_nuba_975_groupH, s)
Cant_Days_Desp_348 = _count_days(df_FH_desp_348_groupH, s)
Cant_Days_Desp_350 = _count_days(df_FH_desp_350_groupH, s)
Cant_Days_Desp_975 = _count_days(df_FH_desp_975_groupH, s)
##-------------------AJUSTADO LOS DATAFRAMES DE LOS ESTADÍSTICOS Y DEL VALOR P----------------##
# Make sure every daylight hour has a column in each p-value DataFrame,
# padding absent hours with NaN at their ordinal position (hour - 6).
_pvalue_dfs = [Sk_Desp_pvalue_975_df, Sk_Desp_pvalue_350_df, Sk_Desp_pvalue_348_df,
               Sk_Nuba_pvalue_350_df, Sk_Nuba_pvalue_348_df, Sk_Nuba_pvalue_975_df]
for hour in c:
    label = str(hour)
    for _df in _pvalue_dfs:
        if label not in _df.columns:
            _df.insert(int(hour - 6), label, np.ones(12) * np.nan)
# Mark statistically significant cells (p < 0.05) with the sentinel value 100
# so they can be located later by a simple equality scan.
Significancia = 0.05
for hour in c:
    label = str(hour)
    for _df in _pvalue_dfs:
        _df.loc[_df[label] < Significancia, label] = 100
# Locate the cells flagged as significant (sentinel 100) in each p-value frame.
def _significant_cells(pvalue_df):
    """Return ([row ordinals], [col ordinals]) of cells equal to 100.

    Rows are indexed by hour (offset 6) and columns by the hour's string
    label.  ``DataFrame.get_value`` was removed in pandas 1.0; ``.at`` is
    the supported scalar accessor.
    """
    rows, cols = [], []
    for row in range(pvalue_df.shape[0]):
        for col in range(pvalue_df.shape[1]):
            if pvalue_df.at[(row + 6), str(col + 6)] == 100:
                rows.append(row)
                cols.append(col)
    return rows, cols

row_Desp_348, col_Desp_348 = _significant_cells(Sk_Desp_pvalue_348_df)
row_Desp_350, col_Desp_350 = _significant_cells(Sk_Desp_pvalue_350_df)
row_Desp_975, col_Desp_975 = _significant_cells(Sk_Desp_pvalue_975_df)
row_Nuba_348, col_Nuba_348 = _significant_cells(Sk_Nuba_pvalue_348_df)
row_Nuba_350, col_Nuba_350 = _significant_cells(Sk_Nuba_pvalue_350_df)
row_Nuba_975, col_Nuba_975 = _significant_cells(Sk_Nuba_pvalue_975_df)
##-------------------GRÁFICO DEL COMPOSITE NUBADO DE LA RADIACIÓN PARA CADA PUNTO Y LA CANT DE DÍAS----------------##
s_f = [int(s[i]) for i in range(len(s))]
plt.close("all")
fig = plt.figure(figsize=(10., 8.),facecolor='w',edgecolor='w')
ax1=fig.add_subplot(2,3,1)
mapa = ax1.imshow(Comp_Nuba_348_df, interpolation = 'none', cmap = 'Spectral_r')
ax1.set_yticks(range(0,12), minor=False)
ax1.set_yticklabels(s, minor=False)
ax1.set_xticks(range(0,12), minor=False)
ax1.set_xticklabels(s, minor=False, rotation = 20)
ax1.set_xlabel('Hora del caso', fontsize=10, fontproperties = prop_1)
ax1.set_ylabel('Hora en el CD de radiación', fontsize=10, fontproperties = prop_1)
ax1.scatter(range(0,12),range(0,12), marker='x', facecolor = 'k', edgecolor = 'k', linewidth='1.', s=30)
ax1.set_title(' x = Horas nubadas en JV', loc = 'center', fontsize=9)
ax2=fig.add_subplot(2,3,2)
mapa = ax2.imshow(Comp_Nuba_350_df, interpolation = 'none', cmap = 'Spectral_r')
ax2.set_yticks(range(0,12), minor=False)
ax2.set_yticklabels(s, minor=False)
ax2.set_xticks(range(0,12), minor=False)
ax2.set_xticklabels(s, minor=False, rotation = 20)
ax2.set_xlabel('Hora del caso', fontsize=10, fontproperties = prop_1)
ax2.set_ylabel('Hora en el CD de radiación', fontsize=10, fontproperties = prop_1)
ax2.scatter(range(0,12),range(0,12), marker='x', facecolor = 'k', edgecolor = 'k', linewidth='1.', s=30)
ax2.set_title(' x = Horas nubadas en CI', loc = 'center', fontsize=9)
ax3 = fig.add_subplot(2,3,3)
mapa = ax3.imshow(Comp_Nuba_975_df, interpolation = 'none', cmap = 'Spectral_r')
ax3.set_yticks(range(0,12), minor=False)
ax3.set_yticklabels(s, minor=False)
ax3.set_xticks(range(0,12), minor=False)
ax3.set_xticklabels(s, minor=False, rotation = 20)
ax3.set_xlabel('Hora del caso', fontsize=10, fontproperties = prop_1)
ax3.set_ylabel('Hora en el CD de radiación', fontsize=10, fontproperties = prop_1)
ax3.scatter(range(0,12),range(0,12), marker='x', facecolor = 'k', edgecolor = 'k', linewidth='1.', s=30)
ax3.set_title(' x = Horas nubadas en TS', loc = 'center', fontsize=9)
cbar_ax = fig.add_axes([0.11, 0.93, 0.78, 0.008])
cbar = fig.colorbar(mapa, cax=cbar_ax, orientation='horizontal', format="%.2f")
cbar.set_label(u"Intensidad de la radiación | |
>>> # xdoc: +REQUIRES(--download, module:ndsampler)
>>> from netharn.models.yolo2.yolo2 import * # NOQA
>>> info = dev_demodata()
>>> self, output, target = ub.take(info, ['criterion', 'outputs', 'target'])
>>> model = info['model']
>>> self = YoloLoss(model.coder)
>>> loss_parts = self.forward(output, target)
>>> print('loss_parts = {!r}'.format(loss_parts))
>>> # xdoctest: +REQUIRES(--show)
>>> import kwplot
>>> kwplot.figure(fnum=1, doclf=True)
>>> sf = info['orig_sizes'][0]
>>> dets.boxes.scale(sf, inplace=True)
>>> kwplot.imshow(info['rgb255'], colorspace='rgb')
>>> dets.draw()
>>> kwplot.show_if_requested()
"""
class_energy = output['class_energy']
score_energy = output['score_energy']
cxywh_energy = output['cxywh_energy']
# Get x,y,w,h,conf,cls
nB, nA, nC, nH, nW = class_energy.data.shape
nC = self.num_classes
assert nA == self.num_anchors
assert nC == self.num_classes
device = class_energy.device
if seen is not None:
self.seen = torch.tensor(seen)
elif self.training:
self.seen += nB
if self.anchors.device != device:
self.anchors = self.anchors.to(device)
conf = score_energy.sigmoid()
if nC > 1:
# Swaps the dimensions from [B, A, C, H, W] to be [B, A, H, W, C]
cls = class_energy.permute(0, 1, 3, 4, 2).contiguous()
# Note: can't do inplace ops here.
coord = torch.empty_like(cxywh_energy)
coord[:, :, 0:2, :, :] = cxywh_energy[:, :, 0:2, :, :].sigmoid() # cx,cy
coord[:, :, 2:4, :, :] = cxywh_energy[:, :, 2:4, :, :] # w,h
info = self.coder.decode_batch(output, forloss=True)
coord = info['coord']
with torch.no_grad():
# We only use the "decoded" coords (which are pred_boxes) to
# determine the ground truth mask. We don't need to backprop
# through here.
# Get target values
# CREATE ENCODED TRUTH VALUES *BASED ON THE OUTPUT*
# TODO: refactor, minimize, verify correctness
pred_boxes = info['pred_boxes']
masks, truth = self.build_targets(pred_boxes, target, nB, nA, nC, nH, nW, device)
if nC > 1:
pcls_mask = masks['cls'].view(-1, 1)
if _TORCH_HAS_BOOL_COMP:
tcls = truth['cls'][masks['cls'].bool()].view(-1).long()
else:
tcls = truth['cls'][masks['cls']].view(-1).long()
if nC > 1:
if _TORCH_HAS_BOOL_COMP:
pcls = cls.view(-1, nC)[pcls_mask.view(-1).bool()]
else:
pcls = cls.view(-1, nC)[pcls_mask.view(-1)]
coord_mask = masks['coord'].repeat(1, 1, 4, 1)
coord_ = coord.view(coord_mask.shape)
conf_ = conf.view(masks['conf'].shape)
# Compute losses
coord_part = self.mse(coord_ * coord_mask, truth['coord'] * coord_mask)
conf_part = self.mse(conf_ * masks['conf'], truth['conf'] * masks['conf'])
loss_parts = {}
loss_parts['coord'] = self.coord_scale * (coord_part / nB)
loss_parts['conf'] = conf_part / nB
if nC > 1 and pcls.numel() > 0:
clf_part = (self.clf(pcls, tcls) / nB)
loss_parts['cls'] = self.class_scale * 2 * clf_part
return loss_parts
    def build_targets(self, pred_boxes, target, nB, nA, nC, nH, nW, device=None):
        """
        For each ground truth, assign it to a predicted box, and construct
        appropriate truth tensors and masks.

        Args:
            pred_boxes : OUTPUT_SPACE cxywh boxes
            target : contains 01-NORMALIZED cxywh boxes, cidxs, and weights

        Returns:
            Tuple[Dict, Dict]: ``(masks, truth)`` with keys 'coord', 'conf',
            'cls'; all tensors are moved to ``device``.

        TODO:
            standardize normalization of inputs (best case is probably to
            simply allow raw targets and normalize inside this func)

        Example:
            >>> # xdoc: +REQUIRES(--download, module:ndsampler)
            >>> from netharn.models.yolo2.yolo2 import * # NOQA
            >>> info = dev_demodata()
            >>> self, output, target = ub.take(info, ['criterion', 'outputs', 'target'])
            >>> decoded_info = self.coder.decode_batch(output, forloss=True)
            >>> pred_boxes = decoded_info['pred_boxes']
            >>> nB, nA, nC, nH, nW = output['class_energy'].shape
            >>> device = pred_boxes.device
            >>> self.seen += 100000
            >>> masks, truth = self.build_targets(pred_boxes, target, nB, nA, nC, nH, nW, device)
            >>> print('masks sum = {}'.format(ub.map_vals(lambda x: x.sum(), masks)))
            >>> print('truth sum = {}'.format(ub.map_vals(lambda x: x.sum(), truth)))
            >>> print('masks shape = {}'.format(ub.map_vals(lambda x: x.shape, masks)))
            >>> print('truth shape = {}'.format(ub.map_vals(lambda x: x.shape, truth)))

        Example:
            >>> # xdoc: +REQUIRES(--download, module:ndsampler)
            >>> # Test empty case
            >>> from netharn.models.yolo2.yolo2 import * # NOQA
            >>> self = YoloLoss.demo()
            >>> target = self.demo_truth(bsize=2)
            >>> for k in target:
            >>>     target[k] = torch.Tensor()
            >>> output = self.coder.demo_output(bsize=2)
            >>> decoded_info = self.coder.decode_batch(output, forloss=True)
            >>> pred_boxes = decoded_info['pred_boxes']
            >>> nB, nA, nC, nH, nW = output['class_energy'].shape
            >>> device = pred_boxes.device
            >>> self.seen += 100000
            >>> masks, truth = self.build_targets(pred_boxes, target, nB, nA, nC, nH, nW, device)
            >>> print('masks sum = {}'.format(ub.map_vals(lambda x: x.sum(), masks)))
            >>> print('truth sum = {}'.format(ub.map_vals(lambda x: x.sum(), truth)))
            >>> print('masks shape = {}'.format(ub.map_vals(lambda x: x.shape, masks)))
            >>> print('truth shape = {}'.format(ub.map_vals(lambda x: x.shape, truth)))
        """
        import math
        # Parameters
        target_cxwh = target['cxywh']  # normalized (01) cxywh boxes
        target_cidx = target['class_idxs']
        target_weight = target['weight']
        # TODO: ensure normalized
        # nB = target_cxwh.size(0)
        # nA = self.num_anchors
        nPixels = nH * nW
        item_stride = nA * nH * nW
        # Tensors: conf_mask starts at the no-object penalty everywhere;
        # cells matched to a ground truth are re-weighted below.
        conf_mask = torch.full((nB, nA, nPixels), self.noobject_scale, requires_grad=False)
        coord_mask = torch.zeros(nB, nA, 1, nPixels, requires_grad=False)
        cls_mask = torch.zeros(nB, nA, nPixels, requires_grad=False).byte()
        tcoord = torch.zeros(nB, nA, 4, nPixels, requires_grad=False)
        tconf = torch.zeros(nB, nA, nPixels, requires_grad=False)
        tcls = torch.zeros(nB, nA, nPixels, requires_grad=False)
        # 12800
        # Early in training (seen < seen_thresh), supervise *all* coordinates
        # toward the anchor priors to stabilize the box predictions.
        if self.seen < self.seen_thresh:
            coord_mask.fill_(1)
            # coord_mask.fill_(.01 / self.coord_scale)
            if self.anchor_step == 4:
                # Anchors carry explicit prior offsets in columns 2:4.
                # TODO: use permute
                tcoord[:, :, 0] = self.anchors[:, 2].contiguous().view(1, nA, 1, 1).repeat(nB, 1, 1, nPixels)
                tcoord[:, :, 1] = self.anchors[:, 3].contiguous().view(
                    1, nA, 1, 1).repeat(nB, 1, 1, nPixels)
            else:
                # Default prior: box centered in its grid cell.
                tcoord[:, :, 0].fill_(0.5)
                tcoord[:, :, 1].fill_(0.5)
        self.anchors = self.anchors.to(pred_boxes.device)
        if self.anchor_step == 4:
            anchors_wh = self.anchors[:, 2:4]
        else:
            anchors_wh = self.anchors
        if target_weight.numel() > 0:
            # Determine the number of true boxes in each batch item by checking
            # if the weight has a padding value (which should be -1)
            target_numgt = (target_weight >= 0).sum(dim=1).data.cpu().numpy()
            # For each batch item, ...
            for b, num_gt in enumerate(target_numgt):
                if num_gt > 0:
                    # Remove dummy batch padding (which should be trailing)
                    gtb = target_cxwh[b, 0:num_gt].view(-1, 4)
                    gtc = target_cidx[b, 0:num_gt].view(-1)
                    gtw = target_weight[b, 0:num_gt].view(-1)
                    # Build up tensors
                    cur_pred_boxes = pred_boxes[b * item_stride: (b + 1) * item_stride]
                    # convert normalized truth boxes to output (grid) space
                    cur_true_boxes = gtb.clone()
                    cur_true_boxes[:, ::2] *= nW
                    cur_true_boxes[:, 1::2] *= nH
                    # Set confidence mask of matching detections to 0, we
                    # will selectively reenable a subset of these values later
                    iou_gt_pred = bbox_ious(cur_true_boxes, cur_pred_boxes)
                    mask = (iou_gt_pred > self.thresh).sum(0) >= 1
                    conf_mask_b = conf_mask[b]
                    conf_mask_b[mask.view_as(conf_mask_b)] = 0
                    # Find best anchor for each gt (by width/height IoU only)
                    gt_wh = cur_true_boxes[:, 2:4]
                    iou_gt_anchors = wh_ious(gt_wh, anchors_wh)
                    _, best_anchors = iou_gt_anchors.max(1)
                    # Set masks and target values for each gt
                    for i in range(num_gt):
                        cx, cy, w, h = cur_true_boxes[i, 0:4]
                        # Grid cell containing the gt center, clamped in-bounds.
                        gi = min(nW - 1, max(0, int(cx)))
                        gj = min(nH - 1, max(0, int(cy)))
                        anch_idx = best_anchors[i]
                        grid_idx = gj * nW + gi
                        weight = gtw[i]
                        # Small boxes get a larger coordinate weight (2 - area frac).
                        coord_mask[b, anch_idx, 0, grid_idx] = (2 - (w * h) / nPixels) * weight
                        # NOTE(review): cls_mask is a byte tensor, so fractional
                        # weights are truncated here -- confirm intended.
                        cls_mask[b, anch_idx, grid_idx] = weight
                        conf_mask[b, anch_idx, grid_idx] = self.object_scale * weight
                        # Encode offsets within the cell and log w/h ratios
                        # relative to the assigned anchor.
                        tcoord[b, anch_idx, 0, grid_idx] = cx - gi
                        tcoord[b, anch_idx, 1, grid_idx] = cy - gj
                        tcoord[b, anch_idx, 2, grid_idx] = math.log(w / self.anchors[anch_idx, 0])
                        tcoord[b, anch_idx, 3, grid_idx] = math.log(h / self.anchors[anch_idx, 1])
                        # Confidence target is the IoU with the matched prediction.
                        iou = iou_gt_pred[i, anch_idx * nPixels + grid_idx]
                        tconf[b, anch_idx, grid_idx] = iou
                        tcls[b, anch_idx, grid_idx] = gtc[i]
        # sqrt: the loss multiplies these masks *inside* the squared error,
        # so the effective weight is the mask squared.
        masks = {
            'coord': coord_mask.to(device).sqrt_(),
            'conf': conf_mask.to(device).sqrt_(),
            'cls': cls_mask.to(device),
        }
        truth = {
            'coord': tcoord.to(device),
            'conf': tconf.to(device),
            'cls': tcls.to(device),
        }
        return masks, truth
def wh_ious(wh1, wh2):
    """
    Compute IOU between centered boxes with given wh.
    Slightly faster than zeroing the center coords.

    Args:
        wh1 (torch.Tensor): (N, 2) widths/heights.
        wh2 (torch.Tensor): (M, 2) widths/heights.

    Returns:
        torch.Tensor: (N, M) pairwise IOU matrix.
    """
    # Every box is centered at the origin, so the overlap along one axis is
    # simply the smaller of the two extents along that axis.
    w1, h1 = wh1.split(1, 1)          # (N, 1) columns
    w2, h2 = wh2.split(1, 1)          # (M, 1) columns
    inter_w = w1.min(w2.t()).clamp_(min=0)
    inter_h = h1.min(h2.t()).clamp_(min=0)
    intersections = inter_w * inter_h
    # Broadcast (N, 1) + (1, M) to get pairwise union areas.
    areas1 = wh1.prod(dim=1, keepdim=True)
    areas2 = wh2.prod(dim=1, keepdim=True)
    unions = areas1 + areas2.t() - intersections
    return intersections / unions
def bbox_ious(boxes1, boxes2):
""" Compute IOU between all boxes from ``boxes1`` with all boxes from ``boxes2``.
Args:
boxes1 (torch.Tensor): List of bounding boxes
boxes2 (torch.Tensor): List of bounding boxes
Note:
List format: [[xc, yc, w, h],...]
"""
b1x1, b1y1 = (boxes1[:, :2] - (boxes1[:, 2:4] / 2)).split(1, 1)
b1x2, b1y2 = (boxes1[:, :2] + (boxes1[:, 2:4] / 2)).split(1, 1)
b2x1, b2y1 = (boxes2[:, :2] - (boxes2[:, 2:4] / 2)).split(1, 1)
b2x2, b2y2 = (boxes2[:, :2] + (boxes2[:, 2:4] / 2)).split(1, 1)
dx = (b1x2.min(b2x2.t()) - b1x1.max(b2x1.t())).clamp(min=0)
dy = (b1y2.min(b2y2.t()) - b1y1.max(b2y1.t())).clamp(min=0)
intersections = dx | |
<gh_stars>0
import hashlib
import itertools
import json
import logging
import multiprocessing
import os
import shutil
import stat
import tarfile
import time
from _hashlib import HASH as Hash
from enum import Enum
from fnmatch import fnmatch
from datetime import datetime
from collections import defaultdict
from glob import glob
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple
from typing import Union
from urllib.parse import urlparse
from zipfile import ZipFile, ZIP_DEFLATED
import click
import mgzip
from datetime import datetime
from collections import defaultdict
from pymatgen.core import Structure
from pymatgen.util.provenance import StructureNL
from datetime import datetime
from collections import defaultdict
from pymatgen.core import Structure
from pymatgen.util.provenance import StructureNL
from atomate.vasp.database import VaspCalcDb
from atomate.vasp.drones import VaspDrone
from bravado.client import SwaggerClient
from bravado.requests_client import RequestsClient, Authenticator
import requests
from dotty_dict import dotty
from fireworks.fw_config import FW_BLOCK_FORMAT
from keycloak import KeycloakOpenID
from maggma.core.store import Sort
from maggma.stores.advanced_stores import MongograntStore
from mongogrant.client import Client
from pymatgen.util.provenance import StructureNL
from pydantic import BaseModel, Field
from pymatgen import Structure
from pymongo.errors import DocumentTooLarge
from tqdm import tqdm
from emmet.cli import SETTINGS
from emmet.core.utils import group_structures
# Shared logger used by all emmet CLI helpers below.
logger = logging.getLogger("emmet")
# Owner/group read+write bits; used to sanity-check permissions on VASP dirs/files.
perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
class EmmetCliError(Exception):
    """Raised for unrecoverable errors in emmet CLI workflows."""
class ReturnCodes(Enum):
    """codes to print command exit message in github issue comments"""

    # Values are the exact human-readable phrases posted back to the issue,
    # so they must not be reworded casually.
    SUCCESS = "COMPLETED"
    ERROR = "encountered ERROR"
    WARNING = "exited with WARNING"
    SUBMITTED = "submitted to SLURM"
def structures_match(s1, s2):
    """Return True when the two structures collapse into a single group
    under emmet's structure-matching criteria."""
    grouped = list(group_structures([s1, s2]))
    return len(grouped) == 1
def ensure_indexes(indexes, colls):
    """Ensure each index in `indexes` exists on every collection in `colls`.

    An index is considered present if its name (key part, without the
    trailing direction suffix) already appears in `index_information()`.
    Logs, per collection, the indexes that actually had to be created.
    """
    created = defaultdict(list)
    for index in indexes:
        for coll in colls:
            # index_information keys look like "<field>_<direction>"; strip
            # the direction to compare against the requested field name.
            keys = [k.rsplit("_", 1)[0] for k in coll.index_information().keys()]
            if index not in keys:
                coll.ensure_index(index)
                created[coll.full_name].append(index)
    # BUG FIX: previously this logged a single line using the loop-leaked
    # `coll` variable (and clobbered the `indexes` parameter), so only the
    # last collection's created indexes were ever reported.
    for full_name, created_indexes in created.items():
        logger.debug(
            f"Created the following index(es) on {full_name}:\n"
            f"{', '.join(created_indexes)}"
        )
def calcdb_from_mgrant(spec_or_dbfile):
    """Build a VaspCalcDb from either a db file on disk or a mongogrant spec.

    A path that exists on disk takes precedence; otherwise the argument is
    interpreted as a "host/dbname_or_alias" mongogrant spec.
    """
    if os.path.exists(spec_or_dbfile):
        return VaspCalcDb.from_db_file(spec_or_dbfile)

    client = Client()
    host, dbname_or_alias = spec_or_dbfile.split("/", 1)
    # NOTE need write access to source to ensure indexes
    auth = client.get_auth(host, dbname_or_alias, "rw")
    if auth is None:
        raise Exception("No valid auth credentials available!")

    return VaspCalcDb(
        auth["host"],
        27017,
        auth["db"],
        "tasks",
        auth["username"],
        auth["password"],
        authSource=auth["db"],
    )
def get_meta_from_structure(struct):
    """Summarize a (pymatgen-like) structure into a flat metadata dict.

    Returns formula_pretty, nelements, nsites, is_ordered and is_valid.
    """
    composition = struct.composition
    return {
        "formula_pretty": composition.reduced_formula,
        # distinct elements only, hence the set()
        "nelements": len(set(composition.elements)),
        "nsites": len(struct),
        "is_ordered": struct.is_ordered,
        "is_valid": struct.is_valid(),
    }
def aggregate_by_formula(coll, q, key=None):
    """Group documents of `coll` matching `q` (plus the global exclude and
    base queries) by an aggregation key, pushing selected structure fields
    into a "structures" array per group.

    When `key` is None, the first key from SETTINGS.aggregation_keys found
    in the collection is used; `nested` becomes a 0/1 flag recording whether
    documents embed an "snl" subdocument.
    """
    query = {"$and": [q, SETTINGS.exclude]}
    query.update(SETTINGS.base_query)
    nested = False
    if key is None:
        for k in SETTINGS.aggregation_keys:
            q = {k: {"$exists": 1}}
            q.update(SETTINGS.base_query)
            doc = coll.find_one(q)
            if doc:
                key = k
                # 0/1: documents nest their structure under an "snl" field
                nested = int("snl" in doc)
                break
        else:
            raise ValueError(
                f"could not find one of the aggregation keys {SETTINGS.aggregation_keys} in {coll.full_name}!"
            )
    # NOTE(review): `structure_keys` is not defined anywhere in this module
    # view — presumably a module-level table of field lists indexed by the
    # 0/1 `nested` flag (False indexes as 0 when `key` is passed in). Verify
    # it exists at runtime, otherwise this raises NameError.
    push = {k.split(".")[-1]: f"${k}" for k in structure_keys[nested]}
    return coll.aggregate(
        [
            {"$match": query},
            {"$sort": {"nelements": 1, "nsites": 1}},
            {"$group": {"_id": f"${key}", "structures": {"$push": push}}},
        ],
        allowDiskUse=True,
        batchSize=1,
    )
def load_structure(dct):
    """Deserialize a structure dict and return its primitive cell with
    oxidation states stripped (in-place removal before reduction)."""
    structure = Structure.from_dict(dct)
    structure.remove_oxidation_states()
    return structure.get_primitive_structure()
# a utility function to get us a slice of an iterator, as an iterator
# when working with iterators maximum lazyness is preferred
def iterator_slice(iterator, length):
    """Lazily yield successive tuples of at most `length` items from
    `iterator`, stopping when the source is exhausted."""
    it = iter(iterator)
    chunk = tuple(itertools.islice(it, length))
    while chunk:
        yield chunk
        chunk = tuple(itertools.islice(it, length))
def chunks(lst, n):
    """Split `lst` into consecutive sublists of length `n`; the final
    sublist may be shorter."""
    starts = range(0, len(lst), n)
    return [lst[start:start + n] for start in starts]
def get_subdir(dn):
    """Return the last path component of `dn`, ignoring trailing separators."""
    trimmed = dn.rstrip(os.sep)
    return trimmed.rsplit(os.sep, 1)[-1]
def get_timestamp_dir(prefix="launcher"):
    """Return a directory name of the form `<prefix>_<UTC timestamp>`.

    Uses fireworks' FW_BLOCK_FORMAT so generated block_/launcher_ directory
    names sort chronologically.
    """
    from datetime import timezone  # local import keeps this edit self-contained

    # FIX: datetime.utcnow() is deprecated (naive UTC, removed-path in 3.12+);
    # an aware UTC datetime formats identically here since FW_BLOCK_FORMAT
    # contains no %z/%Z directive.
    time_now = datetime.now(timezone.utc).strftime(FW_BLOCK_FORMAT)
    return "_".join([prefix, time_now])
def is_vasp_dir(list_of_files):
    """Return True if the listing contains an INCAR* file (i.e. looks like a
    VASP calculation directory).

    FIX: the original fell off the end and implicitly returned None for the
    negative case; `any()` always returns a real bool.
    """
    return any(f.startswith("INCAR") for f in list_of_files)
def make_block(base_path):
    """Return the path of a fresh timestamped block_* directory under
    `base_path`, creating it on disk only when the CLI --run flag is set."""
    ctx = click.get_current_context()
    run = ctx.parent.parent.params["run"]
    block_dir = os.path.join(base_path, get_timestamp_dir(prefix="block"))
    if run:
        os.mkdir(block_dir)
    return block_dir
def get_symlinked_path(root, base_path_index):
    """organize directory in block_*/launcher_* via symbolic links

    `base_path_index` is the number of path components in the base directory;
    everything below it is reorganized into block_*/launcher_* children.
    Only performs filesystem changes when the CLI --run flag is set.
    """
    ctx = click.get_current_context()
    run = ctx.parent.parent.params["run"]
    root_split = root.split(os.sep)
    base_path = os.sep.join(root_split[:base_path_index])
    if root_split[base_path_index].startswith("block_"):
        # already under a block dir — reuse it
        block_dir = os.sep.join(root_split[: base_path_index + 1])
    else:
        # pick the first existing block with capacity (< 300 launchers)
        all_blocks = glob(os.path.join(base_path, "block_*/"))
        for block_dir in all_blocks:
            p = os.path.join(block_dir, "launcher_*/")
            if len(glob(p)) < 300:
                break
        else:
            # didn't find a block with < 300 launchers
            block_dir = make_block(base_path)
    if root_split[-1].startswith("launcher_"):
        # keep the existing launcher name; just move it under the block
        launch_dir = os.path.join(block_dir, root_split[-1])
        if not os.path.exists(launch_dir):
            if run:
                os.rename(root, launch_dir)
            logger.debug(f"{root} -> {launch_dir}")
    else:
        # mint a launcher name; leave a symlink at the old location so
        # external references to `root` keep working
        launch = get_timestamp_dir(prefix="launcher")
        launch_dir = os.path.join(block_dir, launch)
        if run:
            os.rename(root, launch_dir)
            os.symlink(launch_dir, root)
        logger.debug(f"{root} -> {launch_dir}")
    return launch_dir
def create_orig_inputs(vaspdir):
    """Create <INPUT>.orig copies of the VASP input files in `vaspdir` if no
    .orig* variant exists yet. Copies only happen under the CLI --run flag.
    """
    ctx = click.get_current_context()
    run = ctx.parent.parent.params["run"]
    for inp in ["INCAR", "KPOINTS", "POTCAR", "POSCAR"]:
        input_path = os.path.join(vaspdir, inp)
        if not glob(input_path + ".orig*"):
            # fall back to any variant (e.g. INCAR.gz) as the copy source
            matches = glob(input_path + "*")
            if matches:
                input_path = matches[0]
                # NOTE(review): str.replace swaps the *first* occurrence of
                # `inp` anywhere in the path — a directory name containing
                # e.g. "INCAR" would be mangled; confirm paths never do.
                orig_path = input_path.replace(inp, inp + ".orig")
                if run:
                    shutil.copyfile(input_path, orig_path)
                logger.debug(f"{input_path} -> {orig_path}")
# https://stackoverflow.com/a/34073559
class VaspDirsGenerator:
    """Iterable wrapper around get_vasp_dirs() that captures the generator's
    return value (the yielded-directory count) in `self.value` once the
    iteration finishes (see the StackOverflow link above)."""

    def __init__(self):
        self.gen = get_vasp_dirs()

    def __iter__(self):
        # `yield from` both relays the items and binds the generator's
        # StopIteration value to self.value.
        self.value = yield from self.gen
def get_vasp_dirs():
    """Walk the CLI-configured directory and yield VASP calculation dirs.

    For each directory whose file listing contains INCAR*: dangling symlinked
    files are unlinked, permissions are validated against `perms`, files are
    gzipped in place (under --run), and the dir is optionally reorganized into
    block_*/launcher_* (under --reorg). Yields at most `nmax` directories and
    returns the total count (captured by VaspDirsGenerator).
    """
    ctx = click.get_current_context()
    run = ctx.parent.parent.params["run"]
    nmax = ctx.parent.params["nmax"]
    pattern = ctx.parent.params["pattern"]
    reorg = ctx.parent.params["reorg"]
    base_path = ctx.parent.params["directory"].rstrip(os.sep)
    base_path_index = len(base_path.split(os.sep))
    if pattern:
        pattern_split = pattern.split(os.sep)
        pattern_split_len = len(pattern_split)
    counter = 0
    for root, dirs, files in os.walk(base_path, topdown=True):
        if counter == nmax:
            break
        # depth below base_path; used to apply the per-level name pattern
        level = len(root.split(os.sep)) - base_path_index
        if pattern and dirs and pattern_split_len > level:
            p = pattern_split[level]
            # prune the walk in place (topdown=True makes this effective)
            dirs[:] = [d for d in dirs if fnmatch(d, p)]
        for d in dirs:
            dn = os.path.join(root, d)
            st = os.stat(dn)
            if not bool(st.st_mode & perms):
                raise EmmetCliError(f"Insufficient permissions {st.st_mode} for {dn}.")
        if is_vasp_dir(files):
            gzipped = False
            for f in files:
                fn = os.path.join(root, f)
                if os.path.islink(fn):
                    # symlinked files are dropped, not followed
                    if run:
                        os.unlink(fn)
                        logger.warning(f"Unlinked {fn}.")
                    else:
                        logger.warning(f"Would unlink {fn}.")
                    continue
                st = os.stat(fn)
                if not bool(st.st_mode & perms):
                    raise EmmetCliError(
                        f"Insufficient permissions {st.st_mode} for {fn}."
                    )
                if run and not f.endswith(".gz"):
                    fn_gz = fn + ".gz"
                    if os.path.exists(fn_gz):
                        os.remove(fn_gz)  # remove left-over gz (cancelled job)
                    with open(fn, "rb") as fo, mgzip.open(fn_gz, "wb", thread=0) as fw:
                        fw.write(fo.read())
                    os.remove(fn)  # remove original
                    shutil.chown(fn_gz, group="matgen")
                    gzipped = True
            # NOTE skip symlink'ing on MP calculations from the early days
            vasp_dir = get_symlinked_path(root, base_path_index) if reorg else root
            create_orig_inputs(vasp_dir)
            dirs[:] = []  # don't descend further (i.e. ignore relax1/2)
            logger.log(logging.INFO if gzipped else logging.DEBUG, vasp_dir)
            yield vasp_dir
            counter += 1
    return counter
def reconstruct_command(sbatch=False):
    """Rebuild the full CLI invocation (group, sub-group, command plus their
    options) as a shell-ready multi-line string.

    With `sbatch=True`, the --sbatch and --bb flags are omitted so the
    reconstructed command can be resubmitted from inside a batch job.
    """
    ctx = click.get_current_context()
    command = []
    # NOTE(review): `ctx.grand_parent` is not a standard click attribute —
    # presumably attached elsewhere in this CLI; confirm before relying on it.
    for level, (cmd, params) in enumerate(
        zip(
            ctx.command_path.split(),
            [ctx.grand_parent.params, ctx.parent.params, ctx.params],
        )
    ):
        command.append(cmd)
        if level:
            command.append("\\\n")
        for k, v in params.items():
            k = k.replace("_", "-")
            if v:
                if isinstance(v, bool):
                    # boolean flags have no value; skip sbatch/bb when asked
                    if (sbatch and k != "sbatch" and k != "bb") or not sbatch:
                        command.append(f"--{k}")
                elif isinstance(v, str):
                    command.append(f'--{k}="{v}"')
                elif isinstance(v, tuple) or isinstance(v, list):
                    # multi-value options repeat the flag per value
                    for x in v:
                        command.append(f'--{k}="{x}"')
                        command.append("\\\n")
                else:
                    command.append(f"--{k}={v}")
                if level:
                    command.append("\\\n")
    return " ".join(command).strip().strip("\\")
def parse_vasp_dirs(vaspdirs, tag, task_ids, snl_metas): # noqa: C901
process = multiprocessing.current_process()
name = process.name
chunk_idx = int(name.rsplit("-")[1]) - 1
logger.info(f"{name} starting.")
tags = [tag, SETTINGS.year_tags[-1]]
ctx = click.get_current_context()
spec_or_dbfile = ctx.parent.parent.params["spec_or_dbfile"]
target = calcdb_from_mgrant(spec_or_dbfile)
snl_collection = target.db.snls_user
sbxn = list(filter(None, target.collection.distinct("sbxn")))
logger.info(f"Using sandboxes {sbxn}.")
no_dupe_check = ctx.parent.parent.params["no_dupe_check"]
run = ctx.parent.parent.params["run"]
projection = {"tags": 1, "task_id": 1}
# projection = {"tags": 1, "task_id": 1, "calcs_reversed": 1}
count = 0
drone = VaspDrone(
additional_fields={"tags": tags},
store_volumetric_data=ctx.params["store_volumetric_data"],
)
# fs_keys = ["bandstructure", "dos", "chgcar", "locpot", "elfcar"]
# for i in range(3):
# fs_keys.append(f"aeccar{i}")
for vaspdir in vaspdirs:
logger.info(f"{name} VaspDir: {vaspdir}")
launcher = get_subdir(vaspdir)
query = {"dir_name": {"$regex": launcher}}
manual_taskid = isinstance(task_ids, dict)
docs = list(
target.collection.find(query, projection).sort([("_id", -1)]).limit(1)
)
if docs:
if no_dupe_check:
logger.warning(f"FORCING re-parse of {launcher}!")
if not manual_taskid:
raise ValueError("need --task-ids when re-parsing!")
else:
if run:
shutil.rmtree(vaspdir)
logger.warning(f"{name} {launcher} already parsed -> removed.")
else:
logger.warning(f"{name} {launcher} already parsed -> would remove.")
continue
try:
task_doc = drone.assimilate(vaspdir)
except Exception as ex:
logger.error(f"Failed to assimilate {vaspdir}: {ex}")
continue
task_doc["sbxn"] = sbxn
manual_taskid = isinstance(task_ids, dict)
snl_metas_avail = isinstance(snl_metas, dict)
task_id = task_ids[launcher] if manual_taskid else task_ids[chunk_idx][count]
task_doc["task_id"] = task_id
logger.info(f"Using {task_id} for {launcher}.")
if docs:
# make sure that task gets the same tags as the previously parsed task
# (run through set to implicitly remove duplicate tags)
if docs[0]["tags"]:
existing_tags = list(set(docs[0]["tags"]))
task_doc["tags"] += existing_tags
logger.info(f"Adding existing tags {existing_tags} to {tags}.")
snl_dct = None
if snl_metas_avail:
snl_meta = snl_metas.get(launcher)
if snl_meta:
references = snl_meta.get("references")
authors = snl_meta.get(
"authors", ["Materials Project <<EMAIL>>"]
)
kwargs = {"projects": [tag]}
if references:
kwargs["references"] = references
struct = Structure.from_dict(task_doc["input"]["structure"])
snl = StructureNL(struct, authors, **kwargs)
snl_dct = snl.as_dict()
snl_dct.update(get_meta_from_structure(struct))
snl_id = snl_meta["snl_id"]
snl_dct["snl_id"] = snl_id
logger.info(f"Created SNL object for {snl_id}.")
snl_dct = None
if snl_metas_avail:
snl_meta = snl_metas.get(launcher)
if snl_meta:
references = snl_meta.get("references")
authors = snl_meta.get("authors", ["Materials Project <<EMAIL>>"])
kwargs = {"projects": [tag]}
if references:
kwargs["references"] = references
struct = Structure.from_dict(task_doc["input"]["structure"])
snl = StructureNL(struct, authors, **kwargs)
snl_dct = snl.as_dict()
snl_dct.update(get_meta_from_structure(struct))
snl_id = snl_meta["snl_id"]
snl_dct["snl_id"] = snl_id
logger.info(f"Created SNL object for {snl_id}.")
if run:
if task_doc["state"] == "successful":
if docs and no_dupe_check:
# new_calc = task_doc["calcs_reversed"][0]
# existing_calc = docs[0]["calcs_reversed"][0]
# print(existing_calc.keys())
# for fs_key in fs_keys:
# print(fs_key)
# fs_id_key = f"{fs_key}_fs_id"
# | |
result.detrivialize()
self.negated = thing = self.negated.detrivialize()
if thing is None: return ~BTPredicate() # uniquely false
if thing.false: return None # uniquely true
return self
    def __call__(self, boundtuples, toplevel=0):
        """Evaluate NOT: keep only rows the negated predicate rejects.

        BTPredicate.__call__ first applies this predicate's own constraints;
        entries the wrapped predicate accepts (anything that is not the
        integer reject marker) are then zeroed out.
        """
        from types import IntType  # Python 2 types module
        tt = type
        current = BTPredicate.__call__(self, boundtuples, toplevel)
        omit = self.negated(current)
        for i in xrange(len(current)):
            # non-int entry in `omit` == accepted by negated pred == reject here
            if tt(omit[i]) is not IntType:
                current[i]=0
        return current
    def negated_constraints(self):
        """the negated constraints of a NOT are the
        negated constraints of the thing negated."""
        # i.e. the wrapped predicate's *positive* constraints attribute.
        return self.negated.constraints
    def __and__(self, other):
        """do the obvious thing: wrap both operands in a conjunction."""
        return BTand_pred([self, other])
    def __or__(self, other):
        """do the obvious thing: wrap both operands in a disjunction."""
        return BTor_pred([self, other])
    def __invert__(self):
        # double negation: ~~p is just the wrapped predicate
        return self.negated
    def __cmp__(self, other):
        """Order by class first, then the negated predicate, then fall back
        to the base-class comparison (constraints)."""
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.negated,other.negated)
        if test: return test
        return BTPredicate.__cmp__(self,other)
    def __hash__(self):
        # salt constant distinguishes NOT(p) from p; mixes in constraints
        return hash(self.negated)^787876^hash(self.constraints)
class BTand_pred(BTPredicate):
    """Conjunction (AND) of boolean predicates with an optional shared
    BoundTuple `precondition` of equality constraints (Python 2 / gadfly)."""

    def __init__(self, members, precondition=None, *othermembers):
        # extra positional members are accepted so initargs() round-trips;
        # kjSet deduplicates the member list
        members = list(members) + list(othermembers)
        members = self.members = kjbuckets.kjSet(members).items()
        self.constraints = precondition # common constraints
        if members:
            # common constraints are those in any member
            if precondition is not None:
                constraints = precondition
            else:
                constraints = BoundTuple()
            for i in xrange(len(members)):
                m = members[i]
                mc = m.constraints
                if mc:
                    # merging may return None, meaning a contradiction
                    constraints = constraints + mc
                    if constraints is None: break
                if m.__class__==BTPredicate:
                    members[i] = None # subsumed above
            members = self.members = filter(None, members)
            for m in members:
                if m.contains_aggregate:
                    self.contains_aggregate=1
            ### consider propagating constraints down?
            self.constraints = constraints
            if constraints is None: self.false = 1

    def initargs(self):
        # signature is (members, precondition, *othermembers): pass the
        # deduplicated members through the varargs tail
        return ((), self.constraints) + tuple(self.members)

    def relbind(self, dict, db):
        # rebind every member and the shared constraints to concrete relations
        ms = []
        for m in self.members:
            ms.append( m.relbind(dict, db) )
        c = self.constraints.relbind(dict, db)
        return BTand_pred(ms, c)

    def uncache(self):
        for m in self.members:
            m.uncache()

    def domain(self):
        # union of this predicate's own domain with all member domains
        all = BTPredicate.domain(self).items()
        for x in self.members:
            all = all + x.domain().items()
        return kjbuckets.kjSet(all)

    def __repr__(self):
        m = self.members
        c = self.constraints
        r = map(repr, m)
        if self.false: r.insert(0, "FALSE")
        r = ' AND '.join(r)
        r = "(%s)" % r
        if c: r = "[conj](%s and %s)" % (c, r)
        return r

    def detrivialize(self):
        """hook added to allow elimination of trivialities
        return None if completely true, or simpler form
        or self, if no simplification is possible."""
        # first apply demorgan's law to push ands down
        # (exponential in worst case).
        ms = self.members
        some_or = None
        c = self.constraints
        for m in ms:
            if m.__class__==BTor_pred:
                some_or = m
                ms.remove(m)
                break
        if some_or is not None:
            result = some_or
            # NOTE(review): `result` is bound *before* some_or is augmented
            # with the constraints below, so the `& BTPredicate(c)` value
            # appears unused — looks like a latent bug; confirm upstream.
            if c is not None:
                some_or = some_or & BTPredicate(c)
            for m in ms:
                result = result & m # preserves or/and precedence
            if result.__class__!=BTor_pred:
                raise "what the?"  # Python 2 string exception: invariant check
            result = result.detrivialize()
            return result
        for i in xrange(len(ms)):
            ms[i] = ms[i].detrivialize()
        # drop members that detrivialized to None (uniquely true)
        ms[:] = filter(None, ms)
        if not ms:
            # boundary case: all members trivially true
            if c is None:
                return None
            else:
                return BTPredicate(c).detrivialize()
        ms[:] = kjbuckets.kjSet(ms).items()
        if len(ms)==1 and c is None:
            return ms[0] # and of 1
        return self

    def __call__(self, boundtuples, toplevel=0):
        # apply common constraints first
        current = BTPredicate.__call__(self, boundtuples, toplevel)
        # then filter through every member in turn
        for m in self.members:
            current = m(current)
        return current

    def negated_constraints(self):
        """the negated constraints of an AND are
        the negated constraints of *any* member"""
        ms = self.members
        result = BoundTuple()
        for m in ms:
            mc = m.negated_constraints()
            if mc: result = result + mc
        return result

    def __and__(self, other):
        """push "and" down if other is an or"""
        if other.__class__==BTor_pred:
            return other & self
        c = self.constraints
        # merge in other and
        if other.__class__==BTand_pred:
            allmem = self.members+other.members
            oc = other.constraints
            if c is None:
                c = oc
            elif oc is not None:
                c = c+oc
            return BTand_pred(allmem, c)
        return BTand_pred(self.members + [other], c)

    def __or__(self, other):
        """do the obvious thing."""
        return BTor_pred([self, other])

    def __invert__(self):
        """translate to or-not"""
        ms = self.members
        if not ms: return ~BTPredicate() # boundary case
        result = ~ms[0]
        for m in ms[1:]:
            result = result | ~m
        return result

    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        kjSet = kjbuckets.kjSet
        # order-insensitive member comparison via sets
        test = cmp(kjSet(self.members), kjSet(other.members))
        if test: return test
        return BTPredicate.__cmp__(self, other)

    def __hash__(self):
        return hash(kjbuckets.kjSet(self.members))
class NontrivialEqPred(BTPredicate):
    """equation of nontrivial expressions."""

    def __init__(self, left, right):
        # maybe should used reflexivity...
        self.left = left
        self.right = right
        self.contains_aggregate = left.contains_aggregate or right.contains_aggregate

    def initargs(self):
        return (self.left, self.right)

    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.right, other.right)
        if test: return test
        # BUG FIX: previously compared other.left with itself (always 0),
        # making predicates with different left sides compare equal.
        return cmp(self.left, other.left)

    def __hash__(self):
        # BUG FIX: this was misnamed `hash(self, other)`, so instances never
        # got a __hash__ consistent with __cmp__ (subclasses define __hash__).
        return hash(self.left) ^ hash(self.right)

    def hash(self, other):
        """Deprecated alias for the old misnamed method; kept for
        backward compatibility. The `other` argument is ignored."""
        return self.__hash__()

    def relbind(self, dict, db):
        # rebuild with both sides rebound; subclasses reuse this via __class__
        Class = self.__class__
        return Class(self.left.relbind(dict,db), self.right.relbind(dict,db) )

    def uncache(self):
        self.left.uncache()
        self.right.uncache()

    def domain(self):
        return self.left.domain() + self.right.domain()

    # operator symbol used by __repr__; overridden in subclasses
    op = "=="

    def __repr__(self):
        return "(%s)%s(%s)" % (self.left, self.op, self.right)

    def detrivialize(self):
        return self

    def __call__(self, assigns, toplevel=0):
        # zero out (reject) any live row where the two sides differ
        from types import IntType  # Python 2 types module
        tt = type
        lv = self.left.value(assigns)
        rv = self.right.value(assigns)
        result = assigns[:]
        for i in xrange(len(assigns)):
            t = assigns[i]
            # int entries are already-rejected markers; skip them
            if type(t) is not IntType and lv[i]!=rv[i]:
                result[i] = 0
        return result

    def negated_constraints(self):
        return None

    def __and__(self, other):
        return BTand_pred( [self, other] )

    def __or__(self, other):
        return BTor_pred( [self, other] )

    def __invert__(self):
        return BTnot_pred(self)
class BetweenPredicate(NontrivialEqPred):
    """e1 BETWEEN e2 AND e3"""

    def __init__(self, middle, lower, upper):
        # NOTE(review): unlike NontrivialEqPred.__init__ this does not derive
        # contains_aggregate from the operands — confirm the base class
        # supplies a class-level default, otherwise attribute access may fail.
        self.middle = middle
        self.lower = lower
        self.upper = upper

    def initargs(self):
        return (self.middle, self.lower, self.upper)

    def domain(self):
        return (
            self.middle.domain() + self.lower.domain() + self.upper.domain())

    def relbind(self, dict, db):
        # NOTE(review): rebinds in place and returns self, whereas the parent
        # builds a fresh instance — verify this asymmetry is intentional.
        self.middle = self.middle.relbind(dict, db)
        self.lower = self.lower.relbind(dict, db)
        self.upper = self.upper.relbind(dict, db)
        return self

    def uncache(self):
        self.middle.uncache()
        self.upper.uncache()
        self.lower.uncache()

    def __repr__(self):
        return "(%s BETWEEN %s AND %s)" % (
            self.middle, self.lower, self.upper)

    def __hash__(self):
        # asymmetric mix so swapping lower/upper changes the hash
        return hash(self.middle)^~hash(self.lower)^hash(self.upper)^55

    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.lower, other.lower)
        if test: return test
        test = cmp(self.middle, other.middle)
        if test: return test
        return cmp(self.upper, other.upper)

    def __call__(self, assigns, toplevel=0):
        # reject live rows whose middle value lies outside [lower, upper]
        from types import IntType  # Python 2 types module
        tt = type
        lowv = self.lower.value(assigns)
        upv = self.upper.value(assigns)
        midv = self.middle.value(assigns)
        result = assigns[:]
        for i in xrange(len(assigns)):
            t = assigns[i]
            # int entries are already-rejected markers; skip them
            if tt(t) is not IntType:
                midvi = midv[i]
                if lowv[i]>midvi or upv[i]<midvi:
                    result[i] = 0
        return result
class ExistsPred(NontrivialEqPred):
    """EXISTS subquery."""

    # class-level default; subqueries never mark the outer predicate aggregate
    contains_aggregate = 0

    def __init__(self, subq):
        self.cached_result = None
        self.cachable = None
        self.subq = subq

    def initargs(self):
        return (self.subq,)

    def domain(self):
        result = self.subq.unbound()
        # if there are no outer bindings, evaluate ONCE!
        if not result:
            self.cachable = 1
        return result

    def relbind(self, dict, db):
        # note the (db, dict) argument order expected by the subquery
        self.subq = self.subq.relbind(db, dict)
        return self

    def uncache(self):
        self.cached_result = None
        self.subq.uncache()

    def __repr__(self):
        return "\nEXISTS\n%s\n" % (self.subq,)

    def __call__(self, assigns, toplevel=0):
        ### should optimize!!!
        from types import IntType  # Python 2 types module
        tt = type
        # local name shadows the builtin eval on purpose (subquery evaluator)
        eval = self.subq.eval
        result = assigns[:]
        # shortcut: if cachable, eval only once and cache
        if self.cachable:
            test = self.cached_result
            if test is None:
                self.cached_result = test = eval()
            if test:
                # subquery non-empty: every live row passes unchanged
                return result
            else:
                # subquery empty: reject every row
                return [0] * len(result)
        kjDict = kjbuckets.kjDict
        for i in xrange(len(assigns)):
            assignsi = assigns[i]
            # int entries are already-rejected markers; skip them
            if tt(assignsi) is IntType: continue
            # evaluate the subquery with this row's bindings as outer context
            testbtup = BoundTuple()
            testbtup.assns = kjDict(assignsi)
            test = eval(outerboundtuple=testbtup).rows()
            if not test:
                result[i] = 0
        return result

    def __hash__(self):
        return hash(self.subq)^3333

    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        return cmp(self.subq, other.subq)
class QuantEQ(NontrivialEqPred):
"""Quantified equal any predicate"""
def __init__(self, expr, subq):
self.expr = expr
self.subq = subq
self.cachable = 0
self.cached_column = None
self.att = None
def initargs(self):
return (self.expr, self.subq)
def uncache(self):
self.cached_column = None
def domain(self):
first = self.subq.unbound()
if not first:
self.cachable = 1
more = self.expr.domain()
return first + more
def relbind(self, dict, db):
subq = self.subq = self.subq.relbind(db, dict)
self.expr = self.expr.relbind(dict, db)
# test that subquery is single column and determine att
sl = subq.select_list
atts = sl.attorder
if len(atts)<>1:
raise ValueError, \
"Quantified predicate requires unit select list: %s" % atts
self.att = | |
""" Module to access the Channels endpoints """
# pylint: disable=too-many-lines,too-many-locals,too-many-public-methods,too-few-public-methods
from typing import Any, Dict, List, Optional, Union, cast
from pydantic import BaseModel
from ...models import (
AddChannelMemberJsonBody,
Channel,
ChannelListWithTeamData,
ChannelMember,
ChannelModeration,
ChannelModerationPatch,
ChannelNotifyProps,
ChannelStats,
ChannelUnread,
CreateChannelJsonBody,
MoveChannelJsonBody,
OrderedSidebarCategories,
PatchChannelJsonBody,
PostList,
SearchAllChannelsJsonBody,
SearchAllChannelsResponse200,
SearchArchivedChannelsJsonBody,
SearchChannelsJsonBody,
SearchGroupChannelsJsonBody,
SidebarCategory,
StatusOK,
UpdateChannelJsonBody,
UpdateChannelMemberSchemeRolesJsonBody,
UpdateChannelPrivacyJsonBody,
UpdateChannelRolesJsonBody,
UpdateChannelSchemeJsonBody,
ViewChannelJsonBody,
ViewChannelResponse200,
)
from ..base import ApiBaseClass
class ChannelsApi(ApiBaseClass):
"""Endpoints for creating, getting and interacting with channels."""
async def get_all_channels(
self,
*,
not_associated_to_group: Optional[str] = None,
page: Optional[int] = 0,
per_page: Optional[int] = 0,
exclude_default_channels: Optional[bool] = False,
include_deleted: Optional[bool] = False,
include_total_count: Optional[bool] = False,
exclude_policy_constrained: Optional[bool] = False,
) -> ChannelListWithTeamData:
"""Get a list of all channels
Permissions:
`manage_system`
Api Reference:
`GetAllChannels <https://api.mattermost.com/#operation/GetAllChannels>`_
"""
url = "/channels"
params: Dict[str, Any] = {
"not_associated_to_group": not_associated_to_group,
"page": page,
"per_page": per_page,
"exclude_default_channels": exclude_default_channels,
"include_deleted": include_deleted,
"include_total_count": include_total_count,
"exclude_policy_constrained": exclude_policy_constrained,
}
params = {k: v for k, v in params.items() if v is not None}
request_kwargs = {
"url": url,
"params": params,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.get(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = ChannelListWithTeamData.parse_obj(response.json())
return response200
return response
async def create_channel(
self,
*,
json_body: Union[CreateChannelJsonBody, Dict],
) -> Channel:
"""Create a channel
Create a new channel.
Permissions:
If creating a public channel, `create_public_channel`
permission is required. If creating a private channel,
`create_private_channel` permission is required.
Api Reference:
`CreateChannel <https://api.mattermost.com/#operation/CreateChannel>`_
"""
url = "/channels"
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 201:
response201 = Channel.parse_obj(response.json())
return response201
return response
async def create_direct_channel(
self,
*,
json_body: Union[List[str], Dict],
) -> Channel:
"""Create a direct message channel
Create a new direct message channel between two users.
Permissions:
Must be one of the two users and have
`create_direct_channel` permission. Having the
`manage_system` permission voids the previous requirements.
Api Reference:
`CreateDirectChannel <https://api.mattermost.com/#operation/CreateDirectChannel>`_
"""
url = "/channels/direct"
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 201:
response201 = Channel.parse_obj(response.json())
return response201
return response
async def create_group_channel(
self,
*,
json_body: Union[List[str], Dict],
) -> Channel:
"""Create a group message channel
Create a new group message channel to group of users. If the logged in
user's id is not included in the list, it will be appended to the end.
Permissions:
Must have `create_group_channel` permission.
Api Reference:
`CreateGroupChannel <https://api.mattermost.com/#operation/CreateGroupChannel>`_
"""
url = "/channels/group"
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 201:
response201 = Channel.parse_obj(response.json())
return response201
return response
async def search_all_channels(
self,
*,
json_body: Union[SearchAllChannelsJsonBody, Dict],
system_console: Optional[bool] = True,
) -> SearchAllChannelsResponse200:
"""Search all private and open type channels across all teams
Returns all private and open type channels where 'term' matches on the
name, display name, or purpose of
the channel.
Configured 'default' channels (ex Town Square and Off-Topic) can be
excluded from the results
with the `exclude_default_channels` boolean parameter.
Channels that are associated (via GroupChannel records) to a given group
can be excluded from the results
with the `not_associated_to_group` parameter and a group id string.
Api Reference:
`SearchAllChannels <https://api.mattermost.com/#operation/SearchAllChannels>`_
"""
url = "/channels/search"
params: Dict[str, Any] = {
"system_console": system_console,
}
params = {k: v for k, v in params.items() if v is not None}
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
"params": params,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = SearchAllChannelsResponse200.parse_obj(response.json())
return response200
return response
async def search_group_channels(
self,
*,
json_body: Union[SearchGroupChannelsJsonBody, Dict],
) -> List[Channel]:
"""Search Group Channels
Get a list of group channels for a user which members' usernames match
the search term.
Minimum Server Version:
5.14
Api Reference:
`SearchGroupChannels <https://api.mattermost.com/#operation/SearchGroupChannels>`_
"""
url = "/channels/group/search"
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = []
_response200 = response.json()
for response200_item_data in _response200:
response200_item = Channel.parse_obj(response200_item_data)
response200.append(response200_item)
return response200
return response
async def get_public_channels_by_ids_for_team(
self,
team_id: str,
*,
json_body: Union[List[str], Dict],
) -> List[Channel]:
"""Get a list of channels by ids
Get a list of public channels on a team by id.
Permissions:
`view_team` for the team the channels are on.
Api Reference:
`GetPublicChannelsByIdsForTeam <https://api.mattermost.com/#operation/GetPublicChannelsByIdsForTeam>`_
"""
url = f"/teams/{team_id}/channels/ids"
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = []
_response200 = response.json()
for response200_item_data in _response200:
response200_item = Channel.parse_obj(response200_item_data)
response200.append(response200_item)
return response200
return response
async def get_channel_members_timezones(
self,
channel_id: str,
) -> List[str]:
"""Get timezones in a channel
Get a list of timezones for the users who are in this channel.
Permissions:
Must have the `read_channel` permission.
Minimum Server Version:
5.6
Api Reference:
`GetChannelMembersTimezones <https://api.mattermost.com/#operation/GetChannelMembersTimezones>`_
"""
url = f"/channels/{channel_id}/timezones"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.get(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = cast(List[str], response.json())
return response200
return response
async def get_channel(
self,
channel_id: str,
) -> Channel:
"""Get a channel
Get channel from the provided channel id string.
Permissions:
`read_channel` permission for the channel.
Api Reference:
`GetChannel <https://api.mattermost.com/#operation/GetChannel>`_
"""
url = f"/channels/{channel_id}"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.get(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = Channel.parse_obj(response.json())
return response200
return response
async def update_channel(
self,
channel_id: str,
*,
json_body: Union[UpdateChannelJsonBody, Dict],
) -> Channel:
"""Update a channel
Update a channel. The fields that can be updated are listed as
parameters. Omitted fields will be treated as blanks.
Permissions:
If updating a public channel,
`manage_public_channel_members` permission is required. If
updating a private channel, `manage_private_channel_members`
permission is required.
Api Reference:
`UpdateChannel <https://api.mattermost.com/#operation/UpdateChannel>`_
"""
url = f"/channels/{channel_id}"
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.put(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = Channel.parse_obj(response.json())
return response200
return response
async def delete_channel(
self,
channel_id: str,
) -> StatusOK:
"""Delete a channel
Archives a channel. This will set the `deleteAt` to the current
timestamp in the database. Soft deleted channels may not be accessible
in the user interface. They can be viewed and unarchived in the **System
Console > User Management > Channels** based on your license. Direct and
group message channels cannot be deleted.
As of server version 5.28, optionally use the `permanent=true` query
parameter to permanently delete the channel for compliance reasons. To
use this feature `ServiceSettings.EnableAPIChannelDeletion` must be set
to `true` in the server's configuration. If you permanently delete a
channel this action is not recoverable outside of a database backup.
`delete_private_channel` permission if the channel is private,
or have `manage_system` permission.
Permissions:
`delete_public_channel` permission if the channel is public,
Api Reference:
`DeleteChannel <https://api.mattermost.com/#operation/DeleteChannel>`_
"""
url = f"/channels/{channel_id}"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.delete(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
async def patch_channel(
self,
channel_id: str,
| |
import json
from io import BytesIO
from app.ext.api.exceptions import (
ChefNotFound,
InvalidToken,
MaximumImageCapacityError,
OperationNotAllowed,
RecipeWithoutIngredient,
RecipeWithoutPreparationMode,
)
from app.ext.api.services import token_services
def test_edit_recipe(client, admin_user):
    """A recipe's name, chef, ingredients, preparation steps and images can
    all be changed by its owner via PATCH /api/v1/recipe/<id>."""
    new_user1 = {
        "name": "<NAME>",
        "email": "<EMAIL>",
        "password": "<PASSWORD>",
        "admin": False,
    }
    new_chef1 = {"name": "chef test", "avatar": (BytesIO(b"avatar"), "test.jpg")}
    new_chef2 = {"name": "chef test", "avatar": (BytesIO(b"avatar"), "test.jpg")}
    # Admin token is needed to create users/chefs; the multipart posts below
    # override this json content-type via the content_type argument.
    headers = {
        "Authorization": admin_user.get("token"),
        "content-type": "application/json",
    }
    user1 = client.post(
        "/api/v1/user",
        data=json.dumps(new_user1),
        headers=headers,
    )
    chef1 = client.post(
        "/api/v1/chef",
        data=new_chef1,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    chef2 = client.post(
        "/api/v1/chef",
        data=new_chef2,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    # Switch to the regular user's token so the recipe is owned by user1.
    token = token_services.generate_token(user1.json["id"], user1.json["email"])
    headers["Authorization"] = token
    new_recipe = {
        "name": "recipe test",
        "ingredients": ["Ovo", "Carne de Hamburguer"],
        "preparation_mode": ["Bata um ovo na frigideira", "Frite a carne"],
        "additional_information": "",
        "chef_id": chef1.json["id"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test1.jpg"),
            (BytesIO(b"recipe_imgs"), "test2.jpg"),
        ],
    }
    recipe = client.post(
        "/api/v1/recipe",
        data=new_recipe,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    recipe_id = recipe.json["id"]
    old_imgs = [recipe_img.get("file_id") for recipe_img in recipe.json["recipe_imgs"]]
    # The update replaces the chef, drops both original images and uploads one new one.
    update_recipe = {
        "name": "recipe updated",
        "chef_id": chef2.json["id"],
        "ingredients": ["Ovo", "Carne de Hamburguer", "Salada"],
        "preparation_mode": ["Bata um ovo na frigideira", "Coloque a salada"],
        "delete_imgs": old_imgs,
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
        ],
    }
    response = client.patch(
        f"/api/v1/recipe/{recipe_id}",
        data=update_recipe,
        headers=headers,
        content_type="multipart/form-data",
    )
    assert response.content_type == "application/json"
    assert response.status_code == 200
    assert response.json.get("id") == recipe_id
    assert response.json.get("name") == update_recipe.get("name")
    assert response.json.get("chef").get("id") == update_recipe.get("chef_id")
    # Old images were deleted, so only the newly uploaded one should remain.
    assert len(response.json.get("recipe_imgs")) == len(
        update_recipe.get("recipe_imgs")
    )
    for ingredient in response.json.get("ingredients"):
        assert ingredient in update_recipe.get("ingredients")
    for preparation_mode in response.json.get("preparation_mode"):
        assert preparation_mode in update_recipe.get("preparation_mode")
def test_edit_recipe_other_users_if_user_is_admin(client, admin_user):
    """An admin may PATCH a recipe that belongs to another (non-admin) user."""
    headers = {
        "Authorization": admin_user.get("token"),
        "content-type": "application/json",
    }
    users = [
        {
            "name": "<NAME>",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
            "admin": False,
        },
        {
            "name": "<NAME>",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
            "admin": False,
        },
    ]
    users_created = []
    for user in users:
        user_info = client.post(
            "/api/v1/user",
            data=json.dumps(user),
            headers=headers,
        )
        users_created.append(user_info)
    new_chef1 = {"name": "chef test", "avatar": (BytesIO(b"avatar"), "test.jpg")}
    chef1 = client.post(
        "/api/v1/chef",
        data=new_chef1,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    # Each regular user creates one recipe under their own token.
    ids_recipe_list = []
    for user in users_created:
        token = token_services.generate_token(user.json["id"], user.json["email"])
        headers["Authorization"] = token
        new_recipe = {
            "name": "recipe test",
            "ingredients": ["Ovo", "Carne de Hamburguer"],
            "preparation_mode": ["Bata um ovo na frigideira", "Frite a carne"],
            "additional_information": "",
            "chef_id": chef1.json["id"],
            "recipe_imgs": [
                (BytesIO(b"recipe_imgs"), "test1.jpg"),
                (BytesIO(b"recipe_imgs"), "test2.jpg"),
            ],
        }
        recipe = client.post(
            "/api/v1/recipe",
            data=new_recipe,
            headers=headers,
            follow_redirects=True,
            content_type="multipart/form-data",
        )
        ids_recipe_list.append(recipe.json["id"])
    update_recipe = {
        "name": "recipe updated",
        "chef_id": chef1.json["id"],
        "ingredients": ["Ovo", "Carne de Hamburguer", "Salada"],
        "preparation_mode": ["Bata um ovo na frigideira", "Coloque a salada"],
    }
    # Switch back to the admin token and edit the first user's recipe.
    headers["Authorization"] = admin_user.get("token")
    response = client.patch(
        f"/api/v1/recipe/{ids_recipe_list[0]}",
        data=update_recipe,
        headers=headers,
        content_type="multipart/form-data",
    )
    assert response.content_type == "application/json"
    assert response.status_code == 200
    assert response.json.get("name") == update_recipe.get("name")
    assert response.json.get("chef").get("id") == update_recipe.get("chef_id")
    for ingredient in response.json.get("ingredients"):
        assert ingredient in update_recipe.get("ingredients")
    for preparation_mode in response.json.get("preparation_mode"):
        assert preparation_mode in update_recipe.get("preparation_mode")
def test_no_edit_recipe_if_user_not_authenticated(client, admin_user):
    """PATCH without an Authorization header is rejected with InvalidToken."""
    headers = {
        "Authorization": admin_user.get("token"),
        "content-type": "application/json",
    }
    new_chef1 = {"name": "chef test", "avatar": (BytesIO(b"avatar"), "test.jpg")}
    chef1 = client.post(
        "/api/v1/chef",
        data=new_chef1,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    new_recipe = {
        "name": "recipe test",
        "ingredients": ["Ovo", "Carne de Hamburguer"],
        "preparation_mode": ["Bata um ovo na frigideira", "Frite a carne"],
        "additional_information": "",
        "chef_id": chef1.json["id"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test1.jpg"),
            (BytesIO(b"recipe_imgs"), "test2.jpg"),
        ],
    }
    recipe = client.post(
        "/api/v1/recipe",
        data=new_recipe,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    recipe_id = recipe.json["id"]
    update_recipe = {
        "name": "recipe updated",
        "ingredients": ["Ovo", "Carne de Hamburguer", "Salada"],
        "preparation_mode": ["Bata um ovo na frigideira", "Coloque a salada"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
        ],
    }
    # Deliberately no headers= here: the request carries no token.
    response = client.patch(
        f"/api/v1/recipe/{recipe_id}",
        data=update_recipe,
        content_type="multipart/form-data",
    )
    assert response.status_code == InvalidToken.code
    assert response.json["message"] == InvalidToken.message
def test_no_edit_recipe_if_recipe_is_other_user(client, admin_user):
    """A non-admin user cannot PATCH another user's recipe (OperationNotAllowed)."""
    headers = {
        "Authorization": admin_user.get("token"),
        "content-type": "application/json",
    }
    users = [
        {
            "name": "<NAME>",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
            "admin": False,
        },
        {
            "name": "<NAME>",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
            "admin": False,
        },
    ]
    users_created = []
    for user in users:
        user_info = client.post(
            "/api/v1/user",
            data=json.dumps(user),
            headers=headers,
        )
        users_created.append(user_info)
    new_chef1 = {"name": "chef test", "avatar": (BytesIO(b"avatar"), "test.jpg")}
    chef1 = client.post(
        "/api/v1/chef",
        data=new_chef1,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    # Each user creates a recipe; after the loop the headers still hold the
    # SECOND user's token, so patching the FIRST user's recipe must fail.
    ids_recipe_list = []
    for user in users_created:
        token = token_services.generate_token(user.json["id"], user.json["email"])
        headers["Authorization"] = token
        new_recipe = {
            "name": "recipe test",
            "ingredients": ["Ovo", "Carne de Hamburguer"],
            "preparation_mode": ["Bata um ovo na frigideira", "Frite a carne"],
            "additional_information": "",
            "chef_id": chef1.json["id"],
            "recipe_imgs": [
                (BytesIO(b"recipe_imgs"), "test1.jpg"),
                (BytesIO(b"recipe_imgs"), "test2.jpg"),
            ],
        }
        recipe = client.post(
            "/api/v1/recipe",
            data=new_recipe,
            headers=headers,
            follow_redirects=True,
            content_type="multipart/form-data",
        )
        ids_recipe_list.append(recipe.json["id"])
    update_recipe = {
        "name": "recipe updated",
        "chef_id": chef1.json["id"],
        "ingredients": ["Ovo", "Carne de Hamburguer", "Salada"],
        "preparation_mode": ["Bata um ovo na frigideira", "Coloque a salada"],
    }
    response = client.patch(
        f"/api/v1/recipe/{ids_recipe_list[0]}",
        data=update_recipe,
        headers=headers,
        content_type="multipart/form-data",
    )
    assert response.status_code == OperationNotAllowed.code
    assert response.json["message"] == OperationNotAllowed.message
def test_no_edit_recipe_if_have_maximum_capacity_images(client, admin_user):
    """PATCH fails with MaximumImageCapacityError when the upload would push
    the recipe past the image limit (1 existing + 6 new here)."""
    new_user1 = {
        "name": "<NAME>",
        "email": "<EMAIL>",
        "password": "<PASSWORD>",
        "admin": False,
    }
    new_chef1 = {"name": "chef test", "avatar": (BytesIO(b"avatar"), "test.jpg")}
    headers = {
        "Authorization": admin_user.get("token"),
        "content-type": "application/json",
    }
    user1 = client.post(
        "/api/v1/user",
        data=json.dumps(new_user1),
        headers=headers,
    )
    chef1 = client.post(
        "/api/v1/chef",
        data=new_chef1,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    token = token_services.generate_token(user1.json["id"], user1.json["email"])
    headers["Authorization"] = token
    new_recipe = {
        "name": "recipe test",
        "ingredients": ["Ovo", "Carne de Hamburguer"],
        "preparation_mode": ["Bata um ovo na frigideira", "Frite a carne"],
        "additional_information": "",
        "chef_id": chef1.json["id"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test1.jpg"),
        ],
    }
    recipe = client.post(
        "/api/v1/recipe",
        data=new_recipe,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    recipe_id = recipe.json["id"]
    # Six additional images with no delete_imgs: exceeds capacity.
    update_recipe = {
        "name": "recipe updated",
        "chef_id": chef1.json["id"],
        "ingredients": ["Ovo", "Carne de Hamburguer", "Salada"],
        "preparation_mode": ["Bata um ovo na frigideira", "Coloque a salada"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
        ],
    }
    response = client.patch(
        f"/api/v1/recipe/{recipe_id}",
        data=update_recipe,
        headers=headers,
        content_type="multipart/form-data",
    )
    assert response.content_type == "application/json"
    assert response.status_code == MaximumImageCapacityError.code
    assert response.json["message"] == MaximumImageCapacityError.message
def test_no_edit_recipe_if_chef_not_already_exist(client, admin_user):
    """PATCH referencing a nonexistent chef_id fails with ChefNotFound."""
    new_user1 = {
        "name": "<NAME>",
        "email": "<EMAIL>",
        "password": "<PASSWORD>",
        "admin": False,
    }
    new_chef1 = {"name": "chef test", "avatar": (BytesIO(b"avatar"), "test.jpg")}
    headers = {
        "Authorization": admin_user.get("token"),
        "content-type": "application/json",
    }
    user1 = client.post(
        "/api/v1/user",
        data=json.dumps(new_user1),
        headers=headers,
    )
    chef1 = client.post(
        "/api/v1/chef",
        data=new_chef1,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    token = token_services.generate_token(user1.json["id"], user1.json["email"])
    headers["Authorization"] = token
    new_recipe = {
        "name": "recipe test",
        "ingredients": ["Ovo", "Carne de Hamburguer"],
        "preparation_mode": ["Bata um ovo na frigideira", "Frite a carne"],
        "additional_information": "",
        "chef_id": chef1.json["id"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test1.jpg"),
        ],
    }
    recipe = client.post(
        "/api/v1/recipe",
        data=new_recipe,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    recipe_id = recipe.json["id"]
    # chef_id 10 is assumed not to exist in the test database.
    update_recipe = {
        "name": "recipe updated",
        "chef_id": 10,
        "ingredients": ["Ovo", "Carne de Hamburguer", "Salada"],
        "preparation_mode": ["Bata um ovo na frigideira", "Coloque a salada"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
        ],
    }
    response = client.patch(
        f"/api/v1/recipe/{recipe_id}",
        data=update_recipe,
        headers=headers,
        content_type="multipart/form-data",
    )
    assert response.content_type == "application/json"
    assert response.status_code == ChefNotFound.code
    assert response.json["message"] == ChefNotFound.message
def test_no_edit_recipe_if_recipe_without_ingredient(client, admin_user):
    """PATCH with an empty ingredients list fails with RecipeWithoutIngredient."""
    new_user1 = {
        "name": "<NAME>",
        "email": "<EMAIL>",
        "password": "<PASSWORD>",
        "admin": False,
    }
    new_chef1 = {"name": "chef test", "avatar": (BytesIO(b"avatar"), "test.jpg")}
    headers = {
        "Authorization": admin_user.get("token"),
        "content-type": "application/json",
    }
    user1 = client.post(
        "/api/v1/user",
        data=json.dumps(new_user1),
        headers=headers,
    )
    chef1 = client.post(
        "/api/v1/chef",
        data=new_chef1,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    token = token_services.generate_token(user1.json["id"], user1.json["email"])
    headers["Authorization"] = token
    new_recipe = {
        "name": "recipe test",
        "ingredients": ["Ovo", "Carne de Hamburguer"],
        "preparation_mode": ["Bata um ovo na frigideira", "Frite a carne"],
        "additional_information": "",
        "chef_id": chef1.json["id"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test1.jpg"),
        ],
    }
    recipe = client.post(
        "/api/v1/recipe",
        data=new_recipe,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    recipe_id = recipe.json["id"]
    # Empty ingredients list is the invalid field under test.
    update_recipe = {
        "name": "recipe updated",
        "chef_id": chef1.json["id"],
        "ingredients": [],
        "preparation_mode": ["Bata um ovo na frigideira", "Coloque a salada"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
        ],
    }
    response = client.patch(
        f"/api/v1/recipe/{recipe_id}",
        data=update_recipe,
        headers=headers,
        content_type="multipart/form-data",
    )
    assert response.content_type == "application/json"
    assert response.status_code == RecipeWithoutIngredient.code
    assert response.json["message"] == RecipeWithoutIngredient.message
def test_no_edit_recipe_if_recipe_without_preparation_mode(client, admin_user):
    """PATCH with an empty preparation_mode list fails with
    RecipeWithoutPreparationMode."""
    new_user1 = {
        "name": "<NAME>",
        "email": "<EMAIL>",
        "password": "<PASSWORD>",
        "admin": False,
    }
    new_chef1 = {"name": "chef test", "avatar": (BytesIO(b"avatar"), "test.jpg")}
    headers = {
        "Authorization": admin_user.get("token"),
        "content-type": "application/json",
    }
    user1 = client.post(
        "/api/v1/user",
        data=json.dumps(new_user1),
        headers=headers,
    )
    chef1 = client.post(
        "/api/v1/chef",
        data=new_chef1,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    token = token_services.generate_token(user1.json["id"], user1.json["email"])
    headers["Authorization"] = token
    new_recipe = {
        "name": "recipe test",
        "ingredients": ["Ovo", "Carne de Hamburguer"],
        "preparation_mode": ["Bata um ovo na frigideira", "Frite a carne"],
        "additional_information": "",
        "chef_id": chef1.json["id"],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test1.jpg"),
        ],
    }
    recipe = client.post(
        "/api/v1/recipe",
        data=new_recipe,
        headers=headers,
        follow_redirects=True,
        content_type="multipart/form-data",
    )
    recipe_id = recipe.json["id"]
    # Empty preparation_mode list is the invalid field under test.
    update_recipe = {
        "name": "recipe updated",
        "chef_id": chef1.json["id"],
        "ingredients": ["Ovo", "Carne de Hamburguer"],
        "preparation_mode": [],
        "recipe_imgs": [
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
            (BytesIO(b"recipe_imgs"), "test-alterado.jpg"),
        ],
    }
    response = client.patch(
        f"/api/v1/recipe/{recipe_id}",
        data=update_recipe,
        headers=headers,
        content_type="multipart/form-data",
    )
    assert response.content_type == "application/json"
    assert response.status_code == RecipeWithoutPreparationMode.code
    assert response.json["message"] == RecipeWithoutPreparationMode.message
def test_delete_recipe(client, admin_user):
new_user1 = {
"name": "<NAME>",
"email": "<EMAIL>",
"password": "<PASSWORD>",
| |
# <gh_stars>0  (scraper artifact; commented out -- it is not valid Python)
# -*- coding: utf-8 -*-
"""
Created on Sat May 9 14:44:18 2020
@author: Janice
"""
#%% imports
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd
import numpy as np
import re
from collections import Counter, defaultdict
# import math
# Plotly imports
import plotly.offline as py
py.init_notebook_mode(connected=True)
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
import importlib
importlib.import_module("reader")
from reader import (loadAllFeedsFromFile,getStringContents,
getRssArticleDate,smallDict)
# Other imports
# from scipy.misc import imread
from sklearn.feature_extraction.text import TfidfVectorizer
# from sklearn.decomposition import LatentDirichletAllocation TODO delete?
from fuzzywuzzy import process # for Levenshtein Distance calculations
from nltk.tokenize.treebank import TreebankWordDetokenizer
# Suppress annoying deprecation messages from nltk which I'm not going to fix yet
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from tqdm.notebook import tqdm
# Source: https://www.kaggle.com/arthurtok/spooky-nlp-and-topic-modelling-tutorial#3.-Topic-modelling
#%% getDocList
def getDocList(allEntryDict=None, limit = None, reloaddocs= False,
               stop_list=None, with_ids=False):
    """
    Returns either a list of RSSEntry contents with stop words removed, limited
    in length to limit (if set) or, if with_ids is True, the article UIDs are
    zipped together with the document contents.

    Parameters
    ----------
    allEntryDict : dict, optional
        Dictionary of RSS entries. The default is None (reloaded from file).
    limit : int, optional
        Max number of entries to return. The default is None (no limit).
    reloaddocs : bool, optional
        Force reloading the entries from file. The default is False.
    stop_list : list, optional
        Words or phrases removed from all finished articles
        (case-insensitive). The default is None.
    with_ids : bool, optional
        True to get a zip of (doc id, contents) pairs. The default is False.

    Returns
    -------
    docs : list of str, or zip of (id, str) when with_ids is True
    """
    if reloaddocs or not allEntryDict:
        allEntryDict = loadAllFeedsFromFile()

    docs = []
    ids = []
    for key, val in tqdm(allEntryDict.items(), desc="Removing Stop Words"):
        finalVal = val.collatedContents
        if stop_list:  # substitute all phrases for ' ', case-insensitive
            finalVal = removeStopWords(finalVal, stop_words=stop_list)
        docs.append(finalVal)
        ids.append(key)
        # BUG FIX: the original tested `i > limit` after appending, which
        # returned limit+1 entries; stop exactly at `limit` as documented.
        if limit and len(docs) >= limit:
            break
    return zip(ids, docs) if with_ids else docs
#%% LemmaCountVectorizer
#Subclassing
lemm = WordNetLemmatizer()


class LemmaCountVectorizer(TfidfVectorizer):
    """Vectorizer whose analyzer lemmatizes every token with WordNet.

    NOTE(review): despite the "Count" in its name this subclasses
    TfidfVectorizer, so extracted features carry tf-idf weights.
    """

    def build_analyzer(self):
        base_analyzer = super().build_analyzer()

        def lemmatizing_analyzer(doc):
            # Lazily lemmatize each token produced by the stock analyzer.
            return (lemm.lemmatize(token) for token in base_analyzer(doc))

        return lemmatizing_analyzer
#%% Stop word processing
def getCustomStopWords():
    """
    Build the single-word stop list: the nltk.corpus english stop list plus
    project-specific noise words.

    Returns
    -------
    myStopWords : list
    """
    myStopWords = list(set(stopwords.words('english')))
    # BUG FIX: the original was missing a comma after "rt", so the adjacent
    # string literals "rt" "cipher" concatenated into one bogus entry
    # "rtcipher" and neither word was actually filtered.
    myStopWords.extend(["view", "entire", "post", "twitter", "com", "share", "story",
                        "interested", "friends", "interested", "would", "also", "rt",
                        "cipher", "brief", "continue", "reading", "onenewszetu",
                        "offibiza", "linkthe", "haunting", "blogfor", "live"])
    # myStopWords.extend(getCustomStopPhrases())
    return myStopWords
#%% getCustomStopPhrases
def getCustomStopPhrases():
    """
    Return project-specific stop PHRASES to ignore in addition to the
    nltk.corpus english stop list.

    Returns
    -------
    list of str
    """
    return [
        "view entire post",
        "post",
        "twitter.com",
        "twitter com",
        "share story",
        "interested friends",
        "interested",
        "would",
        "also rt",
        "cipher brief",
        "continue reading",
        "year old",
        "per cent",
        "last week",
        "first time",
        "last year",
        "last month",
        "around the world",
        "year-old",
        "live blog",
        "like story share",
        "story share",
        "think friends share",
        "NewsZetu",
        "rt.com",
        "Z6Mag",
        "onz6",
        "friends",
        "first onworld weekly news",
        "first onworld weekly",
        "onworld weekly news",
        "offibiza",
        "latest news",
        "blogforum",
    ]
def removeStopWords(str, stop_words = getCustomStopWords()):
    """
    Use NLTK to remove stop words from a string.

    Parameters
    ----------
    str : str
        String to process. (The parameter keeps its historical name ``str``
        -- which shadows the builtin -- so keyword callers keep working.)
    stop_words : list, optional
        Words to drop, matched case-insensitively. NOTE: the default is
        evaluated once at import time, not per call.

    Returns
    -------
    str
        The input re-assembled with all stop words removed.
    """
    word_tokens = word_tokenize(str)
    # Single filtering pass; the original built the same list twice (a
    # comprehension whose result was immediately discarded, then a loop).
    filtered_sentence = [w for w in word_tokens if w.lower() not in stop_words]
    return TreebankWordDetokenizer().detokenize(filtered_sentence)
#%% deriveTopicMaps
def deriveTopicMaps(sentences, stopW=getCustomStopWords(), maxNum=30, ngram_range=(3,4)):
    """
    Derive up to maxNum "topic" phrases (with weights) from the supplied
    articles, using a lemmatizing tf-idf vectorizer. The implicit
    TfidfTransformer scales down tokens that occur very frequently across the
    corpus, which are empirically less informative than features occurring in
    a small fraction of it.

    Parameters
    ----------
    sentences : list
        Articles from the corpus.
    stopW : list, optional
        Stop words; default is getCustomStopWords() (english + custom noise),
        evaluated once at import time.
    maxNum : int, optional
        Maximum number of topics to generate. The default is 30.
    ngram_range : tuple, optional
        N-gram span; short phrases seem to work better than single words.

    Returns
    -------
    list of (phrase, weight) tuples
    """
    vectorizer = LemmaCountVectorizer(
        max_df=0.85,
        min_df=3,
        ngram_range=ngram_range,
        max_features=maxNum,
        stop_words=stopW,
    )
    transformed = vectorizer.fit_transform(sentences)
    # Collapse the sparse doc-term matrix into one weight per feature.
    weights = np.asarray(transformed.sum(axis=0)).ravel()
    # NOTE(review): get_feature_names is deprecated in newer scikit-learn in
    # favour of get_feature_names_out -- confirm the pinned sklearn version.
    return list(zip(vectorizer.get_feature_names(), weights))
#%% unzipLeftSide
def unzipLeftSide(iterable):
    """
    Return a tuple of the first element of every item in *iterable*
    (i.e. the left column of a list of tuples).

    Raises StopIteration when *iterable* is empty, exactly like the
    original ``zip(*iterable).__next__()`` spelling.
    """
    # next() is the idiomatic replacement for calling .__next__() directly.
    return next(zip(*iterable))
#%% updateDictionaryByFuzzyRelevanceofTopics
def updateDictionaryByFuzzyRelevanceofTopics(topic_list, allEntryDict, limit = None, threshold=75, remove=False):
    """
    Add a list of topics to each entry of the given allEntryDict for each
    topic that has a fuzzy relevance greater than the specified threshold.

    Parameters
    ----------
    topic_list : list of tuple (topic, weight)
        Topics and their relevance for the whole corpus.
    allEntryDict : dict, optional
        Dictionary of RSS entries; reloaded from file when falsy.
    limit : int, optional
        Max number of entries to process. The default is None.
    threshold : int, optional
        Minimum fuzzy score for assigning a topic. The default is 75.
    remove : bool, optional
        When True, entries with no matching topic (or failed matching)
        are removed from allEntryDict.

    Returns
    -------
    toBeRemoved : list
        Keys of entries without any sufficiently relevant topic.
    """
    topics = unzipLeftSide(topic_list)  # just the phrases
    toBeRemoved = []
    if not allEntryDict:
        allEntryDict = loadAllFeedsFromFile()
    i = 0  # used to break out at limit
    for key, val in tqdm(allEntryDict.items(), desc='Fuzzy Relevance Testing'):
        html = ""
        if hasattr(val, "content"):
            for line in val.content:
                html = html + " " + line.value
        elif hasattr(val, "summary_detail"):
            html = val.summary_detail.value
        else:
            continue
        i += 1
        finalVal = val.title + " " + getStringContents(html)
        try:
            matchedTopics = process.extract(finalVal, topics)
        except Exception:  # narrowed from a bare except
            # BUG FIX: the original fell through here and went on to use
            # `matchedTopics`, which was either undefined (NameError on the
            # first failure) or stale from the previous iteration.
            toBeRemoved.append(key)
            if limit and i > limit:
                break
            continue
        goodTops = [tupl for tupl in matchedTopics if tupl[1] > threshold]
        if len(goodTops) > 0:
            val["topiclist"] = goodTops
        else:
            val["topiclist"] = None
            toBeRemoved.append(key)
        if limit and i > limit:
            break
    if remove:  # remove non-topic entries from the dict
        for gone in tqdm(toBeRemoved, desc="Removing documents"):
            try:
                allEntryDict.pop(gone)
            except KeyError:
                continue
    return toBeRemoved
#%% simpleTopicDisplay Histogram TODO remove, unused?
# def simpleTopicDisplay(ax,topnames,topNumbers):
# topList = pd.DataFrame({"Topics": topnames,
# "Frequency":topNumbers
# })
# topList = topList.sort_values('Frequency',ascending=True).reset_index()
# # Plot the total crashes
# # sns.set_color_codes("pastel")
# cmap = sns.cubehelix_palette (40, dark = .3, light=.8,start=0.9,
# rot=-1.0,gamma=0.8, as_cmap=False)
# sns.barplot(y="Topics", x="Frequency", data=tagList,
# label="Tags", palette=cmap)
# # Add a legend and informative axis label
# ax.legend(ncol=2, loc="lower right", frameon=True)
# ax.set(xlim=(0, max(topNumbers)+5), xlabel="",
# ylabel="Topics designated to articles")
# plt.title("Topic Frequency Overall", fontsize=20)
# return
#%% displayTopicsAndFeeds
def displayTopicsAndFeeds(allItemDict, numTopics=30):
    """Render a seaborn scatter plot of how often each of the top
    ``numTopics`` topics occurs in each RSS feed.

    Relies on ``getAllFeedTopics``, ``getAllTopics`` and ``makeTopicMatrix``
    defined elsewhere in this module -- presumably returning (feed->topics
    dict, ...), a flat topic list, and a long-form frame with "Feeds",
    "Topics" and "Number" columns respectively; confirm against their
    definitions. Shows the figure as a side effect; returns None.
    """
    sns.set()
    # Grow the figure with the number of topics so labels stay readable.
    sns.set(rc={'figure.figsize':(14,5+numTopics*0.35)})
    plt.xticks(rotation=45, horizontalalignment='right')
    feedTuple=getAllFeedTopics(allItemDict)
    feeds=[]
    allTopics=getAllTopics(allItemDict)
    c_Topics=Counter(allTopics)
    topN=c_Topics.most_common(numTopics)
    Topicnames=[item[0] for item in topN]
    # feedTuple[0] maps feed name -> topics; only the names are needed here.
    for feed,nrTopics in feedTuple[0].items():
        feeds.append(feed)
    # Feed x topic count matrix, filled in place by populateTopicMatrix.
    matr=np.zeros( (len(feeds),len(Topicnames) ) )
    df = pd.DataFrame(data= matr, columns=Topicnames, index=feeds)
    populateTopicMatrix(allItemDict, df)
    df2=makeTopicMatrix(df)
    sns.set_context("paper", font_scale=1.0)
    cmap = sns.cubehelix_palette (10, dark = .3, light=.8,start=0.9, rot=-1.0,gamma=0.8, as_cmap=True)
    ax = sns.scatterplot(data=df2,x="Feeds", y="Topics", size="Number",
                         hue="Number",sizes=(100,300), markers = False, palette=cmap)
    ax.tick_params(labelsize=12)
    plt.title("Topic Usage in RSS Feeds", fontsize=20)
    plt.tight_layout()
    plt.show()
    return
#%% displayTopicsAndFeeds2 TODO delete, unused?
# def displayTopicsAndFeeds2(allItemDict, numTopics=30):
# sns.set()
# # plt.xticks(rotation=60)
# # plt.figure(figsize=(50,100))
# sns.set(rc={'figure.figsize':(14,5+numTopics*0.75)})
# plt.xticks(rotation=45, horizontalalignment='right')
# feedTuple=getAllFeedTopics(allItemDict)
# feeds=[]
# allTopics=getAllTopics(allItemDict)
# c_Topics=Counter(allTopics)
# topnames=[item[0] for item in c_Topics]
# topNumbers=[item[1] for item in c_Topics]
# allTopics=getAllTopics(allItemDict)
# c_Topics=Counter(allTopics)
# topN=c_Topics.most_common(numTopics)
# Topicnames=[item[0] for item in topN]
# for feed,nrTopics in feedTuple[0].items():
# feeds.append(feed)
# fig = plt.figure()
# ax = fig.add_subplot(211)
# widg1=simpleTopicDisplay(ax,topnames,topNumbers)
# matr=np.zeros( (len(feeds),len(Topicnames) ) )
# df = pd.DataFrame(data= matr, columns=Topicnames, index=feeds)
# populateTopicMatrix(allItemDict, df)
# df2=makeTopicMatrix(df)
# sns.set_context("paper", font_scale=1.0)
# # sns.set_style("whitegrid", {'axes.grid' : False})
# # cmap = sns.cubehelix_palette (dark = .3, light=.8, as_cmap=True)
# cmap = sns.cubehelix_palette (10, dark = .3, light=.8,start=0.9, rot=-1.0,gamma=0.8, as_cmap=True)
# ax2 = fig.add_subplot(212)
# ax = sns.scatterplot(data=df2,x="Feeds", y="Topics", size="Number", ax=ax2,
# hue="Number",sizes=(100,300), markers = False, palette=cmap)
# ax2.tick_params(labelsize=12)
# plt.title("Topic Usage in RSS Feeds", fontsize=20)
# plt.tight_layout()
# plt.show()
# return
#%% populateTopicMatrix
def populateTopicMatrix(allDocDict, feedTopicMatrix):
    """
    Calculate from allDocDict how many of the specified topics occur for each
    FeedItem of the named feeds in feedTopicMatrix, summing them in the
    (feed, topic) position of the given matrix (mutated in place).

    Parameters
    ----------
    allDocDict : dict
        Entries with optional ``topiclist`` ((topic, score) tuples) and
        ``feed_name`` attributes.
    feedTopicMatrix : pandas.DataFrame
        Feeds as index, topics as columns; counts are incremented in place.

    Returns
    -------
    None.
    """
    for key, val in allDocDict.items():
        if hasattr(val, "topiclist") and val.topiclist:
            for topicItem in val.topiclist:
                topic = topicItem[0]
                if topic in feedTopicMatrix.columns and val.feed_name in feedTopicMatrix.index:
                    # BUG FIX: use .loc instead of chained indexing
                    # (df[topic][feed] += 1), which pandas deprecates and
                    # which silently fails to write under copy-on-write.
                    feedTopicMatrix.loc[val.feed_name, topic] += 1
    return
#%% preProcessDocs
def preProcessDocs(docList):
"""
Remove stop words and phrases
Parameters
----------
dict : List
DESCRIPTION. list of document contents
Parameters
----------
docList : TYPE
DESCRIPTION.
Returns
-------
newDocList : TYPE
DESCRIPTION.
"""
|
# Dataset-viewer boilerplate accidentally appended to this file; commented
# out because it is not valid Python:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.