from JoeAgent import job, event
import db_interface
import os, os.path
import logging
import log_parser
LINEINCR = 30
log = logging.getLogger("agent.LogReader")
class ReadLogCompleteEvent(event.Event):
"""Event to indicate the file is completely read. This event will
be caught by the FindLogJob that is watching it. The file will
continue to be checked for modifications"""
pass
class ReadLogContinueEvent(event.Event):
"""Event to indicate we should continue reading the file. Log file
processing will be done in chunks so as not to block the agent for
too long."""
pass
class ReadLogJob(job.Job):
def __init__(self, agent_obj, logfile):
job.Job.__init__(self, agent_obj)
assert os.path.isfile(logfile), "Not a file: %s" % str(logfile)
self._log_size = os.stat(logfile).st_size
log.debug("Log size is %d" % self._log_size)
self._logfile_path = logfile
self._logfile_hndl = open(logfile, 'r')
self._progress = 0 # Data read from file
self._db = db_interface.getDB()
def getFilePath(self):
return self._logfile_path
def getBytesRead(self):
return self._progress
def getBytesTotal(self):
return self._log_size
def run(self):
evt = ReadLogContinueEvent(self)
self.getAgent().addEvent(evt)
def notify(self, evt):
job.Job.notify(self, evt)
if isinstance(evt, ReadLogContinueEvent) and evt.getSource() == self:
log.debug("Continuing read of file")
# Continue to read the log
try:
self._progress += log_parser.read_log(
self._logfile_hndl, self._db, LINEINCR)
log.debug("Read %d %% of file (%d / %d)" % (self.getProgress(),
self._progress,
self._log_size))
            except log_parser.EndOfLogException:
                self._progress = self._log_size
                # Log file is complete, update the db entry
                self._mark_complete()
# Add an event to notify that the file is complete
self._logfile_hndl.close()
new_evt = ReadLogCompleteEvent(self)
self.getAgent().addEvent(new_evt)
            except log_parser.InvalidLogException as e:
                log.warning("Invalid log file: %s" % str(e))
                # Record the bad file and stop processing it
                self._mark_invalid()
                self._logfile_hndl.close()
new_evt = ReadLogCompleteEvent(self)
self.getAgent().addEvent(new_evt)
else:
# Add an event to continue reading
new_evt = ReadLogContinueEvent(self)
self.getAgent().addEvent(new_evt)
def _update_db(self):
"""Update the entry in the database for this logfile"""
log.debug("Updating file %s" % self._logfile_path)
pass
def _mark_invalid(self):
"""Update the database to indicate that this is not a valid log file"""
log.debug("Marking file %s invalid" % self._logfile_path)
pass
def _mark_complete(self):
log.debug("Marking file %s complete" % self._logfile_path)
pass
def getProgress(self):
"""Return a percentage complete value"""
if self._log_size == 0:
return 0
return int((float(self._progress) / self._log_size) * 100)
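# Overview of the intended flow (added comment, summarizing the docstrings and
# code above):
#   1. A watching job (e.g. the FindLogJob mentioned above) creates a
#      ReadLogJob for a log file; run() posts an initial ReadLogContinueEvent.
#   2. Every ReadLogContinueEvent delivered back to notify() reads LINEINCR
#      more lines via log_parser.read_log() and posts another
#      ReadLogContinueEvent, so the agent is never blocked for long.
#   3. On EndOfLogException (or InvalidLogException) the handle is closed and a
#      ReadLogCompleteEvent is posted; the watcher can keep checking the file
#      for further modifications.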
|
import os

import flask
import saliweb.frontend

# "app" is this web service's Flask application object, defined elsewhere.
@app.route('/job/<name>')
def results(name):
job = saliweb.frontend.get_completed_job(name,
flask.request.args.get('passwd'))
# Determine whether the job completed successfully
if os.path.exists(job.get_path('output.pdb')):
template = 'results_ok.html'
else:
template = 'results_failed.html'
return saliweb.frontend.render_results_template(template, job=job)
|
__doc__ = ""
__version__ = "1.0.0"
__author__ = "Fabien Marteau <fabien.marteau@armadeus.com>"
import re
from periphondemand.bin.utils.wrapperxml import WrapperXml
from periphondemand.bin.utils.error import Error
DESTINATION = ["fpga","driver","both"]
PUBLIC = ["true","false"]
class Generic(WrapperXml):
""" Manage generic instance value
"""
def __init__(self,parent,**keys):
""" init Generic,
__init__(self,parent,node)
__init__(self,parent,nodestring)
__init__(self,parent,name)
"""
self.parent=parent
if "node" in keys:
self.__initnode(keys["node"])
elif "nodestring" in keys:
self.__initnodestring(keys["nodestring"])
elif "name" in keys:
self.__initname(keys["name"])
else:
raise Error("Keys unknown in Generic init()",0)
def __initnode(self,node):
WrapperXml.__init__(self,node=node)
def __initnodestring(self,nodestring):
WrapperXml.__init__(self,nodestring=nodestring)
def __initname(self,name):
WrapperXml.__init__(self,nodename="generic")
self.setName(name)
def getOp(self):
return self.getAttributeValue("op")
def setOp(self,op):
self.setAttribute("op",op)
def getTarget(self):
return self.getAttributeValue("target")
def setTarget(self,target):
self.setAttribute("target",target)
def isPublic(self):
if self.getAttributeValue("public")=="true":
return "true"
else:
return "false"
def setPublic(self,public):
public = public.lower()
        if public not in PUBLIC:
raise Error("Public value "+str(public)+" wrong")
self.setAttribute("public",public)
def getType(self):
the_type = self.getAttributeValue("type")
        if the_type is None:
raise Error("Generic "+self.getName()+\
" description malformed, type must be defined",0)
else:
return the_type
def setType(self,type):
self.setAttribute("type",type)
def getMatch(self):
try:
return self.getAttributeValue("match").encode("utf-8")
except AttributeError:
return None
def setMatch(self,match):
self.setAttribute("match",match)
def getValue(self):
""" return the generic value
"""
component = self.getParent()
        if self.getOp() is None:
return self.getAttributeValue("value")
else:
target = self.getTarget().split(".")
if self.getOp() == "realsizeof":
# return the number of connected pin
return str(int(component.getInterface(target[0]).getPort(target[1]).getMaxPinNum())+1)
else:
raise Error("Operator unknown "+self.getOp(),1)
def setValue(self,value):
        if self.getMatch() is None:
self.setAttribute("value",value)
elif re.compile(self.getMatch()).match(value):
self.setAttribute("value",value)
else:
raise Error("Value doesn't match for attribute "+str(value),0)
def getDestination(self):
""" return the generic destination (fpga,driver or both)
"""
return self.getAttributeValue("destination")
def setDestination(self,destination):
destination = destination.lower()
        if destination not in DESTINATION:
raise Error("Destination value "+str(destination)+\
" unknown")
self.setAttribute("destination",destination)
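# --- Usage sketch (added; not from the original sources) ---
# A hedged example of driving this class with only the methods defined above.
# "component" stands in for an existing parent component object from
# periphondemand; it is an assumption, not defined here.
#
#   generic = Generic(component, name="id")   # hypothetical generic name
#   generic.setType("natural")
#   generic.setDestination("fpga")            # must be one of DESTINATION
#   generic.setMatch("[0-9]+")                # constrain values to digits
#   generic.setValue("42")                    # accepted: matches the regex
#   generic.setValue("forty-two")             # raises Error: no regex match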
|
import logging
from ..DataUploader import Plugin as DataUploaderPlugin
from .reader import AndroidReader, AndroidStatsReader
from ...common.interfaces import AbstractPlugin
try:
from volta.core.core import Core as VoltaCore
except Exception:
raise RuntimeError("Please install volta. https://github.com/yandex-load/volta")
logger = logging.getLogger(__name__)
class Plugin(AbstractPlugin):
SECTION = "android"
SECTION_META = "meta"
def __init__(self, core, cfg, name):
self.stats_reader = None
self.reader = None
super(Plugin, self).__init__(core, cfg, name)
self.device = None
try:
self.cfg = cfg['volta_options']
for key, value in self.cfg.items():
if not isinstance(value, dict):
                    logger.debug('Malformed VoltaConfig key: %s, value: %s', key, value)
                    raise RuntimeError('Malformed VoltaConfig passed, key: %s. Should be a dict' % key)
except AttributeError:
logger.error('Failed to read Volta config', exc_info=True)
self.volta_core = VoltaCore(self.cfg)
@staticmethod
def get_key():
return __file__
def get_available_options(self):
opts = ["volta_options"]
return opts
def configure(self):
self.volta_core.configure()
def get_reader(self):
if self.reader is None:
self.reader = AndroidReader()
return self.reader
def get_stats_reader(self):
if self.stats_reader is None:
self.stats_reader = AndroidStatsReader()
return self.stats_reader
def prepare_test(self):
self.core.add_artifact_file(self.volta_core.currents_fname)
        for fname in self.volta_core.event_fnames.values():
            self.core.add_artifact_file(fname)
def start_test(self):
try:
self.volta_core.start_test()
# FIXME raise/catch appropriate exception here
except: # noqa: E722
logger.info('Failed to start test of Android plugin', exc_info=True)
return 1
def is_test_finished(self):
try:
if hasattr(self.volta_core, 'phone'):
if hasattr(self.volta_core.phone, 'test_performer'):
if not self.volta_core.phone.test_performer:
logger.warning('There is no test performer process on the phone, interrupting test')
return 1
if not self.volta_core.phone.test_performer.is_finished():
logger.debug('Waiting for phone test to finish...')
return -1
else:
return self.volta_core.phone.test_performer.retcode
# FIXME raise/catch appropriate exception here
except: # noqa: E722
logger.error('Unknown exception of Android plugin. Interrupting test', exc_info=True)
return 1
def end_test(self, retcode):
try:
self.volta_core.end_test()
uploaders = self.core.get_plugins_of_type(DataUploaderPlugin)
for uploader in uploaders:
response = uploader.lp_job.api_client.link_mobile_job(
lp_key=uploader.lp_job.number,
mobile_key=self.volta_core.uploader.jobno
)
logger.info(
'Linked mobile job %s to %s for plugin: %s. Response: %s',
self.volta_core.uploader.jobno, uploader.lp_job.number, uploader.backend_type, response
)
# FIXME raise/catch appropriate exception here
except: # noqa: E722
logger.error('Failed to complete end_test of Android plugin', exc_info=True)
retcode = 1
return retcode
def get_info(self):
return AndroidInfo()
def post_process(self, retcode):
try:
self.volta_core.post_process()
# FIXME raise/catch appropriate exception here
except: # noqa: E722
logger.error('Failed to complete post_process of Android plugin', exc_info=True)
retcode = 1
return retcode
class AndroidInfo(object):
def __init__(self):
self.address = ''
self.port = 80
self.ammo_file = ''
self.duration = 0
self.loop_count = 1
self.instances = 1
self.rps_schedule = ''
|
"""
Linux installation
"""
import os
import re
import time
import libvirt
import oz.Guest
import oz.OzException
class LinuxCDGuest(oz.Guest.CDGuest):
"""
Class for Linux installation.
"""
def __init__(self, tdl, config, auto, output_disk, nicmodel, diskbus,
iso_allowed, url_allowed, macaddress, useuefi):
oz.Guest.CDGuest.__init__(self, tdl, config, auto, output_disk,
nicmodel, None, None, diskbus, iso_allowed,
url_allowed, macaddress, useuefi)
def _test_ssh_connection(self, guestaddr):
"""
Internal method to test out the ssh connection before we try to use it.
Under systemd, the IP address of a guest can come up and reportip can
run before the ssh key is generated and sshd starts up. This check
makes sure that we allow an additional 30 seconds (1 second per ssh
attempt) for sshd to finish initializing.
"""
count = 30
success = False
while count > 0:
try:
self.log.debug("Testing ssh connection, try %d", count)
start = time.time()
self.guest_execute_command(guestaddr, 'ls', timeout=1)
self.log.debug("Succeeded")
success = True
break
except oz.ozutil.SubprocessException:
# ensure that we spent at least one second before trying again
end = time.time()
if (end - start) < 1:
time.sleep(1 - (end - start))
count -= 1
if not success:
self.log.debug("Failed to connect to ssh on running guest")
raise oz.OzException.OzException("Failed to connect to ssh on running guest")
def get_default_runlevel(self, g_handle):
"""
Function to determine the default runlevel based on the /etc/inittab.
"""
runlevel = "3"
if g_handle.exists('/etc/inittab'):
lines = g_handle.cat('/etc/inittab').split("\n")
for line in lines:
if re.match('id:', line):
runlevel = line.split(':')[1]
break
return runlevel
def guest_execute_command(self, guestaddr, command, timeout=10):
"""
Method to execute a command on the guest and return the output.
"""
# ServerAliveInterval protects against NAT firewall timeouts
# on long-running commands with no output
#
# PasswordAuthentication=no prevents us from falling back to
# keyboard-interactive password prompting
#
# -F /dev/null makes sure that we don't use the global or per-user
# configuration files
return oz.ozutil.subprocess_check_output(["ssh", "-i", self.sshprivkey,
"-F", "/dev/null",
"-o", "ServerAliveInterval=30",
"-o", "StrictHostKeyChecking=no",
"-o", "ConnectTimeout=" + str(timeout),
"-o", "UserKnownHostsFile=/dev/null",
"-o", "PasswordAuthentication=no",
"-o", "IdentitiesOnly yes",
"root@" + guestaddr, command],
printfn=self.log.debug)
def guest_live_upload(self, guestaddr, file_to_upload, destination,
timeout=10):
"""
Method to copy a file to the live guest.
"""
self.guest_execute_command(guestaddr,
"mkdir -p " + os.path.dirname(destination),
timeout)
# ServerAliveInterval protects against NAT firewall timeouts
# on long-running commands with no output
#
# PasswordAuthentication=no prevents us from falling back to
# keyboard-interactive password prompting
#
# -F /dev/null makes sure that we don't use the global or per-user
# configuration files
return oz.ozutil.subprocess_check_output(["scp", "-i", self.sshprivkey,
"-F", "/dev/null",
"-o", "ServerAliveInterval=30",
"-o", "StrictHostKeyChecking=no",
"-o", "ConnectTimeout=" + str(timeout),
"-o", "UserKnownHostsFile=/dev/null",
"-o", "PasswordAuthentication=no",
"-o", "IdentitiesOnly yes",
file_to_upload,
"root@" + guestaddr + ":" + destination],
printfn=self.log.debug)
def _customize_files(self, guestaddr):
"""
Method to upload the custom files specified in the TDL to the guest.
"""
self.log.info("Uploading custom files")
for name, fp in list(self.tdl.files.items()):
# all of the self.tdl.files are named temporary files; we just need
# to fetch the name out and have scp upload it
self.guest_live_upload(guestaddr, fp.name, name)
def _shutdown_guest(self, guestaddr, libvirt_dom):
"""
Method to shutdown the guest (gracefully at first, then with prejudice).
"""
if guestaddr is not None:
# sometimes the ssh process gets disconnected before it can return
# cleanly (particularly when the guest is running systemd). If that
# happens, ssh returns 255, guest_execute_command throws an
# exception, and the guest is forcibly destroyed. While this
# isn't the end of the world, it isn't desirable. To avoid
# this, we catch any exception thrown by ssh during the shutdown
# command and throw them away. In the (rare) worst case, the
# shutdown will not have made it to the guest and we'll have to wait
# 90 seconds for wait_for_guest_shutdown to timeout and forcibly
# kill the guest.
try:
self.guest_execute_command(guestaddr, 'shutdown -h now')
except Exception:
pass
try:
if not self._wait_for_guest_shutdown(libvirt_dom):
                    self.log.warning("Guest did not shut down in time, going to kill")
else:
libvirt_dom = None
except Exception:
self.log.warning("Failed shutting down guest, forcibly killing")
if libvirt_dom is not None:
try:
libvirt_dom.destroy()
except libvirt.libvirtError:
# the destroy failed for some reason. This can happen if
# _wait_for_guest_shutdown times out, but the domain shuts
# down before we get to destroy. Check to make sure that the
# domain is gone from the list of running domains; if so, just
# continue on; if not, re-raise the error.
for domid in self.libvirt_conn.listDomainsID():
if domid == libvirt_dom.ID():
raise
def _collect_setup(self, libvirt_xml): # pylint: disable=unused-argument
"""
Default method to set the guest up for remote access.
"""
raise oz.OzException.OzException("ICICLE generation and customization is not implemented for guest %s" % (self.tdl.distro))
def _collect_teardown(self, libvirt_xml): # pylint: disable=unused-argument
"""
Method to reverse the changes done in _collect_setup.
"""
raise oz.OzException.OzException("ICICLE generation and customization is not implemented for guest %s" % (self.tdl.distro))
def _install_packages(self, guestaddr, packstr): # pylint: disable=unused-argument
"""
        Internal method to install packages; expected to be overridden by
        child classes.
"""
raise oz.OzException.OzException("Customization is not implemented for guest %s" % (self.tdl.distro))
def _customize_repos(self, guestaddr): # pylint: disable=unused-argument
"""
        Internal method to customize repositories; expected to be overridden by
        child classes.
"""
raise oz.OzException.OzException("Customization is not implemented for guest %s" % (self.tdl.distro))
def _remove_repos(self, guestaddr): # pylint: disable=unused-argument
"""
        Internal method to remove repositories; expected to be overridden by
        child classes.
"""
raise oz.OzException.OzException("Repository removal not implemented for guest %s" % (self.tdl.distro))
def do_customize(self, guestaddr):
"""
Method to customize by installing additional packages and files.
"""
if not self.tdl.packages and not self.tdl.files and not self.tdl.commands:
# no work to do, just return
return
self._customize_repos(guestaddr)
for cmd in self.tdl.precommands:
self.guest_execute_command(guestaddr, cmd.read())
self.log.debug("Installing custom packages")
packstr = ''
for package in self.tdl.packages:
packstr += '"' + package.name + '" '
if packstr != '':
self._install_packages(guestaddr, packstr)
self._customize_files(guestaddr)
self.log.debug("Running custom commands")
for cmd in self.tdl.commands:
self.guest_execute_command(guestaddr, cmd.read())
self.log.debug("Removing non-persisted repos")
self._remove_repos(guestaddr)
self.log.debug("Syncing")
self.guest_execute_command(guestaddr, 'sync')
def do_icicle(self, guestaddr):
"""
Default method to collect the package information and generate the
ICICLE XML.
"""
raise oz.OzException.OzException("ICICLE generation is not implemented for this guest type")
def _internal_customize(self, libvirt_xml, action):
"""
Internal method to customize and optionally generate an ICICLE for the
operating system after initial installation.
"""
# the "action" input is actually a tri-state:
# action = "gen_and_mod" means to generate the icicle and to
# potentially make modifications
# action = "gen_only" means to generate the icicle only, and not
# look at any modifications
# action = "mod_only" means to not generate the icicle, but still
# potentially make modifications
self.log.info("Customizing image")
if not self.tdl.packages and not self.tdl.files and not self.tdl.commands:
if action == "mod_only":
self.log.info("No additional packages, files, or commands to install, and icicle generation not requested, skipping customization")
return
elif action == "gen_and_mod":
# It is actually possible to get here with a "gen_and_mod"
# action but a TDL that contains no real customizations.
# In the "safe ICICLE" code below it is important to know
# when we are truly in a "gen_only" state so we modify
# the action here if we detect that ICICLE generation is the
# only task to be done.
self.log.debug("Asked to gen_and_mod but no mods are present - changing action to gen_only")
action = "gen_only"
# when doing an oz-install with -g, this isn't necessary as it will
# just replace the port with the same port. However, it is very
# necessary when doing an oz-customize since the serial port might
# not match what is specified in the libvirt XML
modified_xml = self._modify_libvirt_xml_for_serial(libvirt_xml)
if action == "gen_only" and self.safe_icicle_gen:
# We are only generating ICICLE and the user has asked us to do
# this without modifying the completed image by booting it.
# Create a copy on write snapshot to use for ICICLE
# generation - discard when finished
cow_diskimage = self.diskimage + "-icicle-snap.qcow2"
self._internal_generate_diskimage(force=True,
backing_filename=self.diskimage,
image_filename=cow_diskimage)
modified_xml = self._modify_libvirt_xml_diskimage(modified_xml, cow_diskimage, 'qcow2')
self._collect_setup(modified_xml)
icicle = None
try:
libvirt_dom = self.libvirt_conn.createXML(modified_xml, 0)
try:
guestaddr = None
guestaddr = self._wait_for_guest_boot(libvirt_dom)
self._test_ssh_connection(guestaddr)
if action == "gen_and_mod":
self.do_customize(guestaddr)
icicle = self.do_icicle(guestaddr)
elif action == "gen_only":
icicle = self.do_icicle(guestaddr)
elif action == "mod_only":
self.do_customize(guestaddr)
else:
raise oz.OzException.OzException("Invalid customize action %s; this is a programming error" % (action))
finally:
if action == "gen_only" and self.safe_icicle_gen:
# if this is a gen_only and safe_icicle_gen, there is no
# reason to wait around for the guest to shutdown; we'll
# be removing the overlay file anyway. Just destroy it
libvirt_dom.destroy()
else:
self._shutdown_guest(guestaddr, libvirt_dom)
finally:
if action == "gen_only" and self.safe_icicle_gen:
# no need to teardown because we simply discard the file
# containing those changes
os.unlink(cow_diskimage)
else:
self._collect_teardown(modified_xml)
return icicle
def customize(self, libvirt_xml):
"""
Method to customize the operating system after installation.
"""
return self._internal_customize(libvirt_xml, "mod_only")
def customize_and_generate_icicle(self, libvirt_xml):
"""
Method to customize and generate the ICICLE for an operating system
after installation. This is equivalent to calling customize() and
generate_icicle() back-to-back, but is faster.
"""
return self._internal_customize(libvirt_xml, "gen_and_mod")
def generate_icicle(self, libvirt_xml):
"""
Method to generate the ICICLE from an operating system after
installation. The ICICLE contains information about packages and
other configuration on the diskimage.
"""
return self._internal_customize(libvirt_xml, "gen_only")
|
""" Loaders plugin manager """
from __future__ import print_function
import os.path
from fife import fife
from fife.extensions.serializers.xmlmap import XMLMapLoader
mapFileMapping = { 'xml' : XMLMapLoader}
fileExtensions = set(['xml'])
def loadMapFile(path, engine, callback=None, debug=True, extensions={}):
""" load map file and get (an optional) callback if major stuff is done:
- map creation
- parsed imports
- parsed layers
- parsed cameras
    the callback will send both a string and a float (which shows
    the overall progress), callback(string, float)
@type engine: object
@param engine: FIFE engine instance
@type callback: function
@param callback: callback for maploading progress
@type debug: bool
@param debug: flag to activate / deactivate print statements
    @rtype: object
    @return: FIFE map object
"""
(filename, extension) = os.path.splitext(path)
map_loader = mapFileMapping[extension[1:]](engine, callback, debug, extensions)
map = map_loader.loadResource(path)
if debug: print("--- Loading map took: ", map_loader.time_to_load, " seconds.")
return map
def addMapLoader(fileExtension, loaderClass):
"""Add a new loader for fileextension
@type fileExtension: string
@param fileExtension: The file extension the loader is registered for
@type loaderClass: object
@param loaderClass: A fife.ResourceLoader implementation that loads maps
from files with the given fileExtension
"""
mapFileMapping[fileExtension] = loaderClass
_updateMapFileExtensions()
def _updateMapFileExtensions():
global fileExtensions
fileExtensions = set(mapFileMapping.keys())
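# --- Usage sketch (added; not part of the FIFE loaders module) ---
# A hedged example of how the helpers above might be used together: register an
# extra loader and then load a map. It assumes a running FIFE engine instance
# ("engine") and a hypothetical MyMapLoader class implementing
# fife.ResourceLoader; neither is defined here.
#
#   addMapLoader('mymap', MyMapLoader)        # hypothetical loader class
#
#   def report(message, progress):
#       print(message, progress)              # callback(string, float)
#
#   town = loadMapFile('maps/town.xml', engine, callback=report)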
|
from __future__ import print_function
from __future__ import absolute_import
from enigma import *
from Screens.MessageBox import MessageBox
from Screens.Standby import TryQuitMainloop
from . crossepglib import *
from . crossepg_downloader import CrossEPG_Downloader
from . crossepg_importer import CrossEPG_Importer
from . crossepg_converter import CrossEPG_Converter
from . crossepg_loader import CrossEPG_Loader
from . crossepg_setup import CrossEPG_Setup
from . crossepg_menu import CrossEPG_Menu
from . crossepg_auto import CrossEPG_Auto
class CrossEPG_Main:
def __init__(self):
self.config = CrossEPG_Config()
self.patchtype = getEPGPatchType()
def downloader(self, session):
self.session = session
CrossEPG_Auto.instance.lock = True
CrossEPG_Auto.instance.stop()
self.config.load()
if self.config.configured == 0:
self.session.openWithCallback(self.configureCallback, MessageBox, _("You need to configure crossepg before starting downloader.\nWould You like to do it now ?"), type=MessageBox.TYPE_YESNO)
else:
self.config.deleteLog()
self.session.openWithCallback(self.downloadCallback, CrossEPG_Downloader, self.config.providers)
def configureCallback(self, result):
if result is True:
self.session.open(CrossEPG_Setup)
def loaderAsPlugin(self, session):
self.session = session
CrossEPG_Auto.instance.lock = True
CrossEPG_Auto.instance.stop()
self.loader()
def downloadCallback(self, ret):
if ret:
if self.config.csv_import_enabled == 1:
self.importer()
else:
if self.patchtype != 3:
self.converter()
else:
self.loader()
else:
CrossEPG_Auto.instance.lock = False
def importer(self):
self.session.openWithCallback(self.importerCallback, CrossEPG_Importer)
def importerCallback(self, ret):
if ret:
if self.patchtype != 3:
self.converter()
else:
self.loader()
else:
CrossEPG_Auto.instance.lock = False
def converter(self):
self.session.openWithCallback(self.converterCallback, CrossEPG_Converter)
def converterCallback(self, ret):
if ret:
if self.patchtype != -1:
self.loader()
else:
if self.config.download_manual_reboot:
self.session.open(TryQuitMainloop, 3)
else:
CrossEPG_Auto.instance.lock = False
else:
CrossEPG_Auto.instance.lock = False
def loader(self):
self.session.openWithCallback(self.loaderCallback, CrossEPG_Loader)
def loaderCallback(self, ret):
CrossEPG_Auto.instance.lock = False
def setup(self, session, **kwargs):
CrossEPG_Auto.instance.lock = True
session.openWithCallback(self.setupCallback, CrossEPG_Menu)
def setupCallback(self):
CrossEPG_Auto.instance.lock = False
CrossEPG_Auto.instance.doneConfiguring()
crossepg_main = CrossEPG_Main()
|
"""
cNotify package provides three main concepts: I{L{signals <signal>}}, I{L{conditions
<condition>}} and I{L{variables <variable>}}. Signals are basically lists of callables
that can be I{emitted} and then will call all contained callables (I{handler} of a signal)
in turn. Conditions are boolean values complemented with a signal that is emitted when
condition’s I{state} changes. Variables are akin to conditions but can hold arbitrary
I{values}, not just booleans. Conditions, unlike variables, can also be combined using
standard logic operators, like negation, conjunction and so on.
All three concepts provide separation between providers (writers, setters) and listeners
(readers, getters) of some entity. Conditions and variables make the entity explicit—it
is a boolean state for the former and arbitrary Python object for the latter (though
derived variable classes can restrict the set of allowed values.)
Here is a quick example:
>>> from cnotify.variable import *
... name = Variable ()
...
... import sys
... name.changed.connect (
... lambda string: sys.stdout.write ('Hello there, %s!\\n' % string))
...
... name.value = 'Chuk'
Note that when setting the C{name} variable, you don’t need to know who, if anyone,
listens to changes to it. Interested parties take care to express their interest
themselves and are informed upon a change automatically.
Here is a little more elaborate example with the same functionality (it requires U{PyGTK
<http://pygtk.org/>}):
>>> from cnotify.variable import *
... import gtk
...
... name = Variable ()
...
... def welcome_user (name_string):
... dialog = gtk.MessageDialog (None, 0, gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
... 'Hello there, %s!' % name_string)
... dialog.run ()
... dialog.destroy ()
...
... name.changed.connect (welcome_user)
...
... def set_name_from_entry (entry):
... name.value = entry.get_text ()
...
... window = gtk.Window ()
... window.set_title ('Enter name')
...
... entry = gtk.Entry ()
... entry.show ()
... window.add (entry)
...
... entry.connect ('activate', set_name_from_entry)
... window.connect ('destroy', lambda window: gtk.main_quit ())
...
... window.present ()
...
... gtk.main ()
Note that C{window} knows absolutely nothing about how changes to C{name} variable are
handled. If you play with this example, you will notice one thing: pressing C{Enter} in
the main window twice doesn’t pop the welcoming dialog twice. That is because both
conditions and variables emit their ‘changed’ signal I{only} when their state/value
actually changes, not on every assignment.
Now a final, quite complicated, example introducing conditions and some other features:
>>> from cnotify.all import *
...
... pilots = Variable ()
... fuel = Variable ()
...
... import sys
...
... pilots.changed.connect (
... lambda pilots: sys.stdout.write ('Pilots are %s\\n' % pilots))
... fuel.changed.connect (
... lambda amount: sys.stdout.write ('Got %d litres of fuel\\n' % amount))
...
... def ready_state_changed (ready):
... if ready:
... sys.stdout.write ('Ready to get off!\\n')
... else:
... sys.stdout.write ('Missing pilots or fuel\\n')
...
... ready = pilots.is_true () & fuel.predicate (lambda amount: amount > 0)
... ready.store (ready_state_changed)
...
... pilots.value = 'Jen and Jim'
... fuel.value = 500
...
... fuel.value = 0
First line of example shows a way to save typing by importing all package contents at
once. Whether to use this technique is up to you. Following lines up to C{ready = ...}
should be familiar.
Now let’s consider that assignment closer. First, C{L{pilots.is_true ()
<variable.AbstractVariable.is_true>}} code creates a condition that is true depending on
C{pilots} value (true for non-empty sequences in our case.) It is just a convenience
wrapper over C{L{AbstractVariable.predicate <variable.AbstractVariable.predicate>}}
method. Now, the latter is also used directly in this line of code. It creates a
condition that is true as long as variable’s value conforms to the passed in predicate.
In particular, C{fuel.predicate (lambda amount: amount > 0)} creates a condition that is
true if C{fuel}’s value is greater than zero. Predicate conditions will recompute their
state each time variable’s value changes and that’s the point in using them.
Finally, two just constructed conditions are combined into a third condition using ‘and’
operator (C{&}). This third condition will be true if and only if I{both} its term
conditions are true. Conditions support four logic operations: negation, conjunction,
disjunction and xoring (with these operators: C{~}, C{&}, C{|} and C{^}.) In addition,
each condition has C{L{if_else <condition.AbstractCondition.if_else>}} method, which is
much like Python’s C{if} operator.
The next line introduces one more new method: C{L{store
<base.AbstractValueObject.store>}}. It is really just like connecting its only argument
to the ‘changed’ signal, except that it is also called once with the current state of the
condition (or value of a variable.)
The example should produce this output::
Missing pilots or fuel
Pilots are Jen and Jim
Got 500 litres of fuel
Ready to get off!
Got 0 litres of fuel
Missing pilots or fuel
Notable here is the output from C{ready_state_changed} function. It is called once at the
beginning from the C{store} method with the state of C{ready} condition (then C{False}.)
Both later calls correspond to changes in C{ready}’s state. When both C{pilots} and
C{fuel} variables are set, corresponding predicate conditions become true and so does the
C{ready} condition. However, when one of the predicate conditions becomes false (as the
result of C{fuel} being set to zero), C{ready} turns false again. Note that
C{ready_state_changed} is not called in between setting the C{pilots} and C{fuel} variables.
C{ready} state is recomputed, but since it remains the same, ‘changed’ signal is not
emitted.
G{packagetree}
"""
__docformat__ = 'epytext en'
__version__ = '0.3.2.1'
"""
Version of Py-cnotify, as a string.
"""
version_tuple = (0, 3, 2, 1)
"""
Version of Py-cnotify, as a tuple of integers. It is guaranteed that version tuples of
later versions will compare greater than those of earlier versions.
"""
|
import sys
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
]
templates_path = ['_templates']
source_suffix = ['.rst']
master_doc = 'index'
project = 'Backend.AI API Documentation'
copyright = '2015-2020, Lablup Inc.'
author = 'Lablup Inc.'
version = 'v5.20191215'
release = '20.03'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'tango'
highlight_language = 'python3'
todo_include_todos = False
numfig = True
intersphinx_mapping = {
'client-py':
('https://client-py.docs.backend.ai/en/latest/', None),
}
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'BackendAIAPIDoc'
latex_elements = {
}
latex_documents = [
(master_doc, 'BackendAIDoc.tex', 'Backend.AI API Documentation',
author, 'manual'),
]
man_pages = [
(master_doc, 'backend.ai', 'Backend.AI API Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'Backend.AI', 'Backend.AI API Documentation',
author, 'Backend.AI', 'Backend.AI is a hassle-free backend for AI programming and service.', 'Miscellaneous'),
]
|
import math
import socket
import tempfile
import unittest
from contextlib import closing
import numpy as np
from shyft.api import (
Calendar, UtcPeriod,
DtsServer, DtsClient,
TimeAxis, TimeSeries, POINT_AVERAGE_VALUE, POINT_INSTANT_VALUE
)
from shyft.pyapi import fixed_tsv, windowed_percentiles_tsv, period_percentiles_tsv, selector_ts
def find_free_port() -> int:
"""
from SO https://stackoverflow.com/questions/1365265/on-localhost-how-to-pick-a-free-port-number
:return: available port number for use
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
class SelectorTsTestCase(unittest.TestCase):
def setUp(self) -> None:
self.port = find_free_port()
self.server = DtsServer()
self.server.set_listening_port(self.port)
self.server.start_async()
self.client = DtsClient(rf'localhost:{self.port}')
def tearDown(self) -> None:
self.server.clear()
del self.server
del self.port
def test_fixed_tsv_empty(self) -> None:
"""Test that an empty TsVector is generated by fixed_tsv when given an empty sequence of values."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
tsv = fixed_tsv(period, [])
self.assertEqual(len(tsv), 0)
def test_fixed_tsv_values(self) -> None:
"""Test that a TsVector with fixed constant values is generated by fixed_tsv when given
a sequence of values."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
values = [12, 15.5]
tsv = fixed_tsv(period, values)
self.assertEqual(len(tsv), 2)
for v, ts in zip(values, tsv):
for ts_v in ts.values:
self.assertEqual(ts_v, v)
def test_windowed_percentiles_tsv_empty(self) -> None:
"""Test that an empty TsVector is generated by windowed_percentiles_tsv
when given an empty sequence of percentiles."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
data = np.linspace(-2, 2, 24*7)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, len(data)), data, POINT_INSTANT_VALUE)
# compute
tsv = windowed_percentiles_tsv(
data_ts, period,
Calendar.HOUR, Calendar.HOUR,
[],
self.client, cal
)
self.assertEqual(len(tsv), 0)
def test_windowed_percentiles_tsv_values(self) -> None:
"""Test that a TsVector is generated by windowed_percentiles_tsv with time-series
fulfilling some properties of being percentiles of the data ts."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
data = np.linspace(-2, 2, 24*7)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, len(data)), data, POINT_INSTANT_VALUE)
# compute
percentiles = [0, 10, 50, 90, 100]
tsv = windowed_percentiles_tsv(
data_ts, period,
3*Calendar.HOUR, 12*Calendar.HOUR,
percentiles,
self.client, cal
)
self.assertEqual(len(tsv), 5)
# assert that the time-series have the correct properties for being percentile series
for i in range(len(tsv[0])):
prev_v = tsv[0].values[i]
for j in range(len(percentiles)-1):
v = tsv[j+1].values[i]
# both values will be NaN at the end - that is ok
if math.isnan(prev_v) and math.isnan(v):
continue
                # check that larger percentiles never have values smaller than lower percentiles
self.assertLessEqual(prev_v, v)
prev_v = v
def test_period_percentiles_tsv_empty(self) -> None:
"""Test that an empty TsVector is generated by period_percentiles_tsv
when given an empty sequence of percentiles."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
data = np.linspace(-2, 2, 24*7)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, len(data)), data, POINT_INSTANT_VALUE)
# compute
tsv = period_percentiles_tsv(
data_ts, period,
3*Calendar.HOUR, period,
[],
self.client, cal
)
self.assertEqual(len(tsv), 0)
def test_period_percentiles_tsv_values(self) -> None:
"""Test that a TsVector is generated by period_percentiles_tsv with time-series
fulfilling some properties of being percentiles of the data ts."""
cal = Calendar()
period = UtcPeriod(cal.time(2017, 1, 1), cal.time(2018, 1, 1))
data = np.linspace(-2, 2, 24*7)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, len(data)), data, POINT_INSTANT_VALUE)
# compute
percentiles = [0, 10, 50, 90, 100]
tsv = period_percentiles_tsv(
data_ts, period,
3*Calendar.HOUR, period,
percentiles,
self.client, cal
)
self.assertEqual(len(tsv), 5)
# assert that the time-series have the correct properties for being percentile series
for i in range(len(tsv[0])):
prev_v = tsv[0].values[i]
for j in range(len(percentiles)-1):
v = tsv[j+1].values[i]
# both values will be NaN at the end - that is ok
if math.isnan(prev_v) and math.isnan(v):
continue
                # check that larger percentiles never have values smaller than lower percentiles
self.assertLessEqual(prev_v, v)
prev_v = v
    def test_selector_ts(self) -> None:
        """Test that selector_ts constructs a time-series that selects data from
        different time-series correctly."""
n = 24
cal = Calendar()
period = UtcPeriod(0, n*Calendar.HOUR)
data_ts = TimeSeries(TimeAxis(0, Calendar.HOUR, n), np.linspace(-10, 10, n), POINT_INSTANT_VALUE)
source_tss = [
TimeSeries(TimeAxis(0, Calendar.HOUR, n), 1.00*np.ones(n), POINT_INSTANT_VALUE),
TimeSeries(TimeAxis(0, Calendar.HOUR, n), 10.0*np.ones(n), POINT_INSTANT_VALUE),
TimeSeries(TimeAxis(0, Calendar.HOUR, n), 100.*np.ones(n), POINT_INSTANT_VALUE),
]
threshold_1 = -5
threshold_2 = 5
threshold_tss = [
TimeSeries(TimeAxis(0, Calendar.HOUR, n), threshold_1*np.ones(n), POINT_INSTANT_VALUE),
TimeSeries(TimeAxis(0, Calendar.HOUR, n), threshold_2*np.ones(n), POINT_INSTANT_VALUE),
]
ts = selector_ts(
data_ts, period, 2*Calendar.HOUR,
threshold_tss, source_tss,
POINT_AVERAGE_VALUE,
self.client, cal
)
self.assertEqual(len(data_ts), len(ts))
for dv, rv in zip(data_ts.values, ts.values):
if dv < threshold_1:
self.assertEqual(rv, source_tss[0].values[0])
elif threshold_1 <= dv < threshold_2:
self.assertEqual(rv, source_tss[1].values[0])
else:
self.assertEqual(rv, source_tss[2].values[0])
|
"""
RSS Reader for C-Power 1200
Copyright 2010-2012 Michael Farrell <http://micolous.id.au/>
This library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from cpower1200 import *
import feedparser
from sys import argv
FEED = 'http://news.google.com.au/news?pz=1&cf=all&ned=au&hl=en&output=rss'
d = feedparser.parse(FEED)
s = CPower1200(argv[1])
s.send_window(dict(x=0, y=0, h=8, w=64), dict(x=0, y=8, h=8, w=64))
header = s.format_text(d.feed.title, RED, 0)
articles = ''
for i, article in enumerate(d.entries[:4]):
print "entry %d: %s" % (i, article.title)
colour = YELLOW if i % 2 == 0 else GREEN
articles += s.format_text(article.title + ' ', colour)
s.send_clock(0, display_year=False, display_month=False, display_day=False, display_hour=True, display_minute=True, display_second=True, multiline=False, red=255,green=0,blue=0)
s.send_text(1, articles, speed=10)
|
from distutils.core import setup
import os.path
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
]
def read(fname):
fname = os.path.join(os.path.dirname(__file__), fname)
return open(fname).read().strip()
def read_files(*fnames):
return '\r\n\r\n\r\n'.join(map(read, fnames))
setup(
name = 'icall',
version = '0.3.4',
py_modules = ['icall'],
description = 'Parameters call function, :-)',
long_description = read_files('README.rst', 'CHANGES.rst'),
author = 'huyx',
author_email = 'ycyuxin@gmail.com',
url = 'https://github.com/huyx/icall',
keywords = ['functools', 'function', 'call'],
classifiers = classifiers,
)
|
{
'name' : 'Signature templates for user emails',
'version' : '1.0.0',
'author' : 'IT-Projects LLC, Ivan Yelizariev',
'license': 'LGPL-3',
'category' : 'Social Network',
'website' : 'https://yelizariev.github.io',
'depends' : ['base'],
'data':[
'res_users_signature_views.xml',
'security/res_users_signature_security.xml',
'security/ir.model.access.csv',
],
'installable': False
}
|
import hashlib
import uuid
def get_hash(data):
    """Return the SHA-256 hex digest of data (bytes)."""
return hashlib.sha256(data).hexdigest()
def get_token():
return str(uuid.uuid4())
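# A small usage sketch (added): get_hash expects bytes in Python 3, since
# hashlib.sha256 does not accept str.
if __name__ == "__main__":
    print(get_hash(b"hello world"))   # 64-character SHA-256 hex digest
    print(get_token())                # random UUID4 string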
|
import sys
if sys.argv[1] == 'template':
effectname = 'template'
parameters = [('Vol', (0.0, 1.0, 0.5, 0.25, 0.00001))]
# pName, (min, max, default, skew, increment)
    # where skew is a dynamic adjustment of the exp/lin/log translation of the GUI widget
# and increment is the smallest change allowed by the GUI widget
if sys.argv[1] == 'stereopan':
effectname = 'stereopan'
parameters = [('Pan', (0.0, 1.0, 0.5, 1, 0.001)),
('Mix', (0.0, 1.0, 0.5, 1, 0.001))]
if sys.argv[1] == 'tremolam':
effectname = 'tremolam'
parameters = [('Depth', (0.0, 1.0, 0.5, 0.25, 0.001)),
('RateLow', (0.0, 10.0, 0.5, 0.25, 0.001)),
('RateHigh', (0.0, 500.0, 0.5, 0.25, 0.001))]
if sys.argv[1] == 'vst_mediator':
effectname = 'vst_mediator'
parameters = [('parm1', (0.0, 1.0, 0.5, 1, 0.001)),
('parm2', (0.0, 1.0, 0.5, 1, 0.001)),
('parm3', (0.0, 1.0, 0.5, 1, 0.001)),
('parm4', (0.0, 1.0, 0.5, 1, 0.001)),
('parm5', (0.0, 1.0, 0.5, 1, 0.001)),
('parm6', (0.0, 1.0, 0.5, 1, 0.001)),
('parm7', (0.0, 1.0, 0.5, 1, 0.001)),
('parm8', (0.0, 1.0, 0.5, 1, 0.001))
]
if sys.argv[1] == 'vst_MIDIator':
effectname = 'vst_MIDIator'
parameters = [('parm1', (0.0, 1.0, 0.5, 1, 0.001)),
('parm2', (0.0, 1.0, 0.5, 1, 0.001)),
('parm3', (0.0, 1.0, 0.5, 1, 0.001)),
('parm4', (0.0, 1.0, 0.5, 1, 0.001)),
('parm5', (0.0, 1.0, 0.5, 1, 0.001)),
('parm6', (0.0, 1.0, 0.5, 1, 0.001)),
('parm7', (0.0, 1.0, 0.5, 1, 0.001)),
('parm8', (0.0, 1.0, 0.5, 1, 0.001))
]
if sys.argv[1] == 'stereodelay':
effectname = 'stereodelay'
parameters = [('delaytime', (0.0008, 2.0, 0.5, 0.25, 0.00001)),
('filt_fq', (100, 10000, 1000, 0.35, 1)),
('feedback', (0.0, 0.9999, 0.3, 1.9, 0.0001))
]
if sys.argv[1] == 'pluck':
effectname = 'pluck'
parameters = [('inlevel', (0, 1.0, 1, 0.3, 0.01)),
('freq', (1, 1450, 400, 0.3, 0.01)),
('filt_fq', (1000, 16000, 7000, 0.35, 1)),
('feedback', (0.8, 0.9999, 0.95, 1.9, 0.0001)),
('mix', (0, 1.0, 1, 0.3, 0.01))
]
if sys.argv[1] == 'lpf18dist':
effectname = 'lpf18dist'
parameters = [('Drive', (1, 12, 2, 1, 0.1)),
('Freq', (20, 10000, 3000, 0.35, 1)),
('Resonance', (0.001, 0.95, 0.3, 1, 0.001)),
('Dist', (0.001, 10, 0.2, 0.5, 0.001)),
('Mix', (0.0, 1.0, 1.0, 1, 0.01)),
]
if sys.argv[1] == 'screverb':
effectname = 'screverb'
parameters = [('InLevel', (0, 1.0, 0.2, 0.3, 0.01)),
('Feed', (0.0, 1.0, 0.85, 1.2, 0.01)),
('FiltFq', (100, 14000, 7000, 0.6, 1)),
('PitchMod', (0.0, 4.0, 0.9, 1, 0.01)),
('PreDly', (0.0, 500, 120, 1, 1)),
('LfRoll', (20, 500, 90, 1, 1)),
('Mix', (0.0, 1.0, 1.0, 1, 0.01))
]
if sys.argv[1] == 'freeverb':
effectname = 'freeverb'
parameters = [('inlevel', (0, 1.0, 1.0, 0.3, 0.01)),
('reverbtime', (0.0, 8.0, 1.5, 0.4, 0.01)),
('reverbdamp', (0.0, 1.0, 0.25, 0.6, 0.01)),
('reverbmix', (0.0, 1.0, 0.7, 1, 0.01))
]
if sys.argv[1] == 'mincertime':
effectname = 'mincertime'
parameters = [('inlevel', (0, 1.0, 1, 0.3, 0.01)),
('timpoint', (0, 0.99, 0.1, 0.4, 0.001)),
('pitch', (0.0, 2.0, 1.0, 1, 0.01)),
('feedback', (0.0, 1.0, 0.0, 1, 0.01)),
('mix', (0, 1.0, 1, 0.3, 0.01))
]
if sys.argv[1] == 'plucktremlpfverb':
effectname = 'plucktremlpfverb'
parameters = [('inlevel', (0, 1.0, 1, 0.3, 0.01)),
('pluckfreq', (1, 1450, 400, 0.3, 0.01)),
('pluckfilt', (1000, 16000, 7000, 0.35, 1)),
('pluckfeed', (0.8, 0.9999, 0.95, 1.9, 0.0001)),
('pluckmix', (0, 1.0, 1, 0.3, 0.01)),
('tremDepth', (0.0, 1.0, 0.5, 0.25, 0.001)),
('tRateLow', (0.0, 10.0, 0.5, 0.25, 0.001)),
('tRateHigh', (0.0, 500.0, 0.5, 0.25, 0.001)),
('lpfDrive', (1, 12, 2, 1, 0.1)),
('lpfFreq', (20, 10000, 3000, 0.35, 1)),
('lpfResonance', (0.001, 0.95, 0.3, 1, 0.001)),
('lpfDist', (0.001, 10, 0.2, 0.5, 0.001)),
('lpfMix', (0.0, 1.0, 1.0, 1, 0.01)),
('reverbtime', (0.0, 8.0, 1.5, 0.4, 0.01)),
('reverbdamp', (0.0, 1.0, 0.25, 0.6, 0.01)),
('reverbmix', (0.0, 1.0, 0.7, 1, 0.01))
]
if sys.argv[1] == 'mincerpanverb':
effectname = 'mincerpanverb'
parameters = [('inlevel', (0, 1.0, 1, 0.3, 0.01)),
('mincertime', (0, 0.99, 0.1, 0.4, 0.001)),
('mincerpitch', (0.0, 2.0, 1.0, 1, 0.01)),
('mincerfeed', (0.0, 1.0, 0.0, 1, 0.01)),
('mincermix', (0, 1.0, 1, 0.3, 0.01)),
('Pan', (0.0, 1.0, 0.5, 1, 0.001)),
('panMix', (0.0, 1.0, 0.5, 1, 0.001)),
('reverbtime', (0.0, 8.0, 1.5, 0.4, 0.01)),
('reverbdamp', (0.0, 1.0, 0.25, 0.6, 0.01)),
('reverbmix', (0.0, 1.0, 0.7, 1, 0.01))
]
scorefile = open(effectname+'_score_events.inc', 'w')
fractionalinstr = 0
for p in parameters:
fractionalinstr += 1
scorefile.write('i4.{fracinstr:02d} 3.1 $SCORELEN "{pname}"\n'.format(fracinstr=fractionalinstr, pname=p[0]))
chn_init_file = open(effectname+'_parameter_ranges.inc', 'w')
instr_template = '''
instr 1
; list of min and max for the mappable parameters
{}
endin
'''
parameter_ranges = ''
for i in range(len(parameters)):
parm = parameters[i]
parameter_ranges += ' chnset {}, "{}_min" \n'.format(parm[1][0], parm[0])
parameter_ranges += ' chnset {}, "{}_max" \n'.format(parm[1][1], parm[0])
chn_init_file.write(instr_template.format(parameter_ranges))
start_x_pos = 30
start_y_pos = 5
plant_height = 85
analysis_parms = '"rms", "rms_preEq", "cps", "pitch", "centroid", "spread", "skewness", "kurtosis", "flatness", "crest", "flux", "amp_trans", "amp_t_dens", "centr_trans", "centr_t_dens", "kurt_trans", "pitchup_trans", "pitchdown_trans", "cps_raw"'
plant = '''groupbox bounds({start_y}, {start_x}, 564, 81), plant("plant_{pname}"), linethickness("0"){{
combobox channel("source1_{pname}"), bounds(10, 12, 90, 20), items({analysis_p}), value(1), channeltype("string")
combobox channel("chan1_{pname}"), bounds(103, 12, 50, 20), items("1", "2", "3", "4"), value(1)
numberbox bounds(158, 14, 35, 15), channel("rise1_{pname}"), range(0.01, 10.0, 0.01)
numberbox bounds(196, 14, 35, 15), channel("fall1_{pname}"), range(0.01, 10.0, 0.5)
hslider bounds(233, 12, 86, 20), channel("scale1_{pname}"), range(-1.0, 1.0, 0, 1, 0.01)
button bounds(320, 12, 29, 19), channel("scale1_x_{pname}"), text("x 1","x 10"),
hslider bounds(349, 12, 86, 20), channel("curve1_{pname}"), range(-5.0, 5.0, 0)
combobox channel("source2_{pname}"), bounds(10, 34, 90, 20), items({analysis_p}), value(1), channeltype("string")
combobox channel("chan2_{pname}"), bounds(103, 34, 50, 20), items("1", "2", "3", "4"), value(1)
numberbox bounds(158, 36, 35, 15), channel("rise2_{pname}"), range(0.01, 10.0, 0.01)
numberbox bounds(196, 36, 35, 15), channel("fall2_{pname}"), range(0.01, 10.0, 0.5)
hslider bounds(233, 34, 86, 20), channel("scale2_{pname}"), range(-1.0, 1.0, 0, 1, 0.01)
button bounds(320, 34, 29, 19), channel("scale2_x_{pname}"), text("x 1","x 10"),
hslider bounds(349, 34, 86, 20), channel("curve2_{pname}"), range(-5.0, 5.0, 0)
label bounds(10, 58, 90, 12), text("source"), colour(20,20,20,255)
label bounds(103, 58, 50, 12), text("chan"), colour(20,20,20,255)
label bounds(156, 58, 76, 12), text("rise/fall"), colour(20,20,20,255)
label bounds(236, 58, 110, 12), text("scale"), colour(20,20,20,255)
label bounds(352, 58, 81, 12), text("curve"), colour(20,20,20,255)
rslider bounds(433, 12, 62, 62), text("offset"), channel("offset_{pname}"), range({p_min}, {p_max}, {p_default}, {p_skew}, {p_incr})
combobox bounds(433, 1, 55, 12), channel("offsetx_{pname}"), items("-1", "Norm", "+1"), value(2), channeltype("string")
rslider bounds(494, 8, 66, 66), text("{pname}"), channel("{pname}"), range({p_min}, {p_max}, {p_default}, {p_skew}, {p_incr})
}}
'''
plantMIDI = '''groupbox bounds({start_y}, {start_x}, 710, 81), plant("plant_{pname}"), linethickness("0"){{
combobox channel("source1_{pname}"), bounds(10, 12, 90, 20), items({analysis_p}), value(1), channeltype("string")
combobox channel("chan1_{pname}"), bounds(103, 12, 50, 20), items("1", "2", "3", "4"), value(1)
numberbox bounds(158, 14, 35, 15), channel("rise1_{pname}"), range(0.01, 10.0, 0.01)
numberbox bounds(196, 14, 35, 15), channel("fall1_{pname}"), range(0.01, 10.0, 0.5)
hslider bounds(233, 12, 86, 20), channel("scale1_{pname}"), range(-1.0, 1.0, 0, 1, 0.01)
button bounds(320, 12, 29, 19), channel("scale1_x_{pname}"), text("x 1","x 10"),
hslider bounds(349, 12, 86, 20), channel("curve1_{pname}"), range(-5.0, 5.0, 0)
combobox channel("source2_{pname}"), bounds(10, 34, 90, 20), items({analysis_p}), value(1), channeltype("string")
combobox channel("chan2_{pname}"), bounds(103, 34, 50, 20), items("1", "2", "3", "4"), value(1)
numberbox bounds(158, 36, 35, 15), channel("rise2_{pname}"), range(0.01, 10.0, 0.01)
numberbox bounds(196, 36, 35, 15), channel("fall2_{pname}"), range(0.01, 10.0, 0.5)
hslider bounds(233, 34, 86, 20), channel("scale2_{pname}"), range(-1.0, 1.0, 0, 1, 0.01)
button bounds(320, 34, 29, 19), channel("scale2_x_{pname}"), text("x 1","x 10"),
hslider bounds(349, 34, 86, 20), channel("curve2_{pname}"), range(-5.0, 5.0, 0)
label bounds(10, 58, 90, 12), text("source"), colour(20,20,20,255)
label bounds(103, 58, 50, 12), text("chan"), colour(20,20,20,255)
label bounds(156, 58, 76, 12), text("rise/fall"), colour(20,20,20,255)
label bounds(236, 58, 110, 12), text("scale"), colour(20,20,20,255)
label bounds(352, 58, 81, 12), text("curve"), colour(20,20,20,255)
rslider bounds(433, 12, 62, 62), text("offset"), channel("offset_{pname}"), range({p_min}, {p_max}, {p_default}, {p_skew}, {p_incr})
combobox bounds(433, 1, 55, 12), channel("offsetx_{pname}"), items("-1", "Norm", "+1"), value(2), channeltype("string")
rslider bounds(494, 8, 66, 66), text("{pname}"), channel("{pname}"), range({p_min}, {p_max}, {p_default}, {p_skew}, {p_incr})
label bounds(570, 8, 55, 12), text("midi"), colour(20,20,20,255)
checkbox bounds(632, 8, 12, 12), text("enable"), channel("enable_{pname}"), value(1)
numberbox bounds(570, 25, 55, 15), channel("midich_{pname}"), range(1, 16, 1)
numberbox bounds(570, 42, 55, 15), channel("ctrlnum_{pname}"), range(1, 127, 1)
label bounds(632, 25, 70, 12), text("channel"), colour(20,20,20,255)
label bounds(632, 42, 70, 12), text("ctrl"), colour(20,20,20,255)
}}
'''
if effectname == 'vst_MIDIator': plant = plantMIDI
guifile = open(effectname+'_gui_scratchpad.inc', 'w')
x_pos = start_x_pos
x_pos1 = start_x_pos
y_pos = start_y_pos
for i in range(len(parameters)):
parm = parameters[i]
if (effectname == 'plucktremlpfverb') and (parm[0] == 'lpfDrive'):
x_pos1 = x_pos
x_pos = start_x_pos
y_pos = 575
guifile.write(plant.format(start_x=x_pos, start_y=y_pos, pname=parm[0], analysis_p=analysis_parms,p_min=parm[1][0], p_max=parm[1][1], p_default=parm[1][2], p_skew=parm[1][3], p_incr=parm[1][4]))
x_pos+=plant_height
guifile.write(';next x position available below plants is {}'.format(max([x_pos,x_pos1])))
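# Usage note (added; not part of the original script): the effect name is taken
# from sys.argv[1], e.g. (assuming the script is saved as make_effect_includes.py):
#
#   python make_effect_includes.py stereopan
#
# which writes three include files for the chosen effect:
#   stereopan_score_events.inc      - one score event per mappable parameter
#   stereopan_parameter_ranges.inc  - chnset lines with each parameter's min/max
#   stereopan_gui_scratchpad.inc    - one Cabbage GUI plant per parameter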
|
import unittest
from matching.cpe_sorter import *
unsorted_cpes = [{'wfn': {'version': '4.0', 'target_sw': 'android_marshmallow'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.0:beta:~~~android_marshmallow~~'},
{'wfn': {'version': '1.0.1.2', 'target_sw': 'android_marshmallow'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:1.0.1.2:beta'},
{'wfn': {'version': '4.1.2', 'target_sw': 'ANY'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.1.2:beta'},
{'wfn': {'version': '4.6.3', 'target_sw': 'windows'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.6.3:beta:~~~windows~~'},
{'wfn': {'version': '4.7.1', 'target_sw': 'android'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.1:beta:~~~android~~'},
{'wfn': {'version': '4.7.2', 'target_sw': 'ANY'},
'uri_binding':'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.2:beta'},
{'wfn': {'version': '4.3.2', 'target_sw': 'linux'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.3.2:beta:~~~linux~~'},
{'wfn': {'version': '2.3.1', 'target_sw': 'linux'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2.3.1:beta'},
{'wfn': {'version': '4.7.3', 'target_sw': 'mac_os_x'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.3:beta:~~~mac_os_x~~'}
]
unsorted_cpes_year = [{'wfn': {'version': '2000', 'target_sw': 'android_marshmallow'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2000:beta:~~~android_marshmallow~~'},
{'wfn': {'version': '2007', 'target_sw': 'android_marshmallow'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2007:beta'},
{'wfn': {'version': '4.1.2', 'target_sw': 'ANY'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.1.2:beta'},
{'wfn': {'version': '2010', 'target_sw': 'windows'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2010:beta:~~~windows~~'},
{'wfn': {'version': '4.7.1', 'target_sw': 'android'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.1:beta:~~~android~~'},
{'wfn': {'version': '2001', 'target_sw': 'ANY'},
'uri_binding':'cpe:/a:string_value_with\:double_points:internet_explorer:2001:beta'},
{'wfn': {'version': '4.3.2', 'target_sw': 'linux'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.3.2:beta:~~~linux~~'},
{'wfn': {'version': '2010', 'target_sw': 'linux'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2010:beta'},
{'wfn': {'version': '4.7.3', 'target_sw': 'mac_os_x'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:4.7.3:beta:~~~mac_os_x~~'},
{'wfn': {'version': '2010', 'target_sw': 'mac_os_x'},
'uri_binding': 'cpe:/a:string_value_with\:double_points:internet_explorer:2010:beta:~~~mac_os_x~~'}]
version = '4.7.2'
version_without_points = '4_7-2'
version_year = '2010'
os_windows = 'windows_7'
os_linux = 'linux_ubuntu'
os_android = 'android'
os_mac = 'mac_os_x_10.11'
class TestCPESorter(unittest.TestCase):
def test_sort_cpes_by_software_version(self):
sorted_cpes = sort_cpes_by_version(unsorted_cpes, version)
self.assertEqual(len(unsorted_cpes), len(sorted_cpes))
self.assertEqual(unsorted_cpes[5], sorted_cpes[0]) # 4.7.2
self.assertEqual(unsorted_cpes[4], sorted_cpes[1]) # 4.7.1
self.assertEqual(unsorted_cpes[8], sorted_cpes[2]) # 4.7.3
self.assertEqual(unsorted_cpes[0], sorted_cpes[3]) # 4.0
self.assertEqual(unsorted_cpes[2], sorted_cpes[4]) # 4.1.2
self.assertEqual(unsorted_cpes[3], sorted_cpes[5]) # 4.6.3
self.assertEqual(unsorted_cpes[6], sorted_cpes[6]) # 4.3.2
def test_cpes_and_sorted_cpes_are_equal_when_software_version_not_splitted_by_points(self):
sorted_cpes = sort_cpes_by_version(unsorted_cpes, version_without_points)
self.assertListEqual(unsorted_cpes, sorted_cpes)
def test_sort_cpes_by_version_with_year(self):
sorted_cpes = sort_cpes_by_version(unsorted_cpes_year, version_year)
self.assertEqual(len(unsorted_cpes_year), len(sorted_cpes))
self.assertEqual(unsorted_cpes_year[3], sorted_cpes[0]) # 2010
self.assertEqual(unsorted_cpes_year[7], sorted_cpes[1]) # 2010
self.assertEqual(unsorted_cpes_year[9], sorted_cpes[2]) # 2010
self.assertEqual(unsorted_cpes_year[0], sorted_cpes[3]) # 2000
self.assertEqual(unsorted_cpes_year[1], sorted_cpes[4]) # 2007
self.assertEqual(unsorted_cpes_year[5], sorted_cpes[5]) # 2001
def test_sort_cpes_by_operating_system_windows(self):
sorted_cpes = sort_cpes_by_operating_system(unsorted_cpes, os_windows)
self.assertEqual(len(unsorted_cpes), len(sorted_cpes))
self.assertEqual(unsorted_cpes[3], sorted_cpes[0])
def test_sort_cpes_by_operating_system_linux(self):
sorted_cpes = sort_cpes_by_operating_system(unsorted_cpes, os_linux)
self.assertEqual(len(unsorted_cpes), len(sorted_cpes))
self.assertEqual(unsorted_cpes[6], sorted_cpes[0])
def test_sort_cpes_by_operating_system_android(self):
sorted_cpes = sort_cpes_by_operating_system(unsorted_cpes, os_android)
self.assertEqual(len(unsorted_cpes), len(sorted_cpes))
self.assertEqual(unsorted_cpes[4], sorted_cpes[0])
self.assertEqual(unsorted_cpes[0], sorted_cpes[1])
if __name__ == '__main__':
unittest.main()
|
"""
# Copyright (c) 05 2015 | surya
# 18/05/15 nanang.ask@kubuskotak.com
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# __init__.py
"""
import urlparse
from niimanga.libs.exceptions import HtmlError
from requests import request
class Site:
def __init__(self):
pass
def get_html(self, url, method='GET', **kwargs):
resp = request(method, url, **kwargs)
if resp.status_code != 200:
raise HtmlError({'msg': 'external_request_fail', 'url': url})
return resp.content
def fetch_manga_seed_page(self, url, **kwargs):
return self.get_html(url, **kwargs)
def fetch_chapter_seed_page(self, url, **kwargs):
return self.get_html(url, **kwargs)
def fetch_page_image(self, url, **kwargs):
return self.get_html(url, **kwargs)
def search_by_author(self, author):
"""
Return list of chapter dicts whose keys are:
name
url
site
        Each Site subclass should override this method; if it does not, this
        default implementation is used and returns an empty list. (A minimal
        illustrative sketch appears at the end of this module.)
"""
return []
from mangaeden import MangaEden
from batoto import Batoto
available_sites = [
# Kissmanga(),
# Vitaku(),
Batoto(),
# Mangafox(),
# Mangahere(),
# MangaHereMob(),
MangaEden()
]
def get_site(url):
netloc = urlparse.urlparse(url).netloc
for site in available_sites:
if netloc in site.netlocs:
return site
return None
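# --- Illustrative sketch (hypothetical site, not wired into available_sites) ---
# Shows the contract documented in Site.search_by_author: subclasses define
# `netlocs` for get_site() and return chapter dicts with the keys
# 'name', 'url' and 'site'.
class _ExampleSite(Site):
    netlocs = ['example.com', 'www.example.com']

    def search_by_author(self, author):
        # A real implementation would query or scrape the site here.
        return [{
            'name': 'Some Manga - Chapter 1',
            'url': 'http://example.com/some-manga/1',
            'site': 'example',
        }]
# If _ExampleSite were appended to `available_sites`,
# get_site('http://example.com/some-manga/1') would return that instance.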
|
import os
import socket
import sys
input_host = '127.0.0.1'
input_port = 65000
batch_enabled = int(os.environ.get('_BACKEND_BATCH_MODE', '0'))
if batch_enabled:
    # Since recent Python 2 setups may also provide `builtins` and `input`,
    # we cannot detect Python 2 just by checking for their existence.
if sys.version_info.major > 2:
import builtins
def _input(prompt=''):
sys.stdout.write(prompt)
sys.stdout.flush()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
sock.connect((input_host, input_port))
userdata = sock.recv(1024)
except ConnectionRefusedError:
userdata = b'<user-input-unavailable>'
return userdata.decode()
builtins._input = input # type: ignore
builtins.input = _input
else:
# __builtins__ is an alias dict for __builtin__ in modules other than __main__.
# Thus, we have to explicitly import __builtin__ module in Python 2.
import __builtin__
builtins = __builtin__
def _raw_input(prompt=''):
sys.stdout.write(prompt)
sys.stdout.flush()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((input_host, input_port))
userdata = sock.recv(1024)
except socket.error:
userdata = b'<user-input-unavailable>'
finally:
sock.close()
return userdata.decode()
builtins._raw_input = builtins.raw_input # type: ignore
builtins.raw_input = _raw_input # type: ignore
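# --- Illustrative sketch (assumption): a minimal host-side feeder matching the
# behaviour the patched input()/raw_input() above expects. It accepts a single
# connection on (input_host, input_port) and sends the user's answer; the
# function name and protocol details here are hypothetical.
def _example_feed_user_input(data=b'user answer'):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((input_host, input_port))
    server.listen(1)
    conn, _addr = server.accept()
    try:
        conn.sendall(data)  # received by sock.recv(1024) in _input()/_raw_input()
    finally:
        conn.close()
        server.close()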
|
class Range:
"""A class that mimic's the built-in range class."""
def __init__(self, start, stop=None, step=1):
"""Initialize a Range instance.
Semantics is similar to built-in range class.
"""
if step == 0:
raise ValueError('step cannot be 0')
if stop is None: # special case of range(n)
start, stop = 0, start # should be treated as if range(0,n)
# calculate the effective length once
self._length = max(0, (stop - start + step - 1) // step)
# need knowledge of start and step (but not stop) to support __getitem__
self._start = start
self._step = step
def __len__(self):
"""Return number of entries in the range."""
return self._length
def __getitem__(self, k):
"""Return entry at index k (using standard interpretation if negative)."""
if k < 0:
k += len(self) # attempt to convert negative index
if not 0 <= k < self._length:
raise IndexError('index out of range')
return self._start + k * self._step
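# A few illustrative checks of the semantics described above (assumes the
# class is used like the built-in range for these cases).
if __name__ == '__main__':
    r = Range(2, 20, 3)            # 2, 5, 8, 11, 14, 17
    assert len(r) == 6
    assert r[0] == 2 and r[5] == 17
    assert r[-1] == 17             # negative indices are supported
    assert len(Range(5)) == 5      # Range(n) behaves like Range(0, n)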
|
"""
This script edits your backends conf file by replacing stuff like:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = 78910
with:
[bnporc21]
_module = bnporc
website = pp
login = 123456
password = `pass show weboob/bnporc21`
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
FILE = os.getenv('WEBOOB_BACKENDS') or os.path.expanduser('~/.config/weboob/backends')
if not os.path.exists(FILE):
print('the backends file does not exist')
sys.exit(os.EX_NOINPUT)
if not shutil.which('pass'):
print('the "pass" tool could not be found')
sys.exit(os.EX_UNAVAILABLE)
errors = 0
seen = set()
backend = None
with open(FILE) as inp:
with tempfile.NamedTemporaryFile('w', delete=False, dir=os.path.dirname(FILE)) as outp:
for line in inp:
line = line.strip()
mtc = re.match(r'password\s*=\s*(\S.*)$', line)
if mtc and not mtc.group(1).startswith('`'):
cmd = ['pass', 'insert', 'weboob/%s' % backend]
stdin = 2 * ('%s\n' % mtc.group(1))
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate(stdin.encode('utf-8'))
if proc.returncode == 0:
print('password = `pass show weboob/%s`' % backend, file=outp)
continue
else:
errors += 1
print('warning: could not store password for backend %r' % backend)
mtc = re.match(r'\[(.+)\]', line)
if mtc:
backend = mtc.group(1)
if backend in seen:
print('error: backend %r is present multiple times' % backend)
sys.exit(os.EX_DATAERR)
seen.add(backend)
print(line, file=outp)
os.rename(outp.name, FILE)
if errors:
print('%d errors were encountered when storing passwords securely' % errors)
sys.exit(2)
|
import oauth2 # XXX pumazi: factor this out
from webob.multidict import MultiDict, NestedMultiDict
from webob.request import Request as WebObRequest
__all__ = ['Request']
class Request(WebObRequest):
"""The OAuth version of the WebOb Request.
Provides an easier way to obtain OAuth request parameters
(e.g. oauth_token) from the WSGI environment."""
def _checks_positive_for_oauth(self, params_var):
"""Simple check for the presence of OAuth parameters."""
checks = [ p.find('oauth_') >= 0 for p in params_var ]
return True in checks
@property
def str_oauth_header(self):
extracted = {}
# Check for OAuth in the Header
if 'authorization' in self.headers:
auth_header = self.headers['authorization']
# Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                # Drop the literal "OAuth " prefix; str.lstrip() would strip any
                # leading characters from that set, not just the prefix.
                auth_header = auth_header[6:]
try:
# Extract the parameters from the header.
extracted = oauth2.Request._split_header(auth_header)
                except Exception:
                    raise oauth2.Error('Unable to parse OAuth parameters from '
                                       'the Authorization header.')
return extracted
@property
def str_oauth_POST(self):
extracted = {}
if self._checks_positive_for_oauth(self.str_POST):
extracted = dict([ (k, v,) for k, v in self.str_POST.iteritems()
if (k.find('oauth_') >= 0) ])
return extracted
@property
def str_oauth_GET(self):
extracted = {}
if self._checks_positive_for_oauth(self.str_GET):
extracted = dict([ (k, v,) for k, v in self.str_GET.iteritems()
if (k.find('oauth_') >= 0) ])
return extracted
def params(self):
params = WebObRequest.params.fget(self)
return NestedMultiDict(params, self.str_oauth_header)
params = property(params, doc=WebObRequest.params.__doc__)
@property
def oauth_params(self):
"""Simple way to get the OAuth parameters without sifting through
the entire stack of parameters.
We check the header first, because it is low hanging fruit.
However, it would be more efficient to check for the POSTed
parameters, because the specification defines the POST method as the
recommended request type before using GET or the Authorization
header."""
extracted = {}
# OAuth in the Header
extracted.update(self.str_oauth_header)
# OAuth in a GET or POST method
extracted.update(self.str_oauth_GET)
extracted.update(self.str_oauth_POST)
# Return the extracted oauth variables
return MultiDict(extracted)
@property
def nonoauth_params(self):
"""Simple way to get the non-OAuth parameters from the request."""
oauth_param_keys = self.oauth_params.keys()
return dict([(k, v) for k, v in self.params.iteritems() if k not in oauth_param_keys])
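# --- Illustrative usage (assumes the legacy WebOb API this module relies on,
# i.e. str_GET / str_POST as used above; the values below are examples). ---
# req = Request.blank('/photos?oauth_token=abc123&page=2')
# req.headers['Authorization'] = ('OAuth oauth_consumer_key="key", '
#                                 'oauth_signature_method="PLAINTEXT"')
# req.oauth_params     # MultiDict of oauth_* values from the header and query
# req.nonoauth_params  # {'page': '2'}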
|
import re
import json
from datetime import datetime
from weboob.browser.pages import LoggedPage, HTMLPage, JsonPage
from weboob.browser.elements import DictElement, ItemElement, method
from weboob.browser.filters.standard import Date, CleanDecimal, CleanText, Format, Field, Env, Regexp, Currency
from weboob.browser.filters.json import Dict
from weboob.capabilities import NotAvailable
from weboob.capabilities.bank import Account, Loan
from weboob.capabilities.contact import Advisor
from weboob.capabilities.profile import Profile
from weboob.capabilities.bill import DocumentTypes, Subscription, Document
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
from weboob.exceptions import BrowserUnavailable
class Transaction(FrenchTransaction):
PATTERNS = [
(re.compile(r'^CB (?P<text>.*?) FACT (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
(re.compile(r'^RET(RAIT)? DAB (?P<dd>\d+)-(?P<mm>\d+)-.*', re.IGNORECASE), FrenchTransaction.TYPE_WITHDRAWAL),
(re.compile(r'^RET(RAIT)? DAB (?P<text>.*?) (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}) (?P<HH>\d{2})H(?P<MM>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_WITHDRAWAL),
(re.compile(r'^VIR(EMENT)?(\.PERIODIQUE)? (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_TRANSFER),
(re.compile(r'^PRLV (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_ORDER),
(re.compile(r'^CHEQUE.*', re.IGNORECASE), FrenchTransaction.TYPE_CHECK),
(re.compile(r'^(CONVENTION \d+ )?COTIS(ATION)? (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_BANK),
(re.compile(r'^\* (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_BANK),
(re.compile(r'^REMISE (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_DEPOSIT),
(re.compile(r'^(?P<text>.*)( \d+)? QUITTANCE .*', re.IGNORECASE), FrenchTransaction.TYPE_ORDER),
(re.compile(r'^CB [\d\*]+ TOT DIF .*', re.IGNORECASE), FrenchTransaction.TYPE_CARD_SUMMARY),
(re.compile(r'^CB [\d\*]+ (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
(re.compile(r'^CB (?P<text>.*?) (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
(re.compile(r'\*CB (?P<text>.*?) (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
(re.compile(r'^FAC CB (?P<text>.*?) (?P<dd>\d{2})/(?P<mm>\d{2})', re.IGNORECASE), FrenchTransaction.TYPE_CARD),
]
class LoginPage(JsonPage):
def get_response(self):
return self.doc
class CenetLoginPage(HTMLPage):
def login(self, username, password, nuser, codeCaisse, _id, vkpass):
form = self.get_form(id='aspnetForm')
form['__EVENTTARGET'] = "btn_authentifier_securise"
form['__EVENTARGUMENT'] = '{"CodeCaisse":"%s","NumeroBad":"%s","NumeroUsager":"%s",\
"MotDePasse":"%s","IdentifiantClavier":"%s","ChaineConnexion":"%s"}' \
% (codeCaisse, username, nuser, password, _id, vkpass)
form.submit()
class CenetHomePage(LoggedPage, HTMLPage):
@method
class get_advisor(ItemElement):
klass = Advisor
obj_name = CleanText('//section[contains(@id, "ChargeAffaires")]//strong')
obj_email = CleanText('//li[contains(@id, "MailContact")]')
obj_phone = CleanText('//li[contains(@id, "TelAgence")]', replace=[('.', '')])
obj_mobile = NotAvailable
obj_agency = CleanText('//section[contains(@id, "Agence")]//strong')
obj_address = CleanText('//li[contains(@id, "AdresseAgence")]')
def obj_fax(self):
return CleanText('//li[contains(@id, "FaxAgence")]', replace=[('.', '')])(self) or NotAvailable
@method
class get_profile(ItemElement):
klass = Profile
obj_name = CleanText('//li[@class="identite"]/a/span')
class CenetJsonPage(JsonPage):
def __init__(self, browser, response, *args, **kwargs):
super(CenetJsonPage, self).__init__(browser, response, *args, **kwargs)
        # The server wraps the real payload as a JSON string under the 'd' key,
        # so it has to be decoded a second time.
self.doc = json.loads(self.doc['d'])
if self.doc['Erreur'] and (self.doc['Erreur']['Titre'] or self.doc['Erreur']['Code']):
self.logger.warning('error on %r: %s', self.url, self.doc['Erreur']['Titre'] or self.doc['Erreur']['Code'])
raise BrowserUnavailable(self.doc['Erreur']['Titre'] or self.doc['Erreur']['Description'])
self.doc['DonneesSortie'] = json.loads(self.doc['DonneesSortie'])
class CenetAccountsPage(LoggedPage, CenetJsonPage):
ACCOUNT_TYPES = {'CCP': Account.TYPE_CHECKING}
@method
class get_accounts(DictElement):
item_xpath = "DonneesSortie"
class item(ItemElement):
klass = Account
obj_id = CleanText(Dict('Numero'))
obj_label = CleanText(Dict('Intitule'))
obj_iban = CleanText(Dict('IBAN'))
def obj_balance(self):
absolut_amount = CleanDecimal(Dict('Solde/Valeur'))(self)
if CleanText(Dict('Solde/CodeSens'))(self) == 'D':
return -absolut_amount
return absolut_amount
def obj_currency(self):
return CleanText(Dict('Devise'))(self).upper()
def obj_type(self):
return self.page.ACCOUNT_TYPES.get(Dict('TypeCompte')(self), Account.TYPE_UNKNOWN)
def obj__formated(self):
return self.el
class CenetLoanPage(LoggedPage, CenetJsonPage):
@method
class get_accounts(DictElement):
item_xpath = "DonneesSortie"
class item(ItemElement):
klass = Loan
obj_id = CleanText(Dict('IdentifiantUniqueContrat'), replace=[(' ', '-')])
obj_label = CleanText(Dict('Libelle'))
obj_total_amount = CleanDecimal(Dict('MontantInitial/Valeur'))
obj_currency = Currency(Dict('MontantInitial/Devise'))
obj_type = Account.TYPE_LOAN
obj_duration = CleanDecimal(Dict('Duree'))
obj_rate = CleanDecimal.French(Dict('Taux'))
obj_next_payment_amount = CleanDecimal(Dict('MontantProchaineEcheance/Valeur'))
def obj_balance(self):
balance = CleanDecimal(Dict('CapitalRestantDu/Valeur'))(self)
if balance > 0:
balance *= -1
return balance
def obj_subscription_date(self):
sub_date = Dict('DateDebutEffet')(self)
if sub_date:
date = CleanDecimal().filter(sub_date) / 1000
return datetime.fromtimestamp(date).date()
return NotAvailable
def obj_maturity_date(self):
mat_date = Dict('DateDerniereEcheance')(self)
if mat_date:
date = CleanDecimal().filter(mat_date) / 1000
return datetime.fromtimestamp(date).date()
return NotAvailable
def obj_next_payment_date(self):
next_date = Dict('DateProchaineEcheance')(self)
if next_date:
date = CleanDecimal().filter(next_date) / 1000
return datetime.fromtimestamp(date).date()
return NotAvailable
class CenetCardsPage(LoggedPage, CenetJsonPage):
def get_cards(self):
cards = Dict('DonneesSortie')(self.doc)
# Remove dates to prevent bad parsing
def reword_dates(card):
tmp_card = card
for k, v in tmp_card.items():
if isinstance(v, dict):
v = reword_dates(v)
if k == "Date" and v is not None and "Date" in v:
card[k] = None
for card in cards:
reword_dates(card)
return cards
class CenetAccountHistoryPage(LoggedPage, CenetJsonPage):
TR_TYPES_LABEL = {
'VIR': Transaction.TYPE_TRANSFER,
'CHEQUE': Transaction.TYPE_CHECK,
'REMISE CHEQUE': Transaction.TYPE_CASH_DEPOSIT,
'PRLV': Transaction.TYPE_ORDER,
}
TR_TYPES_API = {
'VIR': Transaction.TYPE_TRANSFER,
'PE': Transaction.TYPE_ORDER, # PRLV
'CE': Transaction.TYPE_CHECK, # CHEQUE
'DE': Transaction.TYPE_CASH_DEPOSIT, # APPRO
'PI': Transaction.TYPE_CASH_DEPOSIT, # REMISE CHEQUE
}
@method
class get_history(DictElement):
item_xpath = "DonneesSortie"
class item(ItemElement):
klass = Transaction
obj_raw = Format('%s %s', Dict('Libelle'), Dict('Libelle2'))
obj_label = CleanText(Dict('Libelle'))
obj_date = Date(Dict('DateGroupImputation'), dayfirst=True)
obj_rdate = Date(Dict('DateGroupReglement'), dayfirst=True)
def obj_type(self):
ret = Transaction.TYPE_UNKNOWN
                # The API may send the same key for 'PRLV' and 'VIR' transactions,
                # so the label is checked first, then the API key.
for k, v in self.page.TR_TYPES_LABEL.items():
if Field('label')(self).startswith(k):
ret = v
break
if ret == Transaction.TYPE_UNKNOWN:
ret = self.page.TR_TYPES_API.get(Dict('TypeOperationDisplay')(self), Transaction.TYPE_UNKNOWN)
if ret != Transaction.TYPE_UNKNOWN:
return ret
for pattern, type in Transaction.PATTERNS:
if pattern.match(Field('raw')(self)):
return type
return Transaction.TYPE_UNKNOWN
def obj_amount(self):
amount = CleanDecimal(Dict('Montant/Valeur'))(self)
return -amount if Dict('Montant/CodeSens')(self) == "D" else amount
def next_offset(self):
offset = Dict('OffsetSortie')(self.doc)
if offset:
assert Dict('EstComplete')(self.doc) == 'false'
return offset
class CenetCardSummaryPage(LoggedPage, CenetJsonPage):
@method
class get_history(DictElement):
item_xpath = "DonneesSortie/OperationsCB"
class item(ItemElement):
klass = Transaction
obj_label = CleanText(Dict('Libelle'))
obj_date = Date(Dict('DateGroupImputation'), dayfirst=True)
obj_type = Transaction.TYPE_DEFERRED_CARD
def obj_raw(self):
label = Dict('Libelle')(self)
label2 = Dict('Libelle2')(self)
if label2 and label2 != 'None':
return '%s %s' % (label, label2)
else:
return label
            def obj_rdate(self):
                rdate = re.search(r'(FACT\s)(\d{6})', Field('label')(self))
                if rdate and rdate.group(2):
                    return Date(dayfirst=True).filter(rdate.group(2))
                return NotAvailable
def obj_amount(self):
amount = CleanDecimal(Dict('Montant/Valeur'))(self)
return -amount if Dict('Montant/CodeSens')(self) == "D" else amount
class _LogoutPage(HTMLPage):
def on_load(self):
raise BrowserUnavailable(CleanText('//*[@class="messErreur"]')(self.doc))
class ErrorPage(_LogoutPage):
pass
class UnavailablePage(HTMLPage):
def on_load(self):
raise BrowserUnavailable(CleanText('//div[@id="message_error_hs"]')(self.doc))
class SubscriptionPage(LoggedPage, CenetJsonPage):
@method
class iter_subscription(DictElement):
item_xpath = 'DonneesSortie'
class item(ItemElement):
klass = Subscription
obj_id = CleanText(Dict('Numero'))
obj_label = CleanText(Dict('Intitule'))
obj_subscriber = Env('subscriber')
@method
class iter_documents(DictElement):
item_xpath = 'DonneesSortie'
class item(ItemElement):
klass = Document
obj_id = Format('%s_%s_%s', Env('sub_id'), Dict('Numero'), CleanText(Env('french_date'), symbols='/'))
obj_format = 'pdf'
obj_type = DocumentTypes.OTHER
obj__numero = CleanText(Dict('Numero'))
obj__sub_id = Env('sub_id')
obj__sub_label = Env('sub_label')
obj__download_id = CleanText(Dict('IdDocument'))
def obj_date(self):
date = Regexp(Dict('DateArrete'), r'Date\((\d+)\)')(self)
date = int(date) // 1000
return datetime.fromtimestamp(date).date()
def obj_label(self):
return '%s %s' % (CleanText(Dict('Libelle'))(self), Env('french_date')(self))
def parse(self, el):
self.env['french_date'] = Field('date')(self).strftime('%d/%m/%Y')
class DownloadDocumentPage(LoggedPage, HTMLPage):
def download_form(self, document):
data = {
'Numero': document._numero,
'Libelle': document._sub_label.replace(' ', '+'),
'DateArrete': '',
'IdDocument': document._download_id
}
form = self.get_form(id='aspnetForm')
form['__EVENTTARGET'] = 'btn_telecharger'
form['__EVENTARGUMENT'] = json.dumps(data)
return form.submit()
|
"""
.15925 Editor
Copyright 2014 TechInvestLab.ru dot15926@gmail.com
.15925 Editor is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3.0 of the License, or (at your option) any later version.
.15925 Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with .15925 Editor.
"""
from iso15926.tools.environment import EnvironmentContext
from PySide.QtCore import *
from PySide.QtGui import *
import os
from framework.dialogs import Choice
class TestWindow(QDialog):
vis_label = tm.main.tests_title
tests_dir = 'tests'
def __init__(self):
QDialog.__init__(self, appdata.topwindow, Qt.WindowSystemMenuHint | Qt.WindowTitleHint)
self.setWindowTitle(self.vis_label)
layout = QVBoxLayout(self)
box = QGroupBox(tm.main.tests_field, self)
self.tests_list = QListWidget(box)
boxlayout = QHBoxLayout(box)
boxlayout.addWidget(self.tests_list)
layout.addWidget(box)
for n in os.listdir(self.tests_dir):
if n.startswith(".") or not n.endswith('.py'):
continue
sp = os.path.splitext(n)
item = QListWidgetItem(sp[0], self.tests_list)
item.setCheckState(Qt.Unchecked)
self.btn_prepare = QPushButton(tm.main.prepare, self)
self.btn_prepare.setToolTip(tm.main.prepare_selected_tests)
self.btn_prepare.clicked.connect(self.OnPrepare)
self.btn_run = QPushButton(tm.main.run, self)
self.btn_run.setToolTip(tm.main.run_selected_tests)
self.btn_run.clicked.connect(self.OnRun)
self.btn_sel_all = QPushButton(tm.main.select_all, self)
self.btn_sel_all.clicked.connect(self.SelectAll)
self.btn_unsel_all = QPushButton(tm.main.unselect_all, self)
self.btn_unsel_all.clicked.connect(self.UnselectAll)
self.btn_cancel = QPushButton(tm.main.cancel, self)
self.btn_cancel.clicked.connect(self.reject)
btnlayout = QHBoxLayout()
btnlayout.addWidget(self.btn_sel_all)
btnlayout.addWidget(self.btn_unsel_all)
btnlayout.addStretch()
btnlayout.addWidget(self.btn_prepare)
btnlayout.addWidget(self.btn_run)
btnlayout.addWidget(self.btn_cancel)
layout.addLayout(btnlayout)
box = QGroupBox(tm.main.tests_result_field, self)
self.report = QPlainTextEdit(self)
boxlayout = QHBoxLayout(box)
boxlayout.addWidget(self.report)
layout.addWidget(box)
self.exec_()
    def SelectAll(self):
        for i in xrange(self.tests_list.count()):
            self.tests_list.item(i).setCheckState(Qt.Checked)
    def UnselectAll(self):
        for i in xrange(self.tests_list.count()):
            self.tests_list.item(i).setCheckState(Qt.Unchecked)
    def OnPrepare(self):
        if Choice(tm.main.tests_prepare_warning):
            for i in xrange(self.tests_list.count()):
                item = self.tests_list.item(i)
                if item.checkState() != Qt.Checked:
                    continue
                name = item.text()
                self.report.appendPlainText(tm.main.tests_preparing.format(name))
                locals = {'mode': 'prepare'}
                ec = EnvironmentContext(None, locals)
                ec.ExecutePythonFile(os.path.join(self.tests_dir, name + '.py'))
            self.report.appendPlainText(tm.main.tests_preparing_done)
def OnRun(self):
all_passed = True
self.report.appendPlainText(tm.main.tests_running)
count = 0
passed = 0
for i in xrange(self.tests_list.count()):
item = self.tests_list.item(i)
name = item.text()
if not item.checkState() == Qt.Checked:
continue
count += 1
locals = {'mode': 'run', 'passed': False}
ec = EnvironmentContext(None, locals)
ec.ExecutePythonFile(os.path.join(self.tests_dir, name + '.py'))
if locals['passed']:
passed += 1
self.report.appendPlainText(tm.main.test_passed.format(name))
else:
self.report.appendPlainText(tm.main.test_failed.format(name))
self.report.appendPlainText(tm.main.tests_result)
self.report.appendPlainText(tm.main.tests_result_info.format(passed, count))
if os.path.exists(TestWindow.tests_dir):
@public('workbench.menu.help')
class xTestMenu:
vis_label = tm.main.menu_tests
@classmethod
def Do(cls):
TestWindow()
|
import os
import struct
from binascii import unhexlify
from shutil import copy as copyfile
from twisted.internet.defer import inlineCallbacks
from Tribler.Core.CacheDB.SqliteCacheDBHandler import TorrentDBHandler, MyPreferenceDBHandler, ChannelCastDBHandler
from Tribler.Core.CacheDB.sqlitecachedb import str2bin
from Tribler.Core.Category.Category import Category
from Tribler.Core.TorrentDef import TorrentDef
from Tribler.Core.leveldbstore import LevelDbStore
from Tribler.Test.Core.test_sqlitecachedbhandler import AbstractDB
from Tribler.Test.common import TESTS_DATA_DIR
S_TORRENT_PATH_BACKUP = os.path.join(TESTS_DATA_DIR, 'bak_single.torrent')
M_TORRENT_PATH_BACKUP = os.path.join(TESTS_DATA_DIR, 'bak_multiple.torrent')
class TestTorrentFullSessionDBHandler(AbstractDB):
def setUpPreSession(self):
super(TestTorrentFullSessionDBHandler, self).setUpPreSession()
self.config.set_megacache_enabled(True)
@inlineCallbacks
def setUp(self):
yield super(TestTorrentFullSessionDBHandler, self).setUp()
self.tdb = TorrentDBHandler(self.session)
def test_initialize(self):
self.tdb.initialize()
self.assertIsNone(self.tdb.mypref_db)
self.assertIsNone(self.tdb.votecast_db)
self.assertIsNone(self.tdb.channelcast_db)
class TestTorrentDBHandler(AbstractDB):
def addTorrent(self):
old_size = self.tdb.size()
old_tracker_size = self.tdb._db.size('TrackerInfo')
s_infohash = unhexlify('44865489ac16e2f34ea0cd3043cfd970cc24ec09')
m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
single_torrent_file_path = os.path.join(self.getStateDir(), 'single.torrent')
multiple_torrent_file_path = os.path.join(self.getStateDir(), 'multiple.torrent')
copyfile(S_TORRENT_PATH_BACKUP, single_torrent_file_path)
copyfile(M_TORRENT_PATH_BACKUP, multiple_torrent_file_path)
single_tdef = TorrentDef.load(single_torrent_file_path)
self.assertEqual(s_infohash, single_tdef.get_infohash())
multiple_tdef = TorrentDef.load(multiple_torrent_file_path)
self.assertEqual(m_infohash, multiple_tdef.get_infohash())
self.tdb.addExternalTorrent(single_tdef)
self.tdb.addExternalTorrent(multiple_tdef)
single_torrent_id = self.tdb.getTorrentID(s_infohash)
multiple_torrent_id = self.tdb.getTorrentID(m_infohash)
self.assertEqual(self.tdb.getInfohash(single_torrent_id), s_infohash)
single_name = 'Tribler_4.1.7_src.zip'
multiple_name = 'Tribler_4.1.7_src'
self.assertEqual(self.tdb.size(), old_size + 2)
new_tracker_table_size = self.tdb._db.size('TrackerInfo')
self.assertLess(old_tracker_size, new_tracker_table_size)
sname = self.tdb.getOne('name', torrent_id=single_torrent_id)
self.assertEqual(sname, single_name)
mname = self.tdb.getOne('name', torrent_id=multiple_torrent_id)
self.assertEqual(mname, multiple_name)
s_size = self.tdb.getOne('length', torrent_id=single_torrent_id)
self.assertEqual(s_size, 1583233)
m_size = self.tdb.getOne('length', torrent_id=multiple_torrent_id)
self.assertEqual(m_size, 5358560)
cat = self.tdb.getOne('category', torrent_id=multiple_torrent_id)
self.assertEqual(cat, u'xxx')
s_status = self.tdb.getOne('status', torrent_id=single_torrent_id)
self.assertEqual(s_status, u'unknown')
m_comment = self.tdb.getOne('comment', torrent_id=multiple_torrent_id)
comments = 'www.tribler.org'
self.assertGreater(m_comment.find(comments), -1)
comments = 'something not inside'
self.assertEqual(m_comment.find(comments), -1)
m_trackers = self.tdb.getTrackerListByInfohash(m_infohash)
self.assertEqual(len(m_trackers), 8)
self.assertIn('http://tpb.tracker.thepiratebay.org/announce', m_trackers)
s_torrent = self.tdb.getTorrent(s_infohash)
m_torrent = self.tdb.getTorrent(m_infohash)
self.assertEqual(s_torrent['name'], 'Tribler_4.1.7_src.zip')
self.assertEqual(m_torrent['name'], 'Tribler_4.1.7_src')
self.assertEqual(m_torrent['last_tracker_check'], 0)
def updateTorrent(self):
m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
self.tdb.updateTorrent(m_infohash, relevance=3.1415926, category=u'Videoclips',
status=u'good', seeder=123, leecher=321,
last_tracker_check=1234567,
other_key1='abcd', other_key2=123)
multiple_torrent_id = self.tdb.getTorrentID(m_infohash)
category = self.tdb.getOne('category', torrent_id=multiple_torrent_id)
self.assertEqual(category, u'Videoclips')
status = self.tdb.getOne('status', torrent_id=multiple_torrent_id)
self.assertEqual(status, u'good')
seeder = self.tdb.getOne('num_seeders', torrent_id=multiple_torrent_id)
self.assertEqual(seeder, 123)
leecher = self.tdb.getOne('num_leechers', torrent_id=multiple_torrent_id)
self.assertEqual(leecher, 321)
last_tracker_check = self.tdb.getOne('last_tracker_check', torrent_id=multiple_torrent_id)
self.assertEqual(last_tracker_check, 1234567)
def setUpPreSession(self):
super(TestTorrentDBHandler, self).setUpPreSession()
self.config.set_megacache_enabled(True)
self.config.set_torrent_store_enabled(True)
@inlineCallbacks
def setUp(self):
yield super(TestTorrentDBHandler, self).setUp()
from Tribler.Core.APIImplementation.LaunchManyCore import TriblerLaunchMany
from Tribler.Core.Modules.tracker_manager import TrackerManager
self.session.lm = TriblerLaunchMany()
self.session.lm.tracker_manager = TrackerManager(self.session)
self.tdb = TorrentDBHandler(self.session)
self.tdb.torrent_dir = TESTS_DATA_DIR
self.tdb.category = Category()
self.tdb.mypref_db = MyPreferenceDBHandler(self.session)
@inlineCallbacks
def tearDown(self):
self.tdb.mypref_db.close()
self.tdb.mypref_db = None
self.tdb.close()
self.tdb = None
yield super(TestTorrentDBHandler, self).tearDown()
def test_hasTorrent(self):
infohash_str = 'AA8cTG7ZuPsyblbRE7CyxsrKUCg='
infohash = str2bin(infohash_str)
self.assertTrue(self.tdb.hasTorrent(infohash))
self.assertTrue(self.tdb.hasTorrent(infohash)) # cache will trigger
fake_infohash = 'fake_infohash_100000'
self.assertFalse(self.tdb.hasTorrent(fake_infohash))
def test_get_infohash(self):
self.assertTrue(self.tdb.getInfohash(1))
self.assertFalse(self.tdb.getInfohash(1234567))
def test_add_update_torrent(self):
self.addTorrent()
self.updateTorrent()
def test_update_torrent_from_metainfo(self):
# Add torrent first
infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
# Only infohash is added to the database
self.tdb.addOrGetTorrentID(infohash)
# Then update the torrent with metainfo
metainfo = {'info': {'files': [{'path': ['Something.something.pdf'], 'length': 123456789},
{'path': ['Another-thing.jpg'], 'length': 100000000}],
'piece length': 2097152,
'name': '\xc3Something awesome (2015)',
'pieces': ''},
'seeders': 0, 'initial peers': [],
'leechers': 36, 'download_exists': False, 'nodes': []}
self.tdb.update_torrent_with_metainfo(infohash, metainfo)
# Check updates are correct
torrent_id = self.tdb.getTorrentID(infohash)
name = self.tdb.getOne('name', torrent_id=torrent_id)
self.assertEqual(name, u'\xc3Something awesome (2015)')
num_files = self.tdb.getOne('num_files', torrent_id=torrent_id)
self.assertEqual(num_files, 2)
length = self.tdb.getOne('length', torrent_id=torrent_id)
self.assertEqual(length, 223456789)
def test_add_external_torrent_no_def_existing(self):
infohash = str2bin('AA8cTG7ZuPsyblbRE7CyxsrKUCg=')
self.tdb.addExternalTorrentNoDef(infohash, "test torrent", [], [], 1234)
self.assertTrue(self.tdb.hasTorrent(infohash))
def test_add_external_torrent_no_def_no_files(self):
infohash = unhexlify('48865489ac16e2f34ea0cd3043cfd970cc24ec09')
self.tdb.addExternalTorrentNoDef(infohash, "test torrent", [], [], 1234)
self.assertFalse(self.tdb.hasTorrent(infohash))
def test_add_external_torrent_no_def_one_file(self):
infohash = unhexlify('49865489ac16e2f34ea0cd3043cfd970cc24ec09')
self.tdb.addExternalTorrentNoDef(infohash, "test torrent", [("file1", 42)],
['http://localhost/announce'], 1234)
self.assertTrue(self.tdb.getTorrentID(infohash))
def test_add_external_torrent_no_def_more_files(self):
infohash = unhexlify('50865489ac16e2f34ea0cd3043cfd970cc24ec09')
self.tdb.addExternalTorrentNoDef(infohash, "test torrent", [("file1", 42), ("file2", 43)],
[], 1234, extra_info={"seeder": 2, "leecher": 3})
self.assertTrue(self.tdb.getTorrentID(infohash))
def test_add_external_torrent_no_def_invalid(self):
infohash = unhexlify('50865489ac16e2f34ea0cd3043cfd970cc24ec09')
self.tdb.addExternalTorrentNoDef(infohash, "test torrent", [("file1", {}), ("file2", 43)],
[], 1234)
self.assertFalse(self.tdb.getTorrentID(infohash))
def test_add_get_torrent_id(self):
infohash = str2bin('AA8cTG7ZuPsyblbRE7CyxsrKUCg=')
self.assertEqual(self.tdb.addOrGetTorrentID(infohash), 1)
new_infohash = unhexlify('50865489ac16e2f34ea0cd3043cfd970cc24ec09')
self.assertEqual(self.tdb.addOrGetTorrentID(new_infohash), 4859)
def test_add_get_torrent_ids_return(self):
infohash = str2bin('AA8cTG7ZuPsyblbRE7CyxsrKUCg=')
new_infohash = unhexlify('50865489ac16e2f34ea0cd3043cfd970cc24ec09')
tids, inserted = self.tdb.addOrGetTorrentIDSReturn([infohash, new_infohash])
self.assertEqual(tids, [1, 4859])
self.assertEqual(len(inserted), 1)
def test_index_torrent_existing(self):
self.tdb._indexTorrent(1, "test", [])
def test_getCollectedTorrentHashes(self):
res = self.tdb.getNumberCollectedTorrents()
self.assertEqual(res, 4847)
def test_freeSpace(self):
# Manually set the torrent store because register is not called.
self.session.lm.torrent_store = LevelDbStore(self.session.config.get_torrent_store_dir())
old_res = self.tdb.getNumberCollectedTorrents()
self.tdb.freeSpace(20)
res = self.tdb.getNumberCollectedTorrents()
self.session.lm.torrent_store.close()
self.assertEqual(res, old_res-20)
def test_get_search_suggestions(self):
self.assertEqual(self.tdb.getSearchSuggestion(["content", "cont"]), ["content 1"])
def test_get_autocomplete_terms(self):
self.assertEqual(len(self.tdb.getAutoCompleteTerms("content", 100)), 0)
def test_get_recently_randomly_collected_torrents(self):
self.assertEqual(len(self.tdb.getRecentlyCollectedTorrents(limit=10)), 10)
self.assertEqual(len(self.tdb.getRandomlyCollectedTorrents(100000000, limit=10)), 3)
def test_get_recently_checked_torrents(self):
self.assertEqual(len(self.tdb.getRecentlyCheckedTorrents(limit=5)), 5)
def test_select_torrents_to_collect(self):
infohash = str2bin('AA8cTG7ZuPsyblbRE7CyxsrKUCg=')
self.assertEqual(len(self.tdb.select_torrents_to_collect(infohash)), 0)
def test_get_torrents_stats(self):
self.assertEqual(self.tdb.getTorrentsStats(), (4847, 6519179841442, 187195))
def test_get_library_torrents(self):
self.assertEqual(len(self.tdb.getLibraryTorrents(['infohash'])), 12)
def test_search_names_no_sort(self):
"""
        Test whether the right number of torrents is returned when searching for torrents in the database
"""
columns = ['T.torrent_id', 'infohash', 'status', 'num_seeders']
self.tdb.channelcast_db = ChannelCastDBHandler(self.session)
self.assertEqual(len(self.tdb.searchNames(['content'], keys=columns, doSort=False)), 4849)
self.assertEqual(len(self.tdb.searchNames(['content', '1'], keys=columns, doSort=False)), 1)
def test_search_names_sort(self):
"""
        Test whether the right number of sorted torrents is returned when searching for torrents in the database
"""
columns = ['T.torrent_id', 'infohash', 'status', 'num_seeders']
self.tdb.channelcast_db = ChannelCastDBHandler(self.session)
results = self.tdb.searchNames(['content'], keys=columns)
self.assertEqual(len(results), 4849)
self.assertEqual(results[0][3], 493785)
def test_search_local_torrents(self):
"""
Test the search procedure in the local database when searching for torrents
"""
results = self.tdb.search_in_local_torrents_db('content', ['infohash', 'num_seeders'])
self.assertEqual(len(results), 4849)
self.assertNotEqual(results[0][-1], 0.0) # Relevance score of result should not be zero
results = self.tdb.search_in_local_torrents_db('fdsafasfds', ['infohash'])
self.assertEqual(len(results), 0)
def test_rel_score_remote_torrent(self):
self.tdb.latest_matchinfo_torrent = struct.pack("I" * 12, *([1] * 12)), "torrent"
self.assertNotEqual(self.tdb.relevance_score_remote_torrent("my-torrent.iso"), 0.0)
|
"""
Property reference docs:
- https://docs.microsoft.com/en-us/office/client-developer/outlook/mapi/mapping-canonical-property-names-to-mapi-names#tagged-properties
- https://interoperability.blob.core.windows.net/files/MS-OXPROPS/[MS-OXPROPS].pdf
- https://fossies.org/linux/libpst/xml/MAPI_definitions.pdf
- https://docs.microsoft.com/en-us/office/client-developer/outlook/mapi/mapi-constants
+----------------+----------------+-------------------------------------------------------------------------------+
| Range minimum | Range maximum | Description |
+----------------+----------------+-------------------------------------------------------------------------------+
| 0x0001 | 0x0BFF | Message object envelope property; reserved |
| 0x0C00 | 0x0DFF | Recipient property; reserved |
| 0x0E00 | 0x0FFF | Non-transmittable Message property; reserved |
| 0x1000 | 0x2FFF | Message content property; reserved |
| 0x3000 | 0x33FF | Multi-purpose property that can appear on all or most objects; reserved |
| 0x3400 | 0x35FF | Message store property; reserved |
| 0x3600 | 0x36FF | Folder and address book container property; reserved |
| 0x3700 | 0x38FF | Attachment property; reserved |
| 0x3900 | 0x39FF | Address Book object property; reserved |
| 0x3A00 | 0x3BFF | Mail user object property; reserved |
| 0x3C00 | 0x3CFF | Distribution list property; reserved |
| 0x3D00 | 0x3DFF | Profile section property; reserved |
| 0x3E00 | 0x3EFF | Status object property; reserved |
| 0x4000 | 0x57FF | Transport-defined envelope property |
| 0x5800 | 0x5FFF | Transport-defined recipient property |
| 0x6000 | 0x65FF | User-defined non-transmittable property |
| 0x6600 | 0x67FF | Provider-defined internal non-transmittable property |
| 0x6800 | 0x7BFF | Message class-defined content property |
| 0x7C00 | 0x7FFF | Message class-defined non-transmittable property |
| 0x8000 | 0xFFFF | Reserved for mapping to named properties. The exceptions to this rule are |
| | | some of the address book tagged properties (those with names beginning with |
| | | PIDTagAddressBook). Many are static property IDs but are in this range. |
+----------------+----------------+-------------------------------------------------------------------------------+
""" # noqa: E501
MAPI_ACKNOWLEDGEMENT_MODE = 0x0001
MAPI_ALTERNATE_RECIPIENT_ALLOWED = 0x0002
MAPI_AUTHORIZING_USERS = 0x0003
MAPI_AUTO_FORWARD_COMMENT = 0x0004
MAPI_AUTO_FORWARDED = 0x0005
MAPI_CONTENT_CONFIDENTIALITY_ALGORITHM_ID = 0x0006
MAPI_CONTENT_CORRELATOR = 0x0007
MAPI_CONTENT_IDENTIFIER = 0x0008
MAPI_CONTENT_LENGTH = 0x0009
MAPI_CONTENT_RETURN_REQUESTED = 0x000A
MAPI_CONVERSATION_KEY = 0x000B
MAPI_CONVERSION_EITS = 0x000C
MAPI_CONVERSION_WITH_LOSS_PROHIBITED = 0x000D
MAPI_CONVERTED_EITS = 0x000E
MAPI_DEFERRED_DELIVERY_TIME = 0x000F
MAPI_DELIVER_TIME = 0x0010
MAPI_DISCARD_REASON = 0x0011
MAPI_DISCLOSURE_OF_RECIPIENTS = 0x0012
MAPI_DL_EXPANSION_HISTORY = 0x0013
MAPI_DL_EXPANSION_PROHIBITED = 0x0014
MAPI_EXPIRY_TIME = 0x0015
MAPI_IMPLICIT_CONVERSION_PROHIBITED = 0x0016
MAPI_IMPORTANCE = 0x0017
MAPI_IPM_ID = 0x0018
MAPI_LATEST_DELIVERY_TIME = 0x0019
MAPI_MESSAGE_CLASS = 0x001A
MAPI_MESSAGE_DELIVERY_ID = 0x001B
MAPI_MESSAGE_SECURITY_LABEL = 0x001E
MAPI_OBSOLETED_IPMS = 0x001F
MAPI_ORIGINALLY_INTENDED_RECIPIENT_NAME = 0x0020
MAPI_ORIGINAL_EITS = 0x0021
MAPI_ORIGINATOR_CERTIFICATE = 0x0022
MAPI_ORIGINATOR_DELIVERY_REPORT_REQUESTED = 0x0023
MAPI_ORIGINATOR_RETURN_ADDRESS = 0x0024
MAPI_PARENT_KEY = 0x0025
MAPI_PRIORITY = 0x0026
MAPI_ORIGIN_CHECK = 0x0027
MAPI_PROOF_OF_SUBMISSION_REQUESTED = 0x0028
MAPI_READ_RECEIPT_REQUESTED = 0x0029
MAPI_RECEIPT_TIME = 0x002A
MAPI_RECIPIENT_REASSIGNMENT_PROHIBITED = 0x002B
MAPI_REDIRECTION_HISTORY = 0x002C
MAPI_RELATED_IPMS = 0x002D
MAPI_ORIGINAL_SENSITIVITY = 0x002E
MAPI_LANGUAGES = 0x002F
MAPI_REPLY_TIME = 0x0030
MAPI_REPORT_TAG = 0x0031
MAPI_REPORT_TIME = 0x0032
MAPI_RETURNED_IPM = 0x0033
MAPI_SECURITY = 0x0034
MAPI_INCOMPLETE_COPY = 0x0035
MAPI_SENSITIVITY = 0x0036
MAPI_SUBJECT = 0x0037
MAPI_SUBJECT_IPM = 0x0038
MAPI_CLIENT_SUBMIT_TIME = 0x0039
MAPI_REPORT_NAME = 0x003A
MAPI_SENT_REPRESENTING_SEARCH_KEY = 0x003B
MAPI_X400_CONTENT_TYPE = 0x003C
MAPI_SUBJECT_PREFIX = 0x003D
MAPI_NON_RECEIPT_REASON = 0x003E
MAPI_RECEIVED_BY_ENTRYID = 0x003F
MAPI_RECEIVED_BY_NAME = 0x0040
MAPI_SENT_REPRESENTING_ENTRYID = 0x0041
MAPI_SENT_REPRESENTING_NAME = 0x0042
MAPI_RCVD_REPRESENTING_ENTRYID = 0x0043
MAPI_RCVD_REPRESENTING_NAME = 0x0044
MAPI_REPORT_ENTRYID = 0x0045
MAPI_READ_RECEIPT_ENTRYID = 0x0046
MAPI_MESSAGE_SUBMISSION_ID = 0x0047
MAPI_PROVIDER_SUBMIT_TIME = 0x0048
MAPI_ORIGINAL_SUBJECT = 0x0049
MAPI_DISC_VAL = 0x004A
MAPI_ORIG_MESSAGE_CLASS = 0x004B
MAPI_ORIGINAL_AUTHOR_ENTRYID = 0x004C
MAPI_ORIGINAL_AUTHOR_NAME = 0x004D
MAPI_ORIGINAL_SUBMIT_TIME = 0x004E
MAPI_REPLY_RECIPIENT_ENTRIES = 0x004F
MAPI_REPLY_RECIPIENT_NAMES = 0x0050
MAPI_RECEIVED_BY_SEARCH_KEY = 0x0051
MAPI_RCVD_REPRESENTING_SEARCH_KEY = 0x0052
MAPI_READ_RECEIPT_SEARCH_KEY = 0x0053
MAPI_REPORT_SEARCH_KEY = 0x0054
MAPI_ORIGINAL_DELIVERY_TIME = 0x0055
MAPI_ORIGINAL_AUTHOR_SEARCH_KEY = 0x0056
MAPI_MESSAGE_TO_ME = 0x0057
MAPI_MESSAGE_CC_ME = 0x0058
MAPI_MESSAGE_RECIP_ME = 0x0059
MAPI_ORIGINAL_SENDER_NAME = 0x005A
MAPI_ORIGINAL_SENDER_ENTRYID = 0x005B
MAPI_ORIGINAL_SENDER_SEARCH_KEY = 0x005C
MAPI_ORIGINAL_SENT_REPRESENTING_NAME = 0x005D
MAPI_ORIGINAL_SENT_REPRESENTING_ENTRYID = 0x005E
MAPI_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY = 0x005F
MAPI_START_DATE = 0x0060
MAPI_END_DATE = 0x0061
MAPI_OWNER_APPT_ID = 0x0062
MAPI_RESPONSE_REQUESTED = 0x0063
MAPI_SENT_REPRESENTING_ADDRTYPE = 0x0064
MAPI_SENT_REPRESENTING_EMAIL_ADDRESS = 0x0065
MAPI_ORIGINAL_SENDER_ADDRTYPE = 0x0066
MAPI_ORIGINAL_SENDER_EMAIL_ADDRESS = 0x0067
MAPI_ORIGINAL_SENT_REPRESENTING_ADDRTYPE = 0x0068
MAPI_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS = 0x0069
MAPI_CONVERSATION_TOPIC = 0x0070
MAPI_CONVERSATION_INDEX = 0x0071
MAPI_ORIGINAL_DISPLAY_BCC = 0x0072
MAPI_ORIGINAL_DISPLAY_CC = 0x0073
MAPI_ORIGINAL_DISPLAY_TO = 0x0074
MAPI_RECEIVED_BY_ADDRTYPE = 0x0075
MAPI_RECEIVED_BY_EMAIL_ADDRESS = 0x0076
MAPI_RCVD_REPRESENTING_ADDRTYPE = 0x0077
MAPI_RCVD_REPRESENTING_EMAIL_ADDRESS = 0x0078
MAPI_ORIGINAL_AUTHOR_ADDRTYPE = 0x0079
MAPI_ORIGINAL_AUTHOR_EMAIL_ADDRESS = 0x007A
MAPI_ORIGINALLY_INTENDED_RECIP_ADDRTYPE = 0x007B
MAPI_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS = 0x007C
MAPI_TRANSPORT_MESSAGE_HEADERS = 0x007D
MAPI_DELEGATION = 0x007E
MAPI_TNEF_CORRELATION_KEY = 0x007F
MAPI_CONTENT_INTEGRITY_CHECK = 0x0C00
MAPI_EXPLICIT_CONVERSION = 0x0C01
MAPI_IPM_RETURN_REQUESTED = 0x0C02
MAPI_MESSAGE_TOKEN = 0x0C03
MAPI_NDR_REASON_CODE = 0x0C04
MAPI_NDR_DIAG_CODE = 0x0C05
MAPI_NON_RECEIPT_NOTIFICATION_REQUESTED = 0x0C06
MAPI_DELIVERY_POINT = 0x0C07
MAPI_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED = 0x0C08
MAPI_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT = 0x0C09
MAPI_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY = 0x0C0A
MAPI_PHYSICAL_DELIVERY_MODE = 0x0C0B
MAPI_PHYSICAL_DELIVERY_REPORT_REQUEST = 0x0C0C
MAPI_PHYSICAL_FORWARDING_ADDRESS = 0x0C0D
MAPI_PHYSICAL_FORWARDING_ADDRESS_REQUESTED = 0x0C0E
MAPI_PHYSICAL_FORWARDING_PROHIBITED = 0x0C0F
MAPI_PHYSICAL_RENDITION_ATTRIBUTES = 0x0C10
MAPI_PROOF_OF_DELIVERY = 0x0C11
MAPI_PROOF_OF_DELIVERY_REQUESTED = 0x0C12
MAPI_RECIPIENT_CERTIFICATE = 0x0C13
MAPI_RECIPIENT_NUMBER_FOR_ADVICE = 0x0C14
MAPI_RECIPIENT_TYPE = 0x0C15
MAPI_REGISTERED_MAIL_TYPE = 0x0C16
MAPI_REPLY_REQUESTED = 0x0C17
MAPI_REQUESTED_DELIVERY_METHOD = 0x0C18
MAPI_SENDER_ENTRYID = 0x0C19
MAPI_SENDER_NAME = 0x0C1A
MAPI_SUPPLEMENTARY_INFO = 0x0C1B
MAPI_TYPE_OF_MTS_USER = 0x0C1C
MAPI_SENDER_SEARCH_KEY = 0x0C1D
MAPI_SENDER_ADDRTYPE = 0x0C1E
MAPI_SENDER_EMAIL_ADDRESS = 0x0C1F
MAPI_CURRENT_VERSION = 0x0E00
MAPI_DELETE_AFTER_SUBMIT = 0x0E01
MAPI_DISPLAY_BCC = 0x0E02
MAPI_DISPLAY_CC = 0x0E03
MAPI_DISPLAY_TO = 0x0E04
MAPI_PARENT_DISPLAY = 0x0E05
MAPI_MESSAGE_DELIVERY_TIME = 0x0E06
MAPI_MESSAGE_FLAGS = 0x0E07
MAPI_MESSAGE_SIZE = 0x0E08
MAPI_PARENT_ENTRYID = 0x0E09
MAPI_SENTMAIL_ENTRYID = 0x0E0A
MAPI_CORRELATE = 0x0E0C
MAPI_CORRELATE_MTSID = 0x0E0D
MAPI_DISCRETE_VALUES = 0x0E0E
MAPI_RESPONSIBILITY = 0x0E0F
MAPI_SPOOLER_STATUS = 0x0E10
MAPI_TRANSPORT_STATUS = 0x0E11
MAPI_MESSAGE_RECIPIENTS = 0x0E12
MAPI_MESSAGE_ATTACHMENTS = 0x0E13
MAPI_SUBMIT_FLAGS = 0x0E14
MAPI_RECIPIENT_STATUS = 0x0E15
MAPI_TRANSPORT_KEY = 0x0E16
MAPI_MSG_STATUS = 0x0E17
MAPI_MESSAGE_DOWNLOAD_TIME = 0x0E18
MAPI_CREATION_VERSION = 0x0E19
MAPI_MODIFY_VERSION = 0x0E1A
MAPI_HASATTACH = 0x0E1B
MAPI_BODY_CRC = 0x0E1C
MAPI_NORMALIZED_SUBJECT = 0x0E1D
MAPI_RTF_IN_SYNC = 0x0E1F
MAPI_ATTACH_SIZE = 0x0E20
MAPI_ATTACH_NUM = 0x0E21
MAPI_PREPROCESS = 0x0E22
MAPI_ORIGINATING_MTA_CERTIFICATE = 0x0E25
MAPI_PROOF_OF_SUBMISSION = 0x0E26
MAPI_PRIMARY_SEND_ACCOUNT = 0x0E28
MAPI_NEXT_SEND_ACCT = 0x0E29
MAPI_ACCESS = 0x0FF4
MAPI_ROW_TYPE = 0x0FF5
MAPI_INSTANCE_KEY = 0x0FF6
MAPI_ACCESS_LEVEL = 0x0FF7
MAPI_MAPPING_SIGNATURE = 0x0FF8
MAPI_RECORD_KEY = 0x0FF9
MAPI_STORE_RECORD_KEY = 0x0FFA
MAPI_STORE_ENTRYID = 0x0FFB
MAPI_MINI_ICON = 0x0FFC
MAPI_ICON = 0x0FFD
MAPI_OBJECT_TYPE = 0x0FFE
MAPI_ENTRYID = 0x0FFF
MAPI_BODY = 0x1000
MAPI_REPORT_TEXT = 0x1001
MAPI_ORIGINATOR_AND_DL_EXPANSION_HISTORY = 0x1002
MAPI_REPORTING_DL_NAME = 0x1003
MAPI_REPORTING_MTA_CERTIFICATE = 0x1004
MAPI_RTF_SYNC_BODY_CRC = 0x1006
MAPI_RTF_SYNC_BODY_COUNT = 0x1007
MAPI_RTF_SYNC_BODY_TAG = 0x1008
MAPI_RTF_COMPRESSED = 0x1009
MAPI_RTF_SYNC_PREFIX_COUNT = 0x1010
MAPI_RTF_SYNC_TRAILING_COUNT = 0x1011
MAPI_ORIGINALLY_INTENDED_RECIP_ENTRYID = 0x1012
MAPI_BODY_HTML = 0x1013
MAPI_NATIVE_BODY = 0x1016
MAPI_SMTP_MESSAGE_ID = 0x1035
MAPI_INTERNET_REFERENCES = 0x1039
MAPI_IN_REPLY_TO_ID = 0x1042
MAPI_INTERNET_RETURN_PATH = 0x1046
MAPI_ICON_INDEX = 0x1080
MAPI_LAST_VERB_EXECUTED = 0x1081
MAPI_LAST_VERB_EXECUTION_TIME = 0x1082
MAPI_URL_COMP_NAME = 0x10F3
MAPI_ATTRIBUTE_HIDDEN = 0x10F4
MAPI_ATTRIBUTE_SYSTEM = 0x10F5
MAPI_ATTRIBUTE_READ_ONLY = 0x10F6
MAPI_ROWID = 0x3000
MAPI_DISPLAY_NAME = 0x3001
MAPI_ADDRTYPE = 0x3002
MAPI_EMAIL_ADDRESS = 0x3003
MAPI_COMMENT = 0x3004
MAPI_DEPTH = 0x3005
MAPI_PROVIDER_DISPLAY = 0x3006
MAPI_CREATION_TIME = 0x3007
MAPI_LAST_MODIFICATION_TIME = 0x3008
MAPI_RESOURCE_FLAGS = 0x3009
MAPI_PROVIDER_DLL_NAME = 0x300A
MAPI_SEARCH_KEY = 0x300B
MAPI_PROVIDER_UID = 0x300C
MAPI_PROVIDER_ORDINAL = 0x300D
MAPI_TARGET_ENTRY_ID = 0x3010
MAPI_CONVERSATION_ID = 0x3013
MAPI_CONVERSATION_INDEX_TRACKING = 0x3016
MAPI_FORM_VERSION = 0x3301
MAPI_FORM_CLSID = 0x3302
MAPI_FORM_CONTACT_NAME = 0x3303
MAPI_FORM_CATEGORY = 0x3304
MAPI_FORM_CATEGORY_SUB = 0x3305
MAPI_FORM_HOST_MAP = 0x3306
MAPI_FORM_HIDDEN = 0x3307
MAPI_FORM_DESIGNER_NAME = 0x3308
MAPI_FORM_DESIGNER_GUID = 0x3309
MAPI_FORM_MESSAGE_BEHAVIOR = 0x330A
MAPI_DEFAULT_STORE = 0x3400
MAPI_STORE_SUPPORT_MASK = 0x340D
MAPI_STORE_STATE = 0x340E
MAPI_STORE_UNICODE_MASK = 0x340F
MAPI_IPM_SUBTREE_SEARCH_KEY = 0x3410
MAPI_IPM_OUTBOX_SEARCH_KEY = 0x3411
MAPI_IPM_WASTEBASKET_SEARCH_KEY = 0x3412
MAPI_IPM_SENTMAIL_SEARCH_KEY = 0x3413
MAPI_MDB_PROVIDER = 0x3414
MAPI_RECEIVE_FOLDER_SETTINGS = 0x3415
MAPI_VALID_FOLDER_MASK = 0x35DF
MAPI_IPM_SUBTREE_ENTRYID = 0x35E0
MAPI_IPM_OUTBOX_ENTRYID = 0x35E2
MAPI_IPM_WASTEBASKET_ENTRYID = 0x35E3
MAPI_IPM_SENTMAIL_ENTRYID = 0x35E4
MAPI_VIEWS_ENTRYID = 0x35E5
MAPI_COMMON_VIEWS_ENTRYID = 0x35E6
MAPI_FINDER_ENTRYID = 0x35E7
MAPI_CONTAINER_FLAGS = 0x3600
MAPI_FOLDER_TYPE = 0x3601
MAPI_CONTENT_COUNT = 0x3602
MAPI_CONTENT_UNREAD = 0x3603
MAPI_CREATE_TEMPLATES = 0x3604
MAPI_DETAILS_TABLE = 0x3605
MAPI_SEARCH = 0x3607
MAPI_SELECTABLE = 0x3609
MAPI_SUBFOLDERS = 0x360A
MAPI_STATUS = 0x360B
MAPI_ANR = 0x360C
MAPI_CONTENTS_SORT_ORDER = 0x360D
MAPI_CONTAINER_HIERARCHY = 0x360E
MAPI_CONTAINER_CONTENTS = 0x360F
MAPI_FOLDER_ASSOCIATED_CONTENTS = 0x3610
MAPI_DEF_CREATE_DL = 0x3611
MAPI_DEF_CREATE_MAILUSER = 0x3612
MAPI_CONTAINER_CLASS = 0x3613
MAPI_CONTAINER_MODIFY_VERSION = 0x3614
MAPI_AB_PROVIDER_ID = 0x3615
MAPI_DEFAULT_VIEW_ENTRYID = 0x3616
MAPI_ASSOC_CONTENT_COUNT = 0x3617
MAPI_ATTACHMENT_X400_PARAMETERS = 0x3700
MAPI_ATTACH_DATA_OBJ = 0x3701
MAPI_ATTACH_ENCODING = 0x3702
MAPI_ATTACH_EXTENSION = 0x3703
MAPI_ATTACH_FILENAME = 0x3704
MAPI_ATTACH_METHOD = 0x3705
MAPI_ATTACH_LONG_FILENAME = 0x3707
MAPI_ATTACH_PATHNAME = 0x3708
MAPI_ATTACH_RENDERING = 0x3709
MAPI_ATTACH_TAG = 0x370A
MAPI_RENDERING_POSITION = 0x370B
MAPI_ATTACH_TRANSPORT_NAME = 0x370C
MAPI_ATTACH_LONG_PATHNAME = 0x370D
MAPI_ATTACH_MIME_TAG = 0x370E
MAPI_ATTACH_ADDITIONAL_INFO = 0x370F
MAPI_ATTACH_MIME_SEQUENCE = 0x3710
MAPI_ATTACH_CONTENT_ID = 0x3712
MAPI_ATTACH_CONTENT_LOCATION = 0x3713
MAPI_ATTACH_FLAGS = 0x3714
MAPI_DISPLAY_TYPE = 0x3900
MAPI_TEMPLATEID = 0x3902
MAPI_PRIMARY_CAPABILITY = 0x3904
MAPI_SMTP_ADDRESS = 0x39FE
MAPI_7BIT_DISPLAY_NAME = 0x39FF
MAPI_ACCOUNT = 0x3A00
MAPI_ALTERNATE_RECIPIENT = 0x3A01
MAPI_CALLBACK_TELEPHONE_NUMBER = 0x3A02
MAPI_CONVERSION_PROHIBITED = 0x3A03
MAPI_DISCLOSE_RECIPIENTS = 0x3A04
MAPI_GENERATION = 0x3A05
MAPI_GIVEN_NAME = 0x3A06
MAPI_GOVERNMENT_ID_NUMBER = 0x3A07
MAPI_BUSINESS_TELEPHONE_NUMBER = 0x3A08
MAPI_HOME_TELEPHONE_NUMBER = 0x3A09
MAPI_INITIALS = 0x3A0A
MAPI_KEYWORD = 0x3A0B
MAPI_LANGUAGE = 0x3A0C
MAPI_LOCATION = 0x3A0D
MAPI_MAIL_PERMISSION = 0x3A0E
MAPI_MHS_COMMON_NAME = 0x3A0F
MAPI_ORGANIZATIONAL_ID_NUMBER = 0x3A10
MAPI_SURNAME = 0x3A11
MAPI_ORIGINAL_ENTRYID = 0x3A12
MAPI_ORIGINAL_DISPLAY_NAME = 0x3A13
MAPI_ORIGINAL_SEARCH_KEY = 0x3A14
MAPI_POSTAL_ADDRESS = 0x3A15
MAPI_COMPANY_NAME = 0x3A16
MAPI_TITLE = 0x3A17
MAPI_DEPARTMENT_NAME = 0x3A18
MAPI_OFFICE_LOCATION = 0x3A19
MAPI_PRIMARY_TELEPHONE_NUMBER = 0x3A1A
MAPI_BUSINESS2_TELEPHONE_NUMBER = 0x3A1B
MAPI_MOBILE_TELEPHONE_NUMBER = 0x3A1C
MAPI_RADIO_TELEPHONE_NUMBER = 0x3A1D
MAPI_CAR_TELEPHONE_NUMBER = 0x3A1E
MAPI_OTHER_TELEPHONE_NUMBER = 0x3A1F
MAPI_TRANSMITABLE_DISPLAY_NAME = 0x3A20
MAPI_PAGER_TELEPHONE_NUMBER = 0x3A21
MAPI_USER_CERTIFICATE = 0x3A22
MAPI_PRIMARY_FAX_NUMBER = 0x3A23
MAPI_BUSINESS_FAX_NUMBER = 0x3A24
MAPI_HOME_FAX_NUMBER = 0x3A25
MAPI_COUNTRY = 0x3A26
MAPI_LOCALITY = 0x3A27
MAPI_STATE_OR_PROVINCE = 0x3A28
MAPI_STREET_ADDRESS = 0x3A29
MAPI_POSTAL_CODE = 0x3A2A
MAPI_POST_OFFICE_BOX = 0x3A2B
MAPI_TELEX_NUMBER = 0x3A2C
MAPI_ISDN_NUMBER = 0x3A2D
MAPI_ASSISTANT_TELEPHONE_NUMBER = 0x3A2E
MAPI_HOME2_TELEPHONE_NUMBER = 0x3A2F
MAPI_ASSISTANT = 0x3A30
MAPI_SEND_RICH_INFO = 0x3A40
MAPI_WEDDING_ANNIVERSARY = 0x3A41
MAPI_BIRTHDAY = 0x3A42
MAPI_HOBBIES = 0x3A43
MAPI_MIDDLE_NAME = 0x3A44
MAPI_DISPLAY_NAME_PREFIX = 0x3A45
MAPI_PROFESSION = 0x3A46
MAPI_PREFERRED_BY_NAME = 0x3A47
MAPI_SPOUSE_NAME = 0x3A48
MAPI_COMPUTER_NETWORK_NAME = 0x3A49
MAPI_CUSTOMER_ID = 0x3A4A
MAPI_TTYTDD_PHONE_NUMBER = 0x3A4B
MAPI_FTP_SITE = 0x3A4C
MAPI_GENDER = 0x3A4D
MAPI_MANAGER_NAME = 0x3A4E
MAPI_NICKNAME = 0x3A4F
MAPI_PERSONAL_HOME_PAGE = 0x3A50
MAPI_BUSINESS_HOME_PAGE = 0x3A51
MAPI_CONTACT_VERSION = 0x3A52
MAPI_CONTACT_ENTRYIDS = 0x3A53
MAPI_CONTACT_ADDRTYPES = 0x3A54
MAPI_CONTACT_DEFAULT_ADDRESS_INDEX = 0x3A55
MAPI_CONTACT_EMAIL_ADDRESSES = 0x3A56
MAPI_COMPANY_MAIN_PHONE_NUMBER = 0x3A57
MAPI_CHILDRENS_NAMES = 0x3A58
MAPI_HOME_ADDRESS_CITY = 0x3A59
MAPI_HOME_ADDRESS_COUNTRY = 0x3A5A
MAPI_HOME_ADDRESS_POSTAL_CODE = 0x3A5B
MAPI_HOME_ADDRESS_STATE_OR_PROVINCE = 0x3A5C
MAPI_HOME_ADDRESS_STREET = 0x3A5D
MAPI_HOME_ADDRESS_POST_OFFICE_BOX = 0x3A5E
MAPI_OTHER_ADDRESS_CITY = 0x3A5F
MAPI_OTHER_ADDRESS_COUNTRY = 0x3A60
MAPI_OTHER_ADDRESS_POSTAL_CODE = 0x3A61
MAPI_OTHER_ADDRESS_STATE_OR_PROVINCE = 0x3A62
MAPI_OTHER_ADDRESS_STREET = 0x3A63
MAPI_OTHER_ADDRESS_POST_OFFICE_BOX = 0x3A64
MAPI_SEND_INTERNET_ENCODING = 0x3A71
MAPI_STORE_PROVIDERS = 0x3D00
MAPI_AB_PROVIDERS = 0x3D01
MAPI_TRANSPORT_PROVIDERS = 0x3D02
MAPI_DEFAULT_PROFILE = 0x3D04
MAPI_AB_SEARCH_PATH = 0x3D05
MAPI_AB_DEFAULT_DIR = 0x3D06
MAPI_AB_DEFAULT_PAB = 0x3D07
MAPI_FILTERING_HOOKS = 0x3D08
MAPI_SERVICE_NAME = 0x3D09
MAPI_SERVICE_DLL_NAME = 0x3D0A
MAPI_SERVICE_ENTRY_NAME = 0x3D0B
MAPI_SERVICE_UID = 0x3D0C
MAPI_SERVICE_EXTRA_UIDS = 0x3D0D
MAPI_SERVICES = 0x3D0E
MAPI_SERVICE_SUPPORT_FILES = 0x3D0F
MAPI_SERVICE_DELETE_FILES = 0x3D10
MAPI_AB_SEARCH_PATH_UPDATE = 0x3D11
MAPI_PROFILE_NAME = 0x3D12
MAPI_IDENTITY_DISPLAY = 0x3E00
MAPI_IDENTITY_ENTRYID = 0x3E01
MAPI_RESOURCE_METHODS = 0x3E02
MAPI_RESOURCE_TYPE = 0x3E03
MAPI_STATUS_CODE = 0x3E04
MAPI_IDENTITY_SEARCH_KEY = 0x3E05
MAPI_OWN_STORE_ENTRYID = 0x3E06
MAPI_RESOURCE_PATH = 0x3E07
MAPI_STATUS_STRING = 0x3E08
MAPI_X400_DEFERRED_DELIVERY_CANCEL = 0x3E09
MAPI_HEADER_FOLDER_ENTRYID = 0x3E0A
MAPI_REMOTE_PROGRESS = 0x3E0B
MAPI_REMOTE_PROGRESS_TEXT = 0x3E0C
MAPI_REMOTE_VALIDATE_OK = 0x3E0D
MAPI_CONTROL_FLAGS = 0x3F00
MAPI_CONTROL_STRUCTURE = 0x3F01
MAPI_CONTROL_TYPE = 0x3F02
MAPI_DELTAX = 0x3F03
MAPI_DELTAY = 0x3F04
MAPI_XPOS = 0x3F05
MAPI_YPOS = 0x3F06
MAPI_CONTROL_ID = 0x3F07
MAPI_INITIAL_DETAILS_PANE = 0x3F08
MAPI_UNCOMPRESSED_BODY = 0x3FD9
MAPI_INTERNET_CODEPAGE = 0x3FDE
MAPI_AUTO_RESPONSE_SUPPRESS = 0x3FDF
MAPI_MESSAGE_LOCALE_ID = 0x3FF1
MAPI_RULE_TRIGGER_HISTORY = 0x3FF2
MAPI_MOVE_TO_STORE_ENTRYID = 0x3FF3
MAPI_MOVE_TO_FOLDER_ENTRYID = 0x3FF4
MAPI_STORAGE_QUOTA_LIMIT = 0x3FF5
MAPI_EXCESS_STORAGE_USED = 0x3FF6
MAPI_SVR_GENERATING_QUOTA_MSG = 0x3FF7
MAPI_CREATOR_NAME = 0x3FF8
MAPI_CREATOR_ENTRY_ID = 0x3FF9
MAPI_LAST_MODIFIER_NAME = 0x3FFA
MAPI_LAST_MODIFIER_ENTRY_ID = 0x3FFB
MAPI_REPLY_RECIPIENT_SMTP_PROXIES = 0x3FFC
MAPI_MESSAGE_CODEPAGE = 0x3FFD
MAPI_EXTENDED_ACL_DATA = 0x3FFE
MAPI_SENDER_FLAGS = 0x4019
MAPI_SENT_REPRESENTING_FLAGS = 0x401A
MAPI_RECEIVED_BY_FLAGS = 0x401B
MAPI_RECEIVED_REPRESENTING_FLAGS = 0x401C
MAPI_CREATOR_ADDRESS_TYPE = 0x4022
MAPI_CREATOR_EMAIL_ADDRESS = 0x4023
MAPI_SENDER_SIMPLE_DISPLAY_NAME = 0x4030
MAPI_SENT_REPRESENTING_SIMPLE_DISPLAY_NAME = 0x4031
MAPI_RECEIVED_REPRESENTING_SIMPLE_DISPLAY_NAME = 0x4035
MAPI_CREATOR_SIMPLE_DISP_NAME = 0x4038
MAPI_LAST_MODIFIER_SIMPLE_DISPLAY_NAME = 0x4039
MAPI_CONTENT_FILTER_SPAM_CONFIDENCE_LEVEL = 0x4076
MAPI_INTERNET_MAIL_OVERRIDE_FORMAT = 0x5902
MAPI_MESSAGE_EDITOR_FORMAT = 0x5909
MAPI_SENDER_SMTP_ADDRESS = 0x5D01
MAPI_SENT_REPRESENTING_SMTP_ADDRESS = 0x5D02
MAPI_READ_RECEIPT_SMTP_ADDRESS = 0x5D05
MAPI_RECEIVED_BY_SMTP_ADDRESS = 0x5D07
MAPI_RECEIVED_REPRESENTING_SMTP_ADDRESS = 0x5D08
MAPI_SENDING_SMTP_ADDRESS = 0x5D09
MAPI_SIP_ADDRESS = 0x5FE5
MAPI_RECIPIENT_DISPLAY_NAME = 0x5FF6
MAPI_RECIPIENT_ENTRYID = 0x5FF7
MAPI_RECIPIENT_FLAGS = 0x5FFD
MAPI_RECIPIENT_TRACKSTATUS = 0x5FFF
MAPI_CHANGE_KEY = 0x65E2
MAPI_PREDECESSOR_CHANGE_LIST = 0x65E3
MAPI_ID_SECURE_MIN = 0x67F0
MAPI_ID_SECURE_MAX = 0x67FF
MAPI_VOICE_MESSAGE_DURATION = 0x6801
MAPI_SENDER_TELEPHONE_NUMBER = 0x6802
MAPI_VOICE_MESSAGE_SENDER_NAME = 0x6803
MAPI_FAX_NUMBER_OF_PAGES = 0x6804
MAPI_VOICE_MESSAGE_ATTACHMENT_ORDER = 0x6805
MAPI_CALL_ID = 0x6806
MAPI_ATTACHMENT_LINK_ID = 0x7FFA
MAPI_EXCEPTION_START_TIME = 0x7FFB
MAPI_EXCEPTION_END_TIME = 0x7FFC
MAPI_ATTACHMENT_FLAGS = 0x7FFD
MAPI_ATTACHMENT_HIDDEN = 0x7FFE
MAPI_ATTACHMENT_CONTACT_PHOTO = 0x7FFF
MAPI_FILE_UNDER = 0x8005
MAPI_FILE_UNDER_ID = 0x8006
MAPI_CONTACT_ITEM_DATA = 0x8007
MAPI_REFERRED_BY = 0x800E
MAPI_DEPARTMENT = 0x8010
MAPI_HAS_PICTURE = 0x8015
MAPI_HOME_ADDRESS = 0x801A
MAPI_WORK_ADDRESS = 0x801B
MAPI_OTHER_ADDRESS = 0x801C
MAPI_POSTAL_ADDRESS_ID = 0x8022
MAPI_CONTACT_CHARACTER_SET = 0x8023
MAPI_AUTO_LOG = 0x8025
MAPI_FILE_UNDER_LIST = 0x8026
MAPI_EMAIL_LIST = 0x8027
MAPI_ADDRESS_BOOK_PROVIDER_EMAIL_LIST = 0x8028
MAPI_ADDRESS_BOOK_PROVIDER_ARRAY_TYPE = 0x8029
MAPI_HTML = 0x802B
MAPI_YOMI_FIRST_NAME = 0x802C
MAPI_YOMI_LAST_NAME = 0x802D
MAPI_YOMI_COMPANY_NAME = 0x802E
MAPI_BUSINESS_CARD_DISPLAY_DEFINITION = 0x8040
MAPI_BUSINESS_CARD_CARD_PICTURE = 0x8041
MAPI_WORK_ADDRESS_STREET = 0x8045
MAPI_WORK_ADDRESS_CITY = 0x8046
MAPI_WORK_ADDRESS_STATE = 0x8047
MAPI_WORK_ADDRESS_POSTAL_CODE = 0x8048
MAPI_WORK_ADDRESS_COUNTRY = 0x8049
MAPI_WORK_ADDRESS_POST_OFFICE_BOX = 0x804A
MAPI_DISTRIBUTION_LIST_CHECKSUM = 0x804C
MAPI_BIRTHDAY_EVENT_ENTRY_ID = 0x804D
MAPI_ANNIVERSARY_EVENT_ENTRY_ID = 0x804E
MAPI_CONTACT_USER_FIELD1 = 0x804F
MAPI_CONTACT_USER_FIELD2 = 0x8050
MAPI_CONTACT_USER_FIELD3 = 0x8051
MAPI_CONTACT_USER_FIELD4 = 0x8052
MAPI_DISTRIBUTION_LIST_NAME = 0x8053
MAPI_DISTRIBUTION_LIST_ONE_OFF_MEMBERS = 0x8054
MAPI_DISTRIBUTION_LIST_MEMBERS = 0x8055
MAPI_INSTANT_MESSAGING_ADDRESS = 0x8062
MAPI_DISTRIBUTION_LIST_STREAM = 0x8064
MAPI_EMAIL_DISPLAY_NAME = 0x8080
MAPI_EMAIL_ADDR_TYPE = 0x8082
MAPI_EMAIL_EMAIL_ADDRESS = 0x8083
MAPI_EMAIL_ORIGINAL_DISPLAY_NAME = 0x8084
MAPI_EMAIL1ORIGINAL_ENTRY_ID = 0x8085
MAPI_EMAIL1RICH_TEXT_FORMAT = 0x8086
MAPI_EMAIL1EMAIL_TYPE = 0x8087
MAPI_EMAIL2DISPLAY_NAME = 0x8090
MAPI_EMAIL2ENTRY_ID = 0x8091
MAPI_EMAIL2ADDR_TYPE = 0x8092
MAPI_EMAIL2EMAIL_ADDRESS = 0x8093
MAPI_EMAIL2ORIGINAL_DISPLAY_NAME = 0x8094
MAPI_EMAIL2ORIGINAL_ENTRY_ID = 0x8095
MAPI_EMAIL2RICH_TEXT_FORMAT = 0x8096
MAPI_EMAIL3DISPLAY_NAME = 0x80A0
MAPI_EMAIL3ENTRY_ID = 0x80A1
MAPI_EMAIL3ADDR_TYPE = 0x80A2
MAPI_EMAIL3EMAIL_ADDRESS = 0x80A3
MAPI_EMAIL3ORIGINAL_DISPLAY_NAME = 0x80A4
MAPI_EMAIL3ORIGINAL_ENTRY_ID = 0x80A5
MAPI_EMAIL3RICH_TEXT_FORMAT = 0x80A6
MAPI_FAX1ADDRESS_TYPE = 0x80B2
MAPI_FAX1EMAIL_ADDRESS = 0x80B3
MAPI_FAX1ORIGINAL_DISPLAY_NAME = 0x80B4
MAPI_FAX1ORIGINAL_ENTRY_ID = 0x80B5
MAPI_FAX2ADDRESS_TYPE = 0x80C2
MAPI_FAX2EMAIL_ADDRESS = 0x80C3
MAPI_FAX2ORIGINAL_DISPLAY_NAME = 0x80C4
MAPI_FAX2ORIGINAL_ENTRY_ID = 0x80C5
MAPI_FAX3ADDRESS_TYPE = 0x80D2
MAPI_FAX3EMAIL_ADDRESS = 0x80D3
MAPI_FAX3ORIGINAL_DISPLAY_NAME = 0x80D4
MAPI_FAX3ORIGINAL_ENTRY_ID = 0x80D5
MAPI_FREE_BUSY_LOCATION = 0x80D8
MAPI_HOME_ADDRESS_COUNTRY_CODE = 0x80DA
MAPI_WORK_ADDRESS_COUNTRY_CODE = 0x80DB
MAPI_OTHER_ADDRESS_COUNTRY_CODE = 0x80DC
MAPI_ADDRESS_COUNTRY_CODE = 0x80DD
MAPI_BIRTHDAY_LOCAL = 0x80DE
MAPI_WEDDING_ANNIVERSARY_LOCAL = 0x80DF
MAPI_TASK_STATUS = 0x8101
MAPI_TASK_START_DATE = 0x8104
MAPI_TASK_DUE_DATE = 0x8105
MAPI_TASK_ACTUAL_EFFORT = 0x8110
MAPI_TASK_ESTIMATED_EFFORT = 0x8111
MAPI_TASK_FRECUR = 0x8126
MAPI_SEND_MEETING_AS_ICAL = 0x8200
MAPI_APPOINTMENT_SEQUENCE = 0x8201
MAPI_APPOINTMENT_SEQUENCE_TIME = 0x8202
MAPI_APPOINTMENT_LAST_SEQUENCE = 0x8203
MAPI_CHANGE_HIGHLIGHT = 0x8204
MAPI_BUSY_STATUS = 0x8205
MAPI_FEXCEPTIONAL_BODY = 0x8206
MAPI_APPOINTMENT_AUXILIARY_FLAGS = 0x8207
MAPI_OUTLOOK_LOCATION = 0x8208
MAPI_MEETING_WORKSPACE_URL = 0x8209
MAPI_FORWARD_INSTANCE = 0x820A
MAPI_LINKED_TASK_ITEMS = 0x820C
MAPI_APPT_START_WHOLE = 0x820D
MAPI_APPT_END_WHOLE = 0x820E
MAPI_APPOINTMENT_START_TIME = 0x820F
MAPI_APPOINTMENT_END_TIME = 0x8210
MAPI_APPOINTMENT_END_DATE = 0x8211
MAPI_APPOINTMENT_START_DATE = 0x8212
MAPI_APPT_DURATION = 0x8213
MAPI_APPOINTMENT_COLOR = 0x8214
MAPI_APPOINTMENT_SUB_TYPE = 0x8215
MAPI_APPOINTMENT_RECUR = 0x8216
MAPI_APPOINTMENT_STATE_FLAGS = 0x8217
MAPI_RESPONSE_STATUS = 0x8218
MAPI_APPOINTMENT_REPLY_TIME = 0x8220
MAPI_RECURRING = 0x8223
MAPI_INTENDED_BUSY_STATUS = 0x8224
MAPI_APPOINTMENT_UPDATE_TIME = 0x8226
MAPI_EXCEPTION_REPLACE_TIME = 0x8228
MAPI_OWNER_NAME = 0x822E
MAPI_APPOINTMENT_REPLY_NAME = 0x8230
MAPI_RECURRENCE_TYPE = 0x8231
MAPI_RECURRENCE_PATTERN = 0x8232
MAPI_TIME_ZONE_STRUCT = 0x8233
MAPI_TIME_ZONE_DESCRIPTION = 0x8234
MAPI_CLIP_START = 0x8235
MAPI_CLIP_END = 0x8236
MAPI_ORIGINAL_STORE_ENTRY_ID = 0x8237
MAPI_ALL_ATTENDEES_STRING = 0x8238
MAPI_AUTO_FILL_LOCATION = 0x823A
MAPI_TO_ATTENDEES_STRING = 0x823B
MAPI_CCATTENDEES_STRING = 0x823C
MAPI_CONF_CHECK = 0x8240
MAPI_CONFERENCING_TYPE = 0x8241
MAPI_DIRECTORY = 0x8242
MAPI_ORGANIZER_ALIAS = 0x8243
MAPI_AUTO_START_CHECK = 0x8244
MAPI_AUTO_START_WHEN = 0x8245
MAPI_ALLOW_EXTERNAL_CHECK = 0x8246
MAPI_COLLABORATE_DOC = 0x8247
MAPI_NET_SHOW_URL = 0x8248
MAPI_ONLINE_PASSWORD = 0x8249
MAPI_APPOINTMENT_PROPOSED_DURATION = 0x8256
MAPI_APPT_COUNTER_PROPOSAL = 0x8257
MAPI_APPOINTMENT_PROPOSAL_NUMBER = 0x8259
MAPI_APPOINTMENT_NOT_ALLOW_PROPOSE = 0x825A
MAPI_APPT_TZDEF_START_DISPLAY = 0x825E
MAPI_APPT_TZDEF_END_DISPLAY = 0x825F
MAPI_APPT_TZDEF_RECUR = 0x8260
MAPI_REMINDER_MINUTES_BEFORE_START = 0x8501
MAPI_REMINDER_TIME = 0x8502
MAPI_REMINDER_SET = 0x8503
MAPI_PRIVATE = 0x8506
MAPI_AGING_DONT_AGE_ME = 0x850E
MAPI_FORM_STORAGE = 0x850F
MAPI_SIDE_EFFECTS = 0x8510
MAPI_REMOTE_STATUS = 0x8511
MAPI_PAGE_DIR_STREAM = 0x8513
MAPI_SMART_NO_ATTACH = 0x8514
MAPI_COMMON_START = 0x8516
MAPI_COMMON_END = 0x8517
MAPI_TASK_MODE = 0x8518
MAPI_FORM_PROP_STREAM = 0x851B
MAPI_REQUEST = 0x8530
MAPI_NON_SENDABLE_TO = 0x8536
MAPI_NON_SENDABLE_CC = 0x8537
MAPI_NON_SENDABLE_BCC = 0x8538
MAPI_COMPANIES = 0x8539
MAPI_CONTACTS = 0x853A
MAPI_PROP_DEF_STREAM = 0x8540
MAPI_SCRIPT_STREAM = 0x8541
MAPI_CUSTOM_FLAG = 0x8542
MAPI_OUTLOOK_CURRENT_VERSION = 0x8552
MAPI_CURRENT_VERSION_NAME = 0x8554
MAPI_REMINDER_NEXT_TIME = 0x8560
MAPI_HEADER_ITEM = 0x8578
MAPI_USE_TNEF = 0x8582
MAPI_TO_DO_TITLE = 0x85A4
MAPI_VALID_FLAG_STRING_PROOF = 0x85BF
MAPI_LOG_TYPE = 0x8700
MAPI_LOG_START = 0x8706
MAPI_LOG_DURATION = 0x8707
MAPI_LOG_END = 0x8708
CODE_TO_NAME = {
MAPI_ACKNOWLEDGEMENT_MODE: "MAPI_ACKNOWLEDGEMENT_MODE",
MAPI_ALTERNATE_RECIPIENT_ALLOWED: "MAPI_ALTERNATE_RECIPIENT_ALLOWED",
MAPI_AUTHORIZING_USERS: "MAPI_AUTHORIZING_USERS",
MAPI_AUTO_FORWARD_COMMENT: "MAPI_AUTO_FORWARD_COMMENT",
MAPI_AUTO_FORWARDED: "MAPI_AUTO_FORWARDED",
MAPI_CONTENT_CONFIDENTIALITY_ALGORITHM_ID: "MAPI_CONTENT_CONFIDENTIALITY_ALGORITHM_ID",
MAPI_CONTENT_CORRELATOR: "MAPI_CONTENT_CORRELATOR",
MAPI_CONTENT_IDENTIFIER: "MAPI_CONTENT_IDENTIFIER",
MAPI_CONTENT_LENGTH: "MAPI_CONTENT_LENGTH",
MAPI_CONTENT_RETURN_REQUESTED: "MAPI_CONTENT_RETURN_REQUESTED",
MAPI_CONVERSATION_KEY: "MAPI_CONVERSATION_KEY",
MAPI_CONVERSION_EITS: "MAPI_CONVERSION_EITS",
MAPI_CONVERSION_WITH_LOSS_PROHIBITED: "MAPI_CONVERSION_WITH_LOSS_PROHIBITED",
MAPI_CONVERTED_EITS: "MAPI_CONVERTED_EITS",
MAPI_DEFERRED_DELIVERY_TIME: "MAPI_DEFERRED_DELIVERY_TIME",
MAPI_DELIVER_TIME: "MAPI_DELIVER_TIME",
MAPI_DISCARD_REASON: "MAPI_DISCARD_REASON",
MAPI_DISCLOSURE_OF_RECIPIENTS: "MAPI_DISCLOSURE_OF_RECIPIENTS",
MAPI_DL_EXPANSION_HISTORY: "MAPI_DL_EXPANSION_HISTORY",
MAPI_DL_EXPANSION_PROHIBITED: "MAPI_DL_EXPANSION_PROHIBITED",
MAPI_EXPIRY_TIME: "MAPI_EXPIRY_TIME",
MAPI_IMPLICIT_CONVERSION_PROHIBITED: "MAPI_IMPLICIT_CONVERSION_PROHIBITED",
MAPI_IMPORTANCE: "MAPI_IMPORTANCE",
MAPI_IPM_ID: "MAPI_IPM_ID",
MAPI_LATEST_DELIVERY_TIME: "MAPI_LATEST_DELIVERY_TIME",
MAPI_MESSAGE_CLASS: "MAPI_MESSAGE_CLASS",
MAPI_MESSAGE_DELIVERY_ID: "MAPI_MESSAGE_DELIVERY_ID",
MAPI_MESSAGE_SECURITY_LABEL: "MAPI_MESSAGE_SECURITY_LABEL",
MAPI_OBSOLETED_IPMS: "MAPI_OBSOLETED_IPMS",
MAPI_ORIGINALLY_INTENDED_RECIPIENT_NAME: "MAPI_ORIGINALLY_INTENDED_RECIPIENT_NAME",
MAPI_ORIGINAL_EITS: "MAPI_ORIGINAL_EITS",
MAPI_ORIGINATOR_CERTIFICATE: "MAPI_ORIGINATOR_CERTIFICATE",
MAPI_ORIGINATOR_DELIVERY_REPORT_REQUESTED: "MAPI_ORIGINATOR_DELIVERY_REPORT_REQUESTED",
MAPI_ORIGINATOR_RETURN_ADDRESS: "MAPI_ORIGINATOR_RETURN_ADDRESS",
MAPI_PARENT_KEY: "MAPI_PARENT_KEY",
MAPI_PRIORITY: "MAPI_PRIORITY",
MAPI_ORIGIN_CHECK: "MAPI_ORIGIN_CHECK",
MAPI_PROOF_OF_SUBMISSION_REQUESTED: "MAPI_PROOF_OF_SUBMISSION_REQUESTED",
MAPI_READ_RECEIPT_REQUESTED: "MAPI_READ_RECEIPT_REQUESTED",
MAPI_RECEIPT_TIME: "MAPI_RECEIPT_TIME",
MAPI_RECIPIENT_REASSIGNMENT_PROHIBITED: "MAPI_RECIPIENT_REASSIGNMENT_PROHIBITED",
MAPI_REDIRECTION_HISTORY: "MAPI_REDIRECTION_HISTORY",
MAPI_RELATED_IPMS: "MAPI_RELATED_IPMS",
MAPI_ORIGINAL_SENSITIVITY: "MAPI_ORIGINAL_SENSITIVITY",
MAPI_LANGUAGES: "MAPI_LANGUAGES",
MAPI_REPLY_TIME: "MAPI_REPLY_TIME",
MAPI_REPORT_TAG: "MAPI_REPORT_TAG",
MAPI_REPORT_TIME: "MAPI_REPORT_TIME",
MAPI_RETURNED_IPM: "MAPI_RETURNED_IPM",
MAPI_SECURITY: "MAPI_SECURITY",
MAPI_INCOMPLETE_COPY: "MAPI_INCOMPLETE_COPY",
MAPI_SENSITIVITY: "MAPI_SENSITIVITY",
MAPI_SUBJECT: "MAPI_SUBJECT",
MAPI_SUBJECT_IPM: "MAPI_SUBJECT_IPM",
MAPI_CLIENT_SUBMIT_TIME: "MAPI_CLIENT_SUBMIT_TIME",
MAPI_REPORT_NAME: "MAPI_REPORT_NAME",
MAPI_SENT_REPRESENTING_SEARCH_KEY: "MAPI_SENT_REPRESENTING_SEARCH_KEY",
MAPI_X400_CONTENT_TYPE: "MAPI_X400_CONTENT_TYPE",
MAPI_SUBJECT_PREFIX: "MAPI_SUBJECT_PREFIX",
MAPI_NON_RECEIPT_REASON: "MAPI_NON_RECEIPT_REASON",
MAPI_RECEIVED_BY_ENTRYID: "MAPI_RECEIVED_BY_ENTRYID",
MAPI_RECEIVED_BY_NAME: "MAPI_RECEIVED_BY_NAME",
MAPI_SENT_REPRESENTING_ENTRYID: "MAPI_SENT_REPRESENTING_ENTRYID",
MAPI_SENT_REPRESENTING_NAME: "MAPI_SENT_REPRESENTING_NAME",
MAPI_RCVD_REPRESENTING_ENTRYID: "MAPI_RCVD_REPRESENTING_ENTRYID",
MAPI_RCVD_REPRESENTING_NAME: "MAPI_RCVD_REPRESENTING_NAME",
MAPI_REPORT_ENTRYID: "MAPI_REPORT_ENTRYID",
MAPI_READ_RECEIPT_ENTRYID: "MAPI_READ_RECEIPT_ENTRYID",
MAPI_MESSAGE_SUBMISSION_ID: "MAPI_MESSAGE_SUBMISSION_ID",
MAPI_PROVIDER_SUBMIT_TIME: "MAPI_PROVIDER_SUBMIT_TIME",
MAPI_ORIGINAL_SUBJECT: "MAPI_ORIGINAL_SUBJECT",
MAPI_DISC_VAL: "MAPI_DISC_VAL",
MAPI_ORIG_MESSAGE_CLASS: "MAPI_ORIG_MESSAGE_CLASS",
MAPI_ORIGINAL_AUTHOR_ENTRYID: "MAPI_ORIGINAL_AUTHOR_ENTRYID",
MAPI_ORIGINAL_AUTHOR_NAME: "MAPI_ORIGINAL_AUTHOR_NAME",
MAPI_ORIGINAL_SUBMIT_TIME: "MAPI_ORIGINAL_SUBMIT_TIME",
MAPI_REPLY_RECIPIENT_ENTRIES: "MAPI_REPLY_RECIPIENT_ENTRIES",
MAPI_REPLY_RECIPIENT_NAMES: "MAPI_REPLY_RECIPIENT_NAMES",
MAPI_RECEIVED_BY_SEARCH_KEY: "MAPI_RECEIVED_BY_SEARCH_KEY",
MAPI_RCVD_REPRESENTING_SEARCH_KEY: "MAPI_RCVD_REPRESENTING_SEARCH_KEY",
MAPI_READ_RECEIPT_SEARCH_KEY: "MAPI_READ_RECEIPT_SEARCH_KEY",
MAPI_REPORT_SEARCH_KEY: "MAPI_REPORT_SEARCH_KEY",
MAPI_ORIGINAL_DELIVERY_TIME: "MAPI_ORIGINAL_DELIVERY_TIME",
MAPI_ORIGINAL_AUTHOR_SEARCH_KEY: "MAPI_ORIGINAL_AUTHOR_SEARCH_KEY",
MAPI_MESSAGE_TO_ME: "MAPI_MESSAGE_TO_ME",
MAPI_MESSAGE_CC_ME: "MAPI_MESSAGE_CC_ME",
MAPI_MESSAGE_RECIP_ME: "MAPI_MESSAGE_RECIP_ME",
MAPI_ORIGINAL_SENDER_NAME: "MAPI_ORIGINAL_SENDER_NAME",
MAPI_ORIGINAL_SENDER_ENTRYID: "MAPI_ORIGINAL_SENDER_ENTRYID",
MAPI_ORIGINAL_SENDER_SEARCH_KEY: "MAPI_ORIGINAL_SENDER_SEARCH_KEY",
MAPI_ORIGINAL_SENT_REPRESENTING_NAME: "MAPI_ORIGINAL_SENT_REPRESENTING_NAME",
MAPI_ORIGINAL_SENT_REPRESENTING_ENTRYID: "MAPI_ORIGINAL_SENT_REPRESENTING_ENTRYID",
MAPI_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY: "MAPI_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY",
MAPI_START_DATE: "MAPI_START_DATE",
MAPI_END_DATE: "MAPI_END_DATE",
MAPI_OWNER_APPT_ID: "MAPI_OWNER_APPT_ID",
MAPI_RESPONSE_REQUESTED: "MAPI_RESPONSE_REQUESTED",
MAPI_SENT_REPRESENTING_ADDRTYPE: "MAPI_SENT_REPRESENTING_ADDRTYPE",
MAPI_SENT_REPRESENTING_EMAIL_ADDRESS: "MAPI_SENT_REPRESENTING_EMAIL_ADDRESS",
MAPI_ORIGINAL_SENDER_ADDRTYPE: "MAPI_ORIGINAL_SENDER_ADDRTYPE",
MAPI_ORIGINAL_SENDER_EMAIL_ADDRESS: "MAPI_ORIGINAL_SENDER_EMAIL_ADDRESS",
MAPI_ORIGINAL_SENT_REPRESENTING_ADDRTYPE: "MAPI_ORIGINAL_SENT_REPRESENTING_ADDRTYPE",
MAPI_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS: "MAPI_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS",
MAPI_CONVERSATION_TOPIC: "MAPI_CONVERSATION_TOPIC",
MAPI_CONVERSATION_INDEX: "MAPI_CONVERSATION_INDEX",
MAPI_ORIGINAL_DISPLAY_BCC: "MAPI_ORIGINAL_DISPLAY_BCC",
MAPI_ORIGINAL_DISPLAY_CC: "MAPI_ORIGINAL_DISPLAY_CC",
MAPI_ORIGINAL_DISPLAY_TO: "MAPI_ORIGINAL_DISPLAY_TO",
MAPI_RECEIVED_BY_ADDRTYPE: "MAPI_RECEIVED_BY_ADDRTYPE",
MAPI_RECEIVED_BY_EMAIL_ADDRESS: "MAPI_RECEIVED_BY_EMAIL_ADDRESS",
MAPI_RCVD_REPRESENTING_ADDRTYPE: "MAPI_RCVD_REPRESENTING_ADDRTYPE",
MAPI_RCVD_REPRESENTING_EMAIL_ADDRESS: "MAPI_RCVD_REPRESENTING_EMAIL_ADDRESS",
MAPI_ORIGINAL_AUTHOR_ADDRTYPE: "MAPI_ORIGINAL_AUTHOR_ADDRTYPE",
MAPI_ORIGINAL_AUTHOR_EMAIL_ADDRESS: "MAPI_ORIGINAL_AUTHOR_EMAIL_ADDRESS",
MAPI_ORIGINALLY_INTENDED_RECIP_ADDRTYPE: "MAPI_ORIGINALLY_INTENDED_RECIP_ADDRTYPE",
MAPI_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS: "MAPI_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS",
MAPI_TRANSPORT_MESSAGE_HEADERS: "MAPI_TRANSPORT_MESSAGE_HEADERS",
MAPI_DELEGATION: "MAPI_DELEGATION",
MAPI_TNEF_CORRELATION_KEY: "MAPI_TNEF_CORRELATION_KEY",
MAPI_CONTENT_INTEGRITY_CHECK: "MAPI_CONTENT_INTEGRITY_CHECK",
MAPI_EXPLICIT_CONVERSION: "MAPI_EXPLICIT_CONVERSION",
MAPI_IPM_RETURN_REQUESTED: "MAPI_IPM_RETURN_REQUESTED",
MAPI_MESSAGE_TOKEN: "MAPI_MESSAGE_TOKEN",
MAPI_NDR_REASON_CODE: "MAPI_NDR_REASON_CODE",
MAPI_NDR_DIAG_CODE: "MAPI_NDR_DIAG_CODE",
MAPI_NON_RECEIPT_NOTIFICATION_REQUESTED: "MAPI_NON_RECEIPT_NOTIFICATION_REQUESTED",
MAPI_DELIVERY_POINT: "MAPI_DELIVERY_POINT",
MAPI_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED: "MAPI_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED",
MAPI_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT: "MAPI_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT",
MAPI_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY: "MAPI_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY",
MAPI_PHYSICAL_DELIVERY_MODE: "MAPI_PHYSICAL_DELIVERY_MODE",
MAPI_PHYSICAL_DELIVERY_REPORT_REQUEST: "MAPI_PHYSICAL_DELIVERY_REPORT_REQUEST",
MAPI_PHYSICAL_FORWARDING_ADDRESS: "MAPI_PHYSICAL_FORWARDING_ADDRESS",
MAPI_PHYSICAL_FORWARDING_ADDRESS_REQUESTED: "MAPI_PHYSICAL_FORWARDING_ADDRESS_REQUESTED",
MAPI_PHYSICAL_FORWARDING_PROHIBITED: "MAPI_PHYSICAL_FORWARDING_PROHIBITED",
MAPI_PHYSICAL_RENDITION_ATTRIBUTES: "MAPI_PHYSICAL_RENDITION_ATTRIBUTES",
MAPI_PROOF_OF_DELIVERY: "MAPI_PROOF_OF_DELIVERY",
MAPI_PROOF_OF_DELIVERY_REQUESTED: "MAPI_PROOF_OF_DELIVERY_REQUESTED",
MAPI_RECIPIENT_CERTIFICATE: "MAPI_RECIPIENT_CERTIFICATE",
MAPI_RECIPIENT_NUMBER_FOR_ADVICE: "MAPI_RECIPIENT_NUMBER_FOR_ADVICE",
MAPI_RECIPIENT_TYPE: "MAPI_RECIPIENT_TYPE",
MAPI_REGISTERED_MAIL_TYPE: "MAPI_REGISTERED_MAIL_TYPE",
MAPI_REPLY_REQUESTED: "MAPI_REPLY_REQUESTED",
MAPI_REQUESTED_DELIVERY_METHOD: "MAPI_REQUESTED_DELIVERY_METHOD",
MAPI_SENDER_ENTRYID: "MAPI_SENDER_ENTRYID",
MAPI_SENDER_NAME: "MAPI_SENDER_NAME",
MAPI_SUPPLEMENTARY_INFO: "MAPI_SUPPLEMENTARY_INFO",
MAPI_TYPE_OF_MTS_USER: "MAPI_TYPE_OF_MTS_USER",
MAPI_SENDER_SEARCH_KEY: "MAPI_SENDER_SEARCH_KEY",
MAPI_SENDER_ADDRTYPE: "MAPI_SENDER_ADDRTYPE",
MAPI_SENDER_EMAIL_ADDRESS: "MAPI_SENDER_EMAIL_ADDRESS",
MAPI_CURRENT_VERSION: "MAPI_CURRENT_VERSION",
MAPI_DELETE_AFTER_SUBMIT: "MAPI_DELETE_AFTER_SUBMIT",
MAPI_DISPLAY_BCC: "MAPI_DISPLAY_BCC",
MAPI_DISPLAY_CC: "MAPI_DISPLAY_CC",
MAPI_DISPLAY_TO: "MAPI_DISPLAY_TO",
MAPI_PARENT_DISPLAY: "MAPI_PARENT_DISPLAY",
MAPI_MESSAGE_DELIVERY_TIME: "MAPI_MESSAGE_DELIVERY_TIME",
MAPI_MESSAGE_FLAGS: "MAPI_MESSAGE_FLAGS",
MAPI_MESSAGE_SIZE: "MAPI_MESSAGE_SIZE",
MAPI_PARENT_ENTRYID: "MAPI_PARENT_ENTRYID",
MAPI_SENTMAIL_ENTRYID: "MAPI_SENTMAIL_ENTRYID",
MAPI_CORRELATE: "MAPI_CORRELATE",
MAPI_CORRELATE_MTSID: "MAPI_CORRELATE_MTSID",
MAPI_DISCRETE_VALUES: "MAPI_DISCRETE_VALUES",
MAPI_RESPONSIBILITY: "MAPI_RESPONSIBILITY",
MAPI_SPOOLER_STATUS: "MAPI_SPOOLER_STATUS",
MAPI_TRANSPORT_STATUS: "MAPI_TRANSPORT_STATUS",
MAPI_MESSAGE_RECIPIENTS: "MAPI_MESSAGE_RECIPIENTS",
MAPI_MESSAGE_ATTACHMENTS: "MAPI_MESSAGE_ATTACHMENTS",
MAPI_SUBMIT_FLAGS: "MAPI_SUBMIT_FLAGS",
MAPI_RECIPIENT_STATUS: "MAPI_RECIPIENT_STATUS",
MAPI_TRANSPORT_KEY: "MAPI_TRANSPORT_KEY",
MAPI_MSG_STATUS: "MAPI_MSG_STATUS",
MAPI_MESSAGE_DOWNLOAD_TIME: "MAPI_MESSAGE_DOWNLOAD_TIME",
MAPI_CREATION_VERSION: "MAPI_CREATION_VERSION",
MAPI_MODIFY_VERSION: "MAPI_MODIFY_VERSION",
MAPI_HASATTACH: "MAPI_HASATTACH",
MAPI_BODY_CRC: "MAPI_BODY_CRC",
MAPI_NORMALIZED_SUBJECT: "MAPI_NORMALIZED_SUBJECT",
MAPI_RTF_IN_SYNC: "MAPI_RTF_IN_SYNC",
MAPI_ATTACH_SIZE: "MAPI_ATTACH_SIZE",
MAPI_ATTACH_NUM: "MAPI_ATTACH_NUM",
MAPI_PREPROCESS: "MAPI_PREPROCESS",
MAPI_ORIGINATING_MTA_CERTIFICATE: "MAPI_ORIGINATING_MTA_CERTIFICATE",
MAPI_PROOF_OF_SUBMISSION: "MAPI_PROOF_OF_SUBMISSION",
MAPI_PRIMARY_SEND_ACCOUNT: "MAPI_PRIMARY_SEND_ACCOUNT",
MAPI_NEXT_SEND_ACCT: "MAPI_NEXT_SEND_ACCT",
MAPI_ACCESS: "MAPI_ACCESS",
MAPI_ROW_TYPE: "MAPI_ROW_TYPE",
MAPI_INSTANCE_KEY: "MAPI_INSTANCE_KEY",
MAPI_ACCESS_LEVEL: "MAPI_ACCESS_LEVEL",
MAPI_MAPPING_SIGNATURE: "MAPI_MAPPING_SIGNATURE",
MAPI_RECORD_KEY: "MAPI_RECORD_KEY",
MAPI_STORE_RECORD_KEY: "MAPI_STORE_RECORD_KEY",
MAPI_STORE_ENTRYID: "MAPI_STORE_ENTRYID",
MAPI_MINI_ICON: "MAPI_MINI_ICON",
MAPI_ICON: "MAPI_ICON",
MAPI_OBJECT_TYPE: "MAPI_OBJECT_TYPE",
MAPI_ENTRYID: "MAPI_ENTRYID",
MAPI_BODY: "MAPI_BODY",
MAPI_REPORT_TEXT: "MAPI_REPORT_TEXT",
MAPI_ORIGINATOR_AND_DL_EXPANSION_HISTORY: "MAPI_ORIGINATOR_AND_DL_EXPANSION_HISTORY",
MAPI_REPORTING_DL_NAME: "MAPI_REPORTING_DL_NAME",
MAPI_REPORTING_MTA_CERTIFICATE: "MAPI_REPORTING_MTA_CERTIFICATE",
MAPI_RTF_SYNC_BODY_CRC: "MAPI_RTF_SYNC_BODY_CRC",
MAPI_RTF_SYNC_BODY_COUNT: "MAPI_RTF_SYNC_BODY_COUNT",
MAPI_RTF_SYNC_BODY_TAG: "MAPI_RTF_SYNC_BODY_TAG",
MAPI_RTF_COMPRESSED: "MAPI_RTF_COMPRESSED",
MAPI_RTF_SYNC_PREFIX_COUNT: "MAPI_RTF_SYNC_PREFIX_COUNT",
MAPI_RTF_SYNC_TRAILING_COUNT: "MAPI_RTF_SYNC_TRAILING_COUNT",
MAPI_ORIGINALLY_INTENDED_RECIP_ENTRYID: "MAPI_ORIGINALLY_INTENDED_RECIP_ENTRYID",
MAPI_BODY_HTML: "MAPI_BODY_HTML",
MAPI_NATIVE_BODY: "MAPI_NATIVE_BODY",
MAPI_SMTP_MESSAGE_ID: "MAPI_SMTP_MESSAGE_ID",
MAPI_INTERNET_REFERENCES: "MAPI_INTERNET_REFERENCES",
MAPI_IN_REPLY_TO_ID: "MAPI_IN_REPLY_TO_ID",
MAPI_INTERNET_RETURN_PATH: "MAPI_INTERNET_RETURN_PATH",
MAPI_ICON_INDEX: "MAPI_ICON_INDEX",
MAPI_LAST_VERB_EXECUTED: "MAPI_LAST_VERB_EXECUTED",
MAPI_LAST_VERB_EXECUTION_TIME: "MAPI_LAST_VERB_EXECUTION_TIME",
MAPI_URL_COMP_NAME: "MAPI_URL_COMP_NAME",
MAPI_ATTRIBUTE_HIDDEN: "MAPI_ATTRIBUTE_HIDDEN",
MAPI_ATTRIBUTE_SYSTEM: "MAPI_ATTRIBUTE_SYSTEM",
MAPI_ATTRIBUTE_READ_ONLY: "MAPI_ATTRIBUTE_READ_ONLY",
MAPI_ROWID: "MAPI_ROWID",
MAPI_DISPLAY_NAME: "MAPI_DISPLAY_NAME",
MAPI_ADDRTYPE: "MAPI_ADDRTYPE",
MAPI_EMAIL_ADDRESS: "MAPI_EMAIL_ADDRESS",
MAPI_COMMENT: "MAPI_COMMENT",
MAPI_DEPTH: "MAPI_DEPTH",
MAPI_PROVIDER_DISPLAY: "MAPI_PROVIDER_DISPLAY",
MAPI_CREATION_TIME: "MAPI_CREATION_TIME",
MAPI_LAST_MODIFICATION_TIME: "MAPI_LAST_MODIFICATION_TIME",
MAPI_RESOURCE_FLAGS: "MAPI_RESOURCE_FLAGS",
MAPI_PROVIDER_DLL_NAME: "MAPI_PROVIDER_DLL_NAME",
MAPI_SEARCH_KEY: "MAPI_SEARCH_KEY",
MAPI_PROVIDER_UID: "MAPI_PROVIDER_UID",
MAPI_PROVIDER_ORDINAL: "MAPI_PROVIDER_ORDINAL",
MAPI_TARGET_ENTRY_ID: "MAPI_TARGET_ENTRY_ID",
MAPI_CONVERSATION_ID: "MAPI_CONVERSATION_ID",
MAPI_CONVERSATION_INDEX_TRACKING: "MAPI_CONVERSATION_INDEX_TRACKING",
MAPI_FORM_VERSION: "MAPI_FORM_VERSION",
MAPI_FORM_CLSID: "MAPI_FORM_CLSID",
MAPI_FORM_CONTACT_NAME: "MAPI_FORM_CONTACT_NAME",
MAPI_FORM_CATEGORY: "MAPI_FORM_CATEGORY",
MAPI_FORM_CATEGORY_SUB: "MAPI_FORM_CATEGORY_SUB",
MAPI_FORM_HOST_MAP: "MAPI_FORM_HOST_MAP",
MAPI_FORM_HIDDEN: "MAPI_FORM_HIDDEN",
MAPI_FORM_DESIGNER_NAME: "MAPI_FORM_DESIGNER_NAME",
MAPI_FORM_DESIGNER_GUID: "MAPI_FORM_DESIGNER_GUID",
MAPI_FORM_MESSAGE_BEHAVIOR: "MAPI_FORM_MESSAGE_BEHAVIOR",
MAPI_DEFAULT_STORE: "MAPI_DEFAULT_STORE",
MAPI_STORE_SUPPORT_MASK: "MAPI_STORE_SUPPORT_MASK",
MAPI_STORE_STATE: "MAPI_STORE_STATE",
MAPI_STORE_UNICODE_MASK: "MAPI_STORE_UNICODE_MASK",
MAPI_IPM_SUBTREE_SEARCH_KEY: "MAPI_IPM_SUBTREE_SEARCH_KEY",
MAPI_IPM_OUTBOX_SEARCH_KEY: "MAPI_IPM_OUTBOX_SEARCH_KEY",
MAPI_IPM_WASTEBASKET_SEARCH_KEY: "MAPI_IPM_WASTEBASKET_SEARCH_KEY",
MAPI_IPM_SENTMAIL_SEARCH_KEY: "MAPI_IPM_SENTMAIL_SEARCH_KEY",
MAPI_MDB_PROVIDER: "MAPI_MDB_PROVIDER",
MAPI_RECEIVE_FOLDER_SETTINGS: "MAPI_RECEIVE_FOLDER_SETTINGS",
MAPI_VALID_FOLDER_MASK: "MAPI_VALID_FOLDER_MASK",
MAPI_IPM_SUBTREE_ENTRYID: "MAPI_IPM_SUBTREE_ENTRYID",
MAPI_IPM_OUTBOX_ENTRYID: "MAPI_IPM_OUTBOX_ENTRYID",
MAPI_IPM_WASTEBASKET_ENTRYID: "MAPI_IPM_WASTEBASKET_ENTRYID",
MAPI_IPM_SENTMAIL_ENTRYID: "MAPI_IPM_SENTMAIL_ENTRYID",
MAPI_VIEWS_ENTRYID: "MAPI_VIEWS_ENTRYID",
MAPI_COMMON_VIEWS_ENTRYID: "MAPI_COMMON_VIEWS_ENTRYID",
MAPI_FINDER_ENTRYID: "MAPI_FINDER_ENTRYID",
MAPI_CONTAINER_FLAGS: "MAPI_CONTAINER_FLAGS",
MAPI_FOLDER_TYPE: "MAPI_FOLDER_TYPE",
MAPI_CONTENT_COUNT: "MAPI_CONTENT_COUNT",
MAPI_CONTENT_UNREAD: "MAPI_CONTENT_UNREAD",
MAPI_CREATE_TEMPLATES: "MAPI_CREATE_TEMPLATES",
MAPI_DETAILS_TABLE: "MAPI_DETAILS_TABLE",
MAPI_SEARCH: "MAPI_SEARCH",
MAPI_SELECTABLE: "MAPI_SELECTABLE",
MAPI_SUBFOLDERS: "MAPI_SUBFOLDERS",
MAPI_STATUS: "MAPI_STATUS",
MAPI_ANR: "MAPI_ANR",
MAPI_CONTENTS_SORT_ORDER: "MAPI_CONTENTS_SORT_ORDER",
MAPI_CONTAINER_HIERARCHY: "MAPI_CONTAINER_HIERARCHY",
MAPI_CONTAINER_CONTENTS: "MAPI_CONTAINER_CONTENTS",
MAPI_FOLDER_ASSOCIATED_CONTENTS: "MAPI_FOLDER_ASSOCIATED_CONTENTS",
MAPI_DEF_CREATE_DL: "MAPI_DEF_CREATE_DL",
MAPI_DEF_CREATE_MAILUSER: "MAPI_DEF_CREATE_MAILUSER",
MAPI_CONTAINER_CLASS: "MAPI_CONTAINER_CLASS",
MAPI_CONTAINER_MODIFY_VERSION: "MAPI_CONTAINER_MODIFY_VERSION",
MAPI_AB_PROVIDER_ID: "MAPI_AB_PROVIDER_ID",
MAPI_DEFAULT_VIEW_ENTRYID: "MAPI_DEFAULT_VIEW_ENTRYID",
MAPI_ASSOC_CONTENT_COUNT: "MAPI_ASSOC_CONTENT_COUNT",
MAPI_ATTACHMENT_X400_PARAMETERS: "MAPI_ATTACHMENT_X400_PARAMETERS",
MAPI_ATTACH_DATA_OBJ: "MAPI_ATTACH_DATA_OBJ",
MAPI_ATTACH_ENCODING: "MAPI_ATTACH_ENCODING",
MAPI_ATTACH_EXTENSION: "MAPI_ATTACH_EXTENSION",
MAPI_ATTACH_FILENAME: "MAPI_ATTACH_FILENAME",
MAPI_ATTACH_METHOD: "MAPI_ATTACH_METHOD",
MAPI_ATTACH_LONG_FILENAME: "MAPI_ATTACH_LONG_FILENAME",
MAPI_ATTACH_PATHNAME: "MAPI_ATTACH_PATHNAME",
MAPI_ATTACH_RENDERING: "MAPI_ATTACH_RENDERING",
MAPI_ATTACH_TAG: "MAPI_ATTACH_TAG",
MAPI_RENDERING_POSITION: "MAPI_RENDERING_POSITION",
MAPI_ATTACH_TRANSPORT_NAME: "MAPI_ATTACH_TRANSPORT_NAME",
MAPI_ATTACH_LONG_PATHNAME: "MAPI_ATTACH_LONG_PATHNAME",
MAPI_ATTACH_MIME_TAG: "MAPI_ATTACH_MIME_TAG",
MAPI_ATTACH_ADDITIONAL_INFO: "MAPI_ATTACH_ADDITIONAL_INFO",
MAPI_ATTACH_MIME_SEQUENCE: "MAPI_ATTACH_MIME_SEQUENCE",
MAPI_ATTACH_CONTENT_ID: "MAPI_ATTACH_CONTENT_ID",
MAPI_ATTACH_CONTENT_LOCATION: "MAPI_ATTACH_CONTENT_LOCATION",
MAPI_ATTACH_FLAGS: "MAPI_ATTACH_FLAGS",
MAPI_DISPLAY_TYPE: "MAPI_DISPLAY_TYPE",
MAPI_TEMPLATEID: "MAPI_TEMPLATEID",
MAPI_PRIMARY_CAPABILITY: "MAPI_PRIMARY_CAPABILITY",
MAPI_SMTP_ADDRESS: "MAPI_SMTP_ADDRESS",
MAPI_7BIT_DISPLAY_NAME: "MAPI_7BIT_DISPLAY_NAME",
MAPI_ACCOUNT: "MAPI_ACCOUNT",
MAPI_ALTERNATE_RECIPIENT: "MAPI_ALTERNATE_RECIPIENT",
MAPI_CALLBACK_TELEPHONE_NUMBER: "MAPI_CALLBACK_TELEPHONE_NUMBER",
MAPI_CONVERSION_PROHIBITED: "MAPI_CONVERSION_PROHIBITED",
MAPI_DISCLOSE_RECIPIENTS: "MAPI_DISCLOSE_RECIPIENTS",
MAPI_GENERATION: "MAPI_GENERATION",
MAPI_GIVEN_NAME: "MAPI_GIVEN_NAME",
MAPI_GOVERNMENT_ID_NUMBER: "MAPI_GOVERNMENT_ID_NUMBER",
MAPI_BUSINESS_TELEPHONE_NUMBER: "MAPI_BUSINESS_TELEPHONE_NUMBER",
MAPI_HOME_TELEPHONE_NUMBER: "MAPI_HOME_TELEPHONE_NUMBER",
MAPI_INITIALS: "MAPI_INITIALS",
MAPI_KEYWORD: "MAPI_KEYWORD",
MAPI_LANGUAGE: "MAPI_LANGUAGE",
MAPI_LOCATION: "MAPI_LOCATION",
MAPI_MAIL_PERMISSION: "MAPI_MAIL_PERMISSION",
MAPI_MHS_COMMON_NAME: "MAPI_MHS_COMMON_NAME",
MAPI_ORGANIZATIONAL_ID_NUMBER: "MAPI_ORGANIZATIONAL_ID_NUMBER",
MAPI_SURNAME: "MAPI_SURNAME",
MAPI_ORIGINAL_ENTRYID: "MAPI_ORIGINAL_ENTRYID",
MAPI_ORIGINAL_DISPLAY_NAME: "MAPI_ORIGINAL_DISPLAY_NAME",
MAPI_ORIGINAL_SEARCH_KEY: "MAPI_ORIGINAL_SEARCH_KEY",
MAPI_POSTAL_ADDRESS: "MAPI_POSTAL_ADDRESS",
MAPI_COMPANY_NAME: "MAPI_COMPANY_NAME",
MAPI_TITLE: "MAPI_TITLE",
MAPI_DEPARTMENT_NAME: "MAPI_DEPARTMENT_NAME",
MAPI_OFFICE_LOCATION: "MAPI_OFFICE_LOCATION",
MAPI_PRIMARY_TELEPHONE_NUMBER: "MAPI_PRIMARY_TELEPHONE_NUMBER",
MAPI_BUSINESS2_TELEPHONE_NUMBER: "MAPI_BUSINESS2_TELEPHONE_NUMBER",
MAPI_MOBILE_TELEPHONE_NUMBER: "MAPI_MOBILE_TELEPHONE_NUMBER",
MAPI_RADIO_TELEPHONE_NUMBER: "MAPI_RADIO_TELEPHONE_NUMBER",
MAPI_CAR_TELEPHONE_NUMBER: "MAPI_CAR_TELEPHONE_NUMBER",
MAPI_OTHER_TELEPHONE_NUMBER: "MAPI_OTHER_TELEPHONE_NUMBER",
MAPI_TRANSMITABLE_DISPLAY_NAME: "MAPI_TRANSMITABLE_DISPLAY_NAME",
MAPI_PAGER_TELEPHONE_NUMBER: "MAPI_PAGER_TELEPHONE_NUMBER",
MAPI_USER_CERTIFICATE: "MAPI_USER_CERTIFICATE",
MAPI_PRIMARY_FAX_NUMBER: "MAPI_PRIMARY_FAX_NUMBER",
MAPI_BUSINESS_FAX_NUMBER: "MAPI_BUSINESS_FAX_NUMBER",
MAPI_HOME_FAX_NUMBER: "MAPI_HOME_FAX_NUMBER",
MAPI_COUNTRY: "MAPI_COUNTRY",
MAPI_LOCALITY: "MAPI_LOCALITY",
MAPI_STATE_OR_PROVINCE: "MAPI_STATE_OR_PROVINCE",
MAPI_STREET_ADDRESS: "MAPI_STREET_ADDRESS",
MAPI_POSTAL_CODE: "MAPI_POSTAL_CODE",
MAPI_POST_OFFICE_BOX: "MAPI_POST_OFFICE_BOX",
MAPI_TELEX_NUMBER: "MAPI_TELEX_NUMBER",
MAPI_ISDN_NUMBER: "MAPI_ISDN_NUMBER",
MAPI_ASSISTANT_TELEPHONE_NUMBER: "MAPI_ASSISTANT_TELEPHONE_NUMBER",
MAPI_HOME2_TELEPHONE_NUMBER: "MAPI_HOME2_TELEPHONE_NUMBER",
MAPI_ASSISTANT: "MAPI_ASSISTANT",
MAPI_SEND_RICH_INFO: "MAPI_SEND_RICH_INFO",
MAPI_WEDDING_ANNIVERSARY: "MAPI_WEDDING_ANNIVERSARY",
MAPI_BIRTHDAY: "MAPI_BIRTHDAY",
MAPI_HOBBIES: "MAPI_HOBBIES",
MAPI_MIDDLE_NAME: "MAPI_MIDDLE_NAME",
MAPI_DISPLAY_NAME_PREFIX: "MAPI_DISPLAY_NAME_PREFIX",
MAPI_PROFESSION: "MAPI_PROFESSION",
MAPI_PREFERRED_BY_NAME: "MAPI_PREFERRED_BY_NAME",
MAPI_SPOUSE_NAME: "MAPI_SPOUSE_NAME",
MAPI_COMPUTER_NETWORK_NAME: "MAPI_COMPUTER_NETWORK_NAME",
MAPI_CUSTOMER_ID: "MAPI_CUSTOMER_ID",
MAPI_TTYTDD_PHONE_NUMBER: "MAPI_TTYTDD_PHONE_NUMBER",
MAPI_FTP_SITE: "MAPI_FTP_SITE",
MAPI_GENDER: "MAPI_GENDER",
MAPI_MANAGER_NAME: "MAPI_MANAGER_NAME",
MAPI_NICKNAME: "MAPI_NICKNAME",
MAPI_PERSONAL_HOME_PAGE: "MAPI_PERSONAL_HOME_PAGE",
MAPI_BUSINESS_HOME_PAGE: "MAPI_BUSINESS_HOME_PAGE",
MAPI_CONTACT_VERSION: "MAPI_CONTACT_VERSION",
MAPI_CONTACT_ENTRYIDS: "MAPI_CONTACT_ENTRYIDS",
MAPI_CONTACT_ADDRTYPES: "MAPI_CONTACT_ADDRTYPES",
MAPI_CONTACT_DEFAULT_ADDRESS_INDEX: "MAPI_CONTACT_DEFAULT_ADDRESS_INDEX",
MAPI_CONTACT_EMAIL_ADDRESSES: "MAPI_CONTACT_EMAIL_ADDRESSES",
MAPI_COMPANY_MAIN_PHONE_NUMBER: "MAPI_COMPANY_MAIN_PHONE_NUMBER",
MAPI_CHILDRENS_NAMES: "MAPI_CHILDRENS_NAMES",
MAPI_HOME_ADDRESS_CITY: "MAPI_HOME_ADDRESS_CITY",
MAPI_HOME_ADDRESS_COUNTRY: "MAPI_HOME_ADDRESS_COUNTRY",
MAPI_HOME_ADDRESS_POSTAL_CODE: "MAPI_HOME_ADDRESS_POSTAL_CODE",
MAPI_HOME_ADDRESS_STATE_OR_PROVINCE: "MAPI_HOME_ADDRESS_STATE_OR_PROVINCE",
MAPI_HOME_ADDRESS_STREET: "MAPI_HOME_ADDRESS_STREET",
MAPI_HOME_ADDRESS_POST_OFFICE_BOX: "MAPI_HOME_ADDRESS_POST_OFFICE_BOX",
MAPI_OTHER_ADDRESS_CITY: "MAPI_OTHER_ADDRESS_CITY",
MAPI_OTHER_ADDRESS_COUNTRY: "MAPI_OTHER_ADDRESS_COUNTRY",
MAPI_OTHER_ADDRESS_POSTAL_CODE: "MAPI_OTHER_ADDRESS_POSTAL_CODE",
MAPI_OTHER_ADDRESS_STATE_OR_PROVINCE: "MAPI_OTHER_ADDRESS_STATE_OR_PROVINCE",
MAPI_OTHER_ADDRESS_STREET: "MAPI_OTHER_ADDRESS_STREET",
MAPI_OTHER_ADDRESS_POST_OFFICE_BOX: "MAPI_OTHER_ADDRESS_POST_OFFICE_BOX",
MAPI_SEND_INTERNET_ENCODING: "MAPI_SEND_INTERNET_ENCODING",
MAPI_STORE_PROVIDERS: "MAPI_STORE_PROVIDERS",
MAPI_AB_PROVIDERS: "MAPI_AB_PROVIDERS",
MAPI_TRANSPORT_PROVIDERS: "MAPI_TRANSPORT_PROVIDERS",
MAPI_DEFAULT_PROFILE: "MAPI_DEFAULT_PROFILE",
MAPI_AB_SEARCH_PATH: "MAPI_AB_SEARCH_PATH",
MAPI_AB_DEFAULT_DIR: "MAPI_AB_DEFAULT_DIR",
MAPI_AB_DEFAULT_PAB: "MAPI_AB_DEFAULT_PAB",
MAPI_FILTERING_HOOKS: "MAPI_FILTERING_HOOKS",
MAPI_SERVICE_NAME: "MAPI_SERVICE_NAME",
MAPI_SERVICE_DLL_NAME: "MAPI_SERVICE_DLL_NAME",
MAPI_SERVICE_ENTRY_NAME: "MAPI_SERVICE_ENTRY_NAME",
MAPI_SERVICE_UID: "MAPI_SERVICE_UID",
MAPI_SERVICE_EXTRA_UIDS: "MAPI_SERVICE_EXTRA_UIDS",
MAPI_SERVICES: "MAPI_SERVICES",
MAPI_SERVICE_SUPPORT_FILES: "MAPI_SERVICE_SUPPORT_FILES",
MAPI_SERVICE_DELETE_FILES: "MAPI_SERVICE_DELETE_FILES",
MAPI_AB_SEARCH_PATH_UPDATE: "MAPI_AB_SEARCH_PATH_UPDATE",
MAPI_PROFILE_NAME: "MAPI_PROFILE_NAME",
MAPI_IDENTITY_DISPLAY: "MAPI_IDENTITY_DISPLAY",
MAPI_IDENTITY_ENTRYID: "MAPI_IDENTITY_ENTRYID",
MAPI_RESOURCE_METHODS: "MAPI_RESOURCE_METHODS",
MAPI_RESOURCE_TYPE: "MAPI_RESOURCE_TYPE",
MAPI_STATUS_CODE: "MAPI_STATUS_CODE",
MAPI_IDENTITY_SEARCH_KEY: "MAPI_IDENTITY_SEARCH_KEY",
MAPI_OWN_STORE_ENTRYID: "MAPI_OWN_STORE_ENTRYID",
MAPI_RESOURCE_PATH: "MAPI_RESOURCE_PATH",
MAPI_STATUS_STRING: "MAPI_STATUS_STRING",
MAPI_X400_DEFERRED_DELIVERY_CANCEL: "MAPI_X400_DEFERRED_DELIVERY_CANCEL",
MAPI_HEADER_FOLDER_ENTRYID: "MAPI_HEADER_FOLDER_ENTRYID",
MAPI_REMOTE_PROGRESS: "MAPI_REMOTE_PROGRESS",
MAPI_REMOTE_PROGRESS_TEXT: "MAPI_REMOTE_PROGRESS_TEXT",
MAPI_REMOTE_VALIDATE_OK: "MAPI_REMOTE_VALIDATE_OK",
MAPI_CONTROL_FLAGS: "MAPI_CONTROL_FLAGS",
MAPI_CONTROL_STRUCTURE: "MAPI_CONTROL_STRUCTURE",
MAPI_CONTROL_TYPE: "MAPI_CONTROL_TYPE",
MAPI_DELTAX: "MAPI_DELTAX",
MAPI_DELTAY: "MAPI_DELTAY",
MAPI_XPOS: "MAPI_XPOS",
MAPI_YPOS: "MAPI_YPOS",
MAPI_CONTROL_ID: "MAPI_CONTROL_ID",
MAPI_INITIAL_DETAILS_PANE: "MAPI_INITIAL_DETAILS_PANE",
MAPI_UNCOMPRESSED_BODY: "MAPI_UNCOMPRESSED_BODY",
MAPI_INTERNET_CODEPAGE: "MAPI_INTERNET_CODEPAGE",
MAPI_AUTO_RESPONSE_SUPPRESS: "MAPI_AUTO_RESPONSE_SUPPRESS",
MAPI_MESSAGE_LOCALE_ID: "MAPI_MESSAGE_LOCALE_ID",
MAPI_RULE_TRIGGER_HISTORY: "MAPI_RULE_TRIGGER_HISTORY",
MAPI_MOVE_TO_STORE_ENTRYID: "MAPI_MOVE_TO_STORE_ENTRYID",
MAPI_MOVE_TO_FOLDER_ENTRYID: "MAPI_MOVE_TO_FOLDER_ENTRYID",
MAPI_STORAGE_QUOTA_LIMIT: "MAPI_STORAGE_QUOTA_LIMIT",
MAPI_EXCESS_STORAGE_USED: "MAPI_EXCESS_STORAGE_USED",
MAPI_SVR_GENERATING_QUOTA_MSG: "MAPI_SVR_GENERATING_QUOTA_MSG",
MAPI_CREATOR_NAME: "MAPI_CREATOR_NAME",
MAPI_CREATOR_ENTRY_ID: "MAPI_CREATOR_ENTRY_ID",
MAPI_LAST_MODIFIER_NAME: "MAPI_LAST_MODIFIER_NAME",
MAPI_LAST_MODIFIER_ENTRY_ID: "MAPI_LAST_MODIFIER_ENTRY_ID",
MAPI_REPLY_RECIPIENT_SMTP_PROXIES: "MAPI_REPLY_RECIPIENT_SMTP_PROXIES",
MAPI_MESSAGE_CODEPAGE: "MAPI_MESSAGE_CODEPAGE",
MAPI_EXTENDED_ACL_DATA: "MAPI_EXTENDED_ACL_DATA",
MAPI_SENDER_FLAGS: "MAPI_SENDER_FLAGS",
MAPI_SENT_REPRESENTING_FLAGS: "MAPI_SENT_REPRESENTING_FLAGS",
MAPI_RECEIVED_BY_FLAGS: "MAPI_RECEIVED_BY_FLAGS",
MAPI_RECEIVED_REPRESENTING_FLAGS: "MAPI_RECEIVED_REPRESENTING_FLAGS",
MAPI_CREATOR_ADDRESS_TYPE: "MAPI_CREATOR_ADDRESS_TYPE",
MAPI_CREATOR_EMAIL_ADDRESS: "MAPI_CREATOR_EMAIL_ADDRESS",
MAPI_SENDER_SIMPLE_DISPLAY_NAME: "MAPI_SENDER_SIMPLE_DISPLAY_NAME",
MAPI_SENT_REPRESENTING_SIMPLE_DISPLAY_NAME: "MAPI_SENT_REPRESENTING_SIMPLE_DISPLAY_NAME",
MAPI_RECEIVED_REPRESENTING_SIMPLE_DISPLAY_NAME: "MAPI_RECEIVED_REPRESENTING_SIMPLE_DISPLAY_NAME",
MAPI_CREATOR_SIMPLE_DISP_NAME: "MAPI_CREATOR_SIMPLE_DISP_NAME",
MAPI_LAST_MODIFIER_SIMPLE_DISPLAY_NAME: "MAPI_LAST_MODIFIER_SIMPLE_DISPLAY_NAME",
MAPI_CONTENT_FILTER_SPAM_CONFIDENCE_LEVEL: "MAPI_CONTENT_FILTER_SPAM_CONFIDENCE_LEVEL",
MAPI_INTERNET_MAIL_OVERRIDE_FORMAT: "MAPI_INTERNET_MAIL_OVERRIDE_FORMAT",
MAPI_MESSAGE_EDITOR_FORMAT: "MAPI_MESSAGE_EDITOR_FORMAT",
MAPI_SENDER_SMTP_ADDRESS: "MAPI_SENDER_SMTP_ADDRESS",
MAPI_SENT_REPRESENTING_SMTP_ADDRESS: "MAPI_SENT_REPRESENTING_SMTP_ADDRESS",
MAPI_READ_RECEIPT_SMTP_ADDRESS: "MAPI_READ_RECEIPT_SMTP_ADDRESS",
MAPI_RECEIVED_BY_SMTP_ADDRESS: "MAPI_RECEIVED_BY_SMTP_ADDRESS",
MAPI_RECEIVED_REPRESENTING_SMTP_ADDRESS: "MAPI_RECEIVED_REPRESENTING_SMTP_ADDRESS",
MAPI_SENDING_SMTP_ADDRESS: "MAPI_SENDING_SMTP_ADDRESS",
MAPI_SIP_ADDRESS: "MAPI_SIP_ADDRESS",
MAPI_RECIPIENT_DISPLAY_NAME: "MAPI_RECIPIENT_DISPLAY_NAME",
MAPI_RECIPIENT_ENTRYID: "MAPI_RECIPIENT_ENTRYID",
MAPI_RECIPIENT_FLAGS: "MAPI_RECIPIENT_FLAGS",
MAPI_RECIPIENT_TRACKSTATUS: "MAPI_RECIPIENT_TRACKSTATUS",
MAPI_CHANGE_KEY: "MAPI_CHANGE_KEY",
MAPI_PREDECESSOR_CHANGE_LIST: "MAPI_PREDECESSOR_CHANGE_LIST",
MAPI_ID_SECURE_MIN: "MAPI_ID_SECURE_MIN",
MAPI_ID_SECURE_MAX: "MAPI_ID_SECURE_MAX",
MAPI_VOICE_MESSAGE_DURATION: "MAPI_VOICE_MESSAGE_DURATION",
MAPI_SENDER_TELEPHONE_NUMBER: "MAPI_SENDER_TELEPHONE_NUMBER",
MAPI_VOICE_MESSAGE_SENDER_NAME: "MAPI_VOICE_MESSAGE_SENDER_NAME",
MAPI_FAX_NUMBER_OF_PAGES: "MAPI_FAX_NUMBER_OF_PAGES",
MAPI_VOICE_MESSAGE_ATTACHMENT_ORDER: "MAPI_VOICE_MESSAGE_ATTACHMENT_ORDER",
MAPI_CALL_ID: "MAPI_CALL_ID",
MAPI_ATTACHMENT_LINK_ID: "MAPI_ATTACHMENT_LINK_ID",
MAPI_EXCEPTION_START_TIME: "MAPI_EXCEPTION_START_TIME",
MAPI_EXCEPTION_END_TIME: "MAPI_EXCEPTION_END_TIME",
MAPI_ATTACHMENT_FLAGS: "MAPI_ATTACHMENT_FLAGS",
MAPI_ATTACHMENT_HIDDEN: "MAPI_ATTACHMENT_HIDDEN",
MAPI_ATTACHMENT_CONTACT_PHOTO: "MAPI_ATTACHMENT_CONTACT_PHOTO",
MAPI_FILE_UNDER: "MAPI_FILE_UNDER",
MAPI_FILE_UNDER_ID: "MAPI_FILE_UNDER_ID",
MAPI_CONTACT_ITEM_DATA: "MAPI_CONTACT_ITEM_DATA",
MAPI_REFERRED_BY: "MAPI_REFERRED_BY",
MAPI_DEPARTMENT: "MAPI_DEPARTMENT",
MAPI_HAS_PICTURE: "MAPI_HAS_PICTURE",
MAPI_HOME_ADDRESS: "MAPI_HOME_ADDRESS",
MAPI_WORK_ADDRESS: "MAPI_WORK_ADDRESS",
MAPI_OTHER_ADDRESS: "MAPI_OTHER_ADDRESS",
MAPI_POSTAL_ADDRESS_ID: "MAPI_POSTAL_ADDRESS_ID",
MAPI_CONTACT_CHARACTER_SET: "MAPI_CONTACT_CHARACTER_SET",
MAPI_AUTO_LOG: "MAPI_AUTO_LOG",
MAPI_FILE_UNDER_LIST: "MAPI_FILE_UNDER_LIST",
MAPI_EMAIL_LIST: "MAPI_EMAIL_LIST",
MAPI_ADDRESS_BOOK_PROVIDER_EMAIL_LIST: "MAPI_ADDRESS_BOOK_PROVIDER_EMAIL_LIST",
MAPI_ADDRESS_BOOK_PROVIDER_ARRAY_TYPE: "MAPI_ADDRESS_BOOK_PROVIDER_ARRAY_TYPE",
MAPI_HTML: "MAPI_HTML",
MAPI_YOMI_FIRST_NAME: "MAPI_YOMI_FIRST_NAME",
MAPI_YOMI_LAST_NAME: "MAPI_YOMI_LAST_NAME",
MAPI_YOMI_COMPANY_NAME: "MAPI_YOMI_COMPANY_NAME",
MAPI_BUSINESS_CARD_DISPLAY_DEFINITION: "MAPI_BUSINESS_CARD_DISPLAY_DEFINITION",
MAPI_BUSINESS_CARD_CARD_PICTURE: "MAPI_BUSINESS_CARD_CARD_PICTURE",
MAPI_WORK_ADDRESS_STREET: "MAPI_WORK_ADDRESS_STREET",
MAPI_WORK_ADDRESS_CITY: "MAPI_WORK_ADDRESS_CITY",
MAPI_WORK_ADDRESS_STATE: "MAPI_WORK_ADDRESS_STATE",
MAPI_WORK_ADDRESS_POSTAL_CODE: "MAPI_WORK_ADDRESS_POSTAL_CODE",
MAPI_WORK_ADDRESS_COUNTRY: "MAPI_WORK_ADDRESS_COUNTRY",
MAPI_WORK_ADDRESS_POST_OFFICE_BOX: "MAPI_WORK_ADDRESS_POST_OFFICE_BOX",
MAPI_DISTRIBUTION_LIST_CHECKSUM: "MAPI_DISTRIBUTION_LIST_CHECKSUM",
MAPI_BIRTHDAY_EVENT_ENTRY_ID: "MAPI_BIRTHDAY_EVENT_ENTRY_ID",
MAPI_ANNIVERSARY_EVENT_ENTRY_ID: "MAPI_ANNIVERSARY_EVENT_ENTRY_ID",
MAPI_CONTACT_USER_FIELD1: "MAPI_CONTACT_USER_FIELD1",
MAPI_CONTACT_USER_FIELD2: "MAPI_CONTACT_USER_FIELD2",
MAPI_CONTACT_USER_FIELD3: "MAPI_CONTACT_USER_FIELD3",
MAPI_CONTACT_USER_FIELD4: "MAPI_CONTACT_USER_FIELD4",
MAPI_DISTRIBUTION_LIST_NAME: "MAPI_DISTRIBUTION_LIST_NAME",
MAPI_DISTRIBUTION_LIST_ONE_OFF_MEMBERS: "MAPI_DISTRIBUTION_LIST_ONE_OFF_MEMBERS",
MAPI_DISTRIBUTION_LIST_MEMBERS: "MAPI_DISTRIBUTION_LIST_MEMBERS",
MAPI_INSTANT_MESSAGING_ADDRESS: "MAPI_INSTANT_MESSAGING_ADDRESS",
MAPI_DISTRIBUTION_LIST_STREAM: "MAPI_DISTRIBUTION_LIST_STREAM",
MAPI_EMAIL_DISPLAY_NAME: "MAPI_EMAIL_DISPLAY_NAME",
MAPI_EMAIL_ADDR_TYPE: "MAPI_EMAIL_ADDR_TYPE",
MAPI_EMAIL_EMAIL_ADDRESS: "MAPI_EMAIL_EMAIL_ADDRESS",
MAPI_EMAIL_ORIGINAL_DISPLAY_NAME: "MAPI_EMAIL_ORIGINAL_DISPLAY_NAME",
MAPI_EMAIL1ORIGINAL_ENTRY_ID: "MAPI_EMAIL1ORIGINAL_ENTRY_ID",
MAPI_EMAIL1RICH_TEXT_FORMAT: "MAPI_EMAIL1RICH_TEXT_FORMAT",
MAPI_EMAIL1EMAIL_TYPE: "MAPI_EMAIL1EMAIL_TYPE",
MAPI_EMAIL2DISPLAY_NAME: "MAPI_EMAIL2DISPLAY_NAME",
MAPI_EMAIL2ENTRY_ID: "MAPI_EMAIL2ENTRY_ID",
MAPI_EMAIL2ADDR_TYPE: "MAPI_EMAIL2ADDR_TYPE",
MAPI_EMAIL2EMAIL_ADDRESS: "MAPI_EMAIL2EMAIL_ADDRESS",
MAPI_EMAIL2ORIGINAL_DISPLAY_NAME: "MAPI_EMAIL2ORIGINAL_DISPLAY_NAME",
MAPI_EMAIL2ORIGINAL_ENTRY_ID: "MAPI_EMAIL2ORIGINAL_ENTRY_ID",
MAPI_EMAIL2RICH_TEXT_FORMAT: "MAPI_EMAIL2RICH_TEXT_FORMAT",
MAPI_EMAIL3DISPLAY_NAME: "MAPI_EMAIL3DISPLAY_NAME",
MAPI_EMAIL3ENTRY_ID: "MAPI_EMAIL3ENTRY_ID",
MAPI_EMAIL3ADDR_TYPE: "MAPI_EMAIL3ADDR_TYPE",
MAPI_EMAIL3EMAIL_ADDRESS: "MAPI_EMAIL3EMAIL_ADDRESS",
MAPI_EMAIL3ORIGINAL_DISPLAY_NAME: "MAPI_EMAIL3ORIGINAL_DISPLAY_NAME",
MAPI_EMAIL3ORIGINAL_ENTRY_ID: "MAPI_EMAIL3ORIGINAL_ENTRY_ID",
MAPI_EMAIL3RICH_TEXT_FORMAT: "MAPI_EMAIL3RICH_TEXT_FORMAT",
MAPI_FAX1ADDRESS_TYPE: "MAPI_FAX1ADDRESS_TYPE",
MAPI_FAX1EMAIL_ADDRESS: "MAPI_FAX1EMAIL_ADDRESS",
MAPI_FAX1ORIGINAL_DISPLAY_NAME: "MAPI_FAX1ORIGINAL_DISPLAY_NAME",
MAPI_FAX1ORIGINAL_ENTRY_ID: "MAPI_FAX1ORIGINAL_ENTRY_ID",
MAPI_FAX2ADDRESS_TYPE: "MAPI_FAX2ADDRESS_TYPE",
MAPI_FAX2EMAIL_ADDRESS: "MAPI_FAX2EMAIL_ADDRESS",
MAPI_FAX2ORIGINAL_DISPLAY_NAME: "MAPI_FAX2ORIGINAL_DISPLAY_NAME",
MAPI_FAX2ORIGINAL_ENTRY_ID: "MAPI_FAX2ORIGINAL_ENTRY_ID",
MAPI_FAX3ADDRESS_TYPE: "MAPI_FAX3ADDRESS_TYPE",
MAPI_FAX3EMAIL_ADDRESS: "MAPI_FAX3EMAIL_ADDRESS",
MAPI_FAX3ORIGINAL_DISPLAY_NAME: "MAPI_FAX3ORIGINAL_DISPLAY_NAME",
MAPI_FAX3ORIGINAL_ENTRY_ID: "MAPI_FAX3ORIGINAL_ENTRY_ID",
MAPI_FREE_BUSY_LOCATION: "MAPI_FREE_BUSY_LOCATION",
MAPI_HOME_ADDRESS_COUNTRY_CODE: "MAPI_HOME_ADDRESS_COUNTRY_CODE",
MAPI_WORK_ADDRESS_COUNTRY_CODE: "MAPI_WORK_ADDRESS_COUNTRY_CODE",
MAPI_OTHER_ADDRESS_COUNTRY_CODE: "MAPI_OTHER_ADDRESS_COUNTRY_CODE",
MAPI_ADDRESS_COUNTRY_CODE: "MAPI_ADDRESS_COUNTRY_CODE",
MAPI_BIRTHDAY_LOCAL: "MAPI_BIRTHDAY_LOCAL",
MAPI_WEDDING_ANNIVERSARY_LOCAL: "MAPI_WEDDING_ANNIVERSARY_LOCAL",
MAPI_TASK_STATUS: "MAPI_TASK_STATUS",
MAPI_TASK_START_DATE: "MAPI_TASK_START_DATE",
MAPI_TASK_DUE_DATE: "MAPI_TASK_DUE_DATE",
MAPI_TASK_ACTUAL_EFFORT: "MAPI_TASK_ACTUAL_EFFORT",
MAPI_TASK_ESTIMATED_EFFORT: "MAPI_TASK_ESTIMATED_EFFORT",
MAPI_TASK_FRECUR: "MAPI_TASK_FRECUR",
MAPI_SEND_MEETING_AS_ICAL: "MAPI_SEND_MEETING_AS_ICAL",
MAPI_APPOINTMENT_SEQUENCE: "MAPI_APPOINTMENT_SEQUENCE",
MAPI_APPOINTMENT_SEQUENCE_TIME: "MAPI_APPOINTMENT_SEQUENCE_TIME",
MAPI_APPOINTMENT_LAST_SEQUENCE: "MAPI_APPOINTMENT_LAST_SEQUENCE",
MAPI_CHANGE_HIGHLIGHT: "MAPI_CHANGE_HIGHLIGHT",
MAPI_BUSY_STATUS: "MAPI_BUSY_STATUS",
MAPI_FEXCEPTIONAL_BODY: "MAPI_FEXCEPTIONAL_BODY",
MAPI_APPOINTMENT_AUXILIARY_FLAGS: "MAPI_APPOINTMENT_AUXILIARY_FLAGS",
MAPI_OUTLOOK_LOCATION: "MAPI_OUTLOOK_LOCATION",
MAPI_MEETING_WORKSPACE_URL: "MAPI_MEETING_WORKSPACE_URL",
MAPI_FORWARD_INSTANCE: "MAPI_FORWARD_INSTANCE",
MAPI_LINKED_TASK_ITEMS: "MAPI_LINKED_TASK_ITEMS",
MAPI_APPT_START_WHOLE: "MAPI_APPT_START_WHOLE",
MAPI_APPT_END_WHOLE: "MAPI_APPT_END_WHOLE",
MAPI_APPOINTMENT_START_TIME: "MAPI_APPOINTMENT_START_TIME",
MAPI_APPOINTMENT_END_TIME: "MAPI_APPOINTMENT_END_TIME",
MAPI_APPOINTMENT_END_DATE: "MAPI_APPOINTMENT_END_DATE",
MAPI_APPOINTMENT_START_DATE: "MAPI_APPOINTMENT_START_DATE",
MAPI_APPT_DURATION: "MAPI_APPT_DURATION",
MAPI_APPOINTMENT_COLOR: "MAPI_APPOINTMENT_COLOR",
MAPI_APPOINTMENT_SUB_TYPE: "MAPI_APPOINTMENT_SUB_TYPE",
MAPI_APPOINTMENT_RECUR: "MAPI_APPOINTMENT_RECUR",
MAPI_APPOINTMENT_STATE_FLAGS: "MAPI_APPOINTMENT_STATE_FLAGS",
MAPI_RESPONSE_STATUS: "MAPI_RESPONSE_STATUS",
MAPI_APPOINTMENT_REPLY_TIME: "MAPI_APPOINTMENT_REPLY_TIME",
MAPI_RECURRING: "MAPI_RECURRING",
MAPI_INTENDED_BUSY_STATUS: "MAPI_INTENDED_BUSY_STATUS",
MAPI_APPOINTMENT_UPDATE_TIME: "MAPI_APPOINTMENT_UPDATE_TIME",
MAPI_EXCEPTION_REPLACE_TIME: "MAPI_EXCEPTION_REPLACE_TIME",
MAPI_OWNER_NAME: "MAPI_OWNER_NAME",
MAPI_APPOINTMENT_REPLY_NAME: "MAPI_APPOINTMENT_REPLY_NAME",
MAPI_RECURRENCE_TYPE: "MAPI_RECURRENCE_TYPE",
MAPI_RECURRENCE_PATTERN: "MAPI_RECURRENCE_PATTERN",
MAPI_TIME_ZONE_STRUCT: "MAPI_TIME_ZONE_STRUCT",
MAPI_TIME_ZONE_DESCRIPTION: "MAPI_TIME_ZONE_DESCRIPTION",
MAPI_CLIP_START: "MAPI_CLIP_START",
MAPI_CLIP_END: "MAPI_CLIP_END",
MAPI_ORIGINAL_STORE_ENTRY_ID: "MAPI_ORIGINAL_STORE_ENTRY_ID",
MAPI_ALL_ATTENDEES_STRING: "MAPI_ALL_ATTENDEES_STRING",
MAPI_AUTO_FILL_LOCATION: "MAPI_AUTO_FILL_LOCATION",
MAPI_TO_ATTENDEES_STRING: "MAPI_TO_ATTENDEES_STRING",
MAPI_CCATTENDEES_STRING: "MAPI_CCATTENDEES_STRING",
MAPI_CONF_CHECK: "MAPI_CONF_CHECK",
MAPI_CONFERENCING_TYPE: "MAPI_CONFERENCING_TYPE",
MAPI_DIRECTORY: "MAPI_DIRECTORY",
MAPI_ORGANIZER_ALIAS: "MAPI_ORGANIZER_ALIAS",
MAPI_AUTO_START_CHECK: "MAPI_AUTO_START_CHECK",
MAPI_AUTO_START_WHEN: "MAPI_AUTO_START_WHEN",
MAPI_ALLOW_EXTERNAL_CHECK: "MAPI_ALLOW_EXTERNAL_CHECK",
MAPI_COLLABORATE_DOC: "MAPI_COLLABORATE_DOC",
MAPI_NET_SHOW_URL: "MAPI_NET_SHOW_URL",
MAPI_ONLINE_PASSWORD: "MAPI_ONLINE_PASSWORD",
MAPI_APPOINTMENT_PROPOSED_DURATION: "MAPI_APPOINTMENT_PROPOSED_DURATION",
MAPI_APPT_COUNTER_PROPOSAL: "MAPI_APPT_COUNTER_PROPOSAL",
MAPI_APPOINTMENT_PROPOSAL_NUMBER: "MAPI_APPOINTMENT_PROPOSAL_NUMBER",
MAPI_APPOINTMENT_NOT_ALLOW_PROPOSE: "MAPI_APPOINTMENT_NOT_ALLOW_PROPOSE",
MAPI_APPT_TZDEF_START_DISPLAY: "MAPI_APPT_TZDEF_START_DISPLAY",
MAPI_APPT_TZDEF_END_DISPLAY: "MAPI_APPT_TZDEF_END_DISPLAY",
MAPI_APPT_TZDEF_RECUR: "MAPI_APPT_TZDEF_RECUR",
MAPI_REMINDER_MINUTES_BEFORE_START: "MAPI_REMINDER_MINUTES_BEFORE_START",
MAPI_REMINDER_TIME: "MAPI_REMINDER_TIME",
MAPI_REMINDER_SET: "MAPI_REMINDER_SET",
MAPI_PRIVATE: "MAPI_PRIVATE",
MAPI_AGING_DONT_AGE_ME: "MAPI_AGING_DONT_AGE_ME",
MAPI_FORM_STORAGE: "MAPI_FORM_STORAGE",
MAPI_SIDE_EFFECTS: "MAPI_SIDE_EFFECTS",
MAPI_REMOTE_STATUS: "MAPI_REMOTE_STATUS",
MAPI_PAGE_DIR_STREAM: "MAPI_PAGE_DIR_STREAM",
MAPI_SMART_NO_ATTACH: "MAPI_SMART_NO_ATTACH",
MAPI_COMMON_START: "MAPI_COMMON_START",
MAPI_COMMON_END: "MAPI_COMMON_END",
MAPI_TASK_MODE: "MAPI_TASK_MODE",
MAPI_FORM_PROP_STREAM: "MAPI_FORM_PROP_STREAM",
MAPI_REQUEST: "MAPI_REQUEST",
MAPI_NON_SENDABLE_TO: "MAPI_NON_SENDABLE_TO",
MAPI_NON_SENDABLE_CC: "MAPI_NON_SENDABLE_CC",
MAPI_NON_SENDABLE_BCC: "MAPI_NON_SENDABLE_BCC",
MAPI_COMPANIES: "MAPI_COMPANIES",
MAPI_CONTACTS: "MAPI_CONTACTS",
MAPI_PROP_DEF_STREAM: "MAPI_PROP_DEF_STREAM",
MAPI_SCRIPT_STREAM: "MAPI_SCRIPT_STREAM",
MAPI_CUSTOM_FLAG: "MAPI_CUSTOM_FLAG",
MAPI_OUTLOOK_CURRENT_VERSION: "MAPI_OUTLOOK_CURRENT_VERSION",
MAPI_CURRENT_VERSION_NAME: "MAPI_CURRENT_VERSION_NAME",
MAPI_REMINDER_NEXT_TIME: "MAPI_REMINDER_NEXT_TIME",
MAPI_HEADER_ITEM: "MAPI_HEADER_ITEM",
MAPI_USE_TNEF: "MAPI_USE_TNEF",
MAPI_TO_DO_TITLE: "MAPI_TO_DO_TITLE",
MAPI_VALID_FLAG_STRING_PROOF: "MAPI_VALID_FLAG_STRING_PROOF",
MAPI_LOG_TYPE: "MAPI_LOG_TYPE",
MAPI_LOG_START: "MAPI_LOG_START",
MAPI_LOG_DURATION: "MAPI_LOG_DURATION",
MAPI_LOG_END: "MAPI_LOG_END",
}
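# Illustrative helper sketch: map a raw property tag back to its symbolic name via the
# CODE_TO_NAME table above. The helper name and the hex fallback are illustrative
# assumptions, not part of any MAPI API.
def property_name(tag):
    """Return the MAPI_* constant name for a tag, or a hex placeholder if unknown."""
    return CODE_TO_NAME.get(tag, "MAPI_UNKNOWN_0x%04X" % tag)
# e.g. property_name(MAPI_SUBJECT) == "MAPI_SUBJECT"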
|
import bpy
import blf
import math
import gpu, bgl
from bpy import types
from mathutils import Vector, Matrix
from mathutils import geometry
from bpy_extras import view3d_utils
from blenderbim.bim.module.drawing.shaders import DotsGizmoShader, ExtrusionGuidesShader, BaseLinesShader
from ifcopenshell.util.unit import si_conversions
"""Gizmos under the hood
source/blender/windowmanager/gizmo/WM_gizmo_types.h
matrix_basis -- "Transformation of this gizmo." = placement in scene
matrix_offset -- "Custom offset from origin." = local transforms according to state/value
matrix_space -- "The space this gizmo is being modified in." used by some gizmos for undefined purposes
matrix_world -- final matrix, scaled according to viewport zoom and custom scale_basis
source/blender/windowmanager/gizmo/intern/wm_gizmo.c:WM_gizmo_calc_matrix_final_params
final = space @ (autoscale * (basis @ offset))
final = space @ (basis @ offset) -- if gizmo.use_draw_scale == False
final = space @ ((autoscale * basis) @ offset) -- if gizmo.use_draw_offset_scale
source/blender/windowmanager/gizmo/intern/wm_gizmo.c:wm_gizmo_calculate_scale
autoscale = gizmo.scale_basis * magic(preferences, matrix_space, matrix_basis, context.region_data)
magic -- scales 1.0 so that it corresponds to preferences.view.gizmo_size pixels (75 by default)
select_id -- apparently, id of a selectable part
test_select -- expected to return id of selection, doesn't seem to work
draw_select -- fake-draw of selection geometry for gpu-side cursor tracking
"""
# Triangle vertices of a cube spanning [-1, 1] on each axis (two triangles per face).
CUBE = (
(+1, +1, +1),
(-1, +1, +1),
(+1, -1, +1), # top
(+1, -1, +1),
(-1, +1, +1),
(-1, -1, +1),
(+1, +1, +1),
(+1, -1, +1),
(+1, +1, -1), # right
(+1, +1, -1),
(+1, -1, +1),
(+1, -1, -1),
(+1, +1, +1),
(+1, +1, -1),
(-1, +1, +1), # back
(-1, +1, +1),
(+1, +1, -1),
(-1, +1, -1),
(-1, -1, -1),
(-1, +1, -1),
(+1, -1, -1), # bot
(+1, -1, -1),
(-1, +1, -1),
(+1, +1, -1),
(-1, -1, -1),
(-1, -1, +1),
(-1, +1, -1), # left
(-1, +1, -1),
(-1, -1, +1),
(-1, +1, +1),
(-1, -1, -1),
(+1, -1, -1),
(-1, -1, +1), # front
(-1, -1, +1),
(+1, -1, -1),
(+1, -1, +1),
)
# Triangle-fan vertices of a unit disc (12 segments) in the XY plane.
DISC = (
(0.0, 0.0, 0.0),
(1.0, 0.0, 0),
(0.8660254037844387, 0.49999999999999994, 0),
(0.0, 0.0, 0.0),
(0.8660254037844387, 0.49999999999999994, 0),
(0.5000000000000001, 0.8660254037844386, 0),
(0.0, 0.0, 0.0),
(0.5000000000000001, 0.8660254037844386, 0),
(6.123233995736766e-17, 1.0, 0),
(0.0, 0.0, 0.0),
(6.123233995736766e-17, 1.0, 0),
(-0.4999999999999998, 0.8660254037844387, 0),
(0.0, 0.0, 0.0),
(-0.4999999999999998, 0.8660254037844387, 0),
(-0.8660254037844385, 0.5000000000000003, 0),
(0.0, 0.0, 0.0),
(-0.8660254037844385, 0.5000000000000003, 0),
(-1.0, 1.2246467991473532e-16, 0),
(0.0, 0.0, 0.0),
(-1.0, 1.2246467991473532e-16, 0),
(-0.8660254037844388, -0.4999999999999997, 0),
(0.0, 0.0, 0.0),
(-0.8660254037844388, -0.4999999999999997, 0),
(-0.5000000000000004, -0.8660254037844384, 0),
(0.0, 0.0, 0.0),
(-0.5000000000000004, -0.8660254037844384, 0),
(-1.8369701987210297e-16, -1.0, 0),
(0.0, 0.0, 0.0),
(-1.8369701987210297e-16, -1.0, 0),
(0.49999999999999933, -0.866025403784439, 0),
(0.0, 0.0, 0.0),
(0.49999999999999933, -0.866025403784439, 0),
(0.8660254037844384, -0.5000000000000004, 0),
(0.0, 0.0, 0.0),
(0.8660254037844384, -0.5000000000000004, 0),
(1.0, 0.0, 0),
)
# Three orthogonal unit discs (XY, YZ and ZX planes), 12 segments each; used by the dot handle.
X3DISC = (
(0.0, 0.0, 0.0),
(1.0, 0.0, 0),
(0.8660254037844387, 0.49999999999999994, 0),
(0.0, 0.0, 0.0),
(0.8660254037844387, 0.49999999999999994, 0),
(0.5000000000000001, 0.8660254037844386, 0),
(0.0, 0.0, 0.0),
(0.5000000000000001, 0.8660254037844386, 0),
(6.123233995736766e-17, 1.0, 0),
(0.0, 0.0, 0.0),
(6.123233995736766e-17, 1.0, 0),
(-0.4999999999999998, 0.8660254037844387, 0),
(0.0, 0.0, 0.0),
(-0.4999999999999998, 0.8660254037844387, 0),
(-0.8660254037844385, 0.5000000000000003, 0),
(0.0, 0.0, 0.0),
(-0.8660254037844385, 0.5000000000000003, 0),
(-1.0, 1.2246467991473532e-16, 0),
(0.0, 0.0, 0.0),
(-1.0, 1.2246467991473532e-16, 0),
(-0.8660254037844388, -0.4999999999999997, 0),
(0.0, 0.0, 0.0),
(-0.8660254037844388, -0.4999999999999997, 0),
(-0.5000000000000004, -0.8660254037844384, 0),
(0.0, 0.0, 0.0),
(-0.5000000000000004, -0.8660254037844384, 0),
(-1.8369701987210297e-16, -1.0, 0),
(0.0, 0.0, 0.0),
(-1.8369701987210297e-16, -1.0, 0),
(0.49999999999999933, -0.866025403784439, 0),
(0.0, 0.0, 0.0),
(0.49999999999999933, -0.866025403784439, 0),
(0.8660254037844384, -0.5000000000000004, 0),
(0.0, 0.0, 0.0),
(0.8660254037844384, -0.5000000000000004, 0),
(1.0, 0.0, 0),
(0.0, 0.0, 0.0),
(0, 1.0, 0.0),
(0, 0.8660254037844387, 0.49999999999999994),
(0.0, 0.0, 0.0),
(0, 0.8660254037844387, 0.49999999999999994),
(0, 0.5000000000000001, 0.8660254037844386),
(0.0, 0.0, 0.0),
(0, 0.5000000000000001, 0.8660254037844386),
(0, 6.123233995736766e-17, 1.0),
(0.0, 0.0, 0.0),
(0, 6.123233995736766e-17, 1.0),
(0, -0.4999999999999998, 0.8660254037844387),
(0.0, 0.0, 0.0),
(0, -0.4999999999999998, 0.8660254037844387),
(0, -0.8660254037844385, 0.5000000000000003),
(0.0, 0.0, 0.0),
(0, -0.8660254037844385, 0.5000000000000003),
(0, -1.0, 1.2246467991473532e-16),
(0.0, 0.0, 0.0),
(0, -1.0, 1.2246467991473532e-16),
(0, -0.8660254037844388, -0.4999999999999997),
(0.0, 0.0, 0.0),
(0, -0.8660254037844388, -0.4999999999999997),
(0, -0.5000000000000004, -0.8660254037844384),
(0.0, 0.0, 0.0),
(0, -0.5000000000000004, -0.8660254037844384),
(0, -1.8369701987210297e-16, -1.0),
(0.0, 0.0, 0.0),
(0, -1.8369701987210297e-16, -1.0),
(0, 0.49999999999999933, -0.866025403784439),
(0.0, 0.0, 0.0),
(0, 0.49999999999999933, -0.866025403784439),
(0, 0.8660254037844384, -0.5000000000000004),
(0.0, 0.0, 0.0),
(0, 0.8660254037844384, -0.5000000000000004),
(0, 1.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0, 1.0),
(0.49999999999999994, 0, 0.8660254037844387),
(0.0, 0.0, 0.0),
(0.49999999999999994, 0, 0.8660254037844387),
(0.8660254037844386, 0, 0.5000000000000001),
(0.0, 0.0, 0.0),
(0.8660254037844386, 0, 0.5000000000000001),
(1.0, 0, 6.123233995736766e-17),
(0.0, 0.0, 0.0),
(1.0, 0, 6.123233995736766e-17),
(0.8660254037844387, 0, -0.4999999999999998),
(0.0, 0.0, 0.0),
(0.8660254037844387, 0, -0.4999999999999998),
(0.5000000000000003, 0, -0.8660254037844385),
(0.0, 0.0, 0.0),
(0.5000000000000003, 0, -0.8660254037844385),
(1.2246467991473532e-16, 0, -1.0),
(0.0, 0.0, 0.0),
(1.2246467991473532e-16, 0, -1.0),
(-0.4999999999999997, 0, -0.8660254037844388),
(0.0, 0.0, 0.0),
(-0.4999999999999997, 0, -0.8660254037844388),
(-0.8660254037844384, 0, -0.5000000000000004),
(0.0, 0.0, 0.0),
(-0.8660254037844384, 0, -0.5000000000000004),
(-1.0, 0, -1.8369701987210297e-16),
(0.0, 0.0, 0.0),
(-1.0, 0, -1.8369701987210297e-16),
(-0.866025403784439, 0, 0.49999999999999933),
(0.0, 0.0, 0.0),
(-0.866025403784439, 0, 0.49999999999999933),
(-0.5000000000000004, 0, 0.8660254037844384),
(0.0, 0.0, 0.0),
(-0.5000000000000004, 0, 0.8660254037844384),
(0.0, 0, 1.0),
)
class CustomGizmo:
    # FIXME: highlighting/selection doesn't work
def draw_very_custom_shape(self, ctx, custom_shape, select_id=None):
# similar to draw_custom_shape
shape, batch, shader = custom_shape
shader.bind()
if select_id is not None:
gpu.select.load_id(select_id)
else:
if self.is_highlight:
color = (*self.color_highlight, self.alpha_highlight)
else:
color = (*self.color, self.alpha)
shader.uniform_float("color", color)
shape.glenable()
shape.uniform_region(ctx)
# shader.uniform_float('modelMatrix', self.matrix_world)
with gpu.matrix.push_pop():
gpu.matrix.multiply_matrix(self.matrix_world)
batch.draw()
bgl.glDisable(bgl.GL_BLEND)
class OffsetHandle:
"""Handling mouse to offset gizmo from base along Z axis"""
# FIXME: works a bit weird for rotated objects
def invoke(self, ctx, event):
self.init_value = self.target_get_value("offset") / self.scale_value
coordz = self.project_mouse(ctx, event)
if coordz is None:
return {"CANCELLED"}
self.init_coordz = coordz
return {"RUNNING_MODAL"}
def modal(self, ctx, event, tweak):
coordz = self.project_mouse(ctx, event)
if coordz is None:
return {"CANCELLED"}
delta = coordz - self.init_coordz
if "PRECISE" in tweak:
delta /= 10.0
value = max(0, self.init_value + delta)
value *= self.scale_value
# ctx.area.header_text_set(f"coords: {self.init_coordz} - {coordz}, delta: {delta}, value: {value}")
ctx.area.header_text_set(f"Depth: {value}")
self.target_set_value("offset", value)
return {"RUNNING_MODAL"}
def project_mouse(self, ctx, event):
"""Projecting mouse coords to local axis Z"""
# logic from source/blender/editors/gizmo_library/gizmo_types/arrow3d_gizmo.c:gizmo_arrow_modal
mouse = Vector((event.mouse_region_x, event.mouse_region_y))
region = ctx.region
region3d = ctx.region_data
ray_orig = view3d_utils.region_2d_to_origin_3d(region, region3d, mouse)
ray_norm = view3d_utils.region_2d_to_vector_3d(region, region3d, mouse)
# 'arrow' origin and direction
base = Vector((0, 0, 0))
axis = Vector((0, 0, 1))
        # projection of the arrow onto a plane perpendicular to the view ray
axis_proj = axis - ray_norm * axis.dot(ray_norm)
# intersection of the axis with the plane through view origin perpendicular to the arrow projection
coords = geometry.intersect_line_plane(base, axis, ray_orig, axis_proj)
return coords.z
def exit(self, ctx, cancel):
if cancel:
self.target_set_value("offset", self.init_value)
else:
self.group.update(ctx)
class UglyDotGizmo(OffsetHandle, types.Gizmo):
"""three orthogonal circles"""
bl_idname = "BIM_GT_uglydot_3d"
bl_target_properties = ({"id": "offset", "type": "FLOAT", "array_length": 1},)
__slots__ = (
"scale_value",
"custom_shape",
"init_value",
"init_coordz",
)
def setup(self):
self.custom_shape = self.new_custom_shape(type="TRIS", verts=X3DISC)
def refresh(self):
offset = self.target_get_value("offset") / self.scale_value
self.matrix_offset.col[3][2] = offset # z-shift
def draw(self, ctx):
self.refresh()
self.draw_custom_shape(self.custom_shape)
def draw_select(self, ctx, select_id):
self.refresh()
self.draw_custom_shape(self.custom_shape, select_id=select_id)
class DotGizmo(CustomGizmo, OffsetHandle, types.Gizmo):
"""Single dot viewport-aligned"""
# FIXME: make it selectable
bl_idname = "BIM_GT_dot_2d"
bl_target_properties = ({"id": "offset", "type": "FLOAT", "array_length": 1},)
__slots__ = (
"scale_value",
"custom_shape",
)
def setup(self):
shader = DotsGizmoShader()
self.custom_shape = shader, shader.batch(pos=((0, 0, 0),)), shader.prog
self.use_draw_scale = False
def refresh(self):
offset = self.target_get_value("offset") / self.scale_value
self.matrix_offset.col[3][2] = offset # z-shifted
def draw(self, ctx):
self.refresh()
self.draw_very_custom_shape(ctx, self.custom_shape)
def draw_select(self, ctx, select_id):
self.refresh()
self.draw_very_custom_shape(ctx, self.custom_shape, select_id=select_id)
# doesn't get called
# def test_select(self, ctx, location):
# pass
class ExtrusionGuidesGizmo(CustomGizmo, types.Gizmo):
"""Extrusion guides
    Non-interactive gizmo indicating extrusion depth and planes.
    Draws the main segment and an orthogonal cross at each endpoint.
"""
bl_idname = "BIM_GT_extrusion_guides"
bl_target_properties = ({"id": "depth", "type": "FLOAT", "array_length": 1},)
__slots__ = ("scale_value", "custom_shape")
def setup(self):
shader = ExtrusionGuidesShader()
self.custom_shape = shader, shader.batch(pos=((0, 0, 0), (0, 0, 1))), shader.prog
self.use_draw_scale = False
def refresh(self):
depth = self.target_get_value("depth") / self.scale_value
self.matrix_offset.col[2][2] = depth # z-scaled
def draw(self, ctx):
self.refresh()
self.draw_very_custom_shape(ctx, self.custom_shape)
class DimensionLabelGizmo(types.Gizmo):
"""Text label for a dimension"""
# does not work properly, fonts are totally screwed up
bl_idname = "BIM_GT_dimension_label"
bl_target_properties = ({"id": "value", "type": "FLOAT", "array_length": 1},)
__slots__ = "text_label"
def setup(self):
pass
def refresh(self, ctx):
value = self.target_get_value("value")
self.matrix_offset.col[3][2] = value * 0.5
unit_system = ctx.scene.unit_settings.system
self.text_label = bpy.utils.units.to_string(unit_system, "LENGTH", value, 3, split_unit=False)
def draw(self, ctx):
self.refresh(ctx)
self.draw_text(ctx)
def draw_text(self, ctx):
font_id = 0
font_size = 16
dpi = ctx.preferences.system.dpi
# pos = self.matrix_world @ Vector((0, 0, 0, 1))
# pos = Vector((0, 0, 0.5))
# region = ctx.region
# region3d = ctx.region_data
# pos = view3d_utils.location_3d_to_region_2d(region, region3d, pos)
# text = self.text_label
blf.size(font_id, font_size, dpi)
blf.position(font_id, 0, 0, 0)
blf.color(font_id, *self.color, self.alpha)
blf.draw(font_id, "ABC")
class ExtrusionWidget(types.GizmoGroup):
bl_idname = "bim.extrusion_widget"
bl_label = "Extrusion Gizmos"
bl_space_type = "VIEW_3D"
bl_region_type = "WINDOW"
bl_options = {"3D", "PERSISTENT", "SHOW_MODAL_ALL"}
@classmethod
def poll(cls, ctx):
obj = ctx.object
return (
obj
and obj.type == "MESH"
and obj.data.BIMMeshProperties.ifc_parameters.get("IfcExtrudedAreaSolid/Depth") is not None
)
def setup(self, ctx):
target = ctx.object
prop = target.data.BIMMeshProperties.ifc_parameters.get("IfcExtrudedAreaSolid/Depth")
basis = target.matrix_world.normalized()
theme = ctx.preferences.themes[0].user_interface
scale_value = self.get_scale_value(ctx.scene.unit_settings.system, ctx.scene.unit_settings.length_unit)
gz = self.handle = self.gizmos.new("BIM_GT_uglydot_3d")
gz.matrix_basis = basis
gz.scale_basis = 0.1
gz.color = gz.color_highlight = tuple(theme.gizmo_primary)
gz.alpha = 0.5
gz.alpha_highlight = 1.0
gz.use_draw_modal = True
gz.target_set_prop("offset", prop, "value")
gz.scale_value = scale_value
gz = self.guides = self.gizmos.new("BIM_GT_extrusion_guides")
gz.matrix_basis = basis
gz.color = gz.color_highlight = tuple(theme.gizmo_secondary)
gz.alpha = gz.alpha_highlight = 0.5
gz.use_draw_modal = True
gz.target_set_prop("depth", prop, "value")
gz.scale_value = scale_value
# gz = self.label = self.gizmos.new('GIZMO_GT_dimension_label')
# gz.matrix_basis = basis
# gz.color = tuple(theme.gizmo_secondary)
# gz.alpha = 0.5
# gz.use_draw_modal = True
# gz.target_set_prop('value', target.demo, 'depth')
def refresh(self, ctx):
"""updating gizmos"""
target = ctx.object
basis = target.matrix_world.normalized()
self.handle.matrix_basis = basis
self.guides.matrix_basis = basis
def update(self, ctx):
"""updating object"""
bpy.ops.bim.update_parametric_representation()
target = ctx.object
prop = target.data.BIMMeshProperties.ifc_parameters.get("IfcExtrudedAreaSolid/Depth")
self.handle.target_set_prop("offset", prop, "value")
self.guides.target_set_prop("depth", prop, "value")
@staticmethod
def get_scale_value(system, length_unit):
scale_value = 1
if system == "METRIC":
if length_unit == "KILOMETERS":
scale_value /= 1000
elif length_unit == "CENTIMETERS":
scale_value *= 100
elif length_unit == "MILLIMETERS":
scale_value *= 1000
elif length_unit == "MICROMETERS":
scale_value *= 1000000
elif system == "IMPERIAL":
if length_unit == "MILES":
scale_value /= si_conversions["mile"]
elif length_unit == "FEET":
scale_value /= si_conversions["foot"]
elif length_unit == "INCHES":
scale_value /= si_conversions["inch"]
elif length_unit == "THOU":
scale_value /= si_conversions["thou"]
return scale_value
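# Usage sketch for the unit scaling above (values follow directly from the branches):
#   ExtrusionWidget.get_scale_value("METRIC", "MILLIMETERS")  -> 1000
#   ExtrusionWidget.get_scale_value("IMPERIAL", "FEET")       -> 1 / si_conversions["foot"]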
|
import random
import time
class Treap:
    """Treap node: binary search tree ordered by key, max-heap ordered by random priority."""
def __init__(self, key):
self.key = key
self.prio = random.randint(0, 1000000000)
self.size = 1
self.left = None
self.right = None
def update(self):
self.size = 1 + size(self.left) + size(self.right)
def size(treap):
return 0 if treap is None else treap.size
def split(root, minRight):
    # Split the treap into (keys < minRight, keys >= minRight).
if root is None:
return None, None
if root.key >= minRight:
left, right = split(root.left, minRight)
root.left = right
root.update()
return left, root
else:
left, right = split(root.right, minRight)
root.right = left
root.update()
return root, right
def merge(left, right):
    # Merge two treaps; every key in `left` must not exceed any key in `right`.
if left is None:
return right
if right is None:
return left
if left.prio > right.prio:
left.right = merge(left.right, right)
left.update()
return left
else:
right.left = merge(left, right.left)
right.update()
return right
def insert(root, key):
    # Insert a node with the given key and return the new root.
left, right = split(root, key)
return merge(merge(left, Treap(key)), right)
def remove(root, key):
    # Delete all nodes with the given key and return the new root.
left, right = split(root, key)
return merge(left, split(right, key + 1)[1])
def kth(root, k):
    # Return the k-th smallest key (0-indexed); assumes 0 <= k < size(root).
if k < size(root.left):
return kth(root.left, k)
elif k > size(root.left):
return kth(root.right, k - size(root.left) - 1)
return root.key
def print_treap(root):
def dfs_print(root):
if root is None:
return
dfs_print(root.left)
print(str(root.key) + ' ', end='')
dfs_print(root.right)
dfs_print(root)
print()
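# Small usage sketch, separate from the randomized test below; the function name and
# sample keys are illustrative.
def example_usage():
    root = None
    for key in [5, 1, 9, 3, 7]:
        root = insert(root, key)
    assert kth(root, 0) == 1   # smallest key
    assert kth(root, 2) == 5   # median of the five keys
    root = remove(root, 9)
    assert size(root) == 4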
def test():
start = time.time()
treap = None
s = set()
for i in range(100000):
key = random.randint(0, 10000)
if random.randint(0, 1) == 0:
if key in s:
treap = remove(treap, key)
s.remove(key)
elif key not in s:
treap = insert(treap, key)
s.add(key)
assert len(s) == size(treap)
for i in range(size(treap)):
assert kth(treap, i) in s
print(time.time() - start)
test()
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from nose.tools import eq_
from hamcrest import ( assert_that, has_item, has_items, has_entry,
has_entries, contains, empty, contains_string )
from ycmd.utils import ReadFile
from ycmd.tests.python import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest, CompletionEntryMatcher,
CompletionLocationMatcher )
import http.client
@SharedYcmd
def GetCompletions_Basic_test( app ):
filepath = PathToTestFile( 'basic.py' )
completion_data = BuildRequest( filepath = filepath,
filetype = 'python',
contents = ReadFile( filepath ),
line_num = 7,
column_num = 3)
results = app.post_json( '/completions',
completion_data ).json[ 'completions' ]
assert_that( results,
has_items(
CompletionEntryMatcher( 'a' ),
CompletionEntryMatcher( 'b' ),
CompletionLocationMatcher( 'line_num', 3 ),
CompletionLocationMatcher( 'line_num', 4 ),
CompletionLocationMatcher( 'column_num', 10 ),
CompletionLocationMatcher( 'filepath', filepath ) ) )
@SharedYcmd
def GetCompletions_UnicodeDescription_test( app ):
filepath = PathToTestFile( 'unicode.py' )
completion_data = BuildRequest( filepath = filepath,
filetype = 'python',
contents = ReadFile( filepath ),
force_semantic = True,
line_num = 5,
column_num = 3)
results = app.post_json( '/completions',
completion_data ).json[ 'completions' ]
assert_that( results, has_item(
has_entry( 'detailed_info', contains_string( u'aafäö' ) ) ) )
def RunTest( app, test ):
"""
Method to run a simple completion test and verify the result
test is a dictionary containing:
'request': kwargs for BuildRequest
'expect': {
         'response': server response code (e.g. http.client.OK)
'data': matcher for the server response json
}
"""
contents = ReadFile( test[ 'request' ][ 'filepath' ] )
def CombineRequest( request, data ):
kw = request
request.update( data )
return BuildRequest( **kw )
app.post_json( '/event_notification',
CombineRequest( test[ 'request' ], {
'event_name': 'FileReadyToParse',
'contents': contents,
} ) )
# We ignore errors here and we check the response code ourself.
# This is to allow testing of requests returning errors.
response = app.post_json( '/completions',
CombineRequest( test[ 'request' ], {
'contents': contents
} ),
expect_errors = True )
eq_( response.status_code, test[ 'expect' ][ 'response' ] )
assert_that( response.json, test[ 'expect' ][ 'data' ] )
@SharedYcmd
def GetCompletions_NoSuggestions_Fallback_test( app ):
# Python completer doesn't raise NO_COMPLETIONS_MESSAGE, so this is a
# different code path to the Clang completer cases
# TESTCASE2 (general_fallback/lang_python.py)
RunTest( app, {
'description': 'param jedi does not know about (id). query="a_p"',
'request': {
'filetype' : 'python',
'filepath' : PathToTestFile( 'general_fallback',
'lang_python.py' ),
'line_num' : 28,
'column_num': 20,
'force_semantic': False,
},
'expect': {
'response': http.client.OK,
'data': has_entries( {
'completions': contains(
CompletionEntryMatcher( 'a_parameter', '[ID]' ),
CompletionEntryMatcher( 'another_parameter', '[ID]' ),
),
'errors': empty(),
} )
},
} )
@SharedYcmd
def GetCompletions_Unicode_InLine_test( app ):
RunTest( app, {
'description': 'return completions for strings with multi-byte chars',
'request': {
'filetype' : 'python',
'filepath' : PathToTestFile( 'unicode.py' ),
'line_num' : 7,
'column_num': 14
},
'expect': {
'response': http.client.OK,
'data': has_entries( {
'completions': contains(
CompletionEntryMatcher( 'center', 'function: builtins.str.center' )
),
'errors': empty(),
} )
},
} )
|
import fechbase
class Records(fechbase.RecordsBase):
def __init__(self):
fechbase.RecordsBase.__init__(self)
self.fields = [
{'name': 'FORM TYPE', 'number': '1'},
{'name': 'FILER FEC CMTE ID', 'number': '2'},
{'name': 'ENTITY TYPE', 'number': '3'},
{'name': 'NAME (Payee)', 'number': '4'},
{'name': 'STREET 1', 'number': '5'},
{'name': 'STREET 2', 'number': '6'},
{'name': 'CITY', 'number': '7'},
{'name': 'STATE', 'number': '8'},
{'name': 'ZIP', 'number': '9'},
{'name': 'TRANSDESC', 'number': '10'},
{'name': 'Of Expenditure', 'number': '11-'},
{'name': 'AMOUNT', 'number': '12'},
{'name': 'SUPPORT/OPPOSE', 'number': '13'},
{'name': 'S/O FEC CAN ID NUMBER', 'number': '14'},
{'name': 'S/O CAN/NAME', 'number': '15'},
{'name': 'S/O CAN/OFFICE', 'number': '16'},
{'name': 'S/O CAN/STATE', 'number': '17'},
{'name': 'S/O CAN/DIST', 'number': '18'},
{'name': 'FEC COMMITTEE ID NUMBER', 'number': '19'},
{'name': 'Unused field', 'number': '20'},
{'name': 'Unused field', 'number': '21'},
{'name': 'Unused field', 'number': '22'},
{'name': 'Unused field', 'number': '23'},
{'name': 'Unused field', 'number': '24'},
{'name': 'CONDUIT NAME', 'number': '25'},
{'name': 'CONDUIT STREET 1', 'number': '26'},
{'name': 'CONDUIT STREET 2', 'number': '27'},
{'name': 'CONDUIT CITY', 'number': '28'},
{'name': 'CONDUIT STATE', 'number': '29'},
{'name': 'CONDUIT ZIP', 'number': '30'},
{'name': 'AMENDED CD', 'number': '31'},
{'name': 'TRAN ID', 'number': '32'},
]
self.fields_names = self.hash_names(self.fields)
|
"""
Largest product in a grid
Problem 11
Published on 22 February 2002 at 06:00 pm [Server Time]
In the 20x20 grid below, four numbers along a diagonal line have been marked in red.
The product of these numbers is 26 * 63 * 78 * 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20x20 grid?
"""
THE_GRID = [[int(column) for column in row.split(' ')] for row in
"""
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
""".strip().split('\n')]
"""
A few words about the declaration of THE_GRID:
This is not the easiest thing to digest on first look. I think it is "pythonic"
in its implementation, and it allows one to copy/paste the grid straight out of
the problem statement without manually massaging it into a 2d array (nested
lists, really). It is arranged as a list of rows, and each row is a list of the
numbers in that row's columns. The multi-line string is first split into a list
of strings, one per row; the outer list comprehension then converts each row
into a list of short column strings, each of which is converted to an int.
"""
import operator
def product(iterable):
return reduce(operator.mul, iterable, 1)
def solve(run_length):
height = len(THE_GRID)
width = len(THE_GRID[0])
for row in range(height-run_length+1):
for column in range(width-run_length+1):
for y_dir in (0, 1):
for x_dir in (0,1):
for i in range(run_length):
print THE_GRID[row+(y_dir*i)][column+x_dir*i]
def solve(run_length):
height = len(THE_GRID)
width = len(THE_GRID[0])
for row in range(height-run_length+1):
for column in range(width-run_length+1):
for i in range(run_length):
for y_dir in (0, 1):
for x_dir in (0,1):
print THE_GRID[row+(y_dir*i)][column+x_dir*i]
def solve(run_length):
height = len(THE_GRID)
width = len(THE_GRID[0])
highest = 0
for row in range(height-run_length+1):
for column in range(width-run_length+1):
for x_dir, y_dir in [(1, 0), (0, 1), (1, 1)]:
for i in range(run_length):
print THE_GRID[row+(y_dir*i)][column+x_dir*i]
def solve(run_length):
height = len(THE_GRID)
width = len(THE_GRID[0])
highest = 0
for row in range(height-run_length+1):
for column in range(width-run_length+1):
for x_dir, y_dir in [(1, 0), (0, 1), (1, 1)]:
run =[THE_GRID[row+(y_dir*i)][column+x_dir*i] for i in range(run_length)]
result = product(run)
print run, result
#if result > highest:
# highest = result
#return(highest)
def solve():
g = THE_GRID
maxp = 0
    rows, cols, path_size = len(g), len(g[0]), 4
for i in range(rows):
for j in range(cols - path_size + 1):
phv = max(product([g[i][j+s] for s in range(path_size)]),
product([g[j+s][i] for s in range(path_size)]))
#phv = max(g[i][j] * g[i][j+1] * g[i][j+2] * g[i][j+3],
# g[j][i] * g[j+1][i] * g[j+2][i] * g[j+3][i])
            if i <= rows - path_size:
pdd = max(product([g[i+s][j+s] for s in range(path_size)]),
product([g[i+s][j+path_size-s-1] for s in range(path_size)]))
#pdd = max(g[i][j] * g[i+1][j+1] * g[i+2][j+2] * g[i+3][j+3],
# g[i][j+3] * g[i+1][j+2] * g[i+2][j+1] * g[i+3][j])
maxp = max(maxp, phv, pdd)
return maxp
def main():
print "PROBLEM:\n"
for line in __doc__.strip().split('\n'):
print '\t', line
print "\nSOLUTION:"
print "\n\t", solve()
if __name__ == "__main__":
main()
|
"""
Your job is to write a function which increments a string to create a new string. If the string already ends with a number, the number should be incremented by 1. If the string does not end with a number, the number 1 should be appended to the new string.
Examples:
foo -> foo1
foobar23 -> foobar24
foo0042 -> foo0043
foo9 -> foo10
foo099 -> foo100
Attention: if the number has leading zeros, the number of digits should be preserved.
"""
import re
def increment_string(strng):
match = re.match(r"(.*?)(\d*)$",strng)
string = match.group(1)
number = match.group(2)
if not number:
return string + '1'
else:
return string + str(int(number)+1).zfill(len(number))
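# A minimal sanity check, assuming the module is run directly; the expected values
# are taken from the examples in the docstring above.
if __name__ == "__main__":
    assert increment_string("foo") == "foo1"
    assert increment_string("foobar23") == "foobar24"
    assert increment_string("foo0042") == "foo0043"
    assert increment_string("foo9") == "foo10"
    assert increment_string("foo099") == "foo100"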
|
from ..parsers.errors import ErrorResponse
from warnings import warn
def raise_for_error(f):
"""
Wrapper method to parse any error response and raise the ErrorResponse instance if an error is encountered.
:param f:
:return:
"""
def inner(*args, **kwargs):
warn('`raise_for_error` is deprecated and will not process any response content.')
return f(*args, **kwargs)
# e = ErrorResponse.load(content)
# e.raise_for_error()
# return content
return inner
def raise_response_for_error(f):
"""
Wrapper method to parse a response object and raise the ErrorResponse
instance if an error is encountered in the response body.
:param f:
:return:
"""
def inner(*args, **kwargs):
warn('`raise_response_for_error` is deprecated and will not process any response content.')
return f(*args, **kwargs)
return inner
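# Minimal usage sketch (assumed, not from the original module): both decorators are
# now pass-throughs that only emit a deprecation warning before calling the wrapped
# function.
#
#   @raise_for_error
#   def fetch():
#       return "<response/>"
#
#   fetch()  # warns about deprecation, then returns "<response/>"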
|
import random
import socket
import os
import time
import threading
import Queue
import sys
import argparse
from multiprocessing import Process
print """\33[91m
═════════════════════════════════════════════════════════
███████ ██████ ███████
█ █ █ █ ║
█ █════╗ █ ╔═█ ║
█═════════════█ ╚█ ║█═══╝
█ ██████ ║█
█ █ █ ╚╗█ ╔═══════Server
█════════╗ █ █ ╚═█ ║
███████ ║ █ █ ███████
Chat Room Client════════╝
═════════════════════════════════════════════════════════
\33[92m"""
quit = Queue.Queue()
path = os.path.realpath(__file__)
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--screen", help="This is used by the script to make a screen. Not necessarily needed for regular users.")
args = parser.parse_args()
def outputscreen(messages, online):
rows, columns = os.popen('stty size', 'r').read().split()
rows = int(rows)
rows = rows - 1
columns = int(columns)
if len(messages) > rows:
messages = messages[len(messages) - rows:]
print messages
else:
pass
if len(online) > rows:
online = online[len(online) - rows:]
print online
else:
pass
output = []
for line in range(rows):
output.append(["", ""])
tick = 0
for message in messages:
output[tick][0] = message
tick = tick + 1
print tick
if len(output) <= len(online):
print "less or equal output then online"
for l in range(len(online) - len(output)):
output.append(["", ""])
print output
#for num in range(len(online)):
tick = 0
print output
for user in online:
output[tick][1] = user
tick = tick + 1
print output
else:
print "more output then online"
print rows
#for num in range(len(output)):
tick = 0
for user in online:
output[tick][1] = user
tick = tick + 1
for line in output:
space = int(columns)
outleng = len(line[0]) + len(line[1])
space = space - outleng
print line[0] + " "*space + line[1]
if args.screen:
sp = args.screen
sp = sp.split(":")
user = sp[2]
port = int(sp[1])
server = sp[0]
global cv
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (server, port)
sock.connect(server_address)
sock.send("screen:")
#print "\33[96m Type /stop to quit\33[91m"
quit = False
messages = []
import ast
online = sock.recv(1024)
online = ast.literal_eval(online)
tmp = online
while quit == False:
servercom = sock.recv(1024)
#print servercom
        if servercom == "quitting:":
            quit = True
            os._exit(0)
elif "online:" in servercom:
online = ast.literal_eval(servercom[7:])
if tmp != online:
for line in tmp:
if line not in online:
messages.append(line + " has left the server...")
else:
pass
for line in online:
if line not in tmp:
messages.append(line + " has joined the server...")
else:
pass
else:
pass
if user not in online:
quit = True
sock.send("quitting:")
os._exit(0)
else:
sock.send("good:")
tmp = online
outputscreen(messages, online)
else:
messages.append(servercom)
outputscreen(messages, online)
time.sleep(.01)
if servercom == "ping":
sock.send("ping:pong")
else:
pass
else:
pass
cv = "1.0"
username = raw_input("Name:")
server = raw_input("Server IP[127.0.0.1]:")
port = raw_input("Server Port[22550]:")
if port == "":
port = "22550"
else:
pass
if server == "":
server = "127.0.0.1"
else:
pass
print port
class connect(object):
def __init__(self, server, port, username, quit):
self.quit = quit
self.server = server
self.port = port
self.username = username
self.con()
def con(self):
#try:
global cv
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (self.server, int(self.port))
self.sock.connect(server_address)
self.sock.settimeout(60)
self.sock.send("cv:" + cv)
compatible = self.sock.recv(1024)
if compatible == "comp:1":
pass
else:
print """\33[91m
***************************************************
Error Server is on version """ + compatible[7:] + """
***************************************************
"""
sys.exit()
self.sock.send("user:" + self.username)
nc = self.sock.recv(1024)
if "error:" in nc:
print """\33[91m
***************************************************
Error while sending username:
""" + nc[6:] + """
***************************************************
"""
os._exit(0)
#threading.Thread(target = self.ping, args=()).start()
#threading.Thread(target = self.screen, args=()).start()
#self.screen.start()
quit = False
while quit == False:
#inp = raw_input(">>")
#time.sleep(.2)
send = str(random.randint(0, 9))
self.sock.send(send)
print send
'''if inp == "/quit":
quit = True
self.quit.put("1")
self.sock.send("quitting:")
elif "" == inp:
"""\33[91m
***************************************************
Error no message entered
***************************************************
"""
elif "/help" == inp:
"""\33[91m
***************************************************
Error no help menu implemented yet
***************************************************
"""
else:
self.sock.send("mesg:" + inp)'''
else:
os._exit(0)
'''except:
print """\33[91m
***************************************************
Error while initiating connecting with server
***************************************************
"""
sys.exit()'''
def ping(self):
while True:
self.sock.send("ping:")
time.sleep(1)
    def screen(self):
global path
os.system("xterm -hold -e python " + "./ChatRoom1.0Client.py" + " -s " + self.server + ":" + self.port + ":" + self.username)
self.qt = True
self.quit.put("1")
def quitcheck(quit):
while True:
time.sleep(1)
if quit.empty() == True:
pass
else:
os._exit(0)
threading.Thread(target = quitcheck, args=(quit,)).start()
threading.Thread(target=connect, args=(server, port, username, quit)).start()
|
"""
Assorted utilities for manipulating latitude and longitude values
"""
from __future__ import unicode_literals
__version__ = "1.4"
import math, struct
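# Assumed helper constant so the signbit() doctest below has something to refer to;
# negative zero compares equal to 0.0 but has its sign bit set.
NEGATIVE_ZERO = -0.0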
def signbit(value):
"""
Test whether the sign bit of the given floating-point value is
set. If it is set, this generally means the given value is
negative. However, this is not the same as comparing the value
to C{0.0}. For example:
>>> NEGATIVE_ZERO < 0.0
False
since negative zero is numerically equal to positive zero. But
the sign bit of negative zero is indeed set:
>>> signbit(NEGATIVE_ZERO)
True
>>> signbit(0.0)
False
@type value: float
@param value: a Python (double-precision) float value
@rtype: bool
@return: C{True} if the sign bit of C{value} is set;
C{False} if it is not set.
signbit and doubleToRawLongBits
are from Martin Jansche:
http://symptotic.com/mj/code.html (MIT license).
This is required to capture the difference between -0.0 and 0.0, which is
useful if someone wants to convert a latitude or longitude like:
-0.0degrees, 34minutes to 0d34'00"S
"""
return (doubleToRawLongBits(value) >> 63) == 1
def doubleToRawLongBits(value):
"""
@type value: float
@param value: a Python (double-precision) float value
@rtype: long
@return: the IEEE 754 bit representation (64 bits as a long integer)
of the given double-precision floating-point value.
"""
# pack double into 64 bits, then unpack as long int
return struct.unpack(b'Q', struct.pack(b'd', value))[0]
class LatLongConverter:
@classmethod
def ToDecDeg(self, d=0, m=0, s=0, ustring = False, max=180):
"""
DecDegrees = ToDecDeg(d=0, m=0, s=0)
converts degrees, minutes, seconds to decimal degrees (returned as a Float).
"""
if m < 0 or s < 0:
raise ValueError("Minutes and Seconds have to be positive")
if m > 60.0 or s > 60.0:
raise ValueError("Minutes and Seconds have to be between -180 and 180")
if abs(d) > max:
raise ValueError("Degrees have to be between -180 and 180")
if signbit(d):
Sign = -1
d = abs(d)
else:
Sign = 1
deg_has_fract = bool(math.modf(d)[0])
min_has_fract = bool(math.modf(m)[0])
if deg_has_fract and (m != 0.0 or s != 0.0):
raise ValueError("degrees cannot have fraction unless both minutes"
"and seconds are zero")
if min_has_fract and s != 0.0:
raise ValueError("minutes cannot have fraction unless seconds are zero")
DecDegrees = Sign * (d + m/60.0 + s/3600.0)
if ustring:
return u"%.6f\xb0"%(DecDegrees)
else:
return DecDegrees
@classmethod
def ToDegMin(self, DecDegrees, ustring = False):
"""
Converts from decimal (binary float) degrees to:
Degrees, Minutes
If the optional parameter: "ustring" is True,
a Unicode string is returned
"""
if signbit(DecDegrees):
Sign = -1
DecDegrees = abs(DecDegrees)
else:
Sign = 1
Degrees = int(DecDegrees)
DecMinutes = round((DecDegrees - Degrees + 1e-14) * 60, 10)# add a tiny bit then round to avoid binary rounding issues
if ustring:
if Sign == 1:
return u"%i\xb0 %.3f'"%(Degrees, DecMinutes)
else:
return u"-%i\xb0 %.3f'"%(Degrees, DecMinutes)
else:
return (Sign*float(Degrees), DecMinutes) # float to preserve -0.0
@classmethod
def ToDegMinSec(self, DecDegrees, ustring = False):
"""
Converts from decimal (binary float) degrees to:
Degrees, Minutes, Seconds
If the optional parameter: "ustring" is True,
a unicode string is returned
"""
if signbit(DecDegrees):
Sign = -1
DecDegrees = abs(DecDegrees)
else:
Sign = 1
Degrees = int(DecDegrees)
DecMinutes = (DecDegrees - Degrees + 1e-14) * 60 # add a tiny bit to avoid rounding issues
Minutes = int(DecMinutes)
Seconds = round(((DecMinutes - Minutes) * 60), 10 )
if ustring:
if Sign == 1:
return u"%i\xb0 %i' %.2f\""%(Degrees, Minutes, Seconds)
else:
return u"-%i\xb0 %i' %.2f\""%(Degrees, Minutes, Seconds)
else:
return (Sign * float(Degrees), Minutes, Seconds)
class Latitude:
"""An object that can interpret a latitude in various formats.
Constructor:
Latitude(deg, min=0.0, sec=0.0, direction=None)
- 'deg' may be between -90.0 and 90.0.
- if 'min' is nonzero, 'deg' cannot have a fractional part.
(This means 5 and 5.0 are acceptable but 5.1 is not.)
- if 'sec' is nonzero, 'deg' and 'min' cannot have fractional parts.
- 'direction' may be a string beginning with 'N' or 'S' (case
insensitive), or None.
- if 'direction' is not None, 'deg' cannot be negative.
Attributes:
.value : a float in decimal degrees. Positive is North; negative is
South. (These apply to zero too; positive zero is North.)
Methods:
.degrees() -> (float, str)
.degrees_minutes() -> (int, float, str)
.degrees_minutes_seconds() -> (int, int, float, str)
The 'str' argument is the direction: "North" or "South".
Example:
>>> lat1 = Latitude(-120.7625)
>>> lat2 = Latitude(-120, 45.7500)
>>> lat3 = Latitude(-120, 45, 45)
>>> lat4 = Latitude(120.7625, direction='South')
>>> lat5 = Latitude(120, 45.7500, direction='S')
>>> lat6 = Latitude(120, 45, 45, direction='south')
>>> (lat1.value == lat2.value == lat3.value == lat4.value ==
... lat5.value == lat6.value)
True
>>> lat1.value
-120.7625
>>> lat1.degrees()
(120.7625, 'South')
>>> lat1.degrees_minutes()
(120, 45.750000000000171, 'South')
>>> lat1.degrees_minutes_seconds()
(120, 45, 45.000000000010232, 'South')
>>> print str(lat1)
Latitude(-120.762500)
"""
negative_direction = "South"
positive_direction = "North"
min = -90.0
max = 90.0
def __init__(self, deg, min=0.0, sec=0.0, direction=None):
ndir = self.negative_direction[0].upper()
pdir = self.positive_direction[0].upper()
if direction:
if deg < 0.0:
msg = "degrees cannot be negative if direction is specified"
raise ValueError(msg)
if direction[0].upper() == pdir:
pass
elif direction[0].upper() == ndir:
deg = -deg
else:
msg = "direction must start with %r or %r" % (pdir, ndir)
raise ValueError(msg)
self.value = LatLongConverter.ToDecDeg(deg, min, sec, max=self.max)
def direction(self):
if self.value < 0.0:
return self.negative_direction
else:
return self.positive_direction
def degrees(self):
deg = abs(self.value)
return deg, self.direction()
def degrees_minutes(self):
deg, min = LatLongConverter.ToDegMin(abs(self.value))
return deg, min, self.direction()
def degrees_minutes_seconds(self):
deg, min, sec = LatLongConverter.ToDegMinSec(abs(self.value))
return deg, min, sec, self.direction()
def __repr__(self):
try:
return "%s(%f)" % (self.__class__.__name__, self.value)
except AttributeError:
return "%s(uninitialized)" % self.__class__.__name__
def format(self, style):
"""
format(style)
returns formatted value as Unicode string with u'\xb0' (degree symbol).
style is one of:
1: decimal degrees
2: degrees, decimal minutes
3: degrees, minutes, seconds
"""
if style == 1:
return u'''%0.2f\xb0 %s''' % self.degrees()
elif style == 2:
return u'''%d\xb0 %0.2f' %s''' % self.degrees_minutes()
elif style == 3:
return u'''%d\xb0 %d' %0.2f" %s''' % self.degrees_minutes_seconds()
else:
raise ValueError("style must be 1, 2, or 3")
def format_html(self, style):
"""
format_html(style)
Backward compatibility for Quixote rlink and Pylons inews.
"""
return self.format(style).replace(u"\xb0", u"°").encode("ascii")
class Longitude(Latitude):
"""See Latitude docstring.
Positive is East; negative is West. Degrees must be between -180.0 and
180.0
"""
negative_direction = "West"
positive_direction = "East"
min = -180.0
max = 180.0
class DummyLatitude:
"""A pseudo-Latitude whose components are None.
Useful in building HTML forms where the value is not required.
Note: this class may be deleted if it doesn't turn out to be useful.
"""
value = None
def direction(self): return None
def degrees(self): return None, None
def degrees_minutes(self): return None, None, None
def degrees_minutes_seconds(self): return None, None, None, None
class DummyLongitude(DummyLatitude):
"""
Note: this class may be deleted if it doesn't turn out to be useful.
"""
pass
DEGREES = "\xb0" # "DEGREE SIGN"
MINUTES = "\u2032" # "PRIME"
SECONDS = "\u2033" # "DOUBLE PRIME"
LAT_POSITIVE_DIRECTION = "North"
LAT_NEGATIVE_DIRECTION = "South"
LON_POSITIVE_DIRECTION = "East"
LON_NEGATIVE_DIRECTION = "West"
FORMAT1 = "{:.2f}\N{DEGREE SIGN} {}"
FORMAT2 = "{:.0f}\N{DEGREE SIGN} {:.2f}\N{PRIME} {}"
FORMAT3 = "{:.0f}\N{DEGREE SIGN} {:.0f}\N{PRIME} {:.2f}\N{DOUBLE PRIME} {}"
def reduce_base_60(f):
"""extract the base 60 fractional portion of a floating point number.
i.e. minutes from degrees, seconds from minutes.
"""
fract, whole = math.modf(f)
# Add a tiny bit before rounding to avoid binary rounding errors.
fract = abs(fract)
fract = (fract + 1e-14) * 60
fract = round(fract, 10)
return whole, fract
def format_latlon2(f, positive_direction, negative_direction):
direction = positive_direction if f >= 0.0 else negative_direction
degrees, minutes = reduce_base_60(f)
degrees = abs(degrees)
return FORMAT2.format(degrees, minutes, direction)
def format_latlon3(f, positive_direction, negative_direction):
direction = positive_direction if f >= 0.0 else negative_direction
degrees, minutes = reduce_base_60(f)
minutes, seconds = reduce_base_60(minutes)
degrees = abs(degrees)
return FORMAT3.format(degrees, minutes, seconds, direction)
def format_lat(f):
return format_latlon2(f, LAT_POSITIVE_DIRECTION, LAT_NEGATIVE_DIRECTION)
def format_lon(f):
return format_latlon2(f, LON_POSITIVE_DIRECTION, LON_NEGATIVE_DIRECTION)
def format_lat_dms(f):
return format_latlon3(f, LAT_POSITIVE_DIRECTION, LAT_NEGATIVE_DIRECTION)
def format_lon_dms(f):
return format_latlon3(f, LON_POSITIVE_DIRECTION, LON_NEGATIVE_DIRECTION)
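# Minimal usage sketch (assumed, not part of the original module): round-tripping a
# decimal-degree value through the converter helpers defined above.
if __name__ == "__main__":
    dd = LatLongConverter.ToDecDeg(d=120, m=45, s=45)   # -> 120.7625
    print(LatLongConverter.ToDegMin(dd))                # -> (120.0, 45.75)
    print(LatLongConverter.ToDegMinSec(dd))             # -> (120.0, 45, 45.0)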
|
from math import log
def num_prime_factors(upper_limit):
"""
    Create an array whose entries are the number of prime factors, counted with
    multiplicity, of the index. The upper limit is exclusive: it is the first
    number not included.
"""
factor_count = [0] * upper_limit
prime = 2 #start with the first prime, which is 2
while True:
prime_power = prime
for exponent in range(int(log(upper_limit, prime))):
for hit in range(prime_power, upper_limit, prime_power):
factor_count[hit] += 1
prime_power *= prime
while True:
prime += 1
if prime >= upper_limit:
return factor_count
if factor_count[prime] == 0: break
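# Quick sanity check on a small bound (an assumed example, not in the original file):
# the semiprimes below 16 are 4, 6, 9, 10, 14 and 15.
assert [n for n, c in enumerate(num_prime_factors(16)) if c == 2] == [4, 6, 9, 10, 14, 15]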
print(sum(1 if n == 2 else 0 for n in num_prime_factors(100000000)))
|
from __future__ import unicode_literals
import base64
import functools
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_kwargs,
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
clean_html,
determine_ext,
dict_get,
ExtractorError,
js_to_json,
int_or_none,
merge_dicts,
OnDemandPagedList,
parse_filesize,
RegexNotFoundError,
sanitized_Request,
smuggle_url,
std_headers,
str_or_none,
try_get,
unified_timestamp,
unsmuggle_url,
urlencode_postdata,
urljoin,
unescapeHTML,
)
class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
_LOGIN_URL = 'https://vimeo.com/log_in'
def _login(self):
username, password = self._get_login_info()
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return
webpage = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = {
'action': 'login',
'email': username,
'password': password,
'service': 'vimeo',
'token': token,
}
self._set_vimeo_cookie('vuid', vuid)
try:
self._download_webpage(
self._LOGIN_URL, None, 'Logging in',
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': self._LOGIN_URL,
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 418:
raise ExtractorError(
'Unable to log in: bad username or password',
expected=True)
raise ExtractorError('Unable to log in')
def _verify_video_password(self, url, video_id, webpage):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = urlencode_postdata({
'password': password,
'token': token,
})
if url.startswith('http://'):
# vimeo only supports https now, but the user can give an http url
url = url.replace('http://', 'https://')
password_request = sanitized_Request(url + '/password', data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
password_request.add_header('Referer', url)
self._set_vimeo_cookie('vuid', vuid)
return self._download_webpage(
password_request, video_id,
'Verifying the password', 'Wrong password')
def _extract_xsrft_and_vuid(self, webpage):
xsrft = self._search_regex(
r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
webpage, 'login token', group='xsrft')
vuid = self._search_regex(
r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
webpage, 'vuid', group='vuid')
return xsrft, vuid
def _extract_vimeo_config(self, webpage, video_id, *args, **kwargs):
vimeo_config = self._search_regex(
r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));',
webpage, 'vimeo config', *args, **compat_kwargs(kwargs))
if vimeo_config:
return self._parse_json(vimeo_config, video_id)
def _set_vimeo_cookie(self, name, value):
self._set_cookie('vimeo.com', name, value)
def _vimeo_sort_formats(self, formats):
# Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
# at the same time without actual units specified. This lead to wrong sorting.
self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'tbr', 'format_id'))
def _parse_config(self, config, video_id):
video_data = config['video']
video_title = video_data['title']
live_event = video_data.get('live_event') or {}
is_live = live_event.get('status') == 'started'
formats = []
config_files = video_data.get('files') or config['request'].get('files', {})
for f in config_files.get('progressive', []):
video_url = f.get('url')
if not video_url:
continue
formats.append({
'url': video_url,
'format_id': 'http-%s' % f.get('quality'),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'fps': int_or_none(f.get('fps')),
'tbr': int_or_none(f.get('bitrate')),
})
# TODO: fix handling of 308 status code returned for live archive manifest requests
for files_type in ('hls', 'dash'):
for cdn_name, cdn_data in config_files.get(files_type, {}).get('cdns', {}).items():
manifest_url = cdn_data.get('url')
if not manifest_url:
continue
format_id = '%s-%s' % (files_type, cdn_name)
if files_type == 'hls':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4',
'm3u8' if is_live else 'm3u8_native', m3u8_id=format_id,
note='Downloading %s m3u8 information' % cdn_name,
fatal=False))
elif files_type == 'dash':
mpd_pattern = r'/%s/(?:sep/)?video/' % video_id
mpd_manifest_urls = []
if re.search(mpd_pattern, manifest_url):
for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
mpd_manifest_urls.append((format_id + suffix, re.sub(
mpd_pattern, '/%s/%s/' % (video_id, repl), manifest_url)))
else:
mpd_manifest_urls = [(format_id, manifest_url)]
for f_id, m_url in mpd_manifest_urls:
if 'json=1' in m_url:
real_m_url = (self._download_json(m_url, video_id, fatal=False) or {}).get('url')
if real_m_url:
m_url = real_m_url
mpd_formats = self._extract_mpd_formats(
m_url.replace('/master.json', '/master.mpd'), video_id, f_id,
'Downloading %s MPD information' % cdn_name,
fatal=False)
for f in mpd_formats:
if f.get('vcodec') == 'none':
f['preference'] = -50
elif f.get('acodec') == 'none':
f['preference'] = -40
formats.extend(mpd_formats)
live_archive = live_event.get('archive') or {}
live_archive_source_url = live_archive.get('source_url')
if live_archive_source_url and live_archive.get('status') == 'done':
formats.append({
'format_id': 'live-archive-source',
'url': live_archive_source_url,
'preference': 1,
})
subtitles = {}
text_tracks = config['request'].get('text_tracks')
if text_tracks:
for tt in text_tracks:
subtitles[tt['lang']] = [{
'ext': 'vtt',
'url': urljoin('https://vimeo.com', tt['url']),
}]
thumbnails = []
if not is_live:
for key, thumb in video_data.get('thumbs', {}).items():
thumbnails.append({
'id': key,
'width': int_or_none(key),
'url': thumb,
})
thumbnail = video_data.get('thumbnail')
if thumbnail:
thumbnails.append({
'url': thumbnail,
})
owner = video_data.get('owner') or {}
video_uploader_url = owner.get('url')
return {
'id': str_or_none(video_data.get('id')) or video_id,
'title': self._live_title(video_title) if is_live else video_title,
'uploader': owner.get('name'),
'uploader_id': video_uploader_url.split('/')[-1] if video_uploader_url else None,
'uploader_url': video_uploader_url,
'thumbnails': thumbnails,
'duration': int_or_none(video_data.get('duration')),
'formats': formats,
'subtitles': subtitles,
'is_live': is_live,
}
def _extract_original_format(self, url, video_id):
download_data = self._download_json(
url, video_id, fatal=False,
query={'action': 'load_download_config'},
headers={'X-Requested-With': 'XMLHttpRequest'})
if download_data:
source_file = download_data.get('source_file')
if isinstance(source_file, dict):
download_url = source_file.get('download_url')
if download_url and not source_file.get('is_cold') and not source_file.get('is_defrosting'):
source_name = source_file.get('public_name', 'Original')
if self._is_valid_url(download_url, video_id, '%s video' % source_name):
ext = (try_get(
source_file, lambda x: x['extension'],
compat_str) or determine_ext(
download_url, None) or 'mp4').lower()
return {
'url': download_url,
'ext': ext,
'width': int_or_none(source_file.get('width')),
'height': int_or_none(source_file.get('height')),
'filesize': parse_filesize(source_file.get('size')),
'format_id': source_name,
'preference': 1,
}
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'''(?x)
https?://
(?:
(?:
www|
player
)
\.
)?
vimeo(?:pro)?\.com/
(?!(?:channels|album|showcase)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/)
(?:.*?/)?
(?:
(?:
play_redirect_hls|
moogaloop\.swf)\?clip_id=
)?
(?:videos?/)?
(?P<id>[0-9]+)
(?:/[\da-f]+)?
/?(?:[?&].*)?(?:[#].*)?$
'''
IE_NAME = 'vimeo'
_TESTS = [
{
'url': 'http://vimeo.com/56015672#at=0',
'md5': '8879b6cc097e987f02484baf890129e5',
'info_dict': {
'id': '56015672',
'ext': 'mp4',
'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
'description': 'md5:2d3305bad981a06ff79f027f19865021',
'timestamp': 1355990239,
'upload_date': '20121220',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user7108434',
'uploader_id': 'user7108434',
'uploader': 'Filippo Valsorda',
'duration': 10,
'license': 'by-sa',
},
'params': {
'format': 'best[protocol=https]',
},
},
{
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
'note': 'Vimeo Pro video (#1197)',
'info_dict': {
'id': '68093876',
'ext': 'mp4',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/openstreetmapus',
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'description': 'md5:2c362968038d4499f4d79f88458590c1',
'duration': 1595,
'upload_date': '20130610',
'timestamp': 1370893156,
},
'params': {
'format': 'best[protocol=https]',
},
},
{
'url': 'http://player.vimeo.com/video/54469442',
'md5': '619b811a4417aa4abe78dc653becf511',
'note': 'Videos that embed the url in the player page',
'info_dict': {
'id': '54469442',
'ext': 'mp4',
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
'uploader': 'The BLN & Business of Software',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/theblnbusinessofsoftware',
'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
'description': None,
},
'params': {
'format': 'best[protocol=https]',
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
'url': 'http://vimeo.com/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'note': 'Video protected with password',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'timestamp': 1371200155,
'upload_date': '20130614',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
'description': 'md5:dca3ea23adb29ee387127bc4ddfce63f',
},
'params': {
'format': 'best[protocol=https]',
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/channels/keypeele/75629013',
'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
'info_dict': {
'id': '75629013',
'ext': 'mp4',
'title': 'Key & Peele: Terrorist Interrogation',
'description': 'md5:8678b246399b070816b12313e8b4eb5c',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/atencio',
'uploader_id': 'atencio',
'uploader': 'Peter Atencio',
'channel_id': 'keypeele',
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/keypeele',
'timestamp': 1380339469,
'upload_date': '20130928',
'duration': 187,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
'url': 'http://vimeo.com/76979871',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
'ext': 'mp4',
'title': 'The New Vimeo Player (You Know, For Videos)',
'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
'timestamp': 1381846109,
'upload_date': '20131015',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/staff',
'uploader_id': 'staff',
'uploader': 'Vimeo Staff',
'duration': 62,
}
},
{
# from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
'url': 'https://player.vimeo.com/video/98044508',
'note': 'The js code contains assignments to the same variable as the config',
'info_dict': {
'id': '98044508',
'ext': 'mp4',
'title': 'Pier Solar OUYA Official Trailer',
'uploader': 'Tulio Gonçalves',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user28849593',
'uploader_id': 'user28849593',
},
},
{
# contains original format
'url': 'https://vimeo.com/33951933',
'md5': '53c688fa95a55bf4b7293d37a89c5c53',
'info_dict': {
'id': '33951933',
'ext': 'mp4',
'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute',
'uploader': 'The DMCI',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/dmci',
'uploader_id': 'dmci',
'timestamp': 1324343742,
'upload_date': '20111220',
'description': 'md5:ae23671e82d05415868f7ad1aec21147',
},
},
{
# only available via https://vimeo.com/channels/tributes/6213729 and
# not via https://vimeo.com/6213729
'url': 'https://vimeo.com/channels/tributes/6213729',
'info_dict': {
'id': '6213729',
'ext': 'mp4',
'title': 'Vimeo Tribute: The Shining',
'uploader': 'Casey Donahue',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/caseydonahue',
'uploader_id': 'caseydonahue',
'channel_url': r're:https?://(?:www\.)?vimeo\.com/channels/tributes',
'channel_id': 'tributes',
'timestamp': 1250886430,
'upload_date': '20090821',
'description': 'md5:bdbf314014e58713e6e5b66eb252f4a6',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
# redirects to ondemand extractor and should be passed through it
# for successful extraction
'url': 'https://vimeo.com/73445910',
'info_dict': {
'id': '73445910',
'ext': 'mp4',
'title': 'The Reluctant Revolutionary',
'uploader': '10Ft Films',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/tenfootfilms',
'uploader_id': 'tenfootfilms',
'description': 'md5:0fa704e05b04f91f40b7f3ca2e801384',
'upload_date': '20130830',
'timestamp': 1377853339,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
},
{
'url': 'http://player.vimeo.com/video/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
},
'params': {
'format': 'best[protocol=https]',
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/moogaloop.swf?clip_id=2539741',
'only_matching': True,
},
{
'url': 'https://vimeo.com/109815029',
'note': 'Video not completely processed, "failed" seed status',
'only_matching': True,
},
{
'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
'only_matching': True,
},
{
'url': 'https://vimeo.com/album/2632481/video/79010983',
'only_matching': True,
},
{
# source file returns 403: Forbidden
'url': 'https://vimeo.com/7809605',
'only_matching': True,
},
{
'url': 'https://vimeo.com/160743502/abd0e13fb4',
'only_matching': True,
}
# https://gettingthingsdone.com/workflowmap/
# vimeo embed with check-password page protected by Referer header
]
@staticmethod
def _smuggle_referrer(url, referrer_url):
return smuggle_url(url, {'http_headers': {'Referer': referrer_url}})
@staticmethod
def _extract_urls(url, webpage):
urls = []
# Look for embedded (iframe) Vimeo player
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/\d+.*?)\1',
webpage):
urls.append(VimeoIE._smuggle_referrer(unescapeHTML(mobj.group('url')), url))
PLAIN_EMBED_RE = (
# Look for embedded (swf embed) Vimeo player
r'<embed[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)\1',
# Look more for non-standard embedded Vimeo player
r'<video[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/[0-9]+)\1',
)
for embed_re in PLAIN_EMBED_RE:
for mobj in re.finditer(embed_re, webpage):
urls.append(mobj.group('url'))
return urls
@staticmethod
def _extract_url(url, webpage):
urls = VimeoIE._extract_urls(url, webpage)
return urls[0] if urls else None
def _verify_player_video_password(self, url, video_id, headers):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
data = urlencode_postdata({
'password': base64.b64encode(password.encode()),
})
headers = merge_dicts(headers, {
'Content-Type': 'application/x-www-form-urlencoded',
})
checked = self._download_json(
url + '/check-password', video_id,
'Verifying the password', data=data, headers=headers)
if checked is False:
raise ExtractorError('Wrong video password', expected=True)
return checked
def _real_initialize(self):
self._login()
def _real_extract(self, url):
url, data = unsmuggle_url(url, {})
headers = std_headers.copy()
if 'http_headers' in data:
headers.update(data['http_headers'])
if 'Referer' not in headers:
headers['Referer'] = url
channel_id = self._search_regex(
r'vimeo\.com/channels/([^/]+)', url, 'channel id', default=None)
# Extract ID from URL
video_id = self._match_id(url)
orig_url = url
is_pro = 'vimeopro.com/' in url
is_player = '://player.vimeo.com/video/' in url
if is_pro:
# some videos require portfolio_id to be present in player url
# https://github.com/ytdl-org/youtube-dl/issues/20070
url = self._extract_url(url, self._download_webpage(url, video_id))
if not url:
url = 'https://vimeo.com/' + video_id
elif is_player:
url = 'https://player.vimeo.com/video/' + video_id
elif any(p in url for p in ('play_redirect_hls', 'moogaloop.swf')):
url = 'https://vimeo.com/' + video_id
try:
# Retrieve video webpage to extract further information
webpage, urlh = self._download_webpage_handle(
url, video_id, headers=headers)
redirect_url = urlh.geturl()
except ExtractorError as ee:
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
errmsg = ee.cause.read()
if b'Because of its privacy settings, this video cannot be played here' in errmsg:
raise ExtractorError(
'Cannot download embed-only video without embedding '
'URL. Please call youtube-dl with the URL of the page '
'that embeds this video.',
expected=True)
raise
# Now we begin extracting as much information as we can from what we
# retrieved. First we extract the information common to all extractors,
# and latter we extract those that are Vimeo specific.
self.report_extraction(video_id)
vimeo_config = self._extract_vimeo_config(webpage, video_id, default=None)
if vimeo_config:
seed_status = vimeo_config.get('seed_status', {})
if seed_status.get('state') == 'failed':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, seed_status['title']),
expected=True)
cc_license = None
timestamp = None
video_description = None
# Extract the config JSON
try:
try:
config_url = self._html_search_regex(
r' data-config-url="(.+?)"', webpage,
'config URL', default=None)
if not config_url:
# Sometimes new react-based page is served instead of old one that require
# different config URL extraction approach (see
# https://github.com/ytdl-org/youtube-dl/pull/7209)
page_config = self._parse_json(self._search_regex(
r'vimeo\.(?:clip|vod_title)_page_config\s*=\s*({.+?});',
webpage, 'page config'), video_id)
config_url = page_config['player']['config_url']
cc_license = page_config.get('cc_license')
timestamp = try_get(
page_config, lambda x: x['clip']['uploaded_on'],
compat_str)
video_description = clean_html(dict_get(
page_config, ('description', 'description_html_escaped')))
config = self._download_json(config_url, video_id)
except RegexNotFoundError:
# For pro videos or player.vimeo.com urls
# We try to find out to which variable is assigned the config dic
m_variable_name = re.search(r'(\w)\.video\.id', webpage)
if m_variable_name is not None:
config_re = [r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))]
else:
config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
config_re.append(r'\bvar\s+r\s*=\s*({.+?})\s*;')
config_re.append(r'\bconfig\s*=\s*({.+?})\s*;')
config = self._search_regex(config_re, webpage, 'info section',
flags=re.DOTALL)
config = json.loads(config)
except Exception as e:
if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')
if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
if '_video_password_verified' in data:
raise ExtractorError('video password verification failed!')
self._verify_video_password(redirect_url, video_id, webpage)
return self._real_extract(
smuggle_url(redirect_url, {'_video_password_verified': 'verified'}))
else:
raise ExtractorError('Unable to extract info section',
cause=e)
else:
if config.get('view') == 4:
config = self._verify_player_video_password(redirect_url, video_id, headers)
vod = config.get('video', {}).get('vod', {})
def is_rented():
if '>You rented this title.<' in webpage:
return True
if config.get('user', {}).get('purchased'):
return True
for purchase_option in vod.get('purchase_options', []):
if purchase_option.get('purchased'):
return True
label = purchase_option.get('label_string')
if label and (label.startswith('You rented this') or label.endswith(' remaining')):
return True
return False
if is_rented() and vod.get('is_trailer'):
feature_id = vod.get('feature_id')
if feature_id and not data.get('force_feature_id', False):
return self.url_result(smuggle_url(
'https://player.vimeo.com/player/%s' % feature_id,
{'force_feature_id': True}), 'Vimeo')
# Extract video description
if not video_description:
video_description = self._html_search_regex(
r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
webpage, 'description', default=None)
if not video_description:
video_description = self._html_search_meta(
'description', webpage, default=None)
if not video_description and is_pro:
orig_webpage = self._download_webpage(
orig_url, video_id,
note='Downloading webpage for description',
fatal=False)
if orig_webpage:
video_description = self._html_search_meta(
'description', orig_webpage, default=None)
if not video_description and not is_player:
self._downloader.report_warning('Cannot find video description')
# Extract upload date
if not timestamp:
timestamp = self._search_regex(
r'<time[^>]+datetime="([^"]+)"', webpage,
'timestamp', default=None)
try:
view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
except RegexNotFoundError:
# This info is only available in vimeo.com/{id} urls
view_count = None
like_count = None
comment_count = None
formats = []
source_format = self._extract_original_format(
'https://vimeo.com/' + video_id, video_id)
if source_format:
formats.append(source_format)
info_dict_config = self._parse_config(config, video_id)
formats.extend(info_dict_config['formats'])
self._vimeo_sort_formats(formats)
json_ld = self._search_json_ld(webpage, video_id, default={})
if not cc_license:
cc_license = self._search_regex(
r'<link[^>]+rel=["\']license["\'][^>]+href=(["\'])(?P<license>(?:(?!\1).)+)\1',
webpage, 'license', default=None, group='license')
channel_url = 'https://vimeo.com/channels/%s' % channel_id if channel_id else None
info_dict = {
'formats': formats,
'timestamp': unified_timestamp(timestamp),
'description': video_description,
'webpage_url': url,
'view_count': view_count,
'like_count': like_count,
'comment_count': comment_count,
'license': cc_license,
'channel_id': channel_id,
'channel_url': channel_url,
}
info_dict = merge_dicts(info_dict, info_dict_config, json_ld)
return info_dict
class VimeoOndemandIE(VimeoIE):
IE_NAME = 'vimeo:ondemand'
_VALID_URL = r'https?://(?:www\.)?vimeo\.com/ondemand/([^/]+/)?(?P<id>[^/?#&]+)'
_TESTS = [{
# ondemand video not available via https://vimeo.com/id
'url': 'https://vimeo.com/ondemand/20704',
'md5': 'c424deda8c7f73c1dfb3edd7630e2f35',
'info_dict': {
'id': '105442900',
'ext': 'mp4',
'title': 'המעבדה - במאי יותם פלדמן',
'uploader': 'גם סרטים',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/gumfilms',
'uploader_id': 'gumfilms',
'description': 'md5:4c027c965e439de4baab621e48b60791',
'upload_date': '20140906',
'timestamp': 1410032453,
},
'params': {
'format': 'best[protocol=https]',
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
# requires Referer to be passed along with og:video:url
'url': 'https://vimeo.com/ondemand/36938/126682985',
'info_dict': {
'id': '126584684',
'ext': 'mp4',
'title': 'Rävlock, rätt läte på rätt plats',
'uploader': 'Lindroth & Norin',
'uploader_url': r're:https?://(?:www\.)?vimeo\.com/lindrothnorin',
'uploader_id': 'lindrothnorin',
'description': 'md5:c3c46a90529612c8279fb6af803fc0df',
'upload_date': '20150502',
'timestamp': 1430586422,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
'url': 'https://vimeo.com/ondemand/nazmaalik',
'only_matching': True,
}, {
'url': 'https://vimeo.com/ondemand/141692381',
'only_matching': True,
}, {
'url': 'https://vimeo.com/ondemand/thelastcolony/150274832',
'only_matching': True,
}]
class VimeoChannelIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:channel'
_VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
_MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
_TITLE = None
_TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
_TESTS = [{
'url': 'https://vimeo.com/channels/tributes',
'info_dict': {
'id': 'tributes',
'title': 'Vimeo Tributes',
},
'playlist_mincount': 25,
}]
_BASE_URL_TEMPL = 'https://vimeo.com/channels/%s'
def _page_url(self, base_url, pagenum):
return '%s/videos/page:%d/' % (base_url, pagenum)
def _extract_list_title(self, webpage):
return self._TITLE or self._html_search_regex(
self._TITLE_RE, webpage, 'list title', fatal=False)
def _title_and_entries(self, list_id, base_url):
for pagenum in itertools.count(1):
page_url = self._page_url(base_url, pagenum)
webpage = self._download_webpage(
page_url, list_id,
'Downloading page %s' % pagenum)
if pagenum == 1:
yield self._extract_list_title(webpage)
# Try extracting href first since not all videos are available via
# short https://vimeo.com/id URL (e.g. https://vimeo.com/channels/tributes/6213729)
clips = re.findall(
r'id="clip_(\d+)"[^>]*>\s*<a[^>]+href="(/(?:[^/]+/)*\1)(?:[^>]+\btitle="([^"]+)")?', webpage)
if clips:
for video_id, video_url, video_title in clips:
yield self.url_result(
compat_urlparse.urljoin(base_url, video_url),
VimeoIE.ie_key(), video_id=video_id, video_title=video_title)
# More relaxed fallback
else:
for video_id in re.findall(r'id=["\']clip_(\d+)', webpage):
yield self.url_result(
'https://vimeo.com/%s' % video_id,
VimeoIE.ie_key(), video_id=video_id)
if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
break
def _extract_videos(self, list_id, base_url):
title_and_entries = self._title_and_entries(list_id, base_url)
list_title = next(title_and_entries)
return self.playlist_result(title_and_entries, list_id, list_title)
def _real_extract(self, url):
channel_id = self._match_id(url)
return self._extract_videos(channel_id, self._BASE_URL_TEMPL % channel_id)
class VimeoUserIE(VimeoChannelIE):
IE_NAME = 'vimeo:user'
_VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<id>[^/]+)(?:/videos|[#?]|$)'
_TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
_TESTS = [{
'url': 'https://vimeo.com/nkistudio/videos',
'info_dict': {
'title': 'Nki',
'id': 'nkistudio',
},
'playlist_mincount': 66,
}]
_BASE_URL_TEMPL = 'https://vimeo.com/%s'
class VimeoAlbumIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:album'
_VALID_URL = r'https://vimeo\.com/(?:album|showcase)/(?P<id>\d+)(?:$|[?#]|/(?!video))'
_TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
_TESTS = [{
'url': 'https://vimeo.com/album/2632481',
'info_dict': {
'id': '2632481',
'title': 'Staff Favorites: November 2013',
},
'playlist_mincount': 13,
}, {
'note': 'Password-protected album',
'url': 'https://vimeo.com/album/3253534',
'info_dict': {
'title': 'test',
'id': '3253534',
},
'playlist_count': 1,
'params': {
'videopassword': 'youtube-dl',
}
}]
_PAGE_SIZE = 100
    def _fetch_page(self, album_id, authorization, hashed_pass, page):
api_page = page + 1
query = {
'fields': 'link,uri',
'page': api_page,
'per_page': self._PAGE_SIZE,
}
if hashed_pass:
query['_hashed_pass'] = hashed_pass
videos = self._download_json(
'https://api.vimeo.com/albums/%s/videos' % album_id,
album_id, 'Downloading page %d' % api_page, query=query, headers={
                'Authorization': 'jwt ' + authorization,
})['data']
for video in videos:
link = video.get('link')
if not link:
continue
uri = video.get('uri')
video_id = self._search_regex(r'/videos/(\d+)', uri, 'video_id', default=None) if uri else None
yield self.url_result(link, VimeoIE.ie_key(), video_id)
def _real_extract(self, url):
album_id = self._match_id(url)
webpage = self._download_webpage(url, album_id)
viewer = self._parse_json(self._search_regex(
r'bootstrap_data\s*=\s*({.+?})</script>',
webpage, 'bootstrap data'), album_id)['viewer']
jwt = viewer['jwt']
album = self._download_json(
'https://api.vimeo.com/albums/' + album_id,
album_id, headers={'Authorization': 'jwt ' + jwt},
query={'fields': 'description,name,privacy'})
hashed_pass = None
if try_get(album, lambda x: x['privacy']['view']) == 'password':
password = self._downloader.params.get('videopassword')
if not password:
raise ExtractorError(
'This album is protected by a password, use the --video-password option',
expected=True)
self._set_vimeo_cookie('vuid', viewer['vuid'])
try:
hashed_pass = self._download_json(
'https://vimeo.com/showcase/%s/auth' % album_id,
album_id, 'Verifying the password', data=urlencode_postdata({
'password': password,
'token': viewer['xsrft'],
}), headers={
'X-Requested-With': 'XMLHttpRequest',
})['hashed_pass']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
raise ExtractorError('Wrong password', expected=True)
raise
entries = OnDemandPagedList(functools.partial(
self._fetch_page, album_id, jwt, hashed_pass), self._PAGE_SIZE)
return self.playlist_result(
entries, album_id, album.get('name'), album.get('description'))
class VimeoGroupsIE(VimeoChannelIE):
IE_NAME = 'vimeo:group'
_VALID_URL = r'https://vimeo\.com/groups/(?P<id>[^/]+)(?:/(?!videos?/\d+)|$)'
_TESTS = [{
'url': 'https://vimeo.com/groups/kattykay',
'info_dict': {
'id': 'kattykay',
'title': 'Katty Kay',
},
'playlist_mincount': 27,
}]
_BASE_URL_TEMPL = 'https://vimeo.com/groups/%s'
class VimeoReviewIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:review'
IE_DESC = 'Review pages on vimeo'
_VALID_URL = r'(?P<url>https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)/[0-9a-f]{10})'
_TESTS = [{
'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
'md5': 'c507a72f780cacc12b2248bb4006d253',
'info_dict': {
'id': '75524534',
'ext': 'mp4',
'title': "DICK HARDWICK 'Comedian'",
'uploader': 'Richard Hardwick',
'uploader_id': 'user21297594',
'description': "Comedian Dick Hardwick's five minute demo filmed in front of a live theater audience.\nEdit by Doug Mattocks",
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
'note': 'video player needs Referer',
'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
'md5': '6295fdab8f4bf6a002d058b2c6dce276',
'info_dict': {
'id': '91613211',
'ext': 'mp4',
'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
'uploader': 'DevWeek Events',
'duration': 2773,
'thumbnail': r're:^https?://.*\.jpg$',
'uploader_id': 'user22258446',
},
'skip': 'video gone',
}, {
'note': 'Password protected',
'url': 'https://vimeo.com/user37284429/review/138823582/c4d865efde',
'info_dict': {
'id': '138823582',
'ext': 'mp4',
'title': 'EFFICIENT PICKUP MASTERCLASS MODULE 1',
'uploader': 'TMB',
'uploader_id': 'user37284429',
},
'params': {
'videopassword': 'holygrail',
},
'skip': 'video gone',
}]
def _real_initialize(self):
self._login()
def _real_extract(self, url):
page_url, video_id = re.match(self._VALID_URL, url).groups()
clip_data = self._download_json(
page_url.replace('/review/', '/review/data/'),
video_id)['clipData']
config_url = clip_data['configUrl']
config = self._download_json(config_url, video_id)
info_dict = self._parse_config(config, video_id)
source_format = self._extract_original_format(
page_url + '/action', video_id)
if source_format:
info_dict['formats'].append(source_format)
self._vimeo_sort_formats(info_dict['formats'])
info_dict['description'] = clean_html(clip_data.get('description'))
return info_dict
class VimeoWatchLaterIE(VimeoChannelIE):
IE_NAME = 'vimeo:watchlater'
IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
_VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
_TITLE = 'Watch Later'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': 'https://vimeo.com/watchlater',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def _page_url(self, base_url, pagenum):
url = '%s/page:%d/' % (base_url, pagenum)
request = sanitized_Request(url)
# Set the header to get a partial html page with the ids,
# the normal page doesn't contain them.
request.add_header('X-Requested-With', 'XMLHttpRequest')
return request
def _real_extract(self, url):
return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
class VimeoLikesIE(VimeoChannelIE):
_VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
IE_NAME = 'vimeo:likes'
IE_DESC = 'Vimeo user likes'
_TESTS = [{
'url': 'https://vimeo.com/user755559/likes/',
'playlist_mincount': 293,
'info_dict': {
'id': 'user755559',
'title': 'urza’s Likes',
},
}, {
'url': 'https://vimeo.com/stormlapse/likes',
'only_matching': True,
}]
def _page_url(self, base_url, pagenum):
return '%s/page:%d/' % (base_url, pagenum)
def _real_extract(self, url):
user_id = self._match_id(url)
return self._extract_videos(user_id, 'https://vimeo.com/%s/likes' % user_id)
class VHXEmbedIE(VimeoBaseInfoExtractor):
IE_NAME = 'vhx:embed'
_VALID_URL = r'https?://embed\.vhx\.tv/videos/(?P<id>\d+)'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
config_url = self._parse_json(self._search_regex(
r'window\.OTTData\s*=\s*({.+})', webpage,
'ott data'), video_id, js_to_json)['config_url']
config = self._download_json(config_url, video_id)
info = self._parse_config(config, video_id)
self._vimeo_sort_formats(info['formats'])
return info
|
__author__ = 'Alex'
from Movement import Movement
class BaseCommand:
def __init__(self, movement):
assert isinstance(movement, Movement)
self.name = 'unknown'
self.m = movement
    def execute(self):
        pass
class Forward(BaseCommand):
def __init__(self, movement):
assert isinstance(movement, Movement)
self.name = 'forward'
self.m = movement
def execute(self):
self.m.moveCM(10)
class Reverse(BaseCommand):
def __init__(self, movement):
assert isinstance(movement, Movement)
self.name = 'reverse'
self.m = movement
def execute(self):
        # Assumes Movement.moveCM treats a negative distance as driving backwards.
        self.m.moveCM(-10)
class Left(BaseCommand):
def __init__(self, movement):
assert isinstance(movement, Movement)
self.name = 'left'
self.m = movement
def execute(self):
self.m.turnDegrees(-90)
class Right(BaseCommand):
def __init__(self, movement):
assert isinstance(movement, Movement)
self.name = 'right'
self.m = movement
def execute(self):
self.m.turnDegrees(90)
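# A minimal usage sketch (illustrative only; it assumes Movement() can be
# constructed with no arguments, which this file does not guarantee).
if __name__ == '__main__':
    m = Movement()
    commands = {c.name: c for c in (Forward(m), Reverse(m), Left(m), Right(m))}
    commands['forward'].execute()
    commands['left'].execute()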
|
import sys
import os
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'yara'
copyright = u'2014-2019, VirusTotal'
version = '3.9'
release = '3.9.0'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
html_theme = "default"
html_static_path = ['_static']
htmlhelp_basename = 'yaradoc'
latex_elements = {
}
latex_documents = [
('index', 'yara.tex', u'yara Documentation',
u'Victor M. Alvarez', 'manual'),
]
man_pages = [
('index', 'yara', u'yara Documentation',
[u'Victor M. Alvarez'], 1)
]
texinfo_documents = [
('index', 'yara', u'yara Documentation',
u'Victor M. Alvarez', 'yara', 'One line description of project.',
'Miscellaneous'),
]
|
"""Libraries of Keras metrics."""
import tensorflow as tf
def _apply_mask(y_true, sample_weight, masked_tokens, dtype):
if sample_weight is None:
sample_weight = tf.ones_like(y_true, dtype)
else:
sample_weight = tf.cast(sample_weight, dtype)
for token in masked_tokens:
mask = tf.cast(tf.not_equal(y_true, token), dtype)
sample_weight = sample_weight * mask
return sample_weight
class NumTokensCounter(tf.keras.metrics.Sum):
"""A `tf.keras.metrics.Metric` that counts tokens seen after masking."""
def __init__(self, masked_tokens=None, name='num_tokens', dtype=tf.int64):
self._masked_tokens = masked_tokens or []
super().__init__(name, dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
sample_weight = _apply_mask(y_true, sample_weight, self._masked_tokens,
self._dtype)
sample_weight = tf.reshape(sample_weight, [-1])
super().update_state(sample_weight)
def get_config(self):
config = super().get_config()
config['masked_tokens'] = tuple(self._masked_tokens)
return config
class MaskedCategoricalAccuracy(tf.keras.metrics.SparseCategoricalAccuracy):
"""An accuracy metric that masks some tokens."""
def __init__(self, masked_tokens=None, name='accuracy', dtype=None):
self._masked_tokens = masked_tokens or []
super().__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
sample_weight = _apply_mask(y_true, sample_weight, self._masked_tokens,
self._dtype)
num_classes = tf.shape(y_pred)[-1]
y_true = tf.reshape(y_true, [-1])
y_pred = tf.reshape(y_pred, [-1, num_classes])
sample_weight = tf.reshape(sample_weight, [-1])
super().update_state(y_true, y_pred, sample_weight)
def get_config(self):
config = super().get_config()
config['masked_tokens'] = tuple(self._masked_tokens)
return config
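# A minimal usage sketch (not part of the original module): it shows how masked
# tokens are dropped from the accuracy denominator. Token id 0 is assumed to be
# a padding token purely for illustration.
if __name__ == '__main__':
  metric = MaskedCategoricalAccuracy(masked_tokens=[0])
  y_true = tf.constant([[1, 2, 0]])            # last position is padding
  y_pred = tf.one_hot([[1, 0, 0]], depth=3)    # correct, wrong, (masked)
  metric.update_state(y_true, y_pred)
  # Only the two unmasked positions count: 1 correct out of 2 -> 0.5
  print(metric.result().numpy())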
|
"""Training script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from cs_gan import file_utils
from cs_gan import gan
from cs_gan import image_metrics
from cs_gan import utils
flags.DEFINE_integer(
'num_training_iterations', 1200000,
'Number of training iterations.')
flags.DEFINE_string(
'ode_mode', 'rk4', 'Integration method.')
flags.DEFINE_integer(
'batch_size', 64, 'Training batch size.')
flags.DEFINE_float(
    'grad_reg_weight', 0.02, 'Gradient regularisation weight.')
flags.DEFINE_string(
'opt_name', 'gd', 'Name of the optimiser (gd|adam).')
flags.DEFINE_bool(
    'schedule_lr', True, 'Whether to use a piecewise learning rate schedule.')
flags.DEFINE_bool(
'reg_first_grad_only', True, 'Whether only to regularise the first grad.')
flags.DEFINE_integer(
'num_latents', 128, 'The number of latents')
flags.DEFINE_integer(
'summary_every_step', 1000,
'The interval at which to log debug ops.')
flags.DEFINE_integer(
'image_metrics_every_step', 1000,
'The interval at which to log (expensive) image metrics.')
flags.DEFINE_integer(
'export_every', 10,
'The interval at which to export samples.')
flags.DEFINE_integer(
'num_eval_samples', 10000,
'The number of samples used to evaluate FID/IS.')
flags.DEFINE_string(
'dataset', 'cifar', 'The dataset used for learning (cifar|mnist).')
flags.DEFINE_string(
'output_dir', '/tmp/ode_gan/gan', 'Location where to save output files.')
flags.DEFINE_float('disc_lr', 4e-2, 'Discriminator Learning rate.')
flags.DEFINE_float('gen_lr', 4e-2, 'Generator Learning rate.')
flags.DEFINE_bool(
'run_real_data_metrics', False,
'Whether or not to run image metrics on real data.')
flags.DEFINE_bool(
'run_sample_metrics', True,
'Whether or not to run image metrics on samples.')
FLAGS = flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
def _copy_vars(v_list):
"""Copy variables in v_list."""
t_list = []
for v in v_list:
t_list.append(tf.identity(v))
return t_list
def _restore_vars(v_list, t_list):
"""Restore variables in v_list from t_list."""
ops = []
for v, t in zip(v_list, t_list):
ops.append(v.assign(t))
return ops
def _scale_vars(s, v_list):
"""Scale all variables in v_list by s."""
return [s * v for v in v_list]
def _acc_grads(g_sum, g_w, g):
"""Accumulate gradients in g, weighted by g_w."""
return [g_sum_i + g_w * g_i for g_sum_i, g_i in zip(g_sum, g)]
def _compute_reg_grads(gen_grads, disc_vars):
"""Compute gradients norm (this is an upper-bpund of the full-batch norm)."""
gen_norm = tf.accumulate_n([tf.reduce_sum(u * u) for u in gen_grads])
disc_reg_grads = tf.gradients(gen_norm, disc_vars)
return disc_reg_grads
def run_model(prior, images, model, disc_reg_weight):
"""Run the model with new data and samples.
Args:
prior: the noise source as the generator input.
images: images sampled from dataset.
model: a GAN model defined in gan.py.
    disc_reg_weight: regularisation weight for discriminator gradients.
Returns:
    debug_ops: statistics from the model, see gan.py for more details.
disc_grads: discriminator gradients.
gen_grads: generator gradients.
"""
generator_inputs = prior.sample(FLAGS.batch_size)
model_output = model.connect(images, generator_inputs)
optimization_components = model_output.optimization_components
disc_grads = tf.gradients(
optimization_components['disc'].loss,
optimization_components['disc'].vars)
gen_grads = tf.gradients(
optimization_components['gen'].loss,
optimization_components['gen'].vars)
if disc_reg_weight > 0.0:
reg_grads = _compute_reg_grads(gen_grads,
optimization_components['disc'].vars)
disc_grads = _acc_grads(disc_grads, disc_reg_weight, reg_grads)
debug_ops = model_output.debug_ops
return debug_ops, disc_grads, gen_grads
def update_model(model, disc_grads, gen_grads, disc_opt, gen_opt,
global_step, update_scale):
"""Update model with gradients."""
disc_vars, gen_vars = model.get_variables()
with tf.control_dependencies(gen_grads + disc_grads):
disc_update_op = disc_opt.apply_gradients(
zip(_scale_vars(update_scale, disc_grads),
disc_vars))
gen_update_op = gen_opt.apply_gradients(
zip(_scale_vars(update_scale, gen_grads),
gen_vars),
global_step=global_step)
update_op = tf.group([disc_update_op, gen_update_op])
return update_op
def main(argv):
del argv
utils.make_output_dir(FLAGS.output_dir)
data_processor = utils.DataProcessor()
# Compute the batch-size multiplier
if FLAGS.ode_mode == 'rk2':
batch_mul = 2
elif FLAGS.ode_mode == 'rk4':
batch_mul = 4
else:
batch_mul = 1
images = utils.get_train_dataset(data_processor, FLAGS.dataset,
int(FLAGS.batch_size * batch_mul))
image_splits = tf.split(images, batch_mul)
  logging.info('Generator learning rate: %g', FLAGS.gen_lr)
  logging.info('Discriminator learning rate: %g', FLAGS.disc_lr)
global_step = tf.train.get_or_create_global_step()
# Construct optimizers.
if FLAGS.opt_name == 'adam':
disc_opt = tf.train.AdamOptimizer(FLAGS.disc_lr, beta1=0.5, beta2=0.999)
gen_opt = tf.train.AdamOptimizer(FLAGS.gen_lr, beta1=0.5, beta2=0.999)
elif FLAGS.opt_name == 'gd':
if FLAGS.schedule_lr:
gd_disc_lr = tf.train.piecewise_constant(
global_step,
values=[FLAGS.disc_lr / 4., FLAGS.disc_lr, FLAGS.disc_lr / 2.],
boundaries=[500, 400000])
gd_gen_lr = tf.train.piecewise_constant(
global_step,
values=[FLAGS.gen_lr / 4., FLAGS.gen_lr, FLAGS.gen_lr / 2.],
boundaries=[500, 400000])
else:
gd_disc_lr = FLAGS.disc_lr
gd_gen_lr = FLAGS.gen_lr
disc_opt = tf.train.GradientDescentOptimizer(gd_disc_lr)
gen_opt = tf.train.GradientDescentOptimizer(gd_gen_lr)
else:
    raise ValueError('Unknown optimiser name!')
# Create the networks and models.
generator = utils.get_generator(FLAGS.dataset)
metric_net = utils.get_metric_net(FLAGS.dataset, use_sn=False)
model = gan.GAN(metric_net, generator)
prior = utils.make_prior(FLAGS.num_latents)
# Setup ODE parameters.
if FLAGS.ode_mode == 'rk2':
ode_grad_weights = [0.5, 0.5]
step_scale = [1.0]
elif FLAGS.ode_mode == 'rk4':
ode_grad_weights = [1. / 6., 1. / 3., 1. / 3., 1. / 6.]
step_scale = [0.5, 0.5, 1.]
elif FLAGS.ode_mode == 'euler':
# Euler update
ode_grad_weights = [1.0]
step_scale = []
else:
raise ValueError('Unknown ODE mode!')
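  # For reference: 'rk4' combines the four gradient evaluations with the
  # classical Runge-Kutta weights 1/6, 1/3, 1/3, 1/6, taking the intermediate
  # evaluations after half-step, half-step and full-step updates (step_scale);
  # 'rk2' averages two evaluations (Heun's method) and 'euler' needs none.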
# Extra steps for RK updates.
num_extra_steps = len(step_scale)
if FLAGS.reg_first_grad_only:
first_reg_weight = FLAGS.grad_reg_weight / ode_grad_weights[0]
other_reg_weight = 0.0
else:
first_reg_weight = FLAGS.grad_reg_weight
other_reg_weight = FLAGS.grad_reg_weight
debug_ops, disc_grads, gen_grads = run_model(prior, image_splits[0],
model, first_reg_weight)
disc_vars, gen_vars = model.get_variables()
final_disc_grads = _scale_vars(ode_grad_weights[0], disc_grads)
final_gen_grads = _scale_vars(ode_grad_weights[0], gen_grads)
restore_ops = []
# Preparing for further RK steps.
if num_extra_steps > 0:
# copy the variables before they are changed by update_op
saved_disc_vars = _copy_vars(disc_vars)
saved_gen_vars = _copy_vars(gen_vars)
# Enter RK loop.
with tf.control_dependencies(saved_disc_vars + saved_gen_vars):
step_deps = []
for i_step in range(num_extra_steps):
with tf.control_dependencies(step_deps):
# Compute gradient steps for intermediate updates.
update_op = update_model(
model, disc_grads, gen_grads, disc_opt, gen_opt,
None, step_scale[i_step])
with tf.control_dependencies([update_op]):
_, disc_grads, gen_grads = run_model(
prior, image_splits[i_step + 1], model, other_reg_weight)
            # Accumulate gradients for final update.
final_disc_grads = _acc_grads(final_disc_grads,
ode_grad_weights[i_step + 1],
disc_grads)
final_gen_grads = _acc_grads(final_gen_grads,
ode_grad_weights[i_step + 1],
gen_grads)
# Make new restore_op for each step.
restore_ops = []
restore_ops += _restore_vars(disc_vars, saved_disc_vars)
restore_ops += _restore_vars(gen_vars, saved_gen_vars)
step_deps = restore_ops
with tf.control_dependencies(restore_ops):
update_op = update_model(
model, final_disc_grads, final_gen_grads, disc_opt, gen_opt,
global_step, 1.0)
samples = generator(prior.sample(FLAGS.batch_size), is_training=False)
# Get data needed to compute FID. We also compute metrics on
# real data as a sanity check and as a reference point.
eval_real_data = utils.get_real_data_for_eval(FLAGS.num_eval_samples,
FLAGS.dataset,
split='train')
def sample_fn(x):
return utils.optimise_and_sample(x, module=model,
data=None, is_training=False)[0]
if FLAGS.run_sample_metrics:
sample_metrics = image_metrics.get_image_metrics_for_samples(
eval_real_data, sample_fn,
prior, data_processor,
num_eval_samples=FLAGS.num_eval_samples)
else:
sample_metrics = {}
if FLAGS.run_real_data_metrics:
data_metrics = image_metrics.get_image_metrics(
eval_real_data, eval_real_data)
else:
data_metrics = {}
sample_exporter = file_utils.FileExporter(
os.path.join(FLAGS.output_dir, 'samples'))
# Hooks.
debug_ops['it'] = global_step
# Abort training on Nans.
nan_disc_hook = tf.train.NanTensorHook(debug_ops['disc_loss'])
nan_gen_hook = tf.train.NanTensorHook(debug_ops['gen_loss'])
# Step counter.
  step_counter_hook = tf.train.StepCounterHook()
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=utils.get_ckpt_dir(FLAGS.output_dir), save_secs=10 * 60)
loss_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.summary_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(debug_ops))
metrics_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.image_metrics_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(sample_metrics))
hooks = [checkpoint_saver_hook, metrics_summary_saver_hook,
           nan_disc_hook, nan_gen_hook, step_counter_hook,
loss_summary_saver_hook]
# Start training.
with tf.train.MonitoredSession(hooks=hooks) as sess:
logging.info('starting training')
for key, value in sess.run(data_metrics).items():
      logging.info('%s: %f', key, value)
for i in range(FLAGS.num_training_iterations):
sess.run(update_op)
if i % FLAGS.export_every == 0:
samples_np, data_np = sess.run([samples, image_splits[0]])
# Create an object which gets data and does the processing.
data_np = data_processor.postprocess(data_np)
samples_np = data_processor.postprocess(samples_np)
sample_exporter.save(samples_np, 'samples')
sample_exporter.save(data_np, 'data')
if __name__ == '__main__':
tf.enable_resource_variables()
app.run(main)
|
from facette.utils import *
from facette.v1.plotserie import PlotSerie
import json
PLOT_ID = "id"
PLOT_NAME = "name"
PLOT_DESCRIPTION = "description"
PLOT_TYPE = "type"
PLOT_SERIES = "series"
PLOT_STACK_MODE = "stack_mode"
PLOT_START = "start"
PLOT_END = "end"
PLOT_STEP = "step"
PLOT_MODIFIED = "modified"
PLOT_UNIT_LABEL = "unit_label"
PLOT_UNIT_TYPE = "unit_type"
GRAPH_TYPE_AREA = 1
GRAPH_TYPE_LINE = 2
STACK_MODE_NONE = 1
STACK_MODE_NORMAL = 2
STACK_MODE_PERCENT = 3
class Plot:
def __init__(self, js=""):
self.plot = {}
self.id = facette_to_json(PLOT_ID, js, self.plot)
self.name = facette_to_json(PLOT_NAME, js, self.plot)
self.description = facette_to_json(PLOT_DESCRIPTION, js, self.plot)
self.type = facette_to_json(PLOT_TYPE, js, self.plot)
self.stack_mode = facette_to_json(PLOT_STACK_MODE, js, self.plot)
self.start = facette_to_json(PLOT_START, js, self.plot)
self.end = facette_to_json(PLOT_END, js, self.plot)
self.step = facette_to_json(PLOT_STEP, js, self.plot)
self.modified = facette_to_json(PLOT_MODIFIED, js, self.plot)
self.unit_label = facette_to_json(PLOT_UNIT_LABEL, js, self.plot)
self.unit_type = facette_to_json(PLOT_UNIT_TYPE, js, self.plot)
self.series = []
if js.get(PLOT_SERIES):
for x in js[PLOT_SERIES]:
e = PlotSerie(x)
self.series.append(e)
self.plot[PLOT_SERIES] = self.series
def __str__(self):
js = self.plot
series = []
for s in self.series:
series.append(json.loads(str(s)))
js[PLOT_SERIES] = series
return json.dumps(js)
def __repr__(self):
return str(self)
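# A minimal usage sketch (hypothetical values, not part of the library): a Plot
# is normally built from the decoded JSON dict returned by the Facette API, so
# every field name below is illustrative only.
if __name__ == '__main__':
    example_js = {
        "id": "0f0e1d2c", "name": "cpu-usage", "description": "CPU usage per host",
        "type": GRAPH_TYPE_LINE, "stack_mode": STACK_MODE_NONE, "series": [],
        "start": "", "end": "", "step": 0, "modified": "",
        "unit_label": "%", "unit_type": 0,
    }
    p = Plot(example_js)
    print(p)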
|
"""
Set up the logging
"""
import logging
import tempfile
import os
def initialize_logging():
"""
Set up the screen and file logging.
:return: The log filename
"""
# set up DEBUG logging to file, INFO logging to STDERR
log_file = os.path.join(tempfile.gettempdir(), 'spfy.log')
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
# set up logging to file - see previous section for more details
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=log_file,
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setFormatter(formatter)
console.setLevel(logging.INFO)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
return log_file
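# A minimal usage sketch (assumes this is called once at program start): DEBUG
# messages go only to the log file, INFO and above also reach the console.
if __name__ == '__main__':
    path = initialize_logging()
    logging.info('console and file: logging to %s', path)
    logging.debug('file only: verbose diagnostic output')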
|
import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
wd = webdriver.Firefox(capabilities={"marionette": True})
#(desired_capabilities={"chromeOptions": {"args": ["--start-fullscreen"]}})
request.addfinalizer(wd.quit)
return wd
def test_example(driver):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_xpath("//input[@name='username']").send_keys("admin")
driver.find_element_by_xpath("//input[@name='password']").send_keys("admin")
driver.find_element_by_xpath("//button[@name='login']").click()
|
import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1. / (1. + np.exp(-x))
return x
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f * (1. - f)
return f
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
print "You should verify these results!\n"
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_sigmoid_basic();
#test_sigmoid()
|
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
import datetime
import socket
import time
import sys
import os.path
lib_path = os.path.abspath('../utils')
sys.path.append(lib_path)
from myParser import *
from myCrypto import *
import re
import hashlib
host='localhost'
port=9090
state="INITIAL"
device=""
server="mysensors"
class mySensorDatagramProtocol(DatagramProtocol):
def __init__(self, host,port,reactor):
self.ip= socket.gethostbyname(host)
self.port = port
#self._reactor=reactor
#self.ip=reactor.resolve(host)
def startProtocol(self):
self.transport.connect(self.ip,self.port)
if state=='INITIAL':
#If system is at the initial state, it will send the device creation Senze
self.register()
else:
response=raw_input("Enter your Senze:")
self.sendDatagram(response)
def stopProtocol(self):
#on disconnect
#self._reactor.listenUDP(0, self)
print "STOP **************"
def register(self):
global server
cry=myCrypto(name=device)
senze ='SHARE #pubkey %s @%s' %(pubkey,server)
senze=cry.signSENZE(senze)
self.transport.write(senze)
def sendDatagram(self,senze):
global server
cry=myCrypto(name=device)
senze=cry.signSENZE(senze)
print senze
self.transport.write(senze)
def datagramReceived(self, datagram, host):
print 'Datagram received: ', repr(datagram)
parser=myParser(datagram)
recipients=parser.getUsers()
sender=parser.getSender()
signature=parser.getSignature()
data=parser.getData()
sensors=parser.getSensors()
cmd=parser.getCmd()
if cmd=="DATA":
if 'UserCreated' in data['msg']:
#Creating the .devicename file and store the device name and PIN
f=open(".devicename",'w')
f.write(device+'\n')
f.close()
print device+ " was created at the server."
print "You should execute the program again."
print "The system halted!"
reactor.stop()
elif 'UserCreationFailed' in data['msg']:
print "This user name may be already taken"
print "You can try it again with different username"
print "The system halted!"
reactor.stop()
#self.sendDatagram()
def init():
#cam=myCamDriver()
global device
global pubkey
global state
#If .device name is not there, we will read the device name from keyboard
#else we will get it from .devicename file
try:
if not os.path.isfile(".devicename"):
device=raw_input("Enter the device name: ")
# Account need to be created at the server
state='INITIAL'
else:
#The device name will be read form the .devicename file
f=open(".devicename","r")
device = f.readline().rstrip("\n")
state='READY'
except:
print "ERRER: Cannot access the device name file."
raise SystemExit
#Here we will generate public and private keys for the device
#These keys will be used to perform authentication and key exchange
try:
cry=myCrypto(name=device)
#If keys are not available yet
if not os.path.isfile(cry.pubKeyLoc):
            # Generate or load an RSA keypair with an exponent of 65537 in PEM format
            # The private and public keys are saved in the .devicenamePriveKey and .devicenamePubKey files
cry.generateRSA(bits=1024)
pubkey=cry.loadRSAPubKey()
except:
print "ERRER: Cannot genereate private/public keys for the device."
raise SystemExit
print pubkey
#Check the network connectivity.
#check_connectivity(ServerName)
def main():
global host
global port
protocol = mySensorDatagramProtocol(host,port,reactor)
reactor.listenUDP(0, protocol)
reactor.run()
if __name__ == '__main__':
init()
main()
|
"""
Tests.
"""
import unittest
from bruges.rockphysics import fluidsub
vp_gas = 2429.0
vs_gas = 1462.4
rho_gas = 2080.
vp_brine = 2850.5
vs_brine = 1416.1
rho_brine = 2210.0
phi = 0.275 # Don't know this... reading from fig
rhohc = 250.0 # gas
rhow = 1040.0 # brine
sw = 0.3 # Don't know this... just guessing
swnew = 1.0 # Don't know this... just guessing
khc = 207000000.0 # gas
kw = 2950000000.0 # brine
kclay = 25000000000.0
kqtz = 37000000000.0
vclay = 0.05
kmin = 36266406250.0 # Don't know this... reading from fig
class FluidsubTest(unittest.TestCase):
"""
Tests fluid sub calculations against Smith et al 2003.
https://dl.dropboxusercontent.com/u/14965965/Smith_etal_2003.pdf
"""
def test_avseth(self):
# Base case: gas
# Subbing with: brine
sub = fluidsub.avseth_fluidsub(vp=vp_gas,
vs=vs_gas,
rho=rho_gas,
phi=phi,
rhof1=rhohc,
rhof2=rhow,
kmin=37000000000,
kf1=khc,
kf2=kw)
self.assertAlmostEqual(sub[0], vp_brine, places=-1) # Cannot match :(
self.assertAlmostEqual(sub[1], vs_brine, places=-1) # Cannot match :(
self.assertAlmostEqual(sub[2], rho_brine, places=-1) # Cannot match :(
def test_smith(self):
# Base case: gas
# Subbing with: brine
sub = fluidsub.smith_fluidsub(vp=vp_gas,
vs=vs_gas,
rho=rho_gas,
phi=phi,
rhohc=rhohc,
rhow=rhow,
sw=sw,
swnew=swnew,
khc=khc,
kw=kw,
kclay=kclay,
kqtz=kqtz,
vclay=vclay)
self.assertAlmostEqual(sub[0], vp_brine, places=-1)
self.assertAlmostEqual(sub[1], vs_brine, places=-1)
self.assertAlmostEqual(sub[2], rho_brine, places=-1) # Cannot match :(
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(FluidsubTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
from __future__ import absolute_import
from rigour.errors import ValidationFailed
from rigour.types import *
from rigour.constraints import length_between
import rigour
import pytest
def test_secrecy_declared_before():
t = String().secret().constrain(length_between(4,6))
with pytest.raises(ValidationFailed) as excinfo:
t.check("xxx")
message = str(excinfo)
assert "xxx" not in message
def test_secrecy_declared_after():
t = String().constrain(length_between(4,6)).secret()
with pytest.raises(ValidationFailed) as excinfo:
t.check("xxx")
message = str(excinfo)
assert "xxx" not in message
|
"""
The No Age scheduler is based on the Heapset scheduler, though it does not take age into account.
.. warning:: This scheduler does not take the age into account, making it **unusable** in simulations where the *timeAdvance* function can return (exactly) 0. If unsure, do **not** use this scheduler, but the more general Heapset scheduler.
The heap will contain only the timestamps of events that should happen. One of the dictionaries will contain the actual models that transition at the specified time. The second dictionary then contains the reverse relation: it maps the models to their time_next. This reverse relation is necessary to know the *old* time_next value of the model, because as soon as the model has its time_next changed, its previously scheduled time would be unknown. This 'previous time' is **not** equal to the *timeLast*, as it is possible that the model's wait time was interrupted.
To schedule a model, it is added to the dictionary at its time_next. If it is the first element at that timestamp, the timestamp is also pushed onto the heap. This way the heap only contains *unique* timestamps, so the actual complexity is reduced to the number of *different* timestamps. Furthermore, the reverse relation is also updated.
Unscheduling is done similarly by simply removing the element from the dictionary.
Rescheduling is a slight optimisation of unscheduling, followed by scheduling.
This scheduler still schedules models that are inactive (their time_next is infinity), though this does not influence the complexity: infinity is a single element in the heap that is always present, and since a heap has O(log(n)) complexity, this one additional element has no serious impact.
The main advantage over the Activity Heap is that it never gets dirty and thus doesn't require periodic cleanup. The only part that gets dirty is the actual heap, which only contains small timestamps. Duplicates of these are also reduced to a single element, so memory consumption should not be a problem in most cases.
This scheduler is ideal in situations where most transitions happen at exactly the same time, as we can then profit from the internal structure and simply return the mapped elements. It remains sufficiently efficient in most other cases, mainly because the code base is a lot smaller than that of the Activity Heap.
"""
from heapq import heappush, heappop
from pypdevs.logger import *
class SchedulerNA(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: all models in the simulation
"""
self.heap = []
self.reverse = [None] * total_models
self.mapped = {}
self.infinite = float('inf')
# Init the basic 'inactive' entry here, to prevent scheduling in the heap itself
self.mapped[self.infinite] = set()
self.epsilon = epsilon
for m in models:
self.schedule(m)
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
try:
self.mapped[model.time_next[0]].add(model)
except KeyError:
self.mapped[model.time_next[0]] = set([model])
heappush(self.heap, model.time_next[0])
try:
self.reverse[model.model_id] = model.time_next[0]
except IndexError:
self.reverse.append(model.time_next[0])
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
try:
self.mapped[self.reverse[model.model_id]].remove(model)
except KeyError:
pass
self.reverse[model.model_id] = None
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
        #NOTE the usage of exceptions is a lot better for the PyPy JIT and nets a noticeable speedup
# as the JIT generates guard statements for an 'if'
for model in reschedule_set:
model_id = model.model_id
try:
self.mapped[self.reverse[model_id]].remove(model)
except KeyError:
# Element simply not present, so don't need to unschedule it
pass
self.reverse[model_id] = tn = model.time_next[0]
try:
self.mapped[tn].add(model)
except KeyError:
# Create a tuple with a single entry and use it to initialize the mapped entry
self.mapped[tn] = set((model, ))
heappush(self.heap, tn)
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
first = self.heap[0]
while len(self.mapped[first]) == 0:
del self.mapped[first]
heappop(self.heap)
first = self.heap[0]
        # The age was stripped off
return (first, 1)
def getImminent(self, time):
"""
Returns a list of all models that transition at the provided time, with the specified epsilon deviation allowed.
:param time: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
t, age = time
imm_children = set()
try:
first = self.heap[0]
if (abs(first - t) < self.epsilon):
#NOTE this would change the original set, though this doesn't matter as it is no longer used
imm_children = self.mapped.pop(first)
heappop(self.heap)
first = self.heap[0]
while (abs(first - t) < self.epsilon):
imm_children |= self.mapped.pop(first)
heappop(self.heap)
first = self.heap[0]
except IndexError:
pass
return imm_children
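# A minimal usage sketch (not part of PyPDEVS): _StubModel is a hypothetical
# stand-in exposing the two attributes the scheduler relies on, model_id and
# time_next (a (time, age) tuple).
if __name__ == '__main__':
    class _StubModel(object):
        def __init__(self, model_id, t):
            self.model_id = model_id
            self.time_next = (t, 1)
    models = [_StubModel(0, 5.0), _StubModel(1, 5.0), _StubModel(2, 7.0)]
    scheduler = SchedulerNA(models, 1e-6, 3)
    first = scheduler.readFirst()               # (5.0, 1): earliest scheduled time
    imminent = scheduler.getImminent(first)     # both models scheduled at t=5.0
    print(first, sorted(m.model_id for m in imminent))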
|
from datetime import datetime
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from core.spaces.file_validation import ContentTypeRestrictedFileField
from fields import StdImageField
from allowed_types import ALLOWED_CONTENT_TYPES
class Space(models.Model):
"""
Spaces model. This model stores a "space" or "place" also known as a
participative process in reality. Every place has a minimum set of
settings for customization.
There are three main permission roles in every space: administrator
(admins), moderators (mods) and regular users (users).
"""
name = models.CharField(_('Name'), max_length=250, unique=True,
help_text=_('Max: 250 characters'))
url = models.CharField(_('URL'), max_length=100, unique=True,
validators=[RegexValidator(regex='^[a-z0-9_]+$',
message='Invalid characters in the space URL.')],
help_text=_('Valid characters are lowercase, digits and \
            underscore. This will be the accessible URL'))
description = models.TextField(_('Description'),
default=_('Write here your description.'))
pub_date = models.DateTimeField(_('Date of creation'), auto_now_add=True)
author = models.ForeignKey(User, blank=True, null=True,
verbose_name=_('Space creator'), help_text=_('Select a user that \
will be marked as creator of the space'))
logo = StdImageField(upload_to='spaces/logos', size=(100, 75, False),
help_text = _('Valid extensions are jpg, jpeg, png and gif'))
banner = StdImageField(upload_to='spaces/banners', size=(500, 75, False),
help_text = _('Valid extensions are jpg, jpeg, png and gif'))
public = models.BooleanField(_('Public space'), help_text=_("This will \
make the space visible to everyone, but registration will be \
necessary to participate."))
mod_debate = models.BooleanField(_('Debate'))
mod_proposals = models.BooleanField(_('Proposals'))
mod_news = models.BooleanField(_('News'))
mod_cal = models.BooleanField(_('Calendar'))
mod_docs = models.BooleanField(_('Documents'))
mod_voting = models.BooleanField(_('Voting'))
class Meta:
ordering = ['name']
verbose_name = _('Space')
verbose_name_plural = _('Spaces')
get_latest_by = 'pub_date'
permissions = (
('view_space', 'Can view this space.'),
('admin_space', 'Can administrate this space.'),
('mod_space', 'Can moderate this space.')
)
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('space-index', (), {
'space_url': self.url})
class Entity(models.Model):
"""
This model stores the name of the entities responsible for the creation
of the space or supporting it.
"""
name = models.CharField(_('Name'), max_length=100, unique=True)
website = models.CharField(_('Website'), max_length=100, null=True,
blank=True)
logo = models.ImageField(upload_to='spaces/logos', verbose_name=_('Logo'),
blank=True, null=True)
space = models.ForeignKey(Space, blank=True, null=True)
class Meta:
ordering = ['name']
verbose_name = _('Entity')
verbose_name_plural = _('Entities')
def __unicode__(self):
return self.name
class Document(models.Model):
"""
    This model stores documents for the space, like a document repository.
There is no restriction in what a user can upload to the space.
:methods: get_file_ext, get_file_size
"""
title = models.CharField(_('Document title'), max_length=100,
help_text=_('Max: 100 characters'))
space = models.ForeignKey(Space, blank=True, null=True,
help_text=_('Change the space to whom belongs this document'))
docfile = ContentTypeRestrictedFileField(_('File'),
upload_to='spaces/documents/%Y/%m/%d',
content_types=ALLOWED_CONTENT_TYPES,
max_upload_size=26214400,
help_text=_('Permitted file types: DOC, DOCX, PPT, ODT, ODF, ODP, \
PDF, RST, TXT.'))
pub_date = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, verbose_name=_('Author'), blank=True,
null=True, help_text=_('Change the user that will figure as the \
author'))
def get_file_ext(self):
filename = self.docfile.name
        # Use the last component so names containing extra dots still work.
        extension = filename.split('.')
        return extension[-1].upper()
    def get_file_size(self):
        if self.docfile.size < 1024:
            return str(self.docfile.size) + " Bytes"
        elif self.docfile.size < 1048576:
            return str(round(self.docfile.size / 1024.0, 2)) + " KB"
        else:
            return str(round(self.docfile.size / 1048576.0, 2)) + " MB"
class Meta:
ordering = ['pub_date']
verbose_name = _('Document')
verbose_name_plural = _('Documents')
get_latest_by = 'pub_date'
# There is no 'view-document' view, so I'll leave the get_absolute_url
    # method without permalink. Remember that the document files are accessed
# through the url() method in templates.
def get_absolute_url(self):
return '/spaces/%s/docs/%s' % (self.space.url, self.id)
class Event(models.Model):
"""
Meeting data model. Every space (process) has N meetings. This will
keep record of the assistants, meeting name, etc.
"""
title = models.CharField(_('Event name'), max_length=250,
help_text="Max: 250 characters")
space = models.ForeignKey(Space, blank=True, null=True)
user = models.ManyToManyField(User, verbose_name=_('Users'),
help_text=_('List of the users that will assist or assisted to the \
event.'))
pub_date = models.DateTimeField(auto_now_add=True)
event_author = models.ForeignKey(User, verbose_name=_('Created by'),
blank=True, null=True, related_name='meeting_author',
help_text=_('Select the user that will be designated as author.'))
event_date = models.DateTimeField(verbose_name=_('Event date'),
help_text=_('Select the date where the event is celebrated.'))
description = models.TextField(_('Description'), blank=True, null=True)
location = models.TextField(_('Location'), blank=True, null=True)
latitude = models.DecimalField(_('Latitude'), blank=True, null=True,
max_digits=17, decimal_places=15, help_text=_('Specify it in decimal'))
longitude = models.DecimalField(_('Longitude'), blank=True, null=True,
max_digits=17, decimal_places=15, help_text=_('Specify it in decimal'))
    def is_due(self):
        return self.event_date < datetime.now()
class Meta:
ordering = ['event_date']
verbose_name = _('Event')
verbose_name_plural = _('Events')
get_latest_by = 'event_date'
permissions = (
('view_event', 'Can view this event'),
('admin_event', 'Can administrate this event'),
('mod_event', 'Can moderate this event'),
)
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return ('view-event', (), {
'space_url': self.space.url,
'event_id': str(self.id)})
class Intent(models.Model):
"""
Intent data model. Intent stores the reference of a user-token when a user
asks entering in a restricted space.
.. versionadded: 0.1.5
"""
user = models.ForeignKey(User)
space = models.ForeignKey(Space)
token = models.CharField(max_length=32)
requested_on = models.DateTimeField(auto_now_add=True)
def get_approve_url(self):
site = Site.objects.all()[0]
return "http://%s%sintent/approve/%s" % (site.domain, self.space.get_absolute_url(), self.token)
|
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext_lazy
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
class AddProtocol(policy.PolicyTargetMixin, tables.LinkAction):
name = "create"
verbose_name = _("Add Protocol")
url = "horizon:identity:identity_providers:protocols:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_protocol"),)
def get_link_url(self, datum=None):
idp_id = self.table.kwargs['identity_provider_id']
return reverse(self.url, args=(idp_id,))
class RemoveProtocol(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ngettext_lazy(
"Delete Protocol",
"Delete Protocols",
count
)
@staticmethod
def action_past(count):
return ngettext_lazy(
"Deleted Protocol",
"Deleted Protocols",
count
)
policy_rules = (("identity", "identity:delete_protocol"),)
def delete(self, request, obj_id):
identity_provider = self.table.kwargs['identity_provider_id']
protocol = obj_id
api.keystone.protocol_delete(request, identity_provider, protocol)
class ProtocolsTable(tables.DataTable):
protocol = tables.Column("id",
verbose_name=_("Protocol ID"))
mapping = tables.Column("mapping_id",
verbose_name=_("Mapping ID"))
def get_object_display(self, datum):
return datum.id
class Meta(object):
name = "idp_protocols"
verbose_name = _("Protocols")
table_actions = (AddProtocol, RemoveProtocol)
row_actions = (RemoveProtocol, )
|
"""nsx_gw_devices
Revision ID: 19180cf98af6
Revises: 117643811bca
Create Date: 2014-02-26 02:46:26.151741
"""
revision = '19180cf98af6'
down_revision = '117643811bca'
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'networkgatewaydevicereferences',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'),
mysql_engine='InnoDB')
    # Copy data from networkgatewaydevices into networkgatewaydevicereferences
op.execute("INSERT INTO networkgatewaydevicereferences SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevices")
# drop networkgatewaydevices
op.drop_table('networkgatewaydevices')
op.create_table(
'networkgatewaydevices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('connector_type', sa.String(length=10), nullable=True),
sa.Column('connector_ip', sa.String(length=64), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Create a networkgatewaydevice for each existing reference.
# For existing references nsx_id == neutron_id
    # Do not fill connector info as it would be unknown
op.execute("INSERT INTO networkgatewaydevices (id, nsx_id) SELECT "
"id, id as nsx_id FROM networkgatewaydevicereferences")
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('networkgatewaydevices')
# Re-create previous version of networkgatewaydevices table
op.create_table(
'networkgatewaydevices',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
# Copy from networkgatewaydevicereferences to networkgatewaydevices
op.execute("INSERT INTO networkgatewaydevices SELECT "
"id, network_gateway_id, interface_name FROM "
"networkgatewaydevicereferences")
    # Drop networkgatewaydevicereferences
op.drop_table('networkgatewaydevicereferences')
|
from datetime import datetime
import random
import string
from bson import ObjectId
class DuplicateUserException(Exception):
    def __init__(self, message='User name/email already exists'):
        Exception.__init__(self, message)
class UserServiceException(Exception):
def __init__(self, message=None):
Exception.__init__(self, message)
@classmethod
def cannot_delete_super_admin(cls):
return UserServiceException("Cannot delete super admin user!")
class UserService(object):
def __init__(self, db):
self.db = db
self.users = self.db.user_collection
def generate_api_key(self):
s = string.ascii_letters + string.digits
return ''.join(random.sample(s, 20))
def create(self, item):
if self.user_exists(item['email']):
raise DuplicateUserException()
item.pop('_id', None)
item['created_at'] = datetime.now()
item['status'] = True
if 'api_key' not in item:
item['api_key'] = self.generate_api_key()
if 'roles' not in item or item['roles'] is None or len(item['roles']) == 0:
item['roles'] = ['member']
return self.users.insert(item)
def get_by_email(self, email):
return self.users.find_one({"email": email})
def validate_user(self, username, password):
query = {'email': username, 'password': password}
return self.users.find(query).count() > 0
def search(self, email=None):
query = {}
if email is not None:
query['email'] = email
return [x for x in self.users.find(query)]
def delete(self, id):
item = self.get_by_id(id)
if item and 'roles' in item and item['roles'] is not None and 'super_admin' in item['roles']:
raise UserServiceException.cannot_delete_super_admin()
return self.users.remove({"_id": ObjectId(id)})
def get_by_id(self, id):
return self.users.find_one({"_id": ObjectId(id)})
def get_by_api_key(self, api_key):
return self.users.find_one({"api_key": api_key})
def update(self, item):
if item['_id'] is None:
return item
if self.user_exists(item['email'], str(item['_id'])):
raise DuplicateUserException()
item['updated_at'] = datetime.now()
self.users.save(item)
return item
def user_exists(self, email, id=None):
query = {}
if id is not None:
query = {"_id": {"$ne": ObjectId(id)}}
query['email'] = email
return self.users.find(query).count() > 0
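# A minimal usage sketch (hypothetical; assumes a pymongo-style database object
# whose user_collection supports the legacy insert/save/remove/count API used
# above, e.g. an older pymongo release against a running MongoDB instance).
if __name__ == '__main__':
    from pymongo import MongoClient
    service = UserService(MongoClient().example_db)
    new_id = service.create({'email': 'alice@example.com', 'password': 'secret'})
    print(service.get_by_id(new_id)['email'])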
|
from abc import ABCMeta, abstractmethod, abstractproperty
from contextlib import contextmanager
from functools import wraps
import gzip
from inspect import getargspec
from itertools import (
combinations,
count,
product,
)
import operator
import os
from os.path import abspath, dirname, join, realpath
import shutil
from sys import _getframe
import tempfile
from logbook import TestHandler
from mock import patch
from nose.tools import nottest
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from six import itervalues, iteritems, with_metaclass
from six.moves import filter, map
from sqlalchemy import create_engine
from testfixtures import TempDirectory
from toolz import concat, curry
from zipline.assets import AssetFinder, AssetDBWriter
from zipline.assets.synthetic import make_simple_equity_info
from zipline.data.data_portal import DataPortal
from zipline.data.loader import get_benchmark_filename, INDEX_MAPPING
from zipline.data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY
)
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
SQLiteAdjustmentWriter,
)
from zipline.finance.blotter import Blotter
from zipline.finance.trading import TradingEnvironment
from zipline.finance.order import ORDER_STATUS
from zipline.lib.labelarray import LabelArray
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.utils import security_list
from zipline.utils.calendars import get_calendar
from zipline.utils.input_validation import expect_dimensions
from zipline.utils.numpy_utils import as_column, isnat
from zipline.utils.pandas_utils import timedelta_to_integral_seconds
from zipline.utils.paths import ensure_directory
from zipline.utils.sentinel import sentinel
import numpy as np
from numpy import float64
EPOCH = pd.Timestamp(0, tz='UTC')
def seconds_to_timestamp(seconds):
return pd.Timestamp(seconds, unit='s', tz='UTC')
def to_utc(time_str):
"""Convert a string in US/Eastern time to UTC"""
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
def str_to_seconds(s):
"""
Convert a pandas-intelligible string to (integer) seconds since UTC.
>>> from pandas import Timestamp
>>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
1388534400.0
>>> str_to_seconds('2014-01-01')
1388534400
"""
return timedelta_to_integral_seconds(pd.Timestamp(s, tz='UTC') - EPOCH)
def drain_zipline(test, zipline):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in zipline:
msg_counter += 1
output.append(update)
if 'daily_perf' in update:
transaction_count += \
len(update['daily_perf']['transactions'])
return output, transaction_count
def check_algo_results(test,
results,
expected_transactions_count=None,
expected_order_count=None,
expected_positions_count=None,
sid=None):
if expected_transactions_count is not None:
txns = flatten_list(results["transactions"])
test.assertEqual(expected_transactions_count, len(txns))
if expected_positions_count is not None:
raise NotImplementedError
if expected_order_count is not None:
# de-dup orders on id, because orders are put back into perf packets
        # whenever a txn is filled
orders = set([order['id'] for order in
flatten_list(results["orders"])])
test.assertEqual(expected_order_count, len(orders))
def flatten_list(list):
return [item for sublist in list for item in sublist]
def assert_single_position(test, zipline):
output, transaction_count = drain_zipline(test, zipline)
if 'expected_transactions' in test.zipline_test_config:
test.assertEqual(
test.zipline_test_config['expected_transactions'],
transaction_count
)
else:
test.assertEqual(
test.zipline_test_config['order_count'],
transaction_count
)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]['daily_perf']['positions']
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if 'daily_perf' in update:
if 'orders' in update['daily_perf']:
for order in update['daily_perf']['orders']:
orders_by_id[order['id']] = order
for order in itervalues(orders_by_id):
test.assertEqual(
order['status'],
ORDER_STATUS.FILLED,
"")
test.assertEqual(
len(closing_positions),
1,
"Portfolio should have one position."
)
sid = test.zipline_test_config['sid']
test.assertEqual(
closing_positions[0]['sid'],
sid,
"Portfolio should have one position in " + str(sid)
)
return output, transaction_count
@contextmanager
def security_list_copy():
old_dir = security_list.SECURITY_LISTS_DIR
new_dir = tempfile.mkdtemp()
try:
for subdir in os.listdir(old_dir):
shutil.copytree(os.path.join(old_dir, subdir),
os.path.join(new_dir, subdir))
with patch.object(security_list, 'SECURITY_LISTS_DIR', new_dir), \
patch.object(security_list, 'using_copy', True,
create=True):
yield
finally:
shutil.rmtree(new_dir, True)
def add_security_data(adds, deletes):
if not hasattr(security_list, 'using_copy'):
raise Exception('add_security_data must be used within '
'security_list_copy context')
directory = os.path.join(
security_list.SECURITY_LISTS_DIR,
"leveraged_etf_list/20150127/20150125"
)
if not os.path.exists(directory):
os.makedirs(directory)
del_path = os.path.join(directory, "delete")
with open(del_path, 'w') as f:
for sym in deletes:
f.write(sym)
f.write('\n')
add_path = os.path.join(directory, "add")
with open(add_path, 'w') as f:
for sym in adds:
f.write(sym)
f.write('\n')
def all_pairs_matching_predicate(values, pred):
"""
Return an iterator of all pairs, (v0, v1) from values such that
`pred(v0, v1) == True`
Parameters
----------
values : iterable
pred : function
Returns
-------
pairs_iterator : generator
Generator yielding pairs matching `pred`.
Examples
--------
>>> from zipline.testing import all_pairs_matching_predicate
>>> from operator import eq, lt
>>> list(all_pairs_matching_predicate(range(5), eq))
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> list(all_pairs_matching_predicate("abcd", lt))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
"""
return filter(lambda pair: pred(*pair), product(values, repeat=2))
def product_upper_triangle(values, include_diagonal=False):
"""
Return an iterator over pairs, (v0, v1), drawn from values.
If `include_diagonal` is True, returns all pairs such that v0 <= v1.
If `include_diagonal` is False, returns all pairs such that v0 < v1.
"""
return all_pairs_matching_predicate(
values,
operator.le if include_diagonal else operator.lt,
)
def all_subindices(index):
"""
Return all valid sub-indices of a pandas Index.
"""
return (
index[start:stop]
for start, stop in product_upper_triangle(range(len(index) + 1))
)
def chrange(start, stop):
"""
Construct an iterable of length-1 strings beginning with `start` and ending
with `stop`.
Parameters
----------
start : str
The first character.
stop : str
The last character.
Returns
-------
chars: iterable[str]
Iterable of strings beginning with start and ending with stop.
Examples
--------
>>> chrange('A', 'C')
['A', 'B', 'C']
"""
return list(map(chr, range(ord(start), ord(stop) + 1)))
def make_trade_data_for_asset_info(dates,
asset_info,
price_start,
price_step_by_date,
price_step_by_sid,
volume_start,
volume_step_by_date,
volume_step_by_sid,
frequency,
writer=None):
"""
Convert the asset info dataframe into a dataframe of trade data for each
sid, and write to the writer if provided. Write NaNs for locations where
assets did not exist. Return a dict of the dataframes, keyed by sid.
"""
trade_data = {}
sids = asset_info.index
price_sid_deltas = np.arange(len(sids), dtype=float64) * price_step_by_sid
price_date_deltas = (np.arange(len(dates), dtype=float64) *
price_step_by_date)
prices = (price_sid_deltas + as_column(price_date_deltas)) + price_start
volume_sid_deltas = np.arange(len(sids)) * volume_step_by_sid
volume_date_deltas = np.arange(len(dates)) * volume_step_by_date
volumes = volume_sid_deltas + as_column(volume_date_deltas) + volume_start
for j, sid in enumerate(sids):
start_date, end_date = asset_info.loc[sid, ['start_date', 'end_date']]
        # Normalize here so that we still generate non-NaN values on the minutes
# for an asset's last trading day.
for i, date in enumerate(dates.normalize()):
if not (start_date <= date <= end_date):
prices[i, j] = 0
volumes[i, j] = 0
df = pd.DataFrame(
{
"open": prices[:, j],
"high": prices[:, j],
"low": prices[:, j],
"close": prices[:, j],
"volume": volumes[:, j],
},
index=dates,
)
if writer:
writer.write_sid(sid, df)
trade_data[sid] = df
return trade_data
def check_allclose(actual,
desired,
rtol=1e-07,
atol=0,
err_msg='',
verbose=True):
"""
Wrapper around np.testing.assert_allclose that also verifies that inputs
are ndarrays.
See Also
--------
np.assert_allclose
"""
if type(actual) != type(desired):
raise AssertionError("%s != %s" % (type(actual), type(desired)))
return assert_allclose(
actual,
desired,
atol=atol,
rtol=rtol,
err_msg=err_msg,
verbose=verbose,
)
def check_arrays(x, y, err_msg='', verbose=True, check_dtypes=True):
"""
Wrapper around np.testing.assert_array_equal that also verifies that inputs
are ndarrays.
See Also
--------
np.assert_array_equal
"""
assert type(x) == type(y), "{x} != {y}".format(x=type(x), y=type(y))
assert x.dtype == y.dtype, "{x.dtype} != {y.dtype}".format(x=x, y=y)
if isinstance(x, LabelArray):
# Check that both arrays have missing values in the same locations...
assert_array_equal(
x.is_missing(),
y.is_missing(),
err_msg=err_msg,
verbose=verbose,
)
# ...then check the actual values as well.
x = x.as_string_array()
y = y.as_string_array()
elif x.dtype.kind in 'mM':
x_isnat = isnat(x)
y_isnat = isnat(y)
assert_array_equal(
x_isnat,
y_isnat,
err_msg="NaTs not equal",
verbose=verbose,
)
# Fill NaTs with zero for comparison.
x = np.where(x_isnat, np.zeros_like(x), x)
y = np.where(y_isnat, np.zeros_like(y), y)
return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
class UnexpectedAttributeAccess(Exception):
pass
class ExplodingObject(object):
"""
Object that will raise an exception on any attribute access.
Useful for verifying that an object is never touched during a
function/method call.
"""
def __getattribute__(self, name):
raise UnexpectedAttributeAccess(name)
def write_minute_data(trading_calendar, tempdir, minutes, sids):
first_session = trading_calendar.minute_to_session_label(
minutes[0], direction="none"
)
last_session = trading_calendar.minute_to_session_label(
minutes[-1], direction="none"
)
sessions = trading_calendar.sessions_in_range(first_session, last_session)
write_bcolz_minute_data(
trading_calendar,
sessions,
tempdir.path,
create_minute_bar_data(minutes, sids),
)
return tempdir.path
def create_minute_bar_data(minutes, sids):
length = len(minutes)
for sid_idx, sid in enumerate(sids):
yield sid, pd.DataFrame(
{
'open': np.arange(length) + 10 + sid_idx,
'high': np.arange(length) + 15 + sid_idx,
'low': np.arange(length) + 8 + sid_idx,
'close': np.arange(length) + 10 + sid_idx,
'volume': 100 + sid_idx,
},
index=minutes,
)
def create_daily_bar_data(sessions, sids):
length = len(sessions)
for sid_idx, sid in enumerate(sids):
yield sid, pd.DataFrame(
{
"open": (np.array(range(10, 10 + length)) + sid_idx),
"high": (np.array(range(15, 15 + length)) + sid_idx),
"low": (np.array(range(8, 8 + length)) + sid_idx),
"close": (np.array(range(10, 10 + length)) + sid_idx),
"volume": np.array(range(100, 100 + length)) + sid_idx,
"day": [session.value for session in sessions]
},
index=sessions,
)
def write_daily_data(tempdir, sim_params, sids, trading_calendar):
path = os.path.join(tempdir.path, "testdaily.bcolz")
BcolzDailyBarWriter(path, trading_calendar,
sim_params.start_session,
sim_params.end_session).write(
create_daily_bar_data(sim_params.sessions, sids),
)
return path
def create_data_portal(asset_finder, tempdir, sim_params, sids,
trading_calendar, adjustment_reader=None):
if sim_params.data_frequency == "daily":
daily_path = write_daily_data(tempdir, sim_params, sids,
trading_calendar)
equity_daily_reader = BcolzDailyBarReader(daily_path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
equity_daily_reader=equity_daily_reader,
adjustment_reader=adjustment_reader
)
else:
minutes = trading_calendar.minutes_in_range(
sim_params.first_open,
sim_params.last_close
)
minute_path = write_minute_data(trading_calendar, tempdir, minutes,
sids)
equity_minute_reader = BcolzMinuteBarReader(minute_path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
adjustment_reader=adjustment_reader
)
def write_bcolz_minute_data(trading_calendar, days, path, data):
BcolzMinuteBarWriter(
path,
trading_calendar,
days[0],
days[-1],
US_EQUITIES_MINUTES_PER_DAY
).write(data)
def create_minute_df_for_asset(trading_calendar,
start_dt,
end_dt,
interval=1,
start_val=1,
minute_blacklist=None):
asset_minutes = trading_calendar.minutes_for_sessions_in_range(
start_dt, end_dt
)
minutes_count = len(asset_minutes)
minutes_arr = np.array(range(start_val, start_val + minutes_count))
df = pd.DataFrame(
{
"open": minutes_arr + 1,
"high": minutes_arr + 2,
"low": minutes_arr - 1,
"close": minutes_arr,
"volume": 100 * minutes_arr,
},
index=asset_minutes,
)
if interval > 1:
counter = 0
while counter < len(minutes_arr):
df[counter:(counter + interval - 1)] = 0
counter += interval
if minute_blacklist is not None:
for minute in minute_blacklist:
df.loc[minute] = 0
return df
def create_daily_df_for_asset(trading_calendar, start_day, end_day,
interval=1):
days = trading_calendar.sessions_in_range(start_day, end_day)
days_count = len(days)
days_arr = np.arange(days_count) + 2
df = pd.DataFrame(
{
"open": days_arr + 1,
"high": days_arr + 2,
"low": days_arr - 1,
"close": days_arr,
"volume": days_arr * 100,
},
index=days,
)
if interval > 1:
# only keep every 'interval' rows
for idx, _ in enumerate(days_arr):
if (idx + 1) % interval != 0:
df["open"].iloc[idx] = 0
df["high"].iloc[idx] = 0
df["low"].iloc[idx] = 0
df["close"].iloc[idx] = 0
df["volume"].iloc[idx] = 0
return df
def trades_by_sid_to_dfs(trades_by_sid, index):
for sidint, trades in iteritems(trades_by_sid):
opens = []
highs = []
lows = []
closes = []
volumes = []
for trade in trades:
opens.append(trade.open_price)
highs.append(trade.high)
lows.append(trade.low)
closes.append(trade.close_price)
volumes.append(trade.volume)
yield sidint, pd.DataFrame(
{
"open": opens,
"high": highs,
"low": lows,
"close": closes,
"volume": volumes,
},
index=index,
)
def create_data_portal_from_trade_history(asset_finder, trading_calendar,
tempdir, sim_params, trades_by_sid):
if sim_params.data_frequency == "daily":
path = os.path.join(tempdir.path, "testdaily.bcolz")
writer = BcolzDailyBarWriter(
path, trading_calendar,
sim_params.start_session,
sim_params.end_session
)
writer.write(
trades_by_sid_to_dfs(trades_by_sid, sim_params.sessions),
)
equity_daily_reader = BcolzDailyBarReader(path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
equity_daily_reader=equity_daily_reader,
)
else:
minutes = trading_calendar.minutes_in_range(
sim_params.first_open,
sim_params.last_close
)
length = len(minutes)
assets = {}
for sidint, trades in iteritems(trades_by_sid):
opens = np.zeros(length)
highs = np.zeros(length)
lows = np.zeros(length)
closes = np.zeros(length)
volumes = np.zeros(length)
for trade in trades:
# put them in the right place
idx = minutes.searchsorted(trade.dt)
opens[idx] = trade.open_price * 1000
highs[idx] = trade.high * 1000
lows[idx] = trade.low * 1000
closes[idx] = trade.close_price * 1000
volumes[idx] = trade.volume
assets[sidint] = pd.DataFrame({
"open": opens,
"high": highs,
"low": lows,
"close": closes,
"volume": volumes,
"dt": minutes
}).set_index("dt")
write_bcolz_minute_data(
trading_calendar,
sim_params.sessions,
tempdir.path,
assets
)
equity_minute_reader = BcolzMinuteBarReader(tempdir.path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
)
class FakeDataPortal(DataPortal):
def __init__(self, env, trading_calendar=None,
first_trading_day=None):
if trading_calendar is None:
trading_calendar = get_calendar("NYSE")
super(FakeDataPortal, self).__init__(env.asset_finder,
trading_calendar,
first_trading_day)
def get_spot_value(self, asset, field, dt, data_frequency):
if field == "volume":
return 100
else:
return 1.0
def get_history_window(self, assets, end_dt, bar_count, frequency, field,
data_frequency, ffill=True):
if frequency == "1d":
end_idx = \
self.trading_calendar.all_sessions.searchsorted(end_dt)
days = self.trading_calendar.all_sessions[
(end_idx - bar_count + 1):(end_idx + 1)
]
df = pd.DataFrame(
np.full((bar_count, len(assets)), 100.0),
index=days,
columns=assets
)
return df
class FetcherDataPortal(DataPortal):
"""
Mock dataportal that returns fake data for history and non-fetcher
spot value.
"""
def __init__(self, asset_finder, trading_calendar, first_trading_day=None):
super(FetcherDataPortal, self).__init__(asset_finder, trading_calendar,
first_trading_day)
def get_spot_value(self, asset, field, dt, data_frequency):
# if this is a fetcher field, exercise the regular code path
if self._is_extra_source(asset, field, self._augmented_sources_map):
return super(FetcherDataPortal, self).get_spot_value(
asset, field, dt, data_frequency)
# otherwise just return a fixed value
return int(asset)
# XXX: These aren't actually the methods that are used by the superclasses,
# so these don't do anything, and this class will likely produce unexpected
# results for history().
def _get_daily_window_for_sid(self, asset, field, days_in_window,
extra_slot=True):
return np.arange(days_in_window, dtype=np.float64)
def _get_minute_window_for_asset(self, asset, field, minutes_for_window):
return np.arange(minutes_for_window, dtype=np.float64)
class tmp_assets_db(object):
"""Create a temporary assets sqlite database.
This is meant to be used as a context manager.
Parameters
----------
url : string
The URL for the database connection.
**frames
The frames to pass to the AssetDBWriter.
By default this maps equities:
('A', 'B', 'C') -> map(ord, 'ABC')
See Also
--------
empty_assets_db
tmp_asset_finder
"""
_default_equities = sentinel('_default_equities')
def __init__(self,
url='sqlite:///:memory:',
equities=_default_equities,
**frames):
self._url = url
self._eng = None
if equities is self._default_equities:
equities = make_simple_equity_info(
list(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
)
frames['equities'] = equities
self._frames = frames
self._eng = None # set in enter and exit
def __enter__(self):
self._eng = eng = create_engine(self._url)
AssetDBWriter(eng).write(**self._frames)
return eng
def __exit__(self, *excinfo):
assert self._eng is not None, '_eng was not set in __enter__'
self._eng.dispose()
self._eng = None
def empty_assets_db():
"""Context manager for creating an empty assets db.
See Also
--------
tmp_assets_db
"""
return tmp_assets_db(equities=None)
class tmp_asset_finder(tmp_assets_db):
"""Create a temporary asset finder using an in memory sqlite db.
Parameters
----------
url : string
The URL for the database connection.
finder_cls : type, optional
The type of asset finder to create from the assets db.
**frames
Forwarded to ``tmp_assets_db``.
See Also
--------
tmp_assets_db
"""
def __init__(self,
url='sqlite:///:memory:',
finder_cls=AssetFinder,
**frames):
self._finder_cls = finder_cls
super(tmp_asset_finder, self).__init__(url=url, **frames)
def __enter__(self):
return self._finder_cls(super(tmp_asset_finder, self).__enter__())
def empty_asset_finder():
"""Context manager for creating an empty asset finder.
See Also
--------
empty_assets_db
tmp_assets_db
tmp_asset_finder
"""
return tmp_asset_finder(equities=None)
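# Usage sketch (not part of the original module): tmp_asset_finder is a
# context manager, so a test can build a throwaway finder in a few lines.
# The sids and dates below are illustrative only.
def _example_tmp_asset_finder():
    equities = make_simple_equity_info(
        sids=[1, 2, 3],
        start_date=pd.Timestamp('2014-01-01', tz='UTC'),
        end_date=pd.Timestamp('2015-01-01', tz='UTC'),
    )
    with tmp_asset_finder(equities=equities) as finder:
        # The finder behaves like any other AssetFinder backed by a real db.
        return finder.retrieve_asset(1)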
class tmp_trading_env(tmp_asset_finder):
"""Create a temporary trading environment.
Parameters
----------
load : callable, optional
Function that returns benchmark returns and treasury curves.
finder_cls : type, optional
The type of asset finder to create from the assets db.
**frames
Forwarded to ``tmp_assets_db``.
See Also
--------
empty_trading_env
tmp_asset_finder
"""
def __init__(self, load=None, *args, **kwargs):
super(tmp_trading_env, self).__init__(*args, **kwargs)
self._load = load
def __enter__(self):
return TradingEnvironment(
load=self._load,
asset_db_path=super(tmp_trading_env, self).__enter__().engine,
)
def empty_trading_env():
return tmp_trading_env(equities=None)
class SubTestFailures(AssertionError):
def __init__(self, *failures):
self.failures = failures
def __str__(self):
return 'failures:\n %s' % '\n '.join(
'\n '.join((
', '.join('%s=%r' % item for item in scope.items()),
'%s: %s' % (type(exc).__name__, exc),
            )) for scope, exc in self.failures
)
@nottest
def subtest(iterator, *_names):
"""
Construct a subtest in a unittest.
Consider using ``zipline.testing.parameter_space`` when subtests
are constructed over a single input or over the cross-product of multiple
inputs.
``subtest`` works by decorating a function as a subtest. The decorated
    function will be run by iterating over the ``iterator`` and unpacking the
values into the function. If any of the runs fail, the result will be put
into a set and the rest of the tests will be run. Finally, if any failed,
all of the results will be dumped as one failure.
Parameters
----------
iterator : iterable[iterable]
The iterator of arguments to pass to the function.
*name : iterator[str]
The names to use for each element of ``iterator``. These will be used
to print the scope when a test fails. If not provided, it will use the
integer index of the value as the name.
Examples
--------
::
class MyTest(TestCase):
def test_thing(self):
# Example usage inside another test.
@subtest(([n] for n in range(100000)), 'n')
def subtest(n):
self.assertEqual(n % 2, 0, 'n was not even')
subtest()
@subtest(([n] for n in range(100000)), 'n')
def test_decorated_function(self, n):
# Example usage to parameterize an entire function.
self.assertEqual(n % 2, 1, 'n was not odd')
Notes
-----
We use this when we:
* Will never want to run each parameter individually.
* Have a large parameter space we are testing
(see tests/utils/test_events.py).
``nose_parameterized.expand`` will create a test for each parameter
combination which bloats the test output and makes the travis pages slow.
We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
nose2 do not support ``addSubTest``.
See Also
--------
zipline.testing.parameter_space
"""
def dec(f):
@wraps(f)
def wrapped(*args, **kwargs):
names = _names
failures = []
for scope in iterator:
scope = tuple(scope)
try:
f(*args + scope, **kwargs)
except Exception as e:
if not names:
names = count()
failures.append((dict(zip(names, scope)), e))
if failures:
raise SubTestFailures(*failures)
return wrapped
return dec
class MockDailyBarReader(object):
def get_value(self, col, sid, dt):
return 100
def create_mock_adjustment_data(splits=None, dividends=None, mergers=None):
if splits is None:
splits = create_empty_splits_mergers_frame()
elif not isinstance(splits, pd.DataFrame):
splits = pd.DataFrame(splits)
if mergers is None:
mergers = create_empty_splits_mergers_frame()
elif not isinstance(mergers, pd.DataFrame):
mergers = pd.DataFrame(mergers)
if dividends is None:
dividends = create_empty_dividends_frame()
elif not isinstance(dividends, pd.DataFrame):
dividends = pd.DataFrame(dividends)
return splits, mergers, dividends
def create_mock_adjustments(tempdir, days, splits=None, dividends=None,
mergers=None):
path = tempdir.getpath("test_adjustments.db")
SQLiteAdjustmentWriter(path, MockDailyBarReader(), days).write(
*create_mock_adjustment_data(splits, dividends, mergers)
)
return path
def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""):
"""
Assert that two pandas Timestamp objects are the same.
Parameters
----------
left, right : pd.Timestamp
The values to compare.
compare_nat_equal : bool, optional
Whether to consider `NaT` values equal. Defaults to True.
msg : str, optional
A message to forward to `pd.util.testing.assert_equal`.
"""
if compare_nat_equal and left is pd.NaT and right is pd.NaT:
return
return pd.util.testing.assert_equal(left, right, msg=msg)
def powerset(values):
"""
Return the power set (i.e., the set of all subsets) of entries in `values`.
"""
return concat(combinations(values, i) for i in range(len(values) + 1))
def to_series(knowledge_dates, earning_dates):
"""
Helper for converting a dict of strings to a Series of datetimes.
This is just for making the test cases more readable.
"""
return pd.Series(
index=pd.to_datetime(knowledge_dates),
data=pd.to_datetime(earning_dates),
)
def gen_calendars(start, stop, critical_dates):
"""
Generate calendars to use as inputs.
"""
all_dates = pd.date_range(start, stop, tz='utc')
for to_drop in map(list, powerset(critical_dates)):
# Have to yield tuples.
yield (all_dates.drop(to_drop),)
# Also test with the trading calendar.
trading_days = get_calendar("NYSE").all_days
yield (trading_days[trading_days.slice_indexer(start, stop)],)
@contextmanager
def temp_pipeline_engine(calendar, sids, random_seed, symbols=None):
"""
A contextManager that yields a SimplePipelineEngine holding a reference to
an AssetFinder generated via tmp_asset_finder.
Parameters
----------
calendar : pd.DatetimeIndex
Calendar to pass to the constructed PipelineEngine.
sids : iterable[int]
Sids to use for the temp asset finder.
random_seed : int
Integer used to seed instances of SeededRandomLoader.
symbols : iterable[str], optional
Symbols for constructed assets. Forwarded to make_simple_equity_info.
"""
equity_info = make_simple_equity_info(
sids=sids,
start_date=calendar[0],
end_date=calendar[-1],
symbols=symbols,
)
loader = make_seeded_random_loader(random_seed, calendar, sids)
def get_loader(column):
return loader
with tmp_asset_finder(equities=equity_info) as finder:
yield SimplePipelineEngine(get_loader, calendar, finder)
def parameter_space(__fail_fast=False, **params):
"""
Wrapper around subtest that allows passing keywords mapping names to
iterables of values.
The decorated test function will be called with the cross-product of all
possible inputs
Examples
--------
>>> from unittest import TestCase
>>> class SomeTestCase(TestCase):
... @parameter_space(x=[1, 2], y=[2, 3])
... def test_some_func(self, x, y):
... # Will be called with every possible combination of x and y.
... self.assertEqual(somefunc(x, y), expected_result(x, y))
See Also
--------
zipline.testing.subtest
"""
def decorator(f):
argspec = getargspec(f)
if argspec.varargs:
raise AssertionError("parameter_space() doesn't support *args")
if argspec.keywords:
raise AssertionError("parameter_space() doesn't support **kwargs")
if argspec.defaults:
raise AssertionError("parameter_space() doesn't support defaults.")
# Skip over implicit self.
argnames = argspec.args
if argnames[0] == 'self':
argnames = argnames[1:]
extra = set(params) - set(argnames)
if extra:
raise AssertionError(
"Keywords %s supplied to parameter_space() are "
"not in function signature." % extra
)
unspecified = set(argnames) - set(params)
if unspecified:
raise AssertionError(
"Function arguments %s were not "
"supplied to parameter_space()." % extra
)
def make_param_sets():
return product(*(params[name] for name in argnames))
if __fail_fast:
@wraps(f)
def wrapped(self):
for args in make_param_sets():
f(self, *args)
return wrapped
else:
@wraps(f)
def wrapped(*args, **kwargs):
subtest(make_param_sets(), *argnames)(f)(*args, **kwargs)
return wrapped
return decorator
def create_empty_dividends_frame():
return pd.DataFrame(
np.array(
[],
dtype=[
('ex_date', 'datetime64[ns]'),
('pay_date', 'datetime64[ns]'),
('record_date', 'datetime64[ns]'),
('declared_date', 'datetime64[ns]'),
('amount', 'float64'),
('sid', 'int32'),
],
),
index=pd.DatetimeIndex([], tz='UTC'),
)
def create_empty_splits_mergers_frame():
return pd.DataFrame(
np.array(
[],
dtype=[
('effective_date', 'int64'),
('ratio', 'float64'),
('sid', 'int64'),
],
),
index=pd.DatetimeIndex([]),
)
def make_alternating_boolean_array(shape, first_value=True):
"""
Create a 2D numpy array with the given shape containing alternating values
of False, True, False, True,... along each row and each column.
Examples
--------
>>> make_alternating_boolean_array((4,4))
array([[ True, False, True, False],
[False, True, False, True],
[ True, False, True, False],
[False, True, False, True]], dtype=bool)
>>> make_alternating_boolean_array((4,3), first_value=False)
array([[False, True, False],
[ True, False, True],
[False, True, False],
[ True, False, True]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
'Shape must be 2-dimensional. Given shape was {}'.format(shape)
)
    alternating = np.empty(shape, dtype=bool)
for row in alternating:
row[::2] = first_value
        row[1::2] = not first_value
        first_value = not first_value
return alternating
def make_cascading_boolean_array(shape, first_value=True):
"""
Create a numpy array with the given shape containing cascading boolean
values, with `first_value` being the top-left value.
Examples
--------
>>> make_cascading_boolean_array((4,4))
array([[ True, True, True, False],
[ True, True, False, False],
[ True, False, False, False],
[False, False, False, False]], dtype=bool)
>>> make_cascading_boolean_array((4,2))
array([[ True, False],
[False, False],
[False, False],
[False, False]], dtype=bool)
>>> make_cascading_boolean_array((2,4))
array([[ True, True, True, False],
[ True, True, False, False]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
'Shape must be 2-dimensional. Given shape was {}'.format(shape)
)
    cascading = np.full(shape, not first_value, dtype=bool)
ending_col = shape[1] - 1
for row in cascading:
if ending_col > 0:
row[:ending_col] = first_value
ending_col -= 1
else:
break
return cascading
@expect_dimensions(array=2)
def permute_rows(seed, array):
"""
Shuffle each row in ``array`` based on permutations generated by ``seed``.
Parameters
----------
seed : int
Seed for numpy.RandomState
array : np.ndarray[ndim=2]
Array over which to apply permutations.
"""
rand = np.random.RandomState(seed)
return np.apply_along_axis(rand.permutation, 1, array)
@nottest
def make_test_handler(testcase, *args, **kwargs):
"""
Returns a TestHandler which will be used by the given testcase. This
handler can be used to test log messages.
Parameters
----------
testcase: unittest.TestCase
The test class in which the log handler will be used.
*args, **kwargs
Forwarded to the new TestHandler object.
Returns
-------
handler: logbook.TestHandler
The handler to use for the test case.
"""
handler = TestHandler(*args, **kwargs)
testcase.addCleanup(handler.close)
return handler
def write_compressed(path, content):
"""
Write a compressed (gzipped) file to `path`.
"""
with gzip.open(path, 'wb') as f:
f.write(content)
def read_compressed(path):
"""
    Read a compressed (gzipped) file from `path`.
"""
with gzip.open(path, 'rb') as f:
return f.read()
zipline_git_root = abspath(
join(realpath(dirname(__file__)), '..', '..'),
)
@nottest
def test_resource_path(*path_parts):
return os.path.join(zipline_git_root, 'tests', 'resources', *path_parts)
@contextmanager
def patch_os_environment(remove=None, **values):
"""
Context manager for patching the operating system environment.
"""
old_values = {}
remove = remove or []
for key in remove:
old_values[key] = os.environ.pop(key)
    for key, value in iteritems(values):
old_values[key] = os.getenv(key)
os.environ[key] = value
try:
yield
finally:
        for old_key, old_value in iteritems(old_values):
if old_value is None:
# Value was not present when we entered, so del it out if it's
# still present.
try:
                    del os.environ[old_key]
except KeyError:
pass
else:
# Restore the old value.
os.environ[old_key] = old_value
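# Usage sketch (not part of the original module): temporarily override an
# environment variable for the duration of a block.  The variable name is
# illustrative only.
def _example_patch_os_environment():
    with patch_os_environment(ZIPLINE_EXAMPLE_FLAG='1'):
        return os.environ.get('ZIPLINE_EXAMPLE_FLAG')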
class tmp_dir(TempDirectory, object):
"""New style class that wrapper for TempDirectory in python 2.
"""
pass
class _TmpBarReader(with_metaclass(ABCMeta, tmp_dir)):
"""A helper for tmp_bcolz_equity_minute_bar_reader and
tmp_bcolz_equity_daily_bar_reader.
Parameters
----------
env : TradingEnvironment
The trading env.
days : pd.DatetimeIndex
The days to write for.
data : dict[int -> pd.DataFrame]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
"""
@abstractproperty
def _reader_cls(self):
        raise NotImplementedError('_reader_cls')
@abstractmethod
def _write(self, env, days, path, data):
raise NotImplementedError('_write')
def __init__(self, env, days, data, path=None):
super(_TmpBarReader, self).__init__(path=path)
self._env = env
self._days = days
self._data = data
def __enter__(self):
tmpdir = super(_TmpBarReader, self).__enter__()
env = self._env
try:
self._write(
env,
self._days,
tmpdir.path,
self._data,
)
return self._reader_cls(tmpdir.path)
except:
self.__exit__(None, None, None)
raise
class tmp_bcolz_equity_minute_bar_reader(_TmpBarReader):
"""A temporary BcolzMinuteBarReader object.
Parameters
----------
env : TradingEnvironment
The trading env.
days : pd.DatetimeIndex
The days to write for.
data : iterable[(int, pd.DataFrame)]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
See Also
--------
tmp_bcolz_equity_daily_bar_reader
"""
_reader_cls = BcolzMinuteBarReader
_write = staticmethod(write_bcolz_minute_data)
class tmp_bcolz_equity_daily_bar_reader(_TmpBarReader):
"""A temporary BcolzDailyBarReader object.
Parameters
----------
env : TradingEnvironment
The trading env.
days : pd.DatetimeIndex
The days to write for.
data : dict[int -> pd.DataFrame]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
See Also
--------
    tmp_bcolz_equity_minute_bar_reader
"""
_reader_cls = BcolzDailyBarReader
@staticmethod
def _write(env, days, path, data):
BcolzDailyBarWriter(path, days).write(data)
@contextmanager
def patch_read_csv(url_map, module=pd, strict=False):
"""Patch pandas.read_csv to map lookups from url to another.
Parameters
----------
url_map : mapping[str or file-like object -> str or file-like object]
The mapping to use to redirect read_csv calls.
module : module, optional
The module to patch ``read_csv`` on. By default this is ``pandas``.
This should be set to another module if ``read_csv`` is early-bound
like ``from pandas import read_csv`` instead of late-bound like:
``import pandas as pd; pd.read_csv``.
strict : bool, optional
If true, then this will assert that ``read_csv`` is only called with
elements in the ``url_map``.
"""
read_csv = pd.read_csv
def patched_read_csv(filepath_or_buffer, *args, **kwargs):
if filepath_or_buffer in url_map:
return read_csv(url_map[filepath_or_buffer], *args, **kwargs)
elif not strict:
return read_csv(filepath_or_buffer, *args, **kwargs)
else:
raise AssertionError(
                'attempted to call read_csv on %r which is not in the url map' %
filepath_or_buffer,
)
with patch.object(module, 'read_csv', patched_read_csv):
yield
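# Usage sketch (not part of the original module): redirect a remote CSV URL to
# a local fixture while the code under test runs.  The URL and fixture name
# are illustrative only.
def _example_patch_read_csv():
    fixture = test_resource_path('example_fixture.csv')
    with patch_read_csv({'https://example.com/data.csv': fixture}):
        # Any read_csv call with the mapped URL is served from the fixture.
        return pd.read_csv('https://example.com/data.csv')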
def copy_market_data(src_market_data_dir, dest_root_dir):
symbol = 'SPY'
filenames = (get_benchmark_filename(symbol), INDEX_MAPPING[symbol][1])
ensure_directory(os.path.join(dest_root_dir, 'data'))
for filename in filenames:
shutil.copyfile(
os.path.join(src_market_data_dir, filename),
os.path.join(dest_root_dir, 'data', filename)
)
@curry
def ensure_doctest(f, name=None):
"""Ensure that an object gets doctested. This is useful for instances
of objects like curry or partial which are not discovered by default.
Parameters
----------
f : any
The thing to doctest.
name : str, optional
The name to use in the doctest function mapping. If this is None,
Then ``f.__name__`` will be used.
Returns
-------
f : any
``f`` unchanged.
"""
_getframe(2).f_globals.setdefault('__test__', {})[
f.__name__ if name is None else name
] = f
return f
class RecordBatchBlotter(Blotter):
"""Blotter that tracks how its batch_order method was called.
"""
def __init__(self, data_frequency):
super(RecordBatchBlotter, self).__init__(data_frequency)
self.order_batch_called = []
def batch_order(self, *args, **kwargs):
self.order_batch_called.append((args, kwargs))
return super(RecordBatchBlotter, self).batch_order(*args, **kwargs)
class AssetID(CustomFactor):
"""
CustomFactor that returns the AssetID of each asset.
Useful for providing a Factor that produces a different value for each
asset.
"""
window_length = 1
inputs = ()
def compute(self, today, assets, out):
out[:] = assets
class AssetIDPlusDay(CustomFactor):
window_length = 1
inputs = ()
def compute(self, today, assets, out):
out[:] = assets + today.day
class OpenPrice(CustomFactor):
window_length = 1
inputs = [USEquityPricing.open]
def compute(self, today, assets, out, open):
out[:] = open
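# Usage sketch (not part of the original module): the toy factors above can be
# combined into a pipeline for tests.  This assumes zipline.pipeline.Pipeline
# is importable here; the column names are illustrative only.
def _example_factor_pipeline():
    from zipline.pipeline import Pipeline
    return Pipeline(columns={
        'asset_id': AssetID(),
        'asset_id_plus_day': AssetIDPlusDay(),
        'open_price': OpenPrice(),
    })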
|
import socket
import re
from xii import error, util
class Validator():
def __init__(self, example=None, description=None):
self._description = description
self._example = example
def structure(self, accessor):
if accessor == "example":
return self._example
return self._description
class TypeCheck(Validator):
want_type = None
want = "none"
def __init__(self, example, desc=None):
if desc is None:
desc = self.want
Validator.__init__(self, example, desc)
def validate(self, pre, structure):
if isinstance(structure, self.want_type):
return True
raise error.ValidatorError("{} needs to be {}".format(pre, self.want))
return False
class Int(TypeCheck):
want = "int"
want_type = int
class Bool(TypeCheck):
want = "bool"
want_type = bool
class String(TypeCheck):
want = "string"
want_type = str
class Ip(TypeCheck):
want = "ip"
want_type = str
def validate(self, pre, structure):
TypeCheck.validate(self, pre, structure)
try:
socket.inet_pton(socket.AF_INET, structure)
return True
except socket.error:
try:
socket.inet_pton(socket.AF_INET6, structure)
return True
except socket.error:
pass
raise error.ValidatorError("{} is not a valid IP address".format(pre))
class ByteSize(TypeCheck):
want = "memory"
want_type = str
    validator = re.compile(r"(?P<value>\d+)(\ *)(?P<unit>[kMGT])")
def validate(self, pre, structure):
TypeCheck.validate(self, pre, structure)
if self.validator.match(structure):
return True
else:
raise error.ValidatorError("{} is not a valid memory size".format(pre))
class List(TypeCheck):
want = "list"
want_type = list
def __init__(self, schema, desc=None):
TypeCheck.__init__(self, desc)
self.schema = schema
def validate(self, pre, structure):
TypeCheck.validate(self, pre, structure)
def _validate_each(item):
return self.schema.validate(pre, item)
        return sum(map(_validate_each, structure)) >= 1
def structure(self, accessor):
return [self.schema.structure(accessor)]
class Or(Validator):
def __init__(self, schemas, desc=None, exclusive=True):
Validator.__init__(self, desc)
self.schemas = schemas
self.exclusive = exclusive
def validate(self, pre, structure):
errors = []
def _validate_each(schema):
try:
return schema.validate(pre, structure)
except error.ValidatorError as err:
errors.append(err)
return False
state = sum(map(_validate_each, self.schemas))
if self.exclusive and (state > 1 or state == 0):
def _error_lines():
it = iter(errors)
yield " ".join(next(it).error())
for err in it:
yield "or"
yield " ".join(err.error())
raise error.ValidatorError(["{} is ambigous:".format(pre)] +
list(_error_lines()))
return True
def structure(self, accessor):
desc = []
descs = [ s.structure(accessor) for s in self.schemas ]
for d in descs[:-1]:
desc.append(d)
desc.append("__or__")
desc.append(descs[-1])
return desc
class KeyValidator(Validator):
def structure(self, accessor, overwrite=None):
name = self.name
if overwrite:
name = overwrite
return ("{}".format(name), self.schema.structure(accessor))
class VariableKeys(KeyValidator):
def __init__(self, schema, example, desc=None):
        KeyValidator.__init__(self, example, desc)
self.name = "*"
self.example = example
self.schema = schema
def validate(self, pre, structure):
if not isinstance(structure, dict):
raise error.ValidatorError("{} needs to be a dict".format(pre))
def _validate_each(pair):
(name, next_structure) = pair
return self.schema.validate(pre + " > " + name, next_structure)
return sum(map(_validate_each, structure.items())) >= 1
def structure(self, accessor):
if accessor == "example":
return KeyValidator.structure(self, accessor, self.example)
return KeyValidator.structure(self, accessor)
class Key(KeyValidator):
def __init__(self, name, schema, desc=None, example=None):
        KeyValidator.__init__(self, example, desc)
self.name = name
self.schema = schema
def validate(self, pre, structure):
if not isinstance(structure, dict):
raise error.ValidatorError("{} needs to be a dict".format(pre))
value_of_key = util.safe_get(self.name, structure)
if not value_of_key:
return False
return self.schema.validate(pre + " > " + self.name, value_of_key)
class RequiredKey(KeyValidator):
def __init__(self, name, schema, desc=None, example=None):
        Validator.__init__(self, example, desc)
self.name = name
self.schema = schema
def validate(self, pre, structure):
value_of_key = util.safe_get(self.name, structure)
if not value_of_key:
raise error.ValidatorError("{} must have {} "
"defined".format(pre, self.name))
return self.schema.validate(pre + " > " + self.name, value_of_key)
class Dict(TypeCheck):
want = "dictonary"
want_type = dict
def __init__(self, schemas, desc=None):
TypeCheck.__init__(self, desc)
self.schemas = schemas
def validate(self, pre, structure):
TypeCheck.validate(self, pre, structure)
def _validate(schema):
return schema.validate(pre, structure)
return sum(map(_validate, self.schemas)) >= 1
def structure(self, accessor):
desc_dict = {}
for key, value in [s.structure(accessor) for s in self.schemas]:
desc_dict[key] = value
return desc_dict
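# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of composing a schema from the validators above
# and checking a configuration dict; the keys and values are illustrative.
def _example_schema_check():
    schema = Dict([
        RequiredKey("name", String("my-node")),
        Key("memory", ByteSize("2G")),
        Key("cpus", Int(2)),
    ])
    # Raises error.ValidatorError on failure, returns a truthy value otherwise.
    return schema.validate("node", {"name": "compute-01", "memory": "4G"})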
|
from email.mime import text
import email.utils
import smtplib
import socket
import mailjet_rest
from scoreboard import main
app = main.get_app()
class MailFailure(Exception):
"""Inability to send mail."""
pass
def send(message, subject, to, to_name=None, sender=None, sender_name=None):
"""Send an email."""
sender = sender or app.config.get('MAIL_FROM')
sender_name = sender_name or app.config.get('MAIL_FROM_NAME') or ''
mail_provider = app.config.get('MAIL_PROVIDER')
if mail_provider is None:
app.logger.error('No MAIL_PROVIDER configured!')
raise MailFailure('No MAIL_PROVIDER configured!')
elif mail_provider == 'smtp':
_send_smtp(message, subject, to, to_name, sender, sender_name)
elif mail_provider == 'mailjet':
_send_mailjet(message, subject, to, to_name, sender, sender_name)
else:
app.logger.error('Invalid MAIL_PROVIDER configured!')
raise MailFailure('Invalid MAIL_PROVIDER configured!')
def _send_smtp(message, subject, to, to_name, sender, sender_name):
"""SMTP implementation of sending email."""
host = app.config.get('MAIL_HOST')
if not host:
raise MailFailure('SMTP Server Not Configured')
try:
server = smtplib.SMTP(host)
except (smtplib.SMTPConnectError, socket.error) as ex:
app.logger.error('Unable to send mail: %s', str(ex))
raise MailFailure('Error connecting to SMTP server.')
msg = text.MIMEText(message)
msg['Subject'] = subject
msg['To'] = email.utils.formataddr((to_name, to))
msg['From'] = email.utils.formataddr((sender_name, sender))
try:
if app.debug:
server.set_debuglevel(True)
server.sendmail(sender, [to], msg.as_string())
except (smtplib.SMTPException, socket.error) as ex:
app.logger.error('Unable to send mail: %s', str(ex))
raise MailFailure('Error sending mail to SMTP server.')
finally:
try:
server.quit()
except smtplib.SMTPException:
pass
def _send_mailjet(message, subject, to, to_name, sender, sender_name):
"""Mailjet implementation of sending email."""
api_key = app.config.get('MJ_APIKEY_PUBLIC')
api_secret = app.config.get('MJ_APIKEY_PRIVATE')
if not api_key or not api_secret:
app.logger.error('Missing MJ_APIKEY_PUBLIC/MJ_APIKEY_PRIVATE!')
return
# Note the data structures we use are api v3.1
client = mailjet_rest.Client(
auth=(api_key, api_secret),
api_url='https://api.mailjet.com/',
version='v3.1')
from_obj = {
"Email": sender,
}
if sender_name:
from_obj["Name"] = sender_name
to_obj = [{
"Email": to,
}]
if to_name:
to_obj[0]["Name"] = to_name
message = {
"From": from_obj,
"To": to_obj,
"Subject": subject,
"TextPart": message,
}
result = client.send.create(data={'Messages': [message]})
if result.status_code != 200:
app.logger.error(
'Error sending via mailjet: (%d) %r',
result.status_code, result.text)
raise MailFailure('Error sending via mailjet!')
try:
j = result.json()
except Exception:
app.logger.error('Error sending via mailjet: %r', result.text)
raise MailFailure('Error sending via mailjet!')
if j['Messages'][0]['Status'] != 'success':
app.logger.error('Error sending via mailjet: %r', j)
raise MailFailure('Error sending via mailjet!')
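# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how send() might be invoked from application
# code.  The recipient and subject are illustrative, and the call only
# succeeds once MAIL_PROVIDER and the related settings are configured.
def _example_send_notification():
    try:
        send('Your challenge was scored.', 'Scoreboard update',
             'player@example.com', to_name='Player One')
    except MailFailure as ex:
        app.logger.warning('Notification not sent: %s', ex)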
|
import pytest
from ray.train.callbacks.results_preprocessors import (
ExcludedKeysResultsPreprocessor,
IndexedResultsPreprocessor,
SequentialResultsPreprocessor,
AverageResultsPreprocessor,
MaxResultsPreprocessor,
WeightedAverageResultsPreprocessor,
)
def test_excluded_keys_results_preprocessor():
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
expected = [{"b": 2}, {"b": 4}]
preprocessor = ExcludedKeysResultsPreprocessor("a")
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_indexed_results_preprocessor():
results = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
expected = [{"a": 1}, {"a": 3}]
preprocessor = IndexedResultsPreprocessor([0, 2])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_sequential_results_preprocessor():
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = [{"b": 2}, {"b": 6}]
preprocessor_1 = ExcludedKeysResultsPreprocessor("a")
# [{"b": 2}, {"b": 4}, {"b": 6}, {"b": 8}]
preprocessor_2 = IndexedResultsPreprocessor([0, 2])
preprocessor = SequentialResultsPreprocessor([preprocessor_1, preprocessor_2])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_average_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
for res in expected:
res.update(
{
"avg(a)": np.mean([result["a"] for result in results]),
"avg(b)": np.mean([result["b"] for result in results]),
}
)
preprocessor = AverageResultsPreprocessor(["a", "b"])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_max_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
for res in expected:
res.update(
{
"max(a)": np.max([result["a"] for result in results]),
"max(b)": np.max([result["b"] for result in results]),
}
)
preprocessor = MaxResultsPreprocessor(["a", "b"])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_weighted_average_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
total_weight = np.sum([result["b"] for result in results])
for res in expected:
res.update(
{
"weight_avg_b(a)": np.sum(
[result["a"] * result["b"] / total_weight for result in results]
)
}
)
preprocessor = WeightedAverageResultsPreprocessor(["a"], "b")
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
@pytest.mark.parametrize(
("results_preprocessor", "expected_value"),
[(AverageResultsPreprocessor, 2.0), (MaxResultsPreprocessor, 3.0)],
)
def test_warning_in_aggregate_results_preprocessors(
caplog, results_preprocessor, expected_value
):
import logging
from copy import deepcopy
from ray.util import debug
    caplog.set_level(logging.WARNING)
results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
results2 = [{"a": 1}, {"a": "invalid"}, {"a": 3}, {"a": "invalid"}]
results3 = [{"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}]
results4 = [{"a": 1}, {"a": 2}, {"a": 3}, {"c": 4}]
# test case 1: metric key `b` is missing from all workers
results_preprocessor1 = results_preprocessor(["b"])
results_preprocessor1.preprocess(results1)
assert "`b` is not reported from workers, so it is ignored." in caplog.text
# test case 2: some values of key `a` have invalid data type
results_preprocessor2 = results_preprocessor(["a"])
expected2 = deepcopy(results2)
aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
for res in expected2:
res.update({aggregation_key: expected_value})
assert results_preprocessor2.preprocess(results2) == expected2
# test case 3: all key `a` values are invalid
results_preprocessor2.preprocess(results3)
assert "`a` value type is not valid, so it is ignored." in caplog.text
# test case 4: some workers don't report key `a`
expected4 = deepcopy(results4)
aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
for res in expected4:
res.update({aggregation_key: expected_value})
assert results_preprocessor2.preprocess(results4) == expected4
for record in caplog.records:
assert record.levelname == "WARNING"
debug.reset_log_once("b")
debug.reset_log_once("a")
def test_warning_in_weighted_average_results_preprocessors(caplog):
import logging
from copy import deepcopy
    caplog.set_level(logging.WARNING)
results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
results2 = [{"b": 1}, {"b": 2}, {"b": 3}, {"b": 4}]
results3 = [
{"a": 1, "c": 3},
{"a": 2, "c": "invalid"},
{"a": "invalid", "c": 1},
{"a": 4, "c": "invalid"},
]
results4 = [
{"a": 1, "c": "invalid"},
{"a": 2, "c": "invalid"},
{"a": 3, "c": "invalid"},
{"a": 4, "c": "invalid"},
]
# test case 1: weight key `b` is not reported from all workers
results_preprocessor1 = WeightedAverageResultsPreprocessor(["a"], "b")
expected1 = deepcopy(results1)
for res in expected1:
res.update({"weight_avg_b(a)": 2.5})
assert results_preprocessor1.preprocess(results1) == expected1
assert (
"Averaging weight `b` is not reported by all workers in `train.report()`."
in caplog.text
)
assert "Use equal weight instead." in caplog.text
# test case 2: metric key `a` (to be averaged) is not reported from all workers
results_preprocessor1.preprocess(results2)
assert "`a` is not reported from workers, so it is ignored." in caplog.text
# test case 3: both metric and weight keys have invalid data type
results_preprocessor2 = WeightedAverageResultsPreprocessor(["a"], "c")
expected3 = deepcopy(results3)
for res in expected3:
res.update({"weight_avg_c(a)": 1.0})
assert results_preprocessor2.preprocess(results3) == expected3
# test case 4: all weight values are invalid
expected4 = deepcopy(results4)
for res in expected4:
res.update({"weight_avg_c(a)": 2.5})
assert results_preprocessor2.preprocess(results4) == expected4
assert "Averaging weight `c` value type is not valid." in caplog.text
for record in caplog.records:
assert record.levelname == "WARNING"
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
|
from builtins import map
from builtins import str
from builtins import object
from copy import deepcopy
import logging
from bq_data_access.v2.seqpeek.seqpeek_interpro import InterProDataProvider
logger = logging.getLogger('main_logger')
SAMPLE_ID_FIELD_NAME = 'sample_id'
TRACK_ID_FIELD = "tumor"
COORDINATE_FIELD_NAME = 'uniprot_aapos'
PROTEIN_ID_FIELD = 'ensg_id'
PROTEIN_DOMAIN_DB = 'PFAM'
SEQPEEK_VIEW_DEBUG_MODE = False
def get_number_of_unique_samples(track):
sample_ids = set()
for mutation in track['mutations']:
sample_ids.add(mutation[SAMPLE_ID_FIELD_NAME])
return len(sample_ids)
def get_number_of_mutated_positions(track):
sample_locations = set()
for mutation in track['mutations']:
sample_locations.add(mutation[COORDINATE_FIELD_NAME])
return len(sample_locations)
def clean_track_mutations(mutations_array):
retval = []
for mutation in mutations_array:
cleaned = deepcopy(mutation)
cleaned[COORDINATE_FIELD_NAME] = int(mutation[COORDINATE_FIELD_NAME])
retval.append(cleaned)
return retval
def sort_track_mutations(mutations_array):
return sorted(mutations_array, key=lambda k: k[COORDINATE_FIELD_NAME])
def get_track_statistics_by_track_type(track, cohort_info_map):
track_id = track[TRACK_ID_FIELD]
result = {
'samples': {
'numberOf': get_number_of_unique_samples(track),
'mutated_positions': get_number_of_mutated_positions(track)
}
}
if track['type'] == 'tumor':
cohort_info = cohort_info_map[track_id]
result['cohort_size'] = cohort_info['size']
else:
# Do not assign cohort size for the 'COMBINED' track.
result['cohort_size'] = None
return result
def filter_protein_domains(match_array):
return [m for m in match_array if m['dbname'] == PROTEIN_DOMAIN_DB]
def get_table_row_id(tumor_type):
return "seqpeek_row_{0}".format(tumor_type)
def build_seqpeek_regions(protein_data):
return [{
'type': 'exon',
'start': 0,
'end': protein_data['length']
}]
def build_summary_track(tracks):
    all_mutations = []
    for track in tracks:
        all_mutations.extend(track["mutations"])
    return {
        'mutations': all_mutations,
'label': 'COMBINED',
'tumor': 'none-combined',
'type': 'summary'
}
def get_track_label_and_cohort_information(track_id_value, cohort_info_map):
cohort_info = cohort_info_map[track_id_value]
label = cohort_info['name']
cohort_size = cohort_info['size']
return label, cohort_size
def get_track_label(track, cohort_info_array):
# The IDs in cohort_info_array are integers, whereas the track IDs are strings.
cohort_map = {str(item['id']): item['name'] for item in cohort_info_array}
return cohort_map[track[TRACK_ID_FIELD]]
def get_protein_domains(uniprot_id):
protein = InterProDataProvider().get_data(uniprot_id)
return protein
class MAFData(object):
def __init__(self, cohort_info, data):
self.cohort_info = cohort_info
self.data = data
@classmethod
def from_dict(cls, param):
return cls(param['cohort_set'], param['items'])
def build_track_data(track_id_list, all_tumor_mutations):
tracks = []
for track_id in track_id_list:
tracks.append({
TRACK_ID_FIELD: track_id,
'mutations': [m for m in all_tumor_mutations if int(track_id) in set(m['cohort'])]
})
return tracks
def find_uniprot_id(mutations):
uniprot_id = None
for m in mutations:
if PROTEIN_ID_FIELD in m:
uniprot_id = m[PROTEIN_ID_FIELD]
break
return uniprot_id
def get_genes_tumors_lists_debug():
return {
'symbol_list': ['EGFR', 'TP53', 'PTEN'],
'disease_codes': ['ACC', 'BRCA', 'GBM']
}
def get_genes_tumors_lists_remote():
context = {
'symbol_list': [],
'track_id_list': []
}
return context
def get_genes_tumors_lists():
if SEQPEEK_VIEW_DEBUG_MODE:
return get_genes_tumors_lists_debug()
else:
return get_genes_tumors_lists_remote()
def get_track_id_list(param):
return list(map(str, param))
def format_removed_row_statistics_to_list(stats_dict):
result = []
for key, value in list(stats_dict.items()):
result.append({
'name': key,
'num': value
})
return result
class SeqPeekViewDataBuilder(object):
def build_view_data(self, hugo_symbol, filtered_maf_vector, seqpeek_cohort_info, cohort_id_list, removed_row_statistics, tables_used):
context = get_genes_tumors_lists()
cohort_info_map = {str(item['id']): item for item in seqpeek_cohort_info}
track_id_list = get_track_id_list(cohort_id_list)
# Since the gene (hugo_symbol) parameter is part of the GNAB feature ID,
# it will be sanity-checked in the SeqPeekMAFDataAccess instance.
uniprot_id = find_uniprot_id(filtered_maf_vector)
logging.info("UniProt ID: " + str(uniprot_id))
protein_data = get_protein_domains(uniprot_id)
track_data = build_track_data(track_id_list, filtered_maf_vector)
plot_data = {
'gene_label': hugo_symbol,
'tracks': track_data,
'protein': protein_data
}
# Pre-processing
# - Sort mutations by chromosomal coordinate
for track in plot_data['tracks']:
track['mutations'] = sort_track_mutations(track['mutations'])
# Annotations
# - Add label, possibly human readable
# - Add type that indicates whether the track is driven by data from search or
# if the track is aggregate
for track in plot_data['tracks']:
track['type'] = 'tumor'
label, cohort_size = get_track_label_and_cohort_information(track[TRACK_ID_FIELD], cohort_info_map)
track['label'] = label
# Display the "combined" track only if more than one cohort is visualized
if len(cohort_id_list) >= 2:
plot_data['tracks'].append(build_summary_track(plot_data['tracks']))
for track in plot_data['tracks']:
# Calculate statistics
track['statistics'] = get_track_statistics_by_track_type(track, cohort_info_map)
# Unique ID for each row
track['render_info'] = {
'row_id': get_table_row_id(track[TRACK_ID_FIELD])
}
plot_data['regions'] = build_seqpeek_regions(plot_data['protein'])
plot_data['protein']['matches'] = filter_protein_domains(plot_data['protein']['matches'])
tumor_list = ','.join(track_id_list)
context.update({
'plot_data': plot_data,
'hugo_symbol': hugo_symbol,
'tumor_list': tumor_list,
'cohort_id_list': track_id_list,
'removed_row_statistics': format_removed_row_statistics_to_list(removed_row_statistics),
'bq_tables': list(set(tables_used))
})
return context
|
import numpy as np
import xgboost as xgb
import pytest
try:
import shap
except ImportError:
    shap = None
pytestmark = pytest.mark.skipif(shap is None, reason="Requires shap package")
def test_with_shap():
from sklearn.datasets import fetch_california_housing
X, y = fetch_california_housing(return_X_y=True)
dtrain = xgb.DMatrix(X, label=y)
model = xgb.train({"learning_rate": 0.01}, dtrain, 10)
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)
margin = model.predict(dtrain, output_margin=True)
assert np.allclose(np.sum(shap_values, axis=len(shap_values.shape) - 1),
margin - explainer.expected_value, 1e-3, 1e-3)
|
import pathlib
import importlib
import sys
__all__ = ['sample', 'sampleTxt', 'sampleBin']
this = pathlib.Path(__file__)
datadir = this.parent.parent / 'data'
loader = importlib.machinery.SourceFileLoader('sample', str(datadir / 'sample.py'))
sample = loader.load_module()
sampleTxt = datadir / 'sample.txt'
sampleBin = datadir / 'sample.bin'
|
import datetime
from keystone import exception
from keystone.auth import plugins as auth_plugins
from keystone.common import dependency
from keystone.openstack.common import log
from oauthlib.oauth2 import RequestValidator
try: from oslo.utils import timeutils
except ImportError: from keystone.openstack.common import timeutils
METHOD_NAME = 'oauth2_validator'
LOG = log.getLogger(__name__)
@dependency.requires('oauth2_api')
class OAuth2Validator(RequestValidator):
"""OAuthlib request validator."""
# Ordered roughly in order of appearance in the authorization grant flow
# Pre- and post-authorization.
def validate_client_id(self, client_id, request, *args, **kwargs):
# Simple validity check, does client exist? Not banned?
client_dict = self.oauth2_api.get_consumer(client_id)
if client_dict:
return True
# NOTE(garcianavalon) Currently the sql driver raises an exception
        # if the consumer doesn't exist so we throw the Keystone NotFound
        # 404 Not Found exception instead of the OAuthlib InvalidClientId
# 400 Bad Request exception.
return False
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
# Is the client allowed to use the supplied redirect_uri? i.e. has
# the client previously registered this EXACT redirect uri.
client_dict = self.oauth2_api.get_consumer(client_id)
registered_uris = client_dict['redirect_uris']
return redirect_uri in registered_uris
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
# The redirect used if none has been supplied.
# Prefer your clients to pre register a redirect uri rather than
# supplying one on each authorization request.
# TODO(garcianavalon) implement
pass
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
# Is the client allowed to access the requested scopes?
if not scopes:
return True # the client is not requesting any scope
client_dict = self.oauth2_api.get_consumer(client_id)
if not client_dict['scopes']:
            return False # the client isn't allowed any scopes
for scope in scopes:
            if scope not in client_dict['scopes']:
return False
return True
def get_default_scopes(self, client_id, request, *args, **kwargs):
# Scopes a client will authorize for if none are supplied in the
# authorization request.
# TODO(garcianavalon) implement
pass
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
# Clients should only be allowed to use one type of response type, the
# one associated with their one allowed grant type.
# FIXME(garcianavalon) we need to support multiple grant types
# for the same consumers right now. In the future we should
# separate them and only allow one grant type (registering
# each client one time for each grant or allowing components)
# or update the tools to allow to create clients with
# multiple grants
# client_dict = self.oauth2_api.get_consumer(client_id)
# allowed_response_type = client_dict['response_type']
# return allowed_response_type == response_type
return True
# Post-authorization
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
# Remember to associate it with request.scopes, request.redirect_uri
# request.client, request.state and request.user (the last is passed in
# post_authorization credentials, i.e. { 'user': request.user}.
authorization_code = {
'code': code['code'], # code is a dict with state and the code
'consumer_id': client_id,
'scopes': request.scopes,
'authorizing_user_id': request.user_id, # populated through the credentials
'state': request.state,
'redirect_uri': request.redirect_uri
}
token_duration = 28800 # TODO(garcianavalon) extract as configuration option
# TODO(garcianavalon) find a better place to do this
now = timeutils.utcnow()
future = now + datetime.timedelta(seconds=token_duration)
expiry_date = timeutils.isotime(future, subsecond=True)
authorization_code['expires_at'] = expiry_date
self.oauth2_api.store_authorization_code(authorization_code)
# Token request
def authenticate_client(self, request, *args, **kwargs):
# Whichever authentication method suits you, HTTP Basic might work
# TODO(garcianavalon) write it cleaner
LOG.debug('OAUTH2: authenticating client')
authmethod, auth = request.headers['Authorization'].split(' ', 1)
auth = auth.decode('unicode_escape')
if authmethod.lower() == 'basic':
auth = auth.decode('base64')
client_id, secret = auth.split(':', 1)
client_dict = self.oauth2_api.get_consumer_with_secret(client_id)
if client_dict['secret'] == secret:
# TODO(garcianavalon) this can be done in a cleaner way
#if we change the consumer model attribute to client_id
request.client = type('obj', (object,),
{'client_id' : client_id})
                LOG.info('OAUTH2: successfully authenticated client %s',
client_dict['name'])
return True
return False
def authenticate_client_id(self, client_id, request, *args, **kwargs):
# Don't allow public (non-authenticated) clients
# TODO(garcianavalon) check this method
return False
def validate_code(self, client_id, code, client, request, *args, **kwargs):
# Validate the code belongs to the client. Add associated scopes,
# state and user to request.scopes, request.state and request.user.
authorization_code = self.oauth2_api.get_authorization_code(code)
if not authorization_code['valid']:
return False
        if authorization_code['consumer_id'] != request.client.client_id:
return False
request.scopes = authorization_code['scopes']
request.state = authorization_code['state']
request.user = authorization_code['authorizing_user_id']
return True
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
# You did save the redirect uri with the authorization code right?
authorization_code = self.oauth2_api.get_authorization_code(code)
return authorization_code['redirect_uri'] == redirect_uri
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
# Clients should only be allowed to use one type of grant.
# FIXME(garcianavalon) we need to support multiple grant types
# for the same consumers right now. In the future we should
# separate them and only allow one grant type (registering
# each client one time for each grant or allowing components)
# or update the tools to allow to create clients with
# multiple grants
# # client_id comes as None, we use the one in request
# client_dict = self.oauth2_api.get_consumer(request.client.client_id)
# return grant_type == client_dict['grant_type']
# TODO(garcianavalon) sync with SQL backend soported grant_types
return grant_type in [
'password', 'authorization_code', 'client_credentials', 'refresh_token',
]
def save_bearer_token(self, token, request, *args, **kwargs):
# Remember to associate it with request.scopes, request.user and
# request.client. The two former will be set when you validate
# the authorization code. Don't forget to save both the
# access_token and the refresh_token and set expiration for the
# access_token to now + expires_in seconds.
# token is a dictionary with the following elements:
# {
# u'access_token': u'iC1DQuu7zOgNIjquPXPmXE5hKnTwgu',
# u'expires_in': 3600,
# u'token_type': u'Bearer',
# u'state': u'yKxWeujbz9VUBncQNrkWvVcx8EXl1w',
# u'scope': u'basic_scope',
# u'refresh_token': u'02DTsL6oWgAibU7xenvXttwG80trJC'
# }
        # TODO(garcianavalon) create a custom TokenCreator instead of
# hacking the dictionary
if getattr(request, 'client', None):
consumer_id = request.client.client_id
else:
consumer_id = request.client_id
if getattr(request, 'user', None):
user_id = request.user
else:
user_id = request.user_id
expires_at = datetime.datetime.today() + datetime.timedelta(seconds=token['expires_in'])
access_token = {
'id':token['access_token'],
'consumer_id':consumer_id,
'authorizing_user_id':user_id,
'scopes': request.scopes,
'expires_at':datetime.datetime.strftime(expires_at, '%Y-%m-%d %H:%M:%S'),
'refresh_token': token.get('refresh_token', None),
}
self.oauth2_api.store_access_token(access_token)
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
# Authorization codes are use once, invalidate it when a Bearer token
# has been acquired.
self.oauth2_api.invalidate_authorization_code(code)
# Protected resource request
def validate_bearer_token(self, token, scopes, request):
# Remember to check expiration and scope membership
try:
access_token = self.oauth2_api.get_access_token(token)
except exception.NotFound:
return False
if (datetime.datetime.strptime(access_token['expires_at'], '%Y-%m-%d %H:%M:%S')
< datetime.datetime.today()):
return False
if access_token['scopes'] != scopes:
return False
# NOTE(garcianavalon) we set some attributes in request for later use. There
# is no documentation about this so I follow the comments found in the example
# at https://oauthlib.readthedocs.org/en/latest/oauth2/endpoints/resource.html
# which are:
# oauthlib_request has a few convenient attributes set such as
# oauthlib_request.client = the client associated with the token
# oauthlib_request.user = the user associated with the token
# oauthlib_request.scopes = the scopes bound to this token
# request.scopes is set by oauthlib already
request.user = access_token['authorizing_user_id']
request.client = access_token['consumer_id']
return True
# Token refresh request
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
# Obtain the token associated with the given refresh_token and
# return its scopes, these will be passed on to the refreshed
# access token if the client did not specify a scope during the
# request.
# TODO(garcianavalon)
return ['all_info']
def is_within_original_scope(self, request_scopes, refresh_token, request, *args, **kwargs):
"""Check if requested scopes are within a scope of the refresh token.
When access tokens are refreshed the scope of the new token
needs to be within the scope of the original token. This is
ensured by checking that all requested scopes strings are on
the list returned by the get_original_scopes. If this check
fails, is_within_original_scope is called. The method can be
used in situations where returning all valid scopes from the
get_original_scopes is not practical.
:param request_scopes: A list of scopes that were requested by client
:param refresh_token: Unicode refresh_token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Refresh token grant
"""
# TODO(garcianavalon)
return True
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""Ensure the Bearer token is valid and authorized access to scopes.
OBS! The request.user attribute should be set to the resource owner
associated with this refresh token.
:param refresh_token: Unicode refresh token
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (indirectly by issuing refresh tokens)
- Resource Owner Password Credentials Grant (also indirectly)
- Refresh Token Grant
"""
try:
access_token = self.oauth2_api.get_access_token_by_refresh_token(refresh_token)
# Validate that the refresh token is not expired
token_duration = 28800 # TODO(garcianavalon) extract as configuration option
refresh_token_duration = 14 # TODO(garcianavalon) extract as configuration option
# TODO(garcianavalon) find a better place to do this
access_token_expiration_date = datetime.datetime.strptime(
access_token['expires_at'], '%Y-%m-%d %H:%M:%S')
            refresh_token_expiration_date = (
                access_token_expiration_date
                - datetime.timedelta(seconds=token_duration)
                + datetime.timedelta(days=refresh_token_duration))
            if refresh_token_expiration_date < datetime.datetime.today():
return False
except exception.NotFound:
return False
request.user = access_token['authorizing_user_id']
return True
# Support for password grant
def validate_user(self, username, password, client, request,
*args, **kwargs):
"""Ensure the username and password is valid.
OBS! The validation should also set the user attribute of the request
to a valid resource owner, i.e. request.user = username or similar. If
not set you will be unable to associate a token with a user in the
        persistence method used (commonly, save_bearer_token).
:param username: Unicode username
:param password: Unicode password
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Resource Owner Password Credentials Grant
"""
# To validate the user, try to authenticate it
password_plugin = auth_plugins.password.Password()
auth_payload = {
'user': {
"domain": {
"id": "default"
},
"name": username,
"password": password
}
}
auth_context = {}
try:
password_plugin.authenticate(
context={},
auth_payload=auth_payload,
auth_context=auth_context)
# set the request user
request.user = auth_context['user_id']
return True
except Exception:
return False
|
from zope.i18nmessageid import MessageFactory
PloneMessageFactory = MessageFactory('plone')
from Products.CMFCore.permissions import setDefaultRoles
setDefaultRoles('signature.portlets.gdsignature: Add GroupDocs Signature portlet',
('Manager', 'Site Administrator', 'Owner',))
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.db.models import Q
from pathlib import Path
from webframe.functions import TRUE_VALUES, LogMessage as lm, getTime
from webframe.models import Preference, AbstractPreference
from uuid import UUID
import logging, os, glob, sys, re
logger=logging.getLogger('webframe.commands.prefs')
class Command(BaseCommand):
    help = '''Manipulate the preference in database. Including insert/update/delete/view/import/gensecret/gendoc; Importing supports csv|xlsx files.'''
def __getIndent__(self, indent=0, ch=' '):
return ch*indent
def create_parser(self, cmdName, subcommand, **kwargs):
parser=super().create_parser(cmdName, subcommand, **kwargs)
parser.epilog='''Example:\r\n
\tpref import path_to_prefs #Import a folder or a csv/xlsx file\r\n
\tpref set ABC --value="def" #Set the preference "ABC" to value "def"\r\n
\tpref gensecret #Generate the encryption secret; PLEASE backup in secure way.\r\n
        \tpref gendoc prefsDoc.html #Generate the documentation and save it as prefsDoc.html
'''
return parser
def add_arguments(self, parser):
#Default Value
pattern='Pref({pref.id}:{pref.name}): {pref.value}'
action='show'
max=256
wildcard='*'
tmpl='webframe/prefsDoc.html'
#Adding arguments
parser.add_argument('action', type=str, help='The action to be taken. One of import/export/show/set/delete/gensecret/gendoc; Default is {0}'.format(action), default=action)
parser.add_argument('name', type=str, nargs='?', help='[import/export/show/set/delete/gendoc]; The name of the preference or path of importing/exporting file (csv|xlsx);')
parser.add_argument('--file', dest='file', type=str, help='[import/export/gendoc]; The file path for import/export/output.')
parser.add_argument('--value', dest='value', type=str, help='[set/delete]; The value of the preference;', default=None)
parser.add_argument('--owner', dest='owner', type=str, help='[set/delete]; The owner of the preference; Optional;', default=None)
parser.add_argument('--noowner', dest='noowner', action='store_true', help='[show/set/delete]; The target preference has no owner; Optional; Default False')
parser.add_argument('--parent', dest='parent', type=str, help='[show/set/delete]; The parent\'s name of the preference. Optional;', default=None)
parser.add_argument('--noparent', dest='noparent', action='store_true', help='[show/set/delete]; The target preference has no parent; Optional; Default False')
parser.add_argument('--pattern', dest='pattern', type=str, help='[show]; The output pattern. {0}'.format(pattern), default=pattern)
parser.add_argument('--max', dest='max', type=int, help='[show]; The maximum number of preference to show. Default is {0}'.format(max), default=max)
parser.add_argument('--wildcard', dest='wildcard', type=str, help='[show]; Specify the wildcard; Default is {0}'.format(wildcard), default=wildcard)
#Importing
parser.add_argument('--sep', dest='separator', type=str, default=',', help='[import]; The separator when CSV importing; Default \",\"')
parser.add_argument('--encoding', dest='encoding', type=str, default='utf-8', help='[import]; The encoding when CSV importing; Default \"utf-8\"')
parser.add_argument('--quotechar', dest='quotechar', type=str, default='\"', help='[import]; The quote-char when CSV importing; Default double quote: \"')
parser.add_argument('--filepath', dest='filepath', action='store_true', help='[import]; Import the file-path in preferences; Default False')
        parser.add_argument('--force', '-f', dest='force', action='store_true', help='[import]; Force the import', default=False)
#Generate Doc
parser.add_argument('--tmpl', dest='tmpl', type=str, help="[gendoc]; The template name when generating document; Default: {0}".format(tmpl), default=tmpl)
def __get_owner__(self, owner=None):
        owner=owner if owner else self.kwargs.get('owner')
        if not owner: return None
        logger.debug('Getting owner by: "%s"', owner)
        return get_user_model().objects.get(username=owner)
def __get_parent__(self, parent=None):
parent=parent if parent else self.kwargs['parent']
if parent:
try:
#Get parent by uuid
return Preference.objects.get(id=parent)
except:
try:
#Get parent by name
return Preference.objects.get(name=parent)
except:
pass
return None
def __get_pref__(self, **kwargs):
owner=kwargs['owner'] if 'owner' in kwargs else self.__get_owner__()
parent=kwargs['parent'] if 'parent' in kwargs else self.__get_parent__()
name=kwargs['name'] if 'name' in kwargs else self.kwargs['name']
lang=kwargs['lang'] if 'lang' in kwargs else None
if self.kwargs['filepath']: name=os.path.basename(name)
if self.kwargs['parent'] and parent==None:
raise Preference.DoesNotExist('Parent Preference not found: {0}'.format(self.kwargs['parent']))
rst=Preference.objects.all()
if name and name!='*':
rst=rst.filter(name=name)
if owner:
rst=rst.filter(owner=owner)
elif self.kwargs['noowner']:
rst=rst.filter(owner__isnull=True)
if parent:
rst=rst.filter(parent=parent)
elif self.kwargs['noparent']:
rst=rst.filter(parent__isnull=True)
if self.kwargs['filepath']:
rst=rst.filter(tipe=AbstractPreference.TYPE_FILEPATH)
rst=rst.order_by('owner', 'parent', 'sequence', 'name')
return rst
def __get_name__( self, name ):
'''
Get the name and sequence according to the name.
        @param name The string including the sequence and name. For example, '01.Target' will return a tuple ('Target', 1)
        @return A tuple including the name and the sequence
'''
p=re.search(r'^\d+\.', name)
if p:
s=p.group(0)
return name[len(s):].strip(), int(name[0:len(s)-1])
return (name, sys.maxsize if hasattr(sys, 'maxsize') else sys.maxint) #Default append
def output( self, pref, pattern=None ):
pattern=pattern if pattern else self.kwargs['pattern']
print(pattern.format(pref=pref))
pattern=' {0}'.format(pattern)
for ch in pref.childs:
self.output(ch, pattern)
def handle(self, *args, **kwargs):
verbosity=int(kwargs['verbosity'])
if verbosity==3:
logger.setLevel(logging.DEBUG)
elif verbosity==2:
logger.setLevel(logging.INFO)
elif verbosity==1:
logger.setLevel(logging.WARNING)
else:
logger.setLevel(logging.ERROR)
self.kwargs=kwargs
action=kwargs['action']
if action=='import':
self.imp()
elif action=='create': #for backward compatibility
self.set()
elif action=='update': #for backward compatibility
self.set()
elif action=='set':
self.set()
elif action=='delete':
self.delete()
elif action=='show':
self.show()
elif action=='gensecret':
self.gensecret()
elif action=='gendoc':
self.gendoc()
elif action=='export':
self.expCsv()
else:
logger.warning('Unknown action: {0}'.format(action))
        logger.warning('DONE!')
def show(self):
logger.info('Showing the preference ...')
q=Preference.objects.all()
if self.kwargs['name']:
logger.info(' with the name filter: {0}'.format(self.kwargs['name']))
if self.kwargs['wildcard'] in self.kwargs['name']:
q=q.filter(name__icontains=self.kwargs['name'].replace(self.kwargs['wildcard'], ''))
else:
q=q.filter(name=self.kwargs['name'])
if self.kwargs['value']:
logger.info(' with the value filter: {0}'.format(self.kwargs['value']))
q=q.filter(value__icontains=self.kwargs['value'])
if self.kwargs['owner']:
logger.info(' which belongs to user: {0}'.format(self.kwargs['owner']))
q=q.filter(owner__username=self.kwargs['owner'])
if self.kwargs['parent']:
logger.info(' which belongs to preference: {0}'.format(self.kwargs['parent']))
q=q.filter(parent__name__iexact=self.kwargs['parent'])
else:
q=q.filter(parent__isnull=True)
for p in q:
self.output(p)
        logger.warning('{0} preference(s) have been shown'.format(len(q)))
def set(self):
with transaction.atomic():
try:
pref=self.__get_pref__()
if pref.count()<1: raise Preference.DoesNotExist
cnt=pref.update(value=self.kwargs['value'])
logger.info('{0} of Preference(s) has been updated'.format(cnt))
except Preference.DoesNotExist:
                p=Preference(name=self.kwargs['name'], value=self.kwargs['value'], owner=self.__get_owner__(), parent=self.__get_parent__())
p.save()
logger.info('The preference<{0}> has been created with value: {1}'.format(p.name, p.value))
def delete(self):
pref=self.__get_pref__()
cnt=pref.count()
pref.delete()
logger.warning('{0} of Preference(s) has been deleted'.format(cnt))
def expRow( self, wr, pref, indent=0 ):
'''
        Export the specified preference to csv.
'''
cnt=0
tab=self.__getIndent__(indent)
logger.debug(lm('{0}Exporting preference: {1}::{2}...', tab, pref.id, pref.name))
wr.writerow([
pref.name # [0]
, pref.realValue # [1]
, pref.parent.id if pref.parent else '' # [2]
, pref.owner.username if pref.owner else '' # [3]
, pref.helptext # [4]
, Preference.TYPES[pref.tipe][1] # [5]
, pref.encrypted # [6]
, pref.regex # [7]
])
cnt+=1
for p in pref.childs:
cnt+=self.expRow(wr, p, indent+3)
return cnt
def expCsv( self ):
'''
        Export the specified list of preferences to csv.
'''
import csv
f=self.kwargs['file']
with open(f, 'w', encoding=self.kwargs['encoding']) as fp:
wr=csv.writer(fp, delimiter=self.kwargs['separator'], quotechar=self.kwargs['quotechar'], quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)
cnt=0
for p in self.__get_pref__():
cnt+=self.expRow(wr, p, 0)
logger.info(lm('Exported {0} records', cnt))
def improw( self, cols, idx=0 ):
try:
name=cols[0]
val=cols[1]
parent=self.__get_parent__(cols[2])
owner=self.__get_owner__(cols[3])
helptext=cols[4]
tipe=cols[5]
encrypted=cols[6] in TRUE_VALUES
regex=cols[7]
lang=cols[8] if len(cols)>8 else None
logger.debug(' Importing row: {0}: {1} [{2}]'.format(idx, name, 'encrypted' if encrypted else 'clear-text'))
self.kwargs['name']=name
pref=self.__get_pref__(name=name, owner=owner, parent=parent, lang=lang)
if pref.count()<1: raise Preference.DoesNotExist
for p in pref:
p.encrypted=encrypted
p.helptext=helptext
p.tipe=tipe
p.regex=regex
                #The value must be the last step to set due to validation. Otherwise, when importing/assigning a new value into this field, the previous validation rule may be applied incorrectly
p.value=val
p.save()
except Preference.DoesNotExist:
Preference(name=name, _value=val, owner=owner, parent=parent, encrypted=encrypted, helptext=helptext, regex=regex, lang=lang).save()
except:
logger.debug(cols)
logger.exception('Error when handling the column')
raise
def impXlsx( self, f ):
'''
Import xlsx file.
'''
from openpyxl import load_workbook
wb=load_workbook(filename=f)
ws=wb.active
logger.info(' Importing worksheet: {0}!{1}'.format(f, ws.title))
cnt=0
with transaction.atomic():
for r in range(1, ws.max_row+1):
cols=list()
name=ws.cell(row=r, column=1).value
if isinstance(name, str): name=name.strip()
if not name: continue #Skip the row when it has no pref.name
                if r==1 and (name.upper()=='ID' or name.upper()=='NAME' or name.upper()=='ID/NAME'): continue #Skip the first row if header row
cols.append(name) #Name/ID
cols.append(ws.cell(row=r, column=2).value) #Value
cols.append(ws.cell(row=r, column=3).value) #Parent
cols.append(ws.cell(row=r, column=4).value) #Owner
                cols.append(ws.cell(row=r, column=5).value) #Helptext
                cols.append(ws.cell(row=r, column=6).value) #Tipe
                cols.append(ws.cell(row=r, column=7).value) #Encrypted
                cols.append(ws.cell(row=r, column=8).value) #Regex (required by improw)
self.improw( cols, r )
cnt+=1
logger.info(' Imported {0} row(s)'.format(cnt))
def impCsv( self, f ):
'''
Import the csv file.
'''
import csv
with transaction.atomic():
logger.info(' Importing csv: {0}'.format(f))
cnt=0
with open(f, 'r', encoding=self.kwargs['encoding']) as fp:
if self.kwargs['quotechar']:
rows=csv.reader(fp, delimiter=self.kwargs['separator'], quotechar=self.kwargs['quotechar'], quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)
else:
rows=csv.reader(fp, delimiter=self.kwargs['separator'], quoting=csv.QUOTE_NONE, skipinitialspace=True)
for row in rows:
if len(row)<1: continue #Skip the empty row
name=row[0].strip()
if not name: continue #Skip the row when it has no name
if cnt==0 and (name.upper()=='ID' or name.upper()=='NAME' or name.upper()=='ID/NAME'): continue #Skip the first row if header row
self.improw( row, cnt )
cnt+=1
logger.info(' Imported {0} row(s)'.format(cnt))
def impdir( self, d ):
if os.path.isdir(d):
logger.info('Importing directory: {0}'.format(d))
else:
logger.warning('This is not the directory: {0}'.format(d))
return
cnt=0
with transaction.atomic():
p=Preference.objects.pref('IMPORTED_PREFERENCES', returnValue=False)
            p.helptext='<p>System use only! <strong>DO NOT MODIFY</strong> yourself unless you understand the risk.</p>'
p.save()
for f in os.listdir(d):
if not (f.upper().endswith('.XLSX') or f.upper().endswith('.CSV')): continue #only support *.xlsx and *.csv
f=os.path.join(d, f)
try:
Preference.objects.get(name=f, parent=p)
if self.kwargs['force']: raise Preference.DoesNotExist
except Preference.DoesNotExist:
self.impfile( f )
cnt+=1
Preference(name=f, parent=p).save()
logger.debug('Imported {0} file(s)'.format(cnt))
def impfile( self, f ):
if not (os.path.isfile(f) and os.access(f, os.R_OK)):
logger.warning('The file is not readable: {0}'.format(f))
return
fn=f.lower()
if fn.endswith('.xlsx'):
self.impXlsx(f)
elif fn.endswith('.csv'):
self.impCsv(f)
else:
logger.info('Unsupported file: {0}'.format(f))
def imppath( self, p, parent=None):
name, seq=self.__get_name__(os.path.basename(p))
if os.path.isdir(p):
try:
pref=self.__get_pref__(name=name)
if pref.count()<1: raise Preference.DoesNotExist
pref=pref[0]
except Preference.DoesNotExist:
pref=Preference(name=name, parent=parent)
pref.tipe=AbstractPreference.TYPE_FILEPATH
pref.sequence=seq
pref.save()
for f in os.listdir(p):
path=os.path.join(p, f)
self.imppath(path, pref)
#Handling the ordering after import all the childs
ord=1
for c in pref.childs:
c.sequence=ord
c.save()
ord+=1
else:
try:
pref=self.__get_pref__(name=name)
if pref.count()<1: raise Preference.DoesNotExist
pref=pref[0]
except Preference.DoesNotExist:
pref=Preference(name=name, parent=parent)
pref.pathValue=p if os.path.isabs(p) else os.path.abspath(p)
pref.tipe=AbstractPreference.TYPE_FILEPATH
pref.sequence=seq
pref.save()
def imp(self):
disableOrder=getattr(settings, 'DISABLE_REORDER', False)
setattr(settings, 'DISABLE_REORDER', True) #Disable the re-ordering features during importing
try:
f=self.kwargs['file']
if self.kwargs['filepath']:
self.imppath(f)
elif os.path.isdir(f):
self.impdir(f)
elif os.path.isfile(f):
self.impfile(f)
finally:
setattr(settings, 'DISABLE_REORDER', disableOrder) #Resume the re-ordering features after importing
def gensecret(self):
from webframe.models import AbstractPreference
key=AbstractPreference.__getSecret__()
logger.warning(lm('Your secret is: {0}', key))
def gendoc(self):
from django.shortcuts import render
from django.template import loader, Template, Context
from webframe.providers import template_injection, fmt_injection
        tmpl=self.kwargs.get('tmpl', 'webframe/prefDoc.html')
logger.warning(lm('Generating the documents according template: {0}', tmpl))
tmpl=loader.get_template(tmpl)
params=dict()
params.update(template_injection(None))
params.update(fmt_injection(None))
#params['target']=Preference.objects.filter(parent__isnull=True)
params['target']=self.__get_pref__()
params['TYPES']=Preference.TYPES
params['now']=getTime('now')
txt=tmpl.render(params)
output=self.kwargs.get('file')
if not output: output='prefsDoc.html'
logger.warning(lm('Generated! Outputing into: {0}', output))
with open(output, 'w') as f:
f.write(txt)
|
import hashlib
import base64
import datetime
import urllib2
import json
class TemplateSMS:
account_sid = ''
account_token = ''
app_id = ''
server_ip = ''
server_port = ''
soft_version = ''
timestamp = ''
def set_account(self, account_sid, token):
self.account_sid = account_sid
self.account_token = token
def __init__(self, ip, port, version):
self.server_ip = ip
self.server_port = port
self.soft_version = version
def set_app_id(self, app_id):
self.app_id = app_id
def send_template_sms(self, to, random, valid_min, temp_id):
now_date = datetime.datetime.now()
self.timestamp = now_date.strftime("%Y%m%d%H%M%S")
signature = self.account_sid + self.account_token + self.timestamp
sig = hashlib.md5()
sig.update(signature)
sig = sig.hexdigest().upper()
url = "https://" + self.server_ip + ":" + self.server_port + "/" + self.soft_version + "/Accounts/" + \
self.account_sid + "/SMS/TemplateSMS?sig=" + sig
src = self.account_sid + ":" + self.timestamp
req = urllib2.Request(url)
b = '["%s","%s"]' % (random, valid_min)
body = '''{"to": "%s", "datas": %s, "templateId": "%s", "appId": "%s"}''' % (to, b, temp_id, self.app_id)
req.add_data(body)
auth = base64.encodestring(src).strip()
req.add_header("Authorization", auth)
req.add_header("Accept", 'application/json;')
req.add_header("Content-Type", "application/json;charset=utf-8;")
req.add_header("Host", "127.0.0.1")
req.add_header("content-length", len(body))
try:
res = urllib2.urlopen(req)
data = res.read()
res.close()
locations = json.loads(data)
return locations
except:
return {'172001': 'network error'}
def query_account_info(self):
now_date = datetime.datetime.now()
self.timestamp = now_date.strftime("%Y%m%d%H%M%S")
signature = self.account_sid + self.account_token + self.timestamp
sig = hashlib.md5()
sig.update(signature)
sig = sig.hexdigest().upper()
url = "https://" + self.server_ip + ":" + self.server_port + "/" + self.soft_version + "/Accounts/" + \
self.account_sid + "/AccountInfo?sig=" + sig
src = self.account_sid + ":" + self.timestamp
auth = base64.encodestring(src).strip()
req = urllib2.Request(url)
req.add_header("Accept", "application/json")
req.add_header("Content-Type", "application/jsoncharset=utf-8")
req.add_header("Authorization", auth)
try:
res = urllib2.urlopen(req)
data = res.read()
res.close()
locations = json.loads(data)
return locations
except:
return {"statusCode": '172001'}
|
from __future__ import print_function, division, absolute_import
import os
import argparse
import random
import numpy as np
import datetime
import os.path as osp
import sys
cur_dir = osp.dirname(osp.abspath(__file__))
sys.path.insert(1, osp.join(cur_dir, '.'))
from sklearn.datasets import load_svmlight_file
from scipy.sparse import csr_matrix
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import tensorflow as tf
from tf_utils import pinv_naive, pinv
path_train = osp.join(cur_dir, "../a9a/a9a")
path_test = osp.join(cur_dir, "../a9a/a9a.t")
MAX_ITER = 100
np_dtype = np.float32
tf_dtype = tf.float32
manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
np.random.seed(manualSeed)
X_train, y_train = load_svmlight_file(path_train, n_features=123, dtype=np_dtype)
X_test, y_test = load_svmlight_file(path_test, n_features=123, dtype=np_dtype)
N_train = X_train.shape[0]
N_test = X_test.shape[0]
X_train = np.hstack((np.ones((N_train, 1)), X_train.toarray())).astype(np_dtype)
X_test = np.hstack((np.ones((N_test, 1)), X_test.toarray())).astype(np_dtype)
y_train = y_train.reshape((N_train, 1))
y_test = y_test.reshape((N_test, 1))
y_train = np.where(y_train == -1, 0, 1)
y_test = np.where(y_test == -1, 0, 1)
def neg_log_likelihood(w, X, y, L2_param=None):
"""
w: dx1
X: Nxd
y: Nx1
L2_param: \lambda>0, will introduce -\lambda/2 ||w||_2^2
"""
# print(type(X), X.dtype)
res = tf.matmul(tf.matmul(tf.transpose(w), tf.transpose(X)), y.astype(np_dtype)) - \
tf.reduce_sum(tf.math.log(1 + tf.exp(tf.matmul(X, w))))
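    # res is the (penalized) log-likelihood  w'X'y - sum_i log(1 + exp(x_i'w));
    # the L2 term below subtracts lambda/2 * ||w||^2, and the function returns -res.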
    if L2_param is not None and L2_param > 0:
res += -0.5 * L2_param * tf.matmul(tf.transpose(w), w)
return -res[0][0]
def prob(X, w):
"""
X: Nxd
w: dx1
---
prob: N x num_classes(2)"""
y = tf.constant(np.array([0.0, 1.0]), dtype=tf.float32)
prob = tf.exp(tf.matmul(X, w) * y) / (1 + tf.exp(tf.matmul(X, w)))
return prob
def compute_acc(X, y, w):
p = prob(X, w)
y_pred = tf.cast(tf.argmax(p, axis=1), tf.float32)
y = tf.cast(tf.squeeze(y), tf.float32)
acc = tf.reduce_mean(tf.cast(tf.equal(y, y_pred), tf.float32))
return acc
def update(w_old, X, y, L2_param=0):
"""
w_new = w_old - w_update
w_update = (X'RX+lambda*I)^(-1) (X'(mu-y) + lambda*w_old)
lambda is L2_param
w_old: dx1
X: Nxd
y: Nx1
---
w_update: dx1
"""
d = X.shape[1]
mu = tf.sigmoid(tf.matmul(X, w_old)) # Nx1
R_flat = mu * (1 - mu) # element-wise, Nx1
L2_reg_term = L2_param * tf.eye(d)
XRX = tf.matmul(tf.transpose(X), R_flat * X) + L2_reg_term # dxd
# np.save('XRX_tf.npy', XRX.numpy())
# calculate pseudo inverse via SVD
# method 1
# slightly better than tfp.math.pinv when L2_param=0
XRX_pinv = pinv_naive(XRX)
# method 2
# XRX_pinv = pinv(XRX)
# w = w - (X^T R X)^(-1) X^T (mu-y)
# w_new = tf.assign(w_old, w_old - tf.matmul(tf.matmul(XRX_pinv, tf.transpose(X)), mu - y))
y = tf.cast(y, tf_dtype)
w_update = tf.matmul(XRX_pinv, tf.matmul(tf.transpose(X), mu - y) + L2_param * w_old)
return w_update
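# For reference, a minimal NumPy sketch of the IRLS step implemented above
# (illustrative only; w_old_np / y_np are hypothetical dense arrays, not names
# used in this script):
#   mu = 1.0 / (1.0 + np.exp(-X @ w_old_np))            # Nx1 sigmoid
#   R = mu * (1.0 - mu)                                  # Nx1, diagonal of R
#   H = X.T @ (R * X) + L2_param * np.eye(X.shape[1])    # X'RX + lambda*I
#   g = X.T @ (mu - y_np) + L2_param * w_old_np          # X'(mu-y) + lambda*w_old
#   w_new_np = w_old_np - np.linalg.pinv(H) @ g          # Newton/IRLS update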
def optimize(w_old, w_update):
"""custom update op, instead of using SGD variants"""
return w_old.assign(w_old - w_update)
def train_IRLS(X_train, y_train, X_test=None, y_test=None, L2_param=0, max_iter=MAX_ITER):
"""train Logistic Regression via IRLS algorithm
X: Nxd
y: Nx1
---
"""
N, d = X_train.shape
w = tf.Variable(0.01 * tf.ones((d, 1), dtype=tf.float32), name="w")
current_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
summary_writer = tf.summary.create_file_writer(f"./logs/{current_time}")
print("start training...")
print("L2 param(lambda): {}".format(L2_param))
i = 0
# iteration
while i <= max_iter:
print("iter: {}".format(i))
# print('\t neg log likelihood: {}'.format(sess.run(neg_L, feed_dict=train_feed_dict)))
neg_L = neg_log_likelihood(w, X_train, y_train, L2_param)
print("\t neg log likelihood: {}".format(neg_L))
train_acc = compute_acc(X_train, y_train, w)
with summary_writer.as_default():
tf.summary.scalar("train_acc", train_acc, step=i)
tf.summary.scalar("train_neg_L", neg_L, step=i)
test_acc = compute_acc(X_test, y_test, w)
with summary_writer.as_default():
tf.summary.scalar("test_acc", test_acc, step=i)
print("\t train acc: {}, test acc: {}".format(train_acc, test_acc))
L2_norm_w = np.linalg.norm(w.numpy())
print("\t L2 norm of w: {}".format(L2_norm_w))
if i > 0:
diff_w = np.linalg.norm(w_update.numpy())
print("\t diff of w_old and w: {}".format(diff_w))
if diff_w < 1e-2:
break
w_update = update(w, X_train, y_train, L2_param)
w = optimize(w, w_update)
i += 1
print("training done.")
if __name__ == "__main__":
# test_acc should be about 0.85
lambda_ = 20 # 0
train_IRLS(X_train, y_train, X_test, y_test, L2_param=lambda_, max_iter=100)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train.reshape(N_train,))
y_pred_train = classifier.predict(X_train)
train_acc = np.sum(y_train.reshape(N_train,) == y_pred_train)/N_train
print('train_acc: {}'.format(train_acc))
y_pred_test = classifier.predict(X_test)
test_acc = np.sum(y_test.reshape(N_test,) == y_pred_test)/N_test
print('test acc: {}'.format(test_acc))
|
from google.cloud import aiplatform_v1
def sample_delete_study():
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteStudyRequest(
name="name_value",
)
# Make the request
client.delete_study(request=request)
|
import sys
import logging
import hexdump
import vstruct
import vivisect
import envi
import envi.archs.i386 as x86
import envi.archs.amd64 as x64
import sdb
from sdb import SDB_TAGS
from sdb_dump_common import SdbIndex
from sdb_dump_common import item_get_child
from sdb_dump_common import item_get_children
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("sdb_dump_patch")
g_logger.setLevel(logging.DEBUG)
ARCH_32 = "32"
ARCH_64 = "64"
def disassemble(buf, base=0, arch=ARCH_32):
if arch == ARCH_32:
d = x86.i386Disasm()
elif arch == ARCH_64:
d = x64.Amd64Disasm()
else:
raise RuntimeError('unknown arch: ' + str(arch))
offset = 0
while True:
if offset >= len(buf):
break
o = d.disasm(buf, offset, base)
yield "0x%x: %s" % (base + offset, str(o))
offset += o.size
class GreedyVArray(vstruct.VArray):
def __init__(self, C):
vstruct.VArray.__init__(self)
self._C = C
def vsParse(self, bytez, offset=0, fast=False):
soffset = offset
while offset < len(bytez):
c = self._C()
try:
offset = c.vsParse(bytez, offset=offset, fast=False)
except:
break
self.vsAddElement(c)
return offset
def vsParseFd(self, fd):
raise NotImplementedError()
def dump_patch(bits, arch=ARCH_32):
ps = GreedyVArray(sdb.PATCHBITS)
ps.vsParse(bits.value.value)
for i, _ in ps:
p = ps[int(i)]
print(" opcode: %s" % str(p["opcode"]))
print(" module name: %s" % p.module_name)
print(" rva: 0x%08x" % p.rva)
print(" unk: 0x%08x" % p.unknown)
print(" payload:")
print(hexdump.hexdump(str(p.pattern), result="return"))
print(" disassembly:")
for l in disassemble(str(p.pattern), p.rva, arch=arch):
print(" " + l)
print("")
def _main(sdb_path, patch_name):
from sdb import SDB
with open(sdb_path, "rb") as f:
buf = f.read()
g_logger.debug("loading database")
s = SDB()
s.vsParse(bytearray(buf))
g_logger.debug("done loading database")
index = SdbIndex()
g_logger.debug("indexing strings")
index.index_sdb(s)
g_logger.debug("done indexing strings")
try:
library = item_get_child(s.database_root, SDB_TAGS.TAG_LIBRARY)
except KeyError:
pass
else:
for shim_ref in item_get_children(library, SDB_TAGS.TAG_SHIM_REF):
patch = item_get_child(shim_ref, SDB_TAGS.TAG_PATCH)
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name != patch_name:
continue
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
try:
patch = item_get_child(s.database_root, SDB_TAGS.TAG_PATCH)
except KeyError:
pass
else:
name_ref = item_get_child(patch, SDB_TAGS.TAG_NAME)
name = index.get_string(name_ref.value.reference)
if name == patch_name:
bits = item_get_child(patch, SDB_TAGS.TAG_PATCH_BITS)
dump_patch(bits, arch=ARCH_32)
def main():
import sys
return sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
|
"""Keystone's pep8 extensions.
In order to make the review process faster and easier for core devs we are
adding some Keystone specific pep8 checks. This will catch common errors
so that core devs don't have to.
There are two types of pep8 extensions. One is a function that takes either
a physical or logical line. The physical or logical line is the first param
in the function definition and can be followed by other parameters supported
by pep8. The second type is a class that parses AST trees. For more info
please see pep8.py.
"""
import ast
import re
import six
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
class CheckForMutableDefaultArgs(BaseASTChecker):
"""Checks for the use of mutable objects as function/method defaults.
We are only checking for list and dict literals at this time. This means
that a developer could specify an instance of their own and cause a bug.
The fix for this is probably more work than it's worth because it will
get caught during code review.
"""
CHECK_DESC = 'K001 Using mutable as a function/method default'
MUTABLES = (
ast.List, ast.ListComp,
ast.Dict, ast.DictComp,
ast.Set, ast.SetComp,
ast.Call)
def visit_FunctionDef(self, node):
for arg in node.args.defaults:
if isinstance(arg, self.MUTABLES):
self.add_error(arg)
super(CheckForMutableDefaultArgs, self).generic_visit(node)
def block_comments_begin_with_a_space(physical_line, line_number):
"""There should be a space after the # of block comments.
There is already a check in pep8 that enforces this rule for
inline comments.
Okay: # this is a comment
Okay: #!/usr/bin/python
Okay: # this is a comment
K002: #this is a comment
"""
MESSAGE = "K002 block comments should start with '# '"
# shebangs are OK
if line_number == 1 and physical_line.startswith('#!'):
return
text = physical_line.strip()
if text.startswith('#'): # look for block comments
if len(text) > 1 and not text[1].isspace():
return physical_line.index('#'), MESSAGE
class CheckForAssertingNoneEquality(BaseASTChecker):
"""Ensures that code does not use a None with assert(Not*)Equal."""
CHECK_DESC_IS = ('K003 Use self.assertIsNone(...) when comparing '
'against None')
CHECK_DESC_ISNOT = ('K004 Use assertIsNotNone(...) when comparing '
                        'against None')
def visit_Call(self, node):
# NOTE(dstanek): I wrote this in a verbose way to make it easier to
# read for those that have little experience with Python's AST.
if isinstance(node.func, ast.Attribute):
if node.func.attr == 'assertEqual':
for arg in node.args:
if isinstance(arg, ast.Name) and arg.id == 'None':
self.add_error(node, message=self.CHECK_DESC_IS)
elif node.func.attr == 'assertNotEqual':
for arg in node.args:
if isinstance(arg, ast.Name) and arg.id == 'None':
self.add_error(node, message=self.CHECK_DESC_ISNOT)
super(CheckForAssertingNoneEquality, self).generic_visit(node)
class CheckForLoggingIssues(BaseASTChecker):
DEBUG_CHECK_DESC = 'K005 Using translated string in debug logging'
NONDEBUG_CHECK_DESC = 'K006 Not using translating helper for logging'
EXCESS_HELPER_CHECK_DESC = 'K007 Using hints when _ is necessary'
LOG_MODULES = ('logging', 'keystone.openstack.common.log')
I18N_MODULES = (
'keystone.i18n._',
'keystone.i18n._LI',
'keystone.i18n._LW',
'keystone.i18n._LE',
'keystone.i18n._LC',
)
TRANS_HELPER_MAP = {
'debug': None,
'info': '_LI',
'warn': '_LW',
'warning': '_LW',
'error': '_LE',
'exception': '_LE',
'critical': '_LC',
}
def __init__(self, tree, filename):
super(CheckForLoggingIssues, self).__init__(tree, filename)
self.logger_names = []
self.logger_module_names = []
self.i18n_names = {}
# NOTE(dstanek): this kinda accounts for scopes when talking
# about only leaf node in the graph
self.assignments = {}
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
item._parent = node
self.visit(item)
elif isinstance(value, ast.AST):
value._parent = node
self.visit(value)
def _filter_imports(self, module_name, alias):
"""Keeps lists of logging and i18n imports
"""
if module_name in self.LOG_MODULES:
self.logger_module_names.append(alias.asname or alias.name)
elif module_name in self.I18N_MODULES:
self.i18n_names[alias.asname or alias.name] = alias.name
def visit_Import(self, node):
for alias in node.names:
self._filter_imports(alias.name, alias)
return super(CheckForLoggingIssues, self).generic_visit(node)
def visit_ImportFrom(self, node):
for alias in node.names:
full_name = '%s.%s' % (node.module, alias.name)
self._filter_imports(full_name, alias)
return super(CheckForLoggingIssues, self).generic_visit(node)
def _find_name(self, node):
"""Return the fully qualified name or a Name or Attribute."""
if isinstance(node, ast.Name):
return node.id
elif (isinstance(node, ast.Attribute)
and isinstance(node.value, (ast.Name, ast.Attribute))):
method_name = node.attr
obj_name = self._find_name(node.value)
if obj_name is None:
return None
return obj_name + '.' + method_name
elif isinstance(node, six.string_types):
return node
else: # could be Subscript, Call or many more
return None
def visit_Assign(self, node):
"""Look for 'LOG = logging.getLogger'
This handles the simple case:
name = [logging_module].getLogger(...)
- or -
name = [i18n_name](...)
        And some more complex ones:
name = [i18n_name](...) % X
- or -
self.name = [i18n_name](...) % X
"""
attr_node_types = (ast.Name, ast.Attribute)
if (len(node.targets) != 1
or not isinstance(node.targets[0], attr_node_types)):
# say no to: "x, y = ..."
return super(CheckForLoggingIssues, self).generic_visit(node)
target_name = self._find_name(node.targets[0])
if (isinstance(node.value, ast.BinOp) and
isinstance(node.value.op, ast.Mod)):
if (isinstance(node.value.left, ast.Call) and
isinstance(node.value.left.func, ast.Name) and
node.value.left.func.id in self.i18n_names):
# NOTE(dstanek): this is done to match cases like:
# `msg = _('something %s') % x`
node = ast.Assign(value=node.value.left)
if not isinstance(node.value, ast.Call):
# node.value must be a call to getLogger
self.assignments.pop(target_name, None)
return super(CheckForLoggingIssues, self).generic_visit(node)
# is this a call to an i18n function?
if (isinstance(node.value.func, ast.Name)
and node.value.func.id in self.i18n_names):
self.assignments[target_name] = node.value.func.id
return super(CheckForLoggingIssues, self).generic_visit(node)
if (not isinstance(node.value.func, ast.Attribute)
or not isinstance(node.value.func.value, attr_node_types)):
# function must be an attribute on an object like
# logging.getLogger
return super(CheckForLoggingIssues, self).generic_visit(node)
object_name = self._find_name(node.value.func.value)
func_name = node.value.func.attr
if (object_name in self.logger_module_names
and func_name == 'getLogger'):
self.logger_names.append(target_name)
return super(CheckForLoggingIssues, self).generic_visit(node)
def visit_Call(self, node):
"""Look for the 'LOG.*' calls.
"""
# obj.method
if isinstance(node.func, ast.Attribute):
obj_name = self._find_name(node.func.value)
if isinstance(node.func.value, ast.Name):
method_name = node.func.attr
elif isinstance(node.func.value, ast.Attribute):
obj_name = self._find_name(node.func.value)
method_name = node.func.attr
else: # could be Subscript, Call or many more
return super(CheckForLoggingIssues, self).generic_visit(node)
        # must be a logger instance and one of the supported logging methods
if (obj_name not in self.logger_names
or method_name not in self.TRANS_HELPER_MAP):
return super(CheckForLoggingIssues, self).generic_visit(node)
# the call must have arguments
if not len(node.args):
return super(CheckForLoggingIssues, self).generic_visit(node)
if method_name == 'debug':
self._process_debug(node)
elif method_name in self.TRANS_HELPER_MAP:
self._process_non_debug(node, method_name)
return super(CheckForLoggingIssues, self).generic_visit(node)
def _process_debug(self, node):
msg = node.args[0] # first arg to a logging method is the msg
# if first arg is a call to a i18n name
if (isinstance(msg, ast.Call)
and isinstance(msg.func, ast.Name)
and msg.func.id in self.i18n_names):
self.add_error(msg, message=self.DEBUG_CHECK_DESC)
# if the first arg is a reference to a i18n call
elif (isinstance(msg, ast.Name)
and msg.id in self.assignments
and not self._is_raised_later(node, msg.id)):
self.add_error(msg, message=self.DEBUG_CHECK_DESC)
def _process_non_debug(self, node, method_name):
msg = node.args[0] # first arg to a logging method is the msg
# if first arg is a call to a i18n name
if isinstance(msg, ast.Call):
try:
func_name = msg.func.id
except AttributeError:
# in the case of logging only an exception, the msg function
# will not have an id associated with it, for instance:
# LOG.warning(six.text_type(e))
return
# the function name is the correct translation helper
# for the logging method
if func_name == self.TRANS_HELPER_MAP[method_name]:
return
# the function name is an alias for the correct translation
            # helper for the logging method
if (self.i18n_names[func_name] ==
self.TRANS_HELPER_MAP[method_name]):
return
self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
# if the first arg is not a reference to the correct i18n hint
elif isinstance(msg, ast.Name):
            # FIXME(dstanek): to make this more robust we should be checking
# all names passed into a logging method. we can't right now
# because:
# 1. We have code like this that we'll fix when dealing with the %:
# msg = _('....') % {}
# LOG.warn(msg)
# 2. We also do LOG.exception(e) in several places. I'm not sure
# exactly what we should be doing about that.
if msg.id not in self.assignments:
return
helper_method_name = self.TRANS_HELPER_MAP[method_name]
if (self.assignments[msg.id] != helper_method_name
and not self._is_raised_later(node, msg.id)):
self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
elif (self.assignments[msg.id] == helper_method_name
and self._is_raised_later(node, msg.id)):
self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC)
def _is_raised_later(self, node, name):
def find_peers(node):
node_for_line = node._parent
for _field, value in ast.iter_fields(node._parent._parent):
if isinstance(value, list) and node_for_line in value:
return value[value.index(node_for_line) + 1:]
continue
return []
peers = find_peers(node)
for peer in peers:
if isinstance(peer, ast.Raise):
if (isinstance(peer.type, ast.Call) and
len(peer.type.args) > 0 and
isinstance(peer.type.args[0], ast.Name) and
name in (a.id for a in peer.type.args)):
return True
else:
return False
elif isinstance(peer, ast.Assign):
if name in (t.id for t in peer.targets):
return False
def check_oslo_namespace_imports(logical_line, blank_before, filename):
oslo_namespace_imports = re.compile(
r"(((from)|(import))\s+oslo\.)|(from\s+oslo\s+import\s+)")
if re.match(oslo_namespace_imports, logical_line):
msg = ("K333: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
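# For example, "import oslo.config" or "from oslo import messaging" would be
# flagged by the check above; the oslo_ namespace packages (oslo_config,
# oslo_messaging, ...) should be imported instead.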
def factory(register):
register(CheckForMutableDefaultArgs)
register(block_comments_begin_with_a_space)
register(CheckForAssertingNoneEquality)
register(CheckForLoggingIssues)
register(check_oslo_namespace_imports)
|
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Chassis100ChassisActions(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Chassis100ChassisActions - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'oem': 'object',
'chassis_reset': 'Chassis100Reset'
}
self.attribute_map = {
'oem': 'Oem',
'chassis_reset': '#Chassis.Reset'
}
self._oem = None
self._chassis_reset = None
@property
def oem(self):
"""
Gets the oem of this Chassis100ChassisActions.
:return: The oem of this Chassis100ChassisActions.
:rtype: object
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Chassis100ChassisActions.
:param oem: The oem of this Chassis100ChassisActions.
:type: object
"""
self._oem = oem
@property
def chassis_reset(self):
"""
Gets the chassis_reset of this Chassis100ChassisActions.
:return: The chassis_reset of this Chassis100ChassisActions.
:rtype: Chassis100Reset
"""
return self._chassis_reset
@chassis_reset.setter
def chassis_reset(self, chassis_reset):
"""
Sets the chassis_reset of this Chassis100ChassisActions.
:param chassis_reset: The chassis_reset of this Chassis100ChassisActions.
:type: Chassis100Reset
"""
self._chassis_reset = chassis_reset
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
"""
lunaport.dao.line
~~~~~~~~~~~~~~~~~
Storage interaction logic for line resource.
"""
import pprint
pp = pprint.PrettyPrinter(indent=4).pprint
from sqlalchemy import text, exc
from ..wsgi import app, db
from .. domain.line import LineBuilder, LineAdaptor
from exceptions import StorageError
class Filter(object):
params_allowed = {
'name': (
"AND name LIKE '%:name%'"),
}
cast_to_int = []
def __init__(self, **kw):
self.rule = []
self.q_params = {}
for p, v in kw.iteritems():
if p not in self.params_allowed.keys():
continue
elif isinstance(v, (unicode, basestring)):
self.rule.append(self.params_allowed[p][0])
self.q_params.update({p: v})
else:
raise StorageError('Wrong *{}* param type.'.format(p))
def cmpl_query(self):
sql_text = '\n' + ' '.join(self.rule)
return sql_text, self.q_params
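    # Illustrative example: Filter(name='foo').cmpl_query() yields the extra
    # WHERE clause and its bind params, ("\nAND name LIKE '%:name%'", {'name': 'foo'}).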
class Dao(object):
"""Interface for line storage"""
@classmethod
def insert(cls, ammo):
        raise NotImplementedError()
@classmethod
def get_single(cls, **kw):
        raise NotImplementedError()
@classmethod
def get_many(cls, **kw):
        raise NotImplementedError()
class RDBMS(Dao):
"""PostgreSQL wrapper, implementing line.dao interface"""
per_page_default = app.config.get('LINE_PER_PAGE_DEFAULT') or 10
per_page_max = app.config.get('LINE_PER_PAGE_MAX') or 100
select_join_part = '''
SELECT l.*,
dc.name AS dc_name
FROM line l,
dc dc
WHERE l.dc_id = dc.id'''
@staticmethod
def rdbms_call(q_text, q_params):
return db.engine.connect().execute(text(q_text), **q_params)
@classmethod
def insert(cls, line):
kw = LineAdaptor.to_dict(line)
kw['dc_name'] = kw['dc']['name']
pp(kw)
def query():
return cls.rdbms_call('''
INSERT INTO line
(
id,
name,
dc_id
)
VALUES (
:id,
:name,
(SELECT id FROM dc WHERE name = :dc_name)
)
returning id''', kw)
        err_duplicate = 'line:{} already exists'.format(kw.get('name'))
try:
pk_id = [r for r in query()].pop()[0]
except exc.IntegrityError as e:
if 'unique constraint "line_pkey"' in str(e):
raise StorageError(err_duplicate)
raise StorageError('Some kind of IntegrityError')
return pk_id
@classmethod
def get_single(cls, **kw):
if kw.get('line_id'):
query_params = {
'line_id': kw.get('line_id'),
}
rv = cls.rdbms_call(' '.join([cls.select_join_part, 'AND l.id = :line_id']), query_params)
row = rv.first()
if not row:
return None
t_kw = dict(zip(rv.keys(), row))
return LineBuilder.from_row(**t_kw)
@classmethod
def get_many(cls, **kw):
"""pagination"""
pagination_part = '\nORDER BY id DESC\nLIMIT :limit OFFSET :offset'
param_per_page = kw.get('per_page')
if param_per_page and (param_per_page <= cls.per_page_max):
per_page = param_per_page
else:
per_page = cls.per_page_default
page_num = kw.get('page')
# page number starts from 1, page 0 and 1 mean the same -
# first slice from data set.
if page_num and isinstance(page_num, int) and (page_num >= 2):
offset = (page_num - 1) * per_page
next_page = page_num + 1
prev_page = page_num - 1
else:
offset = 0
next_page = 2
prev_page = None
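        # e.g. page=3 with per_page=10 gives offset=20, next_page=4, prev_page=2
        # (illustrative values).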
query_params = {
'limit': per_page,
'offset': offset,
}
"""filtering"""
f = Filter(**kw)
filter_part, q_params_up = f.cmpl_query()
query_params.update(q_params_up)
rv = cls.rdbms_call(
''.join([cls.select_join_part, filter_part, pagination_part]),
query_params)
rows = rv.fetchall()
if len(rows) == 0:
return None, None, None, None
elif len(rows) < per_page: # last chunk of data
next_page = None
def create_dc(row):
t_kw = dict(zip(rv.keys(), row))
return LineBuilder.from_row(**t_kw)
return map(create_dc, rows), per_page, next_page, prev_page
|
import os
import uuid
import pkg_resources
from pifpaf import drivers
class CephDriver(drivers.Driver):
DEFAULT_PORT = 6790
def __init__(self, port=DEFAULT_PORT,
**kwargs):
"""Create a new Ceph cluster."""
super(CephDriver, self).__init__(**kwargs)
self.port = port
@classmethod
def get_options(cls):
return [
{"param_decls": ["--port"],
"type": int,
"default": cls.DEFAULT_PORT,
"help": "port to use for Ceph Monitor"},
]
def _setUp(self):
super(CephDriver, self)._setUp()
self._ensure_xattr_support()
fsid = str(uuid.uuid4())
conffile = os.path.join(self.tempdir, "ceph.conf")
mondir = os.path.join(self.tempdir, "mon", "ceph-a")
osddir = os.path.join(self.tempdir, "osd", "ceph-0")
os.makedirs(mondir)
os.makedirs(osddir)
_, version = self._exec(["ceph", "--version"], stdout=True)
version = version.decode("ascii").split()[2]
version = pkg_resources.parse_version(version)
if version < pkg_resources.parse_version("12.0.0"):
extra = """
mon_osd_nearfull_ratio = 1
mon_osd_full_ratio = 1
osd_failsafe_nearfull_ratio = 1
osd_failsafe_full_ratio = 1
"""
else:
extra = """
mon_allow_pool_delete = true
"""
        # FIXME(sileht): check available space on /dev/shm
# if os.path.exists("/dev/shm") and os.access('/dev/shm', os.W_OK):
# journal_path = "/dev/shm/$cluster-$id-journal"
# else:
journal_path = "%s/osd/$cluster-$id/journal" % self.tempdir
with open(conffile, "w") as f:
f.write("""[global]
fsid = %(fsid)s
auth cluster required = none
auth service required = none
auth client required = none
osd pool default size = 1
osd pool default min size = 1
osd crush chooseleaf type = 0
run dir = %(tempdir)s
pid file = %(tempdir)s/$type.$id.pid
admin socket = %(tempdir)s/$cluster-$name.asok
mon data = %(tempdir)s/mon/$cluster-$id
osd data = %(tempdir)s/osd/$cluster-$id
osd journal = %(journal_path)s
log file = %(tempdir)s/$cluster-$name.log
mon cluster log file = %(tempdir)s/$cluster.log
filestore xattr use omap = True
osd max object name len = 256
osd max object namespace len = 64
osd op threads = 10
filestore max sync interval = 10001
filestore min sync interval = 10000
%(extra)s
journal_aio = false
journal_dio = false
journal zero on create = false
journal block align = false
setuser match path = %(tempdir)s/$type/$cluster-$id
[mon.a]
host = localhost
mon addr = 127.0.0.1:%(port)d
""" % dict(fsid=fsid, tempdir=self.tempdir, port=self.port,
journal_path=journal_path, extra=extra)) # noqa
ceph_opts = ["ceph", "-c", conffile]
mon_opts = ["ceph-mon", "-c", conffile, "--id", "a", "-d"]
osd_opts = ["ceph-osd", "-c", conffile, "--id", "0", "-d",
"-m", "127.0.0.1:%d" % self.port]
# Create and start monitor
self._exec(mon_opts + ["--mkfs"])
self._touch(os.path.join(mondir, "done"))
mon, _ = self._exec(
mon_opts,
wait_for_line=r"mon.a@0\(leader\).mds e1 print_map")
# Create and start OSD
self._exec(ceph_opts + ["osd", "create"])
self._exec(ceph_opts + ["osd", "crush", "add", "osd.0", "1",
"root=default"])
self._exec(osd_opts + ["--mkfs", "--mkjournal"])
if version < pkg_resources.parse_version("0.94.0"):
wait_for_line = "journal close"
else:
wait_for_line = "done with init"
osd, _ = self._exec(osd_opts, wait_for_line=wait_for_line)
if version >= pkg_resources.parse_version("12.0.0"):
self._exec(ceph_opts + ["osd", "set-full-ratio", "0.95"])
self._exec(ceph_opts + ["osd", "set-backfillfull-ratio", "0.95"])
self._exec(ceph_opts + ["osd", "set-nearfull-ratio", "0.95"])
# Wait it's ready
out = b""
while b"HEALTH_OK" not in out:
ceph, out = self._exec(ceph_opts + ["health"], stdout=True)
if b"HEALTH_ERR" in out:
raise RuntimeError("Fail to deploy ceph")
self.putenv("CEPH_CONF", conffile, True)
self.putenv("CEPH_CONF", conffile)
self.putenv("URL", "ceph://localhost:%d" % self.port)
|
import unittest
import tagging
class TestRealizerArbitraryReordering(unittest.TestCase):
"""
Tests for the realizer with arbitrary reordering
enabled.
"""
def test_realize_output_in_order(self):
"""
Test for when source tokens occur
in the same relative order in the
target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|0', 'KEEP|1', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word1 word2 and word3 "
self.assertEqual(expected, result)
def test_realize_output_out_of_order(self):
"""
Test for when the source tokens
do not occur in the same relative order
in the target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|1', 'KEEP|0', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word2 word1 and word3 "
self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()
|
"""Exposes a RESTful interface ."""
import uuid
import empower_core.apimanager.apimanager as apimanager
class AlertsHandler(apimanager.APIHandler):
"""Alerts handler"""
URLS = [r"/api/v1/alerts/?",
r"/api/v1/alerts/([a-zA-Z0-9-]*)/?"]
@apimanager.validate(min_args=0, max_args=1)
def get(self, *args, **kwargs):
"""Lists all the alerts.
Args:
[0], the alert id (optional)
Example URLs:
GET /api/v1/alerts
GET /api/v1/alerts/52313ecb-9d00-4b7d-b873-b55d3d9ada26
"""
return self.service.alerts \
if not args else self.service.alerts[uuid.UUID(args[0])]
@apimanager.validate(returncode=201, min_args=0, max_args=1)
def post(self, *args, **kwargs):
"""Create a new alert.
Args:
[0], the alert id (optional)
Request:
version: protocol version (1.0)
alert: the alert
"""
alert_id = uuid.UUID(args[0]) if args else uuid.uuid4()
if 'alert' in kwargs:
alert = self.service.create(uuid=alert_id, alert=kwargs['alert'])
else:
alert = self.service.create(uuid=alert_id)
self.set_header("Location", "/api/v1/alerts/%s" % alert.uuid)
@apimanager.validate(returncode=204, min_args=0, max_args=1)
def delete(self, *args, **kwargs):
"""Delete one or all alerts.
Args:
[0], the alert id (optional)
Example URLs:
DELETE /api/v1/alerts
DELETE /api/v1/alerts/52313ecb-9d00-4b7d-b873-b55d3d9ada26
"""
if args:
self.service.remove(uuid.UUID(args[0]))
else:
self.service.remove_all()
|
import warnings; warnings.filterwarnings(action='ignore', category=DeprecationWarning, module="sgmllib")
import threading
import time
import os
import socket, urlparse, urllib, urllib2
import base64
import htmlentitydefs
import sgmllib
import re
import xml.dom.minidom
import StringIO
import bisect
import new
import api
import feed
import oauth
import json
import locale
from feed import feedparser
from soup import BeautifulSoup
try:
# Import persistent Cache.
# If this module is used separately, a dict is used (i.e. for this Python session only).
from cache import Cache, cache, TMP
except:
cache = {}
try:
from imap import Mail, MailFolder, Message, GMAIL
from imap import MailError, MailServiceError, MailLoginError, MailNotLoggedIn
from imap import FROM, SUBJECT, DATE, BODY, ATTACHMENTS
except:
pass
try:
MODULE = os.path.dirname(os.path.abspath(__file__))
except:
MODULE = ""
def decode_utf8(string):
""" Returns the given string as a unicode string (if possible).
"""
if isinstance(string, str):
for encoding in (("utf-8",), ("windows-1252",), ("utf-8", "ignore")):
try:
return string.decode(*encoding)
except:
pass
return string
return unicode(string)
def encode_utf8(string):
""" Returns the given string as a Python byte string (if possible).
"""
if isinstance(string, unicode):
try:
return string.encode("utf-8")
except:
return string
return str(string)
u = decode_utf8
s = encode_utf8
bytestring = s
class AsynchronousRequest:
def __init__(self, function, *args, **kwargs):
""" Executes the function in the background.
AsynchronousRequest.done is False as long as it is busy, but the program will not halt in the meantime.
AsynchronousRequest.value contains the function's return value once done.
            AsynchronousRequest.error contains the Exception raised by an erroneous function.
For example, this is useful for running live web requests while keeping an animation running.
For good reasons, there is no way to interrupt a background process (i.e. Python thread).
You are responsible for ensuring that the given function doesn't hang.
"""
self._response = None # The return value of the given function.
self._error = None # The exception (if any) raised by the function.
self._time = time.time()
self._function = function
self._thread = threading.Thread(target=self._fetch, args=(function,)+args, kwargs=kwargs)
self._thread.start()
def _fetch(self, function, *args, **kwargs):
""" Executes the function and sets AsynchronousRequest.response.
"""
try:
self._response = function(*args, **kwargs)
except Exception, e:
self._error = e
def now(self):
""" Waits for the function to finish and yields its return value.
"""
self._thread.join(); return self._response
@property
def elapsed(self):
return time.time() - self._time
@property
def done(self):
return not self._thread.isAlive()
@property
def value(self):
return self._response
@property
def error(self):
return self._error
def __repr__(self):
return "AsynchronousRequest(function='%s')" % self._function.__name__
def asynchronous(function, *args, **kwargs):
""" Returns an AsynchronousRequest object for the given function.
"""
return AsynchronousRequest(function, *args, **kwargs)
send = asynchronous
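# Illustrative usage (the URL below is a placeholder):
#   request = asynchronous(urllib.urlopen, "http://www.example.com/")
#   while not request.done:
#       time.sleep(0.1)  # keep doing other work while the request runs
#   html = request.value.read() if not request.error else None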
USER_AGENT = "Pattern/2.3 +http://www.clips.ua.ac.be/pages/pattern"
REFERRER = "http://www.clips.ua.ac.be/pages/pattern"
MOZILLA = "Mozilla/5.0"
GET = "get" # Data is encoded in the URL.
POST = "post" # Data is encoded in the message body.
PROTOCOL, USERNAME, PASSWORD, DOMAIN, PORT, PATH, PAGE, QUERY, ANCHOR = \
"protocol", "username", "password", "domain", "port", "path", "page", "query", "anchor"
MIMETYPE_WEBPAGE = ["text/html"]
MIMETYPE_STYLESHEET = ["text/css"]
MIMETYPE_PLAINTEXT = ["text/plain"]
MIMETYPE_PDF = ["application/pdf"]
MIMETYPE_NEWSFEED = ["application/rss+xml", "application/atom+xml"]
MIMETYPE_IMAGE = ["image/gif", "image/jpeg", "image/png", "image/tiff"]
MIMETYPE_AUDIO = ["audio/mpeg", "audio/mp4", "audio/x-aiff", "audio/x-wav"]
MIMETYPE_VIDEO = ["video/mpeg", "video/mp4", "video/quicktime"]
MIMETYPE_ARCHIVE = ["application/x-stuffit", "application/x-tar", "application/zip"]
MIMETYPE_SCRIPT = ["application/javascript", "application/ecmascript"]
def extension(filename):
""" Returns the extension in the given filename: "cat.jpg" => ".jpg".
"""
return os.path.splitext(filename)[1]
def urldecode(query):
""" Inverse operation of urllib.urlencode.
Returns a dictionary of (name, value)-items from a URL query string.
"""
def _format(s):
if s == "None":
return None
if s.isdigit():
return int(s)
try: return float(s)
except:
return s
query = [(kv.split("=")+[None])[:2] for kv in query.lstrip("?").split("&")]
query = [(urllib.unquote_plus(bytestring(k)), urllib.unquote_plus(bytestring(v))) for k, v in query]
query = [(u(k), u(v)) for k, v in query]
query = [(k, _format(v) or None) for k, v in query]
query = dict([(k,v) for k, v in query if k != ""])
return query
url_decode = urldecode
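# For example (illustrative): urldecode("?species=seagull&page=2")
# returns {u"species": u"seagull", u"page": 2}.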
def proxy(host, protocol="https"):
""" Returns the value for the URL.open() proxy parameter.
- host: host address of the proxy server.
"""
return (host, protocol)
class URLError(Exception):
pass # URL contains errors (e.g. a missing t in htp://).
class URLTimeout(URLError):
    pass # URL takes too long to load.
class HTTPError(URLError):
pass # URL causes an error on the contacted server.
class HTTP301Redirect(HTTPError):
pass # Too many redirects.
# The site may be trying to set a cookie and waiting for you to return it,
# or taking other measures to discern a browser from a script.
# For specific purposes you should build your own urllib2.HTTPRedirectHandler
# and pass it to urllib2.build_opener() in URL.open()
class HTTP400BadRequest(HTTPError):
pass # URL contains an invalid request.
class HTTP401Authentication(HTTPError):
pass # URL requires a login and password.
class HTTP403Forbidden(HTTPError):
pass # URL is not accessible (user-agent?)
class HTTP404NotFound(HTTPError):
pass # URL doesn't exist on the internet.
class HTTP420Error(HTTPError):
pass # Used by Twitter for rate limiting.
class HTTP500InternalServerError(HTTPError):
pass # Generic server error.
class URL:
def __init__(self, string=u"", method=GET, query={}):
""" URL object with the individual parts available as attributes:
For protocol://username:password@domain:port/path/page?query_string#anchor:
- URL.protocol: http, https, ftp, ...
- URL.username: username for restricted domains.
- URL.password: password for restricted domains.
- URL.domain : the domain name, e.g. nodebox.net.
- URL.port : the server port to connect to.
- URL.path : the server path of folders, as a list, e.g. ['news', '2010']
- URL.page : the page name, e.g. page.html.
- URL.query : the query string as a dictionary of (name, value)-items.
- URL.anchor : the page anchor.
If method is POST, the query string is sent with HTTP POST.
"""
self.__dict__["method"] = method # Use __dict__ directly since __setattr__ is overridden.
self.__dict__["_string"] = u(string)
self.__dict__["_parts"] = None
self.__dict__["_headers"] = None
self.__dict__["_redirect"] = None
if isinstance(string, URL):
self.__dict__["method"] = string.method
self.query.update(string.query)
if len(query) > 0:
# Requires that we parse the string first (see URL.__setattr__).
self.query.update(query)
def _parse(self):
""" Parses all the parts of the URL string to a dictionary.
            URL format: protocol://username:password@domain:port/path/page?querystring#anchor
For example: http://user:pass@example.com:992/animal/bird?species=seagull&q#wings
This is a cached method that is only invoked when necessary, and only once.
"""
p = urlparse.urlsplit(self._string)
P = {PROTOCOL: p[0], # http
USERNAME: u"", # user
PASSWORD: u"", # pass
DOMAIN: p[1], # example.com
PORT: u"", # 992
PATH: p[2], # [animal]
PAGE: u"", # bird
QUERY: urldecode(p[3]), # {"species": "seagull", "q": None}
ANCHOR: p[4] # wings
}
# Split the username and password from the domain.
if "@" in P[DOMAIN]:
P[USERNAME], \
P[PASSWORD] = (p[1].split("@")[0].split(":")+[u""])[:2]
P[DOMAIN] = p[1].split("@")[1]
# Split the port number from the domain.
if ":" in P[DOMAIN]:
P[DOMAIN], \
P[PORT] = P[DOMAIN].split(":")
P[PORT] = int(P[PORT])
# Split the base page from the path.
if "/" in P[PATH]:
P[PAGE] = p[2].split("/")[-1]
P[PATH] = p[2][:len(p[2])-len(P[PAGE])].strip("/").split("/")
P[PATH] = filter(lambda v: v != "", P[PATH])
else:
P[PAGE] = p[2].strip("/")
P[PATH] = []
self.__dict__["_parts"] = P
# URL.string yields unicode(URL) by joining the different parts,
# if the URL parts have been modified.
def _get_string(self): return unicode(self)
def _set_string(self, v):
self.__dict__["_string"] = u(v)
self.__dict__["_parts"] = None
string = property(_get_string, _set_string)
@property
def parts(self):
""" Yields a dictionary with the URL parts.
"""
if not self._parts: self._parse()
return self._parts
@property
def querystring(self):
""" Yields the URL querystring: "www.example.com?page=1" => "page=1"
"""
s = self.parts[QUERY].items()
s = dict((bytestring(k), bytestring(v if v is not None else "")) for k, v in s)
s = urllib.urlencode(s)
return s
def __getattr__(self, k):
if k in self.__dict__ : return self.__dict__[k]
if k in self.parts : return self.__dict__["_parts"][k]
raise AttributeError, "'URL' object has no attribute '%s'" % k
def __setattr__(self, k, v):
if k in self.__dict__ : self.__dict__[k] = u(v); return
if k == "string" : self._set_string(v); return
if k == "query" : self.parts[k] = v; return
if k in self.parts : self.__dict__["_parts"][k] = u(v); return
raise AttributeError, "'URL' object has no attribute '%s'" % k
def open(self, timeout=10, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None):
""" Returns a connection to the url from which data can be retrieved with connection.read().
When the timeout amount of seconds is exceeded, raises a URLTimeout.
When an error occurs, raises a URLError (e.g. HTTP404NotFound).
"""
url = self.string
# Use basic urllib.urlopen() instead of urllib2.urlopen() for local files.
if os.path.exists(url):
return urllib.urlopen(url)
# Get the query string as a separate parameter if method=POST.
post = self.method == POST and self.querystring or None
socket.setdefaulttimeout(timeout)
if proxy:
proxy = urllib2.ProxyHandler({proxy[1]: proxy[0]})
proxy = urllib2.build_opener(proxy, urllib2.HTTPHandler)
urllib2.install_opener(proxy)
try:
request = urllib2.Request(bytestring(url), post, {
"User-Agent": user_agent,
"Referer": referrer
})
# Basic authentication is established with authentication=(username, password).
if authentication is not None:
request.add_header("Authorization", "Basic %s" %
base64.encodestring('%s:%s' % authentication))
return urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 301: raise HTTP301Redirect
if e.code == 400: raise HTTP400BadRequest
if e.code == 401: raise HTTP401Authentication
if e.code == 403: raise HTTP403Forbidden
if e.code == 404: raise HTTP404NotFound
if e.code == 420: raise HTTP420Error
if e.code == 500: raise HTTP500InternalServerError
raise HTTPError
except socket.timeout:
raise URLTimeout
except urllib2.URLError, e:
if e.reason == "timed out" \
or e.reason[0] in (36, "timed out"):
raise URLTimeout
raise URLError, e.reason
except ValueError, e:
raise URLError, e
def download(self, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False, **kwargs):
""" Downloads the content at the given URL (by default it will be cached locally).
            With unicode=True, the content is returned as a unicode string.
"""
# Filter OAuth parameters from cache id (they will be unique for each request).
if self._parts is None and self.method == GET and "oauth_" not in self._string:
id = self._string
else:
id = repr(self.parts)
id = re.sub("u{0,1}'oauth_.*?': u{0,1}'.*?', ", "", id)
# Keep a separate cache of unicode and raw download for same URL.
if unicode is True:
id = "u" + id
if cached and id in cache:
if isinstance(cache, dict): # Not a Cache object.
return cache[id]
if unicode is True:
return cache[id]
if unicode is False:
return cache.get(id, unicode=False)
t = time.time()
# Open a connection with the given settings, read it and (by default) cache the data.
data = self.open(timeout, proxy, user_agent, referrer, authentication).read()
if unicode is True:
data = u(data)
if cached:
cache[id] = data
if throttle:
time.sleep(max(throttle-(time.time()-t), 0))
return data
def read(self, *args):
return self.open().read(*args)
@property
def exists(self, timeout=10):
""" Yields False if the URL generates a HTTP404NotFound error.
"""
try: self.open(timeout)
except HTTP404NotFound:
return False
        except (HTTPError, URLTimeout):
return True
except URLError:
return False
except:
return True
return True
@property
def mimetype(self, timeout=10):
""" Yields the MIME-type of the document at the URL, or None.
MIME is more reliable than simply checking the document extension.
You can then do: URL.mimetype in MIMETYPE_IMAGE.
"""
try:
return self.headers["content-type"].split(";")[0]
except KeyError:
return None
@property
def headers(self, timeout=10):
""" Yields a dictionary with the HTTP response headers.
"""
if self.__dict__["_headers"] is None:
try:
h = dict(self.open(timeout).info())
except URLError:
h = {}
self.__dict__["_headers"] = h
return self.__dict__["_headers"]
@property
def redirect(self, timeout=10):
""" Yields the redirected URL, or None.
"""
if self.__dict__["_redirect"] is None:
try:
r = self.open(timeout).geturl()
except URLError:
r = None
self.__dict__["_redirect"] = r != self.string and r or ""
return self.__dict__["_redirect"] or None
def __str__(self):
return bytestring(self.string)
def __unicode__(self):
# The string representation includes the query attributes with HTTP GET.
# This gives us the advantage of not having to parse the URL
# when no separate query attributes were given (e.g. all info is in URL._string):
if self._parts is None and self.method == GET:
return self._string
P = self._parts
u = []
if P[PROTOCOL]:
u.append("%s://" % P[PROTOCOL])
if P[USERNAME]:
u.append("%s:%s@" % (P[USERNAME], P[PASSWORD]))
if P[DOMAIN]:
u.append(P[DOMAIN])
if P[PORT]:
u.append(":%s" % P[PORT])
if P[PATH]:
u.append("/%s/" % "/".join(P[PATH]))
if P[PAGE] and len(u) > 0:
u[-1] = u[-1].rstrip("/")
if P[PAGE]:
u.append("/%s" % P[PAGE])
if P[QUERY] and self.method == GET:
u.append("?%s" % self.querystring)
if P[ANCHOR]:
u.append("#%s" % P[ANCHOR])
u = u"".join(u)
u = u.lstrip("/")
return u
def __repr__(self):
return "URL('%s', method='%s')" % (str(self), str(self.method))
def copy(self):
return URL(self.string, self.method, self.query)
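# Illustrative sketch of URL parsing (no network access involved), based on the example
# in URL._parse():
#   url = URL("http://user:pass@example.com:992/animal/bird?species=seagull#wings")
#   print url.domain # "example.com"
#   print url.port   # 992
#   print url.path   # ["animal"]
#   print url.page   # "bird"
#   print url.query  # {"species": "seagull"}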
def download(url=u"", method=GET, query={}, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False):
""" Downloads the content at the given URL (by default it will be cached locally).
        With unicode=True, the content is returned as a unicode string.
"""
return URL(url, method, query).download(timeout, cached, throttle, proxy, user_agent, referrer, authentication, unicode)
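# The download() function above is a one-call shortcut for URL(url).download().
# A hedged example (assumes network access and that the page is reachable):
#   html = download("http://www.clips.ua.ac.be/pages/pattern", unicode=True, cached=True)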
def bind(object, method, function):
""" Attaches the function as a method with the given name to the given object.
"""
setattr(object, method, new.instancemethod(function, object))
class Stream(list):
def __init__(self, url, delimiter="\n", **kwargs):
""" Buffered stream of data from a given URL.
"""
self.socket = URL(url).open(**kwargs)
self.buffer = ""
self.delimiter = delimiter
def update(self, bytes=1024):
""" Reads a number of bytes from the stream.
If a delimiter is encountered, calls Stream.parse() on the packet.
"""
packets = []
self.buffer += self.socket.read(bytes)
self.buffer = self.buffer.split(self.delimiter, 1)
while len(self.buffer) > 1:
data = self.buffer[0]
data = self.parse(data)
packets.append(data)
self.buffer = self.buffer[-1]
self.buffer = self.buffer.split(self.delimiter, 1)
self.buffer = self.buffer[-1]
self.extend(packets)
return packets
def parse(self, data):
""" Must be overridden in a subclass.
"""
return data
def clear(self):
list.__init__(self, [])
def stream(url, delimiter="\n", parse=lambda data: data, **kwargs):
""" Returns a new Stream with the given parse method.
"""
stream = Stream(url, delimiter, **kwargs)
bind(stream, "parse", lambda stream, data: parse(data))
return stream
RE_URL_PUNCTUATION = ("\"'{(>", "\"'.,;)}")
RE_URL_HEAD = r"[%s|\[|\s]" % "|".join(RE_URL_PUNCTUATION[0]) # Preceded by space, parenthesis or HTML tag.
RE_URL_TAIL = r"[%s|\]]*[\s|\<]" % "|".join(RE_URL_PUNCTUATION[1]) # Followed by space, punctuation or HTML tag.
RE_URL1 = r"(https?://.*?)" + RE_URL_TAIL # Starts with http:// or https://
RE_URL2 = RE_URL_HEAD + r"(www\..*?\..*?)" + RE_URL_TAIL # Starts with www.
RE_URL3 = RE_URL_HEAD + r"([\w|-]*?\.(com|net|org))" + RE_URL_TAIL # Ends with .com, .net, .org
RE_URL1, RE_URL2, RE_URL3 = (
re.compile(RE_URL1, re.I),
re.compile(RE_URL2, re.I),
re.compile(RE_URL3, re.I))
def find_urls(string, unique=True):
""" Returns a list of URLs parsed from the string.
Works on http://, https://, www. links or domain names ending in .com, .org, .net.
Links can be preceded by leading punctuation (open parens)
and followed by trailing punctuation (period, comma, close parens).
"""
string = u(string)
string = string.replace(u"\u2024", ".")
string = string.replace(" ", " ")
matches = []
for p in (RE_URL1, RE_URL2, RE_URL3):
for m in p.finditer(" %s " % string):
s = m.group(1)
s = s.split("\">")[0].split("'>")[0] # google.com">Google => google.com
if not unique or s not in matches:
matches.append(s)
return matches
links = find_urls
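# For example (illustrative):
#   find_urls("Visit http://www.clips.ua.ac.be/pages/pattern for details.")
#   # => [u"http://www.clips.ua.ac.be/pages/pattern"]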
RE_EMAIL = re.compile(r"[\w\-\.\+]+@(\w[\w\-]+\.)+[\w\-]+") # tom.de+smedt@clips.ua.ac.be
def find_email(string, unique=True):
""" Returns a list of e-mail addresses parsed from the string.
"""
string = u(string).replace(u"\u2024", ".")
matches = []
for m in RE_EMAIL.finditer(string):
s = m.group(0)
if not unique or s not in matches:
matches.append(s)
return matches
def find_between(a, b, string):
""" Returns a list of substrings between a and b in the given string.
"""
p = "%s(.*?)%s" % (a, b)
p = re.compile(p, re.DOTALL | re.I)
return [m for m in p.findall(string)]
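# For example (illustrative):
#   find_email("Mail tom.de+smedt@clips.ua.ac.be please.")   # => [u"tom.de+smedt@clips.ua.ac.be"]
#   find_between("<b>", "</b>", "<b>hello</b> <b>world</b>") # => ["hello", "world"]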
BLOCK = [
"title", "h1", "h2", "h3", "h4", "h5", "h6", "p",
"center", "blockquote", "div", "table", "ul", "ol", "pre", "code", "form"
]
SELF_CLOSING = ["br", "hr", "img"]
LIST_ITEM = "*"
blocks = dict.fromkeys(BLOCK+["br", "tr", "td"], ("", "\n\n"))
blocks.update({
"li": ("%s " % LIST_ITEM, "\n"),
"img": ("", ""),
"br": ("", "\n"),
"th": ("", "\n"),
"tr": ("", "\n"),
"td": ("", "\t"),
})
class HTMLParser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
pass
def handle_endtag(self, tag):
pass
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, attrs)
def unknown_endtag(self, tag):
self.handle_endtag(tag)
def clean(self, html):
html = decode_utf8(html)
html = html.replace("/>", " />")
html = html.replace(" />", " />")
html = html.replace("<!", "<!")
html = html.replace("<!DOCTYPE", "<!DOCTYPE")
html = html.replace("<!doctype", "<!doctype")
html = html.replace("<!--", "<!--")
return html
def parse_declaration(self, i):
# We can live without sgmllib's parse_declaration().
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
return i + 1
def convert_charref(self, name):
# This fixes a bug in older versions of sgmllib when working with Unicode.
# Fix: ASCII ends at 127, not 255
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return chr(n)
class HTMLTagstripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
def strip(self, html, exclude=[], replace=blocks):
""" Returns the HTML string with all element tags (e.g. <p>) removed.
- exclude : a list of tags to keep. Element attributes are stripped.
To preserve attributes a dict of (tag name, [attribute])-items can be given.
- replace : a dictionary of (tag name, (replace_before, replace_after))-items.
By default, block-level elements are separated with linebreaks.
"""
if html is None:
return None
self._exclude = isinstance(exclude, dict) and exclude or dict.fromkeys(exclude, [])
self._replace = replace
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return "".join(self._data)
def clean(self, html):
# Escape all entities (just strip tags).
return HTMLParser.clean(self, html).replace("&", "&")
def handle_starttag(self, tag, attributes):
if tag in self._exclude:
# Create the tag attribute string,
# including attributes defined in the HTMLTagStripper._exclude dict.
a = len(self._exclude[tag]) > 0 and attributes or []
a = ["%s=\"%s\"" % (k,v) for k, v in a if k in self._exclude[tag]]
a = (" "+" ".join(a)).rstrip()
self._data.append("<%s%s>" % (tag, a))
if tag in self._replace:
self._data.append(self._replace[tag][0])
if tag in self._replace and tag in SELF_CLOSING:
self._data.append(self._replace[tag][1])
def handle_endtag(self, tag):
if tag in self._exclude and self._data and self._data[-1].startswith("<"+tag):
# Never keep empty elements (e.g. <a></a>).
self._data.pop(-1); return
if tag in self._exclude:
self._data.append("</%s>" % tag)
if tag in self._replace:
self._data.append(self._replace[tag][1])
def handle_data(self, data):
self._data.append(data.strip("\n\t"))
def handle_comment(self, comment):
if "comment" in self._exclude or \
"!--" in self._exclude:
self._data.append("<!--%s-->" % comment)
strip_tags = HTMLTagstripper().strip
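# Illustrative examples (expected output; a sketch, not verified against every sgmllib version):
#   strip_tags("<b>hello</b> <a href='#'>world</a>")         # => "hello world"
#   strip_tags("<a href='#'>x</a>", exclude={"a": ["href"]}) # => '<a href="#">x</a>'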
def strip_element(string, tag, attributes=""):
""" Removes all elements with the given tagname and attributes from the string.
Open and close tags are kept in balance.
No HTML parser is used: strip_element(s, "a", "href='foo' class='bar'")
matches "<a href='foo' class='bar'" but not "<a class='bar' href='foo'".
"""
s = string.lower() # Case-insensitive.
t = tag.strip("</>")
a = (" " + attributes.lower().strip()).rstrip()
i = 0
j = 0
while j >= 0:
i = s.find("<%s%s" % (t, a), i)
j = s.find("</%s>" % t, i+1)
opened, closed = s[i:j].count("<%s" % t), 1
while opened > closed and j >= 0:
k = s.find("</%s>" % t, j+1)
opened += s[j:k].count("<%s" % t)
closed += 1
j = k
if i < 0: return string
if j < 0: return string[:i]
string = string[:i] + string[j+len(t)+3:]; s=string.lower()
return string
def strip_between(a, b, string):
""" Removes anything between (and including) string a and b inside the given string.
"""
p = "%s.*?%s" % (a, b)
p = re.compile(p, re.DOTALL | re.I)
return re.sub(p, "", string)
def strip_javascript(html):
return strip_between("<script.*?>", "</script>", html)
def strip_inline_css(html):
return strip_between("<style.*?>", "</style>", html)
def strip_comments(html):
return strip_between("<!--", "-->", html)
def strip_forms(html):
return strip_between("<form.*?>", "</form>", html)
RE_AMPERSAND = re.compile("\&(?!\#)") # & not followed by #
RE_UNICODE = re.compile(r'&(#?)(x|X?)(\w+);') # &#201;
def encode_entities(string):
    """ Encodes HTML entities in the given string ("<" => "&lt;").
        For example, to display "<em>hello</em>" in a browser,
        we need to pass "&lt;em&gt;hello&lt;/em&gt;" (otherwise "hello" in italic is displayed).
    """
    if isinstance(string, (str, unicode)):
        string = RE_AMPERSAND.sub("&amp;", string)
        string = string.replace("<", "&lt;")
        string = string.replace(">", "&gt;")
        string = string.replace('"', "&quot;")
        string = string.replace("'", "&#39;")
    return string
def decode_entities(string):
    """ Decodes HTML entities in the given string ("&lt;" => "<").
    """
    # http://snippets.dzone.com/posts/show/4569
    def replace_entity(match):
        hash, hex, name = match.group(1), match.group(2), match.group(3)
        if hash == "#" or name.isdigit():
            if hex == '':
                return unichr(int(name)) # "&#38;" => "&"
            if hex in ("x","X"):
                return unichr(int('0x'+name, 16)) # "&#x0026;" => "&"
        else:
            cp = htmlentitydefs.name2codepoint.get(name) # "&amp;" => "&"
            return cp and unichr(cp) or match.group() # "&foo;" => "&foo;"
if isinstance(string, (str, unicode)):
return RE_UNICODE.subn(replace_entity, string)[0]
return string
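# For example (illustrative):
#   encode_entities("<em>hello</em>") # => "&lt;em&gt;hello&lt;/em&gt;"
#   decode_entities("&amp;")          # => u"&"
#   decode_entities("&#201;")         # => u"É"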
def encode_url(string):
return urllib.quote_plus(bytestring(string))
def decode_url(string):
    return urllib.unquote_plus(string) # "black%2Fwhite" => "black/white".
RE_SPACES = re.compile("( |\xa0)+", re.M) # Matches one or more spaces.
RE_TABS = re.compile(r"\t+", re.M) # Matches one or more tabs.
def collapse_spaces(string, indentation=False, replace=" "):
""" Returns a string with consecutive spaces collapsed to a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_SPACES.sub(replace, x[n:]).strip())
return "\n".join(p)
def collapse_tabs(string, indentation=False, replace=" "):
""" Returns a string with (consecutive) tabs replaced by a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_TABS.sub(replace, x[n:]).strip())
return "\n".join(p)
def collapse_linebreaks(string, threshold=1):
""" Returns a string with consecutive linebreaks collapsed to at most the given threshold.
Whitespace on empty lines and at the end of each line is removed.
"""
n = "\n" * threshold
p = [s.rstrip() for s in string.splitlines()]
string = "\n".join(p)
string = re.sub(n+r"+", n, string)
return string
def plaintext(html, keep=[], replace=blocks, linebreaks=2, indentation=False):
""" Returns a string with all HTML tags removed.
Content inside HTML comments, the <style> tag and the <script> tags is removed.
- keep : a list of tags to keep. Element attributes are stripped.
To preserve attributes a dict of (tag name, [attribute])-items can be given.
- replace : a dictionary of (tag name, (replace_before, replace_after))-items.
By default, block-level elements are followed by linebreaks.
- linebreaks : the maximum amount of consecutive linebreaks,
- indentation : keep left line indentation (tabs and spaces)?
"""
if not keep.__contains__("script"):
html = strip_javascript(html)
if not keep.__contains__("style"):
html = strip_inline_css(html)
if not keep.__contains__("form"):
html = strip_forms(html)
if not keep.__contains__("comment") and \
not keep.__contains__("!--"):
html = strip_comments(html)
html = html.replace("\r", "\n")
html = strip_tags(html, exclude=keep, replace=replace)
html = decode_entities(html)
html = collapse_spaces(html, indentation)
html = collapse_tabs(html, indentation)
html = collapse_linebreaks(html, linebreaks)
html = html.strip()
return html
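# A hedged example of plaintext() (assumes network access; keep and linebreaks are optional):
#   html = URL("http://www.clips.ua.ac.be").download(unicode=True)
#   print plaintext(html, keep=["h1", "h2"], linebreaks=2)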
SEARCH = "search" # Query for pages (i.e. links to websites).
IMAGE = "image" # Query for images.
NEWS = "news" # Query for news items.
TINY = "tiny" # Image size around 100x100.
SMALL = "small" # Image size around 200x200.
MEDIUM = "medium" # Image size around 500x500.
LARGE = "large" # Image size around 1000x1000.
RELEVANCY = "relevancy" # Sort results by most relevant.
LATEST = "latest" # Sort results by most recent.
class Result(dict):
def __init__(self, url):
""" An item in a list of results returned by SearchEngine.search().
All dictionary entries are available as unicode string attributes.
- url : the URL of the referred web content,
- title : the title of the content at the URL,
- text : the content text,
- language: the content language,
- author : for news items and images, the author,
- date : for news items, the publication date.
"""
dict.__init__(self)
self.url = url
@property
def description(self):
return self.text # Backwards compatibility.
def download(self, *args, **kwargs):
""" Download the content at the given URL.
By default it will be cached - see URL.download().
"""
return URL(self.url).download(*args, **kwargs)
def __getattr__(self, k):
return self.get(k, u"")
def __getitem__(self, k):
return self.get(k, u"")
def __setattr__(self, k, v):
dict.__setitem__(self, u(k), v is not None and u(v) or u"") # Store strings as unicode.
def __setitem__(self, k, v):
dict.__setitem__(self, u(k), v is not None and u(v) or u"")
def setdefault(self, k, v):
dict.setdefault(self, u(k), u(v))
def update(self, *args, **kwargs):
map = dict()
map.update(*args, **kwargs)
dict.update(self, [(u(k), u(v)) for k, v in map.items()])
def __repr__(self):
return "Result(url=%s)" % repr(self.url)
class Results(list):
def __init__(self, source=None, query=None, type=SEARCH, total=0):
""" A list of results returned from SearchEngine.search().
- source: the service that yields the results (e.g. GOOGLE, TWITTER).
- query : the query that yields the results.
- type : the query type (SEARCH, IMAGE, NEWS).
- total : the total result count.
This is not the length of the list, but the total number of matches for the given query.
"""
self.source = source
self.query = query
self.type = type
self.total = total
class SearchEngine:
def __init__(self, license=None, throttle=1.0, language=None):
""" A base class for a web service.
- license : license key for the API,
- throttle : delay between requests (avoid hammering the server).
Inherited by: Google, Yahoo, Bing, Twitter, Wikipedia, Flickr.
"""
self.license = license
self.throttle = throttle # Amount of sleep time after executing a query.
self.language = language # Result.language restriction (e.g., "en").
self.format = lambda x: x # Formatter applied to each attribute of each Result.
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
return Results(source=None, query=query, type=type)
class SearchEngineError(HTTPError):
pass
class SearchEngineTypeError(SearchEngineError):
pass # Raised when an unknown type is passed to SearchEngine.search().
class SearchEngineLimitError(SearchEngineError):
pass # Raised when the query limit for a license is reached.
GOOGLE = "https://www.googleapis.com/customsearch/v1?"
GOOGLE_LICENSE = api.license["Google"]
GOOGLE_CUSTOM_SEARCH_ENGINE = "000579440470800426354:_4qo2s0ijsi"
RE_GOOGLE_DATE = re.compile("^([A-Z][a-z]{2} [0-9]{1,2}, [0-9]{4}) {0,1}...")
class Google(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or GOOGLE_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Google for the given query.
- type : SEARCH,
- start: maximum 100 results => start 1-10 with count=10,
- count: maximum 10,
There is a daily limit of 10,000 queries. Google Custom Search is a paid service.
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > (100 / count):
return Results(GOOGLE, query, type)
# 1) Create request URL.
url = URL(GOOGLE, query={
"key": self.license or GOOGLE_LICENSE,
"cx": GOOGLE_CUSTOM_SEARCH_ENGINE,
"q": query,
"start": 1 + (start-1) * count,
"num": min(count, 10),
"alt": "json"
})
# 2) Restrict language.
if self.language is not None:
url.query["lr"] = "lang_" + self.language
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
if data.get("error", {}).get("code") == 403:
raise SearchEngineLimitError
results = Results(GOOGLE, query, type)
results.total = int(data.get("queries", {}).get("request", [{}])[0].get("totalResults") or 0)
for x in data.get("items", []):
r = Result(url=None)
r.url = self.format(x.get("link"))
r.title = self.format(x.get("title"))
r.text = self.format(x.get("htmlSnippet").replace("<br> ","").replace("<b>...</b>", "..."))
r.language = self.language or ""
r.date = ""
if not r.date:
# Google Search results can start with a date (parsed from the content):
m = RE_GOOGLE_DATE.match(r.text)
if m:
r.date = m.group(1)
r.text = "..." + r.text[len(m.group(0)):]
results.append(r)
return results
def translate(self, string, input="en", output="fr", **kwargs):
""" Returns the translation of the given string in the desired output language.
Google Translate is a paid service, license without billing raises HTTP401Authentication.
"""
url = URL("https://www.googleapis.com/language/translate/v2?", method=GET, query={
"key": GOOGLE_LICENSE,
"q": string,
"source": input,
"target": output
})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(**kwargs)
except HTTP403Forbidden:
raise HTTP401Authentication, "Google translate API is a paid service"
data = json.loads(data)
data = data.get("data", {}).get("translations", [{}])[0].get("translatedText", "")
data = decode_entities(data)
return u(data)
def identify(self, string, **kwargs):
""" Returns a (language, confidence)-tuple for the given string.
Google Translate is a paid service, license without billing raises HTTP401Authentication.
"""
url = URL("https://www.googleapis.com/language/translate/v2/detect?", method=GET, query={
"key": GOOGLE_LICENSE,
"q": string[:1000]
})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(**kwargs)
except HTTP403Forbidden:
raise HTTP401Authentication, "Google translate API is a paid service"
data = json.loads(data)
data = data.get("data", {}).get("detections", [[{}]])[0][0]
data = u(data.get("language")), float(data.get("confidence"))
return data
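# Illustrative usage sketch (requires a valid Google Custom Search license with billing):
#   engine = Google(license=None, language="en") # license=None falls back to api.license["Google"].
#   for result in engine.search("web mining", start=1, count=10, cached=True):
#       print result.url
#       print result.title
#       print result.text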
YAHOO = "http://yboss.yahooapis.com/ysearch/"
YAHOO_LICENSE = api.license["Yahoo"]
class Yahoo(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or YAHOO_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Yahoo for the given query.
- type : SEARCH, IMAGE or NEWS,
- start: maximum 1000 results => start 1-100 with count=10, 1000/count,
- count: maximum 50, or 35 for images.
There is no daily limit, however Yahoo BOSS is a paid service.
"""
if type not in (SEARCH, IMAGE, NEWS):
raise SearchEngineTypeError
if type == SEARCH:
url = YAHOO + "web"
if type == IMAGE:
url = YAHOO + "images"
if type == NEWS:
url = YAHOO + "news"
if not query or count < 1 or start < 1 or start > 1000 / count:
return Results(YAHOO, query, type)
# 1) Create request URL.
url = URL(url, method=GET, query={
"q": encode_url(query),
"start": 1 + (start-1) * count,
"count": min(count, type==IMAGE and 35 or 50),
"format": "json"
})
# 2) Restrict language.
if self.language is not None:
market = locale.market(self.language)
if market:
url.query["market"] = market.lower()
# 3) BOSS OAuth authentication.
url.query.update({
"oauth_version": "1.0",
"oauth_nonce": oauth.nonce(),
"oauth_timestamp": oauth.timestamp(),
"oauth_consumer_key": self.license[0],
"oauth_signature_method": "HMAC-SHA1"
})
url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query, method=GET, secret=self.license[1])
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(cached=cached, **kwargs)
except HTTP401Authentication:
raise HTTP401Authentication, "Yahoo %s API is a paid service" % type
except HTTP403Forbidden:
raise SearchEngineLimitError
data = json.loads(data)
data = data.get("bossresponse") or {}
data = data.get({SEARCH:"web", IMAGE:"images", NEWS:"news"}[type], {})
results = Results(YAHOO, query, type)
results.total = int(data.get("totalresults") or 0)
for x in data.get("results", []):
r = Result(url=None)
r.url = self.format(x.get("url", x.get("clickurl")))
r.title = self.format(x.get("title"))
r.text = self.format(x.get("abstract"))
r.date = self.format(x.get("date"))
r.author = self.format(x.get("source"))
r.language = self.format(x.get("language") and \
x.get("language").split(" ")[0] or self.language or "")
results.append(r)
return results
BING = "https://api.datamarket.azure.com/Bing/Search/"
BING_LICENSE = api.license["Bing"]
class Bing(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or BING_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
"""" Returns a list of results from Bing for the given query.
- type : SEARCH, IMAGE or NEWS,
- start: maximum 1000 results => start 1-100 with count=10, 1000/count,
- count: maximum 50, or 15 for news,
- size : for images, either SMALL, MEDIUM or LARGE.
There is no daily query limit.
"""
if type not in (SEARCH, IMAGE, NEWS):
raise SearchEngineTypeError
if type == SEARCH:
src = "Web"
if type == IMAGE:
src = "Image"
if type == NEWS:
src = "News"
if not query or count < 1 or start < 1 or start > 1000 / count:
return Results(BING + src + "?", query, type)
# 1) Construct request URL.
url = URL(BING + "Composite", method=GET, query={
"Sources": "'" + src.lower() + "'",
"Query": "'" + query + "'",
"$skip": 1 + (start-1) * count,
"$top": min(count, type==NEWS and 15 or 50),
"$format": "json",
})
# 2) Restrict image size.
if size in (TINY, SMALL, MEDIUM, LARGE):
url.query["ImageFilters"] = {
TINY: "'Size:Small'",
SMALL: "'Size:Small'",
MEDIUM: "'Size:Medium'",
LARGE: "'Size:Large'" }[size]
# 3) Restrict language.
if type in (SEARCH, IMAGE) and self.language is not None:
url.query["Query"] = url.query["Query"][:-1] + " language: %s'" % self.language
#if self.language is not None:
# market = locale.market(self.language)
# if market:
# url.query["market"] = market
# 4) Parse JSON response.
kwargs["authentication"] = ("", self.license)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(cached=cached, **kwargs)
except HTTP401Authentication:
raise HTTP401Authentication, "Bing %s API is a paid service" % type
data = json.loads(data)
data = data.get("d", {})
data = data.get("results", [{}])[0]
results = Results(BING, query, type)
results.total = int(data.get(src+"Total", 0))
for x in data.get(src, []):
r = Result(url=None)
r.url = self.format(x.get("MediaUrl", x.get("Url")))
r.title = self.format(x.get("Title"))
r.text = self.format(x.get("Description", x.get("Snippet")))
r.language = self.language or ""
r.date = self.format(x.get("DateTime", x.get("Date")))
r.author = self.format(x.get("Source"))
results.append(r)
return results
TWITTER = "http://search.twitter.com/"
TWITTER_STREAM = "https://stream.twitter.com/1/statuses/filter.json"
TWITTER_STATUS = "https://twitter.com/%s/status/%s"
TWITTER_LICENSE = api.license["Twitter"]
TWITTER_HASHTAG = re.compile(r"(\s|^)(#[a-z0-9_\-]+)", re.I) # Word starts with "#".
TWITTER_RETWEET = re.compile(r"(\s|^RT )(@[a-z0-9_\-]+)", re.I) # Word starts with "RT @".
class Twitter(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or TWITTER_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
""" Returns a list of results from Twitter for the given query.
            - type : SEARCH (use Twitter.trends() for trending topics),
            - start: maximum 1500 results => start 1-15 with count=100, 1500/count,
            - count: maximum 100.
There is an hourly limit of 150+ queries (actual amount undisclosed).
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > 1500 / count:
return Results(TWITTER, query, type)
# 1) Construct request URL.
url = URL(TWITTER + "search.json?", method=GET)
url.query = {
"q": query,
"page": start,
"rpp": min(count, 100)
}
if "geo" in kwargs:
# Filter by location with geo=(latitude, longitude, radius).
# It can also be a (latitude, longitude)-tuple with default radius "10km".
url.query["geocode"] = ",".join((map(str, kwargs.pop("geo")) + ["10km"])[:3])
# 2) Restrict language.
url.query["lang"] = self.language or ""
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(cached=cached, **kwargs)
except HTTP420Error:
raise SearchEngineLimitError
data = json.loads(data)
results = Results(TWITTER, query, type)
results.total = None
for x in data.get("results", data.get("trends", [])):
r = Result(url=None)
r.url = self.format(TWITTER_STATUS % (x.get("from_user"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at", data.get("as_of")))
r.author = self.format(x.get("from_user"))
r.profile = self.format(x.get("profile_image_url")) # Profile picture URL.
r.language = self.format(x.get("iso_language_code"))
results.append(r)
return results
def trends(self, **kwargs):
""" Returns a list with 10 trending topics on Twitter.
"""
url = URL("https://api.twitter.com/1/trends/1.json")
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(**kwargs)
data = json.loads(data)
return [u(x.get("name")) for x in data[0].get("trends", [])]
def stream(self, query):
""" Returns a live stream of Result objects for the given query.
"""
url = URL(TWITTER_STREAM)
url.query.update({
"track": query,
"oauth_version": "1.0",
"oauth_nonce": oauth.nonce(),
"oauth_timestamp": oauth.timestamp(),
"oauth_consumer_key": self.license[0],
"oauth_token": self.license[2][0],
"oauth_signature_method": "HMAC-SHA1"
})
url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query, GET,
self.license[1],
self.license[2][1])
return TwitterStream(url, delimiter="\n", format=self.format)
class TwitterStream(Stream):
def __init__(self, socket, delimiter="\n", format=lambda s: s):
Stream.__init__(self, socket, delimiter)
self.format = format
def parse(self, data):
""" TwitterStream.queue will populate with Result objects as
TwitterStream.update() is called iteratively.
"""
x = json.loads(data)
r = Result(url=None)
r.url = self.format(TWITTER_STATUS % (x.get("user", {}).get("screen_name"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at"))
r.author = self.format(x.get("user", {}).get("screen_name"))
r.profile = self.format(x.get("profile_image_url"))
r.language = self.format(x.get("iso_language_code"))
return r
def author(name):
""" Returns a Twitter query-by-author-name that can be passed to Twitter.search().
For example: Twitter().search(author("tom_de_smedt"))
"""
return "from:%s" % name
def hashtags(string):
""" Returns a list of hashtags (words starting with a #hash) from a tweet.
"""
return [b for a, b in TWITTER_HASHTAG.findall(string)]
def retweets(string):
""" Returns a list of retweets (words starting with a RT @author) from a tweet.
"""
return [b for a, b in TWITTER_RETWEET.findall(string)]
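# Illustrative usage sketch (live queries, so results vary; not cached by default):
#   twitter = Twitter(language="en")
#   for tweet in twitter.search(author("tom_de_smedt"), count=10):
#       print tweet.author, tweet.date
#       print tweet.text, hashtags(tweet.text)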
WIKIA = "http://wikia.com"
WIKIPEDIA = "http://wikipedia.com"
WIKIPEDIA_LICENSE = api.license["Wikipedia"]
MEDIAWIKI_LICENSE = None
MEDIAWIKI = "http://{SUBDOMAIN}.{DOMAIN}{API}"
MEDIAWIKI_NAMESPACE = ["Main", "User", "Wikipedia", "File", "MediaWiki", "Template", "Help", "Category", "Portal", "Book"]
MEDIAWIKI_NAMESPACE += [s+" talk" for s in MEDIAWIKI_NAMESPACE] + ["Talk", "Special", "Media"]
MEDIAWIKI_NAMESPACE += ["WP", "WT", "MOS", "C", "CAT", "Cat", "P", "T", "H", "MP", "MoS", "Mos"]
_mediawiki_namespace = re.compile(r"^"+"|".join(MEDIAWIKI_NAMESPACE)+":", re.I)
MEDIAWIKI_DISAMBIGUATION = "<a href=\"/wiki/Help:Disambiguation\" title=\"Help:Disambiguation\">disambiguation</a> page"
MEDIAWIKI_REFERENCE = r"\s*\[[0-9]{1,3}\]"
class MediaWiki(SearchEngine):
def __init__(self, license=None, throttle=5.0, language="en"):
SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
@property
def _url(self):
# Must be overridden in a subclass; see Wikia and Wikipedia.
return None
@property
def MediaWikiArticle(self):
return MediaWikiArticle
@property
def MediaWikiSection(self):
return MediaWikiSection
@property
def MediaWikiTable(self):
return MediaWikiTable
def __iter__(self):
return self.all()
def all(self, **kwargs):
""" Returns an iterator over all MediaWikiArticle objects.
Optional parameters can include those passed to
MediaWiki.list(), MediaWiki.search() and URL.download().
"""
for title in self.list(**kwargs):
yield self.search(title, **kwargs)
articles = all
def list(self, namespace=0, start=None, count=100, cached=True, **kwargs):
""" Returns an iterator over all article titles (for a given namespace id).
"""
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
# Fetch article titles (default) or a custom id.
id = kwargs.pop("_id", "title")
# Loop endlessly (= until the last request no longer yields an "apcontinue").
# See: http://www.mediawiki.org/wiki/API:Allpages
while start != -1:
url = URL(self._url, method=GET, query={
"action": "query",
"list": "allpages",
"apnamespace": namespace,
"apfrom": start or "",
"aplimit": min(count, 500),
"apfilterredir": "nonredirects",
"format": "json"
})
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
for x in data.get("query", {}).get("allpages", {}):
if x.get(id):
yield x[id]
start = data.get("query-continue", {}).get("allpages", {})
start = start.get("apcontinue", start.get("apfrom", -1))
raise StopIteration
def search(self, query, type=SEARCH, start=1, count=1, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a MediaWikiArticle for the given query.
The query is case-sensitive, for example on Wikipedia:
- "tiger" = Panthera tigris,
- "TIGER" = Topologically Integrated Geographic Encoding and Referencing.
"""
if type != SEARCH:
raise SearchEngineTypeError
if count < 1:
return None
# 1) Construct request URL (e.g., Wikipedia for a given language).
url = URL(self._url, method=GET, query={
"action": "parse",
"page": query.replace(" ","_"),
"redirects": 1,
"format": "json"
})
# 2) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("timeout", 30) # Parsing the article takes some time.
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
data = data.get("parse", {})
a = self._parse_article(data, query=query)
a = self._parse_article_sections(a, data)
a = self._parse_article_section_structure(a)
if not a.html or "id=\"noarticletext\"" in a.html:
return None
return a
def _parse_article(self, data, **kwargs):
return self.MediaWikiArticle(
title = plaintext(data.get("displaytitle", data.get("title", ""))),
source = data.get("text", {}).get("*", ""),
disambiguation = data.get("text", {}).get("*", "").find(MEDIAWIKI_DISAMBIGUATION) >= 0,
links = [x["*"] for x in data.get("links", []) if not _mediawiki_namespace.match(x["*"])],
categories = [x["*"] for x in data.get("categories", [])],
external = [x for x in data.get("externallinks", [])],
media = [x for x in data.get("images", [])],
languages = dict([(x["lang"], x["*"]) for x in data.get("langlinks", [])]),
language = self.language,
parser = self, **kwargs)
def _parse_article_sections(self, article, data):
# If "References" is a section in the article,
# the HTML will contain a marker <h*><span class="mw-headline" id="References">.
# http://en.wikipedia.org/wiki/Section_editing
t = article.title
d = 0
i = 0
for x in data.get("sections", {}):
a = x.get("anchor")
if a:
p = r"<h.>\s*.*?\s*<span class=\"mw-headline\" id=\"%s\">" % a
p = re.compile(p)
m = p.search(article.source, i)
if m:
j = m.start()
article.sections.append(self.MediaWikiSection(article,
title = t,
start = i,
stop = j,
level = d))
t = x.get("line", "")
d = int(x.get("level", 2)) - 1
i = j
return article
def _parse_article_section_structure(self, article):
# Sections with higher level are children of previous sections with lower level.
for i, s2 in enumerate(article.sections):
for s1 in reversed(article.sections[:i]):
if s1.level < s2.level:
s2.parent = s1
s1.children.append(s2)
break
return article
class MediaWikiArticle:
def __init__(self, title=u"", source=u"", links=[], categories=[], languages={}, disambiguation=False, **kwargs):
""" A MediaWiki article returned from MediaWiki.search().
MediaWikiArticle.string contains the HTML content.
"""
self.title = title # Article title.
self.source = source # Article HTML content.
self.sections = [] # Article sections.
self.links = links # List of titles of linked articles.
self.categories = categories # List of categories. As links, prepend "Category:".
self.external = [] # List of external links.
self.media = [] # List of linked media (images, sounds, ...)
self.disambiguation = disambiguation # True when the article is a disambiguation page.
self.languages = languages # Dictionary of (language, article)-items, e.g. Cat => ("nl", "Kat")
self.language = kwargs.get("language", "en")
self.parser = kwargs.get("parser", MediaWiki())
for k, v in kwargs.items():
setattr(self, k, v)
def _plaintext(self, string, **kwargs):
""" Strips HTML tags, whitespace and wiki markup from the HTML source, including:
metadata, info box, table of contents, annotations, thumbnails, disambiguation link.
This is called internally from MediaWikiArticle.string.
"""
s = string
s = strip_between("<table class=\"metadata", "</table>", s) # Metadata.
s = strip_between("<table id=\"toc", "</table>", s) # Table of contents.
s = strip_between("<table class=\"infobox", "</table>", s) # Infobox.
s = strip_between("<table class=\"wikitable", "</table>", s) # Table.
s = strip_element(s, "table", "class=\"navbox") # Navbox.
s = strip_between("<div id=\"annotation", "</div>", s) # Annotations.
s = strip_between("<div class=\"dablink", "</div>", s) # Disambiguation message.
s = strip_between("<div class=\"magnify", "</div>", s) # Thumbnails.
s = strip_between("<div class=\"thumbcaption", "</div>", s) # Thumbnail captions.
s = re.sub(r"<img class=\"tex\".*?/>", "[math]", s) # LaTex math images.
s = plaintext(s, **kwargs)
s = re.sub(r"\[edit\]\s*", "", s) # [edit] is language dependent (e.g. nl => "[bewerken]")
s = s.replace("[", " [").replace(" [", " [") # Space before inline references.
return s
def plaintext(self, **kwargs):
return self._plaintext(self.source, **kwargs)
@property
def html(self):
return self.source
@property
def string(self):
return self.plaintext()
def __repr__(self):
return "MediaWikiArticle(title=%s)" % repr(self.title)
class MediaWikiSection:
def __init__(self, article, title=u"", start=0, stop=0, level=1):
""" A (nested) section in the content of a MediaWikiArticle.
"""
self.article = article # MediaWikiArticle the section is part of.
self.parent = None # MediaWikiSection the section is part of.
self.children = [] # MediaWikiSections belonging to this section.
self.title = title # Section title.
self._start = start # Section start index in MediaWikiArticle.string.
self._stop = stop # Section stop index in MediaWikiArticle.string.
self._level = level # Section depth (main title + intro = level 0).
self._tables = None
def plaintext(self, **kwargs):
return self.article._plaintext(self.source, **kwargs)
@property
def source(self):
return self.article.source[self._start:self._stop]
@property
def html(self):
return self.source
@property
def string(self):
return self.plaintext()
@property
def content(self):
# ArticleSection.string, minus the title.
s = self.plaintext()
if s == self.title or s.startswith(self.title+"\n"):
return s[len(self.title):].lstrip()
return s
@property
def tables(self):
""" Yields a list of MediaWikiTable objects in the section.
"""
if self._tables is None:
self._tables = []
b = "<table class=\"wikitable\"", "</table>"
p = self.article._plaintext
f = find_between
for s in f(b[0], b[1], self.source):
t = self.article.parser.MediaWikiTable(self,
title = p((f(r"<caption.*?>", "</caption>", s) + [""])[0]),
source = b[0] + s + b[1]
)
for i, row in enumerate(f(r"<tr", "</tr>", s)):
# 1) Parse <td> and <th> content and format it as plain text.
# 2) Parse <td colspan=""> attribute, duplicate spanning cells.
# 3) For <th> in the first row, update MediaWikiTable.headers.
r1 = f(r"<t[d|h]", r"</t[d|h]>", row)
r1 = (((f(r'colspan="', r'"', v)+[1])[0], v[v.find(">")+1:]) for v in r1)
r1 = ((int(n), v) for n, v in r1)
r2 = []; [[r2.append(p(v)) for j in range(n)] for n, v in r1]
if i == 0 and "</th>" in row:
t.headers = r2
else:
t.rows.append(r2)
self._tables.append(t)
return self._tables
@property
def level(self):
return self._level
depth = level
def __repr__(self):
return "MediaWikiSection(title='%s')" % bytestring(self.title)
class MediaWikiTable:
def __init__(self, section, title=u"", headers=[], rows=[], source=u""):
""" A <table class="wikitable> in a MediaWikiSection.
"""
self.section = section # MediaWikiSection the table is part of.
self.source = source # Table HTML.
self.title = title # Table title.
self.headers = headers # List of table headers.
self.rows = rows # List of table rows, each a list of cells.
@property
def html(self):
return self.source
def __repr__(self):
return "MediaWikiTable(title='%s')" % bytestring(self.title)
class Wikipedia(MediaWiki):
def __init__(self, license=None, throttle=5.0, language="en"):
""" Mediawiki search engine for http://[language].wikipedia.org.
"""
SearchEngine.__init__(self, license or WIKIPEDIA_LICENSE, throttle, language)
self._subdomain = language
@property
def _url(self):
s = MEDIAWIKI
s = s.replace("{SUBDOMAIN}", self._subdomain)
s = s.replace("{DOMAIN}", "wikipedia.org")
s = s.replace("{API}", '/w/api.php')
return s
@property
def MediaWikiArticle(self):
return WikipediaArticle
@property
def MediaWikiSection(self):
return WikipediaSection
@property
def MediaWikiTable(self):
return WikipediaTable
class WikipediaArticle(MediaWikiArticle):
def download(self, media, **kwargs):
""" Downloads an item from MediaWikiArticle.media and returns the content.
Note: images on Wikipedia can be quite large, and this method uses screen-scraping,
so Wikipedia might not like it that you download media in this way.
To save the media in a file:
data = article.download(media)
open(filename+extension(media),"w").write(data)
"""
url = "http://%s.wikipedia.org/wiki/File:%s" % (self.__dict__.get("language", "en"), media)
if url not in cache:
time.sleep(1)
data = URL(url).download(**kwargs)
data = re.search(r"upload.wikimedia.org/.*?/%s" % media, data)
data = data and URL("http://" + data.group(0)).download(**kwargs) or None
return data
def __repr__(self):
return "WikipediaArticle(title=%s)" % repr(self.title)
class WikipediaSection(MediaWikiSection):
def __repr__(self):
return "WikipediaSection(title='%s')" % bytestring(self.title)
class WikipediaTable(MediaWikiTable):
def __repr__(self):
return "WikipediaTable(title='%s')" % bytestring(self.title)
class Wikia(MediaWiki):
def __init__(self, domain="www", license=None, throttle=5.0, language="en"):
""" Mediawiki search engine for http://[domain].wikia.com.
"""
SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
self._subdomain = domain
@property
def _url(self):
s = MEDIAWIKI
s = s.replace("{SUBDOMAIN}", self._subdomain)
s = s.replace("{DOMAIN}", "wikia.com")
s = s.replace("{API}", '/api.php')
return s
@property
def MediaWikiArticle(self):
return WikiaArticle
@property
def MediaWikiSection(self):
return WikiaSection
@property
def MediaWikiTable(self):
return WikiaTable
def all(self, **kwargs):
if kwargs.pop("batch", True):
            # We can take advantage of Wikia's search API to reduce bandwidth.
# Instead of executing a query to retrieve each article,
# we query for a batch of (10) articles.
iterator = self.list(_id="pageid", **kwargs)
while True:
batch, done = [], False
try:
for i in range(10): batch.append(iterator.next())
except StopIteration:
done = True # No more articles, finish batch and raise StopIteration.
url = URL(self._url.replace("api.php", "wikia.php"), method=GET, query={
"controller": "WikiaSearch",
"method": "getPages",
"ids": '|'.join(str(id) for id in batch),
"format": "json"
})
kwargs.setdefault("unicode", True)
kwargs.setdefault("cached", True)
kwargs["timeout"] = 10 * (1 + len(batch))
data = url.download(**kwargs)
data = json.loads(data)
for x in (data or {}).get("pages", {}).values():
yield WikiaArticle(title=x.get("title", ""), source=x.get("html", ""))
if done:
raise StopIteration
for title in self.list(**kwargs):
yield self.search(title, **kwargs)
class WikiaArticle(MediaWikiArticle):
def __repr__(self):
return "WikiaArticle(title=%s)" % repr(self.title)
class WikiaSection(MediaWikiSection):
def __repr__(self):
return "WikiaSection(title='%s')" % bytestring(self.title)
class WikiaTable(MediaWikiTable):
def __repr__(self):
return "WikiaTable(title='%s')" % bytestring(self.title)
FLICKR = "http://api.flickr.com/services/rest/"
FLICKR_LICENSE = api.license["Flickr"]
INTERESTING = "interesting"
class Flickr(SearchEngine):
def __init__(self, license=None, throttle=5.0, language=None):
SearchEngine.__init__(self, license or FLICKR_LICENSE, throttle, language)
def search(self, query, type=IMAGE, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Flickr for the given query.
Retrieving the URL of a result (i.e. image) requires an additional query.
- type : SEARCH, IMAGE,
- start: maximum undefined,
- count: maximum 500,
- sort : RELEVANCY, LATEST or INTERESTING.
There is no daily limit.
"""
if type not in (SEARCH, IMAGE):
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > 500/count:
return Results(FLICKR, query, IMAGE)
# 1) Construct request URL.
url = FLICKR+"?"
url = URL(url, method=GET, query={
"api_key": self.license or "",
"method": "flickr.photos.search",
"text": query.replace(" ", "_"),
"page": start,
"per_page": min(count, 500),
"sort": { RELEVANCY: "relevance",
LATEST: "date-posted-desc",
INTERESTING: "interestingness-desc" }.get(sort)
})
if kwargs.get("copyright", True) is False:
# With copyright=False, only returns Public Domain and Creative Commons images.
# http://www.flickr.com/services/api/flickr.photos.licenses.getInfo.html
# 5: "Attribution-ShareAlike License"
# 7: "No known copyright restriction"
url.query["license"] = "5,7"
# 2) Parse XML response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = xml.dom.minidom.parseString(bytestring(data))
results = Results(FLICKR, query, IMAGE)
results.total = int(data.getElementsByTagName("photos")[0].getAttribute("total"))
for x in data.getElementsByTagName("photo"):
r = FlickrResult(url=None)
r.__dict__["_id"] = x.getAttribute("id")
r.__dict__["_size"] = size
r.__dict__["_license"] = self.license
r.__dict__["_throttle"] = self.throttle
r.text = self.format(x.getAttribute("title"))
r.author = self.format(x.getAttribute("owner"))
results.append(r)
return results
class FlickrResult(Result):
@property
def url(self):
# Retrieving the url of a FlickrResult (i.e. image location) requires another query.
# Note: the "Original" size no longer appears in the response,
# so Flickr might not like it if we download it.
url = FLICKR + "?method=flickr.photos.getSizes&photo_id=%s&api_key=%s" % (self._id, self._license)
data = URL(url).download(throttle=self._throttle, unicode=True)
data = xml.dom.minidom.parseString(bytestring(data))
size = { TINY: "Thumbnail",
SMALL: "Small",
MEDIUM: "Medium",
LARGE: "Original" }.get(self._size, "Medium")
for x in data.getElementsByTagName("size"):
if size == x.getAttribute("label"):
return x.getAttribute("source")
if size == "Original":
url = x.getAttribute("source")
url = url[:-len(extension(url))-2] + "_o" + extension(url)
return u(url)
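# Illustrative usage sketch (requires a Flickr API key; fetching FlickrResult.url
# costs one extra query per result):
#   flickr = Flickr(license=None) # license=None falls back to api.license["Flickr"].
#   for result in flickr.search("kitten", count=10, size=SMALL, copyright=False):
#       print result.text, result.author
#       print result.url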
FACEBOOK = "https://graph.facebook.com/"
FACEBOOK_LICENSE = api.license["Facebook"]
FEED = "feed" # Facebook timeline.
COMMENTS = "comments" # Facebook comments (for a given news feed post).
LIKES = "likes" # Facebook likes (for a given post or comment).
FRIENDS = "friends" # Facebook friends (for a given profile id).
class FacebookResult(Result):
def __repr__(self):
return "Result(id=%s)" % repr(self.id)
class Facebook(SearchEngine):
def __init__(self, license=None, throttle=1.0, language=None):
SearchEngine.__init__(self, license, throttle, language)
@property
def _token(self):
# Yields the "application access token" (stored in api.license["Facebook"]).
# With this license, we can view public content.
# To view more information, we need a "user access token" as license key.
# This token can be retrieved manually from:
# http://www.clips.ua.ac.be/media/pattern-fb.html
# Or parsed from this URL:
# https://graph.facebook.com/oauth/authorize?type=user_agent
# &client_id=332061826907464
# &redirect_uri=http%3A%2F%2Fwww.clips.ua.ac.be/media/pattern-facebook-token.html
# &scope=read_stream,user_birthday,user_likes,user_photos,friends_birthday,friends_likes
# The token is valid for a limited duration.
return URL(FACEBOOK + "oauth/access_token?", query={
"grant_type": "client_credentials",
"client_id": "332061826907464",
"client_secret": "81ff4204e73ecafcd87635a3a3683fbe"
}).download().split("=")[1]
def search(self, query, type=SEARCH, start=1, count=10, cached=False, **kwargs):
""" Returns a list of results from Facebook public status updates for the given query.
- query: string, or Result.id for NEWS and COMMENTS,
- type : SEARCH,
- start: 1,
- count: maximum 100 for SEARCH and NEWS, 1000 for COMMENTS and LIKES.
There is an hourly limit of +-600 queries (actual amount undisclosed).
"""
# Facebook.search(type=SEARCH) returns public posts + author.
# Facebook.search(type=NEWS) returns posts for the given author (id | alias | "me").
# Facebook.search(type=COMMENTS) returns comments for the given post id.
# Facebook.search(type=LIKES) returns authors for the given author, post or comments.
# An author is a Facebook user or other entity (e.g., a product page).
if type not in (SEARCH, NEWS, COMMENTS, LIKES, FRIENDS):
raise SearchEngineTypeError
if type in (SEARCH, NEWS):
max = 100
if type in (COMMENTS, LIKES):
max = 1000
if type in (FRIENDS,):
max = 10000
if not query or start < 1 or count < 1:
return Results(FACEBOOK, query, SEARCH)
if isinstance(query, FacebookResult):
query = query.id
# 1) Construct request URL.
if type == SEARCH:
url = FACEBOOK + type
url = URL(url, method=GET, query={
"q": query,
"type": "post",
"fields": ",".join(("id", "link", "message", "created_time", "from")),
"offset": (start-1) * min(count, max),
"limit": (start-0) * min(count, max),
})
if type in (NEWS, FEED, COMMENTS, LIKES, FRIENDS):
url = FACEBOOK + (u(query) or "me").replace(FACEBOOK, "") + "/" + type.replace("news", "feed")
url = URL(url, method=GET, query={
"access_token": self.license,
"offset": (start-1) * min(count, max),
"limit": (start-0) * min(count, max)
})
# 2) Parse JSON response.
kwargs.setdefault("cached", cached)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
except HTTP400BadRequest:
raise HTTP401Authentication
data = json.loads(data)
results = Results(FACEBOOK, query, SEARCH)
results.total = None
for x in data.get("data", []):
r = FacebookResult(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.text = self.format(x.get("story", x.get("message")))
r.date = self.format(x.get("created_time"))
# Store likes & comments count as int, author as (id, name)-tuple
# (by default Result will store everything as Unicode strings).
s = lambda r, k, v: dict.__setitem__(r, k, v)
s(r, "likes", \
self.format(x.get("like_count", x.get("likes", {}).get("count", 0))) + 0)
s(r, "comments", \
self.format(x.get("comments", {}).get("count", 0)) + 0)
s(r, "author", (
u(self.format(x.get("from", {}).get("id", ""))), \
u(self.format(x.get("from", {}).get("name", "")))))
# Replace Result.text with author name for likes.
if type in (LIKES, FRIENDS):
s(r, "author", (
u(self.format(x.get("id", ""))),
u(self.format(x.get("name", "")))))
r.text = \
self.format(x.get("name"))
# Replace Result.url Facebook URL with object id.
if r.url.startswith("http://www.facebook.com/photo"):
r.url = x.get("picture", r.url)
# Replace Result.url Facebook URL with full-size image.
if r.url.startswith("http://www.facebook.com/") and \
r.url.split("/")[-1].split("?")[0].isdigit():
r.url = r.url.split("/")[-1].split("?")[0].replace("_s", "_b")
results.append(r)
return results
def profile(self, id=None, **kwargs):
""" For the given author id or alias,
returns a (id, name, date of birth, gender, locale)-tuple.
"""
url = FACEBOOK + (u(id or "me")).replace(FACEBOOK, "")
url = URL(url, method=GET, query={"access_token": self.license})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
data = json.loads(data)
except HTTP400BadRequest:
raise HTTP401Authentication
return (
u(data.get("id", "")),
u(data.get("name", "")),
u(data.get("birthday", "")),
u(data.get("gender", "")[:1]),
u(data.get("locale", ""))
)
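# --- Usage sketch (not part of the original module) ---------------------------
# Hedged example of Facebook.search() and Facebook.profile() as defined above.
# "ACCESS_TOKEN" is a placeholder: public search may require a user access token
# (see Facebook._token), and the calls need network access.
if __name__ == "__main__":
    fb = Facebook(license="ACCESS_TOKEN")
    for post in fb.search("pattern", type=SEARCH, count=3, cached=False):
        print(post.id)
        print(post.text)
    print(fb.profile("me"))  # (id, name, date of birth, gender, locale)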
PRODUCTWIKI = "http://api.productwiki.com/connect/api.aspx"
PRODUCTWIKI_LICENSE = api.license["Products"]
class Products(SearchEngine):
def __init__(self, license=None, throttle=5.0, language=None):
SearchEngine.__init__(self, license or PRODUCTWIKI_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Productwiki for the given query.
Each Result.reviews is a list of (review, score)-items.
- type : SEARCH,
- start: maximum undefined,
- count: 20,
- sort : RELEVANCY.
There is no daily limit.
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or start < 1 or count < 1:
return Results(PRODUCTWIKI, query, type)
# 1) Construct request URL.
url = PRODUCTWIKI+"?"
url = URL(url, method=GET, query={
"key": self.license or "",
"q": query,
"page" : start,
"op": "search",
"fields": "proscons", # "description,proscons" is heavy.
"format": "json"
})
# 2) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = URL(url).download(cached=cached, **kwargs)
data = json.loads(data)
results = Results(PRODUCTWIKI, query, type)
results.total = None
for x in data.get("products", [])[:count]:
r = Result(url=None)
r.__dict__["title"] = u(x.get("title"))
r.__dict__["text"] = u(x.get("text"))
r.__dict__["reviews"] = []
reviews = x.get("community_review") or {}
for p in reviews.get("pros", []):
r.reviews.append((p.get("text", ""), int(p.get("score")) or +1))
for p in reviews.get("cons", []):
r.reviews.append((p.get("text", ""), int(p.get("score")) or -1))
r.__dict__["score"] = int(sum(score for review, score in r.reviews))
results.append(r)
# Highest score first.
results.sort(key=lambda r: r.score, reverse=True)
return results
class Newsfeed(SearchEngine):
def __init__(self, license=None, throttle=1.0, language=None):
SearchEngine.__init__(self, license, throttle, language)
def search(self, query, type=NEWS, start=1, count=10, sort=LATEST, size=SMALL, cached=True, **kwargs):
""" Returns a list of results from the given RSS or Atom newsfeed URL.
"""
if type != NEWS:
raise SearchEngineTypeError
if not query or start < 1 or count < 1:
return Results(query, query, NEWS)
# 1) Construct request URL.
# 2) Parse RSS/Atom response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
tags = kwargs.pop("tags", [])
data = URL(query).download(cached=cached, **kwargs)
data = feedparser.parse(bytestring(data))
results = Results(query, query, NEWS)
results.total = None
for x in data["entries"][:count]:
s = "\n\n".join([v.get("value") for v in x.get("content", [])]) or x.get("summary")
r = Result(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.title = self.format(x.get("title"))
r.text = self.format(s)
r.date = self.format(x.get("updated"))
r.author = self.format(x.get("author"))
r.language = self.format(x.get("content") and \
x.get("content")[0].get("language") or \
data.get("language"))
for tag in tags:
# Parse custom tags.
# Newsfeed.search(tags=["dc:identifier"]) => Result.dc_identifier.
tag = tag.replace(":", "_")
r[tag] = self.format(x.get(tag))
results.append(r)
return results
feeds = {
"Nature": "http://feeds.nature.com/nature/rss/current",
"Science": "http://www.sciencemag.org/rss/podcast.xml",
"Herald Tribune": "http://www.iht.com/rss/frontpage.xml",
"TIME": "http://feeds.feedburner.com/time/topstories",
"CNN": "http://rss.cnn.com/rss/edition.rss",
}
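# --- Usage sketch (not part of the original module) ---------------------------
# Newsfeed.search() takes a feed URL as its query, so the feeds dict above can
# be used directly. Network access is required; titles and links depend on the feed.
if __name__ == "__main__":
    engine = Newsfeed()
    for item in engine.search(feeds["Nature"], count=5):
        print(item.title)
        print(item.url)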
def query(string, service=GOOGLE, **kwargs):
""" Returns the list of search query results from the given service.
For service=WIKIPEDIA, this is a single WikipediaArticle or None.
"""
service = service.lower()
if service in (GOOGLE, "google", "g"):
engine = Google
if service in (YAHOO, "yahoo", "y!"):
engine = Yahoo
if service in (BING, "bing"):
engine = Bing
if service in (TWITTER, "twitter"):
engine = Twitter
if service in (FACEBOOK, "facebook", "fb"):
engine = Facebook
if service in (WIKIA, "wikia"):
engine = Wikia
if service in (WIKIPEDIA, "wikipedia", "wp"):
engine = Wikipedia
if service in (FLICKR, "flickr"):
engine = Flickr
try:
kw = {}
for a in ("license", "throttle", "language"):
if a in kwargs:
kw[a] = kwargs.pop(a)
        return engine(**kw).search(string, **kwargs)
except UnboundLocalError:
raise SearchEngineError, "unknown search engine '%s'" % service
SERVICES = {
GOOGLE : Google,
YAHOO : Yahoo,
BING : Bing,
TWITTER : Twitter,
WIKIPEDIA : Wikipedia,
WIKIA : Wikia,
FLICKR : Flickr,
FACEBOOK : Facebook
}
def sort(terms=[], context="", service=GOOGLE, license=None, strict=True, reverse=False, **kwargs):
""" Returns a list of (percentage, term)-tuples for the given list of terms.
Sorts the terms in the list according to search result count.
When a context is defined, sorts according to relevancy to the context, e.g.:
sort(terms=["black", "green", "red"], context="Darth Vader") =>
yields "black" as the best candidate, because "black Darth Vader" is more common in search results.
- terms : list of search terms,
- context : term used for sorting,
- service : web service name (GOOGLE, YAHOO, BING),
- license : web service license id,
- strict : when True the query constructed from term + context is wrapped in quotes.
"""
service = SERVICES.get(service, SearchEngine)(license, language=kwargs.pop("language", None))
R = []
for word in terms:
q = reverse and context+" "+word or word+" "+context
        q = q.strip()
q = strict and "\"%s\"" % q or q
r = service.search(q, count=1, **kwargs)
R.append(r)
s = float(sum([r.total or 1 for r in R])) or 1.0
R = [((r.total or 1)/s, r.query) for r in R]
R = sorted(R, reverse=True)
return R
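# --- Worked example (not part of the original module) -------------------------
# The call below mirrors the docstring of sort(): it needs network access and,
# depending on the chosen service, an API license. Note that the second element
# of each returned tuple is the full query that was issued (term + context,
# quoted when strict=True), not the bare term.
if __name__ == "__main__":
    for weight, q in sort(["black", "green", "red"], context="Darth Vader", service=GOOGLE):
        print("%.2f %s" % (weight, q))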
SOUP = (
BeautifulSoup.BeautifulSoup,
BeautifulSoup.Tag,
BeautifulSoup.NavigableString,
BeautifulSoup.Comment
)
NODE, TEXT, COMMENT, ELEMENT, DOCUMENT = \
"node", "text", "comment", "element", "document"
class Node:
def __init__(self, html, type=NODE, **kwargs):
""" The base class for Text, Comment and Element.
All DOM nodes can be navigated in the same way (e.g. Node.parent, Node.children, ...)
"""
self.type = type
self._p = not isinstance(html, SOUP) and BeautifulSoup.BeautifulSoup(u(html), **kwargs) or html
@property
def _beautifulSoup(self):
# If you must, access the BeautifulSoup object with Node._beautifulSoup.
return self._p
def __eq__(self, other):
# Two Node objects containing the same BeautifulSoup object, are the same.
return isinstance(other, Node) and hash(self._p) == hash(other._p)
def _wrap(self, x):
# Navigating to other nodes yields either Text, Element or None.
if isinstance(x, BeautifulSoup.Comment):
return Comment(x)
if isinstance(x, BeautifulSoup.Declaration):
return Text(x)
if isinstance(x, BeautifulSoup.NavigableString):
return Text(x)
if isinstance(x, BeautifulSoup.Tag):
return Element(x)
@property
def parent(self):
return self._wrap(self._p.parent)
@property
def children(self):
return hasattr(self._p, "contents") and [self._wrap(x) for x in self._p.contents] or []
@property
def html(self):
return self.__unicode__()
@property
def source(self):
return self.__unicode__()
@property
def next_sibling(self):
return self._wrap(self._p.nextSibling)
@property
def previous_sibling(self):
return self._wrap(self._p.previousSibling)
next, previous = next_sibling, previous_sibling
def traverse(self, visit=lambda node: None):
""" Executes the visit function on this node and each of its child nodes.
"""
visit(self); [node.traverse(visit) for node in self.children]
def __len__(self):
return len(self.children)
def __iter__(self):
return iter(self.children)
def __getitem__(self, index):
return self.children[index]
def __repr__(self):
return "Node(type=%s)" % repr(self.type)
def __str__(self):
return bytestring(self.__unicode__())
def __unicode__(self):
return u(self._p)
class Text(Node):
""" Text represents a chunk of text without formatting in a HTML document.
For example: "the <b>cat</b>" is parsed to [Text("the"), Element("cat")].
"""
def __init__(self, string):
Node.__init__(self, string, type=TEXT)
def __repr__(self):
return "Text(%s)" % repr(self._p)
class Comment(Text):
""" Comment represents a comment in the HTML source code.
For example: "<!-- comment -->".
"""
def __init__(self, string):
Node.__init__(self, string, type=COMMENT)
def __repr__(self):
return "Comment(%s)" % repr(self._p)
class Element(Node):
def __init__(self, html):
""" Element represents an element or tag in the HTML source code.
For example: "<b>hello</b>" is a "b"-Element containing a child Text("hello").
"""
Node.__init__(self, html, type=ELEMENT)
@property
def tagname(self):
return self._p.name
tag = tagName = tagname
@property
def attributes(self):
return self._p._getAttrMap()
@property
def id(self):
return self.attributes.get("id")
def get_elements_by_tagname(self, v):
""" Returns a list of nested Elements with the given tag name.
The tag name can include a class (e.g. div.header) or an id (e.g. div#content).
"""
if isinstance(v, basestring) and "#" in v:
v1, v2 = v.split("#")
v1 = v1 in ("*","") or v1.lower()
return [Element(x) for x in self._p.findAll(v1, id=v2)]
if isinstance(v, basestring) and "." in v:
v1, v2 = v.split(".")
v1 = v1 in ("*","") or v1.lower()
return [Element(x) for x in self._p.findAll(v1, v2)]
return [Element(x) for x in self._p.findAll(v in ("*","") or v.lower())]
by_tag = getElementsByTagname = get_elements_by_tagname
def get_element_by_id(self, v):
""" Returns the first nested Element with the given id attribute value.
"""
return ([Element(x) for x in self._p.findAll(id=v, limit=1) or []]+[None])[0]
by_id = getElementById = get_element_by_id
def get_elements_by_classname(self, v):
""" Returns a list of nested Elements with the given class attribute value.
"""
return [Element(x) for x in (self._p.findAll(True, v))]
by_class = getElementsByClassname = get_elements_by_classname
def get_elements_by_attribute(self, **kwargs):
""" Returns a list of nested Elements with the given attribute value.
"""
return [Element(x) for x in (self._p.findAll(True, attrs=kwargs))]
by_attribute = getElementsByAttribute = get_elements_by_attribute
@property
def content(self):
""" Yields the element content as a unicode string.
"""
return u"".join([u(x) for x in self._p.contents])
@property
def source(self):
""" Yields the HTML source as a unicode string (tag + content).
"""
return u(self._p)
html = source
def __getattr__(self, k):
if k in self.__dict__:
return self.__dict__[k]
if k in self.attributes:
return self.attributes[k]
raise AttributeError, "'Element' object has no attribute '%s'" % k
def __repr__(self):
return "Element(tag='%s')" % bytestring(self.tagname)
class Document(Element):
def __init__(self, html, **kwargs):
""" Document is the top-level element in the Document Object Model.
It contains nested Element, Text and Comment nodes.
"""
# Aliases for BeautifulSoup optional parameters:
kwargs["selfClosingTags"] = kwargs.pop("self_closing", kwargs.get("selfClosingTags"))
Node.__init__(self, u(html).strip(), type=DOCUMENT, **kwargs)
@property
def declaration(self):
""" Yields the <!doctype> declaration, as a TEXT Node or None.
"""
for child in self.children:
if isinstance(child._p, BeautifulSoup.Declaration):
return child
@property
def head(self):
return self._wrap(self._p.head)
@property
def body(self):
return self._wrap(self._p.body)
@property
def tagname(self):
return None
tag = tagname
def __repr__(self):
return "Document()"
DOM = Document
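# --- Self-contained example (not part of the original module) -----------------
# Parsing a literal HTML string with the DOM classes above; no network access
# is needed (BeautifulSoup, imported by this module, does the parsing).
if __name__ == "__main__":
    dom = DOM("<div id='content'><p class='intro'>Hello <b>world</b>!</p></div>")
    print(dom.get_element_by_id("content").tagname)   # "div"
    for p in dom.get_elements_by_tagname("p.intro"):  # tag + class selector
        print(p.content)                              # "Hello <b>world</b>!"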
class Link:
def __init__(self, url, text="", relation="", referrer=""):
""" A hyperlink parsed from a HTML document, in the form:
<a href="url"", title="text", rel="relation">xxx</a>.
"""
self.url, self.text, self.relation, self.referrer = \
u(url), u(text), u(relation), u(referrer),
@property
def description(self):
return self.text
def __repr__(self):
return "Link(url=%s)" % repr(self.url)
# Used for sorting in Spider.links:
def __eq__(self, link):
return self.url == link.url
def __ne__(self, link):
return self.url != link.url
def __lt__(self, link):
return self.url < link.url
def __gt__(self, link):
return self.url > link.url
class HTMLLinkParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
def parse(self, html, url=""):
""" Returns a list of Links parsed from the given HTML string.
"""
if html is None:
return None
self._url = url
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return self._data
def handle_starttag(self, tag, attributes):
if tag == "a":
attributes = dict(attributes)
if "href" in attributes:
link = Link(url = attributes.get("href"),
text = attributes.get("title"),
relation = attributes.get("rel", ""),
referrer = self._url)
self._data.append(link)
def base(url):
""" Returns the URL domain name:
http://en.wikipedia.org/wiki/Web_crawler => en.wikipedia.org
"""
return urlparse.urlparse(url).netloc
def abs(url, base=None):
""" Returns the absolute URL:
../media + http://en.wikipedia.org/wiki/ => http://en.wikipedia.org/media
"""
if url.startswith("#") and not base is None and not base.endswith("/"):
if not re.search("[^/]/[^/]", base):
base += "/"
return urlparse.urljoin(base, url)
DEPTH = "depth"
BREADTH = "breadth"
FIFO = "fifo" # First In, First Out.
FILO = "filo" # First In, Last Out.
LIFO = "lifo" # Last In, First Out (= FILO).
class Spider:
def __init__(self, links=[], domains=[], delay=20.0, parser=HTMLLinkParser().parse, sort=FIFO):
""" A spider can be used to browse the web in an automated manner.
It visits the list of starting URLs, parses links from their content, visits those, etc.
- Links can be prioritized by overriding Spider.priority().
- Links can be ignored by overriding Spider.follow().
- Each visited link is passed to Spider.visit(), which can be overridden.
"""
self.parse = parser
self.delay = delay # Delay between visits to the same (sub)domain.
self.domains = domains # Domains the spider is allowed to visit.
self.history = {} # Domain name => time last visited.
self.visited = {} # URLs visited.
self._queue = [] # URLs scheduled for a visit: (priority, time, Link).
self._queued = {} # URLs scheduled so far, lookup dictionary.
self.QUEUE = 10000 # Increase or decrease according to available memory.
self.sort = sort
# Queue given links in given order:
for link in (isinstance(links, basestring) and [links] or links):
self.push(link, priority=1.0, sort=FIFO)
@property
def done(self):
""" Yields True if no further links are scheduled to visit.
"""
return len(self._queue) == 0
def push(self, link, priority=1.0, sort=FILO):
""" Pushes the given link to the queue.
Position in the queue is determined by priority.
Equal ranks are sorted FIFO or FILO.
            With priority=1.0 and sort=FILO, the link is inserted near the front of the queue.
            With priority=0.0 and sort=FIFO, the link is appended near the end of the queue.
"""
if not isinstance(link, Link):
link = Link(url=link)
dt = time.time()
dt = sort == FIFO and dt or 1 / dt
bisect.insort(self._queue, (1 - priority, dt, link))
self._queued[link.url] = True
def pop(self, remove=True):
""" Returns the next Link queued to visit and removes it from the queue.
Links on a recently visited (sub)domain are skipped until Spider.delay has elapsed.
"""
now = time.time()
for i, (priority, dt, link) in enumerate(self._queue):
if self.delay <= now - self.history.get(base(link.url), 0):
if remove is True:
self._queue.pop(i)
self._queued.pop(link.url, None)
return link
@property
def next(self):
""" Returns the next Link queued to visit (without removing it).
"""
return self.pop(remove=False)
def crawl(self, method=DEPTH, **kwargs):
""" Visits the next link in Spider._queue.
If the link is on a domain recently visited (< Spider.delay) it is skipped.
Parses the content at the link for new links and adds them to the queue,
according to their Spider.priority().
Visited links (and content) are passed to Spider.visit().
"""
link = self.pop()
if link is None:
return False
if link.url not in self.visited:
t = time.time()
url = URL(link.url)
if url.mimetype == "text/html":
try:
kwargs.setdefault("unicode", True)
html = url.download(**kwargs)
for new in self.parse(html, url=link.url):
new.url = abs(new.url, base=url.redirect or link.url)
new.url = self.normalize(new.url)
# 1) Parse new links from HTML web pages.
# 2) Schedule unknown links for a visit.
# 3) Only links that are not already queued are queued.
# 4) Only links for which Spider.follow() is True are queued.
# 5) Only links on Spider.domains are queued.
if new.url in self.visited:
continue
if new.url in self._queued:
continue
if self.follow(new) is False:
continue
if self.domains and not base(new.url).endswith(tuple(self.domains)):
continue
# 6) Limit the queue (remove tail), unless you are Google.
if self.QUEUE is not None and \
self.QUEUE * 1.25 < len(self._queue):
self._queue = self._queue[:self.QUEUE]
self._queued.clear()
self._queued.update(dict((q[2].url, True) for q in self._queue))
# 7) Position in the queue is determined by Spider.priority().
# 8) Equal ranks are sorted FIFO or FILO.
self.push(new, priority=self.priority(new, method=method), sort=self.sort)
self.visit(link, source=html)
except URLError:
# URL can not be reached (HTTP404NotFound, URLTimeout).
self.fail(link)
else:
# URL MIME-type is not HTML, don't know how to handle.
self.fail(link)
# Log the current time visited for the domain (see Spider.pop()).
# Log the URL as visited.
self.history[base(link.url)] = time.time()
self.visited[link.url] = True
return True
# Nothing happened, we already visited this link.
return False
def normalize(self, url):
""" Called from Spider.crawl() to normalize URLs.
For example: return url.split("?")[0]
"""
# All links pass through here (visited or not).
# This can be a place to count backlinks.
return url
def follow(self, link):
""" Called from Spider.crawl() to determine if it should follow this link.
For example: return "nofollow" not in link.relation
"""
return True
def priority(self, link, method=DEPTH):
""" Called from Spider.crawl() to determine the priority of this link,
as a number between 0.0-1.0. Links with higher priority are visited first.
"""
# Depth-first search dislikes external links to other (sub)domains.
external = base(link.url) != base(link.referrer)
if external is True:
if method == DEPTH:
return 0.75
if method == BREADTH:
return 0.85
return 0.80
def visit(self, link, source=None):
""" Called from Spider.crawl() when the link is crawled.
When source=None, the link is not a web page (and was not parsed),
            or possibly a URLTimeout occurred (content size too big).
"""
pass
def fail(self, link):
""" Called from Spider.crawl() for link whose MIME-type could not be determined,
or which raised a URLError on download.
"""
pass
Crawler = Spider
def crawl(links=[], domains=[], delay=20.0, parser=HTMLLinkParser().parse, sort=FIFO, method=DEPTH, **kwargs):
""" Returns a generator that yields (Link, source)-tuples of visited pages.
When the crawler is busy, it yields (None, None).
When the crawler is done, it yields None.
"""
    # The scenarios below define "busy":
# - crawl(delay=10, throttle=0)
# The crawler will wait 10 seconds before visiting the same subdomain.
# The crawler will not throttle downloads, so the next link is visited instantly.
# So sometimes (None, None) is returned while it waits for an available subdomain.
# - crawl(delay=0, throttle=10)
# The crawler will halt 10 seconds after each visit.
# The crawler will not delay before visiting the same subdomain.
# So usually a result is returned each crawl.next(), but each call takes 10 seconds.
# - asynchronous(crawl().next)
# AsynchronousRequest.value is set to (Link, source) once AsynchronousRequest.done=True.
# The program will not halt in the meantime (i.e., the next crawl is threaded).
crawler = Crawler(links, domains, delay, parser, sort)
bind(crawler, "visit", \
lambda crawler, link, source=None: \
setattr(crawler, "crawled", (link, source))) # Define Crawler.visit() on-the-fly.
while not crawler.done:
crawler.crawled = (None, None)
crawler.crawl(method, **kwargs)
yield crawler.crawled
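# --- Subclass sketch (not part of the original module) ------------------------
# A minimal Spider subclass along the lines suggested by the docstrings above:
# follow() honours rel="nofollow" and visit() just reports the URL. The start
# URL and domain below are illustrative; crawling requires network access.
class _PrintSpider(Spider):
    def follow(self, link):
        return "nofollow" not in link.relation
    def visit(self, link, source=None):
        print("visited: %s (referrer: %s)" % (link.url, link.referrer))
if __name__ == "__main__":
    spider = _PrintSpider(links=["http://www.clips.ua.ac.be/"], domains=["clips.ua.ac.be"], delay=5.0)
    while not spider.done:
        spider.crawl(method=BREADTH, cached=False, throttle=5)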
class PDFParseError(Exception):
pass
class PDF:
def __init__(self, data, format=None):
""" Plaintext parsed from the given PDF data.
"""
self.content = self._parse(data, format)
@property
def string(self):
return self.content
def __unicode__(self):
return self.content
def _parse(self, data, format=None):
# The output will be ugly: it may be useful for mining but probably not for displaying.
# You can also try PDF(data, format="html") to preserve some layout information.
from pdf.pdfinterp import PDFResourceManager, process_pdf
from pdf.converter import TextConverter, HTMLConverter
from pdf.layout import LAParams
s = ""
m = PDFResourceManager()
try:
# Given data is a PDF file path.
data = os.path.exists(data) and open(data) or StringIO.StringIO(data)
except TypeError:
# Given data is a PDF string.
data = StringIO.StringIO(data)
try:
stream = StringIO.StringIO()
parser = format=="html" and HTMLConverter or TextConverter
parser = parser(m, stream, codec="utf-8", laparams=LAParams())
process_pdf(m, parser, data, set(), maxpages=0, password="")
except Exception, e:
raise PDFParseError, str(e)
s = stream.getvalue()
s = decode_utf8(s)
s = s.strip()
s = re.sub(r"([a-z])\-\n", "\\1", s) # Join hyphenated words.
s = s.replace("\n\n", "<!-- paragraph -->") # Preserve paragraph spacing.
s = s.replace("\n", " ")
s = s.replace("<!-- paragraph -->", "\n\n")
s = collapse_spaces(s)
return s
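# --- Usage sketch (not part of the original module) ---------------------------
# PDF() accepts either a file path or raw PDF data; "paper.pdf" is a placeholder
# path. The pdfminer-based "pdf" package imported inside PDF._parse() must be
# installed for this to run.
if __name__ == "__main__":
    document = PDF("paper.pdf")
    print(document.string[:300])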
|
import os
shoop1=[
("家电类"),
("衣服类"),
("手机类"),
("车类"),
]
jiadianshoop=[
("电冰箱",20000),
("彩电",2000),
("洗衣机",400),
("脸盆",30),
("牙刷",50)
]
flag=True
long=len(jiadianshoop)
def f():
    # duplicated code: the same category menu / checkout flow as in the main loop below
    global cash  # cash is assigned below; without this, reading it on the first purchase raises UnboundLocalError
    flag=True
long=len(jiadianshoop)
while flag:
for i in enumerate(shoop1):
weizhi=i[0]
shangping=i[1]
print(weizhi,shangping)
choose=input("请选择你要购买的商品类别")
choose=choose.strip()
if choose.isdigit():
choose=int(choose)
if choose<len(shoop1):
print("你选择范围正确")
if choose==0:
print("你选择了家电类")
while flag:
for i in enumerate(jiadianshoop):
weizhi=i[0]
wuping=i[1][0]
jiage=i[1][1]
print(weizhi,wuping,jiage)
choose2=input("请选择你要购买的物品,要结算请在物品编号最后加j,例如1,2,3,j 不结算的就不加j:")
choose2.strip()
#choose2=int(choose2)
if choose2=="q":
print("谢谢光临,欢迎下次再来")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose2=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
l=list(choose2)
end=choose2[-1]
if end=="j":
print("你选择了结算,马上结算")
for i in choose2.split(","):
if i!="j":
i=int(i)
if i<long:
#print("你输入的商品号合法")
jiage=jiadianshoop[i][1]
if jiage<=cash:
print("你的余额足够")
cash=cash-jiage #扣钱
wuping=jiadianshoop[i][0]
n=open(user,"a") #购买的历史物品按用户名历史保存
n.write(wuping+"\n")
n.close()
cash=str(cash) #转换成str形势保存
q=open("cash"+user,"w") #账户的余额历史保存
q.write(cash)
q.close()
cash=int(cash) #存好了转换会int
print("你已经成功购买,现在账户余额还剩%d元"%(cash))
                                                #save this purchase to the purchase history
wupingdeqjiege=jiadianshoop[i][1]
wupingdeqjiege=str(wupingdeqjiege)
wuping=str(wuping)
jilu=open("jilu"+user,"a")
#time=time.strftime('%Y-%m-%d %H:%M:%S')
#print(time)
#time=str(time)
jilu.write(wuping+" "+wupingdeqjiege+"\n")
jilu.close()
else:
print("你的余额不足,请充值")
else:
print("你输入的商品号不合法,重新输入")
elif choose==1:
print("你选择了衣服类")
elif choose==2:
print("你选择了手机类")
elif choose==3:
print("你选择了车类")
elif choose=="q":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
else:
print("你选择的商品编号不在范围之内")
elif choose=="q":
print("bye")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
else:
print("你选择的不是菜单")
#duplicated code
while True:
user=input("请输入用户名:")
passwd=input("请输入密码:")
user=user.strip()
passwd=passwd.strip()
    #check whether the username has been registered before
if os.path.exists(user+".txt"):
#print("你是我们网站的会员")
now_user=open(user+".txt").read()
now_passwd=open(user+".pass.txt").read()
if user==now_user and passwd == now_passwd:
print("你输入的账户密码正确,成功登陆")
if os.path.exists("cash"+user):
cash=open("cash"+user).read()
cash=int(cash)
print("欢迎回来,你是我们网站的会员,你的余额还有%d元,是否要继续充值?,选Y/N"%(cash))
choose=input("Y/N")
if choose=="y" or choose=="Y":
print("你选择了充值")
jiaqian=input("请输入你要充值的金额;")
jiaqian=jiaqian.strip()
if jiaqian.isdigit():
jiaqian=int(jiaqian)
print("你输入的金额合法")
cash=jiaqian+cash
cash=str(cash)
n=open("cash"+user,"w")
n.write(cash)
n.close()
cash=int(cash)
print("充值成功,你现在账户余额为%d"%(cash))
                #duplicated code
while flag:
for i in enumerate(shoop1):
weizhi=i[0]
shangping=i[1]
print(weizhi,shangping)
choose=input("请选择你要购买的商品类别")
choose=choose.strip()
if choose.isdigit():
choose=int(choose)
if choose<len(shoop1):
print("你选择范围正确")
if choose==0:
print("你选择了家电类")
while flag:
for i in enumerate(jiadianshoop):
weizhi=i[0]
wuping=i[1][0]
jiage=i[1][1]
print(weizhi,wuping,jiage)
choose2=input("请选择你要购买的物品,要结算请在物品编号最后加j,例如1,2,3,j 不结算的就不加j:")
choose2.strip()
#choose2=int(choose2)
if choose2=="q":
print("谢谢光临,欢迎下次再来")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose2=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
l=list(choose2)
end=choose2[-1]
if end=="j":
print("你选择了结算,马上结算")
for i in choose2.split(","):
if i!="j":
i=int(i)
if i<long:
#print("你输入的商品号合法")
jiage=jiadianshoop[i][1]
if jiage<=cash:
print("你的余额足够")
cash=cash-jiage #扣钱
wuping=jiadianshoop[i][0]
n=open(user,"a") #购买的历史物品按用户名历史保存
n.write(wuping+"\n")
n.close()
cash=str(cash) #转换成str形势保存
q=open("cash"+user,"w") #账户的余额历史保存
q.write(cash)
q.close()
cash=int(cash) #存好了转换会int
print("你已经成功购买,现在账户余额还剩%d元"%(cash))
                                                        #save this purchase to the purchase history
wupingdeqjiege=jiadianshoop[i][1]
wupingdeqjiege=str(wupingdeqjiege)
wuping=str(wuping)
jilu=open("jilu"+user,"a")
#time=time.strftime('%Y-%m-%d %H:%M:%S')
#print(time)
#time=str(time)
jilu.write(wuping+" "+wupingdeqjiege+"\n")
jilu.close()
else:
print("你的余额不足,请充值")
else:
print("你输入的商品号不合法,重新输入")
elif choose==1:
print("你选择了衣服类")
elif choose==2:
print("你选择了手机类")
elif choose==3:
print("你选择了车类")
elif choose=="q":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
else:
print("你选择的商品编号不在范围之内")
elif choose=="q":
print("bye")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
else:
print("你选择的不是菜单")
                #duplicated code
else:
print("你输入的金额不合法")
elif choose=="n" or choose=="N":
print("你选择了不充值")
f()
else:
print("你输入的选择不合法")
else:
cash=input("请输入你要充值的金额:")
cash=cash.strip()
if cash.isdigit():
cash=int(cash)
print("你输入的金额格式正确")
else:
exit("你输入的金额格式不正确")
while flag:
for i in enumerate(shoop1):
weizhi=i[0]
shangping=i[1]
print(weizhi,shangping)
choose=input("请选择你要购买的商品类别")
choose=choose.strip()
if choose.isdigit():
choose=int(choose)
if choose<len(shoop1):
print("你选择范围正确")
if choose==0:
print("你选择了家电类")
while flag:
for i in enumerate(jiadianshoop):
weizhi=i[0]
wuping=i[1][0]
jiage=i[1][1]
print(weizhi,wuping,jiage)
choose2=input("请选择你要购买的物品,要结算请在物品编号最后加j,例如1,2,3,j 不结算的就不加j:")
choose2.strip()
#choose2=int(choose2)
if choose2=="q":
print("谢谢光临,欢迎下次再来")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose2=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
l=list(choose2)
end=choose2[-1]
if end=="j":
print("你选择了结算,马上结算")
for i in choose2.split(","):
if i!="j":
i=int(i)
if i<long:
#print("你输入的商品号合法")
jiage=jiadianshoop[i][1]
if jiage<=cash:
print("你的余额足够")
cash=cash-jiage #扣钱
wuping=jiadianshoop[i][0]
n=open(user,"a") #购买的历史物品按用户名历史保存
n.write(wuping+"\n")
n.close()
cash=str(cash) #转换成str形势保存
q=open("cash"+user,"w") #账户的余额历史保存
q.write(cash)
q.close()
cash=int(cash) #存好了转换会int
print("你已经成功购买,现在账户余额还剩%d元"%(cash))
                                                        #save this purchase to the purchase history
wupingdeqjiege=jiadianshoop[i][1]
wupingdeqjiege=str(wupingdeqjiege)
wuping=str(wuping)
jilu=open("jilu"+user,"a")
#time=time.strftime('%Y-%m-%d %H:%M:%S')
#print(time)
#time=str(time)
jilu.write(wuping+" "+wupingdeqjiege+"\n")
jilu.close()
else:
print("你的余额不足,请充值")
else:
print("你输入的商品号不合法,重新输入")
elif choose==1:
print("你选择了衣服类")
elif choose==2:
print("你选择了手机类")
elif choose==3:
print("你选择了车类")
elif choose=="q":
print("bye")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
else:
print("你选择的商品编号不在范围之内")
elif choose=="q" :
print("bye")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
else:
print("你选择的不是菜单里面的内容")
else:
print("你输入的密码不正确,请重新输入")
else:
        #write the username to file
n=open(user+".txt","w")
n.write(user)
n.close()
now_user=open(user+".txt").read()
        #finished writing the username
        #write the password to file
n=open(user+".pass.txt","w")
n.write(passwd)
n.close()
now_passwd=open(user+".pass.txt").read()
        #finished writing the password
if os.path.exists("cash"+user):
cash=open("cash"+user).read()
cash=int(cash)
print("欢迎回来,你是我们网站的会员,你的余额还有%d元,是否要继续充值?,选Y/N"%(cash))
choose=input("Y/N")
if choose=="y" or choose=="Y":
print("你选择了充值")
jiaqian=input("请输入你要充值的金额;")
jiaqian=jiaqian.strip()
if jiaqian.isdigit():
jiaqian=int(jiaqian)
print("你输入的金额合法")
cash=jiaqian+cash
cash=str(cash)
n=open("cash"+user,"w")
n.write(cash)
n.close()
cash=int(cash)
print("充值成功,你现在账户余额为%d"%(cash))
                #duplicated code
while flag:
for i in enumerate(shoop1):
weizhi=i[0]
shangping=i[1]
print(weizhi,shangping)
choose=input("请选择你要购买的商品类别")
choose=choose.strip()
if choose.isdigit():
choose=int(choose)
if choose<len(shoop1):
print("你选择范围正确")
if choose==0:
print("你选择了家电类")
while flag:
for i in enumerate(jiadianshoop):
weizhi=i[0]
wuping=i[1][0]
jiage=i[1][1]
print(weizhi,wuping,jiage)
choose2=input("请选择你要购买的物品,要结算请在物品编号最后加j,例如1,2,3,j 不结算的就不加j:")
choose2.strip()
#choose2=int(choose2)
if choose2=="q":
print("谢谢光临,欢迎下次再来")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose2=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
l=list(choose2)
end=choose2[-1]
if end=="j":
print("你选择了结算,马上结算")
for i in choose2.split(","):
if i!="j":
i=int(i)
if i<long:
#print("你输入的商品号合法")
jiage=jiadianshoop[i][1]
if jiage<=cash:
print("你的余额足够")
cash=cash-jiage #扣钱
wuping=jiadianshoop[i][0]
n=open(user,"a") #购买的历史物品按用户名历史保存
n.write(wuping+"\n")
n.close()
cash=str(cash) #转换成str形势保存
q=open("cash"+user,"w") #账户的余额历史保存
q.write(cash)
q.close()
cash=int(cash) #存好了转换会int
print("你已经成功购买,现在账户余额还剩%d元"%(cash))
                                                        #save this purchase to the purchase history
wupingdeqjiege=jiadianshoop[i][1]
wupingdeqjiege=str(wupingdeqjiege)
wuping=str(wuping)
jilu=open("jilu"+user,"a")
#time=time.strftime('%Y-%m-%d %H:%M:%S')
#print(time)
#time=str(time)
jilu.write(wuping+" "+wupingdeqjiege+"\n")
jilu.close()
else:
print("你的余额不足,请充值")
else:
print("你输入的商品号不合法,重新输入")
elif choose==1:
print("你选择了衣服类")
elif choose==2:
print("你选择了手机类")
elif choose==3:
print("你选择了车类")
elif choose=="q":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
else:
print("你选择的商品编号不在范围之内")
elif choose=="q":
print("bye")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
else:
print("你选择的不是菜单")
                #duplicated code
else:
print("你输入的金额不合法")
elif choose=="n" or choose=="N":
print("你选择了不充值")
else:
print("你输入的选择不合法")
else:
cash=input("请输入你要充值的金额:")
cash=cash.strip()
if cash.isdigit():
cash=int(cash)
print("你输入的金额格式正确")
else:
exit("你输入的金额格式不正确")
while flag:
for i in enumerate(shoop1):
weizhi=i[0]
shangping=i[1]
print(weizhi,shangping)
choose=input("请选择你要购买的商品类别")
choose=choose.strip()
if choose.isdigit():
choose=int(choose)
if choose<len(shoop1):
print("你选择范围正确")
if choose==0:
print("你选择了家电类")
while flag:
for i in enumerate(jiadianshoop):
weizhi=i[0]
wuping=i[1][0]
jiage=i[1][1]
print(weizhi,wuping,jiage)
choose2=input("请选择你要购买的物品,要结算请在物品编号最后加j,例如1,2,3,j 不结算的就不加j:")
choose2.strip()
#choose2=int(choose2)
if choose2=="q":
print("谢谢光临,欢迎下次再来")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
elif choose2=="c":
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
break
l=list(choose2)
end=choose2[-1]
if end=="j":
print("你选择了结算,马上结算")
for i in choose2.split(","):
if i!="j":
i=int(i)
if i<long:
#print("你输入的商品号合法")
jiage=jiadianshoop[i][1]
if jiage<=cash:
print("你的余额足够")
cash=cash-jiage #扣钱
wuping=jiadianshoop[i][0]
n=open(user,"a") #购买的历史物品按用户名历史保存
n.write(wuping+"\n")
n.close()
cash=str(cash) #转换成str形势保存
q=open("cash"+user,"w") #账户的余额历史保存
q.write(cash)
q.close()
cash=int(cash) #存好了转换会int
print("你已经成功购买,现在账户余额还剩%d元"%(cash))
                                                        #save this purchase to the purchase history
wupingdeqjiege=jiadianshoop[i][1]
wupingdeqjiege=str(wupingdeqjiege)
wuping=str(wuping)
jilu=open("jilu"+user,"a")
#time=time.strftime('%Y-%m-%d %H:%M:%S')
#print(time)
#time=str(time)
jilu.write(wuping+" "+wupingdeqjiege+"\n")
jilu.close()
else:
print("你的余额不足,请充值")
else:
print("你输入的商品号不合法,重新输入")
elif choose==1:
print("你选择了衣服类")
elif choose==2:
print("你选择了手机类")
elif choose==3:
print("你选择了车类")
elif choose=="q":
print("bye")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
else:
print("你选择的商品编号不在范围之内")
elif choose=="q" :
print("bye")
if os.path.exists("jilu"+user):
x=open("jilu"+user).read()
print(x)
else:
print("你还没购物")
flag=False
break
else:
print("你选择的不是菜单里面的内容")
|
import os
import numpy as np
import sympy
import cirq
import recirq
@recirq.json_serializable_dataclass(namespace='recirq.readout_scan',
registry=recirq.Registry,
frozen=True)
class ReadoutScanTask:
"""Scan over Ry(theta) angles from -pi/2 to 3pi/2 tracing out a sinusoid
which is primarily affected by readout error.
See Also:
:py:func:`run_readout_scan`
Attributes:
dataset_id: A unique identifier for this dataset.
device_name: The device to run on, by name.
n_shots: The number of repetitions for each theta value.
qubit: The qubit to benchmark.
resolution_factor: We select the number of points in the linspace
so that the special points: (-1/2, 0, 1/2, 1, 3/2) * pi are
always included. The total number of theta evaluations
is resolution_factor * 4 + 1.
"""
dataset_id: str
device_name: str
n_shots: int
qubit: cirq.GridQubit
resolution_factor: int
@property
def fn(self):
n_shots = _abbrev_n_shots(n_shots=self.n_shots)
qubit = _abbrev_grid_qubit(self.qubit)
return (f'{self.dataset_id}/'
f'{self.device_name}/'
f'q-{qubit}/'
f'ry_scan_{self.resolution_factor}_{n_shots}')
def _abbrev_n_shots(n_shots: int) -> str:
"""Shorter n_shots component of a filename"""
if n_shots % 1000 == 0:
return f'{n_shots // 1000}k'
return str(n_shots)
def _abbrev_grid_qubit(qubit: cirq.GridQubit) -> str:
"""Formatted grid_qubit component of a filename"""
return f'{qubit.row}_{qubit.col}'
EXPERIMENT_NAME = 'readout-scan'
DEFAULT_BASE_DIR = os.path.expanduser(f'~/cirq-results/{EXPERIMENT_NAME}')
def run_readout_scan(task: ReadoutScanTask,
base_dir=None):
"""Execute a :py:class:`ReadoutScanTask` task."""
if base_dir is None:
base_dir = DEFAULT_BASE_DIR
if recirq.exists(task, base_dir=base_dir):
print(f"{task} already exists. Skipping.")
return
# Create a simple circuit
theta = sympy.Symbol('theta')
circuit = cirq.Circuit([
cirq.ry(theta).on(task.qubit),
cirq.measure(task.qubit, key='z')
])
# Use utilities to map sampler names to Sampler objects
sampler = recirq.get_sampler_by_name(device_name=task.device_name)
# Use a sweep over theta values.
# Set up limits so we include (-1/2, 0, 1/2, 1, 3/2) * pi
# The total number of points is resolution_factor * 4 + 1
n_special_points: int = 5
resolution_factor = task.resolution_factor
theta_sweep = cirq.Linspace(theta, -np.pi / 2, 3 * np.pi / 2,
resolution_factor * (n_special_points - 1) + 1)
thetas = np.asarray([v for ((k, v),) in theta_sweep.param_tuples()])
flat_circuit, flat_sweep = cirq.flatten_with_sweep(circuit, theta_sweep)
# Run the jobs
print(f"Collecting data for {task.qubit}", flush=True)
results = sampler.run_sweep(program=flat_circuit, params=flat_sweep,
repetitions=task.n_shots)
# Save the results
recirq.save(task=task, data={
'thetas': thetas,
'all_bitstrings': [
recirq.BitArray(np.asarray(r.measurements['z']))
for r in results]
}, base_dir=base_dir)
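if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): run the scan once.
    # The device name 'Syc23-simulator' is an assumption about the names that
    # recirq.get_sampler_by_name() accepts; substitute a sampler registered in
    # your recirq installation. Results are written under DEFAULT_BASE_DIR.
    example_task = ReadoutScanTask(
        dataset_id='2020-02-example',
        device_name='Syc23-simulator',
        n_shots=40_000,
        qubit=cirq.GridQubit(3, 2),
        resolution_factor=6,
    )
    run_readout_scan(example_task)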
|
from symbol.builder import add_anchor_to_arg
from models.FPN.builder import MSRAResNet50V1FPN as Backbone
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
from models.msrcnn.builder import MaskScoringFasterRcnn as Detector
from models.msrcnn.builder import MaskFPNRpnHead as RpnHead
from models.msrcnn.builder import MaskFasterRcnn4ConvHead as MaskHead
from models.maskrcnn.builder import BboxPostProcessor
from models.maskrcnn.process_output import process_output
from models.msrcnn.builder import MaskIoUConvHead as MaskIoUHead
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 2 if is_train else 1
fp16 = False
loader_worker = 8
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
normalizer = normalizer_factory(type="fixbn")
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class RpnParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
nnvm_proposal = True
nnvm_rpn_target = False
class anchor_generate:
scale = (8,)
ratio = (0.5, 1.0, 2.0)
stride = (4, 8, 16, 32, 64)
image_anchor = 256
max_side = 1400
class anchor_assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
image_anchor = 256
pos_fraction = 0.5
class head:
conv_channel = 256
mean = (0, 0, 0, 0)
std = (1, 1, 1, 1)
class proposal:
pre_nms_top_n = 2000 if is_train else 1000
post_nms_top_n = 2000 if is_train else 1000
nms_thr = 0.7
min_bbox_side = 0
class subsample_proposal:
proposal_wo_gt = False
image_roi = 512
fg_fraction = 0.25
fg_thr = 0.5
bg_thr_hi = 0.5
bg_thr_lo = 0.0
class bbox_target:
num_reg_class = 81
class_agnostic = False
weight = (1.0, 1.0, 1.0, 1.0)
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = 1 + 80
image_roi = 512
batch_image = General.batch_image
class regress_target:
class_agnostic = False
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class MaskParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
resolution = 28
dim_reduced = 256
num_fg_roi = int(RpnParam.subsample_proposal.image_roi * RpnParam.subsample_proposal.fg_fraction)
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 7
stride = (4, 8, 16, 32)
roi_canonical_scale = 224
roi_canonical_level = 4
img_roi = 1000
class MaskRoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 14
stride = (4, 8, 16, 32)
roi_canonical_scale = 224
roi_canonical_level = 4
img_roi = 100
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = None
class schedule:
mult = 1
begin_epoch = 0
end_epoch = 6 * mult
lr_iter = [60000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
80000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3.0
iter = 500
class TestParam:
min_det_score = 0.05
max_det_per_image = 100
process_roidb = lambda x: x
process_output = lambda x, y: process_output(x, y)
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam, MaskParam)
roi_extractor = RoiExtractor(RoiParam)
mask_roi_extractor = RoiExtractor(MaskRoiParam)
bbox_head = BboxHead(BboxParam)
mask_head = MaskHead(BboxParam, MaskParam, MaskRoiParam)
bbox_post_processer = BboxPostProcessor(TestParam)
maskiou_head = MaskIoUHead(TestParam, BboxParam, MaskParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head, maskiou_head)
test_sym = None
else:
train_sym = None
test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head, maskiou_head, bbox_post_processer)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet-v1-50"
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
excluded_param = ["mask_fcn"]
def process_weight(sym, arg, aux):
for stride in RpnParam.anchor_generate.stride:
add_anchor_to_arg(
sym, arg, aux, RpnParam.anchor_generate.max_side,
stride, RpnParam.anchor_generate.scale,
RpnParam.anchor_generate.ratio)
# data processing
class NormParam:
mean = (122.7717, 115.9465, 102.9801) # RGB order
std = (1.0, 1.0, 1.0)
# data processing
class ResizeParam:
short = 800
long = 1333
class PadParam:
short = 800
long = 1333
max_num_gt = 100
max_len_gt_poly = 2500
class AnchorTarget2DParam:
def __init__(self):
self.generate = self._generate()
class _generate:
def __init__(self):
self.stride = (4, 8, 16, 32, 64)
self.short = (200, 100, 50, 25, 13)
self.long = (334, 167, 84, 42, 21)
scales = (8)
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
class sample:
image_anchor = 256
pos_fraction = 0.5
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord, Norm2DImage, Pad2DImage
from models.maskrcnn.input import PreprocessGtPoly, EncodeGtPoly, \
Resize2DImageBboxMask, Flip2DImageBboxMask, Pad2DImageBboxMask
from models.FPN.input import PyramidAnchorTarget2D
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
PreprocessGtPoly(),
Resize2DImageBboxMask(ResizeParam),
Flip2DImageBboxMask(),
EncodeGtPoly(PadParam),
Pad2DImageBboxMask(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["im_info", "gt_bbox", "gt_poly"]
if not RpnParam.nnvm_rpn_target:
transform.append(PyramidAnchorTarget2D(AnchorTarget2DParam()))
label_name += ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
import core.detection_metric as metric
from models.msrcnn.metric import SigmoidCELossMetric, L2
rpn_acc_metric = metric.AccWithIgnore(
"RpnAcc",
["rpn_cls_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
rpn_l1_metric = metric.L1(
"RpnL1",
["rpn_reg_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
# for bbox, the label is generated in network so it is an output
box_acc_metric = metric.AccWithIgnore(
"RcnnAcc",
["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
[]
)
box_l1_metric = metric.L1(
"RcnnL1",
["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
[]
)
mask_cls_metric = SigmoidCELossMetric(
"MaskCE",
["mask_loss_output"],
[]
)
iou_l2_metric = L2(
"IoUL2",
["iou_head_loss_output"],
[]
)
metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric, mask_cls_metric, iou_l2_metric]
return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
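# Hedged usage sketch (not part of the original config): this is roughly how a
# simpledet-style training script consumes get_config(); the unpacking order
# matches the return statement above. Building the symbols requires the full
# simpledet/MXNet environment and the pretrained weights referenced above.
if __name__ == "__main__":
    (General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam,
     ModelParam, OptimizeParam, TestParam,
     transform, data_name, label_name, metric_list) = get_config(is_train=True)
    print(General.name, OptimizeParam.optimizer.lr, len(metric_list))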
|
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
args = parser.parse_args()
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
notebook_config['key_name'] = os.environ['conf_key_name']
notebook_config['user_keyname'] = os.environ['edge_user_name']
notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'],
notebook_config['exploratory_name'], args.uuid)
notebook_config['expected_image_name'] = '{}-{}-notebook-image'.format(notebook_config['service_base_name'],
os.environ['application'])
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
.format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['edge_user_name'])
notebook_config['security_group_name'] = '{}-{}-nb-SG'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'])
notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
# generating variables regarding EDGE proxy on Notebook instance
instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
edge_instance_name = os.environ['conf_service_base_name'] + "-" + os.environ['edge_user_name'] + '-edge'
edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
try:
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
notebook_config['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed creating ssh user 'dlab'.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
try:
logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
print('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure proxy.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
try:
logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
params = "--hostname {} --keyfile {} --user {} --region {}".\
format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed installing apps: apt & pip.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
    # installing and configuring jupyter and all dependencies
try:
logging.info('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
print('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
params = "--hostname {} " \
"--keyfile {} " \
"--region {} " \
"--spark_version {} " \
"--hadoop_version {} " \
"--os_user {} " \
"--scala_version {} " \
"--r_mirror {} " \
"--exploratory_name {}".\
format(instance_hostname,
keyfile_name,
os.environ['aws_region'],
os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'],
notebook_config['dlab_ssh_user'],
os.environ['notebook_scala_version'],
os.environ['notebook_r_mirror'],
notebook_config['exploratory_name'])
try:
local("~/scripts/{}.py {}".format('configure_jupyter_node', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure jupyter.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[INSTALLING USERs KEY]')
logging.info('[INSTALLING USERs KEY]')
additional_config = {"user_keyname": notebook_config['user_keyname'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
instance_hostname, keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
append_result("Failed installing users key")
raise Exception
except Exception as err:
append_result("Failed installing users key.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[SETUP USER GIT CREDENTIALS]')
logging.info('[SETUP USER GIT CREDENTIALS]')
params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
.format(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
try:
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
append_result("Failed setup git credentials")
raise Exception
except Exception as err:
append_result("Failed to setup git credentials.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
logging.info('[POST CONFIGURING PROCESS]')
        print('[POST CONFIGURING PROCESS]')
if notebook_config['notebook_image_name'] not in [notebook_config['expected_image_name'], 'None']:
params = "--hostname {} --keyfile {} --os_user {} --nb_tag_name {} --nb_tag_value {}" \
.format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
notebook_config['tag_name'], notebook_config['instance_name'])
try:
local("~/scripts/{}.py {}".format('common_remove_remote_kernels', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to post configuring instance.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
additional_info = {
'instance_hostname': instance_hostname,
'tensor': False
}
params = "--edge_hostname {} " \
"--keyfile {} " \
"--os_user {} " \
"--type {} " \
"--exploratory_name {} " \
"--additional_info '{}'"\
.format(edge_instance_hostname,
keyfile_name,
notebook_config['dlab_ssh_user'],
'jupyter',
notebook_config['exploratory_name'],
json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
append_result("Failed to set edge reverse proxy template.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['shared_image_enabled'] == 'true':
try:
print('[CREATING AMI]')
ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '':
print("Looks like it's first time we configure notebook server. Creating image.")
image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
instance_name=notebook_config['instance_name'],
image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
append_result("Failed creating image.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# generating output information
ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
jupyter_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
jupyter_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(notebook_config['instance_name']))
print("Private DNS: {}".format(dns_name))
print("Private IP: {}".format(ip_address))
print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
print("Instance type: {}".format(notebook_config['instance_type']))
print("Key name: {}".format(notebook_config['key_name']))
print("User key name: {}".format(notebook_config['user_keyname']))
print("Image name: {}".format(notebook_config['notebook_image_name']))
print("Profile name: {}".format(notebook_config['role_profile_name']))
print("SG name: {}".format(notebook_config['security_group_name']))
print("Jupyter URL: {}".format(jupyter_ip_url))
print("Jupyter URL: {}".format(jupyter_dns_url))
print("Ungit URL: {}".format(ungit_ip_url))
print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
with open("/root/result.json", 'w') as result:
res = {"hostname": dns_name,
"ip": ip_address,
"instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
"master_keyname": os.environ['conf_key_name'],
"notebook_name": notebook_config['instance_name'],
"notebook_image_name": notebook_config['notebook_image_name'],
"Action": "Create new notebook server",
"exploratory_url": [
{"description": "Jupyter",
"url": jupyter_notebook_acces_url},
{"description": "Ungit",
"url": jupyter_ungit_acces_url},
{"description": "Jupyter (via tunnel)",
"url": jupyter_ip_url},
{"description": "Ungit (via tunnel)",
"url": ungit_ip_url}
]}
result.write(json.dumps(res))
|
"""
Module used after C{%(destdir)s} has been finalized to create the
initial packaging. Also contains error reporting.
"""
import codecs
import imp
import itertools
import os
import re
import site
import sre_constants
import stat
import subprocess
import sys
from conary import files, trove
from conary.build import buildpackage, filter, policy, recipe, tags, use
from conary.build import smartform
from conary.deps import deps
from conary.lib import elf, magic, util, pydeps, fixedglob, graph
from conary.build.action import TARGET_LINUX
from conary.build.action import TARGET_WINDOWS
try:
from xml.etree import ElementTree
except ImportError:
try:
from elementtree import ElementTree
except ImportError:
ElementTree = None
class _DatabaseDepCache(object):
__slots__ = ['db', 'cache']
def __init__(self, db):
self.db = db
self.cache = {}
def getProvides(self, depSetList):
ret = {}
missing = []
for depSet in depSetList:
if depSet in self.cache:
ret[depSet] = self.cache[depSet]
else:
missing.append(depSet)
newresults = self.db.getTrovesWithProvides(missing)
ret.update(newresults)
self.cache.update(newresults)
return ret
class _filterSpec(policy.Policy):
"""
Pure virtual base class from which C{ComponentSpec} and C{PackageSpec}
are derived.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = False
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def __init__(self, *args, **keywords):
self.extraFilters = []
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
"""
Call derived classes (C{ComponentSpec} or C{PackageSpec}) as::
ThisClass('<name>', 'filterexp1', 'filterexp2')
where C{filterexp} is either a regular expression or a
tuple of C{(regexp[, setmodes[, unsetmodes]])}
"""
if args:
theName = args[0]
for filterexp in args[1:]:
self.extraFilters.append((theName, filterexp))
policy.Policy.updateArgs(self, **keywords)
class _addInfo(policy.Policy):
"""
Pure virtual class for policies that add information such as tags,
requirements, and provision, to files.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = False
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
)
keywords = {
'included': {},
'excluded': {}
}
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def updateArgs(self, *args, **keywords):
"""
Call as::
C{I{ClassName}(I{info}, I{filterexp})}
or::
C{I{ClassName}(I{info}, exceptions=I{filterexp})}
where C{I{filterexp}} is either a regular expression or a
tuple of C{(regexp[, setmodes[, unsetmodes]])}
"""
if args:
args = list(args)
info = args.pop(0)
if args:
if not self.included:
self.included = {}
if info not in self.included:
self.included[info] = []
self.included[info].extend(args)
elif 'exceptions' in keywords:
# not the usual exception handling, this is an exception
if not self.excluded:
self.excluded = {}
if info not in self.excluded:
self.excluded[info] = []
self.excluded[info].append(keywords.pop('exceptions'))
else:
raise TypeError, 'no paths provided'
policy.Policy.updateArgs(self, **keywords)
def doProcess(self, recipe):
# for filters
self.rootdir = self.rootdir % recipe.macros
# instantiate filters
d = {}
for info in self.included:
newinfo = info % recipe.macros
l = []
for item in self.included[info]:
l.append(filter.Filter(item, recipe.macros))
d[newinfo] = l
self.included = d
d = {}
for info in self.excluded:
newinfo = info % recipe.macros
l = []
for item in self.excluded[info]:
l.append(filter.Filter(item, recipe.macros))
d[newinfo] = l
self.excluded = d
policy.Policy.doProcess(self, recipe)
def doFile(self, path):
fullpath = self.recipe.macros.destdir+path
if not util.isregular(fullpath) and not os.path.islink(fullpath):
return
self.runInfo(path)
def runInfo(self, path):
'pure virtual'
pass
class Config(policy.Policy):
"""
NAME
====
B{C{r.Config()}} - Mark files as configuration files
SYNOPSIS
========
C{r.Config([I{filterexp}] || [I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.Config} policy marks all files below C{%(sysconfdir)s}
(that is, C{/etc}) and C{%(taghandlerdir)s} (that is,
C{/usr/libexec/conary/tags/}), and any other files explicitly
mentioned, as configuration files.
- To mark files as exceptions, use
C{r.Config(exceptions='I{filterexp}')}.
- To mark explicit inclusions as configuration files, use:
C{r.Config('I{filterexp}')}
A file marked as a Config file cannot also be marked as a
Transient file or an InitialContents file. Conary enforces this
requirement.
EXAMPLES
========
C{r.Config(exceptions='%(sysconfdir)s/X11/xkb/xkbcomp')}
The file C{/etc/X11/xkb/xkbcomp} is marked as an exception, since it is
not actually a configuration file even though it is within the C{/etc}
(C{%(sysconfdir)s}) directory hierarchy and would be marked as a
configuration file by default.
C{r.Config('%(mmdir)s/Mailman/mm_cfg.py')}
Marks the file C{%(mmdir)s/Mailman/mm_cfg.py} as a configuration file;
it would not be automatically marked as a configuration file otherwise.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = True
requires = (
# for :config component, ComponentSpec must run after Config
# Otherwise, this policy would follow PackageSpec and just set isConfig
# on each config file
('ComponentSpec', policy.REQUIRED_SUBSEQUENT),
)
invariantinclusions = [ '%(sysconfdir)s/', '%(taghandlerdir)s/']
invariantexceptions = [ '%(userinfodir)s/', '%(groupinfodir)s' ]
def doFile(self, filename):
m = self.recipe.magic[filename]
if m and m.name == "ELF":
# an ELF file cannot be a config file, some programs put
# ELF files under /etc (X, for example), and tag handlers
# can be ELF or shell scripts; we just want tag handlers
# to be config files if they are shell scripts.
# Just in case it was not intentional, warn...
if self.macros.sysconfdir in filename:
self.info('ELF file %s found in config directory', filename)
return
fullpath = self.macros.destdir + filename
if os.path.isfile(fullpath) and util.isregular(fullpath):
if self._fileIsBinary(filename, fullpath):
self.error("binary file '%s' is marked as config" % \
filename)
self._markConfig(filename, fullpath)
def _fileIsBinary(self, path, fn, maxsize=None, decodeFailIsError=True):
limit = os.stat(fn)[stat.ST_SIZE]
if maxsize is not None and limit > maxsize:
self.warn('%s: file size %d longer than max %d',
path, limit, maxsize)
return True
# we'll consider file to be binary file if we don't find any
# good reason to mark it as text, or if we find a good reason
# to mark it as binary
foundFF = False
foundNL = False
f = open(fn, 'r')
try:
while f.tell() < limit:
buf = f.read(65536)
if chr(0) in buf:
self.warn('%s: file contains NULL byte', path)
return True
if '\xff\xff' in buf:
self.warn('%s: file contains 0xFFFF sequence', path)
return True
if '\xff' in buf:
foundFF = True
if '\n' in buf:
foundNL = True
finally:
f.close()
if foundFF and not foundNL:
self.error('%s: found 0xFF without newline', path)
utf8 = codecs.open(fn, 'r', 'utf-8')
win1252 = codecs.open(fn, 'r', 'windows-1252')
try:
try:
while utf8.tell() < limit:
utf8.read(65536)
except UnicodeDecodeError, e:
# Still want to print a warning if it is not unicode;
# Note that Code Page 1252 is considered a legacy
# encoding on Windows
self.warn('%s: %s', path, str(e))
try:
while win1252.tell() < limit:
win1252.read(65536)
except UnicodeDecodeError, e:
self.warn('%s: %s', path, str(e))
return decodeFailIsError
finally:
utf8.close()
win1252.close()
return False
def _addTrailingNewline(self, filename, fullpath):
# FIXME: This exists only for stability; there is no longer
# any need to add trailing newlines to config files. This
# also violates the rule that no files are modified after
# destdir modification has been completed.
self.warn("adding trailing newline to config file '%s'" % \
filename)
mode = os.lstat(fullpath)[stat.ST_MODE]
oldmode = None
if mode & 0600 != 0600:
# need to be able to read and write the file to fix it
oldmode = mode
os.chmod(fullpath, mode|0600)
f = open(fullpath, 'a')
f.seek(0, 2)
f.write('\n')
f.close()
if oldmode is not None:
os.chmod(fullpath, oldmode)
def _markConfig(self, filename, fullpath):
self.info(filename)
f = file(fullpath)
f.seek(0, 2)
if f.tell():
# file has contents
f.seek(-1, 2)
lastchar = f.read(1)
f.close()
if lastchar != '\n':
self._addTrailingNewline(filename, fullpath)
f.close()
self.recipe.ComponentSpec(_config=filename)
class ComponentSpec(_filterSpec):
"""
NAME
====
B{C{r.ComponentSpec()}} - Determines which component each file is in
SYNOPSIS
========
C{r.ComponentSpec([I{componentname}, I{filterexp}] || [I{packagename}:I{componentname}, I{filterexp}])}
DESCRIPTION
===========
The C{r.ComponentSpec} policy includes the filter expressions that specify
the default assignment of files to components. The expressions are
considered in the order in which they are evaluated in the recipe, and the
first match wins. After all the recipe-provided expressions are
evaluated, the default expressions are evaluated. If no expression
matches, then the file is assigned to the C{catchall} component.
Note that in the C{I{packagename}:I{componentname}} form, the C{:}
must be literal; it cannot be part of a macro.
KEYWORDS
========
B{catchall} : Specify the component name which gets all otherwise
unassigned files. Default: C{runtime}
EXAMPLES
========
C{r.ComponentSpec('manual', '%(contentdir)s/manual/')}
Uses C{r.ComponentSpec} to specify that all files below the
C{%(contentdir)s/manual/} directory are part of the C{:manual} component.
C{r.ComponentSpec('foo:bar', '%(sharedir)s/foo/')}
Uses C{r.ComponentSpec} to specify that all files below the
C{%(sharedir)s/foo/} directory are part of the C{:bar} component
of the C{foo} package, avoiding the need to invoke both the
C{ComponentSpec} and C{PackageSpec} policies.
C{r.ComponentSpec(catchall='data')}
Uses C{r.ComponentSpec} to specify that all files not otherwise specified
go into the C{:data} component instead of the default C{:runtime}
component.
"""
requires = (
('Config', policy.REQUIRED_PRIOR),
('PackageSpec', policy.REQUIRED_SUBSEQUENT),
)
keywords = { 'catchall': 'runtime' }
def __init__(self, *args, **keywords):
"""
@keyword catchall: The component name which gets all otherwise
unassigned files. Default: C{runtime}
"""
_filterSpec.__init__(self, *args, **keywords)
self.configFilters = []
self.derivedFilters = []
def updateArgs(self, *args, **keywords):
if '_config' in keywords:
configPath=keywords.pop('_config')
self.recipe.PackageSpec(_config=configPath)
if args:
name = args[0]
if ':' in name:
package, name = name.split(':')
args = list(itertools.chain([name], args[1:]))
if package:
# we've got a package as well as a component, pass it on
pkgargs = list(itertools.chain((package,), args[1:]))
self.recipe.PackageSpec(*pkgargs)
_filterSpec.updateArgs(self, *args, **keywords)
def doProcess(self, recipe):
compFilters = []
self.macros = recipe.macros
self.rootdir = self.rootdir % recipe.macros
self.loadFilterDirs()
# The extras need to come before base in order to override decisions
# in the base subfilters; invariants come first for those very few
# specs that absolutely should not be overridden in recipes.
for filteritem in itertools.chain(self.invariantFilters,
self.extraFilters,
self.derivedFilters,
self.configFilters,
self.baseFilters):
if not isinstance(filteritem, (filter.Filter, filter.PathSet)):
name = filteritem[0] % self.macros
assert(name != 'source')
args, kwargs = self.filterExpArgs(filteritem[1:], name=name)
filteritem = filter.Filter(*args, **kwargs)
compFilters.append(filteritem)
# by default, everything that hasn't matched a filter pattern yet
# goes in the catchall component ('runtime' by default)
compFilters.append(filter.Filter('.*', self.macros, name=self.catchall))
# pass these down to PackageSpec for building the package
recipe.PackageSpec(compFilters=compFilters)
def loadFilterDirs(self):
invariantFilterMap = {}
baseFilterMap = {}
self.invariantFilters = []
self.baseFilters = []
# Load all component python files
for componentDir in self.recipe.cfg.componentDirs:
for filterType, map in (('invariant', invariantFilterMap),
('base', baseFilterMap)):
oneDir = os.sep.join((componentDir, filterType))
if not os.path.isdir(oneDir):
continue
for filename in os.listdir(oneDir):
fullpath = os.sep.join((oneDir, filename))
if (not filename.endswith('.py') or
not util.isregular(fullpath)):
continue
self.loadFilter(filterType, map, filename, fullpath)
# populate the lists with dependency-sorted information
for filterType, map, filterList in (
('invariant', invariantFilterMap, self.invariantFilters),
('base', baseFilterMap, self.baseFilters)):
dg = graph.DirectedGraph()
for filterName in map.keys():
dg.addNode(filterName)
filter, follows, precedes = map[filterName]
def warnMissing(missing):
self.error('%s depends on missing %s', filterName, missing)
for prior in follows:
if not prior in map:
warnMissing(prior)
dg.addEdge(prior, filterName)
for subsequent in precedes:
if not subsequent in map:
warnMissing(subsequent)
dg.addEdge(filterName, subsequent)
# test for dependency loops
depLoops = [x for x in dg.getStronglyConnectedComponents()
if len(x) > 1]
if depLoops:
self.error('dependency loop(s) in component filters: %s',
' '.join(sorted(':'.join(x)
for x in sorted(list(depLoops)))))
return
# Create a stably-sorted list of config filters where
# the filter is not empty. (An empty filter with both
# follows and precedes specified can be used to induce
# ordering between otherwise unrelated components.)
#for name in dg.getTotalOrdering(nodeSort=lambda a, b: cmp(a,b)):
for name in dg.getTotalOrdering():
filters = map[name][0]
if not filters:
continue
componentName = filters[0]
for filterExp in filters[1]:
filterList.append((componentName, filterExp))
def loadFilter(self, filterType, map, filename, fullpath):
# do not load shared libraries
desc = [x for x in imp.get_suffixes() if x[0] == '.py'][0]
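# desc is the (suffix, mode, module_type) triple for plain '.py' source
# files, as required by imp.load_module() below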
f = file(fullpath)
modname = filename[:-3]
m = imp.load_module(modname, f, fullpath, desc)
f.close()
if not 'filters' in m.__dict__:
self.warn('%s missing "filters"; not a valid component'
' specification file', fullpath)
return
filters = m.__dict__['filters']
if filters and len(filters) > 1 and type(filters[1]) not in (list,
tuple):
self.error('invalid expression in %s: filters specification'
" must be ('name', ('expression', ...))", fullpath)
follows = ()
if 'follows' in m.__dict__:
follows = m.__dict__['follows']
precedes = ()
if 'precedes' in m.__dict__:
precedes = m.__dict__['precedes']
map[modname] = (filters, follows, precedes)
class PackageSpec(_filterSpec):
"""
NAME
====
B{C{r.PackageSpec()}} - Determines which package each file is in
SYNOPSIS
========
C{r.PackageSpec(I{packagename}, I{filterexp})}
DESCRIPTION
===========
The C{r.PackageSpec()} policy determines which package each file
is in. (Use C{r.ComponentSpec()} to specify the component without
specifying the package, or to specify C{I{package}:I{component}}
in one invocation.)
EXAMPLES
========
C{r.PackageSpec('openssh-server', '%(sysconfdir)s/pam.d/sshd')}
Specifies that the file C{%(sysconfdir)s/pam.d/sshd} is in the package
C{openssh-server} rather than the default (which in this case would have
been C{openssh} because this example was provided by C{openssh.recipe}).
"""
requires = (
('ComponentSpec', policy.REQUIRED_PRIOR),
)
keywords = { 'compFilters': None }
def __init__(self, *args, **keywords):
"""
@keyword compFilters: reserved for C{ComponentSpec} to pass information
needed by C{PackageSpec}.
"""
_filterSpec.__init__(self, *args, **keywords)
self.configFiles = []
self.derivedFilters = []
def updateArgs(self, *args, **keywords):
if '_config' in keywords:
self.configFiles.append(keywords.pop('_config'))
# keep a list of packages filtered for in PackageSpec in the recipe
if args:
newTrove = args[0] % self.recipe.macros
self.recipe.packages[newTrove] = True
_filterSpec.updateArgs(self, *args, **keywords)
def preProcess(self):
self.pkgFilters = []
recipe = self.recipe
self.destdir = recipe.macros.destdir
if self.exceptions:
self.warn('PackageSpec does not honor exceptions')
self.exceptions = None
if self.inclusions:
# would have an effect only with exceptions listed, so no warning...
self.inclusions = None
# userinfo and groupinfo are invariant filters, so they must come first
for infoType in ('user', 'group'):
infoDir = '%%(%sinfodir)s' % infoType % self.macros
realDir = util.joinPaths(self.destdir, infoDir)
if not os.path.isdir(realDir):
continue
for infoPkgName in os.listdir(realDir):
pkgPath = util.joinPaths(infoDir, infoPkgName)
self.pkgFilters.append( \
filter.Filter(pkgPath, self.macros,
name = 'info-%s' % infoPkgName))
# extras need to come before derived so that derived packages
# can change the package to which a file is assigned
for filteritem in itertools.chain(self.extraFilters,
self.derivedFilters):
if not isinstance(filteritem, (filter.Filter, filter.PathSet)):
name = filteritem[0] % self.macros
if not trove.troveNameIsValid(name):
self.error('%s is not a valid package name', name)
args, kwargs = self.filterExpArgs(filteritem[1:], name=name)
self.pkgFilters.append(filter.Filter(*args, **kwargs))
else:
self.pkgFilters.append(filteritem)
# by default, everything that hasn't matched a pattern in the
# main package filter goes in the package named recipe.name
self.pkgFilters.append(filter.Filter('.*', self.macros, name=recipe.name))
# OK, all the filters exist, build an autopackage object that
# knows about them
recipe.autopkg = buildpackage.AutoBuildPackage(
self.pkgFilters, self.compFilters, recipe)
self.autopkg = recipe.autopkg
def do(self):
# Walk capsule contents ignored by doFile
for filePath, _, componentName in self.recipe._iterCapsulePaths():
realPath = self.destdir + filePath
if util.exists(realPath):
# Files that do not exist on the filesystem (devices)
# are handled separately
self.autopkg.addFile(filePath, realPath, componentName)
# Walk normal files
_filterSpec.do(self)
def doFile(self, path):
# all policy classes after this require that the initial tree is built
if not self.recipe._getCapsulePathsForFile(path):
realPath = self.destdir + path
self.autopkg.addFile(path, realPath)
def postProcess(self):
# flag all config files
for confname in self.configFiles:
self.recipe.autopkg.pathMap[confname].flags.isConfig(True)
class InitialContents(policy.Policy):
"""
NAME
====
B{C{r.InitialContents()}} - Mark only explicit inclusions as initial
contents files
SYNOPSIS
========
C{InitialContents([I{filterexp}])}
DESCRIPTION
===========
By default, C{r.InitialContents()} does not apply to any files.
It is used to specify all files that Conary needs to mark as
providing only initial contents. When Conary installs or
updates one of these files, it will never replace existing
contents; it uses the provided contents only if the file does
not yet exist at the time Conary is creating it.
A file marked as an InitialContents file cannot also be marked
as a Transient file or a Config file. Conary enforces this
requirement.
EXAMPLES
========
C{r.InitialContents('%(sysconfdir)s/conary/.*gpg')}
The files C{%(sysconfdir)s/conary/.*gpg} are being marked as initial
contents files. Conary will use those contents when creating the files
the first time, but will never overwrite existing contents in those files.
"""
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('Config', policy.REQUIRED_PRIOR),
)
bucket = policy.PACKAGE_CREATION
processUnmodified = True
invariantexceptions = [ '%(userinfodir)s/', '%(groupinfodir)s' ]
invariantinclusions = ['%(localstatedir)s/run/',
'%(localstatedir)s/log/',
'%(cachedir)s/']
def postInit(self, *args, **kwargs):
self.recipe.Config(exceptions = self.invariantinclusions,
allowUnusedFilters = True)
def updateArgs(self, *args, **keywords):
policy.Policy.updateArgs(self, *args, **keywords)
self.recipe.Config(exceptions=args, allowUnusedFilters = True)
def doFile(self, filename):
fullpath = self.macros.destdir + filename
recipe = self.recipe
if os.path.isfile(fullpath) and util.isregular(fullpath):
self.info(filename)
f = recipe.autopkg.pathMap[filename]
f.flags.isInitialContents(True)
if f.flags.isConfig():
self.error(
'%s is marked as both a configuration file and'
' an initial contents file', filename)
class Transient(policy.Policy):
"""
NAME
====
B{C{r.Transient()}} - Mark files that have transient contents
SYNOPSIS
========
C{r.Transient([I{filterexp}])}
DESCRIPTION
===========
The C{r.Transient()} policy marks files as containing transient
contents. It automatically marks the two most common uses of transient
contents: python and emacs byte-compiled files
(C{.pyc}, C{.pyo}, and C{.elc} files).
Files containing transient contents are almost the opposite of
configuration files: their contents should be overwritten by
the new contents without question at update time, even if the
contents in the filesystem have changed. (Conary raises an
error if file contents have changed in the filesystem for normal
files.)
A file marked as a Transient file cannot also be marked as an
InitialContents file or a Config file. Conary enforces this
requirement.
EXAMPLES
========
C{r.Transient('%(libdir)s/firefox/extensions/')}
Marks all the files in the directory C{%(libdir)s/firefox/extensions/} as
having transient contents.
"""
bucket = policy.PACKAGE_CREATION
filetree = policy.PACKAGE
processUnmodified = True
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('Config', policy.REQUIRED_PRIOR),
('InitialContents', policy.REQUIRED_PRIOR),
)
invariantinclusions = [
r'..*\.py(c|o)$',
r'..*\.elc$',
r'%(userinfodir)s/',
r'%(groupinfodir)s'
]
def doFile(self, filename):
fullpath = self.macros.destdir + filename
if os.path.isfile(fullpath) and util.isregular(fullpath):
recipe = self.recipe
f = recipe.autopkg.pathMap[filename]
f.flags.isTransient(True)
if f.flags.isConfig() or f.flags.isInitialContents():
self.error(
'%s is marked as both a transient file and'
' a configuration or initial contents file', filename)
class TagDescription(policy.Policy):
"""
NAME
====
B{C{r.TagDescription()}} - Marks tag description files
SYNOPSIS
========
C{r.TagDescription([I{filterexp}])}
DESCRIPTION
===========
The C{r.TagDescription} class marks tag description files as
such so that conary handles them correctly. Every file in
C{%(tagdescriptiondir)s/} is marked as a tag description file by default.
No file outside of C{%(tagdescriptiondir)s/} will be considered by this
policy.
EXAMPLES
========
This policy is not called explicitly.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = False
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
)
invariantsubtrees = [ '%(tagdescriptiondir)s/' ]
def doFile(self, path):
if self.recipe._getCapsulePathsForFile(path):
return
fullpath = self.macros.destdir + path
if os.path.isfile(fullpath) and util.isregular(fullpath):
self.info('conary tag file: %s', path)
self.recipe.autopkg.pathMap[path].tags.set("tagdescription")
class TagHandler(policy.Policy):
"""
NAME
====
B{C{r.TagHandler()}} - Mark tag handler files
SYNOPSIS
========
C{r.TagHandler([I{filterexp}])}
DESCRIPTION
===========
All files in C{%(taghandlerdir)s/} are marked as tag
handler files.
EXAMPLES
========
This policy is not called explicitly.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = False
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
)
invariantsubtrees = [ '%(taghandlerdir)s/' ]
def doFile(self, path):
if self.recipe._getCapsulePathsForFile(path):
return
fullpath = self.macros.destdir + path
if os.path.isfile(fullpath) and util.isregular(fullpath):
self.info('conary tag handler: %s', path)
self.recipe.autopkg.pathMap[path].tags.set("taghandler")
class TagSpec(_addInfo):
"""
NAME
====
B{C{r.TagSpec()}} - Apply tags defined by tag descriptions
SYNOPSIS
========
C{r.TagSpec([I{tagname}, I{filterexp}] || [I{tagname}, I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.TagSpec()} policy automatically applies tags defined by tag
descriptions in both the current system and C{%(destdir)s} to all
files in C{%(destdir)s}.
To apply tags manually (removing a dependency on the tag description
file existing when the package is cooked), use the syntax:
C{r.TagSpec(I{tagname}, I{filterexp})}.
To set an exception to this policy, use:
C{r.TagSpec(I{tagname}, I{exceptions=filterexp})}.
EXAMPLES
========
C{r.TagSpec('initscript', '%(initdir)s/')}
Applies the C{initscript} tag to all files in the directory
C{%(initdir)s/}.
"""
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
)
def doProcess(self, recipe):
self.tagList = []
self.buildReqsComputedForTags = set()
self.suggestBuildRequires = set()
# read the system and %(destdir)s tag databases
for directory in (recipe.macros.destdir+'/etc/conary/tags/',
'/etc/conary/tags/'):
if os.path.isdir(directory):
for filename in os.listdir(directory):
path = util.joinPaths(directory, filename)
self.tagList.append(tags.TagFile(path, recipe.macros, True))
self.fullReqs = self.recipe._getTransitiveBuildRequiresNames()
_addInfo.doProcess(self, recipe)
def markTag(self, name, tag, path, tagFile=None):
# commonly, a tagdescription will nominate a file to be
# tagged, but it will also be set explicitly in the recipe,
# and therefore markTag will be called twice.
if (len(tag.split()) > 1 or
not tag.replace('-', '').replace('_', '').isalnum()):
# handlers for multiple tags require strict tag names:
# no whitespace, only alphanumeric plus - and _ characters
self.error('illegal tag name %s for file %s' %(tag, path))
return
tags = self.recipe.autopkg.pathMap[path].tags
if tag not in tags:
self.info('%s: %s', name, path)
tags.set(tag)
if tagFile and tag not in self.buildReqsComputedForTags:
self.buildReqsComputedForTags.add(tag)
db = self._getDb()
for trove in db.iterTrovesByPath(tagFile.tagFile):
troveName = trove.getName()
if troveName not in self.fullReqs:
# XXX should be error, change after bootstrap
self.warn("%s assigned by %s to file %s, so add '%s'"
' to buildRequires or call r.TagSpec()'
%(tag, tagFile.tagFile, path, troveName))
self.suggestBuildRequires.add(troveName)
def runInfo(self, path):
if self.recipe._getCapsulePathsForFile(path):
# capsules do not participate in the tag protocol
return
excludedTags = {}
for tag in self.included:
for filt in self.included[tag]:
if filt.match(path):
isExcluded = False
if tag in self.excluded:
for filt in self.excluded[tag]:
if filt.match(path):
s = excludedTags.setdefault(tag, set())
s.add(path)
isExcluded = True
break
if not isExcluded:
self.markTag(tag, tag, path)
for tag in self.tagList:
if tag.match(path):
if tag.name:
name = tag.name
else:
name = tag.tag
isExcluded = False
if tag.tag in self.excluded:
for filt in self.excluded[tag.tag]:
# exception handling is per-tag, so handled specially
if filt.match(path):
s = excludedTags.setdefault(name, set())
s.add(path)
isExcluded = True
break
if not isExcluded:
self.markTag(name, tag.tag, path, tag)
if excludedTags:
for tag in excludedTags:
self.info('ignoring tag match for %s: %s',
tag, ', '.join(sorted(excludedTags[tag])))
def postProcess(self):
if self.suggestBuildRequires:
self.info('possibly add to buildRequires: %s',
str(sorted(list(self.suggestBuildRequires))))
self.recipe.reportMissingBuildRequires(self.suggestBuildRequires)
class Properties(policy.Policy):
"""
NAME
====
B{C{r.Properties()}} - Read property definition files
SYNOPSIS
========
C{r.Properties(I{exceptions=filterexp} || [I{contents=xml},
I{package=pkg:component}] ||
[I{/path/to/file}, I{filterexp}], I{contents=ipropcontents})}
DESCRIPTION
===========
The C{r.Properties()} policy automatically parses iconfig property
definition files, making the properties available for configuration
management with iconfig.
To add configuration properties manually, use the syntax:
C{r.Properties(I{contents=ipropcontents}, I{package=pkg:component}}
Where contents is the xml string that would normally be stored in the iprop
file and package is the component where to attach the config metadata.
(NOTE: This component must exist)
or
C{r.Properties([I{/path/to/file}, I{filterexp}], I{contents=ipropcontents})}
Where contents is the xml string that would normally be stored in the iprop
file and the path or filterexp matches the files that represent the
component that the property should be attached to.
"""
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
bucket = policy.PACKAGE_CREATION
processUnmodified = True
_supports_file_properties = True
requires = (
# We need to know what component files have been assigned to
('PackageSpec', policy.REQUIRED_PRIOR),
)
def __init__(self, *args, **kwargs):
policy.Policy.__init__(self, *args, **kwargs)
self.ipropFilters = []
self.ipropPaths = [ r'%(prefix)s/lib/iconfig/properties/.*\.iprop' ]
self.contents = []
self.paths = []
self.fileFilters = []
self.propMap = {}
def updateArgs(self, *args, **kwargs):
if 'contents' in kwargs:
contents = kwargs.pop('contents')
pkg = kwargs.pop('package', None)
if pkg is None and args:
for arg in args:
self.paths.append((arg, contents))
else:
self.contents.append((pkg, contents))
policy.Policy.updateArgs(self, *args, **kwargs)
def doProcess(self, recipe):
for filterSpec, iprop in self.paths:
self.fileFilters.append((
filter.Filter(filterSpec, recipe.macros),
iprop,
))
for ipropPath in self.ipropPaths:
self.ipropFilters.append(
filter.Filter(ipropPath, recipe.macros))
policy.Policy.doProcess(self, recipe)
def _getComponent(self, path):
componentMap = self.recipe.autopkg.componentMap
if path not in componentMap:
return
main, comp = componentMap[path].getName().split(':')
return main, comp
def doFile(self, path):
if path not in self.recipe.autopkg.pathMap:
return
for fltr, iprop in self.fileFilters:
if fltr.match(path):
main, comp = self._getComponent(path)
self._parsePropertyData(iprop, main, comp)
# Make sure any remaining files are actually in the root.
fullpath = self.recipe.macros.destdir + path
if not os.path.isfile(fullpath) or not util.isregular(fullpath):
return
# Check to see if this is an iprop file location that we know about.
for fltr in self.ipropFilters:
if fltr.match(path):
break
else:
return
main, comp = self._getComponent(path)
xml = open(fullpath).read()
self._parsePropertyData(xml, main, comp)
def postProcess(self):
for pkg, content in self.contents:
pkg = pkg % self.macros
pkgName, compName = pkg.split(':')
self._parsePropertyData(content, pkgName, compName)
def _parsePropertyData(self, xml, pkgName, compName):
pkgSet = self.propMap.setdefault(xml, set())
if (pkgName, compName) in pkgSet:
return
pkgSet.add((pkgName, compName))
self.recipe._addProperty(trove._PROPERTY_TYPE_SMARTFORM, pkgName,
compName, xml)
class MakeDevices(policy.Policy):
"""
NAME
====
B{C{r.MakeDevices()}} - Make device nodes
SYNOPSIS
========
C{MakeDevices([I{path},] [I{type},] [I{major},] [I{minor},] [I{owner},] [I{groups},] [I{mode}])}
DESCRIPTION
===========
The C{r.MakeDevices()} policy creates device nodes. Conary's
policy of non-root builds requires that these nodes exist only in the
package, and not in the filesystem, as only root may actually create
device nodes.
EXAMPLES
========
C{r.MakeDevices(I{'/dev/tty', 'c', 5, 0, 'root', 'root', mode=0666, package=':dev'})}
Creates the device node C{/dev/tty}, as type 'c' (character, as opposed to
type 'b', or block) with a major number of '5', minor number of '0',
owner, and group are both the root user, and permissions are 0666.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = True
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('Ownership', policy.REQUIRED_SUBSEQUENT),
)
def __init__(self, *args, **keywords):
self.devices = []
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
"""
MakeDevices(path, devtype, major, minor, owner, group, mode=0400)
"""
if args:
args = list(args)
l = len(args)
if not ((l > 5) and (l < 9)):
self.recipe.error('MakeDevices: incorrect arguments: %r %r'
%(args, keywords))
mode = keywords.pop('mode', None)
package = keywords.pop('package', None)
if l > 6 and mode is None:
mode = args[6]
if mode is None:
mode = 0400
if l > 7 and package is None:
package = args[7]
self.devices.append(
(args[0:6], {'perms': mode, 'package': package}))
policy.Policy.updateArgs(self, **keywords)
def do(self):
for device, kwargs in self.devices:
r = self.recipe
filename = device[0]
owner = device[4]
group = device[5]
r.Ownership(owner, group, filename)
device[0] = device[0] % r.macros
r.autopkg.addDevice(*device, **kwargs)
class setModes(policy.Policy):
"""
Do not call from recipes; this is used internally by C{r.SetModes},
C{r.ParseManifest}, and unpacking derived packages. This policy
modifies modes relative to the mode on the file in the filesystem.
It adds setuid/setgid bits not otherwise set/honored on files on the
filesystem, and sets user r/w/x bits if they were altered for the
purposes of accessing the files during packaging. Otherwise,
it honors the bits found on the filesystem. It does not modify
bits in capsules.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = True
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('WarnWriteable', policy.REQUIRED_SUBSEQUENT),
('ExcludeDirectories', policy.CONDITIONAL_SUBSEQUENT),
)
def __init__(self, *args, **keywords):
self.sidbits = {}
self.userbits = {}
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
"""
setModes(path(s), [sidbits=int], [userbits=int])
"""
sidbits = keywords.pop('sidbits', None)
userbits = keywords.pop('userbits', None)
for path in args:
if sidbits is not None:
self.sidbits[path] = sidbits
if userbits is not None:
self.userbits[path] = userbits
self.recipe.WarnWriteable(
exceptions=re.escape(path).replace('%', '%%'),
allowUnusedFilters = True)
policy.Policy.updateArgs(self, **keywords)
def doFile(self, path):
# Don't set modes on capsule files
if self.recipe._getCapsulePathsForFile(path):
return
# Skip files that aren't part of the package
if path not in self.recipe.autopkg.pathMap:
return
newmode = oldmode = self.recipe.autopkg.pathMap[path].inode.perms()
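# 077077 clears the user permission bits (0700) while preserving the
# setuid/setgid/sticky, group and other bits, so the user bits recorded
# before packaging can be restored below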
if path in self.userbits:
newmode = (newmode & 077077) | self.userbits[path]
if path in self.sidbits and self.sidbits[path]:
newmode |= self.sidbits[path]
self.info('suid/sgid: %s mode 0%o', path, newmode & 07777)
if newmode != oldmode:
self.recipe.autopkg.pathMap[path].inode.perms.set(newmode)
class LinkType(policy.Policy):
"""
NAME
====
B{C{r.LinkType()}} - Ensures only regular, non-configuration files are hardlinked
SYNOPSIS
========
C{r.LinkType([I{filterexp}])}
DESCRIPTION
===========
The C{r.LinkType()} policy ensures that only regular, non-configuration
files are hardlinked.
EXAMPLES
========
This policy is not called explicitly.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = True
requires = (
('Config', policy.REQUIRED_PRIOR),
('PackageSpec', policy.REQUIRED_PRIOR),
)
def do(self):
for component in self.recipe.autopkg.getComponents():
for path in sorted(component.hardlinkMap.keys()):
if self.recipe.autopkg.pathMap[path].flags.isConfig():
self.error("Config file %s has illegal hard links", path)
for path in component.badhardlinks:
self.error("Special file %s has illegal hard links", path)
class LinkCount(policy.Policy):
"""
NAME
====
B{C{r.LinkCount()}} - Restricts hardlinks across directories.
SYNOPSIS
========
C{LinkCount([I{filterexp}] | [I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.LinkCount()} policy restricts hardlinks across directories.
It is generally an error to have hardlinks across directories, except when
the packager knows that there is no reasonable chance that they will be on
separate filesystems.
In cases where the packager is certain hardlinks will not cross
filesystems, a list of regular expressions specifying files
which are excepted from this rule may be passed to C{r.LinkCount}.
EXAMPLES
========
C{r.LinkCount(exceptions='/usr/share/zoneinfo/')}
Uses C{r.LinkCount} to except zoneinfo files, located in
C{/usr/share/zoneinfo/}, from the policy against cross-directory
hardlinks.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = False
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
)
def __init__(self, *args, **keywords):
policy.Policy.__init__(self, *args, **keywords)
self.excepts = set()
def updateArgs(self, *args, **keywords):
allowUnusedFilters = keywords.pop('allowUnusedFilters', False) or \
self.allowUnusedFilters
exceptions = keywords.pop('exceptions', None)
if exceptions:
if type(exceptions) is str:
self.excepts.add(exceptions)
if not allowUnusedFilters:
self.unusedFilters['exceptions'].add(exceptions)
elif type(exceptions) in (tuple, list):
self.excepts.update(exceptions)
if not allowUnusedFilters:
self.unusedFilters['exceptions'].update(exceptions)
# FIXME: we may want to have another keyword argument
# that passes information down to the buildpackage
# that causes link groups to be broken for some
# directories but not others. We need to research
# first whether this is useful; it may not be.
def do(self):
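# Capsule recipes take their contents from an encapsulated package (e.g.
# an RPM), so the cross-directory hard link check does not apply to them.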
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return
filters = [(x, filter.Filter(x, self.macros)) for x in self.excepts]
for component in self.recipe.autopkg.getComponents():
for inode in component.linkGroups:
# ensure all in same directory, except for directories
# matching regexps that have been passed in
allPaths = [x for x in component.linkGroups[inode]]
for path in allPaths[:]:
for regexp, f in filters:
if f.match(path):
self.unusedFilters['exceptions'].discard(regexp)
allPaths.remove(path)
dirSet = set(os.path.dirname(x) + '/' for x in allPaths)
if len(dirSet) > 1:
self.error('files %s are hard links across directories %s',
', '.join(sorted(component.linkGroups[inode])),
', '.join(sorted(list(dirSet))))
self.error('If these directories cannot reasonably be'
' on different filesystems, disable this'
' warning by calling'
" r.LinkCount(exceptions=('%s')) or"
" equivalent"
% "', '".join(sorted(list(dirSet))))
class ExcludeDirectories(policy.Policy):
"""
NAME
====
B{C{r.ExcludeDirectories()}} - Exclude directories from package
SYNOPSIS
========
C{r.ExcludeDirectories([I{filterexp}] | [I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.ExcludeDirectories} policy causes directories to be
excluded from the package by default. Use
C{r.ExcludeDirectories(exceptions=I{filterexp})} to set exceptions to this
policy, which will cause directories matching the regular expression
C{filterexp} to be included in the package. Remember that Conary
packages cannot share files, including directories, so only one
package installed on a system at any one time can own the same
directory.
There are only three reasons to explicitly package a directory: the
directory needs permissions other than 0755, it needs non-root owner
or group, or it must exist even if it is empty.
Therefore, it should generally not be necessary to invoke this policy
directly. If your directory requires permissions other than 0755, simply
use C{r.SetMode} to specify the permissions, and the directory will be
automatically included. Similarly, if you wish to include an empty
directory with owner or group information, call C{r.Ownership} on that
empty directory.
Because C{r.Ownership} can reasonably be called on an entire
subdirectory tree and indiscriminately applied to files and
directories alike, non-empty directories with owner or group
set will be excluded from packaging unless an exception is
explicitly provided.
If you call C{r.Ownership} with a filter that applies to an
empty directory, but you do not want to package that directory,
you will have to remove the directory with C{r.Remove}.
Packages do not need to explicitly include directories to ensure
existence of a target to place a file in. Conary will appropriately
create the directory, and delete it later if the directory becomes empty.
EXAMPLES
========
C{r.ExcludeDirectories(exceptions='/tftpboot')}
Sets the directory C{/tftpboot} as an exception to the
C{r.ExcludeDirectories} policy, so that the C{/tftpboot}
directory will be included in the package.
"""
bucket = policy.PACKAGE_CREATION
processUnmodified = True
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('Ownership', policy.REQUIRED_PRIOR),
('MakeDevices', policy.CONDITIONAL_PRIOR),
)
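# the (regexp, filetype) form of an invariant inclusion limits this policy
# to paths of the given type; here, only directories are considered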
invariantinclusions = [ ('.*', stat.S_IFDIR) ]
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def doFile(self, path):
# temporarily do nothing for capsules, we might do something later
if self.recipe._getCapsulePathsForFile(path):
return
fullpath = self.recipe.macros.destdir + os.sep + path
s = os.lstat(fullpath)
mode = s[stat.ST_MODE]
if mode & 0777 != 0755:
self.info('excluding directory %s with mode %o', path, mode&0777)
elif not os.listdir(fullpath):
d = self.recipe.autopkg.pathMap[path]
if d.inode.owner.freeze() != 'root':
self.info('not excluding empty directory %s'
' because of non-root owner', path)
return
elif d.inode.group.freeze() != 'root':
self.info('not excluding empty directory %s'
' because of non-root group', path)
return
self.info('excluding empty directory %s', path)
# if it's empty and we're not packaging it, there's no need for it
# to continue to exist on the filesystem to potentially confuse
# other policy actions... see CNP-18
os.rmdir(fullpath)
self.recipe.autopkg.delFile(path)
class ByDefault(policy.Policy):
"""
NAME
====
B{C{r.ByDefault()}} - Determines components to be installed by default
SYNOPSIS
========
C{r.ByDefault([I{inclusions} || C{exceptions}=I{exceptions}])}
DESCRIPTION
===========
The C{r.ByDefault()} policy determines which components should
be installed by default at the time the package is installed on the
system. The default setting for the C{ByDefault} policy is that the
C{:debuginfo} and C{:test} components are not installed with the package.
The inclusions and exceptions do B{not} specify filenames. They are
either C{I{package}:I{component}} or C{:I{component}}. Inclusions
are considered before exceptions, and inclusions and exceptions are
considered in the order provided in the recipe, and first match wins.
EXAMPLES
========
C{r.ByDefault(exceptions=[':manual'])}
Uses C{r.ByDefault} to ignore C{:manual} components when enforcing the
policy.
C{r.ByDefault(exceptions=[':manual'])}
C{r.ByDefault('foo:manual')}
If these lines are in the C{bar} package, and there is both a
C{foo:manual} and a C{bar:manual} component, then the C{foo:manual}
component will be installed by default when the C{foo} package is
installed, but the C{bar:manual} component will not be installed by
default when the C{bar} package is installed.
"""
bucket = policy.PACKAGE_CREATION
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
)
filetree = policy.NO_FILES
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
invariantexceptions = [':test', ':debuginfo']
allowUnusedFilters = True
def doProcess(self, recipe):
if not self.inclusions:
self.inclusions = []
if not self.exceptions:
self.exceptions = []
recipe.setByDefaultOn(frozenset(self.inclusions))
recipe.setByDefaultOff(frozenset(self.exceptions +
self.invariantexceptions))
class _UserGroup(policy.Policy):
"""
Abstract base class that implements marking owner/group dependencies.
"""
bucket = policy.PACKAGE_CREATION
# All classes that descend from _UserGroup must run before the
# Requires policy, as they implicitly depend on it to set the
# file requirements and union the requirements up to the package.
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('Requires', policy.REQUIRED_SUBSEQUENT),
)
filetree = policy.PACKAGE
processUnmodified = True
def setUserGroupDep(self, path, info, depClass):
componentMap = self.recipe.autopkg.componentMap
if path not in componentMap:
return
pkg = componentMap[path]
f = pkg.getFile(path)
if path not in pkg.requiresMap:
pkg.requiresMap[path] = deps.DependencySet()
pkg.requiresMap[path].addDep(depClass, deps.Dependency(info, []))
class Ownership(_UserGroup):
"""
NAME
====
B{C{r.Ownership()}} - Set file ownership
SYNOPSIS
========
C{r.Ownership([I{username},] [I{groupname},] [I{filterexp}])}
DESCRIPTION
===========
The C{r.Ownership()} policy sets user and group ownership of files when
the default of C{root:root} is not appropriate.
List the ownerships in order, most specific first, ending with least
specific. The filespecs will be matched in the order that you provide them.
KEYWORDS
========
None.
EXAMPLES
========
C{r.Ownership('apache', 'apache', '%(localstatedir)s/lib/php/session')}
Sets ownership of C{%(localstatedir)s/lib/php/session} to owner
C{apache}, and group C{apache}.
"""
def __init__(self, *args, **keywords):
self.filespecs = []
self.systemusers = ('root',)
self.systemgroups = ('root',)
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
if args:
for filespec in args[2:]:
self.filespecs.append((filespec, args[0], args[1]))
policy.Policy.updateArgs(self, **keywords)
def doProcess(self, recipe):
# we must NEVER take ownership from the filesystem
assert(not self.exceptions)
self.rootdir = self.rootdir % recipe.macros
self.fileFilters = []
for (filespec, user, group) in self.filespecs:
self.fileFilters.append(
(filter.Filter(filespec, recipe.macros),
user %recipe.macros,
group %recipe.macros))
del self.filespecs
policy.Policy.doProcess(self, recipe)
def doFile(self, path):
if self.recipe._getCapsulePathsForFile(path):
return
pkgfile = self.recipe.autopkg.pathMap[path]
pkgOwner = pkgfile.inode.owner()
pkgGroup = pkgfile.inode.group()
bestOwner = pkgOwner
bestGroup = pkgGroup
for (f, owner, group) in self.fileFilters:
if f.match(path):
bestOwner, bestGroup = owner, group
break
if bestOwner != pkgOwner:
pkgfile.inode.owner.set(bestOwner)
if bestGroup != pkgGroup:
pkgfile.inode.group.set(bestGroup)
if bestOwner and bestOwner not in self.systemusers:
self.setUserGroupDep(path, bestOwner, deps.UserInfoDependencies)
if bestGroup and bestGroup not in self.systemgroups:
self.setUserGroupDep(path, bestGroup, deps.GroupInfoDependencies)
class _Utilize(_UserGroup):
"""
Pure virtual base class for C{UtilizeUser} and C{UtilizeGroup}
"""
def __init__(self, *args, **keywords):
self.filespecs = []
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
"""
call as::
UtilizeFoo(item, filespec(s)...)
List them in order, most specific first, ending with most
general; the filespecs will be matched in the order that
you provide them.
"""
if args:
item = args[0] % self.recipe.macros
for filespec in args[1:]:
self.filespecs.append((filespec, item))
policy.Policy.updateArgs(self, **keywords)
def doProcess(self, recipe):
self.rootdir = self.rootdir % recipe.macros
self.fileFilters = []
for (filespec, item) in self.filespecs:
self.fileFilters.append(
(filter.Filter(filespec, recipe.macros), item))
del self.filespecs
policy.Policy.doProcess(self, recipe)
def doFile(self, path):
for (f, item) in self.fileFilters:
if f.match(path):
self._markItem(path, item)
return
def _markItem(self, path, item):
# pure virtual
assert(False)
class UtilizeUser(_Utilize):
"""
NAME
====
B{C{r.UtilizeUser()}} - Marks files as requiring a user definition to exist
SYNOPSIS
========
C{r.UtilizeUser([I{username}, I{filterexp}])}
DESCRIPTION
===========
The C{r.UtilizeUser} policy marks files as requiring a user definition
to exist even though the file is not owned by that user.
This is particularly useful for daemons that are setuid root
and change their user id to a user id with no filesystem permissions
after they start.
EXAMPLES
========
C{r.UtilizeUser('sshd', '%(sbindir)s/sshd')}
Marks the file C{%(sbindir)s/sshd} as requiring the user definition
'sshd' although the file is not owned by the 'sshd' user.
"""
def _markItem(self, path, user):
if not self.recipe._getCapsulePathsForFile(path):
self.info('user %s: %s' % (user, path))
self.setUserGroupDep(path, user, deps.UserInfoDependencies)
class UtilizeGroup(_Utilize):
"""
NAME
====
B{C{r.UtilizeGroup()}} - Marks files as requiring a user definition to
exist
SYNOPSIS
========
C{r.UtilizeGroup([groupname, filterexp])}
DESCRIPTION
===========
The C{r.UtilizeGroup} policy marks files as requiring a group definition
to exist even though the file is not owned by that group.
This is particularly useful for daemons that are setuid root
and change their group id to a group id with no filesystem permissions
after they start.
EXAMPLES
========
C{r.UtilizeGroup('users', '%(sysconfdir)s/default/useradd')}
Marks the file C{%(sysconfdir)s/default/useradd} as requiring the group
definition 'users' although the file is not owned by the 'users' group.
"""
def _markItem(self, path, group):
if not self.recipe._getCapsulePathsForFile(path):
self.info('group %s: %s' % (group, path))
self.setUserGroupDep(path, group, deps.GroupInfoDependencies)
class ComponentRequires(policy.Policy):
"""
NAME
====
B{C{r.ComponentRequires()}} - Create automatic intra-package,
inter-component dependencies
SYNOPSIS
========
C{r.ComponentRequires([{'I{componentname}': I{requiringComponentSet}}] |
[{'I{packagename}': {'I{componentname}': I{requiringComponentSet}}}])}
DESCRIPTION
===========
The C{r.ComponentRequires()} policy creates automatic,
intra-package, inter-component dependencies, such as a corresponding
dependency between C{:lib} and C{:data} components.
Changes are passed in using dictionaries, both for additions that
are specific to a single package, and additions that apply
generally to all binary packages being cooked from one recipe.
For general changes that are not specific to a package, use this syntax:
C{r.ComponentRequires({'I{componentname}': I{requiringComponentSet}})}.
For package-specific changes, you need to specify packages as well
as components:
C{r.ComponentRequires({'I{packagename}': {'I{componentname}': I{requiringComponentSet}}})}.
By default, both C{:lib} and C{:runtime} components (if they exist)
require the C{:data} component (if it exists). If you call
C{r.ComponentRequires({'data': set(('lib',))})}, you limit it
so that C{:runtime} components will not require C{:data} components
for this recipe.
In recipes that create more than one binary package, you may need
to limit your changes to a single binary package. To do so, use
the package-specific syntax. For example, to remove the C{:runtime}
requirement on C{:data} only for the C{foo} package, call:
C{r.ComponentRequires({'foo': {'data': set(('lib',))}})}.
Note that C{r.ComponentRequires} cannot require capability flags; use
C{r.Requires} if you need to specify requirements, including capability
flags.
EXAMPLES
========
C{r.ComponentRequires({'openssl': {'config': set(('runtime', 'lib'))}})}
Uses C{r.ComponentRequires} to create dependencies in a top-level manner
for the C{:runtime} and C{:lib} component sets to require the
C{:config} component for the C{openssl} package.
"""
bucket = policy.PACKAGE_CREATION
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('ExcludeDirectories', policy.CONDITIONAL_PRIOR),
)
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def __init__(self, *args, **keywords):
self.depMap = {
# component: components that require it if they both exist
'data': frozenset(('lib', 'runtime', 'devellib', 'cil', 'java',
'perl', 'python', 'ruby')),
'devellib': frozenset(('devel',)),
'lib': frozenset(('devel', 'devellib', 'runtime')),
'config': frozenset(('runtime', 'lib', 'devellib', 'devel')),
}
self.overridesMap = {}
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
d = args[0]
if isinstance(d[d.keys()[0]], dict): # dict of dicts
for packageName in d:
if packageName not in self.overridesMap:
# start with defaults, then override them individually
o = {}
o.update(self.depMap)
self.overridesMap[packageName] = o
self.overridesMap[packageName].update(d[packageName])
else: # dict of sets
self.depMap.update(d)
def do(self):
flags = []
if self.recipe.isCrossCompileTool():
flags.append((_getTargetDepFlag(self.macros), deps.FLAG_SENSE_REQUIRED))
components = self.recipe.autopkg.components
for packageName in [x.name for x in self.recipe.autopkg.packageMap]:
if packageName in self.overridesMap:
d = self.overridesMap[packageName]
else:
d = self.depMap
for requiredComponent in d:
for requiringComponent in d[requiredComponent]:
reqName = ':'.join((packageName, requiredComponent))
wantName = ':'.join((packageName, requiringComponent))
if (reqName in components and wantName in components and
components[reqName] and components[wantName]):
if (d == self.depMap and
reqName in self.recipe._componentReqs and
wantName in self.recipe._componentReqs):
# this is an automatically generated dependency
# which was not in the parent of a derived
# package. don't add it here either
continue
# Note: this does not add dependencies to files;
# these dependencies are insufficiently specific
# to attach to files.
ds = deps.DependencySet()
depClass = deps.TroveDependencies
ds.addDep(depClass, deps.Dependency(reqName, flags))
p = components[wantName]
p.requires.union(ds)
class ComponentProvides(policy.Policy):
"""
NAME
====
B{C{r.ComponentProvides()}} - Causes each trove to explicitly provide
itself.
SYNOPSIS
========
C{r.ComponentProvides(I{flags})}
DESCRIPTION
===========
The C{r.ComponentProvides()} policy causes each trove to explicitly
provide its name. Call it to provide optional capability flags
consisting of a single string, or a list, tuple, or set of strings,
It is impossible to provide a capability flag for one component but
not another within a single package.
EXAMPLES
========
C{r.ComponentProvides("addcolumn")}
Uses C{r.ComponentProvides} in the context of the sqlite recipe, and
causes sqlite to provide itself explicitly with the capability flag
C{addcolumn}.
"""
bucket = policy.PACKAGE_CREATION
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('ExcludeDirectories', policy.CONDITIONAL_PRIOR),
)
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def __init__(self, *args, **keywords):
self.flags = set()
self.excepts = set()
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
if 'exceptions' in keywords:
exceptions = keywords.pop('exceptions')
if type(exceptions) is str:
self.excepts.add(exceptions)
elif type(exceptions) in (tuple, list):
self.excepts.update(set(exceptions))
if not args:
return
if len(args) >= 2:
# update the documentation if we ever support the
# pkgname, flags calling convention
#pkgname = args[0]
flags = args[1]
else:
flags = args[0]
if not isinstance(flags, (list, tuple, set)):
flags=(flags,)
self.flags |= set(flags)
def do(self):
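# compile the exception patterns and drop any capability flag matching one
# of them before attaching the provides to every component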
self.excepts = set(re.compile(x) for x in self.excepts)
self.flags = set(x for x in self.flags
if not [y.match(x) for y in self.excepts])
if self.flags:
flags = [ (x % self.macros, deps.FLAG_SENSE_REQUIRED)
for x in self.flags ]
else:
flags = []
if self.recipe.isCrossCompileTool():
flags.append(('target-%s' % self.macros.target,
deps.FLAG_SENSE_REQUIRED))
for component in self.recipe.autopkg.components.values():
component.provides.addDep(deps.TroveDependencies,
deps.Dependency(component.name, flags))
def _getTargetDepFlag(macros):
return 'target-%s' % macros.target
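# Illustrative note (target value is hypothetical): with
# macros.target == 'x86_64-unknown-linux', _getTargetDepFlag(macros)
# returns 'target-x86_64-unknown-linux'; callers pair that string with
# deps.FLAG_SENSE_REQUIRED, e.g.
#   ('target-x86_64-unknown-linux', deps.FLAG_SENSE_REQUIRED)
# when building flags for cross-compile tools.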
class _dependency(policy.Policy):
"""
Internal class for shared code between Provides and Requires
"""
def __init__(self, *args, **kwargs):
# bootstrap keeping only one copy of these around
self.bootstrapPythonFlags = None
self.bootstrapSysPath = []
self.bootstrapPerlIncPath = []
self.bootstrapRubyLibs = []
self.cachedProviders = {}
self.pythonFlagNamespace = None
self.removeFlagsByDependencyClass = None # pre-transform
self.removeFlagsByDependencyClassMap = {}
def updateArgs(self, *args, **keywords):
removeFlagsByDependencyClass = keywords.pop(
'removeFlagsByDependencyClass', None)
if removeFlagsByDependencyClass is not None:
clsName, ignoreFlags = removeFlagsByDependencyClass
cls = deps.dependencyClassesByName[clsName]
l = self.removeFlagsByDependencyClassMap.setdefault(cls, [])
if isinstance(ignoreFlags, (list, set, tuple)):
l.append(set(ignoreFlags))
else:
l.append(re.compile(ignoreFlags))
policy.Policy.updateArgs(self, **keywords)
def preProcess(self):
self.CILPolicyRE = re.compile(r'.*mono/.*/policy.*/policy.*\.config$')
self.legalCharsRE = re.compile('[.0-9A-Za-z_+-/]')
self.pythonInterpRE = re.compile(r'\.[a-z]+-\d\dm?')
# interpolate macros, using canonical path form with no trailing /
self.sonameSubtrees = set(os.path.normpath(x % self.macros)
for x in self.sonameSubtrees)
self.pythonFlagCache = {}
self.pythonTroveFlagCache = {}
self.pythonVersionCache = {}
def _hasContents(self, m, contents):
"""
Return False if contents is set and m does not have that contents
"""
if contents and (contents not in m.contents or not m.contents[contents]):
return False
return True
def _isELF(self, m, contents=None):
"Test whether is ELF file and optionally has certain contents"
# Note: for provides, check for 'abi' not 'provides' because we
# can provide the filename even if there is no provides list
# as long as a DT_NEEDED entry has been present to set the abi
return m and m.name == 'ELF' and self._hasContents(m, contents)
def _isPython(self, path):
return path.endswith('.py') or path.endswith('.pyc')
def _isPythonModuleCandidate(self, path):
return path.endswith('.so') or self._isPython(path)
def _runPythonScript(self, binPath, destdir, libdir, scriptLines):
script = '\n'.join(scriptLines)
environ = {}
if binPath.startswith(destdir):
environ['LD_LIBRARY_PATH'] = destdir + libdir
proc = subprocess.Popen([binPath, '-Ec', script],
executable=binPath,
stdout=subprocess.PIPE,
shell=False,
env=environ,
)
stdout, _ = proc.communicate()
if proc.returncode:
raise RuntimeError("Process exited with status %s" %
(proc.returncode,))
return stdout
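# A minimal usage sketch (interpreter path is hypothetical) showing how
# the callers below drive the target interpreter: '-Ec' ignores the
# PYTHON* environment variables and runs the joined script lines, and
# the captured stdout is returned, e.g.
#   _runPythonScript('/usr/bin/python2.7', destdir, libdir,
#       ["import sys", "print('%d.%d' % sys.version_info[:2])"])
# would return '2.7\n', or raise RuntimeError if the interpreter exits
# with a non-zero status.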
def _getPythonVersion(self, pythonPath, destdir, libdir):
if pythonPath not in self.pythonVersionCache:
try:
stdout = self._runPythonScript(pythonPath, destdir, libdir,
["import sys", "print('%d.%d' % sys.version_info[:2])"])
self.pythonVersionCache[pythonPath] = stdout.strip()
except (OSError, RuntimeError):
self.warn("Unable to determine Python version directly; "
"guessing based on path.")
self.pythonVersionCache[pythonPath] = self._getPythonVersionFromPath(pythonPath, destdir)
return self.pythonVersionCache[pythonPath]
def _getPythonSysPath(self, pythonPath, destdir, libdir, useDestDir=False):
"""Return the system path for the python interpreter at C{pythonPath}
@param pythonPath: Path to the target python interpreter
@param destdir: Destination root, in case of a python bootstrap
@param libdir: Destination libdir, in case of a python bootstrap
@param useDestDir: If True, look in the destdir instead.
"""
script = ["import sys, site"]
if useDestDir:
# Repoint site.py at the destdir so it picks up .pth files there.
script.extend([
"sys.path = []",
"sys.prefix = %r + sys.prefix" % (destdir,),
"sys.exec_prefix = %r + sys.exec_prefix" % (destdir,),
"site.PREFIXES = [sys.prefix, sys.exec_prefix]",
"site.addsitepackages(None)",
])
script.append(r"print('\0'.join(sys.path))")
try:
stdout = self._runPythonScript(pythonPath, destdir, libdir, script)
except (OSError, RuntimeError):
# something went wrong, don't trust any output
self.info('Could not run system python "%s", guessing sys.path...',
pythonPath)
sysPath = []
else:
sysPath = [x.strip() for x in stdout.split('\0') if x.strip()]
if not sysPath and not useDestDir:
# probably a cross-build -- let's try a decent assumption
# for the syspath.
self.info("Failed to detect system python path, using fallback")
pyVer = self._getPythonVersionFromPath(pythonPath, destdir)
if not pyVer and self.bootstrapPythonFlags is not None:
pyVer = self._getPythonVersionFromFlags(
self.bootstrapPythonFlags)
if pyVer and self.bootstrapSysPath is not None:
lib = self.recipe.macros.lib
# this list needs to include all sys.path elements that
# might be needed for python per se -- note that
# bootstrapPythonFlags and bootstrapSysPath go
# together
sysPath = self.bootstrapSysPath + [
'/usr/%s/%s' %(lib, pyVer),
'/usr/%s/%s/plat-linux2' %(lib, pyVer),
'/usr/%s/%s/lib-tk' %(lib, pyVer),
'/usr/%s/%s/lib-dynload' %(lib, pyVer),
'/usr/%s/%s/site-packages' %(lib, pyVer),
# for purelib python on x86_64
'/usr/lib/%s/site-packages' %pyVer,
]
return sysPath
def _warnPythonPathNotInDB(self, pathName):
self.warn('%s found on system but not provided by'
' system database; python requirements'
' may be generated incorrectly as a result', pathName)
return set([])
def _getPythonTroveFlags(self, pathName):
if pathName in self.pythonTroveFlagCache:
return self.pythonTroveFlagCache[pathName]
db = self._getDb()
foundPath = False
pythonFlags = set()
pythonTroveList = db.iterTrovesByPath(pathName)
if pythonTroveList:
depContainer = pythonTroveList[0]
assert(depContainer.getName())
foundPath = True
for dep in depContainer.getRequires().iterDepsByClass(
deps.PythonDependencies):
flagNames = [x[0] for x in dep.getFlags()[0]]
pythonFlags.update(flagNames)
self.pythonTroveFlagCache[pathName] = pythonFlags
if not foundPath:
self.pythonTroveFlagCache[pathName] = self._warnPythonPathNotInDB(
pathName)
return self.pythonTroveFlagCache[pathName]
def _getPythonFlags(self, pathName, bootstrapPythonFlags=None):
if pathName in self.pythonFlagCache:
return self.pythonFlagCache[pathName]
if bootstrapPythonFlags:
self.pythonFlagCache[pathName] = bootstrapPythonFlags
return self.pythonFlagCache[pathName]
db = self._getDb()
foundPath = False
# FIXME: This should be iterFilesByPath when implemented (CNY-1833)
# For now, cache all the python deps in all the files in the
# trove(s) so that we iterate over each trove only once
containingTroveList = db.iterTrovesByPath(pathName)
for containerTrove in containingTroveList:
for pathid, p, fileid, v in containerTrove.iterFileList():
if pathName == p:
foundPath = True
pythonFlags = set()
f = files.ThawFile(db.getFileStream(fileid), pathid)
for dep in f.provides().iterDepsByClass(
deps.PythonDependencies):
flagNames = [x[0] for x in dep.getFlags()[0]]
pythonFlags.update(flagNames)
self.pythonFlagCache[p] = pythonFlags
if not foundPath:
self.pythonFlagCache[pathName] = self._warnPythonPathNotInDB(
pathName)
return self.pythonFlagCache[pathName]
def _getPythonFlagsFromPath(self, pathName):
pathList = pathName.split('/')
foundLib = False
foundVer = False
flags = set()
for dirName in pathList:
if not foundVer and not foundLib and dirName.startswith('lib'):
# lib will always come before ver
foundLib = True
flags.add(dirName)
elif not foundVer and dirName.startswith('python'):
foundVer = True
flags.add(dirName[6:])
if foundLib and foundVer:
break
if self.pythonFlagNamespace:
flags = set('%s:%s' %(self.pythonFlagNamespace, x) for x in flags)
return flags
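# Worked example (path is hypothetical): for
#   /usr/lib64/python2.6/site-packages/foo/bar.py
# the walk above collects 'lib64' (the first 'lib*' component) and '2.6'
# (the text after 'python'), giving flags {'lib64', '2.6'}; with
# pythonFlagNamespace set to 'ns' they become {'ns:lib64', 'ns:2.6'}.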
def _stringIsPythonVersion(self, s):
return not set(s).difference(set('.0123456789'))
def _getPythonVersionFromFlags(self, flags):
nameSpace = self.pythonFlagNamespace
for flag in flags:
if nameSpace and flag.startswith(nameSpace):
flag = flag[len(nameSpace):]
if self._stringIsPythonVersion(flag):
return 'python'+flag
def _getPythonVersionFromPath(self, pathName, destdir):
if destdir and pathName.startswith(destdir):
pathName = pathName[len(destdir):]
pathList = pathName.split('/')
for dirName in pathList:
if dirName.startswith('python') and self._stringIsPythonVersion(
dirName[6:]):
# python2.4 or python2.5 or python3.9 but not python.so
return dirName
return ''
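# Worked example (paths are hypothetical): '/usr/lib/python2.7/os.py'
# yields 'python2.7' because '2.7' consists only of digits and dots,
# while '/usr/bin/python.so' yields '' since '.so' is not a version
# string.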
def _isCIL(self, m):
return m and m.name == 'CIL'
def _isJava(self, m, contents=None):
return m and isinstance(m, (magic.jar, magic.java)) and self._hasContents(m, contents)
def _isPerlModule(self, path):
return (path.endswith('.pm') or
path.endswith('.pl') or
path.endswith('.ph'))
def _isPerl(self, path, m, f):
return self._isPerlModule(path) or (
f.inode.perms() & 0111 and m and m.name == 'script'
and 'interpreter' in m.contents
and '/bin/perl' in m.contents['interpreter'])
def _createELFDepSet(self, m, elfinfo, recipe=None, basedir=None,
soname=None, soflags=None,
libPathMap={}, getRPATH=None, path=None,
isProvides=None):
"""
Add dependencies from ELF information.
@param m: magic.ELF object
@param elfinfo: requires or provides from magic.ELF.contents
@param recipe: recipe object for calling Requires if basedir is not None
@param basedir: directory to add into dependency
@param soname: alternative soname to use
@param libPathMap: mapping from base dependency name to new dependency name
@param isProvides: whether the dependency being created is a provides
"""
abi = m.contents['abi']
elfClass = abi[0]
nameMap = {}
usesLinuxAbi = False
depSet = deps.DependencySet()
for depClass, main, flags in elfinfo:
if soflags:
flags = itertools.chain(*(flags, soflags))
flags = [ (x, deps.FLAG_SENSE_REQUIRED) for x in flags ]
if depClass == 'soname':
if '/' in main:
main = os.path.basename(main)
if getRPATH:
rpath = getRPATH(main)
if rpath:
# change the name to follow the rpath
main = '/'.join((rpath, main))
elif soname:
main = soname
if basedir:
oldname = os.path.normpath('/'.join((elfClass, main)))
main = '/'.join((basedir, main))
main = os.path.normpath('/'.join((elfClass, main)))
if basedir:
nameMap[main] = oldname
if libPathMap and main in libPathMap:
# if we have a mapping to a provided library that would be
# satisfied, then we modify the requirement to match the
# provision
provided = libPathMap[main]
requiredSet = set(x[0] for x in flags)
providedSet = set(provided.flags.keys())
if requiredSet.issubset(providedSet):
main = provided.getName()[0]
else:
pathString = ''
if path:
pathString = 'for path %s' %path
self.warn('Not replacing %s with %s because of missing %s%s',
main, provided.getName()[0],
sorted(list(requiredSet-providedSet)),
pathString)
curClass = deps.SonameDependencies
for flag in abi[1]:
if flag == 'Linux':
usesLinuxAbi = True
flags.append(('SysV', deps.FLAG_SENSE_REQUIRED))
else:
flags.append((flag, deps.FLAG_SENSE_REQUIRED))
dep = deps.Dependency(main, flags)
elif depClass == 'abi':
curClass = deps.AbiDependency
dep = deps.Dependency(main, flags)
else:
assert(0)
depSet.addDep(curClass, dep)
# This loop has to happen late so that the soname
# flag merging from multiple flag instances has happened
if nameMap:
for soDep in depSet.iterDepsByClass(deps.SonameDependencies):
newName = soDep.getName()[0]
if newName in nameMap:
oldName = nameMap[newName]
recipe.Requires(_privateDepMap=(oldName, soDep))
if usesLinuxAbi and not isProvides:
isnset = m.contents.get('isnset', None)
if elfClass == 'ELF32' and isnset == 'x86':
main = 'ELF32/ld-linux.so.2'
elif elfClass == 'ELF64' and isnset == 'x86_64':
main = 'ELF64/ld-linux-x86-64.so.2'
else:
self.error('%s: unknown ELF class %s or instruction set %s',
path, elfClass, isnset)
return depSet
flags = [('Linux', deps.FLAG_SENSE_REQUIRED),
('SysV', deps.FLAG_SENSE_REQUIRED),
(isnset, deps.FLAG_SENSE_REQUIRED)]
dep = deps.Dependency(main, flags)
depSet.addDep(curClass, dep)
return depSet
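# Illustrative summary of the mapping above (library name is
# hypothetical): an elfinfo entry ('soname', 'libm.so.6', ('GLIBC_2.0',))
# from a 32-bit x86 binary becomes a deps.SonameDependencies dependency
# named 'ELF32/libm.so.6' carrying the version flags plus the ABI flags
# (the 'Linux' ABI flag is recorded as 'SysV'); 'abi' entries use
# deps.AbiDependency instead.  When the set describes requirements of a
# Linux-ABI binary (isProvides is false), a dependency on the dynamic
# linker is added as well: 'ELF32/ld-linux.so.2' on x86 or
# 'ELF64/ld-linux-x86-64.so.2' on x86_64.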
def _addDepToMap(self, path, depMap, depType, dep):
"Add a single dependency to a map, regardless of whether path was listed before"
if path not in depMap:
depMap[path] = deps.DependencySet()
depMap[path].addDep(depType, dep)
def _addDepSetToMap(self, path, depMap, depSet):
"Add a dependency set to a map, regardless of whether path was listed before"
if path in depMap:
depMap[path].union(depSet)
else:
depMap[path] = depSet
@staticmethod
def _recurseSymlink(path, destdir, fullpath=None):
"""
Recurse through symlinks in destdir and get the final path and fullpath.
If initial fullpath (or destdir+path if fullpath not specified)
does not exist, return path.
"""
if fullpath is None:
fullpath = destdir + path
while os.path.islink(fullpath):
contents = os.readlink(fullpath)
if contents.startswith('/'):
fullpath = os.path.normpath(contents)
else:
fullpath = os.path.normpath(
os.path.dirname(fullpath)+'/'+contents)
return fullpath[len(destdir):], fullpath
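# Worked example (layout is hypothetical): with destdir '/tmp/dest', if
# /tmp/dest/usr/lib/libfoo.so is a symlink to 'libfoo.so.1.2', the loop
# above follows it and returns
#   ('/usr/lib/libfoo.so.1.2', '/tmp/dest/usr/lib/libfoo.so.1.2')
# i.e. the destdir-relative path and the full path of the final target.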
def _symlinkMagic(self, path, fullpath, macros, m=None):
"Recurse through symlinks and get the final path and magic"
path, _ = self._recurseSymlink(path, macros.destdir, fullpath=fullpath)
m = self.recipe.magic[path]
return m, path
def _enforceProvidedPath(self, path, fileType='interpreter',
unmanagedError=False):
key = path, fileType
if key in self.cachedProviders:
return self.cachedProviders[key]
db = self._getDb()
troveNames = [ x.getName() for x in db.iterTrovesByPath(path) ]
if not troveNames:
talk = {True: self.error, False: self.warn}[bool(unmanagedError)]
talk('%s file %s not managed by conary' %(fileType, path))
return None
troveName = sorted(troveNames)[0]
# prefer corresponding :devel to :devellib if it exists
package, component = troveName.split(':', 1)
if component in ('devellib', 'lib'):
for preferredComponent in ('devel', 'devellib'):
troveSpec = (
':'.join((package, preferredComponent)),
None, None
)
results = db.findTroves(None, [troveSpec],
allowMissing = True)
if troveSpec in results:
troveName = results[troveSpec][0][0]
break
if troveName not in self.recipe._getTransitiveBuildRequiresNames():
self.recipe.reportMissingBuildRequires(troveName)
self.cachedProviders[key] = troveName
return troveName
def _getRuby(self, macros, path):
# For bootstrapping purposes, prefer the just-built version if
# it exists
# Returns tuple: (pathToRubyInterpreter, bootstrap)
ruby = '%(ruby)s' %macros
if os.access('%(destdir)s/%(ruby)s' %macros, os.X_OK):
return '%(destdir)s/%(ruby)s' %macros, True
elif os.access(ruby, os.X_OK):
# Enforce the build requirement, since it is not in the package
self._enforceProvidedPath(ruby)
return ruby, False
else:
self.warn('%s not available for Ruby dependency discovery'
' for path %s' %(ruby, path))
return False, None
def _getRubyLoadPath(self, macros, rubyInvocation, bootstrap):
# Returns tuple of (invocationString, loadPathList)
destdir = macros.destdir
if bootstrap:
rubyLibPath = [destdir + x for x in self.bootstrapRubyLibs]
rubyInvocation = (('LD_LIBRARY_PATH=%(destdir)s%(libdir)s '
'RUBYLIB="'+':'.join(rubyLibPath)+'" '
+rubyInvocation)%macros)
rubyLoadPath = util.popen(
"%s -e 'puts $:'" %
rubyInvocation).readlines()
# get gem dir if rubygems is installed
if os.access('%(bindir)s/gem' %macros, os.X_OK):
rubyLoadPath.extend(
util.popen("%s -rubygems -e 'puts Gem.default_dir'" %
rubyInvocation).readlines())
rubyLoadPath = [ x.strip() for x in rubyLoadPath if x.startswith('/') ]
loadPathList = rubyLoadPath[:]
if bootstrap:
rubyLoadPath = [ destdir+x for x in rubyLoadPath ]
rubyInvocation = ('LD_LIBRARY_PATH=%(destdir)s%(libdir)s'
' RUBYLIB="'+':'.join(rubyLoadPath)+'"'
' %(destdir)s/%(ruby)s') % macros
return (rubyInvocation, loadPathList)
def _getRubyVersion(self, macros):
cmd = self.rubyInvocation + (" -e 'puts RUBY_VERSION'" % macros)
rubyVersion = util.popen(cmd).read()
rubyVersion = '.'.join(rubyVersion.split('.')[0:2])
return rubyVersion
def _getRubyFlagsFromPath(self, pathName, rubyVersion):
pathList = pathName.split('/')
pathList = [ x for x in pathList if x ]
foundLib = False
foundVer = False
flags = set()
for dirName in pathList:
if not foundLib and dirName.startswith('lib'):
foundLib = True
flags.add(dirName)
elif not foundVer and dirName.split('.')[:1] == rubyVersion.split('.')[:1]:
# we only compare major and minor versions due to
# ruby api version (dirName) differing from programs
# version (rubyVersion)
foundVer = True
flags.add(dirName)
if foundLib and foundVer:
break
return flags
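# Worked example (path is hypothetical): for
#   /usr/lib64/ruby/site_ruby/1.8/foo.rb  with rubyVersion '1.8'
# the loop collects 'lib64' (the first 'lib*' component) and '1.8' (the
# first component whose leading version field matches the interpreter's),
# so the returned flags are {'lib64', '1.8'}.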
def _getmonodis(self, macros, path):
# For bootstrapping purposes, prefer the just-built version if
# it exists
monodis = '%(monodis)s' %macros
if os.access('%(destdir)s/%(monodis)s' %macros, os.X_OK):
return ('MONO_PATH=%(destdir)s%(prefix)s/lib'
' LD_LIBRARY_PATH=%(destdir)s%(libdir)s'
' %(destdir)s/%(monodis)s' %macros)
elif os.access(monodis, os.X_OK):
# Enforce the build requirement, since it is not in the package
self._enforceProvidedPath(monodis)
return monodis
else:
self.warn('%s not available for CIL dependency discovery'
' for path %s' %(monodis, path))
return None
def _getperlincpath(self, perl, destdir):
"""
Fetch the perl @INC path, falling back to bootstrapPerlIncPath
only if perl cannot be run. All elements of the search path
will be resolved against symlinks in destdir if they exist. (CNY-2949)
"""
if not perl:
return []
p = util.popen(r"""%s -e 'print join("\n", @INC)'""" %perl)
perlIncPath = p.readlines()
# make sure that the command completed successfully
try:
rc = p.close()
perlIncPath = [x.strip() for x in perlIncPath if not x.startswith('.')]
return [self._recurseSymlink(x, destdir)[0] for x in perlIncPath]
except RuntimeError:
return [self._recurseSymlink(x, destdir)[0]
for x in self.bootstrapPerlIncPath]
def _getperl(self, macros, recipe):
"""
Find the preferred instance of perl to use, including setting
any environment variables necessary to use that perl.
Returns string for running it, the C{@INC} path, and a separate
string, if necessary, for adding to @INC.
"""
perlDestPath = '%(destdir)s%(bindir)s/perl' %macros
# not %(bindir)s so that package modifications do not affect
# the search for system perl
perlPath = '/usr/bin/perl'
destdir = macros.destdir
def _perlDestInc(destdir, perlDestInc):
return ' '.join(['-I' + destdir + x for x in perlDestInc])
if os.access(perlDestPath, os.X_OK):
# must use packaged perl if it exists
m = recipe.magic[perlDestPath[len(destdir):]] # not perlPath
if m and 'RPATH' in m.contents and m.contents['RPATH']:
# we need to prepend the destdir to each element of the RPATH
# in order to run perl in the destdir
perl = ''.join((
'export LD_LIBRARY_PATH=',
'%s%s:' %(destdir, macros.libdir),
':'.join([destdir+x
for x in m.contents['RPATH'].split(':')]),
';',
perlDestPath
))
perlIncPath = self._getperlincpath(perl, destdir)
perlDestInc = _perlDestInc(destdir, perlIncPath)
return [perl, perlIncPath, perlDestInc]
else:
# perl that does not use/need rpath
perl = 'LD_LIBRARY_PATH=%s%s %s' %(
destdir, macros.libdir, perlDestPath)
perlIncPath = self._getperlincpath(perl, destdir)
perlDestInc = _perlDestInc(destdir, perlIncPath)
return [perl, perlIncPath, perlDestInc]
elif os.access(perlPath, os.X_OK):
# system perl if no packaged perl, needs no @INC mangling
self._enforceProvidedPath(perlPath)
perlIncPath = self._getperlincpath(perlPath, destdir)
return [perlPath, perlIncPath, '']
# must be no perl at all
return ['', [], '']
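# Illustrative return values (paths are hypothetical): with a perl
# packaged in the destdir and no RPATH, the triple looks roughly like
#   ('LD_LIBRARY_PATH=/dest/usr/lib /dest/usr/bin/perl',
#    ['/usr/lib/perl5/5.10.0', ...],
#    '-I/dest/usr/lib/perl5/5.10.0 ...')
# with only a system perl it is (perlPath, incPath, ''), and with no
# perl at all it is ('', [], '').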
def _getPython(self, macros, path):
"""
Takes a path
Returns, for that path, a tuple of
- the preferred instance of python to use
- whether that instance is in the destdir
"""
m = self.recipe.magic[path]
if m and m.name == 'script' and 'python' in m.contents['interpreter']:
pythonPath = [m.contents['interpreter']]
else:
pythonVersion = self._getPythonVersionFromPath(path, None)
# After PATH, fall back to %(bindir)s. If %(bindir)s should be
# preferred, it needs to be earlier in the PATH. Include
# unversioned python as a last resort for confusing cases.
shellPath = os.environ.get('PATH', '').split(':') + [ '%(bindir)s' ]
pythonPath = []
if pythonVersion:
pythonPath = [ os.path.join(x, pythonVersion) for x in shellPath ]
pythonPath.extend([ os.path.join(x, 'python') for x in shellPath ])
for pathElement in pythonPath:
pythonDestPath = ('%(destdir)s'+pathElement) %macros
if os.access(pythonDestPath, os.X_OK):
return (pythonDestPath, True)
for pathElement in pythonPath:
pythonDestPath = pathElement %macros
if os.access(pythonDestPath, os.X_OK):
self._enforceProvidedPath(pythonDestPath)
return (pythonDestPath, False)
# Specified python not found on system (usually because of
# bad interpreter path -- CNY-2050)
if len(pythonPath) == 1:
missingPythonPath = '%s ' % pythonPath[0]
else:
missingPythonPath = ''
self.warn('Python interpreter %snot found for %s',
missingPythonPath, path)
return (None, None)
def _stripDestDir(self, pathList, destdir):
destDirLen = len(destdir)
pathElementList = []
for pathElement in pathList:
if pathElement.startswith(destdir):
pathElementList.append(pathElement[destDirLen:])
else:
pathElementList.append(pathElement)
return pathElementList
class Provides(_dependency):
"""
NAME
====
B{C{r.Provides()}} - Creates dependency provision
SYNOPSIS
========
C{r.Provides([I{provision}, I{filterexp}] || [I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.Provides()} policy marks files as providing certain features
or characteristics, and can be called to explicitly provide things
that cannot be automatically discovered. C{r.Provides} can also override
automatic discovery, and prevent marking a file as providing things, such
as for package-private plugin modules installed in system library
directories.
A C{I{provision}} may be C{'file'} to mark a file as providing its
filename, or a dependency type. You can create a file, soname or
ABI C{I{provision}} manually; all other types are only automatically
discovered. Provisions that begin with C{file} are files, those that
start with C{soname:} are sonames, and those that start with C{abi:}
are ABIs. Other prefixes are reserved.
Soname provisions are normally discovered automatically; they need
to be provided manually only in two cases:
- If a shared library was not built with a soname at all.
- If a symbolic link to a shared library needs to provide its name
as a soname.
Note: Use C{r.ComponentProvides} rather than C{r.Provides} to add
capability flags to components.
For unusual cases where you want to remove a provision Conary
automatically finds, you can specify C{r.Provides(exceptDeps='regexp')}
to override all provisions matching a regular expression,
C{r.Provides(exceptDeps=('filterexp', 'regexp'))}
to override provisions matching a regular expression only for files
matching filterexp, or
C{r.Provides(exceptDeps=(('filterexp', 'regexp'), ...))} to specify
multiple overrides.
EXAMPLES
========
C{r.Provides('file', '/usr/share/dict/words')}
Demonstrates using C{r.Provides} to specify the file provision
C{/usr/share/dict/words}, so that other files can now require that file.
C{r.Provides('soname: libperl.so', '%(libdir)s/perl5/.*/CORE/libperl.so')}
Demonstrates synthesizing a shared library provision for all the
libperl.so symlinks.
C{r.Provides(exceptDeps = 'java: .*')}
Demonstrates removing all java provisions.
"""
bucket = policy.PACKAGE_CREATION
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('SharedLibrary', policy.REQUIRED),
# _ELFPathProvide calls Requires to pass in discovered info
# _addCILPolicyProvides does likewise
('Requires', policy.REQUIRED_SUBSEQUENT),
)
filetree = policy.PACKAGE
invariantexceptions = (
'%(docdir)s/',
)
dbDepCacheClass = _DatabaseDepCache
def __init__(self, *args, **keywords):
_dependency.__init__(self, *args, **keywords)
self.provisions = []
self.sonameSubtrees = set()
self.sysPath = None
self.monodisPath = None
self.rubyInterpreter = None
self.rubyVersion = None
self.rubyInvocation = None
self.rubyLoadPath = None
self.perlIncPath = None
self.pythonSysPathMap = {}
self.exceptDeps = []
policy.Policy.__init__(self, *args, **keywords)
self.depCache = self.dbDepCacheClass(self._getDb())
def updateArgs(self, *args, **keywords):
if args:
for filespec in args[1:]:
self.provisions.append((filespec, args[0]))
sonameSubtrees = keywords.pop('sonameSubtrees', None)
if sonameSubtrees:
if type(sonameSubtrees) in (list, tuple):
self.sonameSubtrees.update(set(sonameSubtrees))
else:
self.sonameSubtrees.add(sonameSubtrees)
exceptDeps = keywords.pop('exceptDeps', None)
if exceptDeps:
if type(exceptDeps) is str:
exceptDeps = ('.*', exceptDeps)
assert(type(exceptDeps) == tuple)
if type(exceptDeps[0]) is tuple:
self.exceptDeps.extend(exceptDeps)
else:
self.exceptDeps.append(exceptDeps)
# The next three are called only from Requires and should override
# completely to make sure the policies are in sync
pythonFlagNamespace = keywords.pop('_pythonFlagNamespace', None)
if pythonFlagNamespace is not None:
self.pythonFlagNamespace = pythonFlagNamespace
bootstrapPythonFlags = keywords.pop('_bootstrapPythonFlags', None)
if bootstrapPythonFlags is not None:
self.bootstrapPythonFlags = bootstrapPythonFlags
bootstrapSysPath = keywords.pop('_bootstrapSysPath', None)
if bootstrapSysPath is not None:
self.bootstrapSysPath = bootstrapSysPath
bootstrapPerlIncPath = keywords.pop('_bootstrapPerlIncPath', None)
if bootstrapPerlIncPath is not None:
self.bootstrapPerlIncPath = bootstrapPerlIncPath
bootstrapRubyLibs = keywords.pop('_bootstrapRubyLibs', None)
if bootstrapRubyLibs is not None:
self.bootstrapRubyLibs = bootstrapRubyLibs
if keywords.get('removeFlagsByDependencyClass', None):
self.error('removeFlagsByDependencyClass not currently implemented for Provides (CNY-3443)')
_dependency.updateArgs(self, **keywords)
def preProcess(self):
macros = self.macros
if self.bootstrapPythonFlags is not None:
self.bootstrapPythonFlags = set(x % macros
for x in self.bootstrapPythonFlags)
if self.bootstrapSysPath:
self.bootstrapSysPath = [x % macros for x in self.bootstrapSysPath]
if self.pythonFlagNamespace is not None:
self.pythonFlagNamespace = self.pythonFlagNamespace % macros
if self.bootstrapPerlIncPath:
self.bootstrapPerlIncPath = [x % macros for x in self.bootstrapPerlIncPath]
self.rootdir = self.rootdir % macros
self.fileFilters = []
self.binDirs = frozenset(
x % macros for x in [
'%(bindir)s', '%(sbindir)s',
'%(essentialbindir)s', '%(essentialsbindir)s',
'%(libexecdir)s', ])
self.noProvDirs = frozenset(
x % macros for x in [
'%(testdir)s',
'%(debuglibdir)s',
]).union(self.binDirs)
exceptDeps = []
for fE, rE in self.exceptDeps:
try:
exceptDeps.append((filter.Filter(fE, macros),
re.compile(rE % self.macros)))
except sre_constants.error, e:
self.error('Bad regular expression %s for file spec %s: %s', rE, fE, e)
self.exceptDeps= exceptDeps
for filespec, provision in self.provisions:
self.fileFilters.append(
(filter.Filter(filespec, macros), provision % macros))
del self.provisions
_dependency.preProcess(self)
def doFile(self, path):
pkgs = self.recipe.autopkg.findComponents(path)
if not pkgs:
return
pkgFiles = [(x, x.getFile(path)) for x in pkgs]
macros = self.recipe.macros
m = self.recipe.magic[path]
fullpath = macros.destdir + path
basepath = os.path.basename(path)
dirpath = os.path.dirname(path)
if os.path.exists(fullpath):
mode = os.lstat(fullpath)[stat.ST_MODE]
# First, add in the manual provisions
self.addExplicitProvides(path, fullpath, pkgFiles, macros, m)
# Next, discover all automatically-discoverable provisions
if os.path.exists(fullpath):
if (self._isELF(m, 'abi')
and m.contents['Type'] != elf.ET_EXEC
and not [ x for x in self.noProvDirs if path.startswith(x) ]):
# we do not add elf provides for programs that won't be linked to
self._ELFAddProvide(path, m, pkgFiles, basedir=dirpath)
if dirpath in self.sonameSubtrees:
# only export the filename as a soname if it is a shared library
sm, finalpath = self._symlinkMagic(path, fullpath, macros, m)
if sm and self._isELF(sm, 'abi') and sm.contents['Type'] != elf.ET_EXEC:
# add the filename as a soname provision (CNY-699)
# note: no provides necessary
self._ELFAddProvide(path, sm, pkgFiles, soname=basepath, basedir=dirpath)
if self._isPythonModuleCandidate(path):
self._addPythonProvides(path, m, pkgFiles, macros)
rubyProv = self._isRubyModule(path, macros, fullpath)
if rubyProv:
self._addRubyProvides(path, m, pkgFiles, macros, rubyProv)
elif self._isCIL(m):
self._addCILProvides(path, m, pkgFiles, macros)
elif self.CILPolicyRE.match(path):
self._addCILPolicyProvides(path, pkgFiles, macros)
elif self._isJava(m, 'provides'):
# Cache the internal provides
if not hasattr(self.recipe, '_internalJavaDepMap'):
self.recipe._internalJavaDepMap = None
self._addJavaProvides(path, m, pkgFiles)
elif self._isPerlModule(path):
self._addPerlProvides(path, m, pkgFiles)
self.addPathDeps(path, dirpath, pkgFiles)
self.whiteOut(path, pkgFiles)
self.unionDeps(path, pkgFiles)
def whiteOut(self, path, pkgFiles):
# remove intentionally discarded provides
for pkg, f in pkgFiles:
if self.exceptDeps and path in pkg.providesMap:
depSet = deps.DependencySet()
for depClass, dep in pkg.providesMap[path].iterDeps():
for filt, exceptRe in self.exceptDeps:
if filt.match(path):
matchName = '%s: %s' %(depClass.tagName, str(dep))
if exceptRe.match(matchName):
# found one to not copy
dep = None
break
if dep is not None:
depSet.addDep(depClass, dep)
pkg.providesMap[path] = depSet
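# Illustrative note (regexp is hypothetical): exceptDeps filters are
# matched against strings of the form '<depclass tag>: <dependency>',
# so a recipe call such as r.Provides(exceptDeps='java: .*') produces a
# filter that matches every java provision for any path, and those deps
# are dropped here before being recorded in providesMap.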
def addExplicitProvides(self, path, fullpath, pkgFiles, macros, m):
for (filter, provision) in self.fileFilters:
if filter.match(path):
self._markProvides(path, fullpath, provision, pkgFiles, macros, m)
def addPathDeps(self, path, dirpath, pkgFiles):
# Because paths can change, individual files do not provide their
# paths. However, within a trove, a file does provide its name.
# Furthermore, non-regular files can be path dependency targets.
# Therefore, we have to handle this case a bit differently.
for pkg, f in pkgFiles:
if dirpath in self.binDirs and not isinstance(f, files.Directory):
# CNY-930: automatically export paths in bindirs
# CNY-1721: but not directories in bindirs
f.flags.isPathDependencyTarget(True)
if f.flags.isPathDependencyTarget():
pkg.provides.addDep(deps.FileDependencies, deps.Dependency(path))
def unionDeps(self, path, pkgFiles):
for pkg, f in pkgFiles:
if path in pkg.providesMap:
f.provides.set(pkg.providesMap[path])
pkg.provides.union(f.provides())
def _getELFinfo(self, m, soname):
if 'provides' in m.contents and m.contents['provides']:
return m.contents['provides']
else:
# we need to synthesize some provides information
return [('soname', soname, ())]
def _ELFAddProvide(self, path, m, pkgFiles, soname=None, soflags=None, basedir=None):
if basedir is None:
basedir = os.path.dirname(path)
if basedir in self.sonameSubtrees:
# do not record the basedir
basedir = None
else:
# path needs to be in the dependency, since the
# provides is too broad otherwise, so add it.
# We can only add characters from the path that are legal
# in a dependency name
basedir = ''.join(x for x in basedir if self.legalCharsRE.match(x))
elfinfo = self._getELFinfo(m, os.path.basename(path))
depSet = self._createELFDepSet(m, elfinfo,
recipe=self.recipe, basedir=basedir,
soname=soname, soflags=soflags,
path=path, isProvides=True)
for pkg, _ in pkgFiles:
self._addDepSetToMap(path, pkg.providesMap, depSet)
def _getPythonProvidesSysPath(self, path):
"""Generate an ordered list of python paths for the target package.
This includes the current system path, plus any paths added by the new
package in the destdir through .pth files or a newly built python.
@return: (sysPath, pythonVersion)
"""
pythonPath, bootstrapPython = self._getPython(self.macros, path)
if not pythonPath:
# Most likely bad interpreter path in a .py file
return (None, None)
if pythonPath in self.pythonSysPathMap:
return self.pythonSysPathMap[pythonPath]
destdir = self.macros.destdir
libdir = self.macros.libdir
pythonVersion = self._getPythonVersion(pythonPath, destdir, libdir)
# Get default sys.path from python interpreter, either the one just
# built (in the case of a python bootstrap) or from the system.
systemPaths = set(self._getPythonSysPath(pythonPath, destdir, libdir,
useDestDir=False))
# Now add paths from the destdir's site-packages, typically due to
# newly installed .pth files.
systemPaths.update(self._getPythonSysPath(pythonPath, destdir, libdir,
useDestDir=True))
# Sort in descending order so that the longest path matches first.
sysPath = sorted(self._stripDestDir(systemPaths, destdir), reverse=True)
self.pythonSysPathMap[pythonPath] = (sysPath, pythonVersion)
return self.pythonSysPathMap[pythonPath]
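# Illustrative return value (paths and version are hypothetical): for a
# system python 2.6 this typically looks like
#   (['/usr/lib64/python2.6/site-packages', '/usr/lib64/python2.6', ...],
#    '2.6')
# with longer, more specific prefixes sorting ahead of their parents so
# that the most specific sys.path entry matches first in
# _addPythonProvides.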
def _fetchPerlIncPath(self):
"""
Cache the perl @INC path, sorted longest first
"""
if self.perlIncPath is not None:
return
_, self.perlIncPath, _ = self._getperl(
self.recipe.macros, self.recipe)
self.perlIncPath.sort(key=len, reverse=True)
def _addPythonProvides(self, path, m, pkgFiles, macros):
if not self._isPythonModuleCandidate(path):
return
sysPath, pythonVersion = self._getPythonProvidesSysPath(path)
if not sysPath:
return
# Add provides for every match in sys.path. For example, PIL.Imaging
# and Imaging should both be provided since they are both reachable
# names.
for sysPathEntry in sysPath:
if not path.startswith(sysPathEntry):
continue
newDepPath = path[len(sysPathEntry)+1:]
if newDepPath.split('.')[0] == '__init__':
# we don't allow bare __init__ as a python import
# hopefully we'll find this init as a deeper import at some
# other point in the sysPath
continue
elif ('site-packages' in newDepPath
or 'lib-dynload' in newDepPath
or 'plat-linux' in newDepPath
):
# site-packages should be specifically excluded since both it
# and its parent are always in sys.path. However, invalid
# python package names in general are allowed due to certain
# cases where relative imports happen inside a hyphenated
# directory and the requires detector picks up on that.
continue
# Note that it's possible to have a false positive here. For
# example, in the PIL case if PIL/__init__.py did not exist,
# PIL.Imaging would still be provided. The odds of this causing
# problems are so small that it is not checked for here.
self._addPythonProvidesSingle(path, m, pkgFiles, macros,
newDepPath)
def _addPythonProvidesSingle(self, path, m, pkgFiles, macros, depPath):
# remove extension
depPath, extn = depPath.rsplit('.', 1)
if depPath == '__future__':
return
# remove python3 __pycache__ directory from dep
if '__pycache__/' in depPath:
depPath = depPath.replace('__pycache__/', '')
# PEP 3147 adds the interpreter and version to the pyc file
depPath = self.pythonInterpRE.sub('', depPath)
if depPath.endswith('/__init__'):
depPath = depPath.replace('/__init__', '')
depPath = depPath.replace('/', '.')
depPaths = [ depPath ]
if extn == 'so':
fname = util.joinPaths(macros.destdir, path)
try:
syms = elf.getDynSym(fname)
# Does this module have an init<blah> function?
initfuncs = [ x[4:] for x in syms if x.startswith('init') ]
# This is the equivalent of dirname()
comps = depPath.rsplit('.', 1)
dpPrefix = comps[0]
if len(comps) == 1:
# Top-level python module
depPaths.extend(initfuncs)
else:
for initfunc in initfuncs:
depPaths.append('.'.join([dpPrefix, initfunc]))
except elf.error:
pass
flags = self._getPythonFlagsFromPath(path)
flags = [(x, deps.FLAG_SENSE_REQUIRED) for x in sorted(list(flags))]
for dpath in depPaths:
dep = deps.Dependency(dpath, flags)
for pkg, _ in pkgFiles:
self._addDepToMap(path, pkg.providesMap, deps.PythonDependencies, dep)
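# Worked example (path is hypothetical): a dep path of
#   'foo/__pycache__/bar.cpython-34.pyc'
# loses its extension, its '__pycache__/' component, and the PEP 3147
# interpreter tag, and a trailing '/__init__' would be stripped as well,
# so the recorded python dependency name is 'foo.bar'.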
def _addOneCILProvide(self, pkgFiles, path, name, ver):
for pkg, _ in pkgFiles:
self._addDepToMap(path, pkg.providesMap, deps.CILDependencies,
deps.Dependency(name, [(ver, deps.FLAG_SENSE_REQUIRED)]))
def _addCILPolicyProvides(self, path, pkgFiles, macros):
if ElementTree is None:
return
try:
keys = {'urn': '{urn:schemas-microsoft-com:asm.v1}'}
fullpath = macros.destdir + path
tree = ElementTree.parse(fullpath)
root = tree.getroot()
identity, redirect = root.find('runtime/%(urn)sassemblyBinding/%(urn)sdependentAssembly' % keys).getchildren()
assembly = identity.get('name')
self._addOneCILProvide(pkgFiles, path, assembly,
redirect.get('oldVersion'))
self.recipe.Requires(_CILPolicyProvides={
path: (assembly, redirect.get('newVersion'))})
except:
return
def _addCILProvides(self, path, m, pkgFiles, macros):
if not m or m.name != 'CIL':
return
fullpath = macros.destdir + path
if not self.monodisPath:
self.monodisPath = self._getmonodis(macros, path)
if not self.monodisPath:
return
p = util.popen('%s --assembly %s' %(
self.monodisPath, fullpath))
name = None
ver = None
for line in [ x.strip() for x in p.readlines() ]:
if 'Name:' in line:
name = line.split()[1]
elif 'Version:' in line:
ver = line.split()[1]
p.close()
# monodis did not give us any info
if not name or not ver:
return
self._addOneCILProvide(pkgFiles, path, name, ver)
def _isRubyModule(self, path, macros, fullpath):
if not util.isregular(fullpath) or os.path.islink(fullpath):
return False
if '/ruby/' in path:
# load up ruby opportunistically; this is our first chance
if self.rubyInterpreter is None:
self.rubyInterpreter, bootstrap = self._getRuby(macros, path)
if not self.rubyInterpreter:
return False
self.rubyInvocation, self.rubyLoadPath = self._getRubyLoadPath(
macros, self.rubyInterpreter, bootstrap)
self.rubyVersion = self._getRubyVersion(macros)
# we need to look deep first
self.rubyLoadPath = sorted(list(self.rubyLoadPath),
key=len, reverse=True)
elif self.rubyInterpreter is False:
return False
for pathElement in self.rubyLoadPath:
if path.startswith(pathElement) \
and (path.endswith('.rb') or path.endswith('.so')):
if '/gems/' in path:
path = path.partition("/gems/")[-1]
if '/lib/' in path:
return path.partition('/lib/')[-1].rsplit('.', 1)[0]
else:
return path[len(pathElement)+1:].rsplit('.', 1)[0]
return False
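# Worked example (paths are hypothetical): assuming the ruby load path
# contains '/usr/lib/ruby', a packaged gem file such as
#   /usr/lib/ruby/gems/1.8/gems/rake-0.8.7/lib/rake.rb
# is first cut after the leading '/gems/' and then after '/lib/', so the
# provision returned is 'rake'; a plain library file under the load path
# simply has the prefix and the '.rb'/'.so' extension removed.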
def _addRubyProvides(self, path, m, pkgFiles, macros, prov):
flags = self._getRubyFlagsFromPath(path, self.rubyVersion)
flags = [(x, deps.FLAG_SENSE_REQUIRED) for x in sorted(list(flags))]
dep = deps.Dependency(prov, flags)
for pkg, _ in pkgFiles:
self._addDepToMap(path, pkg.providesMap, deps.RubyDependencies, dep)
def _addJavaProvides(self, path, m, pkgFiles):
if 'provides' not in m.contents or not m.contents['provides']:
return
if not hasattr(self.recipe, '_reqExceptDeps'):
self.recipe._reqExceptDeps = []
# Compile requires exceptDeps (and persist them)
if not hasattr(self.recipe, '_compiledReqExceptDeps'):
self.recipe._compiledReqExceptDeps = exceptDeps = []
macros = self.recipe.macros
for fE, rE in self.recipe._reqExceptDeps:
try:
exceptDeps.append((filter.Filter(fE, macros),
re.compile(rE % macros)))
except sre_constants.error, e:
self.error('Bad regular expression %s for file spec %s: %s',
rE, fE, e)
# We will no longer need this, we have the compiled version now
self.recipe._reqExceptDeps = []
if self.recipe._internalJavaDepMap is None:
# Instantiate the dictionary of provides from this package
self.recipe._internalJavaDepMap = internalJavaDepMap = {}
componentMap = self.recipe.autopkg.componentMap
for opath in componentMap:
om = self.recipe.magic[opath]
if not self._isJava(om, 'provides'):
continue
# The file could be a .jar, in which case it contains multiple
# classes. contents['files'] is a dict, keyed on the file name
# within the jar and with a provide and a set of requires as
# value.
internalJavaDepMap.setdefault(opath, {}).update(
om.contents['files'])
else:
internalJavaDepMap = self.recipe._internalJavaDepMap
if hasattr(self.recipe, '_internalJavaProvides'):
internalProvides = self.recipe._internalJavaProvides
else:
# We need to cache the internal java provides, otherwise we do too
# much work for each file (CNY-3372)
self.recipe._internalJavaProvides = internalProvides = set()
for opath, ofiles in internalJavaDepMap.items():
internalProvides.update(x[0] for x in ofiles.values()
if x[0] is not None)
# Now drop internal provides from individual class requires
for opath, ofiles in internalJavaDepMap.items():
for oclassName, (oclassProv, oclassReqSet) in ofiles.items():
if oclassReqSet is None:
continue
oclassReqSet.difference_update(internalProvides)
reqs = set()
if self._isJava(m, 'requires'):
# Extract this file's requires
reqs.update(m.contents['requires'])
# Remove the ones that are satisfied internally
reqs.difference_update(internalProvides)
# For now, we are only trimming the provides (and requires) for
# classes for which the requires are not satisfied, neither internally
# nor from the system Conary database. In the future we may need to
# build a dependency tree between internal classes, such that we do
# the removal transitively (class A requires class B which doesn't
# have its deps satisfied should make class A unusable). This can come
# at a later time
# CNY-3362: we don't drop provides for classes which had requires on
# classes that had their dependencies pruned. (at least not yet)
if reqs:
# Try to resolve these deps against the Conary database
depSetList = []
depSetMap = {}
for req in reqs:
depSet = deps.DependencySet()
depSet.addDep(deps.JavaDependencies, deps.Dependency(req, []))
depSetList.append(depSet)
depSetMap[depSet] = req
troves = self.depCache.getProvides(depSetList)
missingDepSets = set(depSetList) - set(troves)
missingReqs = set(depSetMap[x] for x in missingDepSets)
# White out the missing requires if exceptDeps for them are found
rExceptDeps = self.recipe._compiledReqExceptDeps
if missingReqs and rExceptDeps:
depClass = deps.JavaDependencies
filteredMissingDeps = set()
for dep in list(missingReqs):
for filt, exceptRe in rExceptDeps:
if not filt.match(path):
continue
matchName = '%s: %s' %(depClass.tagName, str(dep))
if exceptRe.match(matchName):
# found one to not copy
missingReqs.remove(dep)
filteredMissingDeps.add(dep)
break
if filteredMissingDeps:
# We need to take them out of the per-file requires
ofiles = internalJavaDepMap[path]
for _, (oclassProv, oclassReqSet) in ofiles.items():
if oclassProv is not None:
oclassReqSet.difference_update(filteredMissingDeps)
if missingReqs:
fileDeps = internalJavaDepMap[path]
# This file has unsatisfied dependencies.
# Walk its list of classes to determine which ones are not
# satisfied.
satisfiedClasses = dict((fpath, (fprov, freqs))
for (fpath, (fprov, freqs)) in fileDeps.iteritems()
if freqs is not None
and not freqs.intersection(missingReqs))
internalJavaDepMap[path] = satisfiedClasses
self.warn('Provides and requirements for file %s are disabled '
'because of unsatisfied dependencies. To re-enable '
'them, add to the recipe\'s buildRequires the '
'packages that provide the following '
'requirements: %s' %
(path, " ".join(sorted(missingReqs))))
# Add the remaining provides
fileDeps = internalJavaDepMap[path]
provs = set(fprov for fpath, (fprov, freqs) in fileDeps.iteritems()
if fprov is not None)
for prov in provs:
dep = deps.Dependency(prov, [])
for pkg, _ in pkgFiles:
self._addDepToMap(path, pkg.providesMap, deps.JavaDependencies, dep)
def _addPerlProvides(self, path, m, pkgFiles):
# do not call perl to get @INC unless we have something to do for perl
self._fetchPerlIncPath()
# It is possible that we'll want to allow user-specified
# additions to the perl search path, but if so, we need
# to path-encode those files, so we can't just prepend
# those elements to perlIncPath. We would need to end up
# with something like "perl: /path/to/foo::bar" because
# for perl scripts that don't modify @INC, they could not
# find those scripts. It is not clear that we need this
# at all, because most if not all of those cases would be
# intra-package dependencies that we do not want to export.
depPath = None
for pathPrefix in self.perlIncPath:
if path.startswith(pathPrefix):
depPath = path[len(pathPrefix)+1:]
break
if depPath is None:
return
# foo/bar/baz.pm -> foo::bar::baz
prov = '::'.join(depPath.split('/')).rsplit('.', 1)[0]
dep = deps.Dependency(prov, [])
for pkg, _ in pkgFiles:
self._addDepToMap(path, pkg.providesMap, deps.PerlDependencies, dep)
def _markProvides(self, path, fullpath, provision, pkgFiles, macros, m):
if provision.startswith("file"):
# can't actually specify what to provide, just that it provides...
for _, f in pkgFiles:
f.flags.isPathDependencyTarget(True)
elif provision.startswith("abi:"):
abistring = provision[4:].strip()
op = abistring.index('(')
abi = abistring[:op]
flags = abistring[op+1:-1].split()
flags = [ (x, deps.FLAG_SENSE_REQUIRED) for x in flags ]
dep = deps.Dependency(abi, flags)
for pkg, _ in pkgFiles:
self._addDepToMap(path, pkg.providesMap, deps.AbiDependency, dep)
elif provision.startswith("soname:"):
sm, finalpath = self._symlinkMagic(path, fullpath, macros, m)
if self._isELF(sm, 'abi'):
# Only ELF files can provide sonames.
# This is for libraries that don't really include a soname,
# but programs linked against them require a soname.
# For this reason, we do not pass 'provides' to _isELF
soname = provision[7:].strip()
soflags = []
if '(' in soname:
# get list of arbitrary flags
soname, rest = soname.split('(')
soflags.extend(rest[:-1].split())
basedir = None
if '/' in soname:
basedir, soname = soname.rsplit('/', 1)
self._ELFAddProvide(path, sm, pkgFiles, soname=soname, soflags=soflags,
basedir=basedir)
else:
self.error('Provides %s for file %s does not start with one of'
' "file", "abi:", or "soname"',
provision, path)
class Requires(_addInfo, _dependency):
"""
NAME
====
B{C{r.Requires()}} - Creates dependency requirements
SYNOPSIS
========
C{r.Requires([I{/path/to/file}, I{filterexp}] || [I{packagename:component[(FLAGS)]}, I{filterexp}] || [I{exceptions=filterexp)}])}
DESCRIPTION
===========
The C{r.Requires()} policy adds requirements for a file.
You can pass in exceptions that should not have automatic requirement
discovery done, such as example shell scripts outside of C{%(docdir)s}.
Note: Components are the only troves which can be required.
For executables executed only through wrappers that
use C{LD_LIBRARY_PATH} to find the libraries instead of
embedding an RPATH in the binary, you will need to provide
a synthetic RPATH using C{r.Requires(rpath='I{RPATH}')}
or C{r.Requires(rpath=('I{filterExp}', 'I{RPATH}'))} calls,
which are tested in the order provided.
The RPATH is a standard Unix-style path string containing one or more
directory names, separated only by colon characters, except for one
significant change: Each path component is interpreted using shell-style
globs, which are checked first in the C{%(destdir)s} and then on the
installed system. (The globs are useful for cases like perl where
statically determining the entire content of the path is difficult. Use
globs only for variable parts of paths; be as specific as you can without
using the glob feature any more than necessary.)
Executables that use C{dlopen()} to open a shared library will not
automatically have a dependency on that shared library. If the program
unconditionally requires that it be able to C{dlopen()} the shared
library, encode that requirement by manually creating the requirement
by calling C{r.Requires('soname: libfoo.so', 'filterexp')} or
C{r.Requires('soname: /path/to/libfoo.so', 'filterexp')} depending on
whether the library is in a system library directory or not. (It should be
the same as how the soname dependency is expressed by the providing
package.)
For unusual cases where a system library is not listed in C{ld.so.conf}
but is instead found through a search through special subdirectories with
architecture-specific names (such as C{i686} and C{tls}), you can pass in
a string or list of strings specifying the directory or list of
directories, with C{r.Requires(sonameSubtrees='/directoryname')}
or C{r.Requires(sonameSubtrees=['/list', '/of', '/dirs'])}.
Note: These are B{not} regular expressions. They will have macro
expansion performed on them.
For unusual cases where Conary finds a false or misleading dependency,
or in which you need to override a true dependency, you can specify
C{r.Requires(exceptDeps='regexp')} to override all dependencies matching
a regular expression, C{r.Requires(exceptDeps=('filterexp', 'regexp'))}
to override dependencies matching a regular expression only for files
matching filterexp, or
C{r.Requires(exceptDeps=(('filterexp', 'regexp'), ...))} to specify
multiple overrides.
EXAMPLES
========
C{r.Requires('mailbase:runtime', '%(sbindir)s/sendmail')}
Demonstrates using C{r.Requires} to specify a manual requirement of the
file C{%(sbindir)s/sendmail} to the C{:runtime} component of package
C{mailbase}.
C{r.Requires('file: %(sbindir)s/sendmail', '%(datadir)s/squirrelmail/index.php')}
Specifies that conary should require the file C{%(sbindir)s/sendmail} to
be present when trying to install C{%(datadir)s/squirrelmail/index.php}.
C{r.Requires('soname: %(libdir)s/kde3/kgreet_classic.so', '%(bindir)s/kdm')}
Demonstrates using C{r.Requires} to specify a manual soname requirement
of the file C{%(bindir)s/kdm} to the soname
C{%(libdir)s/kde3/kgreet_classic.so}.
C{r.Requires(exceptions='/usr/share/vim/.*/doc/')}
Demonstrates using C{r.Requires} to specify that files in the
subdirectory C{/usr/share/vim/.*/doc} are excepted from being marked as
requirements.
C{r.Requires(exceptDeps='trove:$trovename')}
Uses C{r.Requires} to specify that the trove C{trovename} is excluded
from the dependencies for the package.
"""
bucket = policy.PACKAGE_CREATION
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('SharedLibrary', policy.REQUIRED_PRIOR),
# Requires depends on ELF dep path discovery previously done in Provides
('Provides', policy.REQUIRED_PRIOR),
)
filetree = policy.PACKAGE
invariantexceptions = (
'%(docdir)s/',
)
dbDepCacheClass = _DatabaseDepCache
def __init__(self, *args, **keywords):
_dependency.__init__(self, *args, **keywords)
self.bootstrapPythonFlags = set()
self.bootstrapSysPath = []
self.bootstrapPerlIncPath = []
self.bootstrapRubyLibs = []
self.pythonFlagNamespace = None
self.sonameSubtrees = set()
self._privateDepMap = {}
self.rpathFixup = []
self.exceptDeps = []
self.sysPath = None
self.monodisPath = None
self.rubyInterpreter = None
self.rubyVersion = None
self.rubyInvocation = None
self.rubyLoadPath = None
self.perlReqs = None
self.perlPath = None
self.perlIncArgs = None
self._CILPolicyProvides = {}
self.pythonSysPathMap = {}
self.pythonModuleFinderMap = {}
self.troveDeps = {}
policy.Policy.__init__(self, *args, **keywords)
self.depCache = self.dbDepCacheClass(self._getDb())
ISD = deps.InstructionSetDependency
TISD = deps.TargetInstructionSetDependency
instructionDeps = list(self.recipe._buildFlavor.iterDepsByClass(ISD))
instructionDeps += list(self.recipe._buildFlavor.iterDepsByClass(TISD))
self.allowableIsnSets = [ x.name for x in instructionDeps ]
def updateArgs(self, *args, **keywords):
# _privateDepMap is used only for Provides to talk to Requires
privateDepMap = keywords.pop('_privateDepMap', None)
if privateDepMap:
self._privateDepMap.update([privateDepMap])
sonameSubtrees = keywords.pop('sonameSubtrees', None)
if sonameSubtrees:
if type(sonameSubtrees) in (list, tuple):
self.sonameSubtrees.update(set(sonameSubtrees))
else:
self.sonameSubtrees.add(sonameSubtrees)
bootstrapPythonFlags = keywords.pop('bootstrapPythonFlags', None)
if bootstrapPythonFlags:
if type(bootstrapPythonFlags) in (list, tuple):
self.bootstrapPythonFlags.update(set(bootstrapPythonFlags))
else:
self.bootstrapPythonFlags.add(bootstrapPythonFlags)
# pass full set to Provides to share the exact same data
self.recipe.Provides(
_bootstrapPythonFlags=self.bootstrapPythonFlags)
bootstrapSysPath = keywords.pop('bootstrapSysPath', None)
if bootstrapSysPath:
if type(bootstrapSysPath) in (list, tuple):
self.bootstrapSysPath.extend(bootstrapSysPath)
else:
self.error('bootstrapSysPath must be list or tuple')
# pass full set to Provides to share the exact same data
self.recipe.Provides(
_bootstrapSysPath=self.bootstrapSysPath)
pythonFlagNamespace = keywords.pop('pythonFlagNamespace', None)
if pythonFlagNamespace is not None:
self.pythonFlagNamespace = pythonFlagNamespace
self.recipe.Provides(_pythonFlagNamespace=pythonFlagNamespace)
bootstrapPerlIncPath = keywords.pop('bootstrapPerlIncPath', None)
if bootstrapPerlIncPath:
if type(bootstrapPerlIncPath) in (list, tuple):
self.bootstrapPerlIncPath.extend(bootstrapPerlIncPath)
else:
self.error('bootstrapPerlIncPath must be list or tuple')
# pass full set to Provides to share the exact same data
self.recipe.Provides(
_bootstrapPerlIncPath=self.bootstrapPerlIncPath)
bootstrapRubyLibs = keywords.pop('bootstrapRubyLibs', None)
if bootstrapRubyLibs is not None:
if type(bootstrapRubyLibs) in (list, tuple):
self.bootstrapRubyLibs.extend(bootstrapRubyLibs)
else:
self.error('bootstrapRubyLibs must be list or tuple')
# pass full set to Provides to share the exact same data
self.recipe.Provides(
_bootstrapRubyLibs=self.bootstrapRubyLibs)
_CILPolicyProvides = keywords.pop('_CILPolicyProvides', None)
if _CILPolicyProvides:
self._CILPolicyProvides.update(_CILPolicyProvides)
rpath = keywords.pop('rpath', None)
if rpath:
if type(rpath) is str:
rpath = ('.*', rpath)
assert(type(rpath) == tuple)
self.rpathFixup.append(rpath)
exceptDeps = keywords.pop('exceptDeps', None)
if exceptDeps:
if type(exceptDeps) is str:
exceptDeps = ('.*', exceptDeps)
assert(type(exceptDeps) == tuple)
if type(exceptDeps[0]) is tuple:
self.exceptDeps.extend(exceptDeps)
else:
self.exceptDeps.append(exceptDeps)
if not hasattr(self.recipe, '_reqExceptDeps'):
self.recipe._reqExceptDeps = []
self.recipe._reqExceptDeps.extend(self.exceptDeps)
# Filter out trove deps that are not associated with a file.
if len(args) >= 2:
troves = []
component = re.compile('^[-a-zA-Z0-9]*:[a-zA-Z]+$')
for arg in args[1:]:
arg = arg % self.recipe.macros
# Make sure arg looks like a component
if not component.match(arg):
break
troves.append(arg.lstrip(':'))
else:
self.troveDeps[args[0]] = troves
args = ()
_dependency.updateArgs(self, *args, **keywords)
_addInfo.updateArgs(self, *args, **keywords)
def preProcess(self):
macros = self.macros
self.systemLibPaths = set(os.path.normpath(x % macros)
for x in self.sonameSubtrees)
self.bootstrapPythonFlags = set(x % macros
for x in self.bootstrapPythonFlags)
self.bootstrapSysPath = [x % macros for x in self.bootstrapSysPath]
if self.pythonFlagNamespace is not None:
self.pythonFlagNamespace = self.pythonFlagNamespace % macros
self.bootstrapPerlIncPath = [x % macros for x in self.bootstrapPerlIncPath]
# anything that any buildreqs have caused to go into ld.so.conf
# or ld.so.conf.d/*.conf is a system library by definition,
# but only look at paths, not (for example) "include" lines
if os.path.exists('/etc/ld.so.conf'):
self.systemLibPaths |= set(os.path.normpath(x.strip())
for x in file('/etc/ld.so.conf').readlines()
if x.startswith('/'))
for fileName in fixedglob.glob('/etc/ld.so.conf.d/*.conf'):
self.systemLibPaths |= set(os.path.normpath(x.strip())
for x in file(fileName).readlines()
if x.startswith('/'))
self.rpathFixup = [(filter.Filter(x, macros), y % macros)
for x, y in self.rpathFixup]
exceptDeps = []
for fE, rE in self.exceptDeps:
try:
exceptDeps.append((filter.Filter(fE, macros), re.compile(rE % macros)))
except sre_constants.error, e:
self.error('Bad regular expression %s for file spec %s: %s', rE, fE, e)
self.exceptDeps= exceptDeps
_dependency.preProcess(self)
def postProcess(self):
self._delPythonRequiresModuleFinder()
components = {}
for comp in self.recipe.autopkg.getComponents():
components[comp.getName()] = comp
shortName = comp.getName().split(':')[1]
# Mark component short names that have duplicates
if shortName in components:
components[shortName] = None
else:
components[shortName] = comp
# r.Requires('foo:runtime', 'msi')
# r.Requires('foo:runtime', ':msi')
# r.Requires('foo:runtime', 'bar:msi')
depClass = deps.TroveDependencies
for info, troves in self.troveDeps.iteritems():
# Sanity check inputs.
if ':' not in info:
self.error('package dependency %s not allowed', info)
return
for trove in troves:
if trove not in components:
self.error('no component named %s', trove)
return
if components[trove] is None:
self.error('specified component name matches multiple '
'components %s', trove)
return
# Add the trove dependency.
dep = deps.Dependency(info)
for trove in troves:
components[trove].requires.addDep(depClass, dep)
def doFile(self, path):
pkgs = self.recipe.autopkg.findComponents(path)
if not pkgs:
return
pkgFiles = [(x, x.getFile(path)) for x in pkgs]
# this file object is used only for tests, not for packaging
f = pkgFiles[0][1]
macros = self.recipe.macros
fullpath = macros.destdir + path
m = self.recipe.magic[path]
if self._isELF(m, 'requires'):
isnset = m.contents['isnset']
if isnset in self.allowableIsnSets:
# only add requirements for architectures
# that we are actually building for (this may include
# major and minor architectures)
self._addELFRequirements(path, m, pkgFiles)
# now go through explicit requirements
for info in self.included:
for filt in self.included[info]:
if filt.match(path):
self._markManualRequirement(info, path, pkgFiles, m)
# now check for automatic dependencies besides ELF
if f.inode.perms() & 0111 and m and m.name == 'script':
interp = m.contents['interpreter']
if interp.strip().startswith('/') and self._checkInclusion(interp,
path):
# no interpreter string warning is in BadInterpreterPaths
if not (os.path.exists(interp) or
os.path.exists(macros.destdir+interp)):
# this interpreter not on system, warn
# cannot be an error to prevent buildReq loops
self.warn('interpreter "%s" (referenced in %s) missing',
interp, path)
# N.B. no special handling for /{,usr/}bin/env here;
# if there has been an exception to
# NormalizeInterpreterPaths, then it is a
# real dependency on the env binary
self._addRequirement(path, interp, [], pkgFiles,
deps.FileDependencies)
if (f.inode.perms() & 0111 and m and m.name == 'script' and
os.path.basename(m.contents['interpreter']).startswith('python')):
self._addPythonRequirements(path, fullpath, pkgFiles)
elif self._isPython(path):
self._addPythonRequirements(path, fullpath, pkgFiles)
if (f.inode.perms() & 0111 and m and m.name == 'script' and
os.path.basename(m.contents['interpreter']).startswith('ruby')):
self._addRubyRequirements(path, fullpath, pkgFiles, script=True)
elif '/ruby/' in path and path.endswith('.rb'):
self._addRubyRequirements(path, fullpath, pkgFiles, script=False)
if self._isCIL(m):
if not self.monodisPath:
self.monodisPath = self._getmonodis(macros, path)
if not self.monodisPath:
return
p = util.popen('%s --assemblyref %s' %(
self.monodisPath, fullpath))
for line in [ x.strip() for x in p.readlines() ]:
if ': Version=' in line:
ver = line.split('=')[1]
elif 'Name=' in line:
name = line.split('=')[1]
self._addRequirement(path, name, [ver], pkgFiles,
deps.CILDependencies)
p.close()
elif self.CILPolicyRE.match(path):
name, ver = self._CILPolicyProvides[path]
self._addRequirement(path, name, [ver], pkgFiles, deps.CILDependencies)
if self._isJava(m, 'requires'):
self._addJavaRequirements(path, m, pkgFiles)
db = self._getDb()
if self._isPerl(path, m, f):
perlReqs = self._getPerlReqs(path, fullpath)
for req in perlReqs:
thisReq = deps.parseDep('perl: ' + req)
if db.getTrovesWithProvides([thisReq]) or [
x for x in self.recipe.autopkg.getComponents()
if x.provides.satisfies(thisReq)]:
self._addRequirement(path, req, [], pkgFiles,
deps.PerlDependencies)
self.whiteOut(path, pkgFiles)
self.unionDeps(path, pkgFiles)
def _addJavaRequirements(self, path, m, pkgFiles):
if not hasattr(self.recipe, '_internalJavaDepMap'):
self.recipe._internalJavaDepMap = {}
fileDeps = self.recipe._internalJavaDepMap.get(path, {})
reqs = set()
for fpath, (fprov, freq) in fileDeps.items():
if freq is not None:
reqs.update(freq)
for req in reqs:
self._addRequirement(path, req, [], pkgFiles,
deps.JavaDependencies)
def whiteOut(self, path, pkgFiles):
# remove intentionally discarded dependencies
for pkg, _ in pkgFiles:
if self.exceptDeps and path in pkg.requiresMap:
depSet = deps.DependencySet()
for depClass, dep in pkg.requiresMap[path].iterDeps():
for filt, exceptRe in self.exceptDeps:
if filt.match(path):
matchName = '%s: %s' %(depClass.tagName, str(dep))
if exceptRe.match(matchName):
# found one to not copy
dep = None
break
if dep is not None:
depSet.addDep(depClass, dep)
pkg.requiresMap[path] = depSet
def unionDeps(self, path, pkgFiles):
# finally, package the dependencies up
for pkg, f in pkgFiles:
if path in pkg.requiresMap:
# files should not require items they provide directly. CNY-2177
f.requires.set(pkg.requiresMap[path] - f.provides())
pkg.requires.union(f.requires())
def _addELFRequirements(self, path, m, pkgFiles):
"""
Add ELF and abi dependencies, including paths when not shlibs
"""
def appendUnique(ul, items):
for item in items:
if item not in ul:
ul.append(item)
def _canonicalRPATH(rpath, glob=False):
# normalize all elements of RPATH
l = [ util.normpath(x) for x in rpath.split(':') ] # CNY-3425
# prune system paths and relative paths from RPATH
l = [ x for x in l
if x not in self.systemLibPaths and x.startswith('/') ]
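            # e.g. '/usr/lib:../lib:/opt/foo/lib' -> ['/opt/foo/lib'] when
            # /usr/lib is in systemLibPaths; relative and system entries
            # are pruned (illustrative)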
if glob:
destdir = self.macros.destdir
dlen = len(destdir)
gl = []
for item in l:
# prefer destdir elements
paths = util.braceGlob(destdir + item)
paths = [ os.path.normpath(x[dlen:]) for x in paths ]
appendUnique(gl, paths)
# then look on system
paths = util.braceGlob(item)
paths = [ os.path.normpath(x) for x in paths ]
appendUnique(gl, paths)
l = gl
return l
rpathList = []
def _findSonameInRpath(soname):
for rpath in rpathList:
destpath = '/'.join((self.macros.destdir, rpath, soname))
if os.path.exists(destpath):
return rpath
destpath = '/'.join((rpath, soname))
if os.path.exists(destpath):
return rpath
# didn't find anything
return None
# fixup should come first so that its path elements can override
# the included RPATH if necessary
if self.rpathFixup:
for f, rpath in self.rpathFixup:
if f.match(path):
# synthetic RPATH items are globbed
rpathList = _canonicalRPATH(rpath, glob=True)
break
if m and 'RPATH' in m.contents and m.contents['RPATH']:
rpathList += _canonicalRPATH(m.contents['RPATH'])
depSet = self._createELFDepSet(m, m.contents['requires'],
libPathMap=self._privateDepMap,
getRPATH=_findSonameInRpath,
path=path, isProvides=False)
for pkg, _ in pkgFiles:
self._addDepSetToMap(path, pkg.requiresMap, depSet)
def _getPythonRequiresSysPath(self, pathName):
# Generate the correct sys.path for finding the required modules.
# we use the built in site.py to generate a sys.path for the
# current system and another one where destdir is the root.
# note the below code is similar to code in Provides,
# but it creates an ordered path list with and without destdir prefix,
# while provides only needs a complete list without destdir prefix.
# Returns tuple:
# (sysPath, pythonModuleFinder, pythonVersion)
pythonPath, bootstrapPython = self._getPython(self.macros, pathName)
if not pythonPath:
return (None, None, None)
if pythonPath in self.pythonSysPathMap:
return self.pythonSysPathMap[pythonPath]
destdir = self.macros.destdir
libdir = self.macros.libdir
pythonVersion = self._getPythonVersion(pythonPath, destdir, libdir)
# Start with paths inside the destdir so that imports within a package
# are discovered correctly.
systemPaths = self._getPythonSysPath(pythonPath, destdir, libdir,
useDestDir=True)
# Now add paths from the system (or bootstrap python)
systemPaths += self._getPythonSysPath(pythonPath, destdir, libdir,
useDestDir=False)
if not bootstrapPython:
# update pythonTroveFlagCache to require correct flags
self._getPythonTroveFlags(pythonPath)
# Keep original order for use with the module finder.
sysPathForModuleFinder = list(systemPaths)
# Strip destdir and sort in descending order for converting paths to
# qualified python module names.
sysPath = sorted(set(self._stripDestDir(systemPaths, destdir)),
reverse=True)
# load module finder after sys.path is restored
# in case delayed importer is installed.
pythonModuleFinder = self._getPythonRequiresModuleFinder(
pythonPath, destdir, libdir, sysPathForModuleFinder,
bootstrapPython)
self.pythonSysPathMap[pythonPath] = (
sysPath, pythonModuleFinder, pythonVersion)
return self.pythonSysPathMap[pythonPath]
def _getPythonRequiresModuleFinder(self, pythonPath, destdir, libdir, sysPath, bootstrapPython):
if self.recipe.isCrossCompiling():
return None
if pythonPath not in self.pythonModuleFinderMap:
try:
self.pythonModuleFinderMap[pythonPath] = pydeps.moduleFinderProxy(pythonPath, destdir, libdir, sysPath, self.error)
except pydeps.ModuleFinderInitializationError, e:
if bootstrapPython:
# another case, like isCrossCompiling, where we cannot
# run pythonPath -- ModuleFinderInitializationError
# is raised before looking at any path, so should
# be consistent for any pythonPath
self.pythonModuleFinderMap[pythonPath] = None
else:
raise
return self.pythonModuleFinderMap[pythonPath]
def _delPythonRequiresModuleFinder(self):
for finder in self.pythonModuleFinderMap.values():
if finder is not None:
finder.close()
def _addPythonRequirements(self, path, fullpath, pkgFiles):
destdir = self.recipe.macros.destdir
destDirLen = len(destdir)
(sysPath, pythonModuleFinder, pythonVersion
)= self._getPythonRequiresSysPath(path)
if not sysPath:
# Probably a bad interpreter path
return
if not pythonModuleFinder:
# We cannot (reliably) determine runtime python requirements
# in the cross-compile case, so don't even try (for
# consistency).
return
pythonModuleFinder.load_file(fullpath)
data = pythonModuleFinder.getDepsForPath(fullpath)
if data['result'] != 'ok':
self.info('File %s is not a valid python file', path)
return
for depPath in data['paths']:
if not depPath:
continue
flags = None
absPath = None
if depPath.startswith(destdir):
depPath = depPath[destDirLen:]
flags = self._getPythonFlagsFromPath(depPath)
# The file providing this dependency is part of this package.
absPath = depPath
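            # The loop below strips the matching sys.path prefix, e.g. a
            # depPath of '/usr/lib/python2.6/site-packages/foo/bar.py'
            # becomes 'foo/bar.py' (illustrative path only)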
for sysPathEntry in sysPath:
if depPath.startswith(sysPathEntry):
newDepPath = depPath[len(sysPathEntry)+1:]
if newDepPath not in ('__init__', '__init__.py'):
# we don't allow bare __init__'s as dependencies.
                        # hopefully we'll find this at a deeper level in
                        # the sysPath
if flags is None:
# this is provided by the system, so we have
# to see with which flags it is provided with
flags = self._getPythonFlags(depPath,
self.bootstrapPythonFlags)
depPath = newDepPath
break
if depPath.startswith('/'):
# a python file not found in sys.path will not have been
# provided, so we must not depend on it either
return
if not (depPath.endswith('.py') or depPath.endswith('.pyc') or
depPath.endswith('.so')):
# Not something we provide, so not something we can
# require either. Drop it and go on. We have seen
# this when a script in /usr/bin has ended up in the
# requires list.
continue
if depPath.endswith('module.so'):
# Strip 'module.so' from the end, make it a candidate
cands = [ depPath[:-9] + '.so', depPath ]
cands = [ self._normalizePythonDep(x) for x in cands ]
if absPath:
depName = self._checkPackagePythonDeps(pkgFiles, absPath,
cands, flags)
else:
depName = self._checkSystemPythonDeps(cands, flags)
else:
depName = self._normalizePythonDep(depPath)
if depName == '__future__':
continue
self._addRequirement(path, depName, flags, pkgFiles,
deps.PythonDependencies)
#if data['missing']:
# self.warn("Python file %s is missing requirements: %s" % (
# path, ', '.join(data['missing'])))
def _checkPackagePythonDeps(self, pkgFiles, depPath, depNames, flags):
# Try to match depNames against all current packages
        # Use the last value in depNames as the default (fallback) value
assert depNames, "No dependencies passed"
for pkg, _ in pkgFiles:
if depPath in pkg:
fileProvides = pkg[depPath][1].provides()
if flags:
flags = [ (x, deps.FLAG_SENSE_REQUIRED) for x in flags ]
# Walk the depNames list in order, pick the first dependency
# available.
for dp in depNames:
depSet = deps.DependencySet()
depSet.addDep(deps.PythonDependencies,
deps.Dependency(dp, flags))
if fileProvides.intersection(depSet):
# this dep is provided
return dp
# If we got here, the file doesn't provide this dep. Return the last
# candidate and hope for the best
return depNames[-1]
def _checkSystemPythonDeps(self, depNames, flags):
if flags:
flags = [ (x, deps.FLAG_SENSE_REQUIRED) for x in flags ]
for dp in depNames:
depSet = deps.DependencySet()
depSet.addDep(deps.PythonDependencies, deps.Dependency(dp, flags))
troves = self.depCache.getProvides([depSet])
if troves:
return dp
return depNames[-1]
def _normalizePythonDep(self, depName):
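        # Illustrative examples of the normalization below (assuming
        # pythonInterpRE does not match these names):
        #   'xml/sax/__init__.py' -> 'xml.sax'
        #   'foo/bar.so'          -> 'foo.bar'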
# remove extension
depName = depName.rsplit('.', 1)[0]
depName = depName.replace('/', '.')
depName = depName.replace('.__init__', '')
depName = self.pythonInterpRE.sub('', depName)
return depName
def _addRubyRequirements(self, path, fullpath, pkgFiles, script=False):
macros = self.recipe.macros
destdir = macros.destdir
destDirLen = len(destdir)
if self.rubyInterpreter is None:
self.rubyInterpreter, bootstrap = self._getRuby(macros, path)
if not self.rubyInterpreter:
return
self.rubyInvocation, self.rubyLoadPath = self._getRubyLoadPath(
macros, self.rubyInterpreter, bootstrap)
self.rubyVersion = self._getRubyVersion(macros)
elif self.rubyInterpreter is False:
return
if not script:
if not util.isregular(fullpath) or os.path.islink(fullpath):
return
foundInLoadPath = False
for pathElement in self.rubyLoadPath:
if path.startswith(pathElement):
foundInLoadPath = True
break
if not foundInLoadPath:
return
        # This is a very limited hack, but it will work for the 90% case;
        # better parsing may be written later.
# Note that we only honor "require" at the beginning of
# the line and only requirements enclosed in single quotes
# to avoid conditional requirements and requirements that
# do any sort of substitution. Because most ruby packages
# contain multiple ruby modules, getting 90% of the ruby
# dependencies will find most of the required packages in
# practice
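        # For example (illustrative lines only):
        #   require 'json'            -> yields "json"
        #   require 'foo/bar'         -> yields "foo/bar"
        #   require "json"            -> skipped (double quotes)
        #   require(File.dirname(x))  -> skipped (not a single quoted token)
        #     require 'indented'      -> skipped (not at start of line)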
depEntries = [x.strip() for x in file(fullpath)
if x.startswith('require ') or
x.startswith('require(')]
depEntries = (x.split() for x in depEntries)
depEntries = (x[1].strip("\"'") for x in depEntries
if len(x) == 2 and x[1].startswith("'") and
x[1].endswith("'"))
depEntries = set(depEntries)
        # I know of no way to ask ruby to report deps from scripts.
        # Unfortunately, too many Ruby modules have code that runs in the
        # module body; the commented-out closure approach below runs slowly,
        # has not been useful in practice for filtering out bogus
        # dependencies, and has hung or caused other unintended side effects
        # when loading such modules.
#if not script:
# depClosure = util.popen(r'''%s -e "require '%s'; puts $\""'''
# %(self.rubyInvocation%macros, fullpath)).readlines()
# depClosure = set([x.split('.')[0] for x in depClosure])
# # remove any entries from the guessed immediate requirements
# # that are not in the closure
# depEntries = set(x for x in depEntries if x in depClosure)
def _getDepEntryPath(depEntry):
for prefix in (destdir, ''):
for pathElement in self.rubyLoadPath:
for suffix in ('.rb', '.so'):
candidate = util.searchPath(
os.path.basename(depEntry) + suffix,
prefix + pathElement,
)
if candidate:
return candidate
return None
for depEntry in depEntries:
depEntryPath = _getDepEntryPath(depEntry)
if depEntryPath is None:
continue
if depEntryPath.startswith(destdir):
depPath = depEntryPath[destDirLen:]
else:
depPath = depEntryPath
flags = self._getRubyFlagsFromPath(depPath, self.rubyVersion)
self._addRequirement(path, depEntry, flags, pkgFiles,
deps.RubyDependencies)
def _fetchPerl(self):
"""
Cache the perl path and @INC path with -I%(destdir)s prepended to
each element if necessary
"""
if self.perlPath is not None:
return
macros = self.recipe.macros
self.perlPath, perlIncPath, perlDestInc = self._getperl(macros, self.recipe)
if perlDestInc:
self.perlIncArgs = perlDestInc
else:
self.perlIncArgs = ' '.join('-I'+x for x in perlIncPath)
def _getPerlReqs(self, path, fullpath):
if self.perlReqs is None:
self._fetchPerl()
if not self.perlPath:
# no perl == bootstrap, but print warning
self.info('Unable to find perl interpreter,'
' disabling perl: requirements')
self.perlReqs = False
return []
# get the base directory where conary lives. In a checked
# out version, this would be .../conary/conary/build/package.py
# chop off the last 3 directories to find where
# .../conary/Scandeps and .../conary/scripts/perlreqs.pl live
basedir = '/'.join(sys.modules[__name__].__file__.split('/')[:-3])
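        # e.g. a checkout at /home/user/conary/conary/build/package.py
        # yields basedir /home/user/conary (illustrative path)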
scandeps = '/'.join((basedir, 'conary/ScanDeps'))
if (os.path.exists(scandeps) and
os.path.exists('%s/scripts/perlreqs.pl' % basedir)):
perlreqs = '%s/scripts/perlreqs.pl' % basedir
else:
# we assume that conary is installed in
# $prefix/$libdir/python?.?/site-packages. Use this
# assumption to find the prefix for
# /usr/lib/conary and /usr/libexec/conary
regexp = re.compile(r'(.*)/lib(64){0,1}/python[1-9].[0-9]/site-packages')
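                # e.g. a basedir of /usr/lib64/python2.6/site-packages
                # matches with prefix = /usr (illustrative)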
match = regexp.match(basedir)
if not match:
# our regexp didn't work. fall back to hardcoded
# paths
prefix = '/usr'
else:
prefix = match.group(1)
# ScanDeps is not architecture specific
scandeps = '%s/lib/conary/ScanDeps' %prefix
if not os.path.exists(scandeps):
# but it might have been moved to lib64 for multilib
scandeps = '%s/lib64/conary/ScanDeps' %prefix
perlreqs = '%s/libexec/conary/perlreqs.pl' %prefix
self.perlReqs = '%s -I%s %s %s' %(
self.perlPath, scandeps, self.perlIncArgs, perlreqs)
if self.perlReqs is False:
return []
cwd = os.getcwd()
os.chdir(os.path.dirname(fullpath))
try:
p = os.popen('%s %s' %(self.perlReqs, fullpath))
finally:
try:
os.chdir(cwd)
except:
pass
reqlist = [x.strip().split('//') for x in p.readlines()]
# make sure that the command completed successfully
rc = p.close()
if rc:
# make sure that perl didn't blow up
assert(os.WIFEXITED(rc))
            # Apparently ScanDeps could not handle this input
return []
# we care only about modules right now
# throwing away the filenames for now, but we might choose
# to change that later
reqlist = [x[2] for x in reqlist if x[0] == 'module']
# foo/bar/baz.pm -> foo::bar::baz
reqlist = ['::'.join(x.split('/')).rsplit('.', 1)[0] for x in reqlist]
return reqlist
def _markManualRequirement(self, info, path, pkgFiles, m):
flags = []
if self._checkInclusion(info, path):
if info[0] == '/':
depClass = deps.FileDependencies
elif info.startswith('file:') and info[5:].strip()[0] == '/':
info = info[5:].strip()
depClass = deps.FileDependencies
elif info.startswith('soname:'):
if not m or m.name != 'ELF':
# only an ELF file can have a soname requirement
return
# we need to synthesize a dependency that encodes the
# same ABI as this binary
depClass = deps.SonameDependencies
for depType, dep, f in m.contents['requires']:
if depType == 'abi':
flags = tuple(x == 'Linux' and 'SysV' or x
for x in f) # CNY-3604
info = '%s/%s' %(dep, info.split(None, 1)[1])
info = os.path.normpath(info)
else: # by process of elimination, must be a trove
if info.startswith('group-'):
self.error('group dependency %s not allowed', info)
return
if info.startswith('fileset-'):
self.error('fileset dependency %s not allowed', info)
return
if ':' not in info:
self.error('package dependency %s not allowed', info)
return
depClass = deps.TroveDependencies
self._addRequirement(path, info, flags, pkgFiles, depClass)
def _checkInclusion(self, info, path):
if info in self.excluded:
for filt in self.excluded[info]:
# exception handling is per-requirement,
# so handled specially
if filt.match(path):
self.info('ignoring requirement match for %s: %s',
path, info)
return False
return True
def _addRequirement(self, path, info, flags, pkgFiles, depClass):
if depClass == deps.FileDependencies:
pathMap = self.recipe.autopkg.pathMap
componentMap = self.recipe.autopkg.componentMap
if (info in pathMap and not
componentMap[info][info][1].flags.isPathDependencyTarget()):
# if a package requires a file, includes that file,
# and does not provide that file, it should error out
self.error('%s requires %s, which is included but not'
' provided; use'
" r.Provides('file', '%s')", path, info, info)
return
# in some cases, we get literal "(flags)" from the recipe
if '(' in info:
flagindex = info.index('(')
flags = set(info[flagindex+1:-1].split() + list(flags))
info = info.split('(')[0]
# CNY-3443
if depClass in self.removeFlagsByDependencyClassMap:
flags = set(flags)
for ignoreItem in self.removeFlagsByDependencyClassMap[depClass]:
if isinstance(ignoreItem, set):
ignoreFlags = ignoreItem
else:
ignoreFlags = set(f for f in flags if ignoreItem.match(f))
flags -= ignoreFlags
if flags:
flags = [ (x, deps.FLAG_SENSE_REQUIRED) for x in flags ]
for pkg, _ in pkgFiles:
# we may need to create a few more DependencySets.
if path not in pkg.requiresMap:
pkg.requiresMap[path] = deps.DependencySet()
pkg.requiresMap[path].addDep(depClass, deps.Dependency(info, flags))
class _basePluggableRequires(Requires):
"""
Base class for pluggable Requires policies.
"""
    # This set of policies gets executed before the Requires policy,
    # and inherits the Requires policy's ordering constraints
requires = list(Requires.requires) + [
('Requires', policy.REQUIRED_SUBSEQUENT),
]
def preProcess(self):
# We want to inherit the exceptions from the Requires class, so we
        # need to peek into the Requires policy object. We can still pass
# explicit exceptions into the pluggable sub-policies, and they will
# only apply to the sub-policy.
exceptions = self.recipe._policyMap['Requires'].exceptions
if exceptions:
Requires.updateArgs(self, exceptions=exceptions,
allowUnusedFilters = True)
Requires.preProcess(self)
def reportErrors(self, *args, **kwargs):
return self.recipe._policyMap['Requires'].reportErrors(*args, **kwargs)
def error(self, *args, **kwargs):
return self.recipe._policyMap['Requires'].error(*args, **kwargs)
def warn(self, *args, **kwargs):
return self.recipe._policyMap['Requires'].warn(*args, **kwargs)
def info(self, *args, **kwargs):
return self.recipe._policyMap['Requires'].info(*args, **kwargs)
def _addClassName(self, *args, **kwargs):
return self.recipe._policyMap['Requires']._addClassName(*args, **kwargs)
def doFile(self, path):
pkgs = self.recipe.autopkg.findComponents(path)
if not pkgs:
return
pkgFiles = [(x, x.getFile(path)) for x in pkgs]
macros = self.recipe.macros
fullpath = macros.destdir + path
self.addPluggableRequirements(path, fullpath, pkgFiles, macros)
self.whiteOut(path, pkgFiles)
self.unionDeps(path, pkgFiles)
def addPluggableRequirements(self, path, fullpath, pkgFiles, macros):
"""Override in subclasses"""
pass
class RemoveSelfProvidedRequires(policy.Policy):
"""
This policy is used to remove component requirements when they are provided
by the component itself.
Do not call it directly; it is for internal use only.
"""
bucket = policy.PACKAGE_CREATION
requires = (
('Requires', policy.REQUIRED_PRIOR),
)
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def do(self):
if use.Use.bootstrap._get():
return
for comp in self.recipe.autopkg.getComponents():
comp.requires -= comp.provides
class Flavor(policy.Policy):
"""
NAME
====
B{C{r.Flavor()}} - Controls the Flavor mechanism
SYNOPSIS
========
C{r.Flavor([I{filterexp}] | [I{exceptions=filterexp}])}
DESCRIPTION
===========
The C{r.Flavor} policy marks files with the appropriate Flavor.
To except a file's flavor from being marked, use:
C{r.Flavor(exceptions='I{filterexp}')}.
EXAMPLES
========
C{r.Flavor(exceptions='%(crossprefix)s/lib/gcc-lib/.*')}
Files in the directory C{%(crossprefix)s/lib/gcc-lib} are being excepted
from having their Flavor marked, because they are not flavored for
the system on which the trove is being installed.
"""
bucket = policy.PACKAGE_CREATION
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('Requires', policy.REQUIRED_PRIOR),
# For example: :lib component contains only a single packaged empty
# directory, which must be artificially flavored for multilib
('ExcludeDirectories', policy.REQUIRED_PRIOR),
)
filetree = policy.PACKAGE
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def preProcess(self):
self.libRe = re.compile(
'^(%(libdir)s'
'|/%(lib)s'
'|%(x11prefix)s/%(lib)s'
'|%(krbprefix)s/%(lib)s)(/|$)' %self.recipe.macros)
self.libReException = re.compile('^/usr/(lib|%(lib)s)/(python|ruby).*$')
self.baseIsnset = use.Arch.getCurrentArch()._name
self.baseArchFlavor = use.Arch.getCurrentArch()._toDependency()
self.archFlavor = use.createFlavor(None, use.Arch._iterUsed())
self.packageFlavor = deps.Flavor()
self.troveMarked = False
self.componentMap = self.recipe.autopkg.componentMap
ISD = deps.InstructionSetDependency
TISD = deps.TargetInstructionSetDependency
instructionDeps = list(self.recipe._buildFlavor.iterDepsByClass(ISD))
instructionDeps += list(self.recipe._buildFlavor.iterDepsByClass(TISD))
self.allowableIsnSets = [ x.name for x in instructionDeps ]
def postProcess(self):
# If this is a Windows package, include the flavor from the windows
# helper.
if (self._getTarget() == TARGET_WINDOWS and
hasattr(self.recipe, 'winHelper')):
flavorStr = self.recipe.winHelper.flavor
if flavorStr:
self.packageFlavor.union(deps.parseFlavor(flavorStr))
# all troves need to share the same flavor so that we can
# distinguish them later
for pkg in self.recipe.autopkg.components.values():
pkg.flavor.union(self.packageFlavor)
def hasLibInPath(self, path):
return self.libRe.match(path) and not self.libReException.match(path)
def hasLibInDependencyFlag(self, path, f):
for depType in (deps.PythonDependencies, deps.RubyDependencies):
for dep in ([x for x in f.requires.deps.iterDepsByClass(depType)] +
[x for x in f.provides.deps.iterDepsByClass(depType)]):
flagNames = [x[0] for x in dep.getFlags()[0]]
flagNames = [x for x in flagNames if x.startswith('lib')]
if flagNames:
return True
return False
def doFile(self, path):
autopkg = self.recipe.autopkg
pkg = autopkg.findComponent(path)
if pkg is None:
return
f = pkg.getFile(path)
m = self.recipe.magic[path]
if m and m.name == 'ELF' and 'isnset' in m.contents:
isnset = m.contents['isnset']
elif self.hasLibInPath(path) or self.hasLibInDependencyFlag(path, f):
            # all files under a %(lib)s-derived path get the default
            # instruction set assigned if they don't have one already
if f.hasContents:
isnset = self.baseIsnset
else:
# this file can't be marked by arch, but the troves
# and package must be. (e.g. symlinks and empty directories)
# we don't need to union in the base arch flavor more
# than once.
if self.troveMarked:
return
self.packageFlavor.union(self.baseArchFlavor)
self.troveMarked = True
return
else:
return
flv = deps.Flavor()
flv.addDep(deps.InstructionSetDependency, deps.Dependency(isnset, []))
        # Set the flavor for the file to match that discovered in the
        # magic, but do not let that propagate up to the flavor of
        # the package; instead, the package will have the flavor that
        # it was cooked with. This avoids unnecessary or extra files
        # causing the entire package to be flavored inappropriately;
        # such flavoring requires a bunch of Flavor exclusions to fix.
# Note that we need to set all shared paths between containers
# to share flavors and ensure that fileIds are the same
for pkg in autopkg.findComponents(path):
f = pkg.getFile(path)
f.flavor.set(flv)
# get the Arch.* dependencies
flv.union(self.archFlavor)
if isnset in self.allowableIsnSets:
self.packageFlavor.union(flv)
class _ProcessInfoPackage(policy.UserGroupBasePolicy):
bucket = policy.PACKAGE_CREATION
requires = (
('PackageSpec', policy.REQUIRED_PRIOR),
('ComponentSpec', policy.REQUIRED_PRIOR),
('Provides', policy.CONDITIONAL_PRIOR),
('Requires', policy.CONDITIONAL_PRIOR),
('Config', policy.CONDITIONAL_PRIOR),
('InitialContents', policy.CONDITIONAL_PRIOR)
)
def preProcess(self):
if self.exceptions:
self.error('%s does not honor exceptions' % self.__class__.__name__)
self.exceptions = None
if self.inclusions:
self.inclusions = None
def doFile(self, path):
expectedName = 'info-%s:%s' % (os.path.basename(path), self.component)
comp = self.recipe.autopkg.componentMap[path]
compName = comp.name
if not isinstance(comp.getFile(path), files.RegularFile):
self.error("Only regular files may appear in '%s'" % expectedName)
return
if len(comp) > 1:
badPaths = [x for x in comp if x != path]
self.error("The following files are not allowed in '%s': '%s'" % \
(compName, "', '".join(badPaths)))
else:
fileObj = comp[path][1]
for tag in fileObj.tags():
self.error("TagSpec '%s' is not allowed for %s" % \
(tag, expectedName))
fileObj.tags.set('%s-info' % self.component)
fileObj.flags.isTransient(True)
self.parseError = False
self.addProvides(path)
if not self.parseError:
self.addRequires(path)
def parseInfoFile(self, path):
infoname = "info-%s:%s" % (os.path.basename(path), self.component)
data = {}
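        # An info file is a set of KEY=value lines, e.g. for a user info
        # file (illustrative):
        #   PREFERRED_UID=48
        #   GROUP=apache
        #   HOMEDIR=/var/www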
try:
data = dict([x.strip().split('=', 1) \
for x in open(path).readlines()])
extraKeys = set(data.keys()).difference(self.legalKeys)
if extraKeys:
for key in extraKeys:
self.error("%s is not is not a valid value for %s" % \
(key, infoname))
self.parseError = True
except ValueError:
self.error("Unable to parse info file for '%s'" % infoname)
self.parseError = True
return data
def addProvides(self, path):
realpath, fileObj = self.recipe.autopkg.findComponent(path)[path]
data = self.parseInfoFile(realpath)
pkg = self.recipe.autopkg.componentMap[path]
infoname = os.path.basename(path)
if path in pkg.providesMap:
# only deps related to userinfo/troveinfo are allowed
self.error("Illegal provision for 'info-%s:%s': '%s'" % \
(infoname, self.component, str(pkg.providesMap[path])))
pkg.providesMap[path] = deps.DependencySet()
depSet = self.getProvides(infoname, data)
fileObj.provides.set(depSet)
pkg.providesMap[path].union(depSet)
pkg.provides.union(depSet)
def addRequires(self, path):
realpath, fileObj = self.recipe.autopkg.findComponent(path)[path]
data = self.parseInfoFile(realpath)
pkg = self.recipe.autopkg.componentMap[path]
infoname = os.path.basename(path)
if path in pkg.requiresMap:
# only deps related to userinfo/troveinfo are allowed
self.error("Illegal requirement on 'info-%s:%s': '%s'" % \
(infoname, self.component, str(pkg.requiresMap[path])))
pkg.requiresMap[path] = deps.DependencySet()
depSet = self.getRequires(infoname, data)
fileObj.requires.set(depSet)
pkg.requiresMap[path].union(depSet)
pkg.requires.union(depSet)
class ProcessUserInfoPackage(_ProcessInfoPackage):
"""
NAME
====
B{C{r.ProcessUserInfoPackage()}} - Set dependencies and tags for User
info packages
SYNOPSIS
========
C{r.ProcessUserInfoPackage()}
DESCRIPTION
===========
The C{r.ProcessUserInfoPackage} policy automatically sets up provides
    and requires, as well as tags, for user info files created by the
C{r.User} build action.
This policy is not intended to be invoked from recipes. Do not use it.
"""
invariantsubtrees = ['%(userinfodir)s']
component = 'user'
legalKeys = ['PREFERRED_UID', 'GROUP', 'GROUPID', 'HOMEDIR', 'COMMENT',
'SHELL', 'SUPPLEMENTAL', 'PASSWORD']
def parseInfoFile(self, path):
if self.recipe._getCapsulePathsForFile(path):
return {}
data = _ProcessInfoPackage.parseInfoFile(self, path)
if data:
supplemental = data.get('SUPPLEMENTAL')
if supplemental is not None:
data['SUPPLEMENTAL'] = supplemental.split(',')
return data
def getProvides(self, infoname, data):
depSet = deps.DependencySet()
groupname = data.get('GROUP', infoname)
depSet.addDep(deps.UserInfoDependencies,
deps.Dependency(infoname, []))
if self.recipe._provideGroup.get(infoname, True):
depSet.addDep(deps.GroupInfoDependencies,
deps.Dependency(groupname, []))
return depSet
def getRequires(self, infoname, data):
groupname = data.get('GROUP', infoname)
supp = data.get('SUPPLEMENTAL', [])
depSet = deps.DependencySet()
for grpDep in supp:
depSet.addDep(deps.GroupInfoDependencies,
deps.Dependency(grpDep, []))
if not self.recipe._provideGroup.get(infoname):
depSet.addDep(deps.GroupInfoDependencies,
deps.Dependency(groupname, []))
return depSet
class ProcessGroupInfoPackage(_ProcessInfoPackage):
"""
NAME
====
B{C{r.ProcessGroupInfoPackage()}} - Set dependencies and tags for Group
info packages
SYNOPSIS
========
C{r.ProcessGroupInfoPackage()}
DESCRIPTION
===========
The C{r.ProcessGroupInfoPackage} policy automatically sets up provides
    and requires, as well as tags, for group info files created by the
C{r.Group} and C{r.SupplementalGroup} build actions.
This policy is not intended to be invoked from recipes. Do not use it.
"""
invariantsubtrees = ['%(groupinfodir)s']
component = 'group'
legalKeys = ['PREFERRED_GID', 'USER']
def getProvides(self, groupname, data):
depSet = deps.DependencySet()
depSet.addDep(deps.GroupInfoDependencies,
deps.Dependency(groupname, []))
return depSet
def getRequires(self, groupname, data):
infoname = data.get('USER')
depSet = deps.DependencySet()
if infoname:
depSet.addDep(deps.UserInfoDependencies,
deps.Dependency(infoname, []))
return depSet
class reportExcessBuildRequires(policy.Policy):
"""
NAME
====
B{C{r.reportExcessBuildRequires()}} - suggest items to remove from C{buildRequires} list
SYNOPSIS
========
C{r.reportExcessBuildRequires('required:component')}
C{r.reportExcessBuildRequires(['list:of', 'required:components'])}
DESCRIPTION
===========
The C{r.reportExcessBuildRequires()} policy is used to report
together all suggestions for possible items to remove from the
C{buildRequires} list.
The suggestions provided by this policy are build requirements
listed in the recipe's C{buildRequires} list for which Conary
has not specifically discovered a need. Build requirement
discovery is not perfect, which means that even though this
policy prints a warning that a build requirement might not be
necessary, Conary does not know that it is definitely not needed.
These are only hints. If you are not sure whether a component
should be removed from the C{buildRequires} list, it is safer
to leave it in the list. This is because an extra component
in the C{buildRequires} list is very unlikely to cause trouble,
but a truly missing component causes failure (by definition).
Because dependencies on C{:runtime} components are the least
likely dependencies to be discovered automatically, this policy
currently does not recommend removing any C{:runtime} components.
EXAMPLES
========
This policy is normally called only internally by other Conary
policies. However, a recipe can report build requirements
that are known by the recipe maintainer to be required but
which Conary does not discover automatically by passing a
list of these components. For example, if this policy
says that C{foo:devel} and C{blah:perl} are possible extra
build requirements, but you know that they are required in
order to correctly build the included software, you can
turn off the warnings like this:
C{r.reportExcessBuildRequires(['foo:devel', 'blah:perl'])}
This will tell the C{reportExcessBuildRequires} policy that
C{foo:devel} and C{blah:perl} are known to be required to
build the package.
No regular expressions are honored.
"""
bucket = policy.ERROR_REPORTING
processUnmodified = True
filetree = policy.NO_FILES
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def __init__(self, *args, **keywords):
self.found = set()
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
for arg in args:
if type(arg) in (list, tuple, set):
self.found.update(arg)
else:
self.found.add(arg)
def do(self):
# If absolutely no buildRequires were found automatically,
# assume that the buildRequires list has been carefully crafted
# for some reason that the buildRequires enforcement policy
# doesn't yet support, and don't warn that all of the listed
# buildRequires might be excessive.
if self.found and self.recipe._logFile:
r = self.recipe
def getReqNames(key):
return set(x.split('=')[0] for x in r._recipeRequirements[key])
recipeReqs = getReqNames('buildRequires')
superReqs = getReqNames('buildRequiresSuper')
foundPackages = set(x.split(':')[0] for x in self.found)
superClosure = r._getTransitiveDepClosure(superReqs)
foundClosure = r._getTransitiveDepClosure(self.found)
def removeCore(candidates):
# conary, python, and setup are always required; gcc
# is often an implicit requirement, and sqlite:lib is
                # listed explicitly to make bootstrapping easier
return set(x for x in candidates if
not x.startswith('conary')
and not x.startswith('python:')
and not x.startswith('gcc:')
and not x in ('libgcc:devellib',
'setup:runtime',
'sqlite:lib'))
def removeSome(candidates):
# at this point, we don't have good enough detection
# of :runtime in particular to recommend getting rid
# of it
return set(x for x in removeCore(candidates) if
not x.endswith(':runtime'))
def removeDupComponents(candidates):
# If any component is required, we don't really need
# to flag others as excessive in superclass excess
return set(x for x in candidates
if x.split(':')[0] not in foundPackages)
# for superclass reqs
excessSuperReqs = superReqs - foundClosure
if excessSuperReqs:
# note that as this is for debugging only, we do not
# remove runtime requirements
deDupedSuperReqs = sorted(list(
removeDupComponents(removeCore(excessSuperReqs))))
if deDupedSuperReqs:
self._reportExcessSuperclassBuildRequires(deDupedSuperReqs)
excessReqs = recipeReqs - self.found
redundantReqs = recipeReqs.intersection(superClosure)
if excessReqs or redundantReqs:
excessBuildRequires = sorted(list(
removeSome(excessReqs.union(redundantReqs))))
# all potential excess build requires might have
# been removed by removeSome
if excessBuildRequires:
self._reportExcessBuildRequires(excessBuildRequires)
def _reportExcessBuildRequires(self, reqList):
self.recipe._logFile.reportExcessBuildRequires(
sorted(list(reqList)))
def _reportExcessSuperclassBuildRequires(self, reqList):
self.recipe._logFile.reportExcessSuperclassBuildRequires(
sorted(list(reqList)))
class reportMissingBuildRequires(policy.Policy):
"""
This policy is used to report together all suggestions for
additions to the C{buildRequires} list.
Do not call it directly; it is for internal use only.
"""
bucket = policy.ERROR_REPORTING
processUnmodified = True
filetree = policy.NO_FILES
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def __init__(self, *args, **keywords):
self.errors = set()
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
for arg in args:
if type(arg) in (list, tuple, set):
self.errors.update(arg)
else:
self.errors.add(arg)
def do(self):
if self.errors and self.recipe._logFile:
self.recipe._logFile.reportMissingBuildRequires(
sorted(list(self.errors)))
class reportErrors(policy.Policy, policy.GroupPolicy):
"""
This policy is used to report together all package errors.
Do not call it directly; it is for internal use only.
"""
bucket = policy.ERROR_REPORTING
processUnmodified = True
filetree = policy.NO_FILES
groupError = False
supported_targets = (TARGET_LINUX, TARGET_WINDOWS)
def __init__(self, *args, **keywords):
self.errors = []
policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
"""
Called once, with printf-style arguments, for each warning.
"""
self.errors.append(args[0] %tuple(args[1:]))
groupError = keywords.pop('groupError', None)
if groupError is not None:
self.groupError = groupError
def do(self):
if self.errors:
msg = self.groupError and 'Group' or 'Package'
raise policy.PolicyError, ('%s Policy errors found:\n%%s' % msg) \
% "\n".join(self.errors)
class _TroveScript(policy.PackagePolicy):
processUnmodified = False
keywords = { 'contents' : None }
_troveScriptName = None
def __init__(self, *args, **keywords):
policy.PackagePolicy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
if args:
troveNames = args
else:
troveNames = [ self.recipe.name ]
self.troveNames = troveNames
policy.PackagePolicy.updateArgs(self, **keywords)
def do(self):
if not self.contents:
return
# Build component map
availTroveNames = dict((x.name, None) for x in
self.recipe.autopkg.getComponents())
availTroveNames.update(self.recipe.packages)
troveNames = set(self.troveNames) & set(availTroveNames)
# We don't support compatibility classes for troves (yet)
self.recipe._addTroveScript(troveNames, self.contents,
self._troveScriptName, None)
class ScriptPreUpdate(_TroveScript):
_troveScriptName = 'preUpdate'
class ScriptPostUpdate(_TroveScript):
_troveScriptName = 'postUpdate'
class ScriptPreInstall(_TroveScript):
_troveScriptName = 'preInstall'
class ScriptPostInstall(_TroveScript):
_troveScriptName = 'postInstall'
class ScriptPreErase(_TroveScript):
_troveScriptName = 'preErase'
class ScriptPostErase(_TroveScript):
_troveScriptName = 'postErase'
class ScriptPreRollback(_TroveScript):
_troveScriptName = 'preRollback'
class ScriptPostRollback(_TroveScript):
_troveScriptName = 'postRollback'
|
import unittest
import pytest
from libweasyl import ratings
from weasyl.test import db_utils
from weasyl import character
@pytest.mark.usefixtures('db')
class SelectCountTestCase(unittest.TestCase):
def setUp(self):
self.user1 = db_utils.create_user()
self.user2 = db_utils.create_user()
self.friend1 = db_utils.create_user()
db_utils.create_friendship(self.user1, self.friend1)
self.count = 20
self.pivot = 5
s = db_utils.create_characters(self.count, self.user1, ratings.GENERAL.code)
self.pivotid = s[self.pivot]
def test_count_backid(self):
self.assertEqual(
self.count - self.pivot - 1,
character.select_count(self.user1, ratings.GENERAL.code, backid=self.pivotid))
def test_count_nextid(self):
self.assertEqual(
self.pivot,
character.select_count(self.user1, ratings.GENERAL.code, nextid=self.pivotid))
def test_see_friends_character(self):
"""
Should be able to see a friend's friends-only character in a listing.
"""
c = db_utils.create_character(self.friend1, friends_only=True)
self.assertEqual(
self.count + 1,
character.select_count(self.user1, ratings.GENERAL.code))
self.assertEqual(
c,
character.select_list(self.user1, ratings.GENERAL.code, 100)[0]['charid'])
def test_cannot_see_non_friends_character(self):
"""
        Should not be able to see a non-friend's friends-only character in a listing.
"""
db_utils.create_character(self.user2, friends_only=True)
self.assertEqual(
self.count,
character.select_count(self.user1, ratings.GENERAL.code))
def test_can_see_own_blocktag_character(self):
"""
Can see your own character in a listing even with a blocked tag.
"""
block_tagid = db_utils.create_tag("blocked")
db_utils.create_blocktag(self.user1, block_tagid, ratings.GENERAL.code)
charid = db_utils.create_character(self.user1, name="My blocktag character")
db_utils.create_character_tag(block_tagid, charid)
        # A character that we should NOT see.
other_charid = db_utils.create_character(self.user2, name="Other user's blocktag character")
db_utils.create_character_tag(block_tagid, other_charid)
self.assertEqual(
charid,
character.select_list(self.user1, ratings.GENERAL.code, 100)[0]['charid'])
def test_can_see_own_rating_character(self):
"""
Can see your own character in a listing even when it's above your max rating.
"""
charid = db_utils.create_character(self.user1, rating=ratings.EXPLICIT.code)
db_utils.create_character(self.user2, rating=ratings.EXPLICIT.code)
self.assertEqual(
charid,
character.select_list(self.user1, ratings.GENERAL.code, 100)[0]['charid'])
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import unittest
from mri import MriServer
from mri.dispatch import MriServerDispatch
class TestMriServer(unittest.TestCase):
def test_new_dispatch(self):
server = MriServer("http://www.httpbin.com", "testuser", "testpass")
task = {"title": "TEST", "id": "000112233"}
dispatch = server.new_dispatch(task)
test_against = MriServerDispatch(task, "http://www.httpbin.com", "testuser", "testpass")
self.assertEqual(dispatch, test_against)
if __name__ == '__main__':
unittest.main()
|
from JumpScale import j
descr = """
This jumpscript returns network info
"""
category = "monitoring"
organization = "jumpscale"
author = "kristof@incubaid.com"
license = "bsd"
version = "1.0"
roles = []
def action():
return j.sal.nettools.getNetworkInfo()
if __name__ == "__main__":
print(action())
|
"""Unit tests."""
import mock
import pandas
import pytest
from google.api_core import exceptions
from google.auth.credentials import AnonymousCredentials
from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.proto import data_types_pb2
PROJECT = "project"
REGION = "region"
LOCATION_PATH = "projects/{}/locations/{}".format(PROJECT, REGION)
class TestTablesClient(object):
def tables_client(
self, client_attrs={}, prediction_client_attrs={}, gcs_client_attrs={}
):
client_mock = mock.Mock(**client_attrs)
prediction_client_mock = mock.Mock(**prediction_client_attrs)
gcs_client_mock = mock.Mock(**gcs_client_attrs)
return automl_v1beta1.TablesClient(
client=client_mock,
prediction_client=prediction_client_mock,
gcs_client=gcs_client_mock,
project=PROJECT,
region=REGION,
)
def test_list_datasets_empty(self):
client = self.tables_client(
{
"list_datasets.return_value": [],
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_datasets()
client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
client.auto_ml_client.list_datasets.assert_called_with(LOCATION_PATH)
assert ds == []
def test_list_datasets_not_empty(self):
datasets = ["some_dataset"]
client = self.tables_client(
{
"list_datasets.return_value": datasets,
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_datasets()
client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
client.auto_ml_client.list_datasets.assert_called_with(LOCATION_PATH)
assert len(ds) == 1
assert ds[0] == "some_dataset"
def test_get_dataset_no_value(self):
dataset_actual = "dataset"
client = self.tables_client({}, {})
with pytest.raises(ValueError):
dataset = client.get_dataset()
client.auto_ml_client.get_dataset.assert_not_called()
def test_get_dataset_name(self):
dataset_actual = "dataset"
client = self.tables_client({"get_dataset.return_value": dataset_actual}, {})
dataset = client.get_dataset(dataset_name="my_dataset")
client.auto_ml_client.get_dataset.assert_called_with("my_dataset")
assert dataset == dataset_actual
def test_get_no_dataset(self):
client = self.tables_client(
{"get_dataset.side_effect": exceptions.NotFound("err")}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_dataset(dataset_name="my_dataset")
client.auto_ml_client.get_dataset.assert_called_with("my_dataset")
def test_get_dataset_from_empty_list(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.get_dataset(dataset_display_name="my_dataset")
def test_get_dataset_from_list_not_found(self):
client = self.tables_client(
{"list_datasets.return_value": [mock.Mock(display_name="not_it")]}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_dataset(dataset_display_name="my_dataset")
def test_get_dataset_from_list(self):
client = self.tables_client(
{
"list_datasets.return_value": [
mock.Mock(display_name="not_it"),
mock.Mock(display_name="my_dataset"),
]
},
{},
)
dataset = client.get_dataset(dataset_display_name="my_dataset")
assert dataset.display_name == "my_dataset"
def test_get_dataset_from_list_ambiguous(self):
client = self.tables_client(
{
"list_datasets.return_value": [
mock.Mock(display_name="my_dataset"),
mock.Mock(display_name="not_my_dataset"),
mock.Mock(display_name="my_dataset"),
]
},
{},
)
with pytest.raises(ValueError):
client.get_dataset(dataset_display_name="my_dataset")
def test_create_dataset(self):
client = self.tables_client(
{
"location_path.return_value": LOCATION_PATH,
"create_dataset.return_value": mock.Mock(display_name="name"),
},
{},
)
metadata = {"metadata": "values"}
dataset = client.create_dataset("name", metadata=metadata)
client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
client.auto_ml_client.create_dataset.assert_called_with(
LOCATION_PATH, {"display_name": "name", "tables_dataset_metadata": metadata}
)
assert dataset.display_name == "name"
def test_delete_dataset(self):
dataset = mock.Mock()
dataset.configure_mock(name="name")
client = self.tables_client({"delete_dataset.return_value": None}, {})
client.delete_dataset(dataset=dataset)
client.auto_ml_client.delete_dataset.assert_called_with("name")
def test_delete_dataset_not_found(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
client.delete_dataset(dataset_display_name="not_found")
client.auto_ml_client.delete_dataset.assert_not_called()
def test_delete_dataset_name(self):
client = self.tables_client({"delete_dataset.return_value": None}, {})
client.delete_dataset(dataset_name="name")
client.auto_ml_client.delete_dataset.assert_called_with("name")
def test_export_not_found(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.export_data(dataset_display_name="name", gcs_input_uris="uri")
client.auto_ml_client.export_data.assert_not_called()
def test_export_gcs_uri(self):
client = self.tables_client({"export_data.return_value": None}, {})
client.export_data(dataset_name="name", gcs_output_uri_prefix="uri")
client.auto_ml_client.export_data.assert_called_with(
"name", {"gcs_destination": {"output_uri_prefix": "uri"}}
)
def test_export_bq_uri(self):
client = self.tables_client({"export_data.return_value": None}, {})
client.export_data(dataset_name="name", bigquery_output_uri="uri")
client.auto_ml_client.export_data.assert_called_with(
"name", {"bigquery_destination": {"output_uri": "uri"}}
)
def test_import_not_found(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.import_data(dataset_display_name="name", gcs_input_uris="uri")
client.auto_ml_client.import_data.assert_not_called()
def test_import_pandas_dataframe(self):
client = self.tables_client(
gcs_client_attrs={
"bucket_name": "my_bucket",
"upload_pandas_dataframe.return_value": "uri",
}
)
dataframe = pandas.DataFrame({})
client.import_data(
project=PROJECT,
region=REGION,
dataset_name="name",
pandas_dataframe=dataframe,
)
client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
client.auto_ml_client.import_data.assert_called_with(
"name", {"gcs_source": {"input_uris": ["uri"]}}
)
def test_import_pandas_dataframe_init_gcs(self):
client = automl_v1beta1.TablesClient(
client=mock.Mock(),
prediction_client=mock.Mock(),
project=PROJECT,
region=REGION,
credentials=AnonymousCredentials(),
)
dataframe = pandas.DataFrame({})
patch = mock.patch(
"google.cloud.automl_v1beta1.tables.tables_client.gcs_client.GcsClient",
bucket_name="my_bucket",
)
with patch as MockGcsClient:
mockInstance = MockGcsClient.return_value
mockInstance.upload_pandas_dataframe.return_value = "uri"
client.import_data(dataset_name="name", pandas_dataframe=dataframe)
assert client.gcs_client is mockInstance
client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
client.auto_ml_client.import_data.assert_called_with(
"name", {"gcs_source": {"input_uris": ["uri"]}}
)
def test_import_gcs_uri(self):
client = self.tables_client({"import_data.return_value": None}, {})
client.import_data(dataset_name="name", gcs_input_uris="uri")
client.auto_ml_client.import_data.assert_called_with(
"name", {"gcs_source": {"input_uris": ["uri"]}}
)
def test_import_gcs_uris(self):
client = self.tables_client({"import_data.return_value": None}, {})
client.import_data(dataset_name="name", gcs_input_uris=["uri", "uri"])
client.auto_ml_client.import_data.assert_called_with(
"name", {"gcs_source": {"input_uris": ["uri", "uri"]}}
)
def test_import_bq_uri(self):
client = self.tables_client({"import_data.return_value": None}, {})
client.import_data(dataset_name="name", bigquery_input_uri="uri")
client.auto_ml_client.import_data.assert_called_with(
"name", {"bigquery_source": {"input_uri": "uri"}}
)
def test_list_table_specs(self):
client = self.tables_client({"list_table_specs.return_value": None}, {})
client.list_table_specs(dataset_name="name")
client.auto_ml_client.list_table_specs.assert_called_with("name")
def test_list_table_specs_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("not found")}, {}
)
with pytest.raises(exceptions.NotFound):
client.list_table_specs(dataset_name="name")
client.auto_ml_client.list_table_specs.assert_called_with("name")
def test_get_table_spec(self):
client = self.tables_client({}, {})
client.get_table_spec("name")
client.auto_ml_client.get_table_spec.assert_called_with("name")
def test_get_column_spec(self):
client = self.tables_client({}, {})
client.get_column_spec("name")
client.auto_ml_client.get_column_spec.assert_called_with("name")
def test_list_column_specs(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [],
},
{},
)
client.list_column_specs(dataset_name="name")
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
def test_update_column_spec_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
data_type_mock = mock.Mock(type_code="type_code")
column_spec_mock.configure_mock(
name="column", display_name="column", data_type=data_type_mock
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
with pytest.raises(exceptions.NotFound):
client.update_column_spec(dataset_name="name", column_spec_name="column2")
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_column_spec.assert_not_called()
def test_update_column_spec_display_name_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
data_type_mock = mock.Mock(type_code="type_code")
column_spec_mock.configure_mock(
name="column", display_name="column", data_type=data_type_mock
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
with pytest.raises(exceptions.NotFound):
client.update_column_spec(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_column_spec.assert_not_called()
def test_update_column_spec_name_no_args(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
data_type_mock = mock.Mock(type_code="type_code")
column_spec_mock.configure_mock(
name="column/2", display_name="column", data_type=data_type_mock
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.update_column_spec(dataset_name="name", column_spec_name="column/2")
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_column_spec.assert_called_with(
{"name": "column/2", "data_type": {"type_code": "type_code"}}
)
def test_update_column_spec_no_args(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
data_type_mock = mock.Mock(type_code="type_code")
column_spec_mock.configure_mock(
name="column", display_name="column", data_type=data_type_mock
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.update_column_spec(
dataset_name="name", column_spec_display_name="column"
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_column_spec.assert_called_with(
{"name": "column", "data_type": {"type_code": "type_code"}}
)
def test_update_column_spec_nullable(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
data_type_mock = mock.Mock(type_code="type_code")
column_spec_mock.configure_mock(
name="column", display_name="column", data_type=data_type_mock
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.update_column_spec(
dataset_name="name", column_spec_display_name="column", nullable=True
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_column_spec.assert_called_with(
{
"name": "column",
"data_type": {"type_code": "type_code", "nullable": True},
}
)
def test_update_column_spec_type_code(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
data_type_mock = mock.Mock(type_code="type_code")
column_spec_mock.configure_mock(
name="column", display_name="column", data_type=data_type_mock
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.update_column_spec(
dataset_name="name",
column_spec_display_name="column",
type_code="type_code2",
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_column_spec.assert_called_with(
{"name": "column", "data_type": {"type_code": "type_code2"}}
)
def test_update_column_spec_type_code_nullable(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
data_type_mock = mock.Mock(type_code="type_code")
column_spec_mock.configure_mock(
name="column", display_name="column", data_type=data_type_mock
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.update_column_spec(
dataset_name="name",
nullable=True,
column_spec_display_name="column",
type_code="type_code2",
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_column_spec.assert_called_with(
{
"name": "column",
"data_type": {"type_code": "type_code2", "nullable": True},
}
)
def test_update_column_spec_type_code_nullable_false(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
data_type_mock = mock.Mock(type_code="type_code")
column_spec_mock.configure_mock(
name="column", display_name="column", data_type=data_type_mock
)
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.update_column_spec(
dataset_name="name",
nullable=False,
column_spec_display_name="column",
type_code="type_code2",
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_column_spec.assert_called_with(
{
"name": "column",
"data_type": {"type_code": "type_code2", "nullable": False},
}
)
def test_set_target_column_table_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("err")}, {}
)
with pytest.raises(exceptions.NotFound):
client.set_target_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_not_called()
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_target_column_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/1", display_name="column")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
with pytest.raises(exceptions.NotFound):
client.set_target_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_target_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/1", display_name="column")
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="2",
weight_column_spec_id="2",
ml_use_column_spec_id="3",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.set_target_column(dataset_name="name", column_spec_display_name="column")
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_dataset.assert_called_with(
{
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": "2",
"ml_use_column_spec_id": "3",
},
}
)
def test_set_weight_column_table_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("err")}, {}
)
        with pytest.raises(exceptions.NotFound):
            client.set_weight_column(
                dataset_name="name", column_spec_display_name="column2"
            )
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_not_called()
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_weight_column_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/1", display_name="column")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
with pytest.raises(exceptions.NotFound):
client.set_weight_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_weight_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/2", display_name="column")
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="1",
weight_column_spec_id="1",
ml_use_column_spec_id="3",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.set_weight_column(dataset_name="name", column_spec_display_name="column")
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_dataset.assert_called_with(
{
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": "2",
"ml_use_column_spec_id": "3",
},
}
)
def test_clear_weight_column(self):
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="1",
weight_column_spec_id="2",
ml_use_column_spec_id="3",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client({"get_dataset.return_value": dataset_mock}, {})
client.clear_weight_column(dataset_name="name")
client.auto_ml_client.update_dataset.assert_called_with(
{
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": None,
"ml_use_column_spec_id": "3",
},
}
)
def test_set_test_train_column_table_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("err")}, {}
)
with pytest.raises(exceptions.NotFound):
client.set_test_train_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_not_called()
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_test_train_column_not_found(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/1", display_name="column")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
with pytest.raises(exceptions.NotFound):
client.set_test_train_column(
dataset_name="name", column_spec_display_name="column2"
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_dataset.assert_not_called()
def test_set_test_train_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/3", display_name="column")
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="1",
weight_column_spec_id="2",
ml_use_column_spec_id="2",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.set_test_train_column(
dataset_name="name", column_spec_display_name="column"
)
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_dataset.assert_called_with(
{
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": "2",
"ml_use_column_spec_id": "3",
},
}
)
def test_clear_test_train_column(self):
dataset_mock = mock.Mock()
tables_dataset_metadata_mock = mock.Mock()
tables_dataset_metadata_mock.configure_mock(
target_column_spec_id="1",
weight_column_spec_id="2",
ml_use_column_spec_id="2",
)
dataset_mock.configure_mock(
name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
)
client = self.tables_client({"get_dataset.return_value": dataset_mock}, {})
client.clear_test_train_column(dataset_name="name")
client.auto_ml_client.update_dataset.assert_called_with(
{
"name": "dataset",
"tables_dataset_metadata": {
"target_column_spec_id": "1",
"weight_column_spec_id": "2",
"ml_use_column_spec_id": None,
},
}
)
def test_set_time_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/3", display_name="column")
dataset_mock = mock.Mock()
dataset_mock.configure_mock(name="dataset")
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
},
{},
)
client.set_time_column(dataset_name="name", column_spec_display_name="column")
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_called_with("table")
client.auto_ml_client.update_table_spec.assert_called_with(
{"name": "table", "time_column_spec_id": "3"}
)
def test_clear_time_column(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
dataset_mock = mock.Mock()
dataset_mock.configure_mock(name="dataset")
client = self.tables_client(
{
"get_dataset.return_value": dataset_mock,
"list_table_specs.return_value": [table_spec_mock],
},
{},
)
client.clear_time_column(dataset_name="name")
client.auto_ml_client.update_table_spec.assert_called_with(
{"name": "table", "time_column_spec_id": None}
)
def test_get_model_evaluation(self):
client = self.tables_client({}, {})
ds = client.get_model_evaluation(model_evaluation_name="x")
client.auto_ml_client.get_model_evaluation.assert_called_with("x")
def test_list_model_evaluations_empty(self):
client = self.tables_client({"list_model_evaluations.return_value": []}, {})
ds = client.list_model_evaluations(model_name="model")
client.auto_ml_client.list_model_evaluations.assert_called_with("model")
assert ds == []
def test_list_model_evaluations_not_empty(self):
evaluations = ["eval"]
client = self.tables_client(
{
"list_model_evaluations.return_value": evaluations,
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_model_evaluations(model_name="model")
client.auto_ml_client.list_model_evaluations.assert_called_with("model")
assert len(ds) == 1
assert ds[0] == "eval"
def test_list_models_empty(self):
client = self.tables_client(
{
"list_models.return_value": [],
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_models()
client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
client.auto_ml_client.list_models.assert_called_with(LOCATION_PATH)
assert ds == []
def test_list_models_not_empty(self):
models = ["some_model"]
client = self.tables_client(
{
"list_models.return_value": models,
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_models()
client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
client.auto_ml_client.list_models.assert_called_with(LOCATION_PATH)
assert len(ds) == 1
assert ds[0] == "some_model"
def test_get_model_name(self):
model_actual = "model"
client = self.tables_client({"get_model.return_value": model_actual}, {})
model = client.get_model(model_name="my_model")
client.auto_ml_client.get_model.assert_called_with("my_model")
assert model == model_actual
def test_get_no_model(self):
client = self.tables_client(
{"get_model.side_effect": exceptions.NotFound("err")}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_model(model_name="my_model")
client.auto_ml_client.get_model.assert_called_with("my_model")
def test_get_model_from_empty_list(self):
client = self.tables_client({"list_models.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.get_model(model_display_name="my_model")
def test_get_model_from_list_not_found(self):
client = self.tables_client(
{"list_models.return_value": [mock.Mock(display_name="not_it")]}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_model(model_display_name="my_model")
def test_get_model_from_list(self):
client = self.tables_client(
{
"list_models.return_value": [
mock.Mock(display_name="not_it"),
mock.Mock(display_name="my_model"),
]
},
{},
)
model = client.get_model(model_display_name="my_model")
assert model.display_name == "my_model"
def test_get_model_from_list_ambiguous(self):
client = self.tables_client(
{
"list_models.return_value": [
mock.Mock(display_name="my_model"),
mock.Mock(display_name="not_my_model"),
mock.Mock(display_name="my_model"),
]
},
{},
)
with pytest.raises(ValueError):
client.get_model(model_display_name="my_model")
def test_delete_model(self):
model = mock.Mock()
model.configure_mock(name="name")
client = self.tables_client({"delete_model.return_value": None}, {})
client.delete_model(model=model)
client.auto_ml_client.delete_model.assert_called_with("name")
def test_delete_model_not_found(self):
client = self.tables_client({"list_models.return_value": []}, {})
client.delete_model(model_display_name="not_found")
client.auto_ml_client.delete_model.assert_not_called()
def test_delete_model_name(self):
client = self.tables_client({"delete_model.return_value": None}, {})
client.delete_model(model_name="name")
client.auto_ml_client.delete_model.assert_called_with("name")
def test_deploy_model_no_args(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.deploy_model()
client.auto_ml_client.deploy_model.assert_not_called()
def test_deploy_model(self):
client = self.tables_client({}, {})
client.deploy_model(model_name="name")
client.auto_ml_client.deploy_model.assert_called_with("name")
def test_deploy_model_not_found(self):
client = self.tables_client({"list_models.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.deploy_model(model_display_name="name")
client.auto_ml_client.deploy_model.assert_not_called()
def test_undeploy_model(self):
client = self.tables_client({}, {})
client.undeploy_model(model_name="name")
client.auto_ml_client.undeploy_model.assert_called_with("name")
def test_undeploy_model_not_found(self):
client = self.tables_client({"list_models.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.undeploy_model(model_display_name="name")
client.auto_ml_client.undeploy_model.assert_not_called()
def test_create_model(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock = mock.Mock()
column_spec_mock.configure_mock(name="column/2", display_name="column")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [column_spec_mock],
"location_path.return_value": LOCATION_PATH,
},
{},
)
client.create_model(
"my_model", dataset_name="my_dataset", train_budget_milli_node_hours=1000
)
client.auto_ml_client.create_model.assert_called_with(
LOCATION_PATH,
{
"display_name": "my_model",
"dataset_id": "my_dataset",
"tables_model_metadata": {"train_budget_milli_node_hours": 1000},
},
)
def test_create_model_include_columns(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock1 = mock.Mock()
column_spec_mock1.configure_mock(name="column/1", display_name="column1")
column_spec_mock2 = mock.Mock()
column_spec_mock2.configure_mock(name="column/2", display_name="column2")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [
column_spec_mock1,
column_spec_mock2,
],
"location_path.return_value": LOCATION_PATH,
},
{},
)
client.create_model(
"my_model",
dataset_name="my_dataset",
include_column_spec_names=["column1"],
train_budget_milli_node_hours=1000,
)
client.auto_ml_client.create_model.assert_called_with(
LOCATION_PATH,
{
"display_name": "my_model",
"dataset_id": "my_dataset",
"tables_model_metadata": {
"train_budget_milli_node_hours": 1000,
"input_feature_column_specs": [column_spec_mock1],
},
},
)
def test_create_model_exclude_columns(self):
table_spec_mock = mock.Mock()
# name is reserved in use of __init__, needs to be passed here
table_spec_mock.configure_mock(name="table")
column_spec_mock1 = mock.Mock()
column_spec_mock1.configure_mock(name="column/1", display_name="column1")
column_spec_mock2 = mock.Mock()
column_spec_mock2.configure_mock(name="column/2", display_name="column2")
client = self.tables_client(
{
"list_table_specs.return_value": [table_spec_mock],
"list_column_specs.return_value": [
column_spec_mock1,
column_spec_mock2,
],
"location_path.return_value": LOCATION_PATH,
},
{},
)
client.create_model(
"my_model",
dataset_name="my_dataset",
exclude_column_spec_names=["column1"],
train_budget_milli_node_hours=1000,
)
client.auto_ml_client.create_model.assert_called_with(
LOCATION_PATH,
{
"display_name": "my_model",
"dataset_id": "my_dataset",
"tables_model_metadata": {
"train_budget_milli_node_hours": 1000,
"input_feature_column_specs": [column_spec_mock2],
},
},
)
def test_create_model_invalid_hours_small(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.create_model(
"my_model", dataset_name="my_dataset", train_budget_milli_node_hours=1
)
client.auto_ml_client.create_model.assert_not_called()
def test_create_model_invalid_hours_large(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.create_model(
"my_model",
dataset_name="my_dataset",
train_budget_milli_node_hours=1000000,
)
client.auto_ml_client.create_model.assert_not_called()
def test_create_model_invalid_no_dataset(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.create_model("my_model", train_budget_milli_node_hours=1000)
client.auto_ml_client.get_dataset.assert_not_called()
client.auto_ml_client.create_model.assert_not_called()
def test_create_model_invalid_include_exclude(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.create_model(
"my_model",
dataset_name="my_dataset",
include_column_spec_names=["a"],
exclude_column_spec_names=["b"],
train_budget_milli_node_hours=1000,
)
client.auto_ml_client.get_dataset.assert_not_called()
client.auto_ml_client.create_model.assert_not_called()
def test_predict_from_array(self):
data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
column_spec = mock.Mock(display_name="a", data_type=data_type)
model_metadata = mock.Mock(input_feature_column_specs=[column_spec])
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict(["1"], model_name="my_model")
client.prediction_client.predict.assert_called_with(
"my_model", {"row": {"values": [{"string_value": "1"}]}}, None
)
def test_predict_from_dict(self):
data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
column_spec_a = mock.Mock(display_name="a", data_type=data_type)
column_spec_b = mock.Mock(display_name="b", data_type=data_type)
model_metadata = mock.Mock(
input_feature_column_specs=[column_spec_a, column_spec_b]
)
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict({"a": "1", "b": "2"}, model_name="my_model")
client.prediction_client.predict.assert_called_with(
"my_model",
{"row": {"values": [{"string_value": "1"}, {"string_value": "2"}]}},
None,
)
def test_predict_from_dict_with_feature_importance(self):
data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
column_spec_a = mock.Mock(display_name="a", data_type=data_type)
column_spec_b = mock.Mock(display_name="b", data_type=data_type)
model_metadata = mock.Mock(
input_feature_column_specs=[column_spec_a, column_spec_b]
)
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict(
{"a": "1", "b": "2"}, model_name="my_model", feature_importance=True
)
client.prediction_client.predict.assert_called_with(
"my_model",
{"row": {"values": [{"string_value": "1"}, {"string_value": "2"}]}},
{"feature_importance": "true"},
)
def test_predict_from_dict_missing(self):
data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
column_spec_a = mock.Mock(display_name="a", data_type=data_type)
column_spec_b = mock.Mock(display_name="b", data_type=data_type)
model_metadata = mock.Mock(
input_feature_column_specs=[column_spec_a, column_spec_b]
)
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict({"a": "1"}, model_name="my_model")
client.prediction_client.predict.assert_called_with(
"my_model",
{"row": {"values": [{"string_value": "1"}, {"null_value": 0}]}},
None,
)
def test_predict_all_types(self):
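        # Each AutoML data type maps to a different protobuf Value field in the
        # prediction row: FLOAT64 -> number_value, TIMESTAMP/STRING/CATEGORY ->
        # string_value, ARRAY -> list_value, STRUCT -> struct_value, and a
        # missing/None value -> null_value.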
float_type = mock.Mock(type_code=data_types_pb2.FLOAT64)
timestamp_type = mock.Mock(type_code=data_types_pb2.TIMESTAMP)
string_type = mock.Mock(type_code=data_types_pb2.STRING)
array_type = mock.Mock(type_code=data_types_pb2.ARRAY)
struct_type = mock.Mock(type_code=data_types_pb2.STRUCT)
category_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
column_spec_float = mock.Mock(display_name="float", data_type=float_type)
column_spec_timestamp = mock.Mock(
display_name="timestamp", data_type=timestamp_type
)
column_spec_string = mock.Mock(display_name="string", data_type=string_type)
column_spec_array = mock.Mock(display_name="array", data_type=array_type)
column_spec_struct = mock.Mock(display_name="struct", data_type=struct_type)
column_spec_category = mock.Mock(
display_name="category", data_type=category_type
)
column_spec_null = mock.Mock(display_name="null", data_type=category_type)
model_metadata = mock.Mock(
input_feature_column_specs=[
column_spec_float,
column_spec_timestamp,
column_spec_string,
column_spec_array,
column_spec_struct,
column_spec_category,
column_spec_null,
]
)
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
client.predict(
{
"float": 1.0,
"timestamp": "EST",
"string": "text",
"array": [1],
"struct": {"a": "b"},
"category": "a",
"null": None,
},
model_name="my_model",
)
client.prediction_client.predict.assert_called_with(
"my_model",
{
"row": {
"values": [
{"number_value": 1.0},
{"string_value": "EST"},
{"string_value": "text"},
{"list_value": [1]},
{"struct_value": {"a": "b"}},
{"string_value": "a"},
{"null_value": 0},
]
}
},
None,
)
def test_predict_from_array_missing(self):
data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
column_spec = mock.Mock(display_name="a", data_type=data_type)
model_metadata = mock.Mock(input_feature_column_specs=[column_spec])
model = mock.Mock()
model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
client = self.tables_client({"get_model.return_value": model}, {})
with pytest.raises(ValueError):
client.predict([], model_name="my_model")
client.prediction_client.predict.assert_not_called()
def test_batch_predict_pandas_dataframe(self):
client = self.tables_client(
gcs_client_attrs={
"bucket_name": "my_bucket",
"upload_pandas_dataframe.return_value": "gs://input",
}
)
dataframe = pandas.DataFrame({})
client.batch_predict(
project=PROJECT,
region=REGION,
model_name="my_model",
pandas_dataframe=dataframe,
gcs_output_uri_prefix="gs://output",
)
client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
client.prediction_client.batch_predict.assert_called_with(
"my_model",
{"gcs_source": {"input_uris": ["gs://input"]}},
{"gcs_destination": {"output_uri_prefix": "gs://output"}},
)
def test_batch_predict_pandas_dataframe_init_gcs(self):
client = automl_v1beta1.TablesClient(
client=mock.Mock(),
prediction_client=mock.Mock(),
project=PROJECT,
region=REGION,
credentials=AnonymousCredentials(),
)
dataframe = pandas.DataFrame({})
patch = mock.patch(
"google.cloud.automl_v1beta1.tables.tables_client.gcs_client.GcsClient",
bucket_name="my_bucket",
)
with patch as MockGcsClient:
mockInstance = MockGcsClient.return_value
mockInstance.upload_pandas_dataframe.return_value = "gs://input"
dataframe = pandas.DataFrame({})
client.batch_predict(
model_name="my_model",
pandas_dataframe=dataframe,
gcs_output_uri_prefix="gs://output",
)
client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
client.prediction_client.batch_predict.assert_called_with(
"my_model",
{"gcs_source": {"input_uris": ["gs://input"]}},
{"gcs_destination": {"output_uri_prefix": "gs://output"}},
)
def test_batch_predict_gcs(self):
client = self.tables_client({}, {})
client.batch_predict(
model_name="my_model",
gcs_input_uris="gs://input",
gcs_output_uri_prefix="gs://output",
)
client.prediction_client.batch_predict.assert_called_with(
"my_model",
{"gcs_source": {"input_uris": ["gs://input"]}},
{"gcs_destination": {"output_uri_prefix": "gs://output"}},
)
def test_batch_predict_bigquery(self):
client = self.tables_client({}, {})
client.batch_predict(
model_name="my_model",
bigquery_input_uri="bq://input",
bigquery_output_uri="bq://output",
)
client.prediction_client.batch_predict.assert_called_with(
"my_model",
{"bigquery_source": {"input_uri": "bq://input"}},
{"bigquery_destination": {"output_uri": "bq://output"}},
)
def test_batch_predict_mixed(self):
client = self.tables_client({}, {})
client.batch_predict(
model_name="my_model",
gcs_input_uris="gs://input",
bigquery_output_uri="bq://output",
)
client.prediction_client.batch_predict.assert_called_with(
"my_model",
{"gcs_source": {"input_uris": ["gs://input"]}},
{"bigquery_destination": {"output_uri": "bq://output"}},
)
def test_batch_predict_missing_input_gcs_uri(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
model_name="my_model",
gcs_input_uris=None,
gcs_output_uri_prefix="gs://output",
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_missing_input_bigquery_uri(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
model_name="my_model",
bigquery_input_uri=None,
gcs_output_uri_prefix="gs://output",
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_missing_output_gcs_uri(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
model_name="my_model",
gcs_input_uris="gs://input",
gcs_output_uri_prefix=None,
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_missing_output_bigquery_uri(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
model_name="my_model",
gcs_input_uris="gs://input",
bigquery_output_uri=None,
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_missing_model(self):
client = self.tables_client({"list_models.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.batch_predict(
model_display_name="my_model",
gcs_input_uris="gs://input",
gcs_output_uri_prefix="gs://output",
)
client.prediction_client.batch_predict.assert_not_called()
def test_batch_predict_no_model(self):
client = self.tables_client({}, {})
with pytest.raises(ValueError):
client.batch_predict(
gcs_input_uris="gs://input", gcs_output_uri_prefix="gs://output"
)
client.auto_ml_client.list_models.assert_not_called()
client.prediction_client.batch_predict.assert_not_called()
def test_auto_ml_client_credentials(self):
credentials_mock = mock.Mock()
patch_auto_ml_client = mock.patch(
"google.cloud.automl_v1beta1.gapic.auto_ml_client.AutoMlClient"
)
with patch_auto_ml_client as MockAutoMlClient:
client = automl_v1beta1.TablesClient(credentials=credentials_mock)
_, auto_ml_client_kwargs = MockAutoMlClient.call_args
assert "credentials" in auto_ml_client_kwargs
assert auto_ml_client_kwargs["credentials"] == credentials_mock
def test_prediction_client_credentials(self):
credentials_mock = mock.Mock()
patch_prediction_client = mock.patch(
"google.cloud.automl_v1beta1.gapic.prediction_service_client.PredictionServiceClient"
)
with patch_prediction_client as MockPredictionClient:
client = automl_v1beta1.TablesClient(credentials=credentials_mock)
_, prediction_client_kwargs = MockPredictionClient.call_args
assert "credentials" in prediction_client_kwargs
assert prediction_client_kwargs["credentials"] == credentials_mock
def test_prediction_client_client_info(self):
client_info_mock = mock.Mock()
patch_prediction_client = mock.patch(
"google.cloud.automl_v1beta1.gapic.prediction_service_client.PredictionServiceClient"
)
with patch_prediction_client as MockPredictionClient:
client = automl_v1beta1.TablesClient(client_info=client_info_mock)
_, prediction_client_kwargs = MockPredictionClient.call_args
assert "client_info" in prediction_client_kwargs
assert prediction_client_kwargs["client_info"] == client_info_mock
|
from rest_framework import status
from rest_framework.exceptions import APIException, ParseError
def json_api_exception_handler(exc, context):
""" Custom exception handler that returns errors object as an array """
# Import inside method to avoid errors when the OSF is loaded without Django
from rest_framework.views import exception_handler
response = exception_handler(exc, context)
# Error objects may have the following members. Title removed to avoid clash with node "title" errors.
top_level_error_keys = ['id', 'links', 'status', 'code', 'detail', 'source', 'meta']
errors = []
if response:
message = response.data
if isinstance(message, dict):
for error_key, error_description in message.iteritems():
if error_key in top_level_error_keys:
errors.append({error_key: error_description})
else:
if isinstance(error_description, basestring):
error_description = [error_description]
errors.extend([{'source': {'pointer': '/data/attributes/' + error_key}, 'detail': reason}
for reason in error_description])
else:
if isinstance(message, basestring):
message = [message]
errors.extend([{'detail': error} for error in message])
response.data = {'errors': errors}
return response
class Gone(APIException):
status_code = status.HTTP_410_GONE
default_detail = ('The requested resource is no longer available.')
class InvalidFilterError(ParseError):
"""Raised when client passes an invalid filter in the querystring."""
default_detail = 'Querystring contains an invalid filter.'
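# A minimal sketch of wiring the handler into Django REST Framework settings
# (the module path below is illustrative, not taken from this file):
#
#     REST_FRAMEWORK = {
#         'EXCEPTION_HANDLER': 'api.base.exceptions.json_api_exception_handler',
#     }
#
# With the handler installed, a field validation error would be rendered
# roughly as:
#
#     {"errors": [{"source": {"pointer": "/data/attributes/title"},
#                  "detail": "This field is required."}]}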
|
import requests
url = "https://maps.googleapis.com/maps/api/geocode/json?place_id=ChIJd8BlQ2BZwokRAFUEcm_qrcA&key=YOUR_API_KEY"
payload = {}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
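# A minimal sketch of extracting coordinates from the reply, assuming the call
# succeeded and returned at least one result (the Geocoding API reports a
# "status" field and a "results" list whose entries carry geometry.location):
data = response.json()
if data.get("status") == "OK":
    location = data["results"][0]["geometry"]["location"]
    print(location["lat"], location["lng"])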
|
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from game import app
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(5000)
IOLoop.instance().start()
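# Note: WSGIContainer executes the WSGI app synchronously on Tornado's IOLoop,
# so this setup provides Tornado's HTTP server but not asynchronous request
# handling; heavier deployments typically put the app behind a dedicated
# multi-threaded WSGI server instead.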
|
import logging
from keystoneclient import base
from keystoneclient import exceptions
from keystoneclient.i18n import _, _LW
from keystoneclient import utils
LOG = logging.getLogger(__name__)
class User(base.Resource):
"""Represents an Identity user.
Attributes:
* id: a uuid that identifies the user
"""
pass
class UserManager(base.CrudManager):
"""Manager class for manipulating Identity users."""
resource_class = User
collection_key = 'users'
key = 'user'
def _require_user_and_group(self, user, group):
if not (user and group):
msg = _('Specify both a user and a group')
raise exceptions.ValidationError(msg)
@utils.positional(1, enforcement=utils.positional.WARN)
def create(self, name, domain=None, project=None, password=None,
email=None, description=None, enabled=True,
default_project=None, **kwargs):
"""Create a user.
.. warning::
            The project argument is deprecated, use default_project instead.
            If both default_project and project are provided, default_project
            will be used.
"""
if project:
LOG.warning(_LW("The project argument is deprecated, "
"use default_project instead."))
default_project_id = base.getid(default_project) or base.getid(project)
user_data = base.filter_none(name=name,
domain_id=base.getid(domain),
default_project_id=default_project_id,
password=password,
email=email,
description=description,
enabled=enabled,
**kwargs)
return self._create('/users', {'user': user_data}, 'user',
log=not bool(password))
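    # Usage sketch (illustrative, assuming an authenticated v3 client object
    # named `keystone`; the names below are not defined in this module):
    #
    #     keystone.users.create(name='demo', domain='default',
    #                           password='secret', email='demo@example.com')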
@utils.positional(enforcement=utils.positional.WARN)
def list(self, project=None, domain=None, group=None, default_project=None,
**kwargs):
"""List users.
If project, domain or group are provided, then filter
users with those attributes.
If ``**kwargs`` are provided, then filter users with
attributes matching ``**kwargs``.
.. warning::
            The project argument is deprecated, use default_project instead.
            If both default_project and project are provided, default_project
            will be used.
"""
if project:
LOG.warning(_LW("The project argument is deprecated, "
"use default_project instead."))
default_project_id = base.getid(default_project) or base.getid(project)
if group:
base_url = '/groups/%s' % base.getid(group)
else:
base_url = None
return super(UserManager, self).list(
base_url=base_url,
domain_id=base.getid(domain),
default_project_id=default_project_id,
**kwargs)
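    # Usage sketch (illustrative): keystone.users.list(domain='default')
    # filters by domain, while keystone.users.list(group=group) lists the
    # members of a group through the nested /groups/<id> collection.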
def get(self, user):
return super(UserManager, self).get(
user_id=base.getid(user))
@utils.positional(enforcement=utils.positional.WARN)
def update(self, user, name=None, domain=None, project=None, password=None,
email=None, description=None, enabled=None,
default_project=None, **kwargs):
"""Update a user.
.. warning::
            The project argument is deprecated, use default_project instead.
            If both default_project and project are provided, default_project
            will be used.
"""
if project:
LOG.warning(_LW("The project argument is deprecated, "
"use default_project instead."))
default_project_id = base.getid(default_project) or base.getid(project)
user_data = base.filter_none(name=name,
domain_id=base.getid(domain),
default_project_id=default_project_id,
password=password,
email=email,
description=description,
enabled=enabled,
**kwargs)
return self._update('/users/%s' % base.getid(user),
{'user': user_data},
'user',
method='PATCH',
log=False)
def update_password(self, old_password, new_password):
"""Update the password for the user the token belongs to."""
if not (old_password and new_password):
msg = _('Specify both the current password and a new password')
raise exceptions.ValidationError(msg)
if old_password == new_password:
msg = _('Old password and new password must be different.')
raise exceptions.ValidationError(msg)
params = {'user': {'password': new_password,
'original_password': old_password}}
base_url = '/users/%s/password' % self.api.user_id
return self._update(base_url, params, method='POST', log=False,
endpoint_filter={'interface': 'public'})
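    # Usage sketch (illustrative): keystone.users.update_password('old', 'new')
    # changes the password of the user that owns the current token by POSTing
    # to /users/<id>/password on the public endpoint.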
def add_to_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).put(
base_url=base_url,
user_id=base.getid(user))
def check_in_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).head(
base_url=base_url,
user_id=base.getid(user))
def remove_from_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).delete(
base_url=base_url,
user_id=base.getid(user))
def delete(self, user):
return super(UserManager, self).delete(
user_id=base.getid(user))
|
from .client import SearchServiceClient
from .async_client import SearchServiceAsyncClient
__all__ = (
"SearchServiceClient",
"SearchServiceAsyncClient",
)
|
import unittest
from typing import List, Dict
import google.api_core.exceptions
from google.cloud.bigtable.column_family import MaxVersionsGCRule
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.table import ClusterState
from parameterized import parameterized
from airflow import AirflowException
from airflow.contrib.operators.gcp_bigtable_operator import \
BigtableInstanceDeleteOperator, \
BigtableTableDeleteOperator, \
BigtableTableCreateOperator, \
BigtableTableWaitForReplicationSensor, \
BigtableClusterUpdateOperator, \
BigtableInstanceCreateOperator
from tests.compat import mock
PROJECT_ID = 'test_project_id'
INSTANCE_ID = 'test-instance-id'
CLUSTER_ID = 'test-cluster-id'
CLUSTER_ZONE = 'us-central1-f'
GCP_CONN_ID = 'test-gcp-conn-id'
NODES = 5
TABLE_ID = 'test-table-id'
INITIAL_SPLIT_KEYS = [] # type: List
EMPTY_COLUMN_FAMILIES = {} # type: Dict
class BigtableInstanceCreateTest(unittest.TestCase):
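    # The parameterized cases below each blank out one required constructor
    # argument; testcase_func_name renames the generated tests after the
    # missing attribute.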
@parameterized.expand([
('instance_id', PROJECT_ID, '', CLUSTER_ID, CLUSTER_ZONE),
('main_cluster_id', PROJECT_ID, INSTANCE_ID, '', CLUSTER_ZONE),
('main_cluster_zone', PROJECT_ID, INSTANCE_ID, CLUSTER_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id,
main_cluster_id,
main_cluster_zone, mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableInstanceCreateOperator(
project_id=project_id,
instance_id=instance_id,
main_cluster_id=main_cluster_id,
main_cluster_zone=main_cluster_zone,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_create_instance_that_exists(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
op = BigtableInstanceCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_instance.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_create_instance_that_exists_empty_project_id(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
op = BigtableInstanceCreateOperator(
instance_id=INSTANCE_ID,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_instance.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_different_error_reraised(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
op = BigtableInstanceCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
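        # Wrapping the exception in a Mock side_effect makes the mocked
        # create_instance raise GoogleAPICallError when called, which the
        # operator is expected to re-raise unchanged.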
mock_hook.return_value.create_instance.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError('error'))
with self.assertRaises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_instance.assert_called_once_with(
cluster_nodes=None,
cluster_storage_type=None,
instance_display_name=None,
instance_id=INSTANCE_ID,
instance_labels=None,
instance_type=None,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
project_id=PROJECT_ID,
replica_cluster_id=None,
replica_cluster_zone=None,
timeout=None
)
class BigtableClusterUpdateTest(unittest.TestCase):
@parameterized.expand([
('instance_id', PROJECT_ID, '', CLUSTER_ID, NODES),
('cluster_id', PROJECT_ID, INSTANCE_ID, '', NODES),
('nodes', PROJECT_ID, INSTANCE_ID, CLUSTER_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id,
cluster_id, nodes, mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableClusterUpdateOperator(
project_id=project_id,
instance_id=instance_id,
cluster_id=cluster_id,
nodes=nodes,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_but_instance_does_not_exists(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
err = e.exception
self.assertEqual(str(err), "Dependency: instance '{}' does not exist.".format(
INSTANCE_ID))
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_cluster.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_but_instance_does_not_exists_empty_project_id(self,
mock_hook):
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
err = e.exception
self.assertEqual(str(err), "Dependency: instance '{}' does not exist.".format(
INSTANCE_ID))
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_cluster.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_that_does_not_exists(self, mock_hook):
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.update_cluster.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Cluster not found."))
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Dependency: cluster '{}' does not exist for instance '{}'.".format(
CLUSTER_ID, INSTANCE_ID)
)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_cluster.assert_called_once_with(
instance=instance, cluster_id=CLUSTER_ID, nodes=NODES)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_that_does_not_exists_empty_project_id(self, mock_hook):
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.update_cluster.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Cluster not found."))
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Dependency: cluster '{}' does not exist for instance '{}'.".format(
CLUSTER_ID, INSTANCE_ID)
)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_cluster.assert_called_once_with(
instance=instance, cluster_id=CLUSTER_ID, nodes=NODES)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_different_error_reraised(self, mock_hook):
op = BigtableClusterUpdateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.update_cluster.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError('error'))
with self.assertRaises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_cluster.assert_called_once_with(
instance=instance, cluster_id=CLUSTER_ID, nodes=NODES)
class BigtableInstanceDeleteTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_delete_execute(self, mock_hook):
op = BigtableInstanceDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_delete_execute_empty_project_id(self, mock_hook):
op = BigtableInstanceDeleteOperator(
instance_id=INSTANCE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=None,
instance_id=INSTANCE_ID)
@parameterized.expand([
('instance_id', PROJECT_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id, mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableInstanceDeleteOperator(
project_id=project_id,
instance_id=instance_id,
task_id="id"
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_instance_that_doesnt_exists(self, mock_hook):
op = BigtableInstanceDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.delete_instance.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Instance not found."))
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_instance_that_doesnt_exists_empty_project_id(self, mock_hook):
op = BigtableInstanceDeleteOperator(
instance_id=INSTANCE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.delete_instance.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Instance not found."))
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=None,
instance_id=INSTANCE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_different_error_reraised(self, mock_hook):
op = BigtableInstanceDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.delete_instance.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError('error'))
with self.assertRaises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID)
class BigtableTableDeleteTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_delete_execute(self, mock_hook):
op = BigtableTableDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID)
@parameterized.expand([
('instance_id', PROJECT_ID, '', TABLE_ID),
('table_id', PROJECT_ID, INSTANCE_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id, table_id,
mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableTableDeleteOperator(
project_id=project_id,
instance_id=instance_id,
table_id=table_id,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_table_that_doesnt_exists(self, mock_hook):
op = BigtableTableDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.delete_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Table not found."))
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_table_that_doesnt_exists_empty_project_id(self, mock_hook):
op = BigtableTableDeleteOperator(
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.delete_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Table not found."))
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=None,
instance_id=INSTANCE_ID,
table_id=TABLE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_table_when_instance_doesnt_exists(self, mock_hook):
op = BigtableTableDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op.execute(None)
err = e.exception
self.assertEqual(str(err), "Dependency: instance '{}' does not exist.".format(
INSTANCE_ID))
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_table.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_different_error_reraised(self, mock_hook):
op = BigtableTableDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.delete_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError('error'))
with self.assertRaises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID)
class BigtableTableCreateTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_create_execute(self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_table.assert_called_once_with(
instance=instance,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES)
@parameterized.expand([
('instance_id', PROJECT_ID, '', TABLE_ID),
('table_id', PROJECT_ID, INSTANCE_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id, table_id,
mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableTableCreateOperator(
project_id=project_id,
instance_id=instance_id,
table_id=table_id,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_instance_not_exists(self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Dependency: instance '{}' does not exist in project '{}'.".format(
INSTANCE_ID, PROJECT_ID)
)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_creating_table_that_exists(self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.get_column_families_for_table.return_value = \
EMPTY_COLUMN_FAMILIES
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.create_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.AlreadyExists("Table already exists."))
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_table.assert_called_once_with(
instance=instance,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_creating_table_that_exists_empty_project_id(self, mock_hook):
op = BigtableTableCreateOperator(
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.get_column_families_for_table.return_value = \
EMPTY_COLUMN_FAMILIES
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.create_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.AlreadyExists("Table already exists."))
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_table.assert_called_once_with(
instance=instance,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_creating_table_that_exists_with_different_column_families_ids_in_the_table(
self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.get_column_families_for_table.return_value = {
"existing_family": None}
mock_hook.return_value.create_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.AlreadyExists("Table already exists."))
with self.assertRaises(AirflowException) as e:
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Table '{}' already exists with different Column Families.".format(TABLE_ID)
)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
    def test_creating_table_that_exists_with_different_column_families_gc_rule_in_table(
self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families={"cf-id": MaxVersionsGCRule(1)},
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
cf_mock = mock.Mock()
cf_mock.gc_rule = mock.Mock(return_value=MaxVersionsGCRule(2))
mock_hook.return_value.get_column_families_for_table.return_value = {
"cf-id": cf_mock
}
mock_hook.return_value.create_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.AlreadyExists("Table already exists."))
with self.assertRaises(AirflowException) as e:
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Table '{}' already exists with different Column Families.".format(TABLE_ID)
)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
class BigtableWaitForTableReplicationTest(unittest.TestCase):
@parameterized.expand([
('instance_id', PROJECT_ID, '', TABLE_ID),
('table_id', PROJECT_ID, INSTANCE_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id, table_id,
mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableTableWaitForReplicationSensor(
project_id=project_id,
instance_id=instance_id,
table_id=table_id,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_wait_no_instance(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
op = BigtableTableWaitForReplicationSensor(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
self.assertFalse(op.poke(None))
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_wait_no_table(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.get_cluster_states_for_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Table not found."))
op = BigtableTableWaitForReplicationSensor(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
self.assertFalse(op.poke(None))
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_wait_not_ready(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.get_cluster_states_for_table.return_value = {
"cl-id": ClusterState(0)
}
op = BigtableTableWaitForReplicationSensor(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
self.assertFalse(op.poke(None))
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_wait_ready(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.get_cluster_states_for_table.return_value = {
"cl-id": ClusterState(4)
}
op = BigtableTableWaitForReplicationSensor(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
self.assertTrue(op.poke(None))
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
|
"""
Resource Scheduling Offhours
============================
Custodian provides time-based filters that allow taking periodic action on a
resource, with per-resource schedule customization based on tag values. A
common use is offhours scheduling for ASGs and instances.
Features
========
- Flexible offhours scheduling with opt-in, opt-out selection, and timezone
support.
- Resume during offhours support.
- Can be combined with other filters to get a particular set (
resources with tag, vpc, etc).
- Can be combined with arbitrary actions
Policy Configuration
====================
We provide an `onhour` and an `offhour` time filter; each should be used in a
separate policy, and they support the same configuration options:
- **weekends**: default true, whether to leave resources off for the weekend
- **weekends-only**: default false, whether to turn the resource off only on
the weekend
- **default_tz**: which timezone to utilize when evaluating time **(REQUIRED)**
- **tag**: which resource tag name to use for per-resource configuration
(schedule and timezone overrides and opt-in/opt-out); default is
``maid_offhours``.
- **opt-out**: Determines the behavior for resources which do not have a tag
matching the one specified for **tag**. Values can be either ``false`` (the
default) where the policy operates on an opt-in basis and resources must have
the tag in order to be acted on by the policy, or ``true`` where the policy
operates on an opt-out basis, and resources without the tag are acted on by
the policy.
- **onhour**: the default time to start/run resources, specified as 0-23
- **offhour**: the default time to stop/suspend resources, specified as 0-23
This example policy overrides most of the defaults for an offhour policy:
.. code-block:: yaml
policies:
- name: offhours-stop
resource: ec2
filters:
- type: offhour
weekends: false
default_tz: pt
tag: downtime
opt-out: true
onhour: 8
offhour: 20
Tag Based Configuration
=======================
Resources can use a special tag to override the default configuration on a
per-resource basis. Note that the name of the tag is configurable via the
``tag`` option in the policy; the examples below use the default tag name,
``maid_offhours``.
The value of the tag must be one of the following:
- **(empty)** or **on** - An empty tag value or a value of "on" implies night
and weekend offhours using the default time zone configured in the policy
  (``et`` if unspecified) and the default onhour and offhour values configured
in the policy.
- **off** - If offhours is configured to run in opt-out mode, this tag can be
specified to disable offhours on a given instance. If offhours is configured
to run in opt-in mode, this tag will have no effect (the resource will still
be opted out).
- a semicolon-separated string composed of one or more of the following
components, which override the defaults specified in the policy:
* ``tz=<timezone>`` to evaluate with a resource-specific timezone, where
``<timezone>`` is either one of the supported timezone aliases defined in
:py:attr:`c7n.filters.offhours.Time.TZ_ALIASES` (such as ``pt``) or the name
of a geographic timezone identifier in
    `IANA's tz database <https://www.iana.org/time-zones>`_, such as
    ``America/Los_Angeles``. *(Note all timezone aliases are
referenced to a locality to ensure taking into account local daylight
savings time, if applicable.)*
* ``off=(time spec)`` and/or ``on=(time spec)`` matching time specifications
supported by :py:class:`c7n.filters.offhours.ScheduleParser` as described
in the next section.
ScheduleParser Time Specifications
----------------------------------
Each time specification follows the format ``(days,hours)``. Multiple time
specifications can be combined in square-bracketed lists, i.e.
``[(days,hours),(days,hours),(days,hours)]``.
**Examples**::
# up mon-fri from 7am-7pm; eastern time
off=(M-F,19);on=(M-F,7)
# up mon-fri from 6am-9pm; up sun from 10am-6pm; pacific time
off=[(M-F,21),(U,18)];on=[(M-F,6),(U,10)];tz=pt
**Possible values**:
+------------+----------------------+
| field | values |
+============+======================+
| days | M, T, W, H, F, S, U |
+------------+----------------------+
| hours | 0, 1, 2, ..., 22, 23 |
+------------+----------------------+
Days can be specified in a range (ex. M-F).
Policy examples
===============
Turn ec2 instances on and off
.. code-block:: yaml
policies:
- name: offhours-stop
resource: ec2
filters:
- type: offhour
actions:
- stop
- name: offhours-start
resource: ec2
filters:
- type: onhour
actions:
- start
Here's how to do the same with auto scaling groups
.. code-block:: yaml
policies:
- name: asg-offhours-stop
resource: asg
filters:
- offhour
actions:
- suspend
- name: asg-onhours-start
resource: asg
filters:
- onhour
actions:
- resume
Additional policy examples and resource-type-specific information can be seen in
the :ref:`EC2 Offhours <ec2offhours>` and :ref:`ASG Offhours <asgoffhours>`
use cases.
Resume During Offhours
======================
These policies are evaluated hourly; during each run (once an hour),
cloud-custodian will act on **only** the resources tagged for that **exact**
hour. In other words, if a resource has an offhours policy of
stopping/suspending at 23:00 Eastern daily and starting/resuming at 06:00
Eastern daily, and you run cloud-custodian once an hour via Lambda, that
resource will only be stopped once a day sometime between 23:00 and 23:59, and
will only be started once a day sometime between 06:00 and 06:59. If the current
hour does not *exactly* match the hour specified in the policy, nothing will be
done at all.
As a result of this, if custodian stops an instance or suspends an ASG and you
need to start/resume it, you can safely do so manually and custodian won't touch
it again until the next day.
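For example, a tag value expressing the daily 23:00-off / 06:00-on Eastern
schedule described above (shown purely for illustration) would be::
    off=(M-U,23);on=(M-U,6);tz=et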
ElasticBeanstalk, EFS and Other Services with Tag Value Restrictions
====================================================================
A number of AWS services have restrictions on the characters that can be used
in tag values, such as `ElasticBeanstalk
<http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/using-features.tagging.html>`_
and `EFS <http://docs.aws.amazon.com/efs/latest/ug/API_Tag.html>`_. In
particular, these services do not allow parentheses, square brackets, commas,
semicolons, or empty tag values. This
proves to be problematic with the tag-based schedule configuration described
above. The best current workaround is to define a separate policy with a unique
``tag`` name for each unique schedule that you want to use, and then tag
resources with that tag name and a value of ``on``. Note that this can only be
used in opt-in mode, not opt-out.
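As an illustrative sketch (the policy and tag names below are hypothetical),
such a per-schedule policy might look like the following; resources opting in
would then be tagged ``custodian_downtime_evening`` with a value of ``on``:
.. code-block:: yaml
    policies:
      - name: offhours-stop-evening-schedule
        resource: ec2
        filters:
          - type: offhour
            default_tz: et
            tag: custodian_downtime_evening
            offhour: 19
        actions:
          - stop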
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import logging
from os.path import join
from dateutil import zoneinfo
from c7n.filters import Filter, FilterValidationError
from c7n.utils import type_schema, dumps
log = logging.getLogger('custodian.offhours')
def brackets_removed(u):
return u.translate({ord('['): None, ord(']'): None})
def parens_removed(u):
return u.translate({ord('('): None, ord(')'): None})
class Time(Filter):
schema = {
'type': 'object',
'properties': {
'tag': {'type': 'string'},
'default_tz': {'type': 'string'},
'weekends': {'type': 'boolean'},
'weekends-only': {'type': 'boolean'},
'opt-out': {'type': 'boolean'},
}
}
time_type = None
# Defaults and constants
DEFAULT_TAG = "maid_offhours"
DEFAULT_TZ = 'et'
TZ_ALIASES = {
'pdt': 'America/Los_Angeles',
'pt': 'America/Los_Angeles',
'pst': 'America/Los_Angeles',
'ast': 'America/Phoenix',
'at': 'America/Phoenix',
'est': 'America/New_York',
'edt': 'America/New_York',
'et': 'America/New_York',
'cst': 'America/Chicago',
'cdt': 'America/Chicago',
'ct': 'America/Chicago',
'mst': 'America/Denver',
'mdt': 'America/Denver',
'mt': 'America/Denver',
'gmt': 'Etc/GMT',
'gt': 'Etc/GMT',
'bst': 'Europe/London',
'ist': 'Europe/Dublin',
'cet': 'Europe/Berlin',
        # 'it' is used for Indian Standard Time, since 'ist' above is taken by
        # Irish Standard Time (Europe/Dublin)
'it': 'Asia/Kolkata',
'jst': 'Asia/Tokyo',
'kst': 'Asia/Seoul',
'sgt': 'Asia/Singapore',
'aet': 'Australia/Sydney',
'brt': 'America/Sao_Paulo'
}
def __init__(self, data, manager=None):
super(Time, self).__init__(data, manager)
self.default_tz = self.data.get('default_tz', self.DEFAULT_TZ)
self.weekends = self.data.get('weekends', True)
self.weekends_only = self.data.get('weekends-only', False)
self.opt_out = self.data.get('opt-out', False)
self.tag_key = self.data.get('tag', self.DEFAULT_TAG).lower()
self.default_schedule = self.get_default_schedule()
self.parser = ScheduleParser(self.default_schedule)
self.id_key = None
self.opted_out = []
self.parse_errors = []
self.enabled_count = 0
def validate(self):
if self.get_tz(self.default_tz) is None:
raise FilterValidationError(
"Invalid timezone specified %s" % self.default_tz)
hour = self.data.get("%shour" % self.time_type, self.DEFAULT_HR)
if hour not in self.parser.VALID_HOURS:
raise FilterValidationError("Invalid hour specified %s" % hour)
return self
def process(self, resources, event=None):
resources = super(Time, self).process(resources)
if self.parse_errors and self.manager and self.manager.log_dir:
self.log.warning("parse errors %d", len(self.parse_errors))
with open(join(
self.manager.log_dir, 'parse_errors.json'), 'w') as fh:
dumps(self.parse_errors, fh=fh)
self.parse_errors = []
if self.opted_out and self.manager and self.manager.log_dir:
self.log.debug("disabled count %d", len(self.opted_out))
with open(join(
self.manager.log_dir, 'opted_out.json'), 'w') as fh:
dumps(self.opted_out, fh=fh)
self.opted_out = []
return resources
def __call__(self, i):
value = self.get_tag_value(i)
        # Delayed init due to a circular dependency; doing this in process()
        # or __init__ would be better, but unit tests call this directly.
if self.id_key is None:
self.id_key = (
self.manager is None and 'InstanceId' or self.manager.get_model().id)
        # The resource tag is not present; if we're not running in opt-out
        # mode, we're done.
if value is False:
if not self.opt_out:
return False
value = "" # take the defaults
# Resource opt out, track and record
if 'off' == value:
self.opted_out.append(i)
return False
else:
self.enabled_count += 1
try:
return self.process_resource_schedule(i, value, self.time_type)
        except Exception:
log.exception(
"%s failed to process resource:%s value:%s",
self.__class__.__name__, i[self.id_key], value)
return False
def process_resource_schedule(self, i, value, time_type):
"""Does the resource tag schedule and policy match the current time."""
rid = i[self.id_key]
        # Normalize trailing semicolons so the schedule parser can handle
        # values like 'off=(m-f,1);' properly; without this normalization,
        # some cases would silently fail.
value = ';'.join(filter(None, value.split(';')))
if self.parser.has_resource_schedule(value, time_type):
schedule = self.parser.parse(value)
elif self.parser.keys_are_valid(value):
# respect timezone from tag
raw_data = self.parser.raw_data(value)
if 'tz' in raw_data:
schedule = dict(self.default_schedule)
schedule['tz'] = raw_data['tz']
else:
schedule = self.default_schedule
else:
schedule = None
if schedule is None:
log.warning(
"Invalid schedule on resource:%s value:%s", rid, value)
self.parse_errors.append((rid, value))
return False
tz = self.get_tz(schedule['tz'])
if not tz:
log.warning(
"Could not resolve tz on resource:%s value:%s", rid, value)
self.parse_errors.append((rid, value))
return False
now = datetime.datetime.now(tz).replace(
minute=0, second=0, microsecond=0)
return self.match(now, schedule)
def match(self, now, schedule):
time = schedule.get(self.time_type, ())
for item in time:
days, hour = item.get("days"), item.get('hour')
if now.weekday() in days and now.hour == hour:
return True
return False
def get_tag_value(self, i):
"""Get the resource's tag value specifying its schedule."""
        # Look for the tag, normalizing the tag key and tag value
found = False
for t in i.get('Tags', ()):
if t['Key'].lower() == self.tag_key:
found = t['Value']
break
if found is False:
return False
# enforce utf8, or do translate tables via unicode ord mapping
value = found.lower().encode('utf8').decode('utf8')
        # Some folks interpret the quote marks in the docs as literal parts
        # of the value, so strip them.
value = value.strip("'").strip('"')
return value
@classmethod
def get_tz(cls, tz):
return zoneinfo.gettz(cls.TZ_ALIASES.get(tz, tz))
def get_default_schedule(self):
raise NotImplementedError("use subclass")
class OffHour(Time):
schema = type_schema(
'offhour', rinherit=Time.schema, required=['offhour', 'default_tz'],
offhour={'type': 'integer', 'minimum': 0, 'maximum': 23})
time_type = "off"
DEFAULT_HR = 19
def get_default_schedule(self):
default = {'tz': self.default_tz, self.time_type: [
{'hour': self.data.get(
"%shour" % self.time_type, self.DEFAULT_HR)}]}
if self.weekends_only:
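            # turn off friday (weekday 4) so the resource stays off over the weekend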
default[self.time_type][0]['days'] = [4]
elif self.weekends:
default[self.time_type][0]['days'] = tuple(range(5))
else:
default[self.time_type][0]['days'] = tuple(range(7))
return default
class OnHour(Time):
schema = type_schema(
'onhour', rinherit=Time.schema, required=['onhour', 'default_tz'],
onhour={'type': 'integer', 'minimum': 0, 'maximum': 23})
time_type = "on"
DEFAULT_HR = 7
def get_default_schedule(self):
default = {'tz': self.default_tz, self.time_type: [
{'hour': self.data.get(
"%shour" % self.time_type, self.DEFAULT_HR)}]}
if self.weekends_only:
# turn on monday
default[self.time_type][0]['days'] = [0]
elif self.weekends:
default[self.time_type][0]['days'] = tuple(range(5))
else:
default[self.time_type][0]['days'] = tuple(range(7))
return default
class ScheduleParser(object):
"""Parses tag values for custom on/off hours schedules.
    At a minimum the ``on`` and ``off`` values are required. Each of
    these must be separated by a ``;`` in the format described below.
**Schedule format**::
# up mon-fri from 7am-7pm; eastern time
off=(M-F,19);on=(M-F,7)
# up mon-fri from 6am-9pm; up sun from 10am-6pm; pacific time
off=[(M-F,21),(U,18)];on=[(M-F,6),(U,10)];tz=pt
**Possible values**:
+------------+----------------------+
| field | values |
+============+======================+
| days | M, T, W, H, F, S, U |
+------------+----------------------+
| hours | 0, 1, 2, ..., 22, 23 |
+------------+----------------------+
Days can be specified in a range (ex. M-F).
    If the timezone is not supplied, it is assumed to be ET (eastern time), but
    this default is configurable.
**Parser output**:
The schedule parser will return a ``dict`` or ``None`` (if the schedule is
invalid)::
# off=[(M-F,21),(U,18)];on=[(M-F,6),(U,10)];tz=pt
{
off: [
{ days: "M-F", hour: 21 },
{ days: "U", hour: 18 }
],
on: [
{ days: "M-F", hour: 6 },
{ days: "U", hour: 10 }
],
tz: "pt"
}
"""
DAY_MAP = {'m': 0, 't': 1, 'w': 2, 'h': 3, 'f': 4, 's': 5, 'u': 6}
VALID_HOURS = tuple(range(24))
def __init__(self, default_schedule):
self.default_schedule = default_schedule
self.cache = {}
@staticmethod
def raw_data(tag_value):
"""convert the tag to a dictionary, taking values as is
This method name and purpose are opaque... and not true.
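        For example (illustrative)::
            raw_data("off=(m-f,19);on=(m-f,7);tz=pt")
            # -> {'off': '(m-f,19)', 'on': '(m-f,7)', 'tz': 'pt'}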
"""
data = {}
pieces = []
for p in tag_value.split(' '):
pieces.extend(p.split(';'))
# parse components
for piece in pieces:
kv = piece.split('=')
            # components must be key=value
if not len(kv) == 2:
continue
key, value = kv
data[key] = value
return data
def keys_are_valid(self, tag_value):
"""test that provided tag keys are valid"""
for key in ScheduleParser.raw_data(tag_value):
if key not in ('on', 'off', 'tz'):
return False
return True
def parse(self, tag_value):
# check the cache
if tag_value in self.cache:
return self.cache[tag_value]
schedule = {}
if not self.keys_are_valid(tag_value):
return None
# parse schedule components
pieces = tag_value.split(';')
for piece in pieces:
kv = piece.split('=')
            # components must be key=value
if not len(kv) == 2:
return None
key, value = kv
if key != 'tz':
value = self.parse_resource_schedule(value)
if value is None:
return None
schedule[key] = value
# add default timezone, if none supplied or blank
if not schedule.get('tz'):
schedule['tz'] = self.default_schedule['tz']
# cache
self.cache[tag_value] = schedule
return schedule
@staticmethod
def has_resource_schedule(tag_value, time_type):
raw_data = ScheduleParser.raw_data(tag_value)
# note time_type is set to 'on' or 'off' and raw_data is a dict
return time_type in raw_data
def parse_resource_schedule(self, lexeme):
parsed = []
exprs = brackets_removed(lexeme).split(',(')
for e in exprs:
tokens = parens_removed(e).split(',')
# custom hours must have two parts: (<days>, <hour>)
if not len(tokens) == 2:
return None
if not tokens[1].isdigit():
return None
hour = int(tokens[1])
if hour not in self.VALID_HOURS:
return None
days = self.expand_day_range(tokens[0])
if not days:
return None
parsed.append({'days': days, 'hour': hour})
return parsed
def expand_day_range(self, days):
# single day specified
if days in self.DAY_MAP:
return [self.DAY_MAP[days]]
day_range = [d for d in map(self.DAY_MAP.get, days.split('-'))
if d is not None]
if not len(day_range) == 2:
return None
# support wrap around days aka friday-monday = 4,5,6,0
if day_range[0] > day_range[1]:
return list(range(day_range[0], 7)) + list(range(day_range[1] + 1))
return list(range(min(day_range), max(day_range) + 1))
|
from flask import Flask, Response, make_response
from video_stream_handler import stream_handler
import logging
import cv2
logging.basicConfig(level=logging.DEBUG)
thetav = None
app = Flask(__name__, static_url_path='/public', static_folder='../')
@app.route('/video_feed')
def video_feed():
cap = cv2.VideoCapture(0)
# cap.set(3, 3840)
# cap.set(4, 1920)
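    # stream_handler is expected to yield JPEG frames as multipart chunks
    # separated by the 'frame' boundary (MJPEG-over-HTTP streaming).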
return Response(stream_handler(cap), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True)
|
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('dash', '0002_remove_post_origin'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='id',
field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False),
),
]
|
"""Container for Google Cloud Bigtable Cells and Streaming Row Contents."""
import copy
import six
from gcloud._helpers import _datetime_from_microseconds
from gcloud._helpers import _to_bytes
class Cell(object):
"""Representation of a Google Cloud Bigtable Cell.
:type value: bytes
:param value: The value stored in the cell.
:type timestamp: :class:`datetime.datetime`
:param timestamp: The timestamp when the cell was stored.
:type labels: list
:param labels: (Optional) List of strings. Labels applied to the cell.
"""
def __init__(self, value, timestamp, labels=()):
self.value = value
self.timestamp = timestamp
self.labels = list(labels)
@classmethod
def from_pb(cls, cell_pb):
"""Create a new cell from a Cell protobuf.
:type cell_pb: :class:`._generated.data_pb2.Cell`
:param cell_pb: The protobuf to convert.
:rtype: :class:`Cell`
:returns: The cell corresponding to the protobuf.
"""
timestamp = _datetime_from_microseconds(cell_pb.timestamp_micros)
if cell_pb.labels:
return cls(cell_pb.value, timestamp, labels=cell_pb.labels)
else:
return cls(cell_pb.value, timestamp)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.value == self.value and
other.timestamp == self.timestamp and
other.labels == self.labels)
def __ne__(self, other):
return not self.__eq__(other)
class PartialCellData(object):
"""Representation of partial cell in a Google Cloud Bigtable Table.
These are expected to be updated directly from a
:class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse`
:type row_key: bytes
:param row_key: The key for the row holding the (partial) cell.
:type family_name: str
:param family_name: The family name of the (partial) cell.
:type qualifier: bytes
:param qualifier: The column qualifier of the (partial) cell.
:type timestamp_micros: int
    :param timestamp_micros: The timestamp (in microseconds) of the
(partial) cell.
:type labels: list of str
:param labels: labels assigned to the (partial) cell
:type value: bytes
:param value: The (accumulated) value of the (partial) cell.
"""
def __init__(self, row_key, family_name, qualifier, timestamp_micros,
labels=(), value=b''):
self.row_key = row_key
self.family_name = family_name
self.qualifier = qualifier
self.timestamp_micros = timestamp_micros
self.labels = labels
self.value = value
def append_value(self, value):
"""Append bytes from a new chunk to value.
:type value: bytes
:param value: bytes to append
"""
self.value += value
class PartialRowData(object):
"""Representation of partial row in a Google Cloud Bigtable Table.
These are expected to be updated directly from a
:class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse`
:type row_key: bytes
:param row_key: The key for the row holding the (partial) data.
"""
def __init__(self, row_key):
self._row_key = row_key
self._cells = {}
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other._row_key == self._row_key and
other._cells == self._cells)
def __ne__(self, other):
return not self.__eq__(other)
def to_dict(self):
"""Convert the cells to a dictionary.
This is intended to be used with HappyBase, so the column family and
        column qualifiers are combined (with ``:``).
:rtype: dict
:returns: Dictionary containing all the data in the cells of this row.
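        For example (illustrative), a row with column family ``cf1`` and
        qualifier ``greeting`` yields a key of ``b'cf1:greeting'`` mapping to
        the list of :class:`Cell` objects for that column.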
"""
result = {}
for column_family_id, columns in six.iteritems(self._cells):
for column_qual, cells in six.iteritems(columns):
key = (_to_bytes(column_family_id) + b':' +
_to_bytes(column_qual))
result[key] = cells
return result
@property
def cells(self):
"""Property returning all the cells accumulated on this partial row.
:rtype: dict
:returns: Dictionary of the :class:`Cell` objects accumulated. This
dictionary has two-levels of keys (first for column families
and second for column names/qualifiers within a family). For
a given column, a list of :class:`Cell` objects is stored.
"""
return copy.deepcopy(self._cells)
@property
def row_key(self):
"""Getter for the current (partial) row's key.
:rtype: bytes
:returns: The current (partial) row's key.
"""
return self._row_key
class InvalidReadRowsResponse(RuntimeError):
"""Exception raised to to invalid response data from back-end."""
class InvalidChunk(RuntimeError):
"""Exception raised to to invalid chunk data from back-end."""
class PartialRowsData(object):
"""Convenience wrapper for consuming a ``ReadRows`` streaming response.
:type response_iterator:
:class:`grpc.framework.alpha._reexport._CancellableIterator`
:param response_iterator: A streaming iterator returned from a
``ReadRows`` request.
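    A minimal consumption sketch (illustrative only; ``response_iterator`` is
    assumed to come from an already-issued ``ReadRows`` request)::
        rows_data = PartialRowsData(response_iterator)
        rows_data.consume_all()
        for row_key, row in rows_data.rows.items():
            cells = row.cells  # family -> qualifier -> [Cell, ...]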
"""
START = "Start" # No responses yet processed.
NEW_ROW = "New row" # No cells yet complete for row
ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row
CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row
def __init__(self, response_iterator):
self._response_iterator = response_iterator
# Fully-processed rows, keyed by `row_key`
self._rows = {}
# Counter for responses pulled from iterator
self._counter = 0
# Maybe cached from previous response
self._last_scanned_row_key = None
# In-progress row, unset until first response, after commit/reset
self._row = None
# Last complete row, unset until first commit
self._previous_row = None
# In-progress cell, unset until first response, after completion
self._cell = None
# Last complete cell, unset until first completion, after new row
self._previous_cell = None
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other._response_iterator == self._response_iterator
def __ne__(self, other):
return not self.__eq__(other)
@property
def state(self):
"""State machine state.
:rtype: str
        :returns: name of state corresponding to current row / chunk
processing.
"""
if self._last_scanned_row_key is None:
return self.START
if self._row is None:
assert self._cell is None
assert self._previous_cell is None
return self.NEW_ROW
if self._cell is not None:
return self.CELL_IN_PROGRESS
if self._previous_cell is not None:
return self.ROW_IN_PROGRESS
return self.NEW_ROW # row added, no chunk yet processed
@property
def rows(self):
"""Property returning all rows accumulated from the stream.
:rtype: dict
:returns: row_key -> :class:`PartialRowData`.
"""
# NOTE: To avoid duplicating large objects, this is just the
# mutable private data.
return self._rows
def cancel(self):
"""Cancels the iterator, closing the stream."""
self._response_iterator.cancel()
def consume_next(self):
"""Consume the next ``ReadRowsResponse`` from the stream.
Parse the response and its chunks into a new/existing row in
:attr:`_rows`
"""
response = six.next(self._response_iterator)
self._counter += 1
if self._last_scanned_row_key is None: # first response
if response.last_scanned_row_key:
raise InvalidReadRowsResponse()
self._last_scanned_row_key = response.last_scanned_row_key
row = self._row
cell = self._cell
for chunk in response.chunks:
self._validate_chunk(chunk)
if chunk.reset_row:
row = self._row = None
cell = self._cell = self._previous_cell = None
continue
if row is None:
row = self._row = PartialRowData(chunk.row_key)
if cell is None:
cell = self._cell = PartialCellData(
chunk.row_key,
chunk.family_name.value,
chunk.qualifier.value,
chunk.timestamp_micros,
chunk.labels,
chunk.value)
self._copy_from_previous(cell)
else:
cell.append_value(chunk.value)
if chunk.commit_row:
self._save_current_row()
row = cell = None
continue
if chunk.value_size == 0:
self._save_current_cell()
cell = None
def consume_all(self, max_loops=None):
"""Consume the streamed responses until there are no more.
This simply calls :meth:`consume_next` until there are no
more to consume.
:type max_loops: int
:param max_loops: (Optional) Maximum number of times to try to consume
an additional ``ReadRowsResponse``. You can use this
to avoid long wait times.
"""
curr_loop = 0
if max_loops is None:
max_loops = float('inf')
while curr_loop < max_loops:
curr_loop += 1
try:
self.consume_next()
except StopIteration:
break
@staticmethod
def _validate_chunk_status(chunk):
"""Helper for :meth:`_validate_chunk_row_in_progress`, etc."""
        # No reset with other keys
if chunk.reset_row:
_raise_if(chunk.row_key)
_raise_if(chunk.HasField('family_name'))
_raise_if(chunk.HasField('qualifier'))
_raise_if(chunk.timestamp_micros)
_raise_if(chunk.labels)
_raise_if(chunk.value_size)
_raise_if(chunk.value)
# No commit with value size
_raise_if(chunk.commit_row and chunk.value_size > 0)
# No negative value_size (inferred as a general constraint).
_raise_if(chunk.value_size < 0)
def _validate_chunk_new_row(self, chunk):
"""Helper for :meth:`_validate_chunk`."""
assert self.state == self.NEW_ROW
_raise_if(chunk.reset_row)
_raise_if(not chunk.row_key)
_raise_if(not chunk.family_name)
_raise_if(not chunk.qualifier)
# This constraint is not enforced in the Go example.
_raise_if(chunk.value_size > 0 and chunk.commit_row is not False)
# This constraint is from the Go example, not the spec.
_raise_if(self._previous_row is not None and
chunk.row_key <= self._previous_row.row_key)
def _same_as_previous(self, chunk):
"""Helper for :meth:`_validate_chunk_row_in_progress`"""
previous = self._previous_cell
return (chunk.row_key == previous.row_key and
chunk.family_name == previous.family_name and
chunk.qualifier == previous.qualifier and
chunk.labels == previous.labels)
def _validate_chunk_row_in_progress(self, chunk):
"""Helper for :meth:`_validate_chunk`"""
assert self.state == self.ROW_IN_PROGRESS
self._validate_chunk_status(chunk)
if not chunk.HasField('commit_row') and not chunk.reset_row:
_raise_if(not chunk.timestamp_micros or not chunk.value)
_raise_if(chunk.row_key and
chunk.row_key != self._row.row_key)
_raise_if(chunk.HasField('family_name') and
not chunk.HasField('qualifier'))
previous = self._previous_cell
_raise_if(self._same_as_previous(chunk) and
chunk.timestamp_micros <= previous.timestamp_micros)
def _validate_chunk_cell_in_progress(self, chunk):
"""Helper for :meth:`_validate_chunk`"""
assert self.state == self.CELL_IN_PROGRESS
self._validate_chunk_status(chunk)
self._copy_from_current(chunk)
def _validate_chunk(self, chunk):
"""Helper for :meth:`consume_next`."""
if self.state == self.NEW_ROW:
self._validate_chunk_new_row(chunk)
if self.state == self.ROW_IN_PROGRESS:
self._validate_chunk_row_in_progress(chunk)
if self.state == self.CELL_IN_PROGRESS:
self._validate_chunk_cell_in_progress(chunk)
def _save_current_cell(self):
"""Helper for :meth:`consume_next`."""
row, cell = self._row, self._cell
family = row._cells.setdefault(cell.family_name, {})
qualified = family.setdefault(cell.qualifier, [])
complete = Cell.from_pb(self._cell)
qualified.append(complete)
self._cell, self._previous_cell = None, cell
def _copy_from_current(self, chunk):
"""Helper for :meth:`consume_next`."""
current = self._cell
if current is not None:
if not chunk.row_key:
chunk.row_key = current.row_key
if not chunk.HasField('family_name'):
chunk.family_name.value = current.family_name
if not chunk.HasField('qualifier'):
chunk.qualifier.value = current.qualifier
if not chunk.timestamp_micros:
chunk.timestamp_micros = current.timestamp_micros
if not chunk.labels:
chunk.labels.extend(current.labels)
def _copy_from_previous(self, cell):
"""Helper for :meth:`consume_next`."""
previous = self._previous_cell
if previous is not None:
if not cell.row_key:
cell.row_key = previous.row_key
if not cell.family_name:
cell.family_name = previous.family_name
if not cell.qualifier:
cell.qualifier = previous.qualifier
def _save_current_row(self):
"""Helper for :meth:`consume_next`."""
if self._cell:
self._save_current_cell()
self._rows[self._row.row_key] = self._row
self._row, self._previous_row = None, self._row
self._previous_cell = None
def _raise_if(predicate, *args):
"""Helper for validation methods."""
if predicate:
raise InvalidChunk(*args)
|
'''This module provides helper functions for Gnome/GLib related
functionality such as gobject-introspection and gresources.'''
import build
import os, sys
import subprocess
from coredata import MesonException
import mlog
class GnomeModule:
def compile_resources(self, state, args, kwargs):
cmd = ['glib-compile-resources', '@INPUT@', '--generate']
if 'source_dir' in kwargs:
d = os.path.join(state.build_to_src, state.subdir, kwargs.pop('source_dir'))
cmd += ['--sourcedir', d]
if 'c_name' in kwargs:
cmd += ['--c-name', kwargs.pop('c_name')]
cmd += ['--target', '@OUTPUT@']
kwargs['command'] = cmd
output_c = args[0] + '.c'
output_h = args[0] + '.h'
kwargs['input'] = args[1]
kwargs['output'] = output_c
target_c = build.CustomTarget(args[0]+'_c', state.subdir, kwargs)
kwargs['output'] = output_h
target_h = build.CustomTarget(args[0] + '_h', state.subdir, kwargs)
return [target_c, target_h]
def generate_gir(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Gir takes one argument')
girtarget = args[0]
while hasattr(girtarget, 'held_object'):
girtarget = girtarget.held_object
if not isinstance(girtarget, (build.Executable, build.SharedLibrary)):
raise MesonException('Gir target must be an executable or shared library')
pkgstr = subprocess.check_output(['pkg-config', '--cflags', 'gobject-introspection-1.0'])
pkgargs = pkgstr.decode().strip().split()
ns = kwargs.pop('namespace')
nsversion = kwargs.pop('nsversion')
libsources = kwargs.pop('sources')
girfile = '%s-%s.gir' % (ns, nsversion)
depends = [girtarget]
scan_command = ['g-ir-scanner', '@INPUT@']
scan_command += pkgargs
scan_command += ['--namespace='+ns, '--nsversion=' + nsversion, '--warn-all',
'--output', '@OUTPUT@']
for incdirs in girtarget.include_dirs:
for incdir in incdirs.get_incdirs():
scan_command += ['-I%s' % os.path.join(state.environment.get_source_dir(), incdir)]
if 'link_with' in kwargs:
link_with = kwargs.pop('link_with')
for link in link_with:
lib = link.held_object
scan_command += ['-l%s' % lib.name]
if isinstance(lib, build.SharedLibrary):
scan_command += ['-L%s' %
os.path.join(state.environment.get_build_dir(),
lib.subdir)]
depends.append(lib)
if 'includes' in kwargs:
includes = kwargs.pop('includes')
if isinstance(includes, str):
scan_command += ['--include=%s' % includes]
elif isinstance(includes, list):
scan_command += ['--include=%s' % inc for inc in includes]
else:
raise MesonException('Gir includes must be str or list')
if state.global_args.get('c'):
scan_command += ['--cflags-begin']
scan_command += state.global_args['c']
scan_command += ['--cflags-end']
if kwargs.get('symbol_prefix'):
sym_prefix = kwargs.pop('symbol_prefix')
if not isinstance(sym_prefix, str):
raise MesonException('Gir symbol prefix must be str')
scan_command += ['--symbol-prefix=%s' % sym_prefix]
if kwargs.get('identifier_prefix'):
identifier_prefix = kwargs.pop('identifier_prefix')
if not isinstance(identifier_prefix, str):
raise MesonException('Gir identifier prefix must be str')
scan_command += ['--identifier-prefix=%s' % identifier_prefix]
if kwargs.get('export_packages'):
pkgs = kwargs.pop('export_packages')
if isinstance(pkgs, str):
scan_command += ['--pkg-export=%s' % pkgs]
elif isinstance(pkgs, list):
scan_command += ['--pkg-export=%s' % pkg for pkg in pkgs]
else:
raise MesonException('Gir export packages must be str or list')
deps = None
if 'dependencies' in kwargs:
deps = kwargs.pop('dependencies')
if not isinstance (deps, list):
deps = [deps]
for dep in deps:
girdir = dep.held_object.get_variable ("girdir")
if girdir:
scan_command += ["--add-include-path=%s" % girdir]
inc_dirs = None
if kwargs.get('include_directories'):
inc_dirs = kwargs.pop('include_directories')
if isinstance(inc_dirs.held_object, build.IncludeDirs):
scan_command += ['--add-include-path=%s' % inc for inc in inc_dirs.held_object.get_incdirs()]
else:
raise MesonException('Gir include dirs should be include_directories()')
if isinstance(girtarget, build.Executable):
scan_command += ['--program', girtarget]
elif isinstance(girtarget, build.SharedLibrary):
scan_command += ["-L", os.path.join (state.environment.get_build_dir(), girtarget.subdir)]
libname = girtarget.get_basename()
scan_command += ['--library', libname]
scankwargs = {'output' : girfile,
'input' : libsources,
'command' : scan_command,
'depends' : depends,
}
if kwargs.get('install'):
scankwargs['install'] = kwargs['install']
scankwargs['install_dir'] = os.path.join(state.environment.get_datadir(), 'gir-1.0')
scan_target = GirTarget(girfile, state.subdir, scankwargs)
typelib_output = '%s-%s.typelib' % (ns, nsversion)
typelib_cmd = ['g-ir-compiler', scan_target, '--output', '@OUTPUT@']
if inc_dirs:
typelib_cmd += ['--includedir=%s' % inc for inc in
inc_dirs.held_object.get_incdirs()]
if deps:
for dep in deps:
girdir = dep.held_object.get_variable ("girdir")
if girdir:
typelib_cmd += ["--includedir=%s" % girdir]
kwargs['output'] = typelib_output
kwargs['command'] = typelib_cmd
# Note that this can't be libdir, because e.g. on Debian it points to
# lib/x86_64-linux-gnu but the girepo dir is always under lib.
kwargs['install_dir'] = 'lib/girepository-1.0'
typelib_target = TypelibTarget(typelib_output, state.subdir, kwargs)
return [scan_target, typelib_target]
def compile_schemas(self, state, args, kwargs):
if len(args) != 0:
raise MesonException('Compile_schemas does not take positional arguments.')
srcdir = os.path.join(state.build_to_src, state.subdir)
outdir = state.subdir
cmd = ['glib-compile-schemas', '--targetdir', outdir, srcdir]
kwargs['command'] = cmd
kwargs['input'] = []
kwargs['output'] = 'gschemas.compiled'
if state.subdir == '':
targetname = 'gsettings-compile'
else:
targetname = 'gsettings-compile-' + state.subdir
target_g = build.CustomTarget(targetname, state.subdir, kwargs)
return target_g
def gtkdoc(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Gtkdoc must have one positional argument.')
modulename = args[0]
if not isinstance(modulename, str):
raise MesonException('Gtkdoc arg must be string.')
        if 'src_dir' not in kwargs:
raise MesonException('Keyword argument src_dir missing.')
main_file = kwargs.get('main_sgml', '')
if not isinstance(main_file, str):
raise MesonException('Main sgml keyword argument must be a string.')
main_xml = kwargs.get('main_xml', '')
if not isinstance(main_xml, str):
raise MesonException('Main xml keyword argument must be a string.')
if main_xml != '':
if main_file != '':
raise MesonException('You can only specify main_xml or main_sgml, not both.')
main_file = main_xml
src_dir = kwargs['src_dir']
targetname = modulename + '-doc'
command = os.path.normpath(os.path.join(os.path.split(__file__)[0], "../gtkdochelper.py"))
args = [state.environment.get_source_dir(),
state.environment.get_build_dir(),
state.subdir,
os.path.normpath(os.path.join(state.subdir, src_dir)),
main_file,
modulename]
res = [build.RunTarget(targetname, command, args, state.subdir)]
if kwargs.get('install', True):
res.append(build.InstallScript([command] + args))
return res
def gdbus_codegen(self, state, args, kwargs):
if len(args) != 2:
raise MesonException('Gdbus_codegen takes two arguments, name and xml file.')
namebase = args[0]
xml_file = args[1]
cmd = ['gdbus-codegen']
if 'interface_prefix' in kwargs:
cmd += ['--interface-prefix', kwargs.pop('interface_prefix')]
if 'namespace' in kwargs:
cmd += ['--c-namespace', kwargs.pop('namespace')]
cmd += ['--generate-c-code', os.path.join(state.subdir, namebase), '@INPUT@']
outputs = [namebase + '.c', namebase + '.h']
custom_kwargs = {'input' : xml_file,
'output' : outputs,
'command' : cmd
}
return build.CustomTarget(namebase + '-gdbus', state.subdir, custom_kwargs)
def initialize():
mlog.log('Warning, glib compiled dependencies will not work until this upstream issue is fixed:',
mlog.bold('https://bugzilla.gnome.org/show_bug.cgi?id=745754'))
return GnomeModule()
class GirTarget(build.CustomTarget):
def __init__(self, name, subdir, kwargs):
super().__init__(name, subdir, kwargs)
class TypelibTarget(build.CustomTarget):
def __init__(self, name, subdir, kwargs):
super().__init__(name, subdir, kwargs)
|
from muntjac.ui.vertical_layout import VerticalLayout
from muntjac.ui.menu_bar import MenuBar, ICommand
from muntjac.terminal.external_resource import ExternalResource
class MenuBarItemStylesExample(VerticalLayout):
def __init__(self):
super(MenuBarItemStylesExample, self).__init__()
self._menubar = MenuBar()
menuCommand = MenuCommand(self)
# Save reference to individual items so we can add sub-menu items to
# them
f = self._menubar.addItem('File', None)
newItem = f.addItem('New', None)
f.addItem('Open f...', menuCommand)
f.addSeparator()
# Add a style name for a menu item, then use CSS to alter the visuals
f.setStyleName('file')
newItem.addItem('File', menuCommand)
newItem.addItem('Folder', menuCommand)
newItem.addItem('Project...', menuCommand)
f.addItem('Close', menuCommand)
f.addItem('Close All', menuCommand).setStyleName('close-all')
f.addSeparator()
f.addItem('Save', menuCommand)
f.addItem('Save As...', menuCommand)
f.addItem('Save All', menuCommand)
edit = self._menubar.addItem('Edit', None)
edit.addItem('Undo', menuCommand)
edit.addItem('Redo', menuCommand).setEnabled(False)
edit.addSeparator()
edit.addItem('Cut', menuCommand)
edit.addItem('Copy', menuCommand)
edit.addItem('Paste', menuCommand)
edit.addSeparator()
find = edit.addItem('Find/Replace', menuCommand)
# Actions can be added inline as well, of course
find.addItem('Google Search', SearchCommand(self))
find.addSeparator()
find.addItem('Find/Replace...', menuCommand)
find.addItem('Find Next', menuCommand)
find.addItem('Find Previous', menuCommand)
view = self._menubar.addItem('View', None)
view.addItem('Show/Hide Status Bar', menuCommand)
view.addItem('Customize Toolbar...', menuCommand)
view.addSeparator()
view.addItem('Actual Size', menuCommand)
view.addItem('Zoom In', menuCommand)
view.addItem('Zoom Out', menuCommand)
self.addComponent(self._menubar)
class SearchCommand(ICommand):
def __init__(self, c):
self._c = c
def menuSelected(self, selectedItem):
er = ExternalResource('http://www.google.com')
self._c.getWindow().open(er)
class MenuCommand(ICommand):
def __init__(self, c):
self._c = c
def menuSelected(self, selectedItem):
self._c.getWindow().showNotification('Action '
+ selectedItem.getText())
|
import types
from datetime import datetime, timedelta
from django.utils.timezone import now as timezone_now
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.upload import create_attachment
from zerver.models import Message, Realm, Recipient, UserProfile, UserMessage, ArchivedUserMessage, \
ArchivedMessage, Attachment, ArchivedAttachment
from zerver.lib.retention import get_expired_messages, move_message_to_archive
from typing import Any, List
from six.moves import range
class TestRetentionLib(ZulipTestCase):
"""
    Tests for the retention tool's retrieval of expired messages.
"""
def setUp(self):
# type: () -> None
super(TestRetentionLib, self).setUp()
self.zulip_realm = self._set_realm_message_retention_value('zulip', 30)
self.mit_realm = self._set_realm_message_retention_value('zephyr', 100)
@staticmethod
def _set_realm_message_retention_value(realm_str, retention_period):
# type: (str, int) -> Realm
realm = Realm.objects.get(string_id=realm_str)
realm.message_retention_days = retention_period
realm.save()
return realm
@staticmethod
def _change_messages_pub_date(msgs_ids, pub_date):
# type: (List[int], datetime) -> Any
messages = Message.objects.filter(id__in=msgs_ids).order_by('id')
messages.update(pub_date=pub_date)
return messages
def _make_mit_messages(self, message_quantity, pub_date):
# type: (int, datetime) -> Any
# send messages from mit.edu realm and change messages pub date
sender = self.mit_user('espuser')
recipient = self.mit_user('starnine')
msgs_ids = [self.send_message(sender.email, recipient.email, Recipient.PERSONAL) for i in
range(message_quantity)]
mit_messages = self._change_messages_pub_date(msgs_ids, pub_date)
return mit_messages
def test_expired_messages_result_type(self):
# type: () -> None
        # Check the return type of the get_expired_messages method.
result = get_expired_messages()
self.assertIsInstance(result, types.GeneratorType)
def test_no_expired_messages(self):
# type: () -> None
result = list(get_expired_messages())
self.assertFalse(result)
def test_expired_messages_in_each_realm(self):
# type: () -> None
        # Check the per-realm ordering and content of the result
        # when every realm has expired messages.
expired_mit_messages = self._make_mit_messages(3, timezone_now() - timedelta(days=101))
self._make_mit_messages(4, timezone_now() - timedelta(days=50))
zulip_messages_ids = Message.objects.order_by('id').filter(
sender__realm=self.zulip_realm).values_list('id', flat=True)[3:10]
expired_zulip_messages = self._change_messages_pub_date(zulip_messages_ids,
timezone_now() - timedelta(days=31))
# Iterate by result
expired_messages_result = [messages_list for messages_list in get_expired_messages()]
self.assertEqual(len(expired_messages_result), 2)
# Check mit.edu realm expired messages.
self.assertEqual(len(expired_messages_result[0]['expired_messages']), 3)
self.assertEqual(expired_messages_result[0]['realm_id'], self.mit_realm.id)
# Check zulip.com realm expired messages.
self.assertEqual(len(expired_messages_result[1]['expired_messages']), 7)
self.assertEqual(expired_messages_result[1]['realm_id'], self.zulip_realm.id)
# Compare expected messages ids with result messages ids.
self.assertEqual(
sorted([message.id for message in expired_mit_messages]),
[message.id for message in expired_messages_result[0]['expired_messages']]
)
self.assertEqual(
sorted([message.id for message in expired_zulip_messages]),
[message.id for message in expired_messages_result[1]['expired_messages']]
)
def test_expired_messages_in_one_realm(self):
# type: () -> None
        # Check a realm that has expired messages alongside messages
        # that are one day away from expiring.
expired_mit_messages = self._make_mit_messages(5, timezone_now() - timedelta(days=101))
actual_mit_messages = self._make_mit_messages(3, timezone_now() - timedelta(days=99))
expired_messages_result = list(get_expired_messages())
expired_mit_messages_ids = [message.id for message in expired_mit_messages]
expired_mit_messages_result_ids = [message.id for message in
expired_messages_result[0]['expired_messages']]
actual_mit_messages_ids = [message.id for message in actual_mit_messages]
self.assertEqual(len(expired_messages_result), 1)
self.assertEqual(len(expired_messages_result[0]['expired_messages']), 5)
self.assertEqual(expired_messages_result[0]['realm_id'], self.mit_realm.id)
# Compare expected messages ids with result messages ids.
self.assertEqual(
sorted(expired_mit_messages_ids),
expired_mit_messages_result_ids
)
        # Check that the not-yet-expired mit.edu messages are not in the expired list
self.assertEqual(
set(actual_mit_messages_ids) - set(expired_mit_messages_ids),
set(actual_mit_messages_ids)
)
class TestMoveMessageToArchive(ZulipTestCase):
def setUp(self):
# type: () -> None
super(TestMoveMessageToArchive, self).setUp()
self.sender = 'hamlet@zulip.com'
self.recipient = 'cordelia@zulip.com'
def _create_attachments(self):
# type: () -> None
sample_size = 10
dummy_files = [
('zulip.txt', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt', sample_size),
('temp_file.py', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py', sample_size),
('abc.py', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py', sample_size)
]
user_profile = self.example_user('hamlet')
for file_name, path_id, size in dummy_files:
create_attachment(file_name, path_id, user_profile, size)
def _check_messages_before_archiving(self, msg_id):
# type: (int) -> List
user_messages_ids_before = list(UserMessage.objects.filter(
message_id=msg_id).order_by('id').values_list('id', flat=True))
self.assertEqual(ArchivedUserMessage.objects.count(), 0)
self.assertEqual(ArchivedMessage.objects.count(), 0)
return user_messages_ids_before
def _check_messages_after_archiving(self, msg_id, user_msgs_ids_before):
# type: (int, List[int]) -> None
self.assertEqual(ArchivedMessage.objects.filter(id=msg_id).count(), 1)
self.assertEqual(Message.objects.filter(id=msg_id).count(), 0)
self.assertEqual(UserMessage.objects.filter(message_id=msg_id).count(), 0)
arc_user_messages_ids_after = list(ArchivedUserMessage.objects.filter(
message_id=msg_id).order_by('id').values_list('id', flat=True))
self.assertEqual(arc_user_messages_ids_after, user_msgs_ids_before)
def test_personal_message_archiving(self):
# type: ()-> None
msg_id = self.send_message(self.sender, [self.recipient], Recipient.PERSONAL)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
def test_stream_message_archiving(self):
# type: ()-> None
msg_id = self.send_message(self.sender, "Verona", Recipient.STREAM)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
def test_archiving_message_second_time(self):
# type: ()-> None
msg_id = self.send_message(self.sender, "Verona", Recipient.STREAM)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
with self.assertRaises(Message.DoesNotExist):
move_message_to_archive(message_id=msg_id)
def test_archiving_message_with_attachment(self):
# type: () -> None
self._create_attachments()
body = """Some files here ...[zulip.txt](
http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)
http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py ....
Some more.... http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py
"""
msg_id = self.send_message(self.sender, [self.recipient], Recipient.PERSONAL, body)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
attachments_ids_before = list(Attachment.objects.filter(
messages__id=msg_id).order_by("id").values_list("id", flat=True))
self.assertEqual(ArchivedAttachment.objects.count(), 0)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
self.assertEqual(Attachment.objects.count(), 0)
arc_attachments_ids_after = list(ArchivedAttachment.objects.filter(
messages__id=msg_id).order_by("id").values_list("id", flat=True))
self.assertEqual(attachments_ids_before, arc_attachments_ids_after)
def test_archiving_message_with_shared_attachment(self):
# type: () -> None
        # Check that attachments still referenced by other messages are not removed.
self._create_attachments()
body = """Some files here ...[zulip.txt](
http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)
http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py ....
Some more.... http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py
"""
msg_id = self.send_message(self.sender, [self.recipient], Recipient.PERSONAL, body)
msg_id_shared_attachments = self.send_message(self.recipient, [self.sender],
Recipient.PERSONAL, body)
user_messages_ids_before = self._check_messages_before_archiving(msg_id)
attachments_ids_before = list(Attachment.objects.filter(
messages__id=msg_id).order_by("id").values_list("id", flat=True))
self.assertEqual(ArchivedAttachment.objects.count(), 0)
move_message_to_archive(message_id=msg_id)
self._check_messages_after_archiving(msg_id, user_messages_ids_before)
self.assertEqual(Attachment.objects.count(), 3)
arc_attachments_ids_after = list(ArchivedAttachment.objects.filter(
messages__id=msg_id).order_by("id").values_list("id", flat=True))
self.assertEqual(attachments_ids_before, arc_attachments_ids_after)
move_message_to_archive(message_id=msg_id_shared_attachments)
self.assertEqual(Attachment.objects.count(), 0)
|
""" Base classes for DB backend implementation test
"""
import datetime
from unittest import mock
from oslo_utils import timeutils
from aodh import storage
from aodh.storage import models as alarm_models
from aodh.tests import constants
from aodh.tests.functional import db as tests_db
ALARM_TYPE = 'gnocchi_aggregation_by_metrics_threshold'
METRIC_IDS = ['41869681-5776-46d6-91ed-cccc43b6e4e3',
'a1fb80f4-c242-4f57-87c6-68f47521059e']
class DBTestBase(tests_db.TestBase):
@staticmethod
def create_side_effect(method, exception_type, test_exception):
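        # Returns a wrapper that raises exception_type when the next value
        # popped from test_exception is truthy, and otherwise delegates to the
        # wrapped method.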
def side_effect(*args, **kwargs):
if test_exception.pop():
raise exception_type
else:
return method(*args, **kwargs)
return side_effect
def setUp(self):
super(DBTestBase, self).setUp()
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39)
class AlarmTestBase(DBTestBase):
def add_some_alarms(self):
alarms = [alarm_models.Alarm(alarm_id='r3d',
enabled=True,
type=ALARM_TYPE,
name='red-alert',
description='my red-alert',
timestamp=datetime.datetime(2015, 7,
2, 10, 25),
user_id='me',
project_id='and-da-boys',
state="insufficient data",
state_reason="insufficient data",
state_timestamp=constants.MIN_DATETIME,
ok_actions=[],
alarm_actions=['http://nowhere/alarms'],
insufficient_data_actions=[],
repeat_actions=False,
time_constraints=[dict(name='testcons',
start='0 11 * * *',
duration=300)],
rule=dict(comparison_operator='eq',
threshold=36,
aggregation_method='count',
evaluation_periods=1,
granularity=60,
metrics=METRIC_IDS),
severity='low'
),
alarm_models.Alarm(alarm_id='0r4ng3',
enabled=True,
type=ALARM_TYPE,
name='orange-alert',
description='a orange',
timestamp=datetime.datetime(2015, 7,
2, 10, 40),
user_id='me',
project_id='and-da-boys',
state="insufficient data",
state_reason="insufficient data",
state_timestamp=constants.MIN_DATETIME,
ok_actions=[],
alarm_actions=['http://nowhere/alarms'],
insufficient_data_actions=[],
repeat_actions=False,
time_constraints=[],
rule=dict(comparison_operator='gt',
threshold=75,
aggregation_method='avg',
evaluation_periods=1,
granularity=60,
metrics=METRIC_IDS),
severity='low'
),
alarm_models.Alarm(alarm_id='y3ll0w',
enabled=False,
type=ALARM_TYPE,
name='yellow-alert',
description='yellow',
timestamp=datetime.datetime(2015, 7,
2, 10, 10),
user_id='me',
project_id='and-da-boys',
state="insufficient data",
state_reason="insufficient data",
state_timestamp=constants.MIN_DATETIME,
ok_actions=[],
alarm_actions=['http://nowhere/alarms'],
insufficient_data_actions=[],
repeat_actions=False,
time_constraints=[],
rule=dict(comparison_operator='lt',
threshold=10,
aggregation_method='min',
evaluation_periods=1,
granularity=60,
metrics=METRIC_IDS),
severity='low'
)]
for a in alarms:
self.alarm_conn.create_alarm(a)
class AlarmTest(AlarmTestBase):
def test_empty(self):
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual([], alarms)
def test_list(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(3, len(alarms))
def test_list_ordered_by_timestamp(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(len(alarms), 3)
alarm_l = [a.timestamp for a in alarms]
alarm_l_ordered = [datetime.datetime(2015, 7, 2, 10, 40),
datetime.datetime(2015, 7, 2, 10, 25),
datetime.datetime(2015, 7, 2, 10, 10)]
self.assertEqual(alarm_l_ordered, alarm_l)
def test_list_enabled(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms(enabled=True))
self.assertEqual(2, len(alarms))
def test_list_disabled(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms(enabled=False))
self.assertEqual(1, len(alarms))
def test_list_by_type(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms(type=ALARM_TYPE))
self.assertEqual(3, len(alarms))
def test_list_excluded_by_name(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms(name={'ne': 'yellow-alert'}))
self.assertEqual(2, len(alarms))
alarm_names = sorted([a.name for a in alarms])
self.assertEqual(['orange-alert', 'red-alert'], alarm_names)
def test_add(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(3, len(alarms))
metrics = sorted([a.rule['metrics'] for a in alarms])
self.assertEqual([METRIC_IDS, METRIC_IDS, METRIC_IDS], metrics)
def test_update(self):
self.add_some_alarms()
metrics = ['6841c175-d7c4-4bc2-bc7a-1c7832271b8f',
'bc1efaa5-93b4-4518-8337-18519917c15a']
orange = list(self.alarm_conn.get_alarms(name='orange-alert'))[0]
orange.enabled = False
orange.state = alarm_models.Alarm.ALARM_INSUFFICIENT_DATA
orange.rule['metrics'] = metrics
updated = self.alarm_conn.update_alarm(orange)
self.assertFalse(updated.enabled)
self.assertEqual(alarm_models.Alarm.ALARM_INSUFFICIENT_DATA,
updated.state)
self.assertEqual(metrics, updated.rule['metrics'])
def test_update_llu(self):
llu = alarm_models.Alarm(alarm_id='llu',
enabled=True,
type=ALARM_TYPE,
name='llu',
description='llu',
timestamp=constants.MIN_DATETIME,
user_id='bla',
project_id='ffo',
state="insufficient data",
state_reason="insufficient data",
state_timestamp=constants.MIN_DATETIME,
ok_actions=[],
alarm_actions=[],
insufficient_data_actions=[],
repeat_actions=False,
time_constraints=[],
rule=dict(comparison_operator='lt',
threshold=34,
aggregation_method='max',
evaluation_periods=1,
granularity=60,
metrics=METRIC_IDS)
)
updated = self.alarm_conn.create_alarm(llu)
updated.state = alarm_models.Alarm.ALARM_OK
updated.description = ':)'
self.alarm_conn.update_alarm(updated)
all = list(self.alarm_conn.get_alarms())
self.assertEqual(1, len(all))
def test_update_deleted_alarm_failed(self):
self.add_some_alarms()
alarm1 = list(self.alarm_conn.get_alarms(name='orange-alert'))[0]
self.alarm_conn.delete_alarm(alarm1.alarm_id)
survivors = list(self.alarm_conn.get_alarms())
self.assertEqual(2, len(survivors))
alarm1.state = alarm_models.Alarm.ALARM_ALARM
self.assertRaises(storage.AlarmNotFound,
self.alarm_conn.update_alarm, alarm1)
survivors = list(self.alarm_conn.get_alarms())
self.assertEqual(2, len(survivors))
def test_delete(self):
self.add_some_alarms()
victim = list(self.alarm_conn.get_alarms(name='orange-alert'))[0]
self.alarm_conn.delete_alarm(victim.alarm_id)
survivors = list(self.alarm_conn.get_alarms())
self.assertEqual(2, len(survivors))
for s in survivors:
self.assertNotEqual(victim.name, s.name)
class AlarmHistoryTest(AlarmTestBase):
def setUp(self):
super(AlarmTestBase, self).setUp()
self.add_some_alarms()
self.prepare_alarm_history()
def prepare_alarm_history(self):
alarms = list(self.alarm_conn.get_alarms())
for alarm in alarms:
i = alarms.index(alarm)
alarm_change = {
"event_id": "3e11800c-a3ca-4991-b34b-d97efb6047d%s" % i,
"alarm_id": alarm.alarm_id,
"type": alarm_models.AlarmChange.CREATION,
"detail": "detail %s" % alarm.name,
"user_id": alarm.user_id,
"project_id": alarm.project_id,
"on_behalf_of": alarm.project_id,
"timestamp": datetime.datetime(2014, 4, 7, 7, 30 + i)
}
self.alarm_conn.record_alarm_change(alarm_change=alarm_change)
def _clear_alarm_history(self, utcnow, ttl, count):
self.mock_utcnow.return_value = utcnow
self.alarm_conn.clear_expired_alarm_history_data(ttl, 100)
history = list(self.alarm_conn.query_alarm_history())
self.assertEqual(count, len(history))
def test_clear_alarm_history_no_data_to_remove(self):
utcnow = datetime.datetime(2013, 4, 7, 7, 30)
self._clear_alarm_history(utcnow, 1, 3)
def test_clear_some_alarm_history(self):
utcnow = datetime.datetime(2014, 4, 7, 7, 35)
self._clear_alarm_history(utcnow, 3 * 60, 1)
def test_clear_all_alarm_history(self):
utcnow = datetime.datetime(2014, 4, 7, 7, 45)
self._clear_alarm_history(utcnow, 3 * 60, 0)
def test_delete_history_when_delete_alarm(self):
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(3, len(alarms))
history = list(self.alarm_conn.query_alarm_history())
self.assertEqual(3, len(history))
for alarm in alarms:
self.alarm_conn.delete_alarm(alarm.alarm_id)
self.assertEqual(3, len(alarms))
history = list(self.alarm_conn.query_alarm_history())
self.assertEqual(0, len(history))
def test_record_severity_when_alarm_change(self):
alarm = list(self.alarm_conn.get_alarms(name='orange-alert'))[0]
severity = "low"
alarm_change = {
"event_id": "3d22800c-a3ca-4991-b34b-d97efb6047d9",
"alarm_id": alarm.alarm_id,
"type": alarm_models.AlarmChange.STATE_TRANSITION,
"detail": "detail %s" % alarm.name,
"user_id": alarm.user_id,
"project_id": alarm.project_id,
"on_behalf_of": alarm.project_id,
"severity": severity,
"timestamp": datetime.datetime(2014, 4, 7, 7, 34)
}
self.alarm_conn.record_alarm_change(alarm_change=alarm_change)
filter_expr = {"=": {"severity": "low"}}
history = list(self.alarm_conn.query_alarm_history(
filter_expr=filter_expr))
self.assertEqual(1, len(history))
self.assertEqual("low", history[0].severity)
class ComplexAlarmQueryTest(AlarmTestBase):
def test_no_filter(self):
self.add_some_alarms()
result = list(self.alarm_conn.query_alarms())
self.assertEqual(3, len(result))
def test_no_filter_with_limit(self):
self.add_some_alarms()
result = list(self.alarm_conn.query_alarms(limit=2))
self.assertEqual(2, len(result))
def test_filter(self):
self.add_some_alarms()
filter_expr = {"and":
[{"or":
[{"=": {"name": "yellow-alert"}},
{"=": {"name": "red-alert"}}]},
{"=": {"enabled": True}}]}
result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr))
self.assertEqual(1, len(result))
for a in result:
self.assertIn(a.name, set(["yellow-alert", "red-alert"]))
self.assertTrue(a.enabled)
def test_filter_with_regexp(self):
self.add_some_alarms()
filter_expr = {"and":
[{"or": [{"=": {"name": "yellow-alert"}},
{"=": {"name": "red-alert"}}]},
{"=~": {"description": "yel.*"}}]}
result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr))
self.assertEqual(1, len(result))
for a in result:
self.assertEqual("yellow", a.description)
def test_filter_for_alarm_id(self):
self.add_some_alarms()
filter_expr = {"=": {"alarm_id": "0r4ng3"}}
result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr))
self.assertEqual(1, len(result))
for a in result:
self.assertEqual("0r4ng3", a.alarm_id)
def test_filter_and_orderby(self):
self.add_some_alarms()
result = list(self.alarm_conn.query_alarms(filter_expr=(
{"=": {"enabled": True}}),
orderby=[{"name": "asc"}]))
self.assertEqual(2, len(result))
self.assertEqual(["orange-alert", "red-alert"],
[a.name for a in result])
for a in result:
self.assertTrue(a.enabled)
class ComplexAlarmHistoryQueryTest(AlarmTestBase):
def setUp(self):
super(DBTestBase, self).setUp()
self.filter_expr = {"and":
[{"or":
[{"=": {"type": "rule change"}},
{"=": {"type": "state transition"}}]},
{"=": {"alarm_id": "0r4ng3"}}]}
self.add_some_alarms()
self.prepare_alarm_history()
def prepare_alarm_history(self):
alarms = list(self.alarm_conn.get_alarms())
name_index = {
'red-alert': 0,
'orange-alert': 1,
'yellow-alert': 2
}
for alarm in alarms:
i = name_index[alarm.name]
alarm_change = dict(event_id=(
"16fd2706-8baf-433b-82eb-8c7fada847c%s" % i),
alarm_id=alarm.alarm_id,
type=alarm_models.AlarmChange.CREATION,
detail="detail %s" % alarm.name,
user_id=alarm.user_id,
project_id=alarm.project_id,
on_behalf_of=alarm.project_id,
timestamp=datetime.datetime(2012, 9, 24,
7 + i,
30 + i))
self.alarm_conn.record_alarm_change(alarm_change=alarm_change)
alarm_change2 = dict(event_id=(
"16fd2706-8baf-433b-82eb-8c7fada847d%s" % i),
alarm_id=alarm.alarm_id,
type=alarm_models.AlarmChange.RULE_CHANGE,
detail="detail %s" % i,
user_id=alarm.user_id,
project_id=alarm.project_id,
on_behalf_of=alarm.project_id,
timestamp=datetime.datetime(2012, 9, 25,
10 + i,
30 + i))
self.alarm_conn.record_alarm_change(alarm_change=alarm_change2)
alarm_change3 = dict(
event_id="16fd2706-8baf-433b-82eb-8c7fada847e%s" % i,
alarm_id=alarm.alarm_id,
type=alarm_models.AlarmChange.STATE_TRANSITION,
detail="detail %s" % (i + 1),
user_id=alarm.user_id,
project_id=alarm.project_id,
on_behalf_of=alarm.project_id,
timestamp=datetime.datetime(2012, 9, 26, 10 + i, 30 + i)
)
if alarm.name == "red-alert":
alarm_change3['on_behalf_of'] = 'and-da-girls'
self.alarm_conn.record_alarm_change(alarm_change=alarm_change3)
def test_alarm_history_with_no_filter(self):
history = list(self.alarm_conn.query_alarm_history())
self.assertEqual(9, len(history))
def test_alarm_history_with_no_filter_and_limit(self):
history = list(self.alarm_conn.query_alarm_history(limit=3))
self.assertEqual(3, len(history))
def test_alarm_history_with_filter(self):
history = list(
self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr))
self.assertEqual(2, len(history))
def test_alarm_history_with_regexp(self):
filter_expr = {"and":
[{"=~": {"type": "(rule)|(state)"}},
{"=": {"alarm_id": "0r4ng3"}}]}
history = list(
self.alarm_conn.query_alarm_history(filter_expr=filter_expr))
self.assertEqual(2, len(history))
def test_alarm_history_with_filter_and_orderby(self):
history = list(
self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr,
orderby=[{"timestamp":
"asc"}]))
self.assertEqual([alarm_models.AlarmChange.RULE_CHANGE,
alarm_models.AlarmChange.STATE_TRANSITION],
[h.type for h in history])
def test_alarm_history_with_filter_and_orderby_and_limit(self):
history = list(
self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr,
orderby=[{"timestamp":
"asc"}],
limit=1))
self.assertEqual(alarm_models.AlarmChange.RULE_CHANGE, history[0].type)
def test_alarm_history_with_on_behalf_of_filter(self):
filter_expr = {"=": {"on_behalf_of": "and-da-girls"}}
history = list(self.alarm_conn.query_alarm_history(
filter_expr=filter_expr))
self.assertEqual(1, len(history))
self.assertEqual("16fd2706-8baf-433b-82eb-8c7fada847e0",
history[0].event_id)
def test_alarm_history_with_alarm_id_as_filter(self):
filter_expr = {"=": {"alarm_id": "r3d"}}
history = list(self.alarm_conn.query_alarm_history(
filter_expr=filter_expr, orderby=[{"timestamp": "asc"}]))
self.assertEqual(3, len(history))
self.assertEqual([alarm_models.AlarmChange.CREATION,
alarm_models.AlarmChange.RULE_CHANGE,
alarm_models.AlarmChange.STATE_TRANSITION],
[h.type for h in history])
|
"""Pretty print logging."""
import logging
import pprint
from typing import Any
def log(level: int, x: Any) -> None:
if logging.getLogger(None).isEnabledFor(level):
for line in pprint.pformat(x).split('\n'):
logging.log(level, line)
def info(x: Any) -> None:
log(logging.INFO, x)
def debug(x: Any) -> None:
log(logging.DEBUG, x)
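# Minimal usage sketch (illustrative addition, not part of the original
# module): pretty-print a nested structure as one log record per line.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    info({'user': 'alice', 'scores': list(range(20)), 'active': True})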
|
import copy
import json
from unittest import mock
import sqlalchemy as sa
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.services import security
from mistral.tests.unit.api import base
from mistral.tests.unit import base as unit_base
WF = models.WorkflowDefinition(
spec={
'version': '2.0',
'name': 'my_wf',
'tasks': {
'task1': {
'action': 'std.noop'
}
}
}
)
WF.update({'id': '123e4567-e89b-12d3-a456-426655440000', 'name': 'my_wf'})
TRIGGER = {
'id': '02abb422-55ef-4bb2-8cb9-217a583a6a3f',
'name': 'my_cron_trigger',
'pattern': '* * * * *',
'workflow_name': WF.name,
'workflow_id': '123e4567-e89b-12d3-a456-426655440000',
'workflow_input': '{}',
'workflow_params': '{}',
'scope': 'private',
'remaining_executions': 42
}
trigger_values = copy.deepcopy(TRIGGER)
trigger_values['workflow_input'] = json.loads(
trigger_values['workflow_input'])
trigger_values['workflow_params'] = json.loads(
trigger_values['workflow_params'])
TRIGGER_DB = models.CronTrigger()
TRIGGER_DB.update(trigger_values)
TRIGGER_DB_WITH_PROJECT_ID = TRIGGER_DB.get_clone()
TRIGGER_DB_WITH_PROJECT_ID.project_id = '<default-project>'
MOCK_WF = mock.MagicMock(return_value=WF)
MOCK_TRIGGER = mock.MagicMock(return_value=TRIGGER_DB)
MOCK_TRIGGERS = mock.MagicMock(return_value=[TRIGGER_DB])
MOCK_DELETE = mock.MagicMock(return_value=1)
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
MOCK_DUPLICATE = mock.MagicMock(side_effect=exc.DBDuplicateEntryError())
class TestCronTriggerController(base.APITest):
@mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
def test_get(self):
resp = self.app.get('/v2/cron_triggers/my_cron_trigger')
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TRIGGER, resp.json)
@mock.patch.object(db_api, 'get_cron_trigger')
def test_get_operational_error(self, mocked_get):
mocked_get.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
TRIGGER_DB # Successful run
]
resp = self.app.get('/v2/cron_triggers/my_cron_trigger')
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TRIGGER, resp.json)
@mock.patch.object(db_api, "get_cron_trigger",
return_value=TRIGGER_DB_WITH_PROJECT_ID)
def test_get_within_project_id(self, mock_get):
resp = self.app.get('/v2/cron_triggers/my_cron_trigger')
self.assertEqual(200, resp.status_int)
self.assertTrue('project_id' in resp.json)
@mock.patch.object(db_api, "get_cron_trigger", MOCK_NOT_FOUND)
def test_get_not_found(self):
resp = self.app.get(
'/v2/cron_triggers/my_cron_trigger',
expect_errors=True
)
self.assertEqual(404, resp.status_int)
@mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
def test_get_by_id(self):
resp = self.app.get(
"/v2/cron_triggers/02abb422-55ef-4bb2-8cb9-217a583a6a3f")
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TRIGGER, resp.json)
@mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
@mock.patch.object(db_api, "create_cron_trigger")
def test_post(self, mock_mtd):
mock_mtd.return_value = TRIGGER_DB
resp = self.app.post_json('/v2/cron_triggers', TRIGGER)
self.assertEqual(201, resp.status_int)
self.assertDictEqual(TRIGGER, resp.json)
self.assertEqual(1, mock_mtd.call_count)
values = mock_mtd.call_args[0][0]
self.assertEqual('* * * * *', values['pattern'])
self.assertEqual(42, values['remaining_executions'])
@mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
@mock.patch.object(db_api, "create_cron_trigger", MOCK_DUPLICATE)
@mock.patch.object(security, "delete_trust")
def test_post_dup(self, delete_trust):
resp = self.app.post_json(
'/v2/cron_triggers', TRIGGER, expect_errors=True
)
self.assertEqual(1, delete_trust.call_count)
self.assertEqual(409, resp.status_int)
@mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
@mock.patch.object(db_api, "create_cron_trigger", MOCK_DUPLICATE)
def test_post_same_wf_and_input(self):
trig = TRIGGER.copy()
trig['name'] = 'some_trigger_name'
resp = self.app.post_json(
'/v2/cron_triggers', trig, expect_errors=True
)
self.assertEqual(409, resp.status_int)
@mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
@mock.patch.object(db_api, "delete_cron_trigger", MOCK_DELETE)
@mock.patch.object(security, "delete_trust")
def test_delete(self, delete_trust):
resp = self.app.delete('/v2/cron_triggers/my_cron_trigger')
self.assertEqual(1, delete_trust.call_count)
self.assertEqual(204, resp.status_int)
@mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
@mock.patch.object(db_api, "delete_cron_trigger", MOCK_DELETE)
@mock.patch.object(security, "delete_trust")
def test_delete_by_id(self, delete_trust):
resp = self.app.delete(
'/v2/cron_triggers/02abb422-55ef-4bb2-8cb9-217a583a6a3f')
self.assertEqual(1, delete_trust.call_count)
self.assertEqual(204, resp.status_int)
@mock.patch.object(db_api, "delete_cron_trigger", MOCK_NOT_FOUND)
def test_delete_not_found(self):
resp = self.app.delete(
'/v2/cron_triggers/my_cron_trigger',
expect_errors=True
)
self.assertEqual(404, resp.status_int)
@mock.patch.object(db_api, "get_cron_triggers", MOCK_TRIGGERS)
def test_get_all(self):
resp = self.app.get('/v2/cron_triggers')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['cron_triggers']))
self.assertDictEqual(TRIGGER, resp.json['cron_triggers'][0])
@mock.patch.object(db_api, 'get_cron_triggers')
def test_get_all_operational_error(self, mocked_get_all):
mocked_get_all.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
[TRIGGER_DB] # Successful run
]
resp = self.app.get('/v2/cron_triggers')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['cron_triggers']))
self.assertDictEqual(TRIGGER, resp.json['cron_triggers'][0])
@mock.patch.object(db_api, 'get_cron_triggers')
@mock.patch('mistral.context.MistralContext.from_environ')
def test_get_all_projects_admin(self, mock_context, mock_get_triggers):
admin_ctx = unit_base.get_context(admin=True)
mock_context.return_value = admin_ctx
resp = self.app.get('/v2/cron_triggers?all_projects=true')
self.assertEqual(200, resp.status_int)
self.assertTrue(mock_get_triggers.call_args[1].get('insecure', False))
@mock.patch.object(db_api, 'get_cron_triggers')
@mock.patch('mistral.context.MistralContext.from_environ')
def test_get_all_filter_project(self, mock_context, mock_get_triggers):
admin_ctx = unit_base.get_context(admin=True)
mock_context.return_value = admin_ctx
resp = self.app.get(
'/v2/cron_triggers?all_projects=true&'
'project_id=192796e61c174f718d6147b129f3f2ff'
)
self.assertEqual(200, resp.status_int)
self.assertTrue(mock_get_triggers.call_args[1].get('insecure', False))
self.assertEqual(
{'eq': '192796e61c174f718d6147b129f3f2ff'},
mock_get_triggers.call_args[1].get('project_id')
)
@mock.patch.object(db_api, "get_cron_triggers", MOCK_EMPTY)
def test_get_all_empty(self):
resp = self.app.get('/v2/cron_triggers')
self.assertEqual(200, resp.status_int)
self.assertEqual(0, len(resp.json['cron_triggers']))
|
from __future__ import print_function
import numpy as np
import mxnet as mx
import random
import itertools
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from common import with_seed
import unittest
def test_box_nms_op():
def test_box_nms_forward(data, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1, cid=0, bid=-1,
force=False, in_format='corner', out_format='corner'):
for dtype in ['float16', 'float32', 'float64']:
data = mx.nd.array(data, dtype=dtype)
out = mx.contrib.nd.box_nms(data, overlap_thresh=thresh, valid_thresh=valid, topk=topk,
coord_start=coord, score_index=score, id_index=cid, background_id=bid,
force_suppress=force, in_format=in_format, out_format=out_format)
assert_almost_equal(out.asnumpy(), expected.astype(dtype), rtol=1e-3, atol=1e-3)
def test_box_nms_backward(data, grad, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1,
cid=0, bid=-1, force=False, in_format='corner', out_format='corner'):
in_var = mx.sym.Variable('data')
arr_data = mx.nd.array(data)
arr_grad = mx.nd.empty(arr_data.shape)
op = mx.contrib.sym.box_nms(in_var, overlap_thresh=thresh, valid_thresh=valid, topk=topk,
coord_start=coord, score_index=score, id_index=cid, background_id=bid,
force_suppress=force, in_format=in_format, out_format=out_format)
exe = op.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
exe.forward(is_train=True)
exe.backward(mx.nd.array(grad))
assert_almost_equal(arr_grad.asnumpy(), expected)
def corner_to_center(data):
out = np.reshape(data, (-1, 6)).copy()
out[:, 2] = (data[:, 2] + data[:, 4]) / 2.0
out[:, 3] = (data[:, 3] + data[:, 5]) / 2.0
out[:, 4] = data[:, 4] - data[:, 2]
out[:, 5] = data[:, 5] - data[:, 3]
invalid = np.where(data[:, 0] < 0)[0]
out[invalid, :] = -1
return out
def center_to_corner(data):
        data = np.reshape(data, (-1, 6)).copy()
        out = data.copy()  # columns 2-5 (box coordinates) are recomputed below
out[:, 2] = data[:, 2] - data[:, 4] / 2.0
out[:, 3] = data[:, 3] - data[:, 5] / 2.0
out[:, 4] = data[:, 2] + data[:, 4] / 2.0
out[:, 5] = data[:, 3] + data[:, 5] / 2.0
invalid = np.where(data[:, 0] < 0)[0]
out[invalid, :] = -1
return out
def swap_position(data, expected, coord=2, score=1, cid=0, new_col=0):
data = np.reshape(data, (-1, 6))
expected = np.reshape(expected, (-1, 6))
new_coord = random.randint(0, 6 + new_col - 4)
others = list(range(new_coord)) + list(range(new_coord + 4, 6 + new_col))
random.shuffle(others)
new_score = others[0]
new_cid = others[1]
new_data = np.full((data.shape[0], data.shape[1] + new_col), -1.0)
new_expected = np.full((expected.shape[0], expected.shape[1] + new_col), -1.0)
new_data[:, new_coord:new_coord+4] = data[:, coord:coord+4]
new_data[:, new_score] = data[:, score]
new_data[:, new_cid] = data[:, cid]
new_expected[:, new_coord:new_coord+4] = expected[:, coord:coord+4]
new_expected[:, new_score] = expected[:, score]
new_expected[:, new_cid] = expected[:, cid]
return new_data, new_expected, new_coord, new_score, new_cid
# manually set up test cases
boxes = [[0, 0.5, 0.1, 0.1, 0.2, 0.2], [1, 0.4, 0.1, 0.1, 0.2, 0.2],
[0, 0.3, 0.1, 0.1, 0.14, 0.14], [2, 0.6, 0.5, 0.5, 0.7, 0.8]]
# case1
force = True
thresh = 0.5
expected = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
[0, 0.3, 0.1, 0.1, 0.14, 0.14], [-1, -1, -1, -1, -1, -1]]
grad = np.random.rand(4, 6)
expected_in_grad = grad[(1, 3, 2, 0), :]
expected_in_grad[1, :] = 0
test_box_nms_forward(np.array(boxes), np.array(expected), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes), grad, expected_in_grad, force=force, thresh=thresh)
# case2: multi batch
boxes2 = [boxes] * 3
expected2 = [expected] * 3
grad2 = np.array([grad.tolist()] * 3)
expected_in_grad2 = np.array([expected_in_grad.tolist()] * 3)
test_box_nms_forward(np.array(boxes2), np.array(expected2), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes2), grad2, expected_in_grad2, force=force, thresh=thresh)
# another new dim
boxes2 = [boxes2] * 2
expected2 = [expected2] * 2
grad2 = np.array([grad2.tolist()] * 2)
expected_in_grad2 = np.array([expected_in_grad2.tolist()] * 2)
test_box_nms_forward(np.array(boxes2), np.array(expected2), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes2), grad2, expected_in_grad2, force=force, thresh=thresh)
# case3: thresh
thresh = 0.1
boxes3 = boxes
expected3 = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
[-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]
grad3 = np.random.rand(4, 6)
expected_in_grad3 = grad3[(1, 3, 2, 0), :]
expected_in_grad3[(1, 2), :] = 0
test_box_nms_forward(np.array(boxes3), np.array(expected3), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes3), grad3, expected_in_grad3, force=force, thresh=thresh)
# case4: non-force
boxes4 = boxes
force = False
expected4 = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
[1, 0.4, 0.1, 0.1, 0.2, 0.2], [-1, -1, -1, -1, -1, -1]]
grad4 = np.random.rand(4, 6)
expected_in_grad4 = grad4[(1, 2, 3, 0), :]
expected_in_grad4[2, :] = 0
test_box_nms_forward(np.array(boxes4), np.array(expected4), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes4), grad4, expected_in_grad4, force=force, thresh=thresh)
# case5: different coding
boxes5 = corner_to_center(np.array(boxes4))
test_box_nms_forward(np.array(boxes5), np.array(expected4), force=force, thresh=thresh,
in_format='center')
expected5 = corner_to_center(np.array(expected4))
test_box_nms_forward(np.array(boxes4), np.array(expected5), force=force, thresh=thresh,
out_format='center')
test_box_nms_forward(np.array(boxes5), np.array(expected5), force=force, thresh=thresh,
in_format='center', out_format='center')
# case6: different position
boxes6, expected6, new_coord, new_score, new_id = swap_position(np.array(boxes4),
np.array(expected4), new_col=2)
test_box_nms_forward(np.array(boxes6), np.array(expected6), force=force, thresh=thresh,
coord=new_coord, score=new_score, cid=new_id)
# case7: no id, should be same with force=True
force = False
thresh = 0.5
test_box_nms_forward(np.array(boxes), np.array(expected), force=force, thresh=thresh, cid=-1)
# case8: multi-batch thresh + topk
boxes8 = [[[1, 1, 0, 0, 10, 10], [1, 0.4, 0, 0, 10, 10], [1, 0.3, 0, 0, 10, 10]],
[[2, 1, 0, 0, 10, 10], [2, 0.4, 0, 0, 10, 10], [2, 0.3, 0, 0, 10, 10]],
[[3, 1, 0, 0, 10, 10], [3, 0.4, 0, 0, 10, 10], [3, 0.3, 0, 0, 10, 10]]]
expected8 = [[[1, 1, 0, 0, 10, 10], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]],
[[2, 1, 0, 0, 10, 10], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]],
[[3, 1, 0, 0, 10, 10], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]]
grad8 = np.random.rand(3, 3, 6)
expected_in_grad8 = np.zeros((3, 3, 6))
expected_in_grad8[(0, 1, 2), (0, 0, 0), :] = grad8[(0, 1, 2), (0, 0, 0), :]
force = False
thresh = 0.5
valid = 0.5
topk = 2
test_box_nms_forward(np.array(boxes8), np.array(expected8), force=force, thresh=thresh, valid=valid, topk=topk)
test_box_nms_backward(np.array(boxes8), grad8, expected_in_grad8, force=force, thresh=thresh, valid=valid, topk=topk)
# case9: background id filter out
# default background id -1
boxes9 = [[0, 0.5, 0.1, 0.1, 0.2, 0.2], [0, 0.4, 0.1, 0.1, 0.2, 0.2],
[1, 0.3, 0.1, 0.1, 0.14, 0.14], [-1, 0.6, 0.5, 0.5, 0.7, 0.8]]
expected9 = [[0, 0.5, 0.1, 0.1, 0.2, 0.2], [1, 0.3, 0.1, 0.1, 0.14, 0.14],
[-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]
force = True
thresh = 0.5
grad9 = np.random.rand(4, 6)
expected_in_grad9 = grad9[(0, 2, 1, 3), :]
expected_in_grad9[(1, 3), :] = 0
test_box_nms_forward(np.array(boxes9), np.array(expected9), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes9), grad9, expected_in_grad9, force=force, thresh=thresh)
# set background id
background_id = 0
expected9 = [[-1, 0.6, 0.5, 0.5, 0.7, 0.8], [1, 0.3, 0.1, 0.1, 0.14, 0.14],
[-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]
grad9 = np.random.rand(4, 6)
expected_in_grad9 = grad9[(2, 3, 1, 0), :]
expected_in_grad9[(0, 1), :] = 0
test_box_nms_forward(np.array(boxes9), np.array(expected9), force=force, thresh=thresh, bid=background_id)
test_box_nms_backward(np.array(boxes9), grad9, expected_in_grad9, force=force, thresh=thresh, bid=background_id)
def test_box_iou_op():
def numpy_box_iou(a, b, fmt='corner'):
def area(left, top, right, bottom):
return np.maximum(0, right - left) * np.maximum(0, bottom - top)
assert a.shape[-1] == 4
assert b.shape[-1] == 4
oshape = a.shape[:-1] + b.shape[:-1]
a = a.reshape((-1, 4))
ashape = a.shape
b = b.reshape((-1, 4))
a = np.tile(a, reps=[1, b.shape[0]]).reshape((-1, 4))
b = np.tile(b, reps=[ashape[0], 1]).reshape((-1, 4))
if fmt == 'corner':
al, at, ar, ab = np.split(a, 4, axis=-1)
bl, bt, br, bb = np.split(b, 4, axis=-1)
elif fmt == 'center':
ax, ay, aw, ah = np.split(a, 4, axis=-1)
bx, by, bw, bh = np.split(b, 4, axis=-1)
al, at, ar, ab = ax - aw / 2, ay - ah / 2, ax + aw / 2, ay + ah / 2
bl, bt, br, bb = bx - bw / 2, by - bh / 2, bx + bw / 2, by + bh / 2
else:
raise NotImplementedError("Fmt {} not supported".format(fmt))
width = np.maximum(0, np.minimum(ar, br) - np.maximum(al, bl))
height = np.maximum(0, np.minimum(ab, bb) - np.maximum(at, bt))
intersect = width * height
union = area(al, at, ar, ab) + area(bl, bt, br, bb) - intersect
union[np.where(intersect <= 0)] = 1e-12
iou = intersect / union
return iou.reshape(oshape)
def generate_boxes(dims):
s1, off1, s2, off2 = np.random.rand(4) * 100
xy = np.random.rand(*(dims + [2])) * s1 + off1
wh = np.random.rand(*(dims + [2])) * s2 + off2
xywh = np.concatenate([xy, wh], axis=-1)
ltrb = np.concatenate([xy - wh / 2, xy + wh / 2], axis=-1)
return xywh, ltrb
for ndima in range(1, 6):
for ndimb in range(1, 6):
dims_a = np.random.randint(low=1, high=3, size=ndima).tolist()
dims_b = np.random.randint(low=1, high=3, size=ndimb).tolist()
# generate left, top, right, bottom
xywh_a, ltrb_a = generate_boxes(dims_a)
xywh_b, ltrb_b = generate_boxes(dims_b)
iou_np = numpy_box_iou(ltrb_a, ltrb_b, fmt='corner')
iou_np2 = numpy_box_iou(xywh_a, xywh_b, fmt='center')
iou_mx = mx.nd.contrib.box_iou(mx.nd.array(ltrb_a), mx.nd.array(ltrb_b), format='corner')
iou_mx2 = mx.nd.contrib.box_iou(mx.nd.array(xywh_a), mx.nd.array(xywh_b), format='center')
assert_allclose(iou_np, iou_np2, rtol=1e-5, atol=1e-5)
assert_allclose(iou_np, iou_mx.asnumpy(), rtol=1e-5, atol=1e-5)
assert_allclose(iou_np, iou_mx2.asnumpy(), rtol=1e-5, atol=1e-5)
def test_bipartite_matching_op():
def assert_match(inputs, x, y, threshold, is_ascend=False):
for dtype in ['float16', 'float32', 'float64']:
inputs = mx.nd.array(inputs, dtype=dtype)
x = np.array(x, dtype=dtype)
y = np.array(y, dtype=dtype)
a, b = mx.nd.contrib.bipartite_matching(inputs, threshold=threshold, is_ascend=is_ascend)
assert_array_equal(a.asnumpy().astype('int64'), x.astype('int64'))
assert_array_equal(b.asnumpy().astype('int64'), y.astype('int64'))
assert_match([[0.5, 0.6], [0.1, 0.2], [0.3, 0.4]], [1, -1, 0], [2, 0], 1e-12, False)
assert_match([[0.5, 0.6], [0.1, 0.2], [0.3, 0.4]], [-1, 0, 1], [1, 2], 100, True)
def test_multibox_target_op():
anchors = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]], ctx=default_context()).reshape((1, -1, 4))
cls_pred = mx.nd.array(list(range(10)), ctx=default_context()).reshape((1, -1, 2))
label = mx.nd.array([1, 0.1, 0.1, 0.5, 0.6], ctx=default_context()).reshape((1, -1, 5))
loc_target, loc_mask, cls_target = \
mx.nd.contrib.MultiBoxTarget(anchors, label, cls_pred,
overlap_threshold=0.5,
negative_mining_ratio=3,
negative_mining_thresh=0.4)
expected_loc_target = np.array([[5.0, 2.5000005, 3.4657357, 4.581454, 0., 0., 0., 0.]])
expected_loc_mask = np.array([[1, 1, 1, 1, 0, 0, 0, 0]])
expected_cls_target = np.array([[2, 0]])
assert_allclose(loc_target.asnumpy(), expected_loc_target, rtol=1e-5, atol=1e-5)
assert_array_equal(loc_mask.asnumpy(), expected_loc_mask)
assert_array_equal(cls_target.asnumpy(), expected_cls_target)
def test_gradient_multiplier_op():
# We use the quadratic function in combination with gradient multiplier
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
m = np.random.random_sample() - 0.5
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
gr_q_sym = mx.sym.contrib.gradientmultiplier(quad_sym, scalar=m)
for dtype in [np.float16, np.float32, np.float64]:
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = (2 * a * data_np + b) * m
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
output = mx.nd.contrib.gradientmultiplier(output, scalar=m)
assert_almost_equal(output.asnumpy(), expected,
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check forward
check_symbolic_forward(gr_q_sym, [data_np], [expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward
check_symbolic_backward(gr_q_sym, [data_np], [np.ones(expected.shape)],
[backward_expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
def test_multibox_prior_op():
h = 561
w = 728
X = mx.nd.random.uniform(shape=(1, 3, h, w))
Y = mx.contrib.nd.MultiBoxPrior(X, sizes=[0.75, 0.5, 0.25], ratios=[1, 2, 0.5])
assert_array_equal(Y.shape, np.array((1, 2042040, 4)))
boxes = Y.reshape((h, w, 5, 4))
assert_allclose(boxes.asnumpy()[250, 250, 0, :], np.array([0.055117, 0.071524, 0.63307 , 0.821524]), atol=1e-5, rtol=1e-5)
# relax first ratio if user insists
Y = mx.contrib.nd.MultiBoxPrior(X, sizes=[0.75, 0.5, 0.25], ratios=[20, 2, 0.5])
boxes = Y.reshape((h, w, 5, 4))
assert_allclose(boxes.asnumpy()[250, 250, 0, :], np.array([-0.948249, 0.362671, 1.636436, 0.530377]), atol=1e-5, rtol=1e-5)
def test_box_encode_op():
anchors = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]).reshape((1, -1, 4))
refs = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]).reshape((1, -1, 4))
samples = mx.nd.array([[0, 1]])
matches = mx.nd.array([[0, 1]])
means = mx.nd.array([0.0, 0.0, 0.0, 0.0])
stds = mx.nd.array([0.1, 0.1, 0.2, 0.2])
Y, mask = mx.nd.contrib.box_encode(samples, matches, anchors, refs, means, stds)
assert_allclose(Y.asnumpy(), np.zeros((1, 2, 4)), atol=1e-5, rtol=1e-5)
assert_allclose(mask.asnumpy(), np.array([[[0., 0., 0., 0.], [1., 1., 1., 1.]]]), atol=1e-5, rtol=1e-5)
def test_box_decode_op():
data = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]).reshape((1, -1, 4))
anchors = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]).reshape((1, -1, 4))
Y = mx.nd.contrib.box_decode(data, anchors, .1, .1, .2, .2)
assert_allclose(Y.asnumpy(), np.array([[[-0.0562755, -0.00865743, 0.26227552, 0.42465743], \
[0.13240421, 0.17859563, 0.93759584, 1.1174043 ]]]), atol=1e-5, rtol=1e-5)
@with_seed()
def test_op_mrcnn_mask_target():
if default_context().device_type != 'gpu':
return
num_rois = 2
num_classes = 4
mask_size = (3, 3)
ctx = mx.gpu(0)
# (B, N, 4)
rois = mx.nd.array([[[2.3, 4.3, 2.2, 3.3],
[3.5, 5.5, 0.9, 2.4]]], ctx=ctx)
gt_masks = mx.nd.arange(0, 4*32*32, ctx=ctx).reshape(1, 4, 32, 32)
# (B, N)
matches = mx.nd.array([[2, 0]], ctx=ctx)
# (B, N)
cls_targets = mx.nd.array([[2, 1]], ctx=ctx)
mask_targets, mask_cls = mx.nd.contrib.mrcnn_mask_target(rois, gt_masks, matches, cls_targets,
num_rois=num_rois,
num_classes=num_classes,
mask_size=mask_size)
# Ground truth outputs were generated with GluonCV's target generator
# gluoncv.model_zoo.mask_rcnn.MaskTargetGenerator(1, num_rois, num_classes, mask_size)
gt_mask_targets = mx.nd.array([[[[[2193.4 , 2193.7332 , 2194.0667 ],
[2204.0667 , 2204.4 , 2204.7334 ],
[2214.7334 , 2215.0667 , 2215.4 ]],
[[2193.4 , 2193.7332 , 2194.0667 ],
[2204.0667 , 2204.4 , 2204.7334 ],
[2214.7334 , 2215.0667 , 2215.4 ]],
[[2193.4 , 2193.7332 , 2194.0667 ],
[2204.0667 , 2204.4 , 2204.7334 ],
[2214.7334 , 2215.0667 , 2215.4 ]],
[[2193.4 , 2193.7332 , 2194.0667 ],
[2204.0667 , 2204.4 , 2204.7334 ],
[2214.7334 , 2215.0667 , 2215.4 ]]],
[[[ 185. , 185.33334, 185.66667],
[ 195.66667, 196.00002, 196.33334],
[ 206.33333, 206.66666, 207. ]],
[[ 185. , 185.33334, 185.66667],
[ 195.66667, 196.00002, 196.33334],
[ 206.33333, 206.66666, 207. ]],
[[ 185. , 185.33334, 185.66667],
[ 195.66667, 196.00002, 196.33334],
[ 206.33333, 206.66666, 207. ]],
[[ 185. , 185.33334, 185.66667],
[ 195.66667, 196.00002, 196.33334],
[ 206.33333, 206.66666, 207. ]]]]])
gt_mask_cls = mx.nd.array([[0,0,1,0], [0,1,0,0]])
gt_mask_cls = gt_mask_cls.reshape(1,2,4,1,1).broadcast_axes(axis=(3,4), size=(3,3))
assert_almost_equal(mask_targets.asnumpy(), gt_mask_targets.asnumpy())
assert_almost_equal(mask_cls.asnumpy(), gt_mask_cls.asnumpy())
if __name__ == '__main__':
import nose
nose.runmodule()
|
import sys
import cv2
import helper as hp
class MSP():
name = "MSP"
def __init__(self):
self.__patterns_num = []
self.__patterns_sym = []
self.__labels_num = []
self.__labels_sym = []
msp_num, msp_sym = "msp/num", "msp/sym"
self.__load_num_patterns(msp_num)
self.__load_sym_patterns(msp_sym)
        print 'MSP patterns loaded'
def __load_num_patterns(self, input_dir):
paths = hp.get_paths(input_dir)
self.__patterns_num = [hp.get_gray_image(input_dir, path) for path in paths]
self.__labels_num = [hp.get_test(path, "num")[0] for path in paths]
def __load_sym_patterns(self, input_dir):
paths = hp.get_paths(input_dir)
self.__patterns_sym = [hp.get_gray_image(input_dir, path) for path in paths]
self.__labels_sym = [hp.get_test(path, "sym")[0] for path in paths]
def __get_mode(self, mode):
if mode == "num":
return self.__labels_num, self.__patterns_num
elif mode == "sym":
return self.__labels_sym, self.__patterns_sym
    def rec(self, img, mode):
        # Nearest-pattern match: return the label of the stored pattern
        # that differs from the input image in the fewest pixels.
        best_diff, rec = sys.maxint, 0
        labels, patterns = self.__get_mode(mode)
        for pattern, label in zip(patterns, labels):
            diff = cv2.countNonZero(pattern - img)
            if diff < best_diff:
                best_diff, rec = diff, label
        return rec
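# Illustrative usage sketch (hypothetical paths; assumes the "msp/num" and
# "msp/sym" pattern directories and a matching grayscale input image exist):
#
#   msp = MSP()
#   digit = msp.rec(hp.get_gray_image("samples", "3.png"), "num")
#   print digit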
|