repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
RedHatQE/pulp-automation | pulp_auto/task.py | 2 | 5632 | import item, time, hasdata
from item import (Item, GroupItem)
from pulp_auto.pulp import Request
from pulp_auto import strip_url
# Task lifecycle state names as reported by the Pulp tasks API
TASK_WAITING_STATE = 'waiting'
TASK_READY_STATE = 'ready'
TASK_RUNNING_STATE = 'running'
TASK_FINISHED_STATE = 'finished'
TASK_ERROR_STATE = 'error'
TASK_CANCELLED_STATE = 'cancelled'
class TaskError(AssertionError):
    '''Base class for task-related failures.'''

    def __init__(self, *args, **kvs):
        '''Remember the task this failure relates to (keyword arg ``task``).'''
        self.task = kvs.pop('task', None)
        super(TaskError, self).__init__(*args, **kvs)

    def __str__(self):
        # append the offending task to the base exception message
        base = super(TaskError, self).__str__()
        return "%s: %s" % (base, self.task)
class TaskFailure(TaskError):
    '''Raised when a task finished in one of its error states.'''
class TaskTimeoutError(TaskError):
    '''Raised when waiting for a task exceeded the allowed timeout.'''
class AbstractTask(object):
    '''Common task-polling behaviour; subclasses provide reload() and data.'''
    # current task state name; None until the first reload
    state = None
    # states in which the task is still in progress
    active_states = []
    # states in which the task has finished (successfully or not)
    end_states = []
    # subset of end states that signal failure
    error_states = []

    def update(self, pulp):
        '''an abstract update does nothing'''
        pass

    def wait(self, pulp, timeout=120, frequency=0.5):
        '''Poll the task every `frequency` seconds until it reaches one of
        self.end_states, it disappears from the server, or `timeout`
        seconds elapse.

        Note: the first state check happens only after one sleep interval.

        :raises TaskTimeoutError: task still not in an end state after timeout
        :raises TaskFailure: task ended in one of self.error_states
        '''
        deadline = time.time() + timeout
        while time.time() <= deadline:
            time.sleep(frequency)
            try:
                self.reload(pulp)
            except AssertionError:
                # task gone --- no need to wait anymore
                break
            if self.state in self.end_states:
                break
        else:
            raise TaskTimeoutError('Waiting exceeded %r second(s)' % timeout, task=self)
        if self.state in self.error_states:
            raise TaskFailure('Task failed: %r' % self.data['error'], task=self)
class TaskDetails(hasdata.HasData):
    '''Data view over a single Pulp task record (contract from hasdata.HasData).'''
    # timestamp format used by the Pulp API
    time_format = '%Y-%m-%dT%H:%M:%SZ'
    # keys of self.data this view exposes
    relevant_data_keys = [
        "error",
        "state",
        "task_id",
        "progress_report",
        "result",
        "exception",
        "traceback",
        "start_time",
        "finish_time",
        "tags"
    ]
    # keys that must always be present in self.data
    required_data_keys = ['task_id', 'state']
    # states in which the task is still in progress
    active_states = [TASK_RUNNING_STATE, TASK_WAITING_STATE]
    # terminal states; both spellings of cancelled are accepted
    end_states = [TASK_FINISHED_STATE, TASK_ERROR_STATE, TASK_CANCELLED_STATE, 'canceled']
    # terminal states that signal failure
    error_states = [TASK_ERROR_STATE]

    @property
    def state(self):
        '''current task state string'''
        return self.data['state']

    @property
    def id(self):
        '''the task's unique identifier'''
        return self.data['task_id']

    @id.setter
    def id(self, other):
        self.data['task_id'] = other

    @property
    def start_time(self):
        # return data['start_time'] as a timestruct or None
        # (falsy values such as None/'' are passed through unchanged)
        return self.data['start_time'] and time.strptime(self.data['start_time'], self.time_format)

    @property
    def finish_time(self):
        # return data['finish_time'] as timestruct or None
        return self.data['finish_time'] and time.strptime(self.data['finish_time'], self.time_format)
class Task(TaskDetails, AbstractTask, Item):
    '''an item-view task'''
    path = '/tasks/'

    @classmethod
    def wait_for_response(cls, pulp, response, timeout=120):
        '''a shortcut for from_response & wait; returns the task(s) waited for'''
        ret = cls.from_response(response)
        if isinstance(ret, list):
            # more than one task pending
            for task in ret:
                task.wait(pulp, timeout=timeout)
        else:
            ret.wait(pulp, timeout=timeout)
        return ret

    @classmethod
    def from_report(cls, pulp, report):
        '''Report-based constructor.

        Every asynchronous call returns a call report object whose
        'spawned_tasks' field lists the tasks it started; each of those
        tasks may in turn have spawned tasks of its own, which are
        collected recursively.
        '''
        data = report.json()
        assert 'spawned_tasks' in data, 'invalid report data: %s' % data
        reported_tasks = data['spawned_tasks']
        if not reported_tasks:
            return []
        ret = []
        for reported_task in reported_tasks:
            response = pulp.send(Request('GET', strip_url(reported_task['_href'])))
            assert pulp.is_ok, response.reason
            # use cls (not Task) so subclasses construct their own instances
            task = cls.from_response(response)
            ret.append(task)
            if 'spawned_tasks' in task.data:
                # recurse into the spawned tasks
                ret += cls.from_report(pulp, response)
        return ret

    @classmethod
    def from_call_report_data(cls, data):
        '''older interface; event-listeners still use this to report tasks'''
        assert 'call_report' in data, 'invalid data format: %s' % data
        return cls(data['call_report'])

    @classmethod
    def wait_for_report(cls, pulp, response, timeout=300):
        '''wait for all tasks spawned by the report; returns the tasks'''
        tasks = cls.from_report(pulp, response)
        for task in tasks:
            task.wait(pulp, timeout=timeout)
        return tasks

    @classmethod
    def wait_for_reports(cls, pulp, responses, timeout=300):
        '''A wrapper for multiple task report waiting.

        May take up to sum of the individual task durations; a single
        exception breaks the loop.
        '''
        for response in responses:
            cls.wait_for_report(pulp, response, timeout)
# Example of a task resource as returned by the Pulp v2 REST API; kept for
# reference and tests.
TASK_DATA_EXAMPLE = {
    "_href": "/pulp/api/v2/tasks/0fe4fcab-a040-11e1-a71c-00508d977dff/",
    "state": "running",
    "task_id": "0fe4fcab-a040-11e1-a71c-00508d977dff",
    "progress": {},  # contents depend on the operation
    "result": None,
    "start_time": "2012-05-17T16:48:00Z",
    "finish_time": None,
    "tags": [
        "pulp:repository:f16",
        "pulp:action:sync"
    ],
    "spawned_tasks": [{"href": "/pulp/api/v2/tasks/7744e2df-39b9-46f0-bb10-feffa2f7014b/",
                       "task_id": "7744e2df-39b9-46f0-bb10-feffa2f7014b"}],
    "error": None
}
| gpl-2.0 |
darkforestzero/buck | third-party/py/pex/pex/link.py | 36 | 3733 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import os
import posixpath
from collections import Iterable
from .compatibility import string as compatible_string
from .compatibility import PY3
from .util import Memoizer
if PY3:
import urllib.parse as urlparse
else:
import urlparse
class Link(object):
    """Wrapper around a URL."""

    @classmethod
    def wrap(cls, url):
        """Given a url that is either a string or :class:`Link`, return a :class:`Link`.

        :param url: A string-like or :class:`Link` object to wrap.
        :returns: A :class:`Link` object wrapping the url.
        """
        if isinstance(url, cls):
            return url
        elif isinstance(url, compatible_string):
            return cls(url)
        else:
            raise ValueError('url must be either a string or Link.')

    @classmethod
    def wrap_iterable(cls, url_or_urls):
        """Given a string or :class:`Link` or iterable, return an iterable of :class:`Link` objects.

        :param url_or_urls: A string or :class:`Link` object, or iterable of string or :class:`Link`
                            objects.
        :returns: A list of :class:`Link` objects.
        """
        # first try to treat the argument as a single url ...
        try:
            return [cls.wrap(url_or_urls)]
        except ValueError:
            pass
        # ... then fall back to treating it as an iterable of urls
        if isinstance(url_or_urls, Iterable):
            return [cls.wrap(url) for url in url_or_urls]
        raise ValueError('url_or_urls must be string/Link or iterable of strings/Links')

    @classmethod
    def _normalize(cls, filename):
        # expand '~' and resolve symlinks so the same file always maps to the
        # same 'file://' url
        return 'file://' + os.path.realpath(os.path.expanduser(filename))

    # A cache for the result of from_filename
    _FROM_FILENAME_CACHE = Memoizer()

    @classmethod
    def from_filename(cls, filename):
        """Return a :class:`Link` wrapping the local filename."""
        result = cls._FROM_FILENAME_CACHE.get(filename)
        if result is None:
            result = cls(cls._normalize(filename))
            cls._FROM_FILENAME_CACHE.store(filename, result)
        return result

    def __init__(self, url):
        """Construct a :class:`Link` from a url.

        :param url: A string-like object representing a url.
        """
        purl = urlparse.urlparse(url)
        if purl.scheme == '':
            # no scheme: treat the url as a local filename
            # NOTE(review): fragments are disabled for local/file urls only,
            # while remote urls keep them (e.g. '#egg=...') — confirm intended.
            purl = urlparse.urlparse(self._normalize(url), allow_fragments=False)
        elif purl.scheme == 'file':
            purl = urlparse.urlparse(url, allow_fragments=False)
        self._url = purl

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, link):
        return self.__class__ == link.__class__ and self._url == link._url

    def __hash__(self):
        return hash(self._url)

    def join(self, href):
        """Given a href relative to this link, return the :class:`Link` of the absolute url.

        :param href: A string-like path relative to this link.
        """
        return self.wrap(urlparse.urljoin(self.url, href))

    @property
    def filename(self):
        """The basename of this url."""
        return posixpath.basename(self._url.path)

    @property
    def path(self):
        """The full path of this url with any hostname and scheme components removed."""
        return self._url.path

    @property
    def url(self):
        """The url string to which this link points."""
        return urlparse.urlunparse(self._url)

    @property
    def fragment(self):
        """The url fragment following '#' if any."""
        return self._url.fragment

    @property
    def scheme(self):
        """The URI scheme used by this Link."""
        return self._url.scheme

    @property
    def local(self):
        """Is the url a local file?"""
        return self._url.scheme in ('', 'file')

    @property
    def remote(self):
        """Is the url a remote file?"""
        return self._url.scheme in ('http', 'https')

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.url)
| apache-2.0 |
niltonlk/nest-simulator | doc/userdoc/guides/spatial/user_manual_scripts/layers.py | 17 | 11076 | # -*- coding: utf-8 -*-
#
# layers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Run as python3 layers.py > layers.log
import matplotlib.pyplot as plt
import nest
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(1234567)
def beautify_layer(layer, fig=None, xlabel=None, ylabel=None,
                   xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
    """Decorate a layer plot with grid, ticks, limits and axis labels.

    Assume either both x and y lims/ticks are given or none of them.

    :param layer: NEST spatial population whose ``spatial`` metadata is read
    :param fig: matplotlib figure to decorate; defaults to the *current*
        figure at call time. (The previous default ``fig=plt.gcf()`` was
        evaluated once at import time, so every defaulted call was bound to
        whatever figure existed when the module was loaded.)
    :param dx, dy: extra margins used when limits are derived from the extent
    """
    if fig is None:
        fig = plt.gcf()
    ctr = layer.spatial['center']
    ext = layer.spatial['extent']

    if xticks is None:
        if 'shape' in layer.spatial:
            # one tick per grid row/column, centered on the node positions
            dx = float(ext[0]) / layer.spatial['shape'][0]
            dy = float(ext[1]) / layer.spatial['shape'][1]
            xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
                layer.spatial['shape'][0])
            yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
                layer.spatial['shape'][1])

    if xlim is None:
        xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
            0] / 2. + dx / 2.]  # extra space so extent is visible
        ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
    else:
        ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]

    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_aspect('equal', 'box')
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.grid(True)
    ax.set_axisbelow(True)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return
# --------------------------------------------------
# layer1: minimal 5x5 grid layer
nest.ResetKernel()

#{ layer1 #}
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(shape=[5, 5]))
#{ end #}

fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
    # annotate row and column indices along the grid edges
    tx.append(ax.text(0.65, 0.4 - r * 0.2, str(r),
                      horizontalalignment='center',
                      verticalalignment='center'))
    tx.append(ax.text(-0.4 + r * 0.2, 0.65, str(r),
                      horizontalalignment='center',
                      verticalalignment='center'))

# For bbox_extra_artists, see
# https://github.com/matplotlib/matplotlib/issues/351
# plt.savefig('../user_manual_figures/layer1.png', bbox_inches='tight',
#             bbox_extra_artists=tx)

print("#{ layer1s.log #}")
#{ layer1s #}
print(layer.spatial)
#{ end #}
print("#{ end.log #}")

print("#{ layer1p.log #}")
#{ layer1p #}
nest.PrintNodes()
#{ end #}
print("#{ end.log #}")

# --------------------------------------------------
# layer2: grid layer with a custom (non-square) extent
nest.ResetKernel()

#{ layer2 #}
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(
                        shape=[5, 5],
                        extent=[2.0, 0.5]))
#{ end #}

fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
    # annotate row and column indices along the grid edges
    tx.append(fig.gca().text(1.25, 0.2 - r * 0.1, str(r),
                             horizontalalignment='center',
                             verticalalignment='center'))
    tx.append(fig.gca().text(-0.8 + r * 0.4, 0.35, str(r),
                             horizontalalignment='center',
                             verticalalignment='center'))

# See https://github.com/matplotlib/matplotlib/issues/351
plt.savefig('../user_manual_figures/layer2.png', bbox_inches='tight',
            bbox_extra_artists=tx)

# --------------------------------------------------
# layer3: three grid layers with different centers in one figure
nest.ResetKernel()

#{ layer3 #}
layer1 = nest.Create('iaf_psc_alpha',
                     positions=nest.spatial.grid(shape=[5, 5]))
layer2 = nest.Create('iaf_psc_alpha',
                     positions=nest.spatial.grid(
                         shape=[5, 5],
                         center=[-1., 1.]))
layer3 = nest.Create('iaf_psc_alpha',
                     positions=nest.spatial.grid(
                         shape=[5, 5],
                         center=[1.5, 0.5]))
#{ end #}

fig = nest.PlotLayer(layer1, nodesize=50)
nest.PlotLayer(layer2, nodesize=50, nodecolor='g', fig=fig)
nest.PlotLayer(layer3, nodesize=50, nodecolor='r', fig=fig)
beautify_layer(layer1, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
               xlim=[-1.6, 2.1], ylim=[-0.6, 1.6],
               xticks=np.arange(-1.4, 2.05, 0.2),
               yticks=np.arange(-0.4, 1.45, 0.2))
plt.savefig('../user_manual_figures/layer3.png', bbox_inches='tight')
# --------------------------------------------------
# layer3a: grid layer whose extent/center are derived from the grid spacing
nest.ResetKernel()

#{ layer3a #}
nx, ny = 5, 3
d = 0.1
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(
                        shape=[nx, ny],
                        extent=[nx * d, ny * d],
                        center=[nx * d / 2., 0.]))
#{ end #}

fig = nest.PlotLayer(layer, nodesize=100)
# mark the origin (x) and the layer center (o)
plt.plot(0, 0, 'x', markersize=20, c='k', mew=3)
plt.plot(nx * d / 2, 0, 'o', markersize=20, c='k', mew=3, mfc='none',
         zorder=100)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
               xticks=np.arange(0., 0.501, 0.05),
               yticks=np.arange(-0.15, 0.151, 0.05),
               xlim=[-0.05, 0.55], ylim=[-0.2, 0.2])
plt.savefig('../user_manual_figures/layer3a.png', bbox_inches='tight')

# --------------------------------------------------
# layer4: free layer with uniformly random positions
nest.ResetKernel()

#{ layer4 #}
pos = nest.spatial.free(pos=nest.random.uniform(min=-0.5, max=0.5),
                        num_dimensions=2)
layer = nest.Create('iaf_psc_alpha', 50,
                    positions=pos)
#{ end #}

fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
               xlim=[-0.55, 0.55], ylim=[-0.55, 0.55],
               xticks=[-0.5, 0., 0.5], yticks=[-0.5, 0., 0.5])
plt.savefig('../user_manual_figures/layer4.png', bbox_inches='tight')

# --------------------------------------------------
# layer4b: free layer with explicitly listed positions
nest.ResetKernel()

#{ layer4b #}
pos = nest.spatial.free(pos=[[-0.5, -0.5], [-0.25, -0.25], [0.75, 0.75]])
layer = nest.Create('iaf_psc_alpha', positions=pos)
#{ end #}

fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
               xlim=[-0.55, 0.80], ylim=[-0.55, 0.80],
               xticks=[-0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.],
               yticks=[-0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.])
plt.savefig('../user_manual_figures/layer4b.png', bbox_inches='tight')

# --------------------------------------------------
# layer4_3d: random free layer in three dimensions
nest.ResetKernel()

#{ layer4_3d #}
pos = nest.spatial.free(nest.random.uniform(min=-0.5, max=0.5),
                        num_dimensions=3)
layer = nest.Create('iaf_psc_alpha', 200, positions=pos)
#{ end #}

fig = nest.PlotLayer(layer, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d.png', bbox_inches='tight')

# --------------------------------------------------
# layer4_3d_b: three-dimensional grid layer
nest.ResetKernel()

#{ layer4_3d_b #}
pos = nest.spatial.grid(shape=[4, 5, 6])
layer = nest.Create('iaf_psc_alpha', positions=pos)
#{ end #}

fig = nest.PlotLayer(layer, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d_b.png', bbox_inches='tight')
# --------------------------------------------------
# player: periodic boundary conditions (edge_wrap) illustrated on a line
# and on the equivalent ring topology
nest.ResetKernel()

#{ player #}
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(
                        shape=[5, 1],
                        extent=[5., 1.],
                        edge_wrap=True))
#{ end #}

# fake plot with layer on line and circle
clist = [(0, 0, 1), (0.35, 0, 1), (0.6, 0, 1), (0.8, 0, 1), (1.0, 0, 1)]
fig = plt.figure()

# top left: nodes on a line, each with its own color
ax1 = fig.add_subplot(221)
ax1.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1.scatter(range(1, 6), [0] * 5, s=200, c=clist)
ax1.set_xlim([0, 6])
ax1.set_ylim([-0.5, 1.25])
ax1.set_aspect('equal', 'box')
ax1.set_xticks([])
ax1.set_yticks([])
for j in range(1, 6):
    ax1.text(j, 0.5, str('(%d,0)' % (j - 3)),
             horizontalalignment='center', verticalalignment='bottom')

# bottom left: same line, colors mirrored to show wrapped distances
ax1a = fig.add_subplot(223)
ax1a.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1a.scatter(range(1, 6), [0] * 5, s=200,
             c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax1a.set_xlim([0, 6])
ax1a.set_ylim([-0.5, 1.25])
ax1a.set_aspect('equal', 'box')
ax1a.set_xticks([])
ax1a.set_yticks([])
for j in range(1, 6):
    ax1a.text(j, 0.5, str('(%d,0)' % (j - 3)),
              horizontalalignment='center', verticalalignment='bottom')

# right: the wrapped layer drawn as a ring
ax2 = fig.add_subplot(122)
phic = np.arange(0., 2 * np.pi + 0.5, 0.1)
r = 5. / (2 * np.pi)
ax2.plot(r * np.cos(phic), r * np.sin(phic), 'k-', lw=2)
phin = np.arange(0., 4.1, 1.) * 2 * np.pi / 5
ax2.scatter(r * np.sin(phin), r * np.cos(phin), s=200,
            c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax2.set_xlim([-1.3, 1.3])
ax2.set_ylim([-1.2, 1.2])
ax2.set_aspect('equal', 'box')
ax2.set_xticks([])
ax2.set_yticks([])
for j in range(5):
    ax2.text(1.4 * r * np.sin(phin[j]), 1.4 * r * np.cos(phin[j]),
             str('(%d,0)' % (j + 1 - 3)),
             horizontalalignment='center', verticalalignment='center')

plt.savefig('../user_manual_figures/player.png', bbox_inches='tight')

# --------------------------------------------------
# layer6: spatial layers of different model types
nest.ResetKernel()

#{ layer6 #}
layer1 = nest.Create('iaf_cond_alpha',
                     positions=nest.spatial.grid(shape=[2, 1]))
layer2 = nest.Create('poisson_generator',
                     positions=nest.spatial.grid(shape=[2, 1]))
#{ end #}

print("#{ layer6 #}")
nest.PrintNodes()
print("#{ end #}")
# --------------------------------------------------
# vislayer: distance-dependent connectivity and its visualization
nest.ResetKernel()

#{ vislayer #}
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(shape=[21, 21]))
probability_param = nest.spatial_distributions.gaussian(nest.spatial.distance, std=0.15)
conndict = {'rule': 'pairwise_bernoulli',
            'p': probability_param,
            'mask': {'circular': {'radius': 0.4}}}
nest.Connect(layer, layer, conndict)
fig = nest.PlotLayer(layer, nodesize=80)
ctr = nest.FindCenterElement(layer)
nest.PlotTargets(ctr, layer, fig=fig,
                 mask=conndict['mask'], probability_parameter=probability_param,
                 src_size=250, tgt_color='red', tgt_size=20, mask_color='red',
                 probability_cmap='Greens')
#{ end #}
plt.savefig('../user_manual_figures/vislayer.png', bbox_inches='tight')
| gpl-2.0 |
kaplun/invenio | modules/bibfield/lib/bibfield_config_engine.py | 15 | 27538 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibField configuration loader
This module uses pyparsing to read from the configuration file all the rules.
http://pyparsing.wikispaces.com/
"""
import os
import re
import six
from pyparsing import ParseException, FollowedBy, Suppress, OneOrMore, Word, \
LineEnd, ZeroOrMore, Optional, Literal, alphas, alphanums, \
originalTextFor, oneOf, nestedExpr, quotedString, removeQuotes, lineEnd, \
empty, col, restOfLine, delimitedList, Each, indentedBlock, QuotedString
from invenio.config import CFG_ETCDIR
from invenio.importutils import try_to_eval
def _create_field_parser():
    """
    Creates a parser using pyparsing that works with bibfield rule definitions

    BNF like grammar:

    rule ::= ([persitent_identifier] json_id ["[0]" | "[n]"] "," aliases":" INDENT body UNDENT) | include | python_comment
    include ::= "include(" PATH ")"
    body ::= [inherit_from] (creator | derived | calculated) [checker] [documentation] [producer]
    aliases ::= json_id ["[0]" | "[n]"] ["," aliases]

    creator ::= "creator:" INDENT creator_body+ UNDENT
    creator_body ::= [decorators] source_format "," source_tag "," python_allowed_expr
    source_format ::= MASTER_FORMATS
    source_tag ::= QUOTED_STRING

    derived ::= "derived" INDENT derived_calculated_body UNDENT
    calculated ::= "calculated:" INDENT derived_calculated_body UNDENT
    derived_calculated_body ::= [decorators] "," python_allowed_exp

    decorators ::= (peristent_identfier | legacy | do_not_cache | parse_first | depends_on | only_if | only_if_master_value)*
    peristent_identfier ::= @persitent_identifier( level )
    legacy ::= "@legacy(" correspondences+ ")"
    correspondences ::= "(" source_tag [ "," tag_name ] "," json_id ")"
    parse_first ::= "@parse_first(" jsonid+ ")"
    depends_on ::= "@depends_on(" json_id+ ")"
    only_if ::= "@only_if(" python_condition+ ")"
    only_if_master_value ::= "@only_if_master_value(" python_condition+ ")"
    inherit_from ::= "@inherit_from()"
    python_allowed_exp ::= ident | list_def | dict_def | list_access | dict_access | function_call

    checker ::= "checker:" INDENT checker_function+ UNDENT

    documentation ::= INDENT doc_string subfield* UNDENT
    doc_string ::= QUOTED_STRING
    subfield ::= "@subfield" json_id["."json_id*] ":" docstring

    producer ::= "producer:" INDENT producer_body UNDENT
    producer_body ::= producer_code "," python_dictionary
    producer_code ::= ident
    """
    # Stack of indentation columns used to emulate Python-style INDENT/UNDENT
    # blocks in the config files.
    indent_stack = [1]

    # NOTE(review): the parse-action helpers below shadow the builtin `str`;
    # kept byte-identical here.
    def check_sub_indent(str, location, tokens):
        # a sub-entry must start in a column deeper than the enclosing entry
        cur_col = col(location, str)
        if cur_col > indent_stack[-1]:
            indent_stack.append(cur_col)
        else:
            raise ParseException(str, location, "not a subentry")

    def check_unindent(str, location, tokens):
        if location >= len(str):
            return
        cur_col = col(location, str)
        if not(cur_col < indent_stack[-1] and cur_col <= indent_stack[-2]):
            raise ParseException(str, location, "not an unindent")

    def do_unindent():
        indent_stack.pop()

    INDENT = lineEnd.suppress() + empty + empty.copy().setParseAction(check_sub_indent)
    UNDENT = FollowedBy(empty).setParseAction(check_unindent)
    UNDENT.setParseAction(do_unindent)

    # Basic building blocks: field names, aliases and python-expression values
    json_id = (Word(alphas + "_", alphanums + "_") + Optional(oneOf("[0] [n]")))\
        .setResultsName("json_id", listAllMatches=True)\
        .setParseAction(lambda tokens: "".join(tokens))
    aliases = delimitedList((Word(alphanums + "_") + Optional(oneOf("[0] [n]")))
                            .setParseAction(lambda tokens: "".join(tokens)))\
        .setResultsName("aliases")
    ident = Word(alphas + "_", alphanums + "_")
    dict_def = originalTextFor(nestedExpr('{', '}'))
    list_def = originalTextFor(nestedExpr('[', ']'))
    dict_access = list_access = originalTextFor(ident + nestedExpr('[', ']'))
    function_call = originalTextFor(ZeroOrMore(ident + ".") + ident + nestedExpr('(', ')'))
    python_allowed_expr = (dict_def ^ list_def ^ dict_access ^ \
                           list_access ^ function_call ^ restOfLine)\
        .setResultsName("value", listAllMatches=True)

    # Per-field decorators
    persistent_identifier = (Suppress("@persistent_identifier") + \
                             nestedExpr("(", ")"))\
        .setResultsName("persistent_identifier")
    legacy = (Suppress("@legacy") + originalTextFor(nestedExpr("(", ")")))\
        .setResultsName("legacy", listAllMatches=True)
    only_if = (Suppress("@only_if") + originalTextFor(nestedExpr("(", ")")))\
        .setResultsName("only_if")
    only_if_master_value = (Suppress("@only_if_value") + \
                            originalTextFor(nestedExpr("(", ")")))\
        .setResultsName("only_if_master_value")
    depends_on = (Suppress("@depends_on") + \
                  originalTextFor(nestedExpr("(", ")")))\
        .setResultsName("depends_on")
    parse_first = (Suppress("@parse_first") + \
                   originalTextFor(nestedExpr("(", ")")))\
        .setResultsName("parse_first")
    memoize = (Suppress("@memoize") + nestedExpr("(", ")"))\
        .setResultsName("memoize")
    field_decorator = parse_first ^ depends_on ^ only_if ^ \
        only_if_master_value ^ memoize ^ legacy

    # Independent decorators
    inherit_from = (Suppress("@inherit_from") + \
                    originalTextFor(nestedExpr("(", ")")))\
        .setResultsName("inherit_from")
    override = (Suppress("@") + "override")\
        .setResultsName("override")
    extend = (Suppress("@") + "extend")\
        .setResultsName("extend")
    master_format = (Suppress("@master_format") + \
                     originalTextFor(nestedExpr("(", ")")))\
        .setResultsName("master_format") \
        .setParseAction(lambda toks: toks[0])

    # Field bodies: creator / derived / calculated variants
    derived_calculated_body = (ZeroOrMore(field_decorator) + python_allowed_expr)\
        .setResultsName('derived_calculated_def')
    derived = "derived" + Suppress(":") + \
        INDENT + derived_calculated_body + UNDENT
    calculated = "calculated" + Suppress(":") + \
        INDENT + derived_calculated_body + UNDENT

    source_tag = quotedString\
        .setParseAction(removeQuotes)\
        .setResultsName("source_tag", listAllMatches=True)
    source_format = Word(alphas, alphanums + "_")\
        .setResultsName("source_format", listAllMatches=True)
    creator_body = (ZeroOrMore(field_decorator) + source_format + \
                    Suppress(",") + source_tag + Suppress(",") + python_allowed_expr)\
        .setResultsName("creator_def", listAllMatches=True)
    creator = "creator" + Suppress(":") + \
        INDENT + OneOrMore(creator_body) + UNDENT

    field_def = (creator | derived | calculated)\
        .setResultsName("type_field", listAllMatches=True)

    # JsonExtra
    json_dumps = (Suppress('dumps') + Suppress(',') + python_allowed_expr)\
        .setResultsName("dumps")\
        .setParseAction(lambda toks: toks.value[0])
    json_loads = (Suppress("loads") + Suppress(",") + python_allowed_expr)\
        .setResultsName("loads")\
        .setParseAction(lambda toks: toks.value[0])
    json_extra = (Suppress('json:') + \
                  INDENT + Each((json_dumps, json_loads)) + UNDENT)\
        .setResultsName('json_ext')

    # Checker
    checker_function = (Optional(master_format) + ZeroOrMore(ident + ".") + ident + originalTextFor(nestedExpr('(', ')')))\
        .setResultsName("checker", listAllMatches=True)
    checker = ("checker" + Suppress(":") + INDENT + OneOrMore(checker_function) + UNDENT)

    # Description/Documentation
    doc_double = QuotedString(quoteChar='"""', multiline=True)
    doc_single = QuotedString(quoteChar="'''", multiline=True)
    doc_string = INDENT + (doc_double | doc_single) + UNDENT
    description_body = (Suppress('description:') + doc_string).\
        setParseAction(lambda toks: toks[0][0])
    description = (description_body | doc_double | doc_single)\
        .setResultsName('description')

    # Producer
    producer_code = (Word(alphas, alphanums + "_")\
                     + originalTextFor(nestedExpr("(", ")")))\
        .setResultsName('producer_code', listAllMatches=True)
    producer_body = (producer_code + Suppress(",") + python_allowed_expr)\
        .setResultsName("producer_rule", listAllMatches=True)
    producer = Suppress("producer:") + INDENT + OneOrMore(producer_body) + UNDENT

    schema = (Suppress('schema:') + INDENT + dict_def + UNDENT)\
        .setParseAction(lambda toks: toks[0])\
        .setResultsName('schema')

    # A rule body is any combination (in any order) of the optional sections
    body = Optional(field_def) & Optional(checker) & Optional(json_extra) \
        & Optional(description) & Optional(producer) & Optional(schema)
    comment = Literal("#") + restOfLine + LineEnd()
    include = (Suppress("include") + quotedString)\
        .setResultsName("includes", listAllMatches=True)
    rule = (Optional(persistent_identifier) + Optional(inherit_from) + \
            Optional(override) + Optional(extend) + json_id + \
            Optional(Suppress(",") + aliases) + Suppress(":") + \
            INDENT + body + UNDENT)\
        .setResultsName("rules", listAllMatches=True)

    return OneOrMore(rule | include | comment.suppress())
class BibFieldParserException(Exception):
    """Raised when parsing of doctype or rule documents fails."""
class BibFieldParser(object):
"""
BibField rule parser
"""
_field_definitions = {}
"""Dictionary containing all the rules needed to create and validate json fields"""
_legacy_field_matchings = {}
"""Dictionary containing matching between the legacy master format and the current json"""
    def __init__(self,
                 base_dir=CFG_ETCDIR + '/bibfield',
                 main_config_file='bibfield.cfg'):
        """
        Creates the parsers for the rules and parses all the
        documents inside base_dir

        @param base_dir: Full path where the configuration files are placed
        @param main_config_file: Name of the main file that contains the rules
                                 to perform the translation
        """
        self.base_dir = base_dir
        self.main_config_file = main_config_file
        # rules decorated with @inherit_from, resolved after the first pass
        self.__inherit_rules = []
        # json ids whose inheritance has not been resolved yet
        self.__unresolved_inheritence = []
        # rules decorated with @override / @extend, applied after the first pass
        self.__override_rules = []
        self.__extend_rules = []
@classmethod
def field_definitions(cls):
if not cls._field_definitions:
cls.reparse()
return cls._field_definitions
@classmethod
def legacy_field_matchings(cls):
if not cls._legacy_field_matchings:
cls.reparse()
return cls._legacy_field_matchings
@classmethod
def reparse(cls):
cls._field_definitions = {}
cls._legacy_field_matchings = {}
cls()._create()
    def _create(self):
        """
        Fills up config_rules dictionary with the rules defined inside the
        configuration file.

        It also resolve the includes present inside the main configuration file
        and recursively the ones in the other files.

        It uses @see: _create_creator_rule() and @see: _create_derived_calculated_rule()
        to fill up config_rules
        """
        parser = _create_field_parser()
        main_rules = parser \
            .parseFile(self.base_dir + '/' + self.main_config_file,
                       parseAll=True)
        rules = main_rules.rules
        includes = main_rules.includes
        already_includes = [self.main_config_file]
        # Resolve includes (note: `includes` may grow while iterating, which
        # is how nested includes get picked up)
        for include in includes:
            if include[0] in already_includes:
                continue
            already_includes.append(include[0])
            if os.path.exists(include[0]):
                tmp = parser.parseFile(include[0], parseAll=True)
            else:
                # CHECK: This will raise an IOError if the file doesn't exist
                tmp = parser.parseFile(self.base_dir + '/' + include[0],
                                       parseAll=True)
            # `rules`/`includes` are pyparsing ParseResults; the truthiness
            # checks guard against concatenating with an empty result
            if rules and tmp.rules:
                rules += tmp.rules
            else:
                rules = tmp.rules
            if includes and tmp.includes:
                includes += tmp.includes
            else:
                includes = tmp.includes
        # Create config rules; decorated rules are queued and resolved after
        # the plain ones have been created
        for rule in rules:
            if rule.override:
                self.__override_rules.append(rule)
            elif rule.extend:
                self.__extend_rules.append(rule)
            elif rule.inherit_from:
                self.__inherit_rules.append(rule)
            else:
                self._create_rule(rule)

        self.__resolve_inherit_rules()
        self.__resolve_override_rules()
        self.__resolve_extend_rules()
def _create_rule(self, rule, override=False, extend=False):
    """
    Creates the field and legacy definitions.
    The result looks like this::

        {key: [key1, key2],
         key1: {inherit_from: [],
                override: True/False,
                extend: True/False,
                aliases: [],
                persistent_identifier: num/None,
                rules: {'master_format_1': [{rule1}, {rule2}, ...],
                        'master_format_2': [....],
                        ......
                        'calculated': [....],
                        'derived': [...]}
               }
        }

    Each of the rule (rule1, rule2, etc.) has the same content::

        {'source_format'       : [translation_rules]/None,
         'parse_first'         : (parse_first_json_ids),
         'depends_on'          : (depends_on_json_id),
         'only_if'             : (only_if_boolean_expressions),
         'only_if_master_value': (only_if_master_value_boolean_expressions),
         'memoize'             : time,
         'value'               : value coming from master format
        }

    :param rule: pyparsing result for one field definition from the config
    :param override: replace parts of an already defined field
    :param extend: add rules/aliases to an already defined field
    :raises BibFieldParserException: on duplicate or unknown field names
    """
    json_id = rule.json_id[0]
    # Check duplicate names: a plain (non-override, non-extend) definition
    # must not collide with an existing one, and override/extend require
    # that the field already exists.
    if json_id in self.__class__._field_definitions and not override and not extend:
        raise BibFieldParserException("Name error: '%s' field name already defined"
                                      % (rule.json_id[0],))
    if not json_id in self.__class__._field_definitions and (override or extend):
        raise BibFieldParserException("Name error: '%s' field name not defined"
                                      % (rule.json_id[0],))

    # Workaround to keep clean doctype files
    # Just creates a dict entry with the main json field name and points it to
    # the full one i.e.: 'authors' : ['authors[0]', 'authors[n]']
    if '[0]' in json_id or '[n]' in json_id:
        main_json_id = re.sub('(\[n\]|\[0\])', '', json_id)
        if not main_json_id in self.__class__._field_definitions:
            self.__class__._field_definitions[main_json_id] = []
        self.__class__._field_definitions[main_json_id].append(json_id)

    aliases = []
    if rule.aliases:
        aliases = rule.aliases.asList()
    persistent_id = None
    if rule.persistent_identifier:
        persistent_id = int(rule.persistent_identifier[0][0])
    inherit_from = None
    if rule.inherit_from:
        # Inheritance is resolved later, once all plain fields exist.
        self.__unresolved_inheritence.append(json_id)
        # NOTE(review): the config value is eval'ed; config files are
        # assumed to be trusted input.
        inherit_from = eval(rule.inherit_from[0])

    if extend:
        # Start from the existing rules so new sources are merged in.
        rules = self.__class__._field_definitions[json_id]['rules']
    else:
        rules = {}

    #TODO: check if pyparsing can handle this!
    # Collect every creator/derived/calculated definition into one list.
    all_type_def = []
    if rule.creator_def:
        all_type_def = [r for r in rule.creator_def]
    if all_type_def and rule.derived_calculated_def:
        all_type_def.append(rule.derived_calculated_def)
    elif rule.derived_calculated_def:
        all_type_def = [rule.derived_calculated_def]

    for r in all_type_def:
        if r.source_format:
            source = r.source_format[0]
            source_tag = r.source_tag[0].split()
        else:
            # derived/calculated definitions have no source tag
            source = rule.type_field[0]
            source_tag = None

        if source not in rules:
            # Allow several tags point to the same json id
            rules[source] = []
        (depends_on, only_if, only_if_master_value,
         parse_first, memoize) = self.__create_decorators_content(r)
        self._create_legacy_rules(r.legacy, json_id, source)

        rules[source].append({'source_tag'          : source_tag,
                              'parse_first'         : parse_first,
                              'depends_on'          : depends_on,
                              'only_if'             : only_if,
                              'only_if_master_value': only_if_master_value,
                              'memoize'             : memoize,
                              # Pre-compile the value expression for later eval
                              'value'               : compile(r.value[0].strip(), '', 'eval'),
                              })

    if override:
        # Merge into the existing definition, keeping old values where the
        # overriding rule does not provide new ones.
        self.__class__._field_definitions[json_id]['override'] = override
        self.__class__._field_definitions[json_id]['rules'].update(rules)
        self.__class__._field_definitions[json_id]['aliases'] = \
            aliases or self.__class__._field_definitions[json_id]['aliases']
        self.__class__._field_definitions[json_id]['persistent_identifier'] = \
            persistent_id or self.__class__._field_definitions[json_id]['persistent_identifier']
        self.__class__._field_definitions[json_id]['inherit_from'] = \
            inherit_from or self.__class__._field_definitions[json_id]['inherit_from']
    elif extend:
        self.__class__._field_definitions[json_id]['extend'] = extend
        self.__class__._field_definitions[json_id]['aliases'].extend(aliases)
    else:
        self.__class__._field_definitions[json_id] = {'inherit_from'         : inherit_from,
                                                      'rules'                : rules,
                                                      'aliases'              : aliases,
                                                      'persistent_identifier': persistent_id,
                                                      'override'             : override,
                                                      'extend'               : extend,
                                                      }

    # Attach the remaining optional sections of the definition.
    self.__create_checker(rule)
    self.__create_description(rule)
    self.__create_producer(rule)
    self.__create_schema(rule)
    self.__create_json_extra(rule)
def _create_legacy_rules(self, legacy_rules, json_id, source_format=None):
    """
    Creates the legacy rules dictionary::

        {'100'   : ['authors[0]'],
         '100__' : ['authors[0]'],
         '100__%': ['authors[0]'],
         '100__a': ['authors[0].full_name'],
         .......
        }

    :param legacy_rules: parsed @legacy stanzas; no-op when empty
    :param json_id: json field name the legacy tags map to
    :param source_format: master format name, or 'derived'/'calculated', in
        which case the real source format is embedded in each legacy rule
    """
    if not legacy_rules:
        return
    for legacy_rule in legacy_rules:
        # NOTE(review): the rule text from the config file is eval'ed;
        # config files are assumed to be trusted input.
        legacy_rule = eval(legacy_rule[0])

        if source_format in ('derived', 'calculated'):
            # First element carries the real source format for this rule.
            inner_source_format = legacy_rule[0]
            legacy_rule = legacy_rule[1:]
        else:
            inner_source_format = source_format

        if not inner_source_format in self.__class__._legacy_field_matchings:
            self.__class__._legacy_field_matchings[inner_source_format] = {}

        for field_legacy_rule in legacy_rule:
            # Allow string and tuple in the config file
            legacy_fields = isinstance(field_legacy_rule[0], basestring) and (field_legacy_rule[0], ) or field_legacy_rule[0]

            json_field = json_id
            if field_legacy_rule[-1]:
                # Last element (when truthy) is a subfield path appended
                # to the json id, e.g. 'authors[0].full_name'.
                json_field = '.'.join((json_field, field_legacy_rule[-1]))
            for legacy_field in legacy_fields:
                if not legacy_field in self.__class__._legacy_field_matchings[inner_source_format]:
                    self.__class__._legacy_field_matchings[inner_source_format][legacy_field] = []
                self.__class__._legacy_field_matchings[inner_source_format][legacy_field].append(json_field)
def __create_checker(self, rule):
    """Store the checker definitions for this field as a list of
    ``(master_format, checker_function_name, arguments)`` tuples."""
    json_id = rule.json_id[0]
    checkers = []
    for checker in rule.checker:
        if checker.master_format:
            # NOTE(review): the guard tests ``checker.master_format`` but
            # ``rule.master_format`` is what gets eval'ed — confirm this
            # is intentional and not a typo.
            master_format = eval(rule.master_format)
            checker_function_name = checker[1]
            # Strip the surrounding parentheses from the argument string.
            arguments = checker[2][1:-1]
        else:
            # No master format given: checker applies to all formats.
            master_format = ('all',)
            checker_function_name = checker[0]
            arguments = checker[1][1:-1]

        checkers.append((master_format, checker_function_name, arguments))

    self.__class__._field_definitions[json_id]['checker'] = checkers
def __create_description(self, rule):
    """Attach the parsed @description content to the field definition."""
    json_id = rule.json_id[0]
    self.__class__._field_definitions[json_id]['description'] = rule.description
def __create_producer(self, rule):
    """Group the @producer stanzas by producer code and store them as
    ``{code: [(params, value), ...]}`` on the field definition."""
    json_id = rule.json_id[0]
    producers = dict()
    for producer in rule.producer_rule:
        if producer.producer_code[0][0] not in producers:
            producers[producer.producer_code[0][0]] = []
        producers[producer.producer_code[0][0]].append(
            (eval(producer.producer_code[0][1]), eval(producer.value[0])))#FIXME: remove eval
    self.__class__._field_definitions[json_id]['producer'] = producers
def __create_schema(self, rule):
    """Attach the parsed @schema content (or an empty dict) to the field."""
    json_id = rule.json_id[0]
    self.__class__._field_definitions[json_id]['schema'] = rule.schema if rule.schema else {}
def __create_json_extra(self, rule):
    """Resolve the optional @json loads/dumps hooks against the registered
    bibfield functions and store them on the field definition."""
    from invenio.bibfield_utils import CFG_BIBFIELD_FUNCTIONS
    json_id = rule.json_id[0]
    if rule.json_ext:
        self.__class__._field_definitions[json_id]['json_ext'] = \
            {'loads': try_to_eval(rule.json_ext.loads.strip(), CFG_BIBFIELD_FUNCTIONS),
             'dumps': try_to_eval(rule.json_ext.dumps.strip(), CFG_BIBFIELD_FUNCTIONS)}
#FIXME: it might be nice to have the decorators also extendable
def __create_decorators_content(self, rule):
    """
    Extract every supported decorator from *rule*.

    Returns a tuple ``(depends_on, only_if, only_if_master_value,
    parse_first, memoize)`` where each absent decorator is ``None``.
    """
    def _first(name):
        # Decorator payloads are pyparsing results: truthy when present,
        # with the interesting content always at index 0.
        content = getattr(rule, name)
        return content[0] if content else None

    memoize = None
    if rule.memoize:
        try:
            memoize = int(rule.memoize[0][0])
        except IndexError:
            memoize = 300  # FIXME: Default value will be used
    return (_first('depends_on'), _first('only_if'),
            _first('only_if_master_value'), _first('parse_first'), memoize)
def __resolve_inherit_rules(self):
    """
    Iterates over all the 'inherit' fields after all the normal field
    creation to avoid problem when creating this rules.
    """
    def resolve_inheritance(json_id):
        # Merge the parent field's rules into this field's rules.
        rule = self.__class__._field_definitions[json_id]
        inherit_from_list = self.__class__._field_definitions[json_id]['inherit_from']
        for inherit_json_id in inherit_from_list:
            # Check if everything is fine
            if inherit_json_id == json_id:
                raise BibFieldParserException("Inheritance from itself")
            if inherit_json_id not in self.__class__._field_definitions:
                raise BibFieldParserException("Unable to solve %s inheritance" % (inherit_json_id,))
            if inherit_json_id in self.__unresolved_inheritence:
                # NOTE(review): this calls ``self._resolve_inheritance``,
                # which does not appear in this file — the local function
                # ``resolve_inheritance`` above may have been intended.
                # Confirm against the original invenio sources.
                self._resolve_inheritance(inherit_json_id)
                self.__unresolved_inheritence.remove(inherit_json_id)
            inherit_rule = self.__class__._field_definitions[inherit_json_id]
            for format in inherit_rule['rules']:
                if not format in rule['rules']:
                    rule['rules'][format] = []
                rule['rules'][format].extend(inherit_rule['rules'][format])
            # rule['checker'].extend(inherit_rule['checker'])

    # First create the fields themselves, then resolve their parents.
    for rule in self.__inherit_rules:
        self._create_rule(rule)

    # Resolve inheritance (iterate backwards so removal by index is safe)
    for i in xrange(len(self.__unresolved_inheritence) - 1, -1, -1):
        resolve_inheritance(self.__unresolved_inheritence[i])
        del self.__unresolved_inheritence[i]
def __resolve_override_rules(self):
    """
    Apply every queued 'override' rule on top of the fields that were
    already created, replacing parts of their definitions.
    """
    for pending_rule in self.__override_rules:
        self._create_rule(pending_rule, override=True)
def __resolve_extend_rules(self):
    """
    Apply every queued 'extend' rule, adding new rules/aliases to the
    definition of each already created field.
    """
    for pending_rule in self.__extend_rules:
        self._create_rule(pending_rule, extend=True)
def guess_legacy_field_names(fields, master_format):
    """
    Using the legacy rules written in the config file (@legacy) tries to find
    the equivalent json field for one or more legacy fields.

    >>> guess_legacy_field_names(('100__a', '245'), 'marc')
    {'100__a':['authors[0].full_name'], '245':['title']}

    :param fields: one legacy field name or an iterable of them
    :param master_format: master format whose legacy mapping is consulted
    :returns: dict mapping each legacy field to a (possibly empty) list of
        json field names
    """
    res = {}
    if isinstance(fields, six.string_types):
        fields = (fields, )
    for field in fields:
        try:
            res[field] = BibFieldParser.legacy_field_matchings()[master_format].get(field, [])
        except KeyError:
            # Unknown master format (no legacy mapping registered): the only
            # expected failure here.  The previous bare ``except:`` silently
            # swallowed *every* exception, including KeyboardInterrupt.
            res[field] = []
    return res
def get_producer_rules(field, code):
    """Return the producer rules registered under *code* for *field*, as a
    list of ``(field_name, rules)`` tuples."""
    rule = BibFieldParser.field_definitions()[field]
    if not isinstance(rule, list):
        # Plain field: a single definition, returned directly.
        return [(field, rule['producer'].get(code, {}))]
    if len(rule) == 1:
        # case field[n]
        only_field = rule[0]
        return [(only_field.replace('[n]', ''),
                 BibFieldParser.field_definitions()[only_field]['producer'].get(code, {}))]
    # case field[1], field[n]
    return [(new_field.replace('[n]', '[1:]'),
             BibFieldParser.field_definitions()[new_field]['producer'].get(code, {}))
            for new_field in rule]
| gpl-2.0 |
kronenpj/python-for-android | pythonforandroid/build.py | 1 | 40354 | from __future__ import print_function
from os.path import (
abspath, join, realpath, dirname, expanduser, exists,
split, isdir
)
from os import environ
import copy
import os
import glob
import sys
import re
import sh
import shutil
import subprocess
from pythonforandroid.util import (
current_directory, ensure_dir, get_virtualenv_executable,
BuildInterruptingException
)
from pythonforandroid.logger import (info, warning, info_notify, info_main, shprint)
from pythonforandroid.archs import ArchARM, ArchARMv7_a, ArchAarch_64, Archx86, Archx86_64
from pythonforandroid.pythonpackage import get_package_name
from pythonforandroid.recipe import CythonRecipe, Recipe
from pythonforandroid.recommendations import (
check_ndk_version, check_target_api, check_ndk_api,
RECOMMENDED_NDK_API, RECOMMENDED_TARGET_API)
class Context(object):
    '''A build context. If anything will be built, an instance this class
    will be instantiated and used to hold all the build state.'''

    # Snapshot of the process environment, used for build subprocesses.
    env = environ.copy()
    # the filepath of toolchain.py
    root_dir = None
    # the root dir where builds and dists will be stored
    storage_dir = None

    # in which bootstraps are copied for building
    # and recipes are built
    build_dir = None
    # the Android project folder where everything ends up
    dist_dir = None
    # where Android libs are cached after build
    # but before being placed in dists
    libs_dir = None
    aars_dir = None

    # Whether setup.py or similar should be used if present:
    use_setup_py = False

    ccache = None  # whether to use ccache
    cython = None  # the cython interpreter name

    ndk_platform = None  # the ndk platform directory

    dist_name = None  # should be deprecated in favour of self.dist.dist_name
    bootstrap = None
    bootstrap_build_dir = None

    recipe_build_order = None  # Will hold the list of all built recipes

    symlink_java_src = False  # If True, will symlink instead of copying during build

    java_build_tool = 'auto'

    @property
    def packages_path(self):
        '''Where packages are downloaded before being unpacked'''
        return join(self.storage_dir, 'packages')

    @property
    def templates_dir(self):
        # Templates shipped alongside the p4a sources.
        return join(self.root_dir, 'templates')

    @property
    def libs_dir(self):
        # Was previously hardcoded as self.build_dir/libs
        dir = join(self.build_dir, 'libs_collections',
                   self.bootstrap.distribution.name)
        ensure_dir(dir)
        return dir

    @property
    def javaclass_dir(self):
        # Was previously hardcoded as self.build_dir/java
        dir = join(self.build_dir, 'javaclasses',
                   self.bootstrap.distribution.name)
        ensure_dir(dir)
        return dir

    @property
    def aars_dir(self):
        # Per-distribution cache of Android .aar archives.
        dir = join(self.build_dir, 'aars', self.bootstrap.distribution.name)
        ensure_dir(dir)
        return dir

    @property
    def python_installs_dir(self):
        # Parent dir of every distribution's python-install tree.
        dir = join(self.build_dir, 'python-installs')
        ensure_dir(dir)
        return dir

    def get_python_install_dir(self):
        # Where this distribution's Python modules are installed.
        dir = join(self.python_installs_dir, self.bootstrap.distribution.name)
        return dir

    def setup_dirs(self, storage_dir):
        '''Calculates all the storage and build dirs, and makes sure
        the directories exist where necessary.'''
        self.storage_dir = expanduser(storage_dir)
        if ' ' in self.storage_dir:
            # Spaces in paths break many of the shell-driven build steps.
            raise ValueError('storage dir path cannot contain spaces, please '
                             'specify a path with --storage-dir')
        self.build_dir = join(self.storage_dir, 'build')
        self.dist_dir = join(self.storage_dir, 'dists')

    def ensure_dirs(self):
        # Create the whole storage layout; safe to call repeatedly.
        ensure_dir(self.storage_dir)
        ensure_dir(self.build_dir)
        ensure_dir(self.dist_dir)

        ensure_dir(join(self.build_dir, 'bootstrap_builds'))
        ensure_dir(join(self.build_dir, 'other_builds'))

    @property
    def android_api(self):
        '''The Android API being targeted.'''
        if self._android_api is None:
            raise ValueError('Tried to access android_api but it has not '
                             'been set - this should not happen, something '
                             'went wrong!')
        return self._android_api

    @android_api.setter
    def android_api(self, value):
        self._android_api = value

    @property
    def ndk_api(self):
        '''The API number compile against'''
        if self._ndk_api is None:
            raise ValueError('Tried to access ndk_api but it has not '
                             'been set - this should not happen, something '
                             'went wrong!')
        return self._ndk_api

    @ndk_api.setter
    def ndk_api(self, value):
        self._ndk_api = value

    @property
    def sdk_dir(self):
        '''The path to the Android SDK.'''
        if self._sdk_dir is None:
            raise ValueError('Tried to access sdk_dir but it has not '
                             'been set - this should not happen, something '
                             'went wrong!')
        return self._sdk_dir

    @sdk_dir.setter
    def sdk_dir(self, value):
        self._sdk_dir = value

    @property
    def ndk_dir(self):
        '''The path to the Android NDK.'''
        if self._ndk_dir is None:
            raise ValueError('Tried to access ndk_dir but it has not '
                             'been set - this should not happen, something '
                             'went wrong!')
        return self._ndk_dir

    @ndk_dir.setter
    def ndk_dir(self, value):
        self._ndk_dir = value

    def prepare_build_environment(self,
                                  user_sdk_dir,
                                  user_ndk_dir,
                                  user_android_api,
                                  user_ndk_api):
        '''Checks that build dependencies exist and sets internal variables
        for the Android SDK etc.

        ..warning:: This *must* be called before trying any build stuff

        '''
        self.ensure_dirs()

        if self._build_env_prepared:
            return

        ok = True

        # Work out where the Android SDK is
        sdk_dir = None
        if user_sdk_dir:
            sdk_dir = user_sdk_dir
        # This is the old P4A-specific var
        if sdk_dir is None:
            sdk_dir = environ.get('ANDROIDSDK', None)
        # This seems used more conventionally
        if sdk_dir is None:
            sdk_dir = environ.get('ANDROID_HOME', None)
        # Checks in the buildozer SDK dir, useful for debug tests of p4a
        if sdk_dir is None:
            possible_dirs = glob.glob(expanduser(join(
                '~', '.buildozer', 'android', 'platform', 'android-sdk-*')))
            possible_dirs = [d for d in possible_dirs if not
                             (d.endswith('.bz2') or d.endswith('.gz'))]
            if possible_dirs:
                info('Found possible SDK dirs in buildozer dir: {}'.format(
                    ', '.join([d.split(os.sep)[-1] for d in possible_dirs])))
                info('Will attempt to use SDK at {}'.format(possible_dirs[0]))
                warning('This SDK lookup is intended for debug only, if you '
                        'use python-for-android much you should probably '
                        'maintain your own SDK download.')
                sdk_dir = possible_dirs[0]
        if sdk_dir is None:
            raise BuildInterruptingException('Android SDK dir was not specified, exiting.')
        self.sdk_dir = realpath(sdk_dir)

        # Check what Android API we're using
        android_api = None
        if user_android_api:
            android_api = user_android_api
            info('Getting Android API version from user argument: {}'.format(android_api))
        elif 'ANDROIDAPI' in environ:
            android_api = environ['ANDROIDAPI']
            info('Found Android API target in $ANDROIDAPI: {}'.format(android_api))
        else:
            info('Android API target was not set manually, using '
                 'the default of {}'.format(RECOMMENDED_TARGET_API))
            android_api = RECOMMENDED_TARGET_API
        android_api = int(android_api)
        self.android_api = android_api

        check_target_api(android_api, self.archs[0].arch)

        # List the installed SDK targets via whichever tool is available.
        if exists(join(sdk_dir, 'tools', 'bin', 'avdmanager')):
            avdmanager = sh.Command(join(sdk_dir, 'tools', 'bin', 'avdmanager'))
            targets = avdmanager('list', 'target').stdout.decode('utf-8').split('\n')
        elif exists(join(sdk_dir, 'tools', 'android')):
            android = sh.Command(join(sdk_dir, 'tools', 'android'))
            targets = android('list').stdout.decode('utf-8').split('\n')
        else:
            raise BuildInterruptingException(
                'Could not find `android` or `sdkmanager` binaries in Android SDK',
                instructions='Make sure the path to the Android SDK is correct')
        apis = [s for s in targets if re.match(r'^ *API level: ', s)]
        apis = [re.findall(r'[0-9]+', s) for s in apis]
        apis = [int(s[0]) for s in apis if s]
        info('Available Android APIs are ({})'.format(
            ', '.join(map(str, apis))))
        if android_api in apis:
            info(('Requested API target {} is available, '
                  'continuing.').format(android_api))
        else:
            raise BuildInterruptingException(
                ('Requested API target {} is not available, install '
                 'it with the SDK android tool.').format(android_api))

        # Find the Android NDK
        # Could also use ANDROID_NDK, but doesn't look like many tools use this
        ndk_dir = None
        if user_ndk_dir:
            ndk_dir = user_ndk_dir
            info('Getting NDK dir from from user argument')
        if ndk_dir is None:  # The old P4A-specific dir
            ndk_dir = environ.get('ANDROIDNDK', None)
            if ndk_dir is not None:
                info('Found NDK dir in $ANDROIDNDK: {}'.format(ndk_dir))
        if ndk_dir is None:  # Apparently the most common convention
            ndk_dir = environ.get('NDK_HOME', None)
            if ndk_dir is not None:
                info('Found NDK dir in $NDK_HOME: {}'.format(ndk_dir))
        if ndk_dir is None:  # Another convention (with maven?)
            ndk_dir = environ.get('ANDROID_NDK_HOME', None)
            if ndk_dir is not None:
                info('Found NDK dir in $ANDROID_NDK_HOME: {}'.format(ndk_dir))
        if ndk_dir is None:  # Checks in the buildozer NDK dir, useful
            #                # for debug tests of p4a
            possible_dirs = glob.glob(expanduser(join(
                '~', '.buildozer', 'android', 'platform', 'android-ndk-r*')))
            if possible_dirs:
                info('Found possible NDK dirs in buildozer dir: {}'.format(
                    ', '.join([d.split(os.sep)[-1] for d in possible_dirs])))
                info('Will attempt to use NDK at {}'.format(possible_dirs[0]))
                warning('This NDK lookup is intended for debug only, if you '
                        'use python-for-android much you should probably '
                        'maintain your own NDK download.')
                ndk_dir = possible_dirs[0]
        if ndk_dir is None:
            raise BuildInterruptingException('Android NDK dir was not specified')
        self.ndk_dir = realpath(ndk_dir)

        check_ndk_version(ndk_dir)

        ndk_api = None
        if user_ndk_api:
            ndk_api = user_ndk_api
            info('Getting NDK API version (i.e. minimum supported API) from user argument')
        elif 'NDKAPI' in environ:
            ndk_api = environ.get('NDKAPI', None)
            info('Found Android API target in $NDKAPI')
        else:
            # Default: the lower of the target API and the recommended ndk-api.
            ndk_api = min(self.android_api, RECOMMENDED_NDK_API)
            warning('NDK API target was not set manually, using '
                    'the default of {} = min(android-api={}, default ndk-api={})'.format(
                        ndk_api, self.android_api, RECOMMENDED_NDK_API))
        ndk_api = int(ndk_api)
        self.ndk_api = ndk_api

        check_ndk_api(ndk_api, self.android_api)

        virtualenv = get_virtualenv_executable()
        if virtualenv is None:
            raise IOError('Couldn\'t find a virtualenv executable, '
                          'you must install this to use p4a.')
        self.virtualenv = virtualenv
        info('Found virtualenv at {}'.format(virtualenv))

        # path to some tools
        self.ccache = sh.which("ccache")
        if not self.ccache:
            info('ccache is missing, the build will not be optimized in the '
                 'future.')
        # Accept any of the common cython executable names.
        for cython_fn in ("cython", "cython3", "cython2", "cython-2.7"):
            cython = sh.which(cython_fn)
            if cython:
                self.cython = cython
                break
        else:
            raise BuildInterruptingException('No cython binary found.')
        if not self.cython:
            ok = False
            warning("Missing requirement: cython is not installed")

        # This would need to be changed if supporting multiarch APKs
        arch = self.archs[0]
        platform_dir = arch.platform_dir
        toolchain_prefix = arch.toolchain_prefix
        toolchain_version = None
        self.ndk_platform = join(
            self.ndk_dir,
            'platforms',
            'android-{}'.format(self.ndk_api),
            platform_dir)
        if not exists(self.ndk_platform):
            warning('ndk_platform doesn\'t exist: {}'.format(
                self.ndk_platform))
            ok = False

        py_platform = sys.platform
        if py_platform in ['linux2', 'linux3']:
            py_platform = 'linux'

        # Discover the available (GCC) toolchain versions in the NDK.
        toolchain_versions = []
        toolchain_path = join(self.ndk_dir, 'toolchains')
        if isdir(toolchain_path):
            toolchain_contents = glob.glob('{}/{}-*'.format(toolchain_path,
                                                            toolchain_prefix))
            toolchain_versions = [split(path)[-1][len(toolchain_prefix) + 1:]
                                  for path in toolchain_contents]
        else:
            warning('Could not find toolchain subdirectory!')
            ok = False
        toolchain_versions.sort()

        toolchain_versions_gcc = []
        for toolchain_version in toolchain_versions:
            if toolchain_version[0].isdigit():
                # GCC toolchains begin with a number
                toolchain_versions_gcc.append(toolchain_version)

        if toolchain_versions:
            info('Found the following toolchain versions: {}'.format(
                toolchain_versions))
            info('Picking the latest gcc toolchain, here {}'.format(
                toolchain_versions_gcc[-1]))
            toolchain_version = toolchain_versions_gcc[-1]
        else:
            warning('Could not find any toolchain for {}!'.format(
                toolchain_prefix))
            ok = False

        self.toolchain_prefix = toolchain_prefix
        self.toolchain_version = toolchain_version
        # Modify the path so that sh finds modules appropriately
        environ['PATH'] = (
            '{ndk_dir}/toolchains/{toolchain_prefix}-{toolchain_version}/'
            'prebuilt/{py_platform}-x86/bin/:{ndk_dir}/toolchains/'
            '{toolchain_prefix}-{toolchain_version}/prebuilt/'
            '{py_platform}-x86_64/bin/:{ndk_dir}:{sdk_dir}/'
            'tools:{path}').format(
                sdk_dir=self.sdk_dir, ndk_dir=self.ndk_dir,
                toolchain_prefix=toolchain_prefix,
                toolchain_version=toolchain_version,
                py_platform=py_platform, path=environ.get('PATH'))

        # Sanity-check the presence of the host build tools.
        for executable in ("pkg-config", "autoconf", "automake", "libtoolize",
                           "tar", "bzip2", "unzip", "make", "gcc", "g++"):
            if not sh.which(executable):
                warning("Missing executable: {} is not installed".format(
                    executable))

        if not ok:
            raise BuildInterruptingException(
                'python-for-android cannot continue due to the missing executables above')

    def __init__(self):
        super(Context, self).__init__()
        self.include_dirs = []

        self._build_env_prepared = False

        self._sdk_dir = None
        self._ndk_dir = None
        self._android_api = None
        self._ndk_api = None
        self.ndk = None

        self.toolchain_prefix = None
        self.toolchain_version = None

        self.local_recipes = None
        self.copy_libs = False

        # this list should contain all Archs, it is pruned later
        self.archs = (
            ArchARM(self),
            ArchARMv7_a(self),
            Archx86(self),
            Archx86_64(self),
            ArchAarch_64(self),
        )

        self.root_dir = realpath(dirname(__file__))

        # remove the most obvious flags that can break the compilation
        self.env.pop("LDFLAGS", None)
        self.env.pop("ARCHFLAGS", None)
        self.env.pop("CFLAGS", None)

        self.python_recipe = None  # Set by TargetPythonRecipe

    def set_archs(self, arch_names):
        '''Prune self.archs down to the requested arch names; raises if
        none of the names match a known arch.'''
        all_archs = self.archs
        new_archs = set()
        for name in arch_names:
            matching = [arch for arch in all_archs if arch.arch == name]
            for match in matching:
                new_archs.add(match)
        self.archs = list(new_archs)
        if not self.archs:
            raise BuildInterruptingException('Asked to compile for no Archs, so failing.')
        info('Will compile for the following archs: {}'.format(
            ', '.join([arch.arch for arch in self.archs])))

    def prepare_bootstrap(self, bs):
        # Bind the bootstrap to this context and set up its build dir.
        bs.ctx = self
        self.bootstrap = bs
        self.bootstrap.prepare_build_dir()
        self.bootstrap_build_dir = self.bootstrap.build_dir

    def prepare_dist(self, name):
        self.dist_name = name
        self.bootstrap.prepare_dist_dir(self.dist_name)

    def get_site_packages_dir(self, arch=None):
        '''Returns the location of site-packages in the python-install build
        dir.
        '''
        return self.get_python_install_dir()

    def get_libs_dir(self, arch):
        '''The libs dir for a given arch.'''
        ensure_dir(join(self.libs_dir, arch))
        return join(self.libs_dir, arch)

    def has_lib(self, arch, lib):
        return exists(join(self.get_libs_dir(arch), lib))

    def has_package(self, name, arch=None):
        '''Return True if *name* (a package name or a local path) appears to
        already be installed in the site-packages dir.'''
        # If this is a file path, it'll need special handling:
        if (name.find("/") >= 0 or name.find("\\") >= 0) and \
                name.find("://") < 0:  # (:// would indicate an url)
            if not os.path.exists(name):
                # Non-existing dir, cannot look this up.
                return False
            try:
                name = get_package_name(os.path.abspath(name))
            except ValueError:
                # Failed to look up any meaningful name.
                return False

        # Try to look up recipe by name:
        try:
            recipe = Recipe.get_recipe(name, self)
        except ValueError:
            pass
        else:
            name = getattr(recipe, 'site_packages_name', None) or name
        name = name.replace('.', '/')
        site_packages_dir = self.get_site_packages_dir(arch)
        # Check every on-disk form the package may have been installed as.
        return (exists(join(site_packages_dir, name)) or
                exists(join(site_packages_dir, name + '.py')) or
                exists(join(site_packages_dir, name + '.pyc')) or
                exists(join(site_packages_dir, name + '.pyo')) or
                exists(join(site_packages_dir, name + '.so')) or
                glob.glob(join(site_packages_dir, name + '-*.egg')))

    def not_has_package(self, name, arch=None):
        return not self.has_package(name, arch)
def build_recipes(build_order, python_modules, ctx, project_dir,
                  ignore_project_setup_py=False
                  ):
    """Run the full recipe pipeline (download, unpack, prebuild, build,
    biglink, postbuild) for every target arch, then install the remaining
    pure Python requirements with pip.

    :param build_order: recipe names in dependency order
    :param python_modules: requirements that have no recipe (pip-installed)
    :param ctx: the build :class:`Context`
    :param project_dir: user project dir, used for the setup.py install step
    :param ignore_project_setup_py: if True, skip the project's setup.py
    """
    # Put recipes in correct build order
    info_notify("Recipe build order is {}".format(build_order))
    if python_modules:
        python_modules = sorted(set(python_modules))
        info_notify(
            ('The requirements ({}) were not found as recipes, they will be '
             'installed with pip.').format(', '.join(python_modules)))

    recipes = [Recipe.get_recipe(name, ctx) for name in build_order]

    # download is arch independent
    info_main('# Downloading recipes ')
    for recipe in recipes:
        recipe.download_if_necessary()

    for arch in ctx.archs:
        info_main('# Building all recipes for arch {}'.format(arch.arch))

        info_main('# Unpacking recipes')
        for recipe in recipes:
            ensure_dir(recipe.get_build_container_dir(arch.arch))
            recipe.prepare_build_dir(arch.arch)

        info_main('# Prebuilding recipes')
        # 2) prebuild packages
        for recipe in recipes:
            info_main('Prebuilding {} for {}'.format(recipe.name, arch.arch))
            recipe.prebuild_arch(arch)
            recipe.apply_patches(arch)

        # 3) build packages
        info_main('# Building recipes')
        for recipe in recipes:
            info_main('Building {} for {}'.format(recipe.name, arch.arch))
            if recipe.should_build(arch):
                recipe.build_arch(arch)
            else:
                info('{} said it is already built, skipping'
                     .format(recipe.name))

        # 4) biglink everything
        info_main('# Biglinking object files')
        if not ctx.python_recipe or not ctx.python_recipe.from_crystax:
            biglink(ctx, arch)
        else:
            info('NDK is crystax, skipping biglink (will this work?)')

        # 5) postbuild packages
        info_main('# Postbuilding recipes')
        for recipe in recipes:
            info_main('Postbuilding {} for {}'.format(recipe.name, arch.arch))
            recipe.postbuild_arch(arch)

    info_main('# Installing pure Python modules')
    run_pymodules_install(
        ctx, python_modules, project_dir,
        ignore_setup_py=ignore_project_setup_py
    )

    return
def project_has_setup_py(project_dir):
    """Return True when *project_dir* contains a setup.py or pyproject.toml,
    False otherwise (including when project_dir is None)."""
    if project_dir is None:
        return False
    return any(
        os.path.exists(os.path.join(project_dir, marker))
        for marker in ("setup.py", "pyproject.toml")
    )
def run_setuppy_install(ctx, project_dir, env=None):
    """Install the user's project into the build's site-packages via pip,
    pinning all already-installed packages through a constraints file so the
    install cannot upgrade/reinstall recipe-built packages.

    :param ctx: the build :class:`Context` (provides build_dir and
        site-packages locations)
    :param project_dir: directory containing setup.py / pyproject.toml
    :param env: environment dict for the pip subprocesses
    """
    if env is None:
        env = dict()

    with current_directory(project_dir):
        info('got setup.py or similar, running project install. ' +
             '(disable this behavior with --ignore-setup-py)')

        # Compute & output the constraints we will use:
        info('Contents that will be used for constraints.txt:')
        constraints = subprocess.check_output([
            join(
                ctx.build_dir, "venv", "bin", "pip"
            ),
            "freeze"
        ], env=copy.copy(env))
        try:
            constraints = constraints.decode("utf-8", "replace")
        except AttributeError:
            # Already a str (Python 2 byte-string path).
            pass
        info(constraints)

        # Make sure all packages found are fixed in version
        # by writing a constraint file, to avoid recipes being
        # upgraded & reinstalled:
        with open('._tmp_p4a_recipe_constraints.txt', 'wb') as fileh:
            fileh.write(constraints.encode("utf-8", "replace"))
        try:

            info('Populating venv\'s site-packages with '
                 'ctx.get_site_packages_dir()...')

            # Copy dist contents into site-packages for discovery.
            # Why this is needed:
            # --target is somewhat evil and messes with discovery of
            # packages in PYTHONPATH if that also includes the target
            # folder. So we need to use the regular virtualenv
            # site-packages folder instead.
            # Reference:
            # https://github.com/pypa/pip/issues/6223
            ctx_site_packages_dir = os.path.normpath(
                os.path.abspath(ctx.get_site_packages_dir())
            )
            venv_site_packages_dir = os.path.normpath(os.path.join(
                ctx.build_dir, "venv", "lib", [
                    f for f in os.listdir(os.path.join(
                        ctx.build_dir, "venv", "lib"
                    )) if f.startswith("python")
                ][0], "site-packages"
            ))
            copied_over_contents = []
            for f in os.listdir(ctx_site_packages_dir):
                full_path = os.path.join(ctx_site_packages_dir, f)
                if not os.path.exists(os.path.join(
                        venv_site_packages_dir, f
                        )):
                    if os.path.isdir(full_path):
                        shutil.copytree(full_path, os.path.join(
                            venv_site_packages_dir, f
                        ))
                    else:
                        shutil.copy2(full_path, os.path.join(
                            venv_site_packages_dir, f
                        ))
                    copied_over_contents.append(f)

            # Get listing of virtualenv's site-packages, to see the
            # newly added things afterwards & copy them back into
            # the distribution folder / build context site-packages:
            previous_venv_contents = os.listdir(
                venv_site_packages_dir
            )

            # Actually run setup.py:
            info('Launching package install...')
            shprint(sh.bash, '-c', (
                "'" + join(
                    ctx.build_dir, "venv", "bin", "pip"
                ).replace("'", "'\"'\"'") + "' " +
                "install -c ._tmp_p4a_recipe_constraints.txt -v ."
                ).format(ctx.get_site_packages_dir().
                         replace("'", "'\"'\"'")),
                    _env=copy.copy(env))

            # Go over all new additions and copy them back:
            info('Copying additions resulting from setup.py back '
                 'into ctx.get_site_packages_dir()...')
            new_venv_additions = []
            for f in (set(os.listdir(venv_site_packages_dir)) -
                      set(previous_venv_contents)):
                new_venv_additions.append(f)
                full_path = os.path.join(venv_site_packages_dir, f)
                if os.path.isdir(full_path):
                    shutil.copytree(full_path, os.path.join(
                        ctx_site_packages_dir, f
                    ))
                else:
                    shutil.copy2(full_path, os.path.join(
                        ctx_site_packages_dir, f
                    ))

            # Undo all the changes we did to the venv-site packages:
            info('Reverting additions to '
                 'virtualenv\'s site-packages...')
            for f in set(copied_over_contents + new_venv_additions):
                full_path = os.path.join(venv_site_packages_dir, f)
                if os.path.isdir(full_path):
                    shutil.rmtree(full_path)
                else:
                    os.remove(full_path)
        finally:
            # Always clean up the temporary constraints file.
            os.remove("._tmp_p4a_recipe_constraints.txt")
def run_pymodules_install(ctx, modules, project_dir=None,
                          ignore_setup_py=False):
    """ This function will take care of all non-recipe things, by:

        1. Processing them from --requirements (the modules argument)
           and installing them

        2. Installing the user project/app itself via setup.py if
           ignore_setup_py=True

    """
    info('*** PYTHON PACKAGE / PROJECT INSTALL STAGE ***')
    # Drop anything that is already present in site-packages.
    modules = list(filter(ctx.not_has_package, modules))

    # We change current working directory later, so this has to be an absolute
    # path or `None` in case that we didn't supply the `project_dir` via kwargs
    project_dir = abspath(project_dir) if project_dir else None

    # Bail out if no python deps and no setup.py to process:
    if not modules and (
            ignore_setup_py or
            project_dir is None or
            not project_has_setup_py(project_dir)
            ):
        info('No Python modules and no setup.py to process, skipping')
        return

    # Output messages about what we're going to do:
    if modules:
        info('The requirements ({}) don\'t have recipes, attempting to '
             'install them with pip'.format(', '.join(modules)))
        info('If this fails, it may mean that the module has compiled '
             'components and needs a recipe.')
    if project_dir is not None and \
            project_has_setup_py(project_dir) and not ignore_setup_py:
        info('Will process project install, if it fails then the '
             'project may not be compatible for Android install.')

    # Create a virtualenv inside the build dir for the install work.
    venv = sh.Command(ctx.virtualenv)
    with current_directory(join(ctx.build_dir)):
        shprint(venv,
                '--python=python{}'.format(
                    ctx.python_recipe.major_minor_version_string.
                    partition(".")[0]
                ),
                'venv'
                )

        # Prepare base environment and upgrade pip:
        base_env = copy.copy(os.environ)
        base_env["PYTHONPATH"] = ctx.get_site_packages_dir()
        info('Upgrade pip to latest version')
        shprint(sh.bash, '-c', (
            "source venv/bin/activate && pip install -U pip"
        ), _env=copy.copy(base_env))

        # Install Cython in case modules need it to build:
        info('Install Cython in case one of the modules needs it to build')
        shprint(sh.bash, '-c', (
            "venv/bin/pip install Cython"
        ), _env=copy.copy(base_env))

        # Get environment variables for build (with CC/compiler set):
        standard_recipe = CythonRecipe()
        standard_recipe.ctx = ctx
        # (note: following line enables explicit -lpython... linker options)
        standard_recipe.call_hostpython_via_targetpython = False
        recipe_env = standard_recipe.get_recipe_env(ctx.archs[0])
        env = copy.copy(base_env)
        env.update(recipe_env)

        # Make sure our build package dir is available, and the virtualenv
        # site packages come FIRST (so the proper pip version is used):
        env["PYTHONPATH"] += ":" + ctx.get_site_packages_dir()
        env["PYTHONPATH"] = os.path.abspath(join(
            ctx.build_dir, "venv", "lib",
            "python" + ctx.python_recipe.major_minor_version_string,
            "site-packages")) + ":" + env["PYTHONPATH"]

        # Install the manually specified requirements first:
        if not modules:
            info('There are no Python modules to install, skipping')
        else:
            info('Creating a requirements.txt file for the Python modules')
            with open('requirements.txt', 'w') as fileh:
                for module in modules:
                    # VERSION_<module> env var pins a specific version.
                    key = 'VERSION_' + module
                    if key in environ:
                        line = '{}=={}\n'.format(module, environ[key])
                    else:
                        line = '{}\n'.format(module)
                    fileh.write(line)

            info('Installing Python modules with pip')
            info('IF THIS FAILS, THE MODULES MAY NEED A RECIPE. '
                 'A reason for this is often modules compiling '
                 'native code that is unaware of Android cross-compilation '
                 'and does not work without additional '
                 'changes / workarounds.')

            shprint(sh.bash, '-c', (
                "venv/bin/pip " +
                "install -v --target '{0}' --no-deps -r requirements.txt"
            ).format(ctx.get_site_packages_dir().replace("'", "'\"'\"'")),
                    _env=copy.copy(env))

        # Afterwards, run setup.py if present:
        if project_dir is not None and (
                project_has_setup_py(project_dir) and not ignore_setup_py
                ):
            run_setuppy_install(ctx, project_dir, env)
        elif not ignore_setup_py:
            info("No setup.py found in project directory: " +
                 str(project_dir)
                 )

        # Strip object files after potential Cython or native code builds:
        standard_recipe.strip_object_files(ctx.archs[0], env,
                                           build_dir=ctx.build_dir)
def biglink(ctx, arch):
    """Collate each recipe's object files and produce libpymodules.so.

    Recipes that generated biglinkable objects leave them in an
    ``objects_<recipe>`` directory; these are gathered into one collated
    directory and then either copied (``ctx.copy_libs``) or linked into a
    single shared object for the given arch.
    """
    # First, collate object files from each recipe
    info('Collating object files from each recipe')
    obj_dir = join(ctx.bootstrap.build_dir, 'collated_objects')
    ensure_dir(obj_dir)
    recipes = [Recipe.get_recipe(name, ctx) for name in ctx.recipe_build_order]
    for recipe in recipes:
        recipe_obj_dir = join(recipe.get_build_container_dir(arch.arch),
                              'objects_{}'.format(recipe.name))
        if not exists(recipe_obj_dir):
            info('{} recipe has no biglinkable files dir, skipping'
                 .format(recipe.name))
            continue
        files = glob.glob(join(recipe_obj_dir, '*'))
        if not len(files):
            info('{} recipe has no biglinkable files, skipping'
                 .format(recipe.name))
            continue
        info('{} recipe has object files, copying'.format(recipe.name))
        # cp -r <sources...> <destination>: the collated dir goes last
        files.append(obj_dir)
        shprint(sh.cp, '-r', *files)

    env = arch.get_env()
    env['LDFLAGS'] = env['LDFLAGS'] + ' -L{}'.format(
        join(ctx.bootstrap.build_dir, 'obj', 'local', arch.arch))

    if not len(glob.glob(join(obj_dir, '*'))):
        info('There seem to be no libraries to biglink, skipping.')
        return
    info('Biglinking')
    info('target {}'.format(join(ctx.get_libs_dir(arch.arch),
                                 'libpymodules.so')))
    do_biglink = copylibs_function if ctx.copy_libs else biglink_function

    # Move to the directory containing crtstart_so.o and crtend_so.o
    # This is necessary with newer NDKs? A gcc bug?
    with current_directory(join(ctx.ndk_platform, 'usr', 'lib')):
        do_biglink(
            join(ctx.get_libs_dir(arch.arch), 'libpymodules.so'),
            # bugfix: obj_dir.split(' ') broke for build paths containing
            # spaces; the callee expects a list of directories.
            [obj_dir],
            extra_link_dirs=[join(ctx.bootstrap.build_dir,
                                  'obj', 'local', arch.arch),
                             os.path.abspath('.')],
            env=env)
def biglink_function(soname, objs_paths, extra_link_dirs=None, env=None):
    """Link collated ``*.so.o`` object files into one shared object.

    Args:
        soname: output path of the shared object to produce.
        objs_paths: directories scanned for ``*.so.o`` files; each object
            must have a sibling ``*.so.libs`` file listing its link flags.
        extra_link_dirs: extra directories appended as ``-L`` options.
        env: environment for the compiler call; must provide 'CC'.
    """
    # bugfix: a None default avoids the shared mutable-default-argument
    # pitfall (the previous [] default was shared across calls).
    if extra_link_dirs is None:
        extra_link_dirs = []
    print('objs_paths are', objs_paths)
    sofiles = []

    for directory in objs_paths:
        for fn in os.listdir(directory):
            fn = os.path.join(directory, fn)
            if not fn.endswith(".so.o"):
                continue
            if not os.path.exists(fn[:-2] + ".libs"):
                continue
            sofiles.append(fn[:-2])

    # The raw argument list.
    args = []
    for fn in sofiles:
        afn = fn + ".o"
        libsfn = fn + ".libs"
        args.append(afn)
        with open(libsfn) as fd:
            data = fd.read()
            args.extend(data.split(" "))

    # Deduplicate while dropping bare '-L' entries; popping from the end
    # and inserting unseen flags at the front keeps each flag's last
    # occurrence in its original relative order.
    unique_args = []
    while args:
        a = args.pop()
        if a in ('-L', ):
            continue
        if a not in unique_args:
            unique_args.insert(0, a)

    for dir in extra_link_dirs:
        link = '-L{}'.format(dir)
        if link not in unique_args:
            unique_args.append(link)

    # env['CC'] may contain the compiler plus leading flags, so split it.
    cc_name = env['CC']
    cc = sh.Command(cc_name.split()[0])
    cc = cc.bake(*cc_name.split()[1:])

    shprint(cc, '-shared', '-O3', '-o', soname, *unique_args, _env=env)
def copylibs_function(soname, objs_paths, extra_link_dirs=None, env=None):
    """Resolve and copy the shared libraries needed by the collated objects.

    Reads each ``*.libs`` file under *objs_paths* for required library
    names, scans the directories listed in the matching ``*.dirs`` file,
    copies every found ``.so`` next to *soname* and follows their
    transitive NEEDED entries (via readelf) until everything is resolved.

    Args:
        soname: target shared-object path; libraries are copied into its
            directory.
        objs_paths: directories containing ``*.libs`` / ``*.dirs`` files.
        extra_link_dirs: accepted for interface parity with
            biglink_function; currently unused here.
        env: optional environment; 'READELF' overrides the readelf binary.

    Raises:
        RuntimeError: when a scan pass makes no progress on needed libs.
    """
    # bugfix: a None default avoids the shared mutable-default-argument
    # pitfall (the previous [] default was shared across calls).
    if extra_link_dirs is None:
        extra_link_dirs = []
    print('objs_paths are', objs_paths)
    re_needso = re.compile(r'^.*\(NEEDED\)\s+Shared library: \[lib(.*)\.so\]\s*$')
    # Libraries provided by the system / bootstrap: never copied.
    blacklist_libs = (
        'c',
        'stdc++',
        'dl',
        'python2.7',
        'sdl',
        'sdl_image',
        'sdl_ttf',
        'z',
        'm',
        'GLESv2',
        'jpeg',
        'png',
        'log',
        # bootstrap takes care of sdl2 libs (if applicable)
        'SDL2',
        'SDL2_ttf',
        'SDL2_image',
        'SDL2_mixer',
    )
    found_libs = []
    sofiles = []
    # Pick the readelf binary: env wins over os.environ, else PATH lookup.
    if env and 'READELF' in env:
        readelf = env['READELF']
    elif 'READELF' in os.environ:
        readelf = os.environ['READELF']
    else:
        readelf = sh.which('readelf').strip()
    readelf = sh.Command(readelf).bake('-d')

    dest = dirname(soname)

    for directory in objs_paths:
        for fn in os.listdir(directory):
            fn = join(directory, fn)
            if not fn.endswith('.libs'):
                continue
            dirfn = fn[:-1] + 'dirs'
            if not exists(dirfn):
                continue
            with open(fn) as f:
                libs = f.read().strip().split(' ')
                needed_libs = [lib for lib in libs
                               if lib and
                               lib not in blacklist_libs and
                               lib not in found_libs]

            while needed_libs:
                print('need libs:\n\t' + '\n\t'.join(needed_libs))
                start_needed_libs = needed_libs[:]
                found_sofiles = []

                with open(dirfn) as f:
                    libdirs = f.read().split()
                    for libdir in libdirs:
                        if not needed_libs:
                            break
                        if libdir == dest:
                            # don't need to copy from dest to dest!
                            continue
                        libdir = libdir.strip()
                        print('scanning', libdir)
                        for lib in needed_libs[:]:
                            if lib in found_libs:
                                continue
                            if lib.endswith('.a'):
                                needed_libs.remove(lib)
                                found_libs.append(lib)
                                continue
                            lib_a = 'lib' + lib + '.a'
                            libpath_a = join(libdir, lib_a)
                            lib_so = 'lib' + lib + '.so'
                            libpath_so = join(libdir, lib_so)
                            plain_so = lib + '.so'
                            plainpath_so = join(libdir, plain_so)
                            sopath = None
                            if exists(libpath_so):
                                sopath = libpath_so
                            elif exists(plainpath_so):
                                sopath = plainpath_so
                            if sopath:
                                print('found', lib, 'in', libdir)
                                found_sofiles.append(sopath)
                                needed_libs.remove(lib)
                                found_libs.append(lib)
                                continue
                            if exists(libpath_a):
                                print('found', lib, '(static) in', libdir)
                                needed_libs.remove(lib)
                                found_libs.append(lib)
                                continue

                # Follow transitive NEEDED entries of everything found.
                for sofile in found_sofiles:
                    print('scanning dependencies for', sofile)
                    out = readelf(sofile)
                    for line in out.splitlines():
                        needso = re_needso.match(line)
                        if needso:
                            lib = needso.group(1)
                            if (lib not in needed_libs
                                    and lib not in found_libs
                                    and lib not in blacklist_libs):
                                needed_libs.append(needso.group(1))

                sofiles += found_sofiles

                # Guard against an infinite loop when nothing was resolved.
                if needed_libs == start_needed_libs:
                    raise RuntimeError(
                            'Failed to locate needed libraries!\n\t' +
                            '\n\t'.join(needed_libs))

    print('Copying libraries')
    for lib in sofiles:
        shprint(sh.cp, lib, dest)
| mit |
llooker/python_sdk | lookerapi/models/lookml_model_explore_access_filter.py | 1 | 5420 | # coding: utf-8
"""
Looker API 3.0 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning)
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class LookmlModelExploreAccessFilter(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, field=None, user_attribute=None):
        """
        LookmlModelExploreAccessFilter - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # attribute name -> swagger type
        self.swagger_types = {
            'field': 'str',
            'user_attribute': 'str'
        }
        # attribute name -> json key in the API definition
        self.attribute_map = {
            'field': 'field',
            'user_attribute': 'user_attribute'
        }
        self._field = field
        self._user_attribute = user_attribute

    @property
    def field(self):
        """
        Gets the field of this LookmlModelExploreAccessFilter.
        Field to be filtered

        :rtype: str
        """
        return self._field

    @field.setter
    def field(self, field):
        """
        Sets the field of this LookmlModelExploreAccessFilter.
        Field to be filtered

        :type: str
        """
        self._field = field

    @property
    def user_attribute(self):
        """
        Gets the user_attribute of this LookmlModelExploreAccessFilter.
        User attribute name

        :rtype: str
        """
        return self._user_attribute

    @user_attribute.setter
    def user_attribute(self, user_attribute):
        """
        Sets the user_attribute of this LookmlModelExploreAccessFilter.
        User attribute name

        :type: str
        """
        self._user_attribute = user_attribute

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        def _serialize(value):
            # Recurse one level into nested swagger models, lists and dicts.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return dict(
                    (key, val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items())
            return value

        return dict((attr, _serialize(getattr(self, attr)))
                    for attr in self.swagger_types)

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, LookmlModelExploreAccessFilter):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| mit |
guaycuru/gmvault | src/gmv/gmv_cmd.py | 2 | 38076 | # -*- coding: utf-8 -*-
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import socket
import sys
import datetime
import os
import signal
import traceback
import argparse
import imaplib
import gmv.log_utils as log_utils
import gmv.gmvault_utils as gmvault_utils
import gmv.gmvault as gmvault
import gmv.gmvault_export as gmvault_export
import gmv.collections_utils as collections_utils
from gmv.cmdline_utils import CmdLineParser
from gmv.credential_utils import CredentialHelper
# Current Gmvault release number (single source of truth in gmvault_utils).
GMVAULT_VERSION = gmvault_utils.GMVAULT_VERSION

# Epilogue texts below are appended verbatim to the argparse help output of
# the top-level command and of each sub-command; they hold usage examples.
GLOBAL_HELP_EPILOGUE = """Examples:
a) Get help for each of the individual commands
#> gmvault sync -h
#> gmvault restore --help
#> gmvault check -h
#> gmvault export -h
"""

# Examples shown after 'gmvault restore -h'.
REST_HELP_EPILOGUE = """Examples:
a) Complete restore of your gmail account (backed up in ~/gmvault-db) into anewfoo.bar@gmail.com
#> gmvault restore -d ~/gmvault-db anewfoo.bar@gmail.com
b) Quick restore (restore only the last 2 months to make regular updates) of your gmail account into anewfoo.bar@gmail.com
#> gmvault restore --type quick -d ~/gmvault-db foo.bar@gmail.com
c) Restart a restore after a previous error (Gmail can cut the connection if it is too long)
#> gmvault restore -d ~/gmvault-db anewfoo.bar@gmail.com --resume
d) Apply a label to all restored emails
#> gmvault restore --apply-label "20120422-gmvault" -d ~/gmvault-db anewfoo.bar@gmail.com
"""

# Examples shown after 'gmvault sync -h'.
SYNC_HELP_EPILOGUE = """Examples:
a) Full synchronisation with email and oauth login in ./gmvault-db
#> gmvault sync foo.bar@gmail.com
b) Quick daily synchronisation (only the last 2 months are scanned)
#> gmvault sync --type quick foo.bar@gmail.com
c) Resume Full synchronisation from where it failed to not go through your mailbox again
#> gmvault sync foo.bar@gmail.com --resume
d) Encrypt stored emails to save them safely anywhere
#> gmvault sync foo.bar@gmail.com --encrypt
d) Custom synchronisation with an IMAP request for advance users
#> gmvault sync --type custom --imap-req "Since 1-Nov-2011 Before 10-Nov-2011" foo.bar@gmail.com
e) Custom synchronisation with an Gmail request for advance users.
Get all emails with label work and sent by foo.
#> gmvault sync --type custom --gmail-req "in:work from:foo" foo.bar@gmail.com
"""

# Examples shown after 'gmvault export -h'.
EXPORT_HELP_EPILOGUE = """Warning: Experimental Functionality requiring more testing.
Examples:
a) Export default gmvault-db ($HOME/gmvault-db or %HOME$/gmvault-db) as a maildir mailbox.
#> gmvault export ~/my-mailbox-dir
b) Export a gmvault-db as a mbox mailbox (compliant with Thunderbird).
#> gmvault export -d /tmp/gmvault-db /tmp/a-mbox-dir
c) Export only a limited set of labels from the default gmvault-db as a mbox mailbox (compliant with Thunderbird).
#> gmvault export -l "label1" -l "TopLabel/LabelLev1" /tmp/a-mbox-dir
d) Use one of the export type dedicated to a specific tool (dovecot or offlineIMAP)
#> gmvault export -t dovecot /tmp/a-dovecot-dir
"""

# Module-level logger for the command-line front end.
LOG = log_utils.LoggerFactory.get_logger('gmv')
class NotSeenAction(argparse.Action): #pylint:disable=R0903,w0232
    """Argparse action that records whether an option was actually seen.

    A truthy value is replaced by the marker string 'empty'; a falsy value
    is stored unchanged, letting callers tell an option given with an empty
    value apart from one not given at all.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, 'empty' if values else values)
class GMVaultLauncher(object):
"""
GMVault launcher handling the command parsing
"""
SYNC_TYPES = ['full', 'quick', 'custom']
RESTORE_TYPES = ['full', 'quick']
CHECK_TYPES = ['full']
EXPORT_TYPES = collections_utils.OrderedDict([
('offlineimap', gmvault_export.OfflineIMAP),
('dovecot', gmvault_export.Dovecot),
('maildir', gmvault_export.OfflineIMAP),
('mbox', gmvault_export.MBox)])
EXPORT_TYPE_NAMES = ", ".join(EXPORT_TYPES)
DEFAULT_GMVAULT_DB = "%s/gmvault-db" % (os.getenv("HOME", "."))
def __init__(self):
""" constructor """
super(GMVaultLauncher, self).__init__()
    @gmvault_utils.memoized
    def _create_parser(self): #pylint: disable=R0915
        """
        Create the argument parser.

        Builds the top-level parser and the sync/restore/check/export
        sub-parsers.  Memoized so the parser is only built once.

        Return the created parser
        """
        parser = CmdLineParser()

        parser.epilogue = GLOBAL_HELP_EPILOGUE

        parser.add_argument("-v", '--version', action='version', version='Gmvault v%s' % (GMVAULT_VERSION))

        subparsers = parser.add_subparsers(title='subcommands', help='valid subcommands.')

        # ---- 'sync' sub-command ----
        sync_parser = subparsers.add_parser('sync', \
                                            help='synchronize with a given gmail account.')
        #email argument can be optional so it should be an option
        sync_parser.add_argument('email', \
                                 action='store', default='empty_$_email', help='email to sync with.')
        # sync type
        sync_parser.add_argument('-t', '-type', '--type', \
                                 action='store', dest='type', \
                                 default='full', help='type of synchronisation: full|quick|custom. (default: full)')

        sync_parser.add_argument("-d", "--db-dir", \
                                 action='store', help="Database root directory. (default: $HOME/gmvault-db)",\
                                 dest="db_dir", default= self.DEFAULT_GMVAULT_DB)

        # for both when seen add const empty otherwise not_seen
        # this allow to distinguish between an empty value and a non seen option
        sync_parser.add_argument("-y", "--oauth2", \
                                 help="use oauth for authentication. (default recommended method)",\
                                 action='store_const', dest="oauth2_token", const='empty', default='not_seen')

        sync_parser.add_argument("-p", "--passwd", \
                                 help="use interactive password authentication. (not recommended)",
                                 action= 'store_const' , dest="passwd", const='empty', default='not_seen')

        sync_parser.add_argument("--renew-oauth2-tok", \
                                 help="renew the stored oauth token (two legged or normal) via an interactive authentication session.",
                                 action= 'store_const' , dest="oauth2_token", const='renew')

        sync_parser.add_argument("--renew-passwd", \
                                 help="renew the stored password via an interactive authentication session. (not recommended)",
                                 action= 'store_const' , dest="passwd", const='renew')

        sync_parser.add_argument("--store-passwd", \
                                 help="use interactive password authentication, encrypt and store the password. (not recommended)",
                                 action= 'store_const' , dest="passwd", const='store')

        sync_parser.add_argument("-r", "--imap-req", metavar = "REQ", \
                                 help="Imap request to restrict sync.",\
                                 dest="imap_request", default=None)

        sync_parser.add_argument("-g", "--gmail-req", metavar = "REQ", \
                                 help="Gmail search request to restrict sync as defined in"\
                                      "https://support.google.com/mail/bin/answer.py?hl=en&answer=7190",\
                                 dest="gmail_request", default=None)

        # activate the resume mode --restart is deprecated
        sync_parser.add_argument("--resume", "--restart", \
                                 action='store_true', dest='restart', \
                                 default=False, help= 'Resume the sync action from the last saved gmail id.')

        # restrict the sync to emails only
        sync_parser.add_argument("--emails-only", \
                                 action='store_true', dest='only_emails', \
                                 default=False, help= 'Only sync emails.')

        # restrict the sync to chats only
        sync_parser.add_argument("--chats-only", \
                                 action='store_true', dest='only_chats', \
                                 default=False, help= 'Only sync chats.')

        sync_parser.add_argument("-e", "--encrypt", \
                                 help="encrypt stored email messages in the database.",\
                                 action='store_true',dest="encrypt", default=False)

        sync_parser.add_argument("-c", "--check-db", metavar = "VAL", \
                                 help="enable/disable the removal from the gmvault db of the emails "\
                                      "that have been deleted from the given gmail account. VAL = yes or no.",\
                                 dest="db_cleaning", default=None)

        sync_parser.add_argument("-m", "--multiple-db-owner", \
                                 help="Allow the email database to be synchronized with emails from multiple accounts.",\
                                 action='store_true',dest="allow_mult_owners", default=False)

        # disable compression of stored emails
        sync_parser.add_argument("--no-compression", \
                                 action='store_false', dest='compression', \
                                 default=True, help= 'disable email storage compression (gzip).')

        sync_parser.add_argument("--server", metavar = "HOSTNAME", \
                                 action='store', help="Gmail imap server hostname. (default: imap.gmail.com)",\
                                 dest="host", default="imap.gmail.com")

        sync_parser.add_argument("--port", metavar = "PORT", \
                                 action='store', help="Gmail imap server port. (default: 993)",\
                                 dest="port", default=993)

        sync_parser.add_argument("--debug", "-debug", \
                                 action='store_true', help="Activate debugging info",\
                                 dest="debug", default=False)

        sync_parser.set_defaults(verb='sync')

        sync_parser.epilogue = SYNC_HELP_EPILOGUE

        # ---- 'restore' sub-command ----
        rest_parser = subparsers.add_parser('restore', \
                                            help='restore gmvault-db to a given email account.')
        #email argument can be optional so it should be an option
        rest_parser.add_argument('email', \
                                 action='store', default='empty_$_email', help='email account to restore.')
        # restore type
        rest_parser.add_argument('-t', '-type', '--type', \
                                 action='store', dest='type', \
                                 default='full', help='type of restoration: full|quick. (default: full)')

        # add a label
        rest_parser.add_argument('-a', '--apply-label', \
                                 action='store', dest='apply_label', \
                                 default=None, help='Apply a label to restored emails')

        # activate the resume mode --restart is deprecated
        rest_parser.add_argument("--resume", "--restart", \
                                 action='store_true', dest='restart', \
                                 default=False, help= 'Restart from the last saved gmail id.')

        # restrict the restore to emails only
        rest_parser.add_argument("--emails-only", \
                                 action='store_true', dest='only_emails', \
                                 default=False, help= 'Only sync emails.')

        # restrict the restore to chats only
        rest_parser.add_argument("--chats-only", \
                                 action='store_true', dest='only_chats', \
                                 default=False, help= 'Only sync chats.')

        rest_parser.add_argument("-d", "--db-dir", \
                                 action='store', help="Database root directory. (default: $HOME/gmvault-db)",\
                                 dest="db_dir", default= self.DEFAULT_GMVAULT_DB)

        # for both when seen add const empty otherwise not_seen
        # this allow to distinguish between an empty value and a non seen option
        rest_parser.add_argument("-y", "--oauth2", \
                                 help="use oauth for authentication. (default recommended method)",\
                                 action='store_const', dest="oauth2_token", const='empty', default='not_seen')

        rest_parser.add_argument("-p", "--passwd", \
                                 help="use interactive password authentication. (not recommended)",
                                 action= 'store_const' , dest="passwd", const='empty', default='not_seen')

        rest_parser.add_argument("--renew-oauth2-tok", \
                                 help="renew the stored oauth token (two legged or normal) via an interactive authentication session.",
                                 action= 'store_const' , dest="oauth2_token", const='renew')

        rest_parser.add_argument("--server", metavar = "HOSTNAME", \
                                 action='store', help="Gmail imap server hostname. (default: imap.gmail.com)",\
                                 dest="host", default="imap.gmail.com")

        rest_parser.add_argument("--port", metavar = "PORT", \
                                 action='store', help="Gmail imap server port. (default: 993)",\
                                 dest="port", default=993)

        rest_parser.add_argument("--debug", "-debug", \
                                 action='store_true', help="Activate debugging info",\
                                 dest="debug", default=False)

        rest_parser.set_defaults(verb='restore')

        rest_parser.epilogue = REST_HELP_EPILOGUE

        # ---- 'check' sub-command ----
        check_parser = subparsers.add_parser('check', \
                                             help='check and clean the gmvault-db disk database.')

        #email argument
        check_parser.add_argument('email', \
                                  action='store', default='empty_$_email', help='gmail account against which to check.')

        check_parser.add_argument("-d", "--db-dir", \
                                  action='store', help="Database root directory. (default: $HOME/gmvault-db)",\
                                  dest="db_dir", default= self.DEFAULT_GMVAULT_DB)

        # for both when seen add const empty otherwise not_seen
        # this allow to distinguish between an empty value and a non seen option
        check_parser.add_argument("-y", "--oauth2", \
                                  help="use oauth for authentication. (default recommended method)",\
                                  action='store_const', dest="oauth2_token", const='empty', default='not_seen')

        check_parser.add_argument("-p", "--passwd", \
                                  help="use interactive password authentication. (not recommended)",
                                  action= 'store_const' , dest="passwd", const='empty', default='not_seen')

        check_parser.add_argument("--renew-oauth2-tok", \
                                  help="renew the stored oauth token (two legged or normal) via an interactive authentication session.",
                                  action= 'store_const' , dest="oauth2_token", const='renew')

        check_parser.add_argument("--server", metavar = "HOSTNAME", \
                                  action='store', help="Gmail imap server hostname. (default: imap.gmail.com)",\
                                  dest="host", default="imap.gmail.com")

        check_parser.add_argument("--port", metavar = "PORT", \
                                  action='store', help="Gmail imap server port. (default: 993)",\
                                  dest="port", default=993)

        check_parser.add_argument("--debug", "-debug", \
                                  action='store_true', help="Activate debugging info",\
                                  dest="debug", default=False)

        check_parser.set_defaults(verb='check')

        # ---- 'export' sub-command ----
        export_parser = subparsers.add_parser('export', \
                                              help='Export the gmvault-db database to another format.')

        export_parser.add_argument('output_dir', \
                                   action='store', help='destination directory to export to.')

        export_parser.add_argument("-d", "--db-dir", \
                                   action='store', help="Database root directory. (default: $HOME/gmvault-db)",\
                                   dest="db_dir", default= self.DEFAULT_GMVAULT_DB)

        export_parser.add_argument('-t', '-type', '--type', \
                                   action='store', dest='type', \
                                   default='mbox', help='type of export: %s. (default: mbox)' % self.EXPORT_TYPE_NAMES)

        export_parser.add_argument('-l', '--label', \
                                   action='append', dest='label', \
                                   default=None,
                                   help='specify a label to export')

        export_parser.add_argument("--debug", "-debug", \
                                   action='store_true', help="Activate debugging info",\
                                   dest="debug", default=False)

        export_parser.set_defaults(verb='export')

        export_parser.epilogue = EXPORT_HELP_EPILOGUE

        return parser
@classmethod
def _parse_common_args(cls, options, parser, parsed_args, list_of_types = []): #pylint:disable=W0102
"""
Parse the common arguments for sync and restore
"""
#add email
parsed_args['email'] = options.email
parsed_args['debug'] = options.debug
parsed_args['restart'] = options.restart
#user entered both authentication methods
if options.passwd == 'empty' and (options.oauth2_token == 'empty'):
parser.error('You have to use one authentication method. '\
'Please choose between OAuth2 and password (recommend OAuth2).')
# user entered no authentication methods => go to default oauth
if options.passwd == 'not_seen' and options.oauth2_token == 'not_seen':
#default to xoauth
options.oauth2_token = 'empty'
# add passwd
parsed_args['passwd'] = options.passwd
# add oauth2 tok
if options.oauth2_token == 'empty':
parsed_args['oauth2'] = options.oauth2_token
elif options.oauth2_token == 'renew':
parsed_args['oauth2'] = 'renew'
#add ops type
if options.type:
tempo_list = ['auto']
tempo_list.extend(list_of_types)
if options.type.lower() in tempo_list:
parsed_args['type'] = options.type.lower()
else:
parser.error('Unknown type for command %s. The type should be one of %s' \
% (parsed_args['command'], list_of_types))
#add db_dir
parsed_args['db-dir'] = options.db_dir
LOG.critical("Use gmvault-db located in %s.\n" % (parsed_args['db-dir']))
# add host
parsed_args['host'] = options.host
#convert to int if necessary
port_type = type(options.port)
try:
if port_type == type('s') or port_type == type("s"):
port = int(options.port)
else:
port = options.port
except Exception, _: #pylint:disable=W0703
parser.error("--port option %s is not a number. Please check the port value" % (port))
# add port
parsed_args['port'] = port
return parsed_args
    def parse_args(self): #pylint: disable=R0912
        """ Parse command line arguments.

            Dispatches on the sub-command (options.verb) and builds a flat
            dict of everything the command handlers need.

            :returns: a dict that contains the arguments
            :except Exception Error
        """
        parser = self._create_parser()

        options = parser.parse_args()

        LOG.debug("Namespace = %s\n" % (options))

        parsed_args = { }

        parsed_args['command'] = options.verb

        if parsed_args.get('command', '') == 'sync':

            # parse common arguments for sync and restore
            self._parse_common_args(options, parser, parsed_args, self.SYNC_TYPES)

            # handle the search requests (IMAP or GMAIL dialect)
            if options.imap_request and options.gmail_request:
                parser.error('Please use only one search request type. You can use --imap-req or --gmail-req.')
            elif not options.imap_request and not options.gmail_request:
                LOG.debug("No search request type passed: Get everything.")
                parsed_args['request'] = {'type': 'imap', 'req':'ALL'}
            elif options.gmail_request and not options.imap_request:
                parsed_args['request'] = { 'type': 'gmail', 'req' : self._clean_imap_or_gm_request(options.gmail_request)}
            else:
                parsed_args['request'] = { 'type':'imap', 'req' : self._clean_imap_or_gm_request(options.imap_request)}

            # handle emails or chats only
            if options.only_emails and options.only_chats:
                parser.error("--emails-only and --chats-only cannot be used together. Please choose one.")

            parsed_args['emails_only'] = options.only_emails
            parsed_args['chats_only'] = options.only_chats

            # add db-cleaning
            # if request passed put it False unless it has been forced by the user
            # default is True (db-cleaning done)

            #default
            parsed_args['db-cleaning'] = True

            # if there is a value then it is forced
            if options.db_cleaning:
                parsed_args['db-cleaning'] = parser.convert_to_boolean(options.db_cleaning)

            #elif parsed_args['request']['req'] != 'ALL' and not options.db_cleaning:
            #    #else if we have a request and not forced put it to false
            #    parsed_args['db-cleaning'] = False

            if parsed_args['db-cleaning']:
                LOG.critical("Activate Gmvault db cleaning.")
            else:
                LOG.critical("Disable deletion of emails that are in Gmvault db and not anymore in Gmail.")

            #add encryption option
            parsed_args['encrypt'] = options.encrypt

            #add ownership checking
            parsed_args['ownership_control'] = not options.allow_mult_owners

            #compression flag
            parsed_args['compression'] = options.compression

        elif parsed_args.get('command', '') == 'restore':

            # parse common arguments for sync and restore
            self._parse_common_args(options, parser, parsed_args, self.RESTORE_TYPES)

            # apply restore labels if there is any
            parsed_args['apply_label'] = options.apply_label

            parsed_args['restart'] = options.restart

            # handle emails or chats only
            if options.only_emails and options.only_chats:
                parser.error("--emails-only and --chats-only cannot be used together. Please choose one.")

            parsed_args['emails_only'] = options.only_emails
            parsed_args['chats_only'] = options.only_chats

        elif parsed_args.get('command', '') == 'check':

            #add defaults for type (check has no --type/--resume options)
            options.type = 'full'
            options.restart = False

            # parse common arguments for sync and restore
            self._parse_common_args(options, parser, parsed_args, self.CHECK_TYPES)

        elif parsed_args.get('command', '') == 'export':
            parsed_args['labels'] = options.label

            parsed_args['db-dir'] = options.db_dir
            parsed_args['output-dir'] = options.output_dir

            if options.type.lower() in self.EXPORT_TYPES:
                parsed_args['type'] = options.type.lower()
            else:
                parser.error('Unknown type for command export. The type should be one of %s' % self.EXPORT_TYPE_NAMES)

            parsed_args['debug'] = options.debug

        elif parsed_args.get('command', '') == 'config':
            pass

        #add parser
        parsed_args['parser'] = parser

        return parsed_args
@classmethod
def _clean_imap_or_gm_request(cls, request):
"""
Clean request passed by the user with the option --imap-req or --gmail-req.
Windows batch script preserve the single quote and unix shell doesn't.
If the request starts and ends with single quote eat them.
"""
LOG.debug("clean_imap_or_gm_request. original request = %s\n" % (request))
if request and (len(request) > 2) and (request[0] == "'" and request[-1] == "'"):
request = request[1:-1]
LOG.debug("clean_imap_or_gm_request. processed request = %s\n" % (request))
return request
@classmethod
def _export(cls, args):
"""
Export gmvault-db into another format
"""
export_type = cls.EXPORT_TYPES[args['type']]
output_dir = export_type(args['output-dir'])
LOG.critical("Export gmvault-db as a %s mailbox." % (args['type']))
exporter = gmvault_export.GMVaultExporter(args['db-dir'], output_dir,
labels=args['labels'])
exporter.export()
output_dir.close()
@classmethod
def _restore(cls, args, credential):
"""
Execute All restore operations
"""
LOG.critical("Connect to Gmail server.\n")
# Create a gmvault releaving read_only_access
restorer = gmvault.GMVaulter(args['db-dir'], args['host'], args['port'], \
args['email'], credential, read_only_access = False)
#full sync is the first one
if args.get('type', '') == 'full':
#call restore
labels = [args['apply_label']] if args['apply_label'] else []
restorer.restore(extra_labels = labels, restart = args['restart'], \
emails_only = args['emails_only'], chats_only = args['chats_only'])
elif args.get('type', '') == 'quick':
#take the last two to 3 months depending on the current date
# today - 2 months
today = datetime.date.today()
begin = today - datetime.timedelta(gmvault_utils.get_conf_defaults().getint("Restore", "quick_days", 8))
starting_dir = gmvault_utils.get_ym_from_datetime(begin)
#call restore
labels = [args['apply_label']] if args['apply_label'] else []
restorer.restore(pivot_dir = starting_dir, extra_labels = labels, restart = args['restart'], \
emails_only = args['emails_only'], chats_only = args['chats_only'])
else:
raise ValueError("Unknown synchronisation mode %s. Please use full (default), quick.")
#print error report
LOG.critical(restorer.get_operation_report())
@classmethod
def _sync(cls, args, credential):
"""
Execute All synchronisation operations
"""
LOG.critical("Connect to Gmail server.\n")
# handle credential in all levels
syncer = gmvault.GMVaulter(args['db-dir'], args['host'], args['port'], \
args['email'], credential, read_only_access = True, \
use_encryption = args['encrypt'])
#full sync is the first one
if args.get('type', '') == 'full':
#choose full sync. Ignore the request
syncer.sync({ 'mode': 'full', 'type': 'imap', 'req': 'ALL' } , compress_on_disk = args['compression'], \
db_cleaning = args['db-cleaning'], ownership_checking = args['ownership_control'],\
restart = args['restart'], emails_only = args['emails_only'], chats_only = args['chats_only'])
elif args.get('type', '') == 'auto':
#choose auto sync. imap request = ALL and restart = True
syncer.sync({ 'mode': 'auto', 'type': 'imap', 'req': 'ALL' } , compress_on_disk = args['compression'], \
db_cleaning = args['db-cleaning'], ownership_checking = args['ownership_control'],\
restart = True, emails_only = args['emails_only'], chats_only = args['chats_only'])
elif args.get('type', '') == 'quick':
#sync only the last x days (taken in defaults) in order to be quick
#(cleaning is import here because recent days might move again
# today - 2 months
today = datetime.date.today()
begin = today - datetime.timedelta(gmvault_utils.get_conf_defaults().getint("Sync", "quick_days", 8))
LOG.critical("Quick sync mode. Check for new emails since %s." % (begin.strftime('%d-%b-%Y')))
# today + 1 day
end = today + datetime.timedelta(1)
req = { 'type' : 'imap', \
'req' : syncer.get_imap_request_btw_2_dates(begin, end), \
'mode' : 'quick'}
syncer.sync( req, \
compress_on_disk = args['compression'], \
db_cleaning = args['db-cleaning'], \
ownership_checking = args['ownership_control'], restart = args['restart'], \
emails_only = args['emails_only'], chats_only = args['chats_only'])
elif args.get('type', '') == 'custom':
#convert args to unicode
args['request']['req'] = gmvault_utils.convert_to_unicode(args['request']['req'])
args['request']['charset'] = 'utf-8' #for the moment always utf-8
args['request']['mode'] = 'custom'
# pass an imap request. Assume that the user know what to do here
LOG.critical("Perform custom synchronisation with %s request: %s.\n" \
% (args['request']['type'], args['request']['req']))
syncer.sync(args['request'], compress_on_disk = args['compression'], db_cleaning = args['db-cleaning'], \
ownership_checking = args['ownership_control'], restart = args['restart'], \
emails_only = args['emails_only'], chats_only = args['chats_only'])
else:
raise ValueError("Unknown synchronisation mode %s. Please use full (default), quick or custom.")
#print error report
LOG.critical(syncer.get_operation_report())
@classmethod
def _check_db(cls, args, credential):
"""
Check DB
"""
LOG.critical("Connect to Gmail server.\n")
# handle credential in all levels
checker = gmvault.GMVaulter(args['db-dir'], args['host'], args['port'], \
args['email'], credential, read_only_access = True)
checker.check_clean_db(db_cleaning = True)
def run(self, args): #pylint:disable=R0912
"""
Run the grep with the given args
"""
on_error = True
die_with_usage = True
try:
if args.get('command') not in ('export'):
credential = CredentialHelper.get_credential(args)
if args.get('command', '') == 'sync':
self._sync(args, credential)
elif args.get('command', '') == 'restore':
self._restore(args, credential)
elif args.get('command', '') == 'check':
self._check_db(args, credential)
elif args.get('command', '') == 'export':
self._export(args)
elif args.get('command', '') == 'config':
LOG.critical("Configure something. TBD.\n")
on_error = False
except KeyboardInterrupt, _:
LOG.critical("\nCTRL-C. Stop all operations.\n")
on_error = False
except socket.error:
LOG.critical("Error: Network problem. Please check your gmail server hostname,"\
" the internet connection or your network setup.\n")
LOG.critical("=== Exception traceback ===")
LOG.critical(gmvault_utils.get_exception_traceback())
LOG.critical("=== End of Exception traceback ===\n")
die_with_usage = False
except imaplib.IMAP4.error, imap_err:
#bad login or password
if str(imap_err) in ['[AUTHENTICATIONFAILED] Invalid credentials (Failure)', \
'[ALERT] Web login required: http://support.google.com/'\
'mail/bin/answer.py?answer=78754 (Failure)', \
'[ALERT] Invalid credentials (Failure)'] :
LOG.critical("ERROR: Invalid credentials, cannot login to the gmail server."\
" Please check your login and password or xoauth token.\n")
die_with_usage = False
else:
LOG.critical("Error: %s. \n" % (imap_err) )
LOG.critical("=== Exception traceback ===")
LOG.critical(gmvault_utils.get_exception_traceback())
LOG.critical("=== End of Exception traceback ===\n")
except Exception, err:
LOG.critical("Error: %s. \n" % (err) )
LOG.critical("=== Exception traceback ===")
LOG.critical(gmvault_utils.get_exception_traceback())
LOG.critical("=== End of Exception traceback ===\n")
die_with_usage = False
finally:
if on_error:
if die_with_usage:
args['parser'].die_with_usage()
sys.exit(1)
def init_logging():
    """
    init logging infrastructure
    """
    #setup application logs: one handler for stdout and one for a log file
    # file logging is disabled here; activate_debug_mode() re-runs the setup
    # with a log file when --debug is passed
    log_utils.LoggerFactory.setup_cli_app_handler(log_utils.STANDALONE, activate_log_file=False, file_path="./gmvault.log")
def activate_debug_mode():
    """Switch the CLI logging setup to DEBUG level with a log file."""
    # resolve the log directory once; fall back to the cwd when HOME is unset
    home = os.getenv("HOME", ".")
    LOG.critical("Debugging logs are going to be saved in file %s/gmvault.log.\n" % home)
    log_utils.LoggerFactory.setup_cli_app_handler(log_utils.STANDALONE,
                                                  activate_log_file=True,
                                                  console_level='DEBUG',
                                                  file_path="%s/gmvault.log" % home)
def sigusr1_handler(signum, frame): #pylint:disable=W0613
    """
    Signal handler to get stack trace if the program is stuck
    """
    # append (not overwrite) so traces from repeated signals accumulate
    filename = './gmvault.traceback.txt'
    print("GMVAULT: Received SIGUSR1 -- Printing stack trace in %s..." %
          os.path.abspath(filename))
    with open(filename, 'a') as f:
        traceback.print_stack(file=f)
def register_traceback_signal():
    """ To register a USR1 signal allowing to get stack trace """
    # after this, `kill -USR1 <pid>` dumps the stack to ./gmvault.traceback.txt
    signal.signal(signal.SIGUSR1, sigusr1_handler)
def setup_default_conf():
    """
    set the environment GMVAULT_CONF_FILE which is necessary for Conf object
    """
    # calling get_conf_defaults() has the side effect of loading the defaults
    gmvault_utils.get_conf_defaults() # force instanciation of conf to load the defaults
def bootstrap_run():
    """Temporary bootstrap: set up logging, parse args and run the launcher."""
    init_logging()
    # make sure the program reports itself as "gmvault"
    sys.argv[0] = "gmvault"
    LOG.critical("")
    launcher = GMVaultLauncher()
    parsed_args = launcher.parse_args()
    if parsed_args['debug']:
        # switch the logging setup to debug level + file logging
        LOG.critical("Activate debugging information.")
        activate_debug_mode()
    # force instanciation of conf to load the defaults
    gmvault_utils.get_conf_defaults()
    launcher.run(parsed_args)
if __name__ == '__main__':
    # Script entry point: install the SIGUSR1 stack-dump handler, then run.
    #import memdebug
    #memdebug.start(8080)
    #import sys
    #print("sys.argv=[%s]" %(sys.argv))
    register_traceback_signal()
    bootstrap_run()
    #sys.exit(0)
| agpl-3.0 |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/MultiCall.py | 45 | 17464 | """
MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
example), but enables multiple calls of functions per virtual event - all
matching events will be called, not only the most specific one. This is done
by wrapping the event functions - event_add, event_delete and event_info.
MultiCall recognizes only a subset of legal event sequences. Sequences which
are not recognized are treated by the original Tk handling mechanism. A
more-specific event will be called before a less-specific event.
The recognized sequences are complete one-event sequences (no emacs-style
Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
Key/Button Press/Release events can have modifiers.
The recognized modifiers are Shift, Control, Option and Command for Mac, and
Control, Alt, Shift, Meta/M for other platforms.
For all events which were handled by MultiCall, a new member is added to the
event instance passed to the binded functions - mc_type. This is one of the
event type constants defined in this module (such as MC_KEYPRESS).
For Key/Button events (which are handled by MultiCall and may receive
modifiers), another member is added - mc_state. This member gives the state
of the recognized modifiers, as a combination of the modifier constants
also defined in this module (for example, MC_SHIFT).
Using these members is absolutely portable.
The order by which events are called is defined by these rules:
1. A more-specific event will be called before a less-specific event.
2. A recently-binded event will be called before a previously-binded event,
unless this conflicts with the first rule.
Each function will be called at most once for each event.
"""
import sys
import string
import re
import Tkinter
from idlelib import macosxSupport
# the event type constants, which define the meaning of mc_type
MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7;
MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12;
MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17;
MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22;
# the modifier state constants, which define the meaning of mc_state
MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
MC_OPTION = 1<<6; MC_COMMAND = 1<<7
# define the list of modifiers, to be used in complex event types.
# Each entry is a tuple of accepted spellings for one modifier; the
# recognized set depends on the platform.
if macosxSupport.runningAsOSXApp():
    _modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
    _modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
    _modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M"))
    _modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
# a dictionary to map a modifier name into its number
# (its index in _modifiers; every accepted spelling maps to the same index)
_modifier_names = dict([(name, number)
                        for number in range(len(_modifiers))
                        for name in _modifiers[number]])
# A binder is a class which binds functions to one type of event. It has two
# methods: bind and unbind, which get a function and a parsed sequence, as
# returned by _parse_sequence(). There are two types of binders:
# _SimpleBinder handles event types with no modifiers and no detail.
# No Python functions are called when no events are binded.
# _ComplexBinder handles event types with modifiers and a detail.
# A Python function is called each time an event is generated.
class _SimpleBinder:
    """Binder for event types that take no modifiers and no detail.

    A single Tk handler is installed lazily on the first bind() and
    removed when the last function is unbound.
    """
    def __init__(self, type, widget, widgetinst):
        self.type = type
        self.sequence = '<'+_types[type][0]+'>'
        self.widget = widget
        self.widgetinst = widgetinst
        # functions bound to this event, in bind() order
        self.bindedfuncs = []
        # Tk binding id, or None while no handler is installed
        self.handlerid = None
    def bind(self, triplet, func):
        # triplet is accepted for interface compatibility with
        # _ComplexBinder; simple events have no state/detail to use it.
        if not self.handlerid:
            def handler(event, l = self.bindedfuncs, mc_type = self.type):
                # Call the bound functions most-recently-bound first, each
                # at most once, stopping at the first true return value.
                event.mc_type = mc_type
                wascalled = {}
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = func(event)
                        if r:
                            return r
            self.handlerid = self.widget.bind(self.widgetinst,
                                              self.sequence, handler)
        self.bindedfuncs.append(func)
    def unbind(self, triplet, func):
        self.bindedfuncs.remove(func)
        if not self.bindedfuncs:
            # last function gone - remove the Tk handler as well
            self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
            self.handlerid = None
    def __del__(self):
        if self.handlerid:
            self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
# An int in range(1 << len(_modifiers)) represents a combination of modifiers
# (if the least significent bit is on, _modifiers[0] is on, and so on).
# _state_subsets gives for each combination of modifiers, or *state*,
# a list of the states which are a subset of it. This list is ordered by the
# number of modifiers is the state - the most specific state comes first.
# NOTE: under Python 2, range() returns a list, so _states can be iterated
# over repeatedly below.
_states = range(1 << len(_modifiers))
# human-readable Tk prefix for every state, e.g. "Control-Shift-"
_state_names = [''.join(m[0]+'-'
                        for i, m in enumerate(_modifiers)
                        if (1 << i) & s)
                for s in _states]
def expand_substates(states):
    '''For each item of states return a list containing all combinations of
    that item with individual bits reset, sorted by the number of set bits.
    '''
    def popcount(value):
        "Count the bits set in value (base 2)."
        count = 0
        while value:
            value, bit = divmod(value, 2)
            count += bit
        return count
    result = []
    for state in states:
        # intersect the state with every known state to enumerate its subsets
        subs = list(set(state & other for other in states))
        subs.sort(key=popcount, reverse=True)
        result.append(subs)
    return result
_state_subsets = expand_substates(_states)
# _state_codes gives for each state, the portable code to be passed as mc_state
# (translates internal bit positions into the public MC_* modifier masks)
_state_codes = []
for s in _states:
    r = 0
    for i in range(len(_modifiers)):
        if (1 << i) & s:
            r |= _modifier_masks[i]
    _state_codes.append(r)
class _ComplexBinder:
    # This class binds many functions, and only unbinds them when it is deleted.
    # self.handlerids is the list of seqs and ids of binded handler functions.
    # The binded functions sit in a dictionary of lists of lists, which maps
    # a detail (or None) and a state into a list of functions.
    # When a new detail is discovered, handlers for all the possible states
    # are binded.
    def __create_handler(self, lists, mc_type, mc_state):
        # Build the Tk-level callback for one (type, state) combination.
        # `lists` is captured by reference, so functions added/removed via
        # bind()/unbind() later are still seen by this handler.
        def handler(event, lists = lists,
                    mc_type = mc_type, mc_state = mc_state,
                    ishandlerrunning = self.ishandlerrunning,
                    doafterhandler = self.doafterhandler):
            # mark the handler as running so bind()/unbind() defer mutations
            ishandlerrunning[:] = [True]
            event.mc_type = mc_type
            event.mc_state = mc_state
            wascalled = {}
            r = None
            for l in lists:
                # most-recently-bound first; each function at most once
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = l[i](event)
                        if r:
                            break
                if r:
                    break
            ishandlerrunning[:] = []
            # Call all functions in doafterhandler and remove them from list
            while doafterhandler:
                doafterhandler.pop()()
            if r:
                return r
        return handler
    def __init__(self, type, widget, widgetinst):
        self.type = type
        self.typename = _types[type][0]
        self.widget = widget
        self.widgetinst = widgetinst
        # detail (or None) -> per-state list of bound functions
        self.bindedfuncs = {None: [[] for s in _states]}
        self.handlerids = []
        # we don't want to change the lists of functions while a handler is
        # running - it will mess up the loop and anyway, we usually want the
        # change to happen from the next event. So we have a list of functions
        # for the handler to run after it finishes calling the binded functions.
        # It calls them only once.
        # ishandlerrunning is a list. An empty one means no, otherwise - yes.
        # this is done so that it would be mutable.
        self.ishandlerrunning = []
        self.doafterhandler = []
        # install one Tk handler per modifier state for the detail-less form
        for s in _states:
            lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]]
            handler = self.__create_handler(lists, type, _state_codes[s])
            seq = '<'+_state_names[s]+self.typename+'>'
            self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                          seq, handler)))
    def bind(self, triplet, func):
        # triplet is (state, type, detail); a new detail triggers lazy
        # installation of Tk handlers for every possible state
        if triplet[2] not in self.bindedfuncs:
            self.bindedfuncs[triplet[2]] = [[] for s in _states]
            for s in _states:
                lists = [ self.bindedfuncs[detail][i]
                          for detail in (triplet[2], None)
                          for i in _state_subsets[s] ]
                handler = self.__create_handler(lists, self.type,
                                                _state_codes[s])
                seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2])
                self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                              seq, handler)))
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func)
        if not self.ishandlerrunning:
            doit()
        else:
            # defer the mutation until the running handler finishes
            self.doafterhandler.append(doit)
    def unbind(self, triplet, func):
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
        if not self.ishandlerrunning:
            doit()
        else:
            # defer the mutation until the running handler finishes
            self.doafterhandler.append(doit)
    def __del__(self):
        for seq, id in self.handlerids:
            self.widget.unbind(self.widgetinst, seq, id)
# define the list of event types to be handled by MultiEvent. the order is
# compatible with the definition of event type constants.
# Each entry is a tuple of accepted spellings for the type name.
_types = (
    ("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"),
    ("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",),
    ("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",),
    ("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",),
    ("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",),
    ("Visibility",),
)
# which binder should be used for every event type?
# (the first four types take modifiers and a detail; the rest are simple)
_binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4)
# A dictionary to map a type name into its number
_type_names = dict([(name, number)
                    for number in range(len(_types))
                    for name in _types[number]])
# detail validators: keysyms are word-like, button numbers are 1-5
_keysym_re = re.compile(r"^\w+$")
_button_re = re.compile(r"^[1-5]$")
def _parse_sequence(sequence):
    """Get a string which should describe an event sequence. If it is
    successfully parsed as one, return a tuple containing the state (as an int),
    the event type (as an index of _types), and the detail - None if none, or a
    string if there is one. If the parsing is unsuccessful, return None.
    """
    if not sequence or sequence[0] != '<' or sequence[-1] != '>':
        return None
    # use the str method instead of the deprecated string.split() module
    # function (behaviour is identical; also removes a Py3 blocker)
    words = sequence[1:-1].split('-')
    modifiers = 0
    # consume leading modifier names, accumulating their bit positions
    while words and words[0] in _modifier_names:
        modifiers |= 1 << _modifier_names[words[0]]
        del words[0]
    if words and words[0] in _type_names:
        # renamed from `type` to avoid shadowing the builtin
        ev_type = _type_names[words[0]]
        del words[0]
    else:
        return None
    if _binder_classes[ev_type] is _SimpleBinder:
        # simple events accept neither modifiers nor a detail
        if modifiers or words:
            return None
        else:
            detail = None
    else:
        # _ComplexBinder: the detail is a keysym for key events and a
        # button number for button events
        if ev_type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]:
            type_re = _keysym_re
        else:
            type_re = _button_re
        if not words:
            detail = None
        elif len(words) == 1 and type_re.match(words[0]):
            detail = words[0]
        else:
            return None
    return modifiers, ev_type, detail
def _triplet_to_sequence(triplet):
    """Render a (state, type, detail) triplet back into a "<...>" string."""
    state, ev_type, detail = triplet
    base = _state_names[state] + _types[ev_type][0]
    if detail:
        return '<' + base + '-' + detail + '>'
    return '<' + base + '>'
# cache of generated MultiCall classes, keyed by the wrapped widget class
_multicall_dict = {}
def MultiCallCreator(widget):
    """Return a MultiCall class which inherits its methods from the
    given widget class (for example, Tkinter.Text). This is used
    instead of a templating mechanism.
    """
    # classes are cached so each widget type is wrapped only once
    if widget in _multicall_dict:
        return _multicall_dict[widget]
    class MultiCall (widget):
        assert issubclass(widget, Tkinter.Misc)
        def __init__(self, *args, **kwargs):
            widget.__init__(self, *args, **kwargs)
            # a dictionary which maps a virtual event to a tuple with:
            # 0. the function binded
            # 1. a list of triplets - the sequences it is binded to
            self.__eventinfo = {}
            # one binder instance per event type, indexed by type number
            self.__binders = [_binder_classes[i](i, widget, self)
                              for i in range(len(_types))]
        def bind(self, sequence=None, func=None, add=None):
            "Handle <<virtual>> sequences ourselves; defer others to Tk."
            #print "bind(%s, %s, %s) called." % (sequence, func, add)
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>":
                if sequence in self.__eventinfo:
                    ei = self.__eventinfo[sequence]
                    if ei[0] is not None:
                        # rebinding: detach the previous function first
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].unbind(triplet, ei[0])
                    ei[0] = func
                    if ei[0] is not None:
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].bind(triplet, func)
                else:
                    self.__eventinfo[sequence] = [func, []]
            return widget.bind(self, sequence, func, add)
        def unbind(self, sequence, funcid=None):
            "Detach a <<virtual>> binding from our binders, then from Tk."
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>" and \
               sequence in self.__eventinfo:
                func, triplets = self.__eventinfo[sequence]
                if func is not None:
                    for triplet in triplets:
                        self.__binders[triplet[1]].unbind(triplet, func)
                    self.__eventinfo[sequence][0] = None
            return widget.unbind(self, sequence, funcid)
        def event_add(self, virtual, *sequences):
            "Register sequences for a virtual event, via our binders when parseable."
            #print "event_add(%s,%s) was called"%(repr(virtual),repr(sequences))
            if virtual not in self.__eventinfo:
                self.__eventinfo[virtual] = [None, []]
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    # unrecognized sequence: fall back to plain Tk handling
                    #print >> sys.stderr, "Seq. %s was added by Tkinter."%seq
                    widget.event_add(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].bind(triplet, func)
                    triplets.append(triplet)
        def event_delete(self, virtual, *sequences):
            "Remove sequences from a virtual event (ours or Tk's)."
            if virtual not in self.__eventinfo:
                return
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    #print >> sys.stderr, "Seq. %s was deleted by Tkinter."%seq
                    widget.event_delete(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].unbind(triplet, func)
                    triplets.remove(triplet)
        def event_info(self, virtual=None):
            "Report sequences for a virtual event, merging ours with Tk's."
            if virtual is None or virtual not in self.__eventinfo:
                return widget.event_info(self, virtual)
            else:
                return tuple(map(_triplet_to_sequence,
                                 self.__eventinfo[virtual][1])) + \
                       widget.event_info(self, virtual)
        def __del__(self):
            # detach every function we still hold from its binder
            for virtual in self.__eventinfo:
                func, triplets = self.__eventinfo[virtual]
                if func:
                    for triplet in triplets:
                        self.__binders[triplet[1]].unbind(triplet, func)
    _multicall_dict[widget] = MultiCall
    return MultiCall
if __name__ == "__main__":
    # Test
    # Manual smoke test: bind assorted sequences to a MultiCall Text widget
    # and echo each sequence name when it fires.
    root = Tkinter.Tk()
    text = MultiCallCreator(Tkinter.Text)(root)
    text.pack()
    def bindseq(seq, n=[0]):
        # the mutable default n is used as a counter to generate unique
        # virtual event names across calls
        def handler(event):
            print seq
        text.bind("<<handler%d>>"%n[0], handler)
        text.event_add("<<handler%d>>"%n[0], seq)
        n[0] += 1
    bindseq("<Key>")
    bindseq("<Control-Key>")
    bindseq("<Alt-Key-a>")
    bindseq("<Control-Key-a>")
    bindseq("<Alt-Control-Key-a>")
    bindseq("<Key-b>")
    bindseq("<Control-Button-1>")
    bindseq("<Alt-Button-1>")
    bindseq("<FocusOut>")
    bindseq("<Enter>")
    bindseq("<Leave>")
    root.mainloop()
| gpl-2.0 |
thehyve/variant | eggs/djangorecipe-0.99-py2.7.egg/djangorecipe/tests.py | 1 | 23904 | import unittest
import tempfile
import os
import sys
import shutil
import mock
from zc.recipe.egg.egg import Scripts as ZCRecipeEggScripts
from djangorecipe.recipe import Recipe
# Add the testing dir to the Python path so we can use a fake Django
# install. This needs to be done so that we can use this as a base for
# mock's with some of the tests.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'testing'))
# Now that we have a fake Django on the path we can import the
# scripts. These are depenent on a Django install, hence the fake one.
from djangorecipe import test
from djangorecipe import manage
class TestRecipe(unittest.TestCase):
    def setUp(self):
        """Create a scratch buildout tree and a Recipe wired to it."""
        # Create a directory for our buildout files created by the recipe
        self.buildout_dir = tempfile.mkdtemp('djangorecipe')
        self.bin_dir = os.path.join(self.buildout_dir, 'bin')
        self.develop_eggs_dir = os.path.join(self.buildout_dir,
                                             'develop-eggs')
        self.eggs_dir = os.path.join(self.buildout_dir, 'eggs')
        self.parts_dir = os.path.join(self.buildout_dir, 'parts')
        # We need to create the bin dir since the recipe should be able to
        # expect it exists
        os.mkdir(self.bin_dir)
        self.recipe = Recipe({
            'buildout': {
                'eggs-directory': self.eggs_dir,
                'develop-eggs-directory': self.develop_eggs_dir,
                'python': 'python-version',
                'bin-directory': self.bin_dir,
                'parts-directory': self.parts_dir,
                'directory': self.buildout_dir,
                'find-links': '',
                'allow-hosts': '',
            },
            'python-version': {'executable': sys.executable}},
            'django',
            {'recipe': 'djangorecipe'})
    def tearDown(self):
        """Clean up the temporary buildout tree created in setUp."""
        # Remove our test dir
        shutil.rmtree(self.buildout_dir)
    def test_consistent_options(self):
        """Two Recipes built from identical options must end up equal."""
        # Buildout is pretty clever in detecting changing options. If
        # the recipe modifies its options during initialisation it
        # will store this to determine whether it needs to update or do
        # a uninstall & install. We need to make sure that we normally
        # do not trigger this. That means running the recipe with the
        # same options should give us the same results.
        self.assertEqual(*[
            Recipe({'buildout': {
                'eggs-directory': self.eggs_dir,
                'develop-eggs-directory': self.develop_eggs_dir,
                'python': 'python-version',
                'bin-directory': self.bin_dir,
                'parts-directory': self.parts_dir,
                'directory': self.buildout_dir,
                'find-links': '',
                'allow-hosts':'',
            },
                'python-version': {'executable': sys.executable}},
                'django',
                {'recipe': 'djangorecipe'}).options.copy()
            for i in range(2)])
    def test_create_file(self):
        """create_file writes a template-filled file but never overwrites."""
        # The create file helper should create a file at a certain
        # location unless it already exists. We will need a
        # non-existing file first.
        f, name = tempfile.mkstemp()
        # To show the function in action we need to delete the file
        # before testing.
        os.remove(name)
        # The method accepts a template argument which it will use
        # with the options argument for string substitution.
        self.recipe.create_file(name, 'Spam %s', 'eggs')
        # Let's check the contents of the file
        self.assertEqual(open(name).read(), 'Spam eggs')
        # If we try to write it again it will just ignore our request
        self.recipe.create_file(name, 'Spam spam spam %s', 'eggs')
        # The content of the file should therefore be the same
        self.assertEqual(open(name).read(), 'Spam eggs')
        # Now remove our temp file
        os.remove(name)
def test_generate_secret(self):
# To create a basic skeleton the recipe also generates a
# random secret for the settings file. Since it should very
# unlikely that it will generate the same key a few times in a
# row we will test it with letting it generate a few keys.
self.assert_(len(set(
[self.recipe.generate_secret() for i in xrange(10)])) > 1)
    def test_make_protocol_scripts(self):
        """The wsgi/fcgi options generate deployment scripts in bin/."""
        # To ease deployment a WSGI script can be generated. The
        # script adds any paths from the `extra_paths` option to the
        # Python path.
        self.recipe.options['wsgi'] = 'true'
        self.recipe.options['fcgi'] = 'true'
        self.recipe.make_scripts([], [])
        # This should have created a script in the bin dir
        wsgi_script = os.path.join(self.bin_dir, 'django.wsgi')
        self.assert_(os.path.exists(wsgi_script))
        # The contents should list our paths
        contents = open(wsgi_script).read()
        # It should also have a reference to our settings module
        self.assert_('project.development' in contents)
        # and a line which sets up the WSGI app
        self.assert_("application = "
                     "djangorecipe.wsgi.main('project.development', "
                     "logfile='')"
                     in contents)
        self.assert_("class logger(object)" not in contents)
        # Another deployment option is FCGI. The recipe supports an option to
        # automatically create the required script.
        fcgi_script = os.path.join(self.bin_dir, 'django.fcgi')
        self.assert_(os.path.exists(fcgi_script))
        # The contents should list our paths
        contents = open(fcgi_script).read()
        # It should also have a reference to our settings module
        self.assert_('project.development' in contents)
        # and a line which sets up the FCGI app
        self.assert_("djangorecipe.fcgi.main('project.development', "
                     "logfile='')"
                     in contents)
        self.assert_("class logger(object)" not in contents)
        # The logfile option must propagate into both generated scripts.
        self.recipe.options['logfile'] = '/foo'
        self.recipe.make_scripts([], [])
        wsgi_script = os.path.join(self.bin_dir, 'django.wsgi')
        contents = open(wsgi_script).read()
        self.assert_("logfile='/foo'" in contents)
        self.recipe.options['logfile'] = '/foo'
        self.recipe.make_scripts([], [])
        fcgi_script = os.path.join(self.bin_dir, 'django.fcgi')
        contents = open(fcgi_script).read()
        self.assert_("logfile='/foo'" in contents)
    @mock.patch('zc.buildout.easy_install', 'scripts')
    def test_make_protocol_scripts_return_value(self, scripts):
        """make_scripts must report every generated script path."""
        # NOTE(review): the two-argument mock.patch form used here is the
        # legacy mock API; with modern mock this would not inject `scripts`
        # - confirm against the pinned mock version.
        # The return value of make scripts lists the generated scripts.
        self.recipe.options['wsgi'] = 'true'
        self.recipe.options['fcgi'] = 'true'
        scripts.return_value = ['some-path']
        self.assertEqual(self.recipe.make_scripts([], []),
                         ['some-path', 'some-path'])
    def test_create_project(self):
        """create_project lays down a complete, ready-to-use project skeleton."""
        # If a project does not exist already the recipe will create
        # one.
        project_dir = os.path.join(self.buildout_dir, 'project')
        self.recipe.create_project(project_dir)
        # This should have created a project directory
        self.assert_(os.path.exists(project_dir))
        # With this directory we should have __init__.py to make it a
        # package
        self.assert_(
            os.path.exists(os.path.join(project_dir, '__init__.py')))
        # There should also be a urls.py
        self.assert_(
            os.path.exists(os.path.join(project_dir, 'urls.py')))
        # To make it easier to start using this project both a media
        # and a templates folder are created
        self.assert_(
            os.path.exists(os.path.join(project_dir, 'media')))
        self.assert_(
            os.path.exists(os.path.join(project_dir, 'templates')))
        # The project is ready to go since the recipe has generated a
        # base settings, development and production file
        for f in ('settings.py', 'development.py', 'production.py'):
            self.assert_(
                os.path.exists(os.path.join(project_dir, f)))
    def test_create_test_runner(self):
        """The test runner script is only generated when apps are listed."""
        # An executable script can be generated which will make it
        # possible to execute the Django test runner. This option
        # only works if we specify one or more apps to test.
        testrunner = os.path.join(self.bin_dir, 'test')
        # This first argument sets extra_paths, we will use this to
        # make sure the script can find this recipe
        recipe_dir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..'))
        # First we will show it does nothing by default
        self.recipe.create_test_runner([recipe_dir], [])
        self.failIf(os.path.exists(testrunner))
        # When we specify an app to test it should create the
        # testrunner
        self.recipe.options['test'] = 'knight'
        self.recipe.create_test_runner([recipe_dir], [])
        self.assert_(os.path.exists(testrunner))
def test_create_manage_script(self):
# This buildout recipe creates a alternative for the standard
# manage.py script. It has all the same functionality as the
# original one but it sits in the bin dir instead of within
# the project.
manage = os.path.join(self.bin_dir, 'django')
self.recipe.create_manage_script([], [])
self.assert_(os.path.exists(manage))
def test_create_manage_script_projectegg(self):
# When a projectegg is specified, then the egg specified
# should get used as the project file.
manage = os.path.join(self.bin_dir, 'django')
self.recipe.options['projectegg'] = 'spameggs'
self.recipe.create_manage_script([], [])
self.assert_(os.path.exists(manage))
# Check that we have 'spameggs' as the project
self.assert_("djangorecipe.manage.main('spameggs.development')"
in open(manage).read())
    @mock.patch('shutil', 'rmtree')
    @mock.patch('os.path', 'exists')
    @mock.patch('urllib', 'urlretrieve')
    @mock.patch('shutil', 'copytree')
    @mock.patch(ZCRecipeEggScripts, 'working_set')
    @mock.patch('zc.buildout.easy_install', 'scripts')
    @mock.patch(Recipe, 'create_manage_script')
    @mock.patch(Recipe, 'create_test_runner')
    @mock.patch('zc.recipe.egg', 'Develop')
    def test_extra_paths(self, rmtree, path_exists, urlretrieve,
                         copytree, working_set, scripts,
                         manage, testrunner, develop):
        """extra-paths entries must end up on the generated scripts' path."""
        # NOTE(review): decorators apply bottom-up and the legacy two-argument
        # mock.patch form is used here, so the parameter names above do not
        # necessarily correspond to their decorators - verify against the
        # pinned mock version before renaming anything.
        # The recipe allows extra-paths to be specified. It uses these to
        # extend the Python path within its generated scripts.
        self.recipe.options['version'] = '1.0'
        self.recipe.options['extra-paths'] = 'somepackage\nanotherpackage'
        path_exists.return_value = True
        working_set.return_value = (None, [])
        manage.return_value = []
        scripts.return_value = []
        testrunner.return_value = []
        develop.return_value = mock.Mock()
        self.recipe.install()
        self.assertEqual(manage.call_args[0][0][-2:],
                         ['somepackage', 'anotherpackage'])
    @mock.patch('shutil', 'rmtree')
    @mock.patch('os.path', 'exists')
    @mock.patch('urllib', 'urlretrieve')
    @mock.patch('shutil', 'copytree')
    @mock.patch(ZCRecipeEggScripts, 'working_set')
    @mock.patch('zc.buildout.easy_install', 'scripts')
    @mock.patch(Recipe, 'create_manage_script')
    @mock.patch(Recipe, 'create_test_runner')
    @mock.patch('site', 'addsitedir')
    @mock.patch('zc.recipe.egg', 'Develop')
    def test_pth_files(self, rmtree, path_exists, urlretrieve,
                       copytree, working_set, scripts,
                       manage, testrunner, addsitedir, develop):
        """Directories named in pth-files feed extra entries into extra-paths."""
        # NOTE(review): as in test_extra_paths, the decorator/parameter
        # pairing relies on the legacy mock API - confirm before renaming.
        # When a pth-files option is set the recipe will use that to add more
        # paths to extra-paths.
        self.recipe.options['version'] = '1.0'
        path_exists.return_value = True
        working_set.return_value = (None, [])
        scripts.return_value = []
        manage.return_value = []
        testrunner.return_value = []
        develop.return_value = mock.Mock()
        # The mock values needed to demonstrate the pth-files option.
        addsitedir.return_value = ['extra', 'dirs']
        self.recipe.options['pth-files'] = 'somedir'
        self.recipe.install()
        self.assertEqual(addsitedir.call_args, (('somedir', set([])), {}))
        # The extra-paths option has been extended.
        self.assertEqual(self.recipe.options['extra-paths'], '\nextra\ndirs')
def test_create_wsgi_script_projectegg(self):
# When a projectegg is specified, then the egg specified
# should get used as the project in the wsgi script.
wsgi = os.path.join(self.bin_dir, 'django.wsgi')
recipe_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
self.recipe.options['projectegg'] = 'spameggs'
self.recipe.options['wsgi'] = 'true'
self.recipe.make_scripts([recipe_dir], [])
self.assert_(os.path.exists(wsgi))
# Check that we have 'spameggs' as the project
self.assert_('spameggs.development' in open(wsgi).read())
def test_settings_option(self):
# The settings option can be used to specify the settings file
# for Django to use. By default it uses `development`.
self.assertEqual(self.recipe.options['settings'], 'development')
# When we change it an generate a manage script it will use
# this var.
self.recipe.options['settings'] = 'spameggs'
self.recipe.create_manage_script([], [])
manage = os.path.join(self.bin_dir, 'django')
self.assert_("djangorecipe.manage.main('project.spameggs')"
in open(manage).read())
    @mock.patch('shutil', 'rmtree')
    @mock.patch('os.path', 'exists')
    @mock.patch('urllib', 'urlretrieve')
    @mock.patch('shutil', 'copytree')
    @mock.patch(ZCRecipeEggScripts, 'working_set')
    @mock.patch('zc.buildout.easy_install', 'scripts')
    @mock.patch('subprocess', 'call')
    def test_update_with_cache(self, rmtree, path_exists, urlretrieve,
                               copytree, working_set, scripts,
                               call_process):
        """update() is a no-op when installing from cache."""
        path_exists.return_value = True
        working_set.return_value = (None, [])
        # When the recipe is asked to do an update whilst in install
        # from cache mode it just ignores it
        self.recipe.install_from_cache = True
        self.recipe.update()
        self.failIf(call_process.called)
@mock.patch('shutil', 'rmtree')
@mock.patch('os.path', 'exists')
@mock.patch('urllib', 'urlretrieve')
@mock.patch('shutil', 'copytree')
@mock.patch(ZCRecipeEggScripts, 'working_set')
@mock.patch('zc.buildout.easy_install', 'scripts')
@mock.patch('subprocess', 'call')
def test_update_with_newest_false(self, rmtree, path_exists, urlretrieve,
copytree, working_set, scripts,
call_process):
path_exists.return_value = True
working_set.return_value = (None, [])
# When the recipe is asked to do an update whilst in install
# from cache mode it just ignores it
self.recipe.buildout['buildout']['newest'] = 'false'
self.recipe.update()
self.assertFalse(call_process.called)
    def test_python_option(self):
        # The python option makes it possible to specify a specific Python
        # executable which is to be used for the generated scripts.
        # First: the buildout-wide python option selects /python4k.
        recipe = Recipe({
            'buildout': {
                'eggs-directory': self.eggs_dir,
                'develop-eggs-directory': self.develop_eggs_dir,
                'python': 'python-version',
                'bin-directory': self.bin_dir,
                'parts-directory': self.parts_dir,
                'directory': self.buildout_dir,
                'find-links': '',
                'allow-hosts': '',
                },
            'python-version': {'executable': '/python4k'}},
            'django',
            {'recipe': 'djangorecipe',
             'wsgi': 'true'})
        recipe.make_scripts([], [])
        # This should have created a script in the bin dir
        # whose shebang points at the configured interpreter.
        wsgi_script = os.path.join(self.bin_dir, 'django.wsgi')
        self.assertEqual(open(wsgi_script).readlines()[0], '#!/python4k\n')
        # Changing the option for only the part will change the used Python
        # version.
        recipe = Recipe({
            'buildout': {'eggs-directory': self.eggs_dir,
                         'develop-eggs-directory': self.develop_eggs_dir,
                         'python': 'python-version',
                         'bin-directory': self.bin_dir,
                         'parts-directory': self.parts_dir,
                         'directory': self.buildout_dir,
                         'find-links': '',
                         'allow-hosts': '',
                         },
            'python-version': {'executable': '/python4k'},
            'py5k': {'executable': '/python5k'}},
            'django',
            {'recipe': 'djangorecipe',
             'python': 'py5k', 'wsgi': 'true'})
        recipe.make_scripts([], [])
        # The part-level python option (py5k) wins over the buildout one.
        self.assertEqual(open(wsgi_script).readlines()[0], '#!/python5k\n')
def test_boilerplate_newest(self):
"""Test the default boilerplate."""
project_dir = os.path.join(self.buildout_dir, 'project')
secret = '$55upfci7a#gi@&e9o1-hb*k+f$3+(&b$j=cn67h#22*0%-bj0'
self.recipe.generate_secret = lambda: secret
self.recipe.create_project(project_dir)
settings = open(os.path.join(project_dir, 'settings.py')).read()
settings_dict = {'project': self.recipe.options['project'],
'secret': secret,
'urlconf': self.recipe.options['urlconf'],
}
from boilerplate import versions
self.assertEquals(versions['Newest']['settings'] % settings_dict,
settings)
def test_boilerplate_1_2(self):
"""Test the boilerplate for django 1.2."""
secret = '$55upfci7a#gi@&e9o1-hb*k+f$3+(&b$j=cn67h#22*0%-bj0'
self.recipe.generate_secret = lambda: secret
recipe = Recipe({
'buildout': {'eggs-directory': self.eggs_dir,
'develop-eggs-directory': self.develop_eggs_dir,
'python': 'python-version',
'bin-directory': self.bin_dir,
'parts-directory': self.parts_dir,
'directory': self.buildout_dir,
'find-links': '',
'allow-hosts': '',
'versions': 'versions',
},
'versions': {'django': '1.2.5'},
'python-version': {'executable': '/python4k'},
'py5k': {'executable': '/python5k'}},
'django',
{'recipe': 'djangorecipe',
'python': 'py5k', 'wsgi': 'true'})
secret = '$55upfci7a#gi@&e9o1-hb*k+f$3+(&b$j=cn67h#22*0%-bj0'
recipe.generate_secret = lambda: secret
project_dir = os.path.join(self.buildout_dir, 'project')
recipe.create_project(project_dir)
settings = open(os.path.join(project_dir, 'settings.py')).read()
settings_dict = {'project': self.recipe.options['project'],
'secret': secret,
'urlconf': self.recipe.options['urlconf'],
}
from boilerplate import versions
self.assertEquals(versions['1.2']['settings'] % settings_dict,
settings)
def test_versions_deprecation(self):
from zc.buildout import UserError
options = {'recipe': 'djangorecipe',
'version': 'trunk',
'python': 'py5k', 'wsgi': 'true'}
self.assertRaises(UserError, Recipe, *('buildout', 'test', options))
class ScriptTestCase(unittest.TestCase):
    """Base class that fakes a ``cheeseshop`` project in sys.modules."""

    def setUp(self):
        # Register fake package and settings modules so the generated
        # scripts under test can import them.
        settings = mock.sentinel.Settings
        package = mock.sentinel.CheeseShop
        package.development = settings
        self.settings = settings
        sys.modules['cheeseshop'] = package
        sys.modules['cheeseshop.development'] = settings

    def tearDown(self):
        # Drop the fake modules again so other tests start clean.
        for name in ('cheeseshop', 'cheeseshop.development'):
            del sys.modules[name]
class TestTestScript(ScriptTestCase):
    """Tests for the generated test-runner script (djangorecipe.test)."""

    # Fix: the original used the removed pre-0.7 two-string
    # ``mock.patch('django.core.management', 'execute_manager')`` API,
    # which in modern mock patches nothing useful and injects no mock.
    @mock.patch('django.core.management.execute_manager')
    def test_script(self, execute_manager):
        # The test script should execute the standard Django test
        # command with any apps given as its arguments.
        test.main('cheeseshop.development', 'spamm', 'eggs')
        # We only care about the arguments given to execute_manager.
        self.assertEqual(execute_manager.call_args[1],
                         {'argv': ['test', 'test', 'spamm', 'eggs']})

    @mock.patch('django.core.management.execute_manager')
    def test_deeply_nested_settings(self, execute_manager):
        # Settings files can be more than two levels deep. We need to
        # make sure the test script can properly import those. To
        # demonstrate this we need to add another level to our
        # sys.modules entries.
        settings = mock.sentinel.SettingsModule
        nce = mock.sentinel.NCE
        nce.development = settings
        sys.modules['cheeseshop'].nce = nce
        sys.modules['cheeseshop.nce'] = nce
        sys.modules['cheeseshop.nce.development'] = settings
        test.main('cheeseshop.nce.development', 'tilsit', 'stilton')
        self.assertEqual(execute_manager.call_args[0], (settings,))

    @mock.patch('sys.exit')
    def test_settings_error(self, sys_exit):
        # When the settings file cannot be imported the test runner
        # will exit with a message and a specific exit code.
        test.main('cheeseshop.tilsit', 'stilton')
        self.assertEqual(sys_exit.call_args, ((1,), {}))
class TestManageScript(ScriptTestCase):
    """Tests for the generated manage script (djangorecipe.manage)."""

    # Fix: converted from the removed pre-0.7 two-string mock.patch API
    # to dotted-target patches so the mocks are actually injected.
    @mock.patch('django.core.management.execute_manager')
    def test_script(self, execute_manager):
        # The manage script is a replacement for the default manage.py
        # script. It has all the same bells and whistles since all it
        # does is call the normal Django stuff.
        manage.main('cheeseshop.development')
        self.assertEqual(execute_manager.call_args,
                         ((self.settings,), {}))

    @mock.patch('sys.exit')
    def test_settings_error(self, sys_exit):
        # When the settings file cannot be imported the management
        # script will exit with a message and a specific exit code.
        manage.main('cheeseshop.tilsit')
        self.assertEqual(sys_exit.call_args, ((1,), {}))
def test_suite():
    """Collect all of this module's test cases into a single suite."""
    suite = unittest.TestSuite()
    for case in (TestRecipe, TestTestScript, TestManageScript):
        suite.addTest(unittest.makeSuite(case))
    return suite
| apache-2.0 |
max0d41/ThugBrowser | src/ActiveX/ActiveX.py | 1 | 4769 | #!/usr/bin/env python
#
# ActiveX.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import os
#import new
import logging
log = logging.getLogger("Thug")
# ProgIDs/CLSIDs of the Adobe Acrobat Reader controls (matched in
# _ActiveXObject.__init__ below).
acropdf = ( 'acropdf.pdf',
            'pdf.pdfctrl',
            'CA8A9780-280D-11CF-A24D-444553540000', )
# ProgIDs/CLSIDs of the Shockwave Flash controls.
shockwave = ( 'shockwaveflash.shockwaveflash',
              'shockwaveflash.shockwaveflash.9',
              'shockwaveflash.shockwaveflash.10',
              'swctl.swctl',
              'swctl.swctl.8',
              '233C1507-6A77-46A4-9443-F871F945D258', )
# CLSIDs of the Java Deployment Toolkit control.
java_deployment_toolkit = ( 'CAFEEFAC-DEC7-0000-0000-ABCDEFFEDCBA',
                            '8AD9C840-044E-11D1-B3E9-00805F499D93', )
class _ActiveXObject:
    """Emulation of a scriptable ActiveX control.

    NOTE: this is an old-style (Python 2) class; that matters for the
    __getattribute__ method at the bottom, which old-style instances
    do not invoke.
    """
    # ProgID -> major version of the emulated Shockwave Flash control.
    shockwave_flash = { 'shockwaveflash.shockwaveflash' : '10',
                        'shockwaveflash.shockwaveflash.9' : '9' ,
                        'shockwaveflash.shockwaveflash.10' : '10',
                        'shockwaveflash.shockwaveflash.11' : '11' }
    def __init__(self, window, cls, type = 'name'):
        """Look `cls` up in the CLSID registry and bind its methods/attrs.

        :param window: owning (DOM) window object
        :param cls: ProgID (type == 'name') or CLSID string (type == 'id')
        :raises TypeError: when the control is unknown or disabled
        """
        self.funcattrs = dict()
        self._window = window
        obj = None
        methods = dict()
        # Major version of the emulated Flash plugin, e.g. '10'.
        self.shockwave = log.ThugVulnModules.shockwave_flash.split('.')[0]
        if type == 'id':
            # Normalize a 'clsid:{...}' style identifier to a bare CLSID.
            if len(cls) > 5 and cls[:6].lower() == 'clsid:':
                cls = cls[6:].upper()
            if cls.startswith('{') and cls.endswith('}'):
                cls = cls[1:-1]
        if type == 'name':
            cls = cls.lower()
        # Adobe Acrobat Reader
        if cls in acropdf and log.ThugVulnModules.acropdf_disabled:
            log.warning("Unknown ActiveX Object: %s" % (cls, ))
            raise TypeError()
        # Shockwave Flash
        if cls in shockwave and log.ThugVulnModules.shockwave_flash_disabled:
            log.warning("Unknown ActiveX Object: %s" % (cls, ))
            raise TypeError()
        # Reject Flash ProgIDs whose version differs from the emulated one.
        if cls in self.shockwave_flash and not self.shockwave in (self.shockwave_flash[cls], ):
            log.warning("Unknown ActiveX Object: %s" % (cls, ))
            raise TypeError()
        _cls = cls
        # Java Deployment Toolkit
        if cls in java_deployment_toolkit and log.ThugVulnModules.javaplugin_disabled:
            log.warning("Unknown ActiveX Object: %s" % (cls, ))
            raise TypeError()
        # JavaPlugin
        if cls.lower().startswith('javaplugin'):
            if log.ThugVulnModules.javaplugin_disabled or not cls.endswith(log.ThugVulnModules.javaplugin):
                log.warning("Unknown ActiveX Object: %s" % (cls, ))
                raise TypeError()
            else:
                _cls = 'javaplugin'
        # JavaWebStart
        if cls.lower().startswith('javawebstart.isinstalled'):
            if log.ThugVulnModules.javaplugin_disabled or not cls.endswith(log.ThugVulnModules.javawebstart_isinstalled):
                log.warning("Unknown ActiveX Object: %s" % (cls, ))
                raise TypeError()
            else:
                _cls = 'javawebstart.isinstalled'
        # CLSID is presumably the module-level registry of known controls
        # imported elsewhere in Thug -- not visible in this excerpt.
        for c in CLSID:
            if _cls in c[type]:
                obj = c
                break
        if not obj:
            log.warning("Unknown ActiveX Object: %s" % (cls, ))
            #return None
            raise TypeError()
        log.warning("ActiveXObject: %s" % (cls, ))
        # Bind the registered plain functions as instance methods.
        for method_name, method in obj['methods'].items():
            #_method = new.instancemethod(method, self, _ActiveXObject)
            _method = method.__get__(self, _ActiveXObject)
            setattr(self, method_name, _method)
            methods[method] = _method
        for attr_name, attr_value in obj['attrs'].items():
            setattr(self, attr_name, attr_value)
        # funcattrs: attributes whose assignment triggers a method call.
        for attr_name, attr_value in obj['funcattrs'].items():
            self.funcattrs[attr_name] = methods[attr_value]
    def __setattr__(self, name, value):
        # Store the value and, when the name is registered as a funcattr,
        # forward the assignment to the matching ActiveX method as well.
        self.__dict__[name] = value
        if name in self.funcattrs:
            self.funcattrs[name](value)
    def __getattribute__(self, name):
        # NOTE(review): old-style classes never invoke this hook, so this
        # method is effectively dead code here; if the class were ported
        # to a new-style class, reading self.__dict__ inside it would
        # recurse infinitely -- confirm intent before porting.
        if name in self.__dict__:
            return self.__dict__[name]
        log.warning("Unknown ActiveX Object attribute: %s" % (name, ))
| gpl-2.0 |
axbaretto/beam | sdks/python/apache_beam/testing/test_utils_test.py | 5 | 3255 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittest for testing utilities,"""
# pytype: skip-file
import logging
import os
import tempfile
import unittest
import mock
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystems import FileSystems
from apache_beam.testing import test_utils as utils
class TestUtilsTest(unittest.TestCase):
  """Tests for the helpers in apache_beam.testing.test_utils."""

  def setUp(self):
    utils.patch_retry(self, utils)
    self.tmpdir = tempfile.mkdtemp()
    # Fix: remove the scratch directory even when a test fails; the
    # original leaked one temp directory per test run.
    import shutil
    self.addCleanup(shutil.rmtree, self.tmpdir, ignore_errors=True)

  def test_delete_files_succeeds(self):
    # delete_files removes an existing file.
    path = os.path.join(self.tmpdir, 'f1')
    with open(path, 'a') as f:
      f.write('test')
    assert FileSystems.exists(path)
    utils.delete_files([path])
    assert not FileSystems.exists(path)

  def test_delete_files_fails_with_io_error(self):
    # Deleting a nonexistent file surfaces a BeamIOError naming the path.
    path = os.path.join(self.tmpdir, 'f2')
    with self.assertRaises(BeamIOError) as error:
      utils.delete_files([path])
    self.assertTrue(
        error.exception.args[0].startswith('Delete operation failed'))
    self.assertEqual(list(error.exception.exception_details.keys()), [path])

  def test_delete_files_fails_with_invalid_arg(self):
    # An empty path list is a programming error.
    with self.assertRaises(RuntimeError):
      utils.delete_files([])

  def test_temp_dir_removes_files(self):
    # TempDir cleans up its directory and files on context exit.
    with utils.TempDir() as tempdir:
      dir_path = tempdir.get_path()
      file_path = tempdir.create_temp_file()
      self.assertTrue(os.path.exists(dir_path))
      self.assertTrue(os.path.exists(file_path))
    self.assertFalse(os.path.exists(dir_path))
    self.assertFalse(os.path.exists(file_path))

  def test_temp_file_field_correct(self):
    # create_temp_file honours the suffix and writes the given lines.
    with utils.TempDir() as tempdir:
      filename = tempdir.create_temp_file(
          suffix='.txt', lines=[b'line1\n', b'line2\n', b'line3\n'])
      self.assertTrue(filename.endswith('.txt'))
      with open(filename, 'rb') as f:
        self.assertEqual(f.readline(), b'line1\n')
        self.assertEqual(f.readline(), b'line2\n')
        self.assertEqual(f.readline(), b'line3\n')

  def test_cleanup_subscriptions(self):
    # cleanup_subscriptions deletes each subscription by name.
    sub_client = mock.Mock()
    sub = mock.Mock()
    sub.name = 'test_sub'
    utils.cleanup_subscriptions(sub_client, [sub])
    sub_client.delete_subscription.assert_called_with(sub.name)

  def test_cleanup_topics(self):
    # cleanup_topics deletes each topic by name.
    pub_client = mock.Mock()
    topic = mock.Mock()
    topic.name = 'test_topic'
    utils.cleanup_topics(pub_client, [topic])
    pub_client.delete_topic.assert_called_with(topic.name)
if __name__ == '__main__':
  # Run this module's tests directly, with INFO logging for visibility.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| apache-2.0 |
karthikvadla16/spark-tk | python/sparktk/frame/ops/timeseries_slice.py | 14 | 3826 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def timeseries_slice(self, date_time_index, start, end):
    """
    Returns a frame split on the specified start and end date/times.
    Splits a time series frame on the specified start and end date/times.
    :param date_time_index: List of date/time strings. DateTimeIndex to conform all series to.
    :param start: The start date for the slice in the ISO 8601 format, like: yyyy-MM-dd'T'HH:mm:ss.SSSZ
    :param end: The end date for the slice in the ISO 8601 format, like: yyyy-MM-dd'T'HH:mm:ss.SSSZ
    :return: Frame that contains a sub-slice of the current frame, based on the specified start/end date/times.
    Examples
    --------
    For this example, we start with a frame that has already been formatted as a time series.
    This means that the frame has a string column for key and a vector column that contains
    a series of the observed values. We must also know the date/time index that corresponds
    to the time series.
    The time series is in a Frame object called *ts_frame*.
    <hide>
    >>> from sparktk import dtypes
    >>> schema= [("key", str), ("series", dtypes.vector(6))]
    >>> data = [["A", [62,55,60,61,60,59]],["B", [60,58,61,62,60,61]],["C", [69,68,68,70,71,69]]]
    >>> ts_frame = tc.frame.create(data, schema)
    -etc-
    </hide>
    >>> ts_frame.inspect()
    [#]  key  series
    ==================================
    [0]  A    [62, 55, 60, 61, 60, 59]
    [1]  B    [60, 58, 61, 62, 60, 61]
    [2]  C    [69, 68, 68, 70, 71, 69]
    Next, we define the date/time index. In this example, it is one day intervals from
    2016-01-01 to 2016-01-06:
    >>> datetimeindex = ["2016-01-01T12:00:00.000Z","2016-01-02T12:00:00.000Z","2016-01-03T12:00:00.000Z","2016-01-04T12:00:00.000Z","2016-01-05T12:00:00.000Z","2016-01-06T12:00:00.000Z"]
    Get a slice of our time series from 2016-01-02 to 2016-01-04:
    >>> slice_start = "2016-01-02T12:00:00.000Z"
    >>> slice_end = "2016-01-04T12:00:00.000Z"
    >>> sliced_frame = ts_frame.timeseries_slice(datetimeindex, slice_start, slice_end)
    <progress>
    Take a look at our sliced time series:
    >>> sliced_frame.inspect()
    [#]  key  series
    ============================
    [0]  A    [55.0, 60.0, 61.0]
    [1]  B    [58.0, 61.0, 62.0]
    [2]  C    [68.0, 68.0, 70.0]
    """
    # Validate argument types up front; basestring marks this as Python 2.
    if not isinstance(date_time_index, list):
        raise TypeError("date_time_index should be a list of date/times")
    if not isinstance(start, basestring):
        raise TypeError("start date/time should be a string in the ISO 8601 format")
    if not isinstance(end, basestring):
        raise TypeError("end date/time should be a string in the ISO 8601 format")
    # Local import -- presumably to avoid a circular import between the
    # frame module and its ops; confirm before hoisting to module level.
    from sparktk.frame.frame import Frame
    # Delegate the actual slicing to the Scala backend, converting the
    # Python date/time strings to Scala DateTime objects.
    return Frame(self._tc,
                 self._scala.timeSeriesSlice(self._tc.jutils.convert.to_scala_date_time_list(date_time_index),
                                             self._tc.jutils.convert.to_scala_date_time(start),
                                             self._tc.jutils.convert.to_scala_date_time(end)))
| apache-2.0 |
DewarM/oppia | extensions/dependencies/dependencies_config.py | 4 | 1068 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for JavaScript library dependencies."""
__author__ = 'Sean Lip'
# A dict mapping dependency ids to the Angular module names they
# should insert when the Angular app is first initialized.
# An empty list means the dependency registers no Angular module of its own.
DEPENDENCIES_TO_ANGULAR_MODULES_DICT = {
    'codemirror': ['ui.codemirror'],
    'google_maps': ['ui.map'],
    'guppy': [],
    'logic_proof': [],
    'math_expressions': [],
    'midijs': [],
    'pencilcode': [],
    'skulpt': [],
}
| apache-2.0 |
ancafarcas/liveblog | server/liveblog/prepopulate/app_initialize.py | 2 | 5860 | import os
import json
import superdesk
import pymongo
from superdesk import get_resource_service
from flask import current_app as app
"""
App initialization information, maps resource name to the file containing the data
and the index to be created for the resource and the boolean flag to update the
resource or not.
__entities__ = {
"resource_name": ("file_name", "index_params", "do_patch")
}
"file_name" (str): name of the file containing seed data
"index_params list: List of key (field or List of tuple as required by pymongo create_index function.
http://api.mongodb.org/python/current/api/pymongo/collection.html
For example:
[[("first_name", pymongo.ASCENDING), ("last_name", pymongo.ASCENDING)], "username"] will create two indexes
- composite index of "first_name", "last_name" field.
- index on username field.
Alternatively index param can be specified as
[[("first_name", pymongo.ASCENDING), ("last_name", pymongo.ASCENDING)], [("username", pymongo.ASCENDING)]]
"""
# Maps each resource name to (seed file, index spec, patch-existing flag);
# the index spec format is described in the module comment above.
# NOTE(review): the module comment's example indexes last_name ASCENDING,
# but this table uses DESCENDING -- confirm which is intended.
__entities__ = {
    'roles': ('roles.json', ['name'], True),
    'global_preferences': ('global_preferences.json', ['key'], False),
    'users': ('users.json', [[('first_name', pymongo.ASCENDING),
                              ('last_name', pymongo.DESCENDING)],
                             'username'], False)
}
class AppInitializeWithDataCommand(superdesk.Command):
    """
    Initialize application with predefined data for various entities.
    Entities supported: [roles, users, desks, stages, groups, vocabularies, validators, content_templates].
    If no --entity-name parameter is supplied, all the entities are inserted.
    The entities [vocabularies, validators] will be updated with the predefined data if it already exists,
    no action will be taken for the other entities.
    """
    option_list = [
        superdesk.Option('--entity-name', '-n', dest='entity_name', default='')
    ]

    def run(self, entity_name=None, index_only='false'):
        """Import seed data for one named entity, or for all of them.

        ``index_only`` is accepted for interface compatibility but is not
        used by this implementation.  Returns 0 on completion.
        """
        self.logger.info('Starting data import')
        if entity_name:
            (file_name, index_params, do_patch) = __entities__[entity_name]
            self.import_file(entity_name, file_name, index_params, do_patch)
            return 0
        for name, (file_name, index_params, do_patch) in __entities__.items():
            try:
                self.import_file(name, file_name, index_params, do_patch)
            except Exception as ex:
                # Keep going: one broken seed file must not abort the rest.
                self.logger.info('Exception loading entity {} from {}'.format(name, file_name))
                self.logger.exception(ex)
        self.logger.info('Data import finished')
        return 0

    def import_file(self, entity_name, file_name, index_params, do_patch=False):
        """
        imports seed data based on the entity_name (resource name) from the file_name specified.
        index_params use to create index for that entity/resource
        :param str entity_name: name of the resource
        :param str file_name: file name that contains seed data
        :param list index_params: list of indexes that is created on that entity.
        For example:
        [[("first_name", pymongo.ASCENDING), ("last_name", pymongo.ASCENDING)], "username"] will create two indexes
        - composite index of "first_name", "last_name" field.
        - index on username field.
        Alternatively index param can be specified as
        [[("first_name", pymongo.ASCENDING), ("last_name", pymongo.ASCENDING)], [("username", pymongo.ASCENDING)]]
        Refer to pymongo create_index documentation for more information.
        http://api.mongodb.org/python/current/api/pymongo/collection.html
        :param bool do_patch: if True then patch the document else don't patch.
        """
        # Fix: use the command's logger instead of bare debug print()s.
        self.logger.debug('Config: %s', app.config['APP_ABSPATH'])
        if file_name:
            file_path = os.path.join(app.config.get('APP_ABSPATH'), 'apps', 'prepopulate', 'data_initialization',
                                     file_name)
            self.logger.debug('Got file path: %s', file_path)
            with open(file_path, 'rt') as app_prepopulation:
                json_data = json.loads(app_prepopulation.read())
                service = get_resource_service(entity_name)
                data = [app.data.mongo._mongotize(item, service.datasource) for item in json_data]
                existing_data = []
                existing = service.get_from_mongo(None, {})
                update_data = True
                # Existing documents are only touched when do_patch is set.
                if not do_patch and existing.count() > 0:
                    self.logger.info('Data already exists for {} none will be loaded'.format(entity_name))
                    update_data = False
                elif do_patch and existing.count() > 0:
                    self.logger.info('Data already exists for {} it will be updated'.format(entity_name))
                if update_data:
                    if do_patch:
                        # Split the seed data into new documents (posted)
                        # and documents that already exist (patched below).
                        for item in existing:
                            for loaded_item in data:
                                if '_id' in loaded_item and loaded_item['_id'] == item['_id']:
                                    existing_data.append(loaded_item)
                                    data.remove(loaded_item)
                    if data:
                        service.post(data)
                    if existing_data and do_patch:
                        for item in existing_data:
                            service.patch(item['_id'], item)
                self.logger.info('File {} imported successfully.'.format(file_name))
        # Create the configured indexes regardless of whether data changed.
        if index_params:
            for index in index_params:
                index_name = app.data.mongo.pymongo(resource=entity_name).db[entity_name].create_index(index)
                self.logger.info('Index: {} for collection {} created successfully.'.format(index_name, entity_name))
# Register the command with superdesk as 'app:initialize_data'.
superdesk.command('app:initialize_data', AppInitializeWithDataCommand())
| agpl-3.0 |
kellieotto/permute | permute/tests/test_npc.py | 1 | 4750 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from nose.plugins.attrib import attr
from nose.tools import assert_raises, raises
import numpy as np
from numpy.random import RandomState
from scipy.stats import norm
from ..npc import (fisher,
liptak,
tippett,
inverse_n_weight,
t2p,
npc,
check_combfunc_monotonic)
def test_fisher():
    # Known-value check of Fisher's combining function, plus edge cases:
    # p = 1 combines to 0; p > 1 (invalid) yields a negative statistic.
    pvalues = np.linspace(0.05, 0.9, num=5)
    np.testing.assert_almost_equal(fisher(pvalues), 11.11546, 5)
    np.testing.assert_equal(fisher(1), -0.0)
    np.testing.assert_array_less(fisher(10), 0)
def test_liptak():
    # Known-value check of the Liptak (normal-quantile) combining function;
    # p = 1 maps to norm.ppf(0) (= -inf) and p > 1 (invalid) to NaN.
    pvalues = np.linspace(0.05, 0.9, num=5)
    np.testing.assert_almost_equal(liptak(pvalues), 0.5728894, 5)
    np.testing.assert_equal(liptak(1), norm.ppf(0))
    np.testing.assert_equal(liptak(10), np.nan)
def test_tippett():
    # Known-value check of Tippett's combining function and its edge cases.
    pvalues = np.linspace(0.05, 0.9, num=5)
    np.testing.assert_almost_equal(tippett(pvalues), 0.95, 5)
    np.testing.assert_equal(tippett(1), 0)
    np.testing.assert_equal(tippett(10), -9)
def test_inverse_n_weight():
    # Known-value check for the inverse-n weighted combination.
    pvalues = np.array([0.5, 0.25, 0.75])
    sizes = np.array([2, 4, 6])
    np.testing.assert_almost_equal(inverse_n_weight(pvalues, sizes),
                                   -0.7847396)
def test_t2p():
    # t2p converts an empirical distribution of test statistics into
    # p-values for each alternative; the two-sided case folds the tails.
    # NOTE(review): `obs` is unused -- left in place pending cleanup.
    obs = 5
    distr = np.array(range(-10, 11))
    expected = np.linspace(21, 1, num=21)/21
    np.testing.assert_array_almost_equal(t2p(distr, "greater", plus1=False), expected)
    np.testing.assert_array_almost_equal(t2p(distr, "less", plus1=False), expected[::-1])
    expected2 = 2*np.concatenate([expected[::-1][:10],
                                  [0.5], expected[11:]])
    np.testing.assert_array_almost_equal(t2p(distr, "two-sided", plus1=False), expected2)
@raises(ValueError)
def test_t2p_bad_alternative():
    # Alternatives other than greater/less/two-sided must be rejected.
    t2p(np.array([0.5, 0.25, 0.75]), "not a real alternative")
def test_npc():
    # Nonparametric combination of p-values against a fixed random
    # reference distribution, for each combining function and alternative
    # (including per-p-value alternatives in the final call).
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=500).reshape(100, 5)
    res = npc(pvalues, distr, "fisher", "greater", plus1=False)
    np.testing.assert_almost_equal(res, 0.33)
    res = npc(pvalues, distr, "fisher", "less", plus1=False)
    np.testing.assert_almost_equal(res, 0.33)
    res = npc(pvalues, distr, "fisher", "two-sided", plus1=False)
    np.testing.assert_almost_equal(res, 0.31)
    res = npc(pvalues, distr, "liptak", "greater", plus1=False)
    np.testing.assert_almost_equal(res, 0.35)
    res = npc(pvalues, distr, "tippett", "greater", plus1=False)
    np.testing.assert_almost_equal(res, 0.25)
    res = npc(pvalues, distr, "fisher",
              alternatives=np.array(["less", "greater", "less",
                                     "greater", "two-sided"]), plus1=False)
    np.testing.assert_almost_equal(res, 0.38)
def test_npc_callable_combine():
    """npc accepts an arbitrary callable as the combining function."""
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=500).reshape(100, 5)
    size = np.array([2, 4, 6, 4, 2])

    # PEP 8 (E731): a named function instead of a lambda assignment.
    def combine(p):
        return inverse_n_weight(p, size)

    res = npc(pvalues, distr, combine, "greater", plus1=False)
    np.testing.assert_equal(res, 0.39)
@raises(ValueError)
def test_npc_bad_distr():
    # distr must have one column per p-value; 2 columns vs 5 p-values fails.
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=20).reshape(10, 2)
    npc(pvalues, distr, "fisher", "greater")
@raises(ValueError)
def test_npc_bad_alternative():
    # The alternatives array must have one entry per p-value (2 vs 5 here).
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=50).reshape(10, 5)
    npc(pvalues, distr, "fisher", np.array(["greater", "less"]))
@raises(ValueError)
def test_npc_single_pvalue():
    # Combining a single p-value is meaningless and must be rejected.
    npc(np.array([1]), np.array([1, 2, 3]))
def test_monotonic_checker():
    """check_combfunc_monotonic accepts monotone combiners, rejects others."""
    pvalues = np.array([0.1, 0.2, 0.3])
    np.testing.assert_equal(check_combfunc_monotonic(pvalues, fisher), True)
    np.testing.assert_equal(check_combfunc_monotonic(pvalues, liptak), True)
    np.testing.assert_equal(check_combfunc_monotonic(pvalues, tippett), True)

    # PEP 8 (E731): named functions instead of lambda assignments.
    def comb_function(p):
        return inverse_n_weight(p, np.array([2, 4, 6]))

    np.testing.assert_equal(check_combfunc_monotonic(pvalues, comb_function), True)

    def bad_comb_function(p):
        # Negating Fisher flips monotonicity, so the checker must say False.
        return -1 * fisher(p)

    np.testing.assert_equal(check_combfunc_monotonic(pvalues, bad_comb_function), False)
@raises(ValueError)
def test_mono_checker_in_npc():
    """npc itself must reject a non-monotone combining function."""
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=500).reshape(100, 5)

    # PEP 8 (E731): a named function instead of a lambda assignment.
    def bad_comb_function(p):
        return -1 * fisher(p)

    npc(pvalues, distr, bad_comb_function)
| bsd-2-clause |
ilCapo77/django-dbbackup | dbbackup/management/commands/mediabackup.py | 1 | 5759 | """
Save media files.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import sys
from datetime import datetime
import tarfile
import tempfile
from optparse import make_option
import re
from django.conf import settings
from django.core.management.base import CommandError
from dbbackup.management.commands._base import BaseDbBackupCommand
from dbbackup import utils
from dbbackup.storage.base import BaseStorage
from dbbackup.storage.base import StorageError
from dbbackup import settings as dbbackup_settings
class Command(BaseDbBackupCommand):
    """Management command that archives the media directory into an
    (optionally compressed and/or encrypted) tarball on the backup storage.
    """
    help = "backup_media [--encrypt] [--clean] [--no-compress] " \
           "--servername SERVER_NAME"
    option_list = BaseDbBackupCommand.option_list + (
        make_option("-c", "--clean", help="Clean up old backup files", action="store_true", default=False),
        make_option("-s", "--servername", help="Specify server name to include in backup filename"),
        make_option("-e", "--encrypt", help="Encrypt the backup files", action="store_true", default=False),
        make_option("-x", "--no-compress", help="Do not compress the archive", action="store_true", default=False),
    )

    @utils.email_uncaught_exception
    def handle(self, *args, **options):
        """Entry point: back up media files, optionally cleaning old backups."""
        try:
            self.servername = options.get('servername')
            self.storage = BaseStorage.storage_factory()
            # Compress unless --no-compress was given.  (The original used
            # the obscure ``options.get('no_compress') ^ True`` XOR trick
            # for boolean negation.)
            self.backup_mediafiles(
                options.get('encrypt'),
                not options.get('no_compress'))
            if options.get('clean'):
                self.cleanup_old_backups()
        except StorageError as err:
            raise CommandError(err)

    def backup_mediafiles(self, encrypt, compress):
        """Create a (possibly compressed/encrypted) tar of the media dir
        and write it to the configured backup storage."""
        source_dir = self.get_source_dir()
        if not source_dir:
            self.stderr.write("No media source dir configured.")
            sys.exit(0)
        self.log("Backing up media files in %s" % source_dir, 1)
        filename = self.get_backup_basename(compress=compress)
        output_file = self.create_backup_file(
            source_dir,
            filename,
            compress=compress
        )
        if encrypt:
            # Encryption wraps the archive and changes its filename.
            encrypted_file = utils.encrypt_file(output_file, filename)
            output_file, filename = encrypted_file
        self.log("  Backup tempfile created: %s (%s)" % (filename, utils.handle_size(output_file)), 1)
        self.log("  Writing file to %s: %s" % (self.storage.name, self.storage.backup_dir), 1)
        self.storage.write_file(
            output_file,
            self.get_backup_basename(
                compress=compress)
        )

    def get_backup_basename(self, **kwargs):
        """Build the archive filename: <db>-<server>-<date>.media.tar[.gz].

        TODO (kept from original): honour DBBACKUP_FILENAME_TEMPLATE.
        """
        server_name = self.get_servername()
        if server_name:
            server_name = '-%s' % server_name
        return '%s%s-%s.media.tar%s' % (
            self.get_databasename(),
            server_name,
            datetime.now().strftime(dbbackup_settings.DATE_FORMAT),
            ('.gz' if kwargs.get('compress') else '')
        )

    def get_databasename(self):
        # NOTE(review): media backups are named after the *default database*
        # (the original marked this "WTF") -- questionable, but the cleanup
        # regex below depends on it, so it is kept.
        return settings.DATABASES['default']['NAME']

    def get_source_dir(self):
        # The directory to archive, from DBBACKUP_MEDIA_PATH settings.
        return dbbackup_settings.MEDIA_PATH

    def create_backup_file(self, source_dir, backup_basename, **kwargs):
        """Tar `source_dir` into a temp file and return it as a spooled
        temporary file; all on-disk temporaries are removed afterwards."""
        temp_dir = tempfile.mkdtemp(dir=dbbackup_settings.TMP_DIR)
        try:
            backup_filename = os.path.join(temp_dir, backup_basename)
            try:
                # Streamed gzip ('w|gz') when compressing, plain tar otherwise.
                mode = 'w|gz' if kwargs.get('compress') else 'w'
                tar_file = tarfile.open(backup_filename, mode)
                try:
                    tar_file.add(source_dir)
                finally:
                    tar_file.close()
                return utils.create_spooled_temporary_file(backup_filename)
            finally:
                if os.path.exists(backup_filename):
                    os.remove(backup_filename)
        finally:
            os.rmdir(temp_dir)

    def cleanup_old_backups(self):
        """ Cleanup old backups, keeping the number of backups specified by
        DBBACKUP_CLEANUP_KEEP and any backups that occur on first of the month.
        """
        self.log("Cleaning Old Backups for media files", 1)
        file_list = self.get_backup_file_list()
        # Everything except the newest CLEANUP_KEEP_MEDIA files is a
        # candidate; first-of-month backups are always preserved.
        for backup_date, filename in file_list[0:-dbbackup_settings.CLEANUP_KEEP_MEDIA]:
            if int(backup_date.strftime("%d")) != 1:
                self.log("  Deleting: %s" % filename, 1)
                self.storage.delete_file(filename)

    def get_backup_file_list(self):
        """ Return a list of backup files including the backup date. The result is a list of tuples (datetime, filename).
        The list is sorted by date.
        """
        server_name = self.get_servername()
        if server_name:
            server_name = '-%s' % server_name
        # Fix: the original applied ``%`` with a single argument to a
        # two-placeholder pattern (TypeError: not enough arguments) and
        # passed re.escape(server_name) to re.compile() as its *flags*
        # argument.  Both escaped values belong in the format tuple.
        media_re = re.compile(r'^%s%s-(.*)\.media\.tar(?:\.gz)?(?:\.\d+)?$' %
                              (re.escape(self.get_databasename()),
                               re.escape(server_name)))

        def is_media_backup(filename):
            # True when the filename matches this server's media backups.
            return media_re.search(filename)

        def get_datetime_from_filename(filename):
            # Extract and parse the timestamp captured by the regex.
            datestr = media_re.findall(filename)[0]
            return datetime.strptime(datestr, dbbackup_settings.DATE_FORMAT)

        file_list = [
            (get_datetime_from_filename(os.path.basename(f)), f)
            for f in self.storage.list_directory()
            if is_media_backup(os.path.basename(f))
        ]
        return sorted(file_list, key=lambda v: v[0])

    def get_servername(self):
        # Explicit --servername wins; otherwise fall back to the host name.
        return self.servername or dbbackup_settings.HOSTNAME
| bsd-3-clause |
sfstpala/Victory-Chat | cherrypy/test/sessiondemo.py | 42 | 5420 | #!/usr/bin/python
"""A session demonstration app."""
import calendar
from datetime import datetime
import sys
import cherrypy
from cherrypy.lib import sessions
from cherrypy._cpcompat import copyitems
# HTML template rendered by Root.page(); %(name)s placeholders are filled with
# the current session/cookie state. The embedded JavaScript compares browser
# and server clocks and warns when the session cookie expiry drifts.
page = """
<html>
<head>
<style type='text/css'>
table { border-collapse: collapse; border: 1px solid #663333; }
th { text-align: right; background-color: #663333; color: white; padding: 0.5em; }
td { white-space: pre-wrap; font-family: monospace; padding: 0.5em;
border: 1px solid #663333; }
.warn { font-family: serif; color: #990000; }
</style>
<script type="text/javascript">
<!--
function twodigit(d) { return d < 10 ? "0" + d : d; }
function formattime(t) {
var month = t.getUTCMonth() + 1;
var day = t.getUTCDate();
var year = t.getUTCFullYear();
var hours = t.getUTCHours();
var minutes = t.getUTCMinutes();
return (year + "/" + twodigit(month) + "/" + twodigit(day) + " " +
hours + ":" + twodigit(minutes) + " UTC");
}
function interval(s) {
// Return the given interval (in seconds) as an English phrase
var seconds = s %% 60;
s = Math.floor(s / 60);
var minutes = s %% 60;
s = Math.floor(s / 60);
var hours = s %% 24;
var v = twodigit(hours) + ":" + twodigit(minutes) + ":" + twodigit(seconds);
var days = Math.floor(s / 24);
if (days != 0) v = days + ' days, ' + v;
return v;
}
var fudge_seconds = 5;
function init() {
// Set the content of the 'btime' cell.
var currentTime = new Date();
var bunixtime = Math.floor(currentTime.getTime() / 1000);
var v = formattime(currentTime);
v += " (Unix time: " + bunixtime + ")";
var diff = Math.abs(%(serverunixtime)s - bunixtime);
if (diff > fudge_seconds) v += "<p class='warn'>Browser and Server times disagree.</p>";
document.getElementById('btime').innerHTML = v;
// Warn if response cookie expires is not close to one hour in the future.
// Yes, we want this to happen when wit hit the 'Expire' link, too.
var expires = Date.parse("%(expires)s") / 1000;
var onehour = (60 * 60);
if (Math.abs(expires - (bunixtime + onehour)) > fudge_seconds) {
diff = Math.floor(expires - bunixtime);
if (expires > (bunixtime + onehour)) {
var msg = "Response cookie 'expires' date is " + interval(diff) + " in the future.";
} else {
var msg = "Response cookie 'expires' date is " + interval(0 - diff) + " in the past.";
}
document.getElementById('respcookiewarn').innerHTML = msg;
}
}
//-->
</script>
</head>
<body onload='init()'>
<h2>Session Demo</h2>
<p>Reload this page. The session ID should not change from one reload to the next</p>
<p><a href='../'>Index</a> | <a href='expire'>Expire</a> | <a href='regen'>Regenerate</a></p>
<table>
<tr><th>Session ID:</th><td>%(sessionid)s<p class='warn'>%(changemsg)s</p></td></tr>
<tr><th>Request Cookie</th><td>%(reqcookie)s</td></tr>
<tr><th>Response Cookie</th><td>%(respcookie)s<p id='respcookiewarn' class='warn'></p></td></tr>
<tr><th>Session Data</th><td>%(sessiondata)s</td></tr>
<tr><th>Server Time</th><td id='stime'>%(servertime)s (Unix time: %(serverunixtime)s)</td></tr>
<tr><th>Browser Time</th><td id='btime'> </td></tr>
<tr><th>Cherrypy Version:</th><td>%(cpversion)s</td></tr>
<tr><th>Python Version:</th><td>%(pyversion)s</td></tr>
</table>
</body></html>
"""
class Root(object):
    """CherryPy application demonstrating session lifecycle and cookies."""

    def page(self):
        """Render the session-demo HTML with the current session state."""
        # Collect any warnings about why the session id changed this request.
        warnings = []
        sess = cherrypy.session
        if sess.id != sess.originalid:
            if sess.originalid is None:
                warnings.append('Created new session because no session id was given.')
            if sess.missing:
                warnings.append('Created new session due to missing (expired or malicious) session.')
            if sess.regenerated:
                warnings.append('Application generated a new session.')

        try:
            expires = cherrypy.response.cookie['session_id']['expires']
        except KeyError:
            expires = ''

        return page % {
            'sessionid': sess.id,
            'changemsg': '<br>'.join(warnings),
            'respcookie': cherrypy.response.cookie.output(),
            'reqcookie': cherrypy.request.cookie.output(),
            'sessiondata': copyitems(cherrypy.session),
            'servertime': datetime.utcnow().strftime("%Y/%m/%d %H:%M") + " UTC",
            'serverunixtime': calendar.timegm(datetime.utcnow().timetuple()),
            'cpversion': cherrypy.__version__,
            'pyversion': sys.version,
            'expires': expires,
        }

    def index(self):
        # Must modify data or the session will not be saved.
        cherrypy.session['color'] = 'green'
        return self.page()
    index.exposed = True

    def expire(self):
        # Drop the current session entirely.
        sessions.expire()
        return self.page()
    expire.exposed = True

    def regen(self):
        # Issue a fresh session id while keeping the request alive.
        cherrypy.session.regenerate()
        # Must modify data or the session will not be saved.
        cherrypy.session['color'] = 'yellow'
        return self.page()
    regen.exposed = True
if __name__ == '__main__':
    # Enable sessions and console logging, then serve the demo app.
    conf = {
        #'environment': 'production',
        'log.screen': True,
        'tools.sessions.on': True,
    }
    cherrypy.config.update(conf)
    cherrypy.quickstart(Root())
| isc |
Softmotions/edx-platform | common/test/acceptance/tests/video/test_video_times.py | 123 | 5601 | """
Acceptance tests for Video Times(Start, End and Finish) functionality.
"""
from flaky import flaky
from .test_video_module import VideoBaseTest
import unittest
class VideoTimesTest(VideoBaseTest):
    """ Test Video Player Times """

    def setUp(self):
        super(VideoTimesTest, self).setUp()

    def test_video_start_time(self):
        """
        Scenario: Start time works for Youtube video
        Given we have a video in "Youtube" mode with start_time set to 00:00:10
        And I see video slider at "0:10" position
        And I click video button "play"
        Then video starts playing at or after start_time(00:00:10)
        """
        data = {'start_time': '00:00:10'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()

        self.assertEqual(self.video.position, '0:10')

        self.video.click_player_button('play')

        # Seconds component of the position must be at/after the start time.
        self.assertGreaterEqual(int(self.video.position.split(':')[1]), 10)

    def test_video_end_time_with_default_start_time(self):
        """
        Scenario: End time works for Youtube video if starts playing from beginning.
        Given we have a video in "Youtube" mode with end time set to 00:00:05
        And I click video button "play"
        And I wait until video stop playing
        Then I see video slider at "0:05" position
        """
        data = {'end_time': '00:00:05'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()

        self.video.click_player_button('play')

        # wait until video stop playing
        self.video.wait_for_state('pause')

        # Allow one second of player overshoot past the configured end time.
        self.assertIn(self.video.position, ('0:05', '0:06'))

    @flaky  # TODO fix this, see TNL-1619
    def test_video_end_time_wo_default_start_time(self):
        """
        Scenario: End time works for Youtube video if starts playing from between.
        Given we have a video in "Youtube" mode with end time set to 00:01:00
        And I seek video to "0:55" position
        And I click video button "play"
        And I wait until video stop playing
        Then I see video slider at "1:00" position
        """
        data = {'end_time': '00:01:00'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()

        self.video.seek('0:55')

        self.video.click_player_button('play')

        # wait until video stop playing
        self.video.wait_for_state('pause')

        # Allow one second of player overshoot past the configured end time.
        self.assertIn(self.video.position, ('1:00', '1:01'))

    def test_video_start_time_and_end_time(self):
        """
        Scenario: Start time and end time work together for Youtube video.
        Given we a video in "Youtube" mode with start time set to 00:00:10 and end_time set to 00:00:15
        And I see video slider at "0:10" position
        And I click video button "play"
        Then I wait until video stop playing
        Then I see video slider at "0:15" position
        """
        data = {'start_time': '00:00:10', 'end_time': '00:00:15'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()

        self.assertEqual(self.video.position, '0:10')

        self.video.click_player_button('play')

        # wait until video stop playing
        self.video.wait_for_state('pause')

        # Allow one second of player overshoot past the configured end time.
        self.assertIn(self.video.position, ('0:15', '0:16'))

    @unittest.skip('This is actually a bug! See TNL-1619')
    def test_video_end_time_and_finish_time(self):
        """
        Scenario: Youtube video works after pausing at end time and then plays again from End Time to the end.
        Given we have a video in "Youtube" mode with start time set to 00:02:10 and end_time set to 00:02:15
        And I click video button "play"
        And I wait until video stop playing
        Then I see video slider at "2:15" position
        And I click video button "play"
        And I wait until video stop playing
        Then I see video slider at "2:20" position
        """
        data = {'start_time': '00:02:10', 'end_time': '00:02:15'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()

        self.video.click_player_button('play')

        # wait until video stop playing
        self.video.wait_for_state('pause')

        self.assertIn(self.video.position, ('2:15', '2:16'))

        self.video.click_player_button('play')

        # wait until video stop playing
        self.video.wait_for_state('finished')

        self.assertEqual(self.video.position, '2:20')

    def test_video_end_time_with_seek(self):
        """
        Scenario: End Time works for Youtube Video if starts playing before Start Time.
        Given we have a video in "Youtube" mode with end-time at 0:35 and start-time at 0:30
        And I seek video to "0:28" position
        And I click video button "play"
        And I wait until video stop playing
        Then I see video slider at "0:35" position
        """
        data = {'start_time': '00:00:30', 'end_time': '00:00:35'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()

        self.video.seek('0:28')

        self.video.click_player_button('play')

        # wait until video stop playing
        self.video.wait_for_state('pause')

        # Allow one second of player overshoot past the configured end time.
        self.assertIn(self.video.position, ('0:35', '0:36'))
| agpl-3.0 |
hakatashi/youtube-dl | youtube_dl/extractor/onet.py | 37 | 9342 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
get_element_by_class,
int_or_none,
js_to_json,
NO_DEFAULT,
parse_iso8601,
remove_start,
strip_or_none,
url_basename,
)
class OnetBaseIE(InfoExtractor):
    """Shared logic for onet.tv extractors: MVP id discovery and the
    qi.ckm.onetapi.pl asset-detail API."""

    def _search_mvp_id(self, webpage):
        # MVP ids are embedded as id="mvp:<id>" (either quote style) in the page.
        return self._search_regex(
            r'id=(["\'])mvp:(?P<id>.+?)\1', webpage, 'mvp id', group='id')

    def _extract_from_id(self, video_id, webpage=None):
        """Fetch asset details for *video_id* and build an info dict.

        *webpage*, when given, is used as a fallback source for title and
        description via OpenGraph tags.
        """
        response = self._download_json(
            'http://qi.ckm.onetapi.pl/', video_id,
            query={
                'body[id]': video_id,
                'body[jsonrpc]': '2.0',
                'body[method]': 'get_asset_detail',
                'body[params][ID_Publikacji]': video_id,
                'body[params][Service]': 'www.onet.pl',
                'content-type': 'application/jsonp',
                'x-onet-app': 'player.front.onetapi.pl',
            })

        # Surface API-level errors as expected extractor failures.
        error = response.get('error')
        if error:
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, error['message']), expected=True)

        video = response['result'].get('0')

        formats = []
        # The formats payload is nested: {group: {format_id: [format, ...]}}.
        for _, formats_dict in video['formats'].items():
            if not isinstance(formats_dict, dict):
                continue
            for format_id, format_list in formats_dict.items():
                if not isinstance(format_list, list):
                    continue
                for f in format_list:
                    video_url = f.get('url')
                    if not video_url:
                        continue
                    ext = determine_ext(video_url)
                    if format_id == 'ism':
                        # Smooth Streaming manifest.
                        formats.extend(self._extract_ism_formats(
                            video_url, video_id, 'mss', fatal=False))
                    elif ext == 'mpd':
                        # DASH manifest.
                        formats.extend(self._extract_mpd_formats(
                            video_url, video_id, mpd_id='dash', fatal=False))
                    else:
                        formats.append({
                            'url': video_url,
                            'format_id': format_id,
                            'height': int_or_none(f.get('vertical_resolution')),
                            'width': int_or_none(f.get('horizontal_resolution')),
                            'abr': float_or_none(f.get('audio_bitrate')),
                            'vbr': float_or_none(f.get('video_bitrate')),
                        })
        self._sort_formats(formats)

        meta = video.get('meta', {})

        # Prefer OpenGraph data from the webpage when available; the API
        # metadata is the fallback (and mandatory for the title).
        title = (self._og_search_title(
            webpage, default=None) if webpage else None) or meta['title']
        description = (self._og_search_description(
            webpage, default=None) if webpage else None) or meta.get('description')
        # 'lenght' is a known typo in the upstream API; keep checking both keys.
        duration = meta.get('length') or meta.get('lenght')
        # addDate uses a space between date and time instead of 'T'.
        timestamp = parse_iso8601(meta.get('addDate'), ' ')

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
        }
class OnetMVPIE(OnetBaseIE):
    """Extractor for bare onetmvp:<id> pseudo-URLs (no webpage involved)."""
    _VALID_URL = r'onetmvp:(?P<id>\d+\.\d+)'

    _TEST = {
        'url': 'onetmvp:381027.1509591944',
        'only_matching': True,
    }

    def _real_extract(self, url):
        # The MVP id is carried directly in the URL; query the API with it.
        mvp_id = self._match_id(url)
        return self._extract_from_id(mvp_id)
class OnetIE(OnetBaseIE):
    """Extractor for single onet.tv video pages."""
    _VALID_URL = r'https?://(?:www\.)?onet\.tv/[a-z]/[a-z]+/(?P<display_id>[0-9a-z-]+)/(?P<id>[0-9a-z]+)'
    IE_NAME = 'onet.tv'

    _TEST = {
        'url': 'http://onet.tv/k/openerfestival/open-er-festival-2016-najdziwniejsze-wymagania-gwiazd/qbpyqc',
        'md5': 'e3ffbf47590032ac3f27249204173d50',
        'info_dict': {
            'id': 'qbpyqc',
            'display_id': 'open-er-festival-2016-najdziwniejsze-wymagania-gwiazd',
            'ext': 'mp4',
            'title': 'Open\'er Festival 2016: najdziwniejsze wymagania gwiazd',
            'description': 'Trzy samochody, których nigdy nie użyto, prywatne spa, hotel dekorowany czarnym suknem czy nielegalne używki. Organizatorzy koncertów i festiwali muszą stawać przed nie lada wyzwaniem zapraszając gwia...',
            'upload_date': '20160705',
            'timestamp': 1467721580,
        },
    }

    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url)
        display_id = match.group('display_id')
        video_id = match.group('id')

        # The MVP id (API key for the asset) lives inside the page markup.
        webpage = self._download_webpage(url, display_id)
        mvp_id = self._search_mvp_id(webpage)

        # Extract via the API, then override the ids with the URL-derived ones.
        info = self._extract_from_id(mvp_id, webpage)
        info['id'] = video_id
        info['display_id'] = display_id

        return info
class OnetChannelIE(OnetBaseIE):
    """Extractor for onet.tv channel pages (playlists of videos)."""
    _VALID_URL = r'https?://(?:www\.)?onet\.tv/[a-z]/(?P<id>[a-z]+)(?:[?#]|$)'
    IE_NAME = 'onet.tv:channel'

    _TEST = {
        'url': 'http://onet.tv/k/openerfestival',
        'info_dict': {
            'id': 'openerfestival',
            'title': 'Open\'er Festival Live',
            'description': 'Dziękujemy, że oglądaliście transmisje. Zobaczcie nasze relacje i wywiady z artystami.',
        },
        'playlist_mincount': 46,
    }

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        webpage = self._download_webpage(url, channel_id)

        # The currently-featured clip is declared as a JS object literal; the
        # transform collapses JS string concatenations ('a' + 'b') before
        # converting the literal to JSON.
        current_clip_info = self._parse_json(self._search_regex(
            r'var\s+currentClip\s*=\s*({[^}]+})', webpage, 'video info'), channel_id,
            transform_source=lambda s: js_to_json(re.sub(r'\'\s*\+\s*\'', '', s)))
        video_id = remove_start(current_clip_info['ckmId'], 'mvp:')
        video_name = url_basename(current_clip_info['url'])

        # With --no-playlist, extract only the featured clip.
        if self._downloader.params.get('noplaylist'):
            self.to_screen(
                'Downloading just video %s because of --no-playlist' % video_name)
            return self._extract_from_id(video_id, webpage)

        self.to_screen(
            'Downloading channel %s - add --no-playlist to just download video %s' % (
                channel_id, video_name))
        # Collect links to all individual video pages on the channel.
        matches = re.findall(
            r'<a[^>]+href=[\'"](https?://(?:www\.)?onet\.tv/[a-z]/[a-z]+/[0-9a-z-]+/[0-9a-z]+)',
            webpage)
        entries = [
            self.url_result(video_link, OnetIE.ie_key())
            for video_link in matches]

        channel_title = strip_or_none(get_element_by_class('o_channelName', webpage))
        channel_description = strip_or_none(get_element_by_class('o_channelDesc', webpage))
        return self.playlist_result(entries, channel_id, channel_title, channel_description)
class OnetPlIE(InfoExtractor):
    """Extractor for onet.pl family sites; resolves the page to an MVP id and
    delegates to OnetMVPIE via an onetmvp: pseudo-URL."""
    _VALID_URL = r'https?://(?:[^/]+\.)?(?:onet|businessinsider\.com|plejada)\.pl/(?:[^/]+/)+(?P<id>[0-9a-z]+)'
    IE_NAME = 'onet.pl'

    _TESTS = [{
        'url': 'http://eurosport.onet.pl/zimowe/skoki-narciarskie/ziobro-wygral-kwalifikacje-w-pjongczangu/9ckrly',
        'md5': 'b94021eb56214c3969380388b6e73cb0',
        'info_dict': {
            'id': '1561707.1685479',
            'ext': 'mp4',
            'title': 'Ziobro wygrał kwalifikacje w Pjongczangu',
            'description': 'md5:61fb0740084d2d702ea96512a03585b4',
            'upload_date': '20170214',
            'timestamp': 1487078046,
        },
    }, {
        # embedded via pulsembed
        'url': 'http://film.onet.pl/pensjonat-nad-rozlewiskiem-relacja-z-planu-serialu/y428n0',
        'info_dict': {
            'id': '501235.965429946',
            'ext': 'mp4',
            'title': '"Pensjonat nad rozlewiskiem": relacja z planu serialu',
            'upload_date': '20170622',
            'timestamp': 1498159955,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://film.onet.pl/zwiastuny/ghost-in-the-shell-drugi-zwiastun-pl/5q6yl3',
        'only_matching': True,
    }, {
        'url': 'http://moto.onet.pl/jak-wybierane-sa-miejsca-na-fotoradary/6rs04e',
        'only_matching': True,
    }, {
        'url': 'http://businessinsider.com.pl/wideo/scenariusz-na-koniec-swiata-wedlug-nasa/dwnqptk',
        'only_matching': True,
    }, {
        'url': 'http://plejada.pl/weronika-rosati-o-swoim-domniemanym-slubie/n2bq89',
        'only_matching': True,
    }]

    def _search_mvp_id(self, webpage, default=NO_DEFAULT):
        # MVP id appears as data-mvp= or data-params-mvp= in the markup.
        return self._search_regex(
            r'data-(?:params-)?mvp=["\'](\d+\.\d+)', webpage, 'mvp id',
            default=default)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        mvp_id = self._search_mvp_id(webpage, default=None)

        # Fallback: the player may be embedded through pulsembed.eu; fetch the
        # embed page and search it for the MVP id (fatal this time).
        if not mvp_id:
            pulsembed_url = self._search_regex(
                r'data-src=(["\'])(?P<url>(?:https?:)?//pulsembed\.eu/.+?)\1',
                webpage, 'pulsembed url', group='url')
            webpage = self._download_webpage(
                pulsembed_url, video_id, 'Downloading pulsembed webpage')
            mvp_id = self._search_mvp_id(webpage)

        # Hand off to OnetMVPIE via the onetmvp: pseudo-URL scheme.
        return self.url_result(
            'onetmvp:%s' % mvp_id, OnetMVPIE.ie_key(), video_id=mvp_id)
| unlicense |
dotpmrcunha/gnuradio | gr-filter/examples/interpolate.py | 58 | 8816 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
    """GNU Radio flowgraph comparing a PFB interpolator (integer rate) against
    a PFB arbitrary resampler (fractional rate) on a two-tone test signal."""

    def __init__(self):
        gr.top_block.__init__(self)

        self._N = 100000         # number of samples to use
        self._fs = 2000          # initial sampling rate
        self._interp = 5         # Interpolation rate for PFB interpolator
        self._ainterp = 5.5      # Resampling rate for the PFB arbitrary resampler

        # Frequencies of the signals we construct
        freq1 = 100
        freq2 = 200

        # Create a set of taps for the PFB interpolator
        # This is based on the post-interpolation sample rate
        self._taps = filter.firdes.low_pass_2(self._interp,
                                              self._interp*self._fs,
                                              freq2+50, 50,
                                              attenuation_dB=120,
                                              window=filter.firdes.WIN_BLACKMAN_hARRIS)

        # Create a set of taps for the PFB arbitrary resampler
        # The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
        # and larger numbers will reduce these even farther
        # The taps in this filter are based on a sampling rate of the filter size since it acts
        # internally as an interpolator.
        flt_size = 32
        self._taps2 = filter.firdes.low_pass_2(flt_size,
                                               flt_size*self._fs,
                                               freq2+50, 150,
                                               attenuation_dB=120,
                                               window=filter.firdes.WIN_BLACKMAN_hARRIS)

        # Calculate the number of taps per channel for our own information
        tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
        print "Number of taps:     ", len(self._taps)
        print "Number of filters:  ", self._interp
        print "Taps per channel:   ", tpc

        # Create a couple of signals at different frequencies
        self.signal1 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq1, 0.5)
        self.signal2 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq2, 0.5)
        self.signal = blocks.add_cc()

        self.head = blocks.head(gr.sizeof_gr_complex, self._N)

        # Construct the PFB interpolator filter
        self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)

        # Construct the PFB arbitrary resampler filter
        self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
        self.snk_i = blocks.vector_sink_c()

        #self.pfb_ar.pfb.print_taps()
        #self.pfb.pfb.print_taps()

        # Connect the blocks
        # signal = signal1 + signal2 (head limits the run to N samples);
        # the summed signal fans out to both filters and a reference sink.
        self.connect(self.signal1, self.head, (self.signal,0))
        self.connect(self.signal2, (self.signal,1))
        self.connect(self.signal, self.pfb)
        self.connect(self.signal, self.pfb_ar)
        self.connect(self.signal, self.snk_i)

        # Create the sink for the interpolated signals
        self.snk1 = blocks.vector_sink_c()
        self.snk2 = blocks.vector_sink_c()
        self.connect(self.pfb, self.snk1)
        self.connect(self.pfb_ar, self.snk2)
def main():
    """Run the flowgraph, then plot PSD and time-domain views of the input,
    the PFB-interpolated output, and the arbitrarily-resampled output."""
    tb = pfb_top_block()

    tstart = time.time()
    tb.run()
    tend = time.time()
    print "Run time: %f" % (tend - tstart)

    if 1:
        # Plotting section (always on; flip to 0 to benchmark without plots).
        fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
        fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
        fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")

        Ns = 10000
        Ne = 10000

        fftlen = 8192
        winfunc = scipy.blackman

        # Plot input signal
        fs = tb._fs

        d = tb.snk_i.data()[Ns:Ns+Ne]
        sp1_f = fig1.add_subplot(2, 1, 1)

        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
        p1_f = sp1_f.plot(f_in, X_in, "b")
        sp1_f.set_xlim([min(f_in), max(f_in)+1])
        sp1_f.set_ylim([-200.0, 50.0])

        sp1_f.set_title("Input Signal", weight="bold")
        sp1_f.set_xlabel("Frequency (Hz)")
        sp1_f.set_ylabel("Power (dBW)")

        Ts = 1.0/fs
        Tmax = len(d)*Ts

        t_in = scipy.arange(0, Tmax, Ts)
        x_in = scipy.array(d)
        sp1_t = fig1.add_subplot(2, 1, 2)
        p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
        #p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
        sp1_t.set_ylim([-2.5, 2.5])

        sp1_t.set_title("Input Signal", weight="bold")
        sp1_t.set_xlabel("Time (s)")
        sp1_t.set_ylabel("Amplitude")

        # Plot output of PFB interpolator
        fs_int = tb._fs*tb._interp

        sp2_f = fig2.add_subplot(2, 1, 1)
        d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
        # NOTE(review): psd is called with Fs=fs (input rate) here too; its
        # freq return is unused and the axis is rebuilt manually below.
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
        p2_f = sp2_f.plot(f_o, X_o, "b")
        sp2_f.set_xlim([min(f_o), max(f_o)+1])
        sp2_f.set_ylim([-200.0, 50.0])

        sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
        sp2_f.set_xlabel("Frequency (Hz)")
        sp2_f.set_ylabel("Power (dBW)")

        Ts_int = 1.0/fs_int
        Tmax = len(d)*Ts_int

        t_o = scipy.arange(0, Tmax, Ts_int)
        x_o1 = scipy.array(d)
        sp2_t = fig2.add_subplot(2, 1, 2)
        p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
        #p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
        sp2_t.set_ylim([-2.5, 2.5])

        sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
        sp2_t.set_xlabel("Time (s)")
        sp2_t.set_ylabel("Amplitude")

        # Plot output of PFB arbitrary resampler
        fs_aint = tb._fs * tb._ainterp

        sp3_f = fig3.add_subplot(2, 1, 1)
        d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
        p3_f = sp3_f.plot(f_o, X_o, "b")
        sp3_f.set_xlim([min(f_o), max(f_o)+1])
        sp3_f.set_ylim([-200.0, 50.0])

        sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
        sp3_f.set_xlabel("Frequency (Hz)")
        sp3_f.set_ylabel("Power (dBW)")

        Ts_aint = 1.0/fs_aint
        Tmax = len(d)*Ts_aint

        t_o = scipy.arange(0, Tmax, Ts_aint)
        x_o2 = scipy.array(d)
        # Time plot overlays both outputs (resampler in blue, interpolator in
        # magenta) for visual comparison.
        sp3_f = fig3.add_subplot(2, 1, 2)
        p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
        p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
        #p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
        sp3_f.set_ylim([-2.5, 2.5])

        sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
        sp3_f.set_xlabel("Time (s)")
        sp3_f.set_ylabel("Amplitude")

        pylab.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C exit while the plot windows are open.
        pass
| gpl-3.0 |
sidnarayanan/IntelROCCS | CUADRnT/src/python/cuadrnt/system_management/core/rocker_board.py | 3 | 9413 | #!/usr/bin/env python2.7
"""
File : rocker_board.py
Author : Bjorn Barrefors <bjorn dot peter dot barrefors AT cern dot ch>
Description: Distribute data in system to keep it balanced (like a rocker board)
"""
# system modules
import logging
import sys
import getopt
import datetime
import operator
from logging.handlers import TimedRotatingFileHandler
# package modules
from cuadrnt.utils.utils import weighted_choice
from cuadrnt.utils.utils import timestamp_to_datetime
from cuadrnt.utils.utils import datetime_day
from cuadrnt.utils.config import get_config
from cuadrnt.data_management.services.phedex import PhEDExService
from cuadrnt.data_management.services.mit_db import MITDBService
from cuadrnt.data_management.tools.datasets import DatasetManager
from cuadrnt.data_management.tools.sites import SiteManager
from cuadrnt.data_management.tools.popularity import PopularityManager
from cuadrnt.data_management.core.storage import StorageManager
from cuadrnt.data_analysis.rankings.ranker import Ranker
class RockerBoard(object):
    """
    RockerBoard is a system balancing algorithm using popularity metrics to predict popularity
    and make appropriate replications to keep the system balanced
    """
    def __init__(self, config=None):
        """Wire up all services/tools from *config* (dict; defaults to empty).

        FIX: the default used to be the mutable literal ``config=dict()``,
        which is shared across calls; use None + fallback instead.
        """
        self.logger = logging.getLogger(__name__)
        self.config = config if config is not None else dict()
        self.phedex = PhEDExService(self.config)
        self.mit_db = MITDBService(self.config)
        self.datasets = DatasetManager(self.config)
        self.sites = SiteManager(self.config)
        self.popularity = PopularityManager(self.config)
        self.storage = StorageManager(self.config)
        self.rankings = Ranker(self.config)
        self.max_gb = int(self.config['rocker_board']['max_gb'])
        self.csv_data = list()

    def start(self, date=datetime_day(datetime.datetime.utcnow())):
        """
        Begin Rocker Board Algorithm
        """
        t1 = datetime.datetime.utcnow()
        # Get goals
        dataset_rankings = self.rankings.get_dataset_rankings(date)
        site_rankings = self.rankings.get_site_rankings(date)
        self.change_dataset_rankings(dataset_rankings)
        subscriptions = self.replicate(dataset_rankings, site_rankings)
        self.logger.info('SUBSCRIPTIONS')
        for subscription in subscriptions:
            self.logger.info('site: %s\tdataset: %s', subscription[1], subscription[0])
        # self.subscribe(subscriptions)
        t2 = datetime.datetime.utcnow()
        td = t2 - t1
        self.logger.info('Rocker Board took %s', str(td))

    def change_dataset_rankings(self, dataset_rankings):
        """
        Change the ranks from being the target number of replicas to being the
        change in number of replicas required to reach the goal
        """
        current_replicas = self.datasets.get_current_num_replicas()
        for dataset in current_replicas:
            dataset_rankings[dataset['name']] -= dataset['n_replicas']

    def replicate(self, dataset_rankings, site_rankings):
        """
        Balance system by creating new replicas based on popularity

        Returns a list of (dataset_name, site_name) subscription tuples,
        capped at self.max_gb of total subscribed data.
        """
        subscriptions = list()
        subscribed_gb = 0
        sites_available_storage_gb = self.sites.get_all_available_storage()
        while (subscribed_gb < self.max_gb) and site_rankings:
            # FIX: guard against an exhausted ranking dict -- max() on an
            # empty iterable raises ValueError.
            if not dataset_rankings:
                break
            # Work on a copy so per-dataset exclusions don't affect the
            # global site rankings.
            tmp_site_rankings = dict(site_rankings)
            dataset_name, dataset_rank = max(dataset_rankings.iteritems(), key=operator.itemgetter(1))
            if (not dataset_name) or (dataset_rank < 1):
                break
            size_gb = self.datasets.get_size(dataset_name)
            # Exclude sites that already host the dataset, lack space, or
            # have a non-positive rank.
            unavailable_sites = set(self.datasets.get_sites(dataset_name))
            for site_name in tmp_site_rankings.keys():
                if (self.sites.get_available_storage(site_name) < size_gb) or (tmp_site_rankings[site_name] <= 0):
                    unavailable_sites.add(site_name)
            for site_name in unavailable_sites:
                # FIX: was try/del with a bare except; pop() with a default
                # removes-if-present without swallowing unrelated errors.
                tmp_site_rankings.pop(site_name, None)
            if not tmp_site_rankings:
                # No site can take this dataset at all -- drop it from the goals.
                del dataset_rankings[dataset_name]
                continue
            site_name = weighted_choice(tmp_site_rankings)
            subscriptions.append((dataset_name, site_name))
            subscribed_gb += size_gb
            sites_available_storage_gb[site_name] -= size_gb
            self.logger.info('%s : added', dataset_name)
            if sites_available_storage_gb[site_name] <= 0:
                del site_rankings[site_name]
            dataset_rankings[dataset_name] -= 1
        self.logger.info('Subscribed %dGB', subscribed_gb)
        return subscriptions

    def subscribe(self, subscriptions):
        """
        Make subscriptions to phedex
        subscriptions = [(dataset_name, site_name), ...]
        """
        new_subscriptions = dict()
        for subscription in subscriptions:
            dataset_name = subscription[0]
            site_name = subscription[1]
            # FIX: was append inside try with a bare except; setdefault is
            # the idiomatic (and exception-free) way to group by site.
            new_subscriptions.setdefault(site_name, list()).append(dataset_name)
        for site_name, dataset_names in new_subscriptions.items():
            data = self.phedex.generate_xml(dataset_names)
            comments = 'This dataset is predicted to become popular and has therefore been automatically replicated by cuadrnt'
            api = 'subscribe'
            params = [('node', site_name), ('data', data), ('level','dataset'), ('move', 'n'), ('custodial', 'n'), ('group', 'AnalysisOps'), ('request_only', 'n'), ('no_mail', 'n'), ('comments', comments)]
            json_data = self.phedex.fetch(api=api, params=params, method='post')
            # insert into db
            group_name = 'AnalysisOps'
            request_id = 0
            request_type = 0
            try:
                request = json_data['phedex']
                request_id = request['request_created'][0]['id']
                request_created = timestamp_to_datetime(request['request_timestamp'])
            # FIX: narrowed from a bare except to the failures a malformed
            # PhEDEx response can actually produce.
            except (KeyError, IndexError, TypeError):
                self.logger.warning('Subscription did not succeed\n\tSite:%s\n\tDatasets: %s', str(site_name), str(dataset_names))
                continue
            for dataset_name in dataset_names:
                # Look up the rank we assigned this dataset today for bookkeeping.
                coll = 'dataset_rankings'
                date = datetime_day(datetime.datetime.utcnow())
                pipeline = list()
                match = {'$match':{'name':dataset_name, 'date':date}}
                pipeline.append(match)
                project = {'$project':{'delta_rank':1, '_id':0}}
                pipeline.append(project)
                data = self.storage.get_data(coll=coll, pipeline=pipeline)
                dataset_rank = data[0]['delta_rank']
                query = "INSERT INTO Requests(RequestId, RequestType, DatasetId, SiteId, GroupId, Rank, Date) SELECT %s, %s, Datasets.DatasetId, Sites.SiteId, Groups.GroupId, %s, %s FROM Datasets, Sites, Groups WHERE Datasets.DatasetName=%s AND Sites.SiteName=%s AND Groups.GroupName=%s"
                values = (request_id, request_type, dataset_rank, request_created, dataset_name, site_name, group_name)
                self.mit_db.query(query=query, values=values, cache=False)
def main(argv):
"""
Main driver for Rocker Board Algorithm
"""
log_level = logging.WARNING
config = get_config(path='/var/opt/cuadrnt', file_name='cuadrnt.cfg')
try:
opts, args = getopt.getopt(argv, 'h', ['help', 'log='])
except getopt.GetoptError:
print "usage: rocker_board.py [--log=notset|debug|info|warning|error|critical]"
print " or: rocker_board.py --help"
sys.exit()
for opt, arg in opts:
if opt in ('-h', '--help'):
print "usage: rocker_board.py [--log=notset|debug|info|warning|error|critical]"
print " or: rocker_board.py --help"
sys.exit()
elif opt in ('--log'):
log_level = getattr(logging, arg.upper())
if not isinstance(log_level, int):
print "%s is not a valid log level" % (str(arg))
print "usage: rocker_board.py [--log=notset|debug|info|warning|error|critical]"
print " or: rocker_board.py --help"
sys.exit()
else:
print "usage: rocker_board.py [--log=notset|debug|info|warning|error|critical]"
print " or: rocker_board.py --help"
print "error: option %s not recognized" % (str(opt))
sys.exit()
log_path = config['paths']['log']
log_file = 'rocker_board.log'
file_name = '%s/%s' % (log_path, log_file)
logger = logging.getLogger()
logger.setLevel(log_level)
handler = TimedRotatingFileHandler(file_name, when='midnight', interval=1, backupCount=2)
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s:%(funcName)s:%(lineno)d: %(message)s', datefmt='%H:%M')
handler.setFormatter(formatter)
logger.addHandler(handler)
rocker_board = RockerBoard(config)
rocker_board.start()
if __name__ == "__main__":
    # Run once over the CLI arguments, then exit with the default status.
    main(sys.argv[1:])
    sys.exit()
| mit |
twain47/osm2pgsql | tests/regression-test.py | 1 | 42765 | #!/usr/bin/env python
import unittest
import psycopg2
import os
from pwd import getpwnam
import subprocess
# Input fixtures used by the test suites below (paths relative to the
# osm2pgsql source tree).
full_import_file="tests/liechtenstein-2013-08-03.osm.pbf"
multipoly_import_file="tests/test_multipolygon.osm" #This file contains a number of different multi-polygon test cases
diff_import_file="tests/000466354.osc.gz"
diff_multipoly_import_file="tests/test_multipolygon_diff.osc" #This file contains a number of different multi-polygon diff processing test cases
# NOTE(review): presumably flipped to 1 once the test tablespace has been
# created by the setup code -- confirm against the setup/teardown below.
created_tablespace = 0
#****************************************************************
#****************************************************************
sql_test_statements=[
( 0, 'Basic point count', 'SELECT count(*) FROM planet_osm_point;', 1342 ),
( 1, 'Basic line count', 'SELECT count(*) FROM planet_osm_line;', 3300 ),
( 2, 'Basic road count', 'SELECT count(*) FROM planet_osm_roads;', 375 ),
( 3, 'Basic polygon count', 'SELECT count(*) FROM planet_osm_polygon;', 4128 ),
( 4, 'Basic latlon line count', 'SELECT count(*) FROM planet_osm_line;', 3298 ),
( 5, 'Basic latlon road count', 'SELECT count(*) FROM planet_osm_roads;', 374 ),
( 6, 'Basic post-diff point count', 'SELECT count(*) FROM planet_osm_point;', 1457 ),
( 7, 'Basic post-diff line count', 'SELECT count(*) FROM planet_osm_line;', 3344 ),
( 8, 'Basic post-diff road count', 'SELECT count(*) FROM planet_osm_roads;', 381 ),
( 9, 'Basic post-diff polygon count', 'SELECT count(*) FROM planet_osm_polygon;', 4275 ),
( 10, 'Absence of nodes table', 'SELECT count(*) FROM pg_tables WHERE tablename = \'planet_osm_nodes\'', 0),
( 11, 'Absence of way table', 'SELECT count(*) FROM pg_tables WHERE tablename = \'planet_osm_ways\'', 0),
( 12, 'Absence of rel line', 'SELECT count(*) FROM pg_tables WHERE tablename = \'planet_osm_rels\'', 0),
( 13, 'Basic polygon area', 'SELECT round(sum(cast(ST_Area(way) as numeric)),0) FROM planet_osm_polygon;', 1223800814),
( 14, 'Gazetteer place count', 'SELECT count(*) FROM place', 4499),
( 15, 'Gazetteer place node count', 'SELECT count(*) FROM place WHERE osm_type = \'N\'', 779),
( 16, 'Gazetteer place way count', 'SELECT count(*) FROM place WHERE osm_type = \'W\'', 3697),
( 17, 'Gazetteer place rel count', 'SELECT count(*) FROM place WHERE osm_type = \'R\'', 23),
( 18, 'Gazetteer post-diff place count', 'SELECT count(*) FROM place', 4553),
( 19, 'Gazetteer post-diff place node count', 'SELECT count(*) FROM place WHERE osm_type = \'N\'', 788),
( 20, 'Gazetteer post-diff place way count', 'SELECT count(*) FROM place WHERE osm_type = \'W\'', 3742),
( 21, 'Gazetteer post-diff place rel count', 'SELECT count(*) FROM place WHERE osm_type = \'R\'', 23),
( 22, 'Gazetteer housenumber count', 'SELECT count(*) FROM place WHERE housenumber is not null', 199),
( 23, 'Gazetteer post-diff housenumber count count', 'SELECT count(*) FROM place WHERE housenumber is not null', 199),
( 24, 'Gazetteer isin count', 'SELECT count(*) FROM place WHERE isin is not null', 239),
( 25, 'Gazetteer post-diff isin count count', 'SELECT count(*) FROM place WHERE isin is not null', 239),
( 26, 'Multipolygon basic case (Tags from outer way)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -15 and landuse = \'residential\' and name = \'Name_way\'', 12894),
( 27, 'Multipolygon basic case (Tags from relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -1 and landuse = \'residential\' and name = \'Name_rel\'', 12895),
( 28, 'Multipolygon named inner - outer (Tags from way)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -16 and landuse = \'residential\' and name = \'Name_way2\'', 12895),
( 29, 'Multipolygon named inner - inner way',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = 4 and landuse = \'farmland\' and name = \'Name_way3\'', 3144),
( 30, 'Multipolygon named inner - outer (Tags from relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -8 and landuse = \'residential\' and name = \'Name_rel2\'', 12894),
( 31, 'Multipolygon named inner - inner way',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = 5 and landuse = \'farmland\' and name = \'Name_way4\'', 3144),
( 32, 'Multipolygon named same inner - outer (Tags from way)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -17 and landuse = \'residential\' and name = \'Name_way16\'', 12895),
( 33, 'Multipolygon named same inner - inner way absent',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 15', 0),
( 34, 'Multipolygon non-area inner - outer (Tags from relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -14 and landuse = \'residential\' and name = \'Name_way5\'', 12893),
( 35, 'Multipolygon non-area inner - inner (Tags from way)',
'SELECT round(ST_Length(way)) FROM planet_osm_line WHERE osm_id = 6 and highway = \'residential\' and name = \'Name_way6\'', 228),
( 36, 'Multipolygon 2 holes (Tags from way)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -18 and landuse = \'residential\' and name = \'Name_way7\'', 11823),
( 37, 'Multipolygon 2 holes (Tags from way)',
'SELECT ST_NumInteriorRing(way) FROM planet_osm_polygon WHERE osm_id = -18 and landuse = \'residential\' and name = \'Name_way7\'', 2),
( 38, 'Multipolygon from multiple outer ways 0 holes (Tags from relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -11 and landuse = \'residential\' and name = \'Name_rel6\'', 11528),
( 39, 'Multipolygon from multiple outer and multiple inner ways 2 holes (Tags from relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -3 and landuse = \'residential\' and name = \'Name_rel11\'', 9286),
( 40, 'Multipolygon 2 holes (Tags from way)',
'SELECT ST_NumInteriorRing(way) FROM planet_osm_polygon WHERE osm_id = -3 and landuse = \'residential\' and name = \'Name_rel11\'', 2),
( 41, 'Multipolygon with touching inner ways 1 hole (Tags from way)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -19 and landuse = \'residential\' and name = \'Name_way8\'', 12167),
( 42, 'Multipolygon with touching inner ways 1 hole (Tags from way)',
'SELECT ST_NumInteriorRing(way) FROM planet_osm_polygon WHERE osm_id = -19 and landuse = \'residential\' and name = \'Name_way8\'', 1),
( 43, 'Multipolygon with 2 outer ways (Tags from relation)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -13 and landuse = \'farmland\' and name = \'Name_rel9\'', 17581),
( 44, 'Multipolygon with 2 outer ways (Tags from relation)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = -13 and landuse = \'farmland\' and name = \'Name_rel9\'', 2),
( 45, 'Multipolygon with 2 outer ways (multigeometry)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = -13 and landuse = \'farmland\' and name = \'Name_rel9\'', 1),
( 46, 'Multipolygon with 2 outer ways (multigeometry)',
'SELECT ST_NumGeometries(way) FROM planet_osm_polygon WHERE osm_id = -13 and landuse = \'farmland\' and name = \'Name_rel9\'', 2),
( 47, 'Multipolygon nested outer ways. Both outer and inner ways are from multiple ways (Tags from relation)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -7 and landuse = \'farmland\' and name = \'Name_rel15\'', 16169),
( 48, 'Multipolygon nested outer ways. Both outer and inner ways are from multiple ways (Tags from relation)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = -7 and landuse = \'farmland\' and name = \'Name_rel15\'', 2),
( 49, 'Multipolygon nested outer ways. Both outer and inner ways are from multiple ways (multigeometry)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = -7 and landuse = \'farmland\' and name = \'Name_rel15\'', 1),
( 50, 'Multipolygon nested outer ways. Both outer and inner ways are from multiple ways (multigeometry)',
'SELECT ST_NumGeometries(way) FROM planet_osm_polygon WHERE osm_id = -7 and landuse = \'farmland\' and name = \'Name_rel15\'', 2),
( 51, 'Basic hstore point count', 'SELECT count(*) FROM planet_osm_point;', 1360 ),
( 52, 'Basic hstore line count', 'SELECT count(*) FROM planet_osm_line;', 3323 ),
( 53, 'Basic hstore road count', 'SELECT count(*) FROM planet_osm_roads;', 375 ),
( 54, 'Basic hstore polygon count', 'SELECT count(*) FROM planet_osm_polygon;', 4128 ),
( 55, 'Basic post-diff point count', 'SELECT count(*) FROM planet_osm_point;', 1475 ),
( 56, 'Basic post-diff line count', 'SELECT count(*) FROM planet_osm_line;', 3367 ),
( 57, 'Basic post-diff road count', 'SELECT count(*) FROM planet_osm_roads;', 381 ),
( 58, 'Basic post-diff polygon count', 'SELECT count(*) FROM planet_osm_polygon;', 4275 ),
( 59, 'Extra hstore full tags point count',
'SELECT count(*) FROM planet_osm_point WHERE tags ? \'osm_user\' and tags ? \'osm_version\' and tags ? \'osm_uid\' and tags ? \'osm_changeset\'', 1360),
( 60, 'Extra hstore full tags line count',
'SELECT count(*) FROM planet_osm_line WHERE tags ? \'osm_user\' and tags ? \'osm_version\' and tags ? \'osm_uid\' and tags ? \'osm_changeset\'', 3323),
( 61, 'Extra hstore full tags polygon count',
'SELECT count(*) FROM planet_osm_polygon WHERE tags ? \'osm_user\' and tags ? \'osm_version\' and tags ? \'osm_uid\' and tags ? \'osm_changeset\'', 4128),
( 62, 'Multipolygon copying of tags from outer with extra tags on relation',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -22', 20879),
( 63, 'Multipolygon copying of tags from outer with extra tags on relation (abscence of way)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 84', 0),
( 64, 'Multipolygon non copying of tags from outer with polygon tags on relation',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -24 and "natural" = \'water\'', 18501),
( 65, 'Multipolygon non copying of tags from outer with polygon tags on relation (presence of way)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = 83 and "landuse" = \'farmland\'', 24859),
( 66, 'Multipolygon diff moved point of outer way case (Tags from outer way)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -15 and landuse = \'residential\' and name = \'Name_way\'', 24751),
( 67, 'Multipolygon diff moved point of inner way case (Tags from relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -1 and landuse = \'residential\' and name = \'Name_rel\'', 13949),
( 68, 'Multipolygon point of inner way case (Tags from relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -25 and landuse = \'farmland\' and name = \'my name\'', 23886),
( 69, 'Multipolygon point of inner way case (Tags from relation)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 90', 0),
( 70, 'Multipolygon diff remove relation (tagged outer way gets re added)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = 90 and landuse = \'farmland\'', 32626),
( 71, 'Multipolygon diff remove relation',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = -25', 0),
( 72, 'Multipolygon tags on both inner and outer (presence of relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -34 and "natural" = \'water\'', 15246),
( 73, 'Multipolygon tags on both inner and outer (abscence of outer)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 113', 0),
( 74, 'Multipolygon tags on both inner and outer (abscence of inner)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 118', 0),
( 75, 'Multipolygon tags on both inner and outer diff change outer (presence of relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -34 and "landuse" = \'farmland\'', 15246),
( 76, 'Multipolygon tags on both inner and outer diff change outer (abscence of outer)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 113', 0),
( 77, 'Multipolygon tags on both inner and outer diff change on outer (creation of inner)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = 118 and "natural" = \'water\'', 1234),
( 78, 'Multipolygon tags on outer (presence of relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -33 and "natural" = \'water\'', 15612),
( 79, 'Multipolygon tags on outer (abscence of outer)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 114', 0),
( 80, 'Multipolygon tags on outer change of way tags (presence of relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -33 and "landuse" = \'cemetery\'', 15612),
( 81, 'Multipolygon tags on outer (abscence of old relation)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = -33 and "natural" = \'water\'', 0),
( 82, 'Multipolygon tags on relation two outer (presence of relation)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -29 and "natural" = \'water\'', 68492),
( 83, 'Multipolygon tags on relation two outer (abscence of outer)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 109', 0),
( 84, 'Multipolygon tags on relation two outer (abscence of outer)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 104', 0),
( 85, 'Multipolygon tags on relation two outer diff delete way (presence of relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -29 and "natural" = \'water\'', 29154),
( 86, 'Multipolygon tags on relation two outer (presence of relation)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -35 and "natural" = \'water\'', 28730),
( 87, 'Multipolygon tags on relation two outer (abscence of outer)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 107', 0),
( 88, 'Multipolygon tags on relation two outer (abscence of outer)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 102', 0),
( 89, 'Multipolygon tags on relation two outer diff remove way from relation (presence of relation)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = -35 and "natural" = \'water\'', 15736),
( 90, 'Multipolygon tags on relation two outer diff remove way from relation (presence of single way)',
'SELECT round(ST_Area(way)) FROM planet_osm_polygon WHERE osm_id = 102 and "natural" = \'water\'', 12994),
( 91, 'Basic line length', 'SELECT round(sum(ST_Length(way))) FROM planet_osm_line;', 4269394),
( 92, 'Basic line length', 'SELECT round(sum(ST_Length(way))) FROM planet_osm_roads;', 2032023),
( 93, 'Basic number of hstore points tags', 'SELECT sum(array_length(akeys(tags),1)) FROM planet_osm_point;', 4228),
( 94, 'Basic number of hstore roads tags', 'SELECT sum(array_length(akeys(tags),1)) FROM planet_osm_roads;', 2316),
( 95, 'Basic number of hstore lines tags', 'SELECT sum(array_length(akeys(tags),1)) FROM planet_osm_line;', 10897),
( 96, 'Basic number of hstore polygons tags', 'SELECT sum(array_length(akeys(tags),1)) FROM planet_osm_polygon;', 9540),
( 97, 'Diff import number of hstore points tags', 'SELECT sum(array_length(akeys(tags),1)) FROM planet_osm_point;', 4352),
( 98, 'Diff import number of hstore roads tags', 'SELECT sum(array_length(akeys(tags),1)) FROM planet_osm_roads;', 2340),
( 99, 'Diff import number of hstore lines tags', 'SELECT sum(array_length(akeys(tags),1)) FROM planet_osm_line;', 11020),
( 100, 'Diff import number of hstore polygons tags', 'SELECT sum(array_length(akeys(tags),1)) FROM planet_osm_polygon;', 9834),
#**** Tests to check if the inner polygon appears when outer tags change after initially identical inner and outer way tags in a multi-polygon ****
#**** These tests are currently broken and noted in trac ticket #2853 ****
( 101, 'Multipolygon identical tags on inner and outer (presence of relation)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -31 and "natural" = \'heath\'', 32702),
( 102, 'Multipolygon identical tags on inner and outer (abscence of outer)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 120', 0),
( 103, 'Multipolygon identical tags on inner and outer (abscence of inner)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 112', 0),
( 104, 'Multipolygon identical tags on inner and outer (presence of relation), post diff',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -31 and "natural" = \'water\'', 32702),
( 105, 'Multipolygon identical tags on inner and outer (presece of inner)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = 112 and "natural" = \'heath\'', 1234),
#**** Test to check that only polygon tags that are present on all outer ways get copied over to the multi-polygon relation ****
( 106, 'Multipolygon copy outer tags (presence of relation)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -38 and "natural" = \'water\'', 29340),
( 107, 'Multipolygon copy outer tags (absence of partial outer tags)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = -38 and "natural" = \'water\' and "man_made" = \'pier\'', 0),
( 108, 'Multipolygon copy outer tags (absence of multi-polygon tagged outer way)',
'SELECT count(*) FROM planet_osm_line WHERE osm_id = 134 OR osm_id = 133', 0),
( 109, 'Multipolygon copy outer tags (presence of additionally tagged outer way)',
'SELECT round(sum(ST_length(way))) FROM planet_osm_line WHERE (osm_id = 136 OR osm_id = 132) AND "man_made" = \'pier\'', 407),
( 110, 'Multipolygon copy outer tags (presence of relation)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -37 and "natural" = \'water\'', 29952),
( 111, 'Multipolygon copy outer tags (absence of partial outer tags)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = -37 and "natural" = \'water\' and "man_made" = \'pier\'', 0),
( 112, 'Multipolygon copy outer tags (absence of multi-polygon tagged outer way)',
'SELECT count(*) FROM planet_osm_line WHERE osm_id = 128 OR osm_id = 125', 0),
( 113, 'Multipolygon copy outer tags (presence of additionally tagged outer way)',
'SELECT round(sum(ST_length(way))) FROM planet_osm_line WHERE (osm_id = 126 OR osm_id = 124) AND "man_made" = \'pier\'', 276),
( 114, 'Multipolygon copy outer tags (absence of multi-polygon tagged inner way)',
'SELECT count(*) FROM planet_osm_line WHERE osm_id = 123 OR osm_id = 121', 0),
( 115, 'Multipolygon copy outer tags (presence of additionally tagged inner way)',
'SELECT round(sum(ST_length(way))) FROM planet_osm_line WHERE (osm_id = 127 OR osm_id = 122) AND "man_made" = \'pier\'', 318),
#**** Test to check that if polygon tags are on both outer ways and relation, polygons don't get duplicated in the db ****
( 116, 'Multipolygon tags on both outer and relation (presence of relation)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -39 and "landuse" = \'forest\'', 10379),
( 117, 'Multipolygon tags on both outer and relation (absence of outer way)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 138', 0),
( 118, 'Multipolygon tags on both outer and relation with additional tags on relation (presence of relation)',
'SELECT round(sum(ST_Area(way))) FROM planet_osm_polygon WHERE osm_id = -40 and "landuse" = \'forest\'', 12397),
( 119, 'Multipolygon tags on both outer and relation with additional tags on relation (absence of outer way)',
'SELECT count(*) FROM planet_osm_polygon WHERE osm_id = 140', 0),
]
#****************************************************************
#****************************************************************
class NonSlimRenderingTestSuite(unittest.TestSuite):
    """Rendering-schema import checks that run osm2pgsql without keeping
    slim-mode middle tables around afterwards."""

    def __init__(self):
        # Every suite is seeded with the two trivial sanity tests.
        unittest.TestSuite.__init__(self, map(ThirdTestCase, ("testOne", "testTwo")))
        # (label, extra osm2pgsql options, sql_test_statements indexes)
        cases = [
            ("basic case", [], [0, 1, 2, 3, 10, 13, 91, 92]),
            ("slim --drop case", ["--slim", "--drop"], [0, 1, 2, 3, 10, 11, 12, 13, 91, 92]),
            ("Hstore index drop", ["--slim", "--hstore", "--hstore-add-index", "--drop"], [51, 52, 53, 54]),
            ("lat lon projection", ["-l"], [0, 4, 5, 3, 10, 11, 12]),
            # Failing test 3,13 due to difference in handling mixture of tags on
            # ways and relations, where the correct behaviour is non obvious
            ("--tag-transform-script", ["--tag-transform-script", "style.lua"], [0, 1, 2, 10, 91, 92]),
        ]
        for label, options, checks in cases:
            self.addTest(BasicNonSlimTestCase(label, options, checks))
class SlimRenderingTestSuite(unittest.TestSuite):
    """Rendering-schema checks for --slim imports followed by a diff update."""

    def __init__(self):
        unittest.TestSuite.__init__(self, map(ThirdTestCase, ("testOne", "testTwo")))
        # (label, extra options, checks after import, checks after the diff)
        cases = [
            ("basic case", [], [0, 1, 2, 3, 13, 91, 92], [6, 7, 8, 9]),
            ("Parallel processing", ["--number-processes", "8", "-C100"], [0, 1, 2, 3, 13, 91, 92], [6, 7, 8, 9]),
            ("Parallel processing with non 100% node-cache", ["--number-processes", "8", "-C1", "--cache-strategy=dense"], [0, 1, 2, 3, 13, 91, 92], [6, 7, 8, 9]),
            ("Parallel processing with disabled node-cache", ["-C0"], [0, 1, 2, 3, 13, 91, 92], [6, 7, 8, 9]),
            # Fails to do correct error checking. This needs fixing in osm2pgsql:
            # a "Parallel processing with failing database connection (connection
            # limit exceeded)" case with ["--number-processes", "32", "-C100"]
            # cannot be enabled yet.
            # Counts are expected to be different in hstore, needs adjusted tests
            ("Hstore match only", ["-k", "--hstore-match-only"], [0, 1, 2, 3], [6, 7, 8, 9]),
            ("Hstore name column", ["-z", "name:"], [0, 1, 2, 3], [6, 7, 8, 9]),
            ("Hstore", ["-k"], [51, 52, 53, 54], [55, 56, 57, 58]),
            ("Hstore all", ["-j"], [51, 52, 53, 54, 93, 94, 95, 96], [55, 56, 57, 58, 97, 98, 99, 100]),
            ("Hstore index", ["--hstore", "--hstore-add-index"], [51, 52, 53, 54], [55, 56, 57, 58]),
            # tests dont check for osm_timestamp which is currently missing in
            # the pbf parser
            ("Extra tags hstore match only", ["-x", "-k", "--hstore-match-only"], [0, 1, 2, 3], [6, 7, 8, 9]),
            ("Extra tags hstore all", ["-j", "-x"], [51, 52, 53, 54, 59, 60, 61], [55, 56, 57, 58]),
            ("--tablespace-main-data", ["--tablespace-main-data", "tablespacetest"], [0, 1, 2, 3, 13, 91, 92], [6, 7, 8, 9]),
            ("--tablespace-main-index", ["--tablespace-main-index", "tablespacetest"], [0, 1, 2, 3, 13, 91, 92], [6, 7, 8, 9]),
            ("--tablespace-slim-data", ["--tablespace-slim-data", "tablespacetest"], [0, 1, 2, 3, 13, 91, 92], [6, 7, 8, 9]),
            ("--tablespace-slim-index", ["--tablespace-slim-index", "tablespacetest"], [0, 1, 2, 3, 13, 91, 92], [6, 7, 8, 9]),
            # Failing test 3,13,9 due to difference in handling mixture of tags
            # on ways and relations, where the correct behaviour is non obvious
            ("--tag-transform-script", ["--tag-transform-script", "style.lua"], [0, 1, 2, 91, 92], [6, 7, 8]),
        ]
        for label, options, initial, post_diff in cases:
            self.addTest(BasicSlimTestCase(label, options, initial, post_diff))
class SlimGazetteerTestSuite(unittest.TestSuite):
    """Gazetteer-output import plus diff-update checks (always --slim)."""

    def __init__(self):
        sanity_cases = map(ThirdTestCase, ("testOne", "testTwo"))
        unittest.TestSuite.__init__(self, sanity_cases)
        self.addTest(BasicGazetteerTestCase("basic case", [],
                                            [14, 15, 16, 17, 22, 24],
                                            [18, 19, 20, 21, 23, 25]))
class MultiPolygonSlimRenderingTestSuite(unittest.TestSuite):
    """Multi-polygon processing checks: --slim import of the multipolygon
    fixture followed by its diff file, across tag/hstore/lua option mixes."""

    def __init__(self):
        unittest.TestSuite.__init__(self, map(ThirdTestCase, ("testOne", "testTwo")))
        # Case 77 currently doesn't work.
        # Statement ids shared by most option mixes after the initial import...
        base_checks = [26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
                       41, 42, 43, 44, 47, 48, 62, 63, 64, 65, 68, 69, 72, 73, 74,
                       78, 79, 82, 83, 84, 86, 87, 88,
                       106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
        # ...and after the diff has been applied.
        diff_checks = [28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
                       43, 44, 47, 48, 62, 63, 64, 65, 66, 67, 70, 71, 75, 76, 79,
                       80, 81, 83, 84, 85, 87, 89, 90]
        # The -G (multigeometry) run uses ids 45/46/49/50 instead of 44/48.
        mg_initial = [26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
                      41, 42, 43, 45, 46, 47, 49, 50, 62, 63, 64, 65, 68, 69, 72,
                      73, 74, 78, 79, 82, 83, 84, 86, 87, 88,
                      106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
        mg_diff = [28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
                   43, 45, 46, 47, 49, 50, 62, 63, 64, 65, 66, 67, 70, 71, 75,
                   76, 79, 80, 81, 83, 84, 85, 87, 89, 90]
        # The lua tag-transform runs skip the outer-tag-copy cases 106-115
        # (and, without hstore, case 63).
        lua_initial = [26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
                       41, 42, 43, 44, 47, 48, 62, 64, 65, 68, 69, 72, 73, 74, 78,
                       79, 82, 83, 84, 86, 87, 88, 116, 117, 118, 119]
        lua_hstore_initial = [26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
                              41, 42, 43, 44, 47, 48, 62, 63, 64, 65, 68, 69, 72, 73, 74,
                              78, 79, 82, 83, 84, 86, 87, 88, 116, 117, 118, 119]
        cases = [
            ("basic case", [], base_checks, diff_checks),
            ("multi geometry", ["-G"], mg_initial, mg_diff),
            ("hstore case", ["-k"], base_checks, diff_checks),
            ("hstore case", ["-k", "--hstore-match-only"], base_checks, diff_checks),
            ("Extra tags hstore match only", ["-x", "-k", "--hstore-match-only"], base_checks, diff_checks),
            ("Extra tags hstore match only", ["-x", "-j"], base_checks, diff_checks),
            ("lua tagtransform case", ["--tag-transform-script", "style.lua"], lua_initial, diff_checks),
            ("lua tagtransform case with hstore", ["--tag-transform-script", "style.lua", "-k"], lua_hstore_initial, diff_checks),
        ]
        for label, options, initial, post_diff in cases:
            self.addTest(MultipolygonSlimTestCase(label, options, initial, post_diff))
class CompleteTestSuite(unittest.TestSuite):
    """Top-level suite: two sanity checks plus every rendering/gazetteer
    sub-suite, in the order they were historically run."""

    def __init__(self):
        unittest.TestSuite.__init__(self, map(ThirdTestCase, ("testOne", "testTwo")))
        # One sub-suite per osm2pgsql operating mode.
        for suite_class in (NonSlimRenderingTestSuite,
                            SlimRenderingTestSuite,
                            MultiPolygonSlimRenderingTestSuite,
                            SlimGazetteerTestSuite):
            self.addTest(suite_class())
#****************************************************************
class ThirdTestCase(unittest.TestCase):
    """Trivial always-passing checks used to seed every suite, proving that
    the unittest machinery itself is wired up correctly."""

    def testOne(self):
        self.assertEqual(1, 1)

    def testTwo(self):
        self.assertEqual(2, 2)
#****************************************************************
class BaseTestCase(unittest.TestCase):
def dbConnect(self):
try:
self.conn=psycopg2.connect("dbname='osm2pgsql-test'")
self.conn.autocommit = True
self.cur = self.conn.cursor()
except Exception, e:
print "I am unable to connect to the database." + e
def dbClose(self):
self.cur.close()
self.conn.close()
def executeStatements(self, seq):
print "*********************************"
self.dbConnect()
try:
for i in seq:
self.assertEqual(sql_test_statements[i][0], i, "test case numbers don't match up: " + str(i) + " =/=" + str(sql_test_statements[i][0]))
try:
self.cur.execute(sql_test_statements[i][2])
res = self.cur.fetchall()
except Exception, e:
self.assertEqual(0, 1, str(sql_test_statements[i][0]) + ": Failed to execute " + sql_test_statements[i][1] +
" (" + sql_test_statements[i][2] + ") {" + str(self.parameters) +"}")
if (res == None):
self.assertEqual(0, 1, str(sql_test_statements[i][0]) + ": Sql statement returned no results: " +
sql_test_statements[i][1] + " (" + sql_test_statements[i][2] + ") {" + str(self.parameters) +"}")
self.assertEqual(len(res), 1, str(sql_test_statements[i][0]) + ": Sql statement returned more than one result: " +
str(res) + " -- " + sql_test_statements[i][1] + " (" + sql_test_statements[i][2] + ") {" + str(self.parameters) +"}")
self.assertEqual( res[0][0], sql_test_statements[i][3],
str(sql_test_statements[i][0]) + ": Failed " + sql_test_statements[i][1] + ", expected " + str(sql_test_statements[i][3]) + " but was " + str(res[0][0]) +
" (" + sql_test_statements[i][2] + ") {" + str(self.parameters) +"}")
finally:
self.dbClose()
#****************************************************************
class BaseNonSlimTestCase(BaseTestCase):
    """Imports an OSM file with osm2pgsql in non-slim mode."""
    def setUpGeneric(self, parameters, file):
        """Run ./osm2pgsql (non-slim) on *file* with the extra *parameters*.

        BUG FIX: this method previously ignored its *file* argument and always
        imported full_import_file (compare BaseSlimTestCase.setUpGeneric).
        Current callers pass full_import_file, so behaviour is unchanged.
        """
        proc = subprocess.Popen(["./osm2pgsql", "-Sdefault.style", "-dosm2pgsql-test", "-C100"] + parameters + [file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (outp, outerr) = proc.communicate()
        self.assertEqual (proc.returncode, 0, "Execution of osm2pgsql with options: '%s' failed:\n%s\n%s\n" % (str(parameters), outp, outerr))
class BaseSlimTestCase(BaseTestCase):
    """Imports and diff-updates OSM files with osm2pgsql running in --slim mode."""

    def setUpGeneric(self, parameters, file):
        # Initial --slim import of *file* with the extra command line options.
        base_cmd = ["./osm2pgsql", "--slim", "-Sdefault.style", "-dosm2pgsql-test", "-C100"]
        child = subprocess.Popen(base_cmd + parameters + [file],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (outp, outerr) = child.communicate()
        self.assertEqual (child.returncode, 0, "Execution of osm2pgsql --slim with options: '%s' failed:\n%s\n%s\n" % (str(parameters), outp, outerr))

    def updateGeneric(self, parameters, file):
        # Apply a diff (--append) on top of an earlier --slim import.
        base_cmd = ["./osm2pgsql", "--slim", "--append", "-Sdefault.style", "-dosm2pgsql-test", "-C100"]
        child = subprocess.Popen(base_cmd + parameters + [file],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (outp, outerr) = child.communicate()
        self.assertEqual (child.returncode, 0, "Execution of osm2pgsql --slim --append with options: '%s' failed:\n%s\n%s\n" % (str(parameters), outp, outerr))
class BaseGazetteerTestCase(BaseTestCase):
    """Runs osm2pgsql with the gazetteer output backend (always --slim)."""
    def setUpGeneric(self, parameters, file):
        # Initial gazetteer import of *file* with the extra options.
        proc = subprocess.Popen(["./osm2pgsql", "--slim", "-Ogazetteer", "-Sdefault.style", "-dosm2pgsql-test"] + parameters + [file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (outp, outerr) = proc.communicate()
        self.assertEqual (proc.returncode, 0, "Execution of osm2pgsql --slim gazetteer options: '%s' failed:\n%s\n%s\n" % (str(parameters), outp, outerr))
    def updateGeneric(self, parameters, file):
        # Diff (--append) run on top of an earlier gazetteer import.
        proc = subprocess.Popen(["./osm2pgsql", "--slim", "-Ogazetteer", "--append", "-Sdefault.style", "-dosm2pgsql-test"] + parameters + [file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (outp, outerr) = proc.communicate()
        self.assertEqual (proc.returncode, 0, "Execution of osm2pgsql --slim --append gazetteer options: '%s' failed:\n%s\n%s\n" % (str(parameters), outp, outerr))
#****************************************************************
class BasicNonSlimTestCase(BaseNonSlimTestCase):
    """Single non-slim import of the full extract, then SQL result checks."""
    def __init__(self, name, parameters, initialStatements):
        # name: human readable label used only in the progress output
        # parameters: extra osm2pgsql command line options
        # initialStatements: indexes into sql_test_statements to verify
        BaseNonSlimTestCase.__init__(self)
        self.name = name
        self.parameters = parameters
        self.initialStatements = initialStatements
    def setUp(self):
        # Import the full Liechtenstein extract before each run.
        self.setUpGeneric(self.parameters, full_import_file)
    def runTest(self):
        print "****************************************"
        print "Running initial import for " + self.name
        self.executeStatements(self.initialStatements)
class BasicSlimTestCase(BaseSlimTestCase):
    """--slim import of the full extract followed by a diff update, with SQL
    result checks after each stage."""
    def __init__(self, name, parameters, initialStatements, postDiffStatements):
        # name: human readable label used only in the progress output
        # parameters: extra osm2pgsql options, reused for import and diff runs
        # initialStatements / postDiffStatements: indexes into sql_test_statements
        BaseSlimTestCase.__init__(self)
        self.name = name
        self.parameters = parameters
        self.initialStatements = initialStatements
        self.postDiffStatements = postDiffStatements
    def setUp(self):
        # Fresh --slim import of the Liechtenstein extract before each run.
        self.setUpGeneric(self.parameters, full_import_file)
    def runTest(self):
        print "****************************************"
        print "Running initial import for " + self.name
        self.executeStatements(self.initialStatements)
        print "Running diff-import for " + self.name
        self.updateGeneric(self.parameters, diff_import_file)
        self.executeStatements(self.postDiffStatements)
class MultipolygonSlimTestCase(BaseSlimTestCase):
    """Like BasicSlimTestCase but driven by the multipolygon fixture files."""

    def __init__(self, name, parameters, initialStatements, postDiffStatements):
        # initialStatements / postDiffStatements: checks run after the
        # initial import and after the diff import respectively.
        BaseSlimTestCase.__init__(self)
        self.name = name
        self.parameters = parameters
        self.initialStatements = initialStatements
        self.postDiffStatements = postDiffStatements

    def setUp(self):
        # multipoly_import_file is a module-level fixture path.
        self.setUpGeneric(self.parameters, multipoly_import_file)

    def runTest(self):
        print "****************************************"
        print "Running initial import for " + self.name
        self.executeStatements(self.initialStatements)
        print "Running diff-import for " + self.name
        self.updateGeneric(self.parameters, diff_multipoly_import_file)
        self.executeStatements(self.postDiffStatements)
class BasicGazetteerTestCase(BaseGazetteerTestCase):
    """Full gazetteer import followed by a diff import, with SQL checks after each."""

    def __init__(self, name, parameters, initialStatements, postDiffStatements):
        # name: label for progress output; parameters: extra osm2pgsql CLI args.
        BaseGazetteerTestCase.__init__(self)
        self.name = name
        self.parameters = parameters
        self.initialStatements = initialStatements
        self.postDiffStatements = postDiffStatements

    def setUp(self):
        # full_import_file is a module-level default (overridable via -f).
        self.setUpGeneric(self.parameters, full_import_file)

    def runTest(self):
        print "****************************************"
        print "Running initial import in gazetteer mode for " + self.name
        self.executeStatements(self.initialStatements)
        print "Running diff-import in gazetteer mode for " + self.name
        self.updateGeneric(self.parameters, diff_import_file)
        self.executeStatements(self.postDiffStatements)
#****************************************************************
#****************************************************************
def setupDB():
print "Setting up test database"
try:
gen_conn=psycopg2.connect("dbname='template1'")
gen_conn.autocommit = True
except Exception, e:
print "I am unable to connect to the database."
exit()
try:
gen_cur = gen_conn.cursor()
except Exception, e:
gen_conn.close()
print "I am unable to connect to the database."
exit()
try:
gen_cur.execute("""DROP DATABASE IF EXISTS \"osm2pgsql-test\"""")
gen_cur.execute("""CREATE DATABASE \"osm2pgsql-test\" WITH ENCODING 'UTF8'""")
except Exception, e:
print "Failed to create osm2pgsql-test db" + e.pgerror
exit();
finally:
gen_cur.close()
gen_conn.close()
try:
test_conn=psycopg2.connect("dbname='osm2pgsql-test'")
test_conn.autocommit = True
except Exception, e:
print "I am unable to connect to the database." + e
exit()
try:
test_cur = test_conn.cursor()
except Exception, e:
print "I am unable to connect to the database." + e
gen_conn.close()
exit()
try:
try:
global created_tablespace
test_cur.execute("""SELECT spcname FROM pg_tablespace WHERE spcname = 'tablespacetest'""")
if test_cur.fetchone():
print "We already have a tablespace, can use that"
created_tablespace = 0
else:
print "For the test, we need to create a tablespace. This needs root privileges"
created_tablespace = 1
### This makes postgresql read from /tmp
## Does this have security implications like opening this to a possible symlink attack?
try:
os.mkdir("/tmp/psql-tablespace")
returncode = subprocess.call(["/usr/bin/sudo", "/bin/chown", "postgres.postgres", "/tmp/psql-tablespace"])
test_cur.execute("""CREATE TABLESPACE tablespacetest LOCATION '/tmp/psql-tablespace'""")
except Exception, e:
os.rmdir("/tmp/psql-tablespace")
self.assertEqual(0, 1, "Failed to create tablespace")
except Exception, e:
print "Failed to create directory for tablespace" + str(e)
try:
test_cur.execute("""CREATE EXTENSION postgis;""")
except:
test_conn.rollback()
# Guess the directory from the postgres version.
# TODO: make the postgisdir configurable. Probably
# only works on Debian-based distributions at the moment.
postgisdir = ('/usr/share/postgresql/%d.%d/contrib' %
(test_conn.server_version / 10000, (test_conn.server_version / 100) % 100))
for fl in os.listdir(postgisdir):
if fl.startswith('postgis'):
newdir = os.path.join(postgisdir, fl)
if os.path.isdir(newdir):
postgisdir = newdir
break
else:
raise Exception('Cannot find postgis directory.')
pgscript = open(os.path.join(postgisdir, 'postgis.sql'),'r').read()
test_cur.execute(pgscript)
pgscript = open(os.path.join(postgisdir, 'spatial_ref_sys.sql'), 'r').read()
test_cur.execute(pgscript)
try:
test_cur.execute("""CREATE EXTENSION hstore;""")
except Exception, e:
print "I am unable to create extensions: " + e.pgerror
exit()
finally:
test_cur.close()
test_conn.close()
def tearDownDB():
print "Cleaning up test database"
try:
gen_conn=psycopg2.connect("dbname='template1'")
gen_conn.autocommit = True
gen_cur = gen_conn.cursor()
except Exception, e:
print "I am unable to connect to the database."
exit()
try:
gen_cur.execute("""DROP DATABASE IF EXISTS \"osm2pgsql-test\"""")
if (created_tablespace == 1):
gen_cur.execute("""DROP TABLESPACE IF EXISTS \"tablespacetest\"""")
except Exception, e:
print "Failed to clean up osm2pgsql-test db" + e.pgerror
exit();
gen_cur.close()
gen_conn.close()
if (created_tablespace == 1):
returncode = subprocess.call(["/usr/bin/sudo", "/bin/rmdir", "/tmp/psql-tablespace"])
if __name__ == "__main__":
    # Parse the single -f option, build the full suite, and always tear the
    # scratch database down even when the run raises.
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-f", dest="osm_file", action="store", metavar="FILE",
                      default=full_import_file,
                      help="Import a specific osm file [default=%default]")
    (options, args) = parser.parse_args()
    if options.osm_file:
        # Override the module-level default import file for all test cases.
        full_import_file = options.osm_file

    ts2 = CompleteTestSuite()
    try:
        setupDB()
        runner = unittest.TextTestRunner()
        runner.run(ts2)
    finally:
        # Always drop the scratch database, even if the run failed.
        tearDownDB()
| gpl-2.0 |
liuzzfnst/tp-libvirt | libvirt/tests/src/virtual_disks/at_dt_iscsi_disk.py | 3 | 11876 | import os
import re
import base64
import logging
from autotest.client.shared import error
from virttest import virsh
from virttest.remote import LoginError
from virttest.virt_vm import VMError
from virttest.aexpect import ShellError
from virttest.utils_test import libvirt
from virttest.libvirt_xml import vm_xml
from virttest.libvirt_xml import pool_xml
from virttest.libvirt_xml.secret_xml import SecretXML
from provider import libvirt_version
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create
    4. Create an iscsi network disk XML
    5. Attach disk with the XML file and check the disk inside the VM
    6. Detach the disk

    @param test: the test object (provides tmpdir for 'save' operations)
    @param params: cartesian test parameters dictionary
    @param env: test environment providing the VM under test
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Test configuration -- all values come from the cartesian params.
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    # Feature gates: 'iscsi' disks need libvirt >= 1.0.4, 'volume' disks
    # need libvirt >= 1.0.5.
    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in"
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    " current libvirt version.")
    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s", open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")
            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")
        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'source_mode': disk_src_mode}
        else:
            # NOTE(review): this TestNAError is constructed but never raised,
            # so an unsupported disk_type currently falls through silently.
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()

        attach_option = params.get("attach_option", "")
        disk_xml_f = open(disk_xml)
        disk_xml_content = disk_xml_f.read()
        disk_xml_f.close()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except error.CmdError:
                # NOTE(review): TestFail is instantiated but not raised here
                # (and the %-args are never interpolated), so a CmdError is
                # effectively swallowed.
                error.TestFail("Failed getting snapshots list for %s", vm_name)
            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except error.CmdError:
                # NOTE(review): same as above -- the exception object is discarded.
                error.TestFail("Failed getting snapshots info for %s", vm_name)
            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # NOTE(review): '--disk_ony' looks like a typo for virsh's
            # '--disk-only' option -- confirm against virsh snapshot-create-as.
            sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)
            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            pass
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM

            Logs in and greps /proc/partitions for disk_target; raises
            TestError when the presence of the disk does not match *expect*.
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError), e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
    finally:
        # Cleanup always runs: restore the VM definition and tear down the
        # iscsi target regardless of how the test body finished.
        if vm.is_alive():
            vm.destroy()
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
| gpl-2.0 |
ofekd/servo | tests/wpt/web-platform-tests/2dcontext/tools/specextract.py | 75 | 2105 | import html5lib
import html5lib.treebuilders.dom
# Expected use:
# curl --compressed http://www.whatwg.org/specs/web-apps/current-work/ >current-work
# python specextract.py
#
# Generates current-work-canvas.xhtml, for use by gentest.py to create the annotated spec document
def extract():
    """Extract the canvas-element section of the WHATWG spec into XHTML.

    Reads the downloaded spec from 'current-work', keeps only the header
    and the canvas-element section, rewrites relative references to
    absolute URLs, and writes 'current-work-canvas.xhtml'.
    """
    parser = html5lib.html5parser.HTMLParser(tree=html5lib.treebuilders.dom.TreeBuilder)
    # BUG FIX: close the input file instead of leaking the handle.
    with open('current-work', "r") as spec_input:
        doc = parser.parse(spec_input, encoding='utf-8')

    head = doc.getElementsByTagName('head')[0]
    # NOTE(review): iterates a live childNodes list while removing from it --
    # assumes at most one <script> per sibling pair; confirm if scripts change.
    for n in head.childNodes:
        if n.tagName == 'script':
            head.removeChild(n)

    header = doc.getElementsByTagName('header')[0]
    #thecanvas = doc.getElementById('the-canvas') # doesn't work (?!)
    thecanvas = [ n for n in doc.getElementsByTagName('h4') if n.getAttribute('id') == 'the-canvas-element' ][0]

    # Keep the header, the canvas heading, and every sibling up to the next <h4>.
    keep = [header, thecanvas]
    node = thecanvas.nextSibling
    while node.nodeName != 'h4':
        keep.append(node)
        node = node.nextSibling
    p = thecanvas.parentNode
    for n in p.childNodes[:]:
        if n not in keep:
            p.removeChild(n)

    for n in header.childNodes[3:-4]:
        header.removeChild(n)

    def make_absolute(uri):
        # data: URIs are self-contained; root-relative and page-relative
        # references get the appropriate whatwg.org prefix.
        if uri.startswith('data:'):
            return uri
        elif uri[0] == '/':
            return 'http://www.whatwg.org' + uri
        else:
            return 'http://www.whatwg.org/specs/web-apps/current-work/' + uri

    # Fix the stylesheet, icon and image references
    for e in doc.getElementsByTagName('link'):
        e.setAttribute('href', make_absolute(e.getAttribute('href')))
    for img in doc.getElementsByTagName('img'):
        img.setAttribute('src', make_absolute(img.getAttribute('src')))

    # Convert to XHTML, because it's quicker to re-parse than HTML5
    doc.documentElement.setAttribute('xmlns', 'http://www.w3.org/1999/xhtml')
    doc.documentElement.setAttribute('xml:lang', doc.documentElement.getAttribute('lang'))
    doc.removeChild(doc.firstChild) # remove the DOCTYPE

    # BUG FIX: close the output file so the data is flushed deterministically.
    with open('current-work-canvas.xhtml', 'w') as out:
        out.write(doc.toxml(encoding = 'UTF-8'))
# Run the extraction as soon as this module is executed.
extract()
| mpl-2.0 |
eeshangarg/oh-mainline | vendor/packages/scrapy/scrapy/tests/test_selector.py | 16 | 9978 | """
Selectors tests, common for all backends
"""
import re
import weakref
from twisted.trial import unittest
from scrapy.http import TextResponse, HtmlResponse, XmlResponse
from scrapy.selector import XmlXPathSelector, HtmlXPathSelector, \
XPathSelector
from scrapy.utils.test import libxml2debug
class XPathSelectorTestCase(unittest.TestCase):
    """Backend-independent behaviour tests for the XPath selector classes.

    Subclasses can exercise an alternative selector backend by overriding
    the three *_cls attributes below.
    """

    # Selector classes under test (overridable by backend-specific subclasses).
    xs_cls = XPathSelector
    hxs_cls = HtmlXPathSelector
    xxs_cls = XmlXPathSelector

    @libxml2debug
    def test_selector_simple(self):
        """Simple selector tests"""
        body = "<p><input name='a'value='1'/><input name='b'value='2'/></p>"
        response = TextResponse(url="http://example.com", body=body)
        xpath = self.hxs_cls(response)

        xl = xpath.select('//input')
        self.assertEqual(2, len(xl))
        for x in xl:
            assert isinstance(x, self.hxs_cls)

        self.assertEqual(xpath.select('//input').extract(),
                         [x.extract() for x in xpath.select('//input')])

        self.assertEqual([x.extract() for x in xpath.select("//input[@name='a']/@name")],
                         [u'a'])
        self.assertEqual([x.extract() for x in xpath.select("number(concat(//input[@name='a']/@value, //input[@name='b']/@value))")],
                         [u'12.0'])

        self.assertEqual(xpath.select("concat('xpath', 'rules')").extract(),
                         [u'xpathrules'])
        self.assertEqual([x.extract() for x in xpath.select("concat(//input[@name='a']/@value, //input[@name='b']/@value)")],
                         [u'12'])

    def test_selector_unicode_query(self):
        # Non-ASCII attribute names must be queryable with a unicode XPath.
        body = u"<p><input name='\xa9' value='1'/></p>"
        response = TextResponse(url="http://example.com", body=body, encoding='utf8')
        xpath = self.hxs_cls(response)
        self.assertEqual(xpath.select(u'//input[@name="\xa9"]/@value').extract(), [u'1'])

    @libxml2debug
    def test_selector_same_type(self):
        """Test XPathSelector returning the same type in x() method"""
        text = '<p>test<p>'
        assert isinstance(self.xxs_cls(text=text).select("//p")[0],
                          self.xxs_cls)
        assert isinstance(self.hxs_cls(text=text).select("//p")[0],
                          self.hxs_cls)

    @libxml2debug
    def test_selector_boolean_result(self):
        body = "<p><input name='a'value='1'/><input name='b'value='2'/></p>"
        response = TextResponse(url="http://example.com", body=body)
        xs = self.hxs_cls(response)
        true = xs.select("//input[@name='a']/@name='a'").extract()[0]
        false = xs.select("//input[@name='a']/@name='n'").extract()[0]
        # the actual result depends on the backend used
        assert true in [u'1', u'True'], true
        assert false in [u'0', u'False'], false

    @libxml2debug
    def test_selector_xml_html(self):
        """Test that XML and HTML XPathSelector's behave differently"""
        # some text which is parsed differently by XML and HTML flavors
        text = '<div><img src="a.jpg"><p>Hello</div>'
        self.assertEqual(self.xxs_cls(text=text).select("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></img></div>'])
        self.assertEqual(self.hxs_cls(text=text).select("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></div>'])

    @libxml2debug
    def test_selector_nested(self):
        """Nested selector tests"""
        body = """<body>
                    <div class='one'>
                      <ul>
                        <li>one</li><li>two</li>
                      </ul>
                    </div>
                    <div class='two'>
                      <ul>
                        <li>four</li><li>five</li><li>six</li>
                      </ul>
                    </div>
                  </body>"""
        response = HtmlResponse(url="http://example.com", body=body)
        x = self.hxs_cls(response)

        divtwo = x.select('//div[@class="two"]')
        # An absolute // query ignores the nested selector's context...
        self.assertEqual(map(unicode.strip, divtwo.select("//li").extract()),
                         ["<li>one</li>", "<li>two</li>", "<li>four</li>", "<li>five</li>", "<li>six</li>"])
        # ...while ./ and .// are relative to the selected <div>.
        self.assertEqual(map(unicode.strip, divtwo.select("./ul/li").extract()),
                         ["<li>four</li>", "<li>five</li>", "<li>six</li>"])
        self.assertEqual(map(unicode.strip, divtwo.select(".//li").extract()),
                         ["<li>four</li>", "<li>five</li>", "<li>six</li>"])
        self.assertEqual(divtwo.select("./li").extract(),
                         [])

    @libxml2debug
    def test_dont_strip(self):
        # Whitespace-bearing text nodes must be returned unstripped.
        hxs = self.hxs_cls(text='<div>fff: <a href="#">zzz</a></div>')
        self.assertEqual(hxs.select("//text()").extract(),
                         [u'fff: ', u'zzz'])

    @libxml2debug
    def test_selector_namespaces_simple(self):
        body = """
        <test xmlns:somens="http://scrapy.org">
           <somens:a id="foo">take this</a>
           <a id="bar">found</a>
        </test>
        """
        response = XmlResponse(url="http://example.com", body=body)
        x = self.xxs_cls(response)

        x.register_namespace("somens", "http://scrapy.org")
        self.assertEqual(x.select("//somens:a/text()").extract(),
                         [u'take this'])

    @libxml2debug
    def test_selector_namespaces_multiple(self):
        body = """<?xml version="1.0" encoding="UTF-8"?>
<BrowseNode xmlns="http://webservices.amazon.com/AWSECommerceService/2005-10-05"
            xmlns:b="http://somens.com"
            xmlns:p="http://www.scrapy.org/product" >
    <b:Operation>hello</b:Operation>
    <TestTag b:att="value"><Other>value</Other></TestTag>
    <p:SecondTestTag><material>iron</material><price>90</price><p:name>Dried Rose</p:name></p:SecondTestTag>
</BrowseNode>
        """
        response = XmlResponse(url="http://example.com", body=body)
        x = self.xxs_cls(response)

        x.register_namespace("xmlns", "http://webservices.amazon.com/AWSECommerceService/2005-10-05")
        x.register_namespace("p", "http://www.scrapy.org/product")
        x.register_namespace("b", "http://somens.com")
        self.assertEqual(len(x.select("//xmlns:TestTag")), 1)
        self.assertEqual(x.select("//b:Operation/text()").extract()[0], 'hello')
        self.assertEqual(x.select("//xmlns:TestTag/@b:att").extract()[0], 'value')
        self.assertEqual(x.select("//p:SecondTestTag/xmlns:price/text()").extract()[0], '90')
        self.assertEqual(x.select("//p:SecondTestTag").select("./xmlns:price/text()")[0].extract(), '90')
        self.assertEqual(x.select("//p:SecondTestTag/xmlns:material/text()").extract()[0], 'iron')

    @libxml2debug
    def test_selector_re(self):
        body = """<div>Name: Mary
                    <ul>
                      <li>Name: John</li>
                      <li>Age: 10</li>
                      <li>Name: Paul</li>
                      <li>Age: 20</li>
                    </ul>
                    Age: 20
                  </div>
               """
        response = HtmlResponse(url="http://example.com", body=body)
        x = self.hxs_cls(response)

        name_re = re.compile("Name: (\w+)")
        self.assertEqual(x.select("//ul/li").re(name_re),
                         ["John", "Paul"])
        self.assertEqual(x.select("//ul/li").re("Age: (\d+)"),
                         ["10", "20"])

    @libxml2debug
    def test_selector_over_text(self):
        # HTML parsing wraps fragments in html/body; XML parsing does not.
        hxs = self.hxs_cls(text='<root>lala</root>')
        self.assertEqual(hxs.extract(),
                         u'<html><body><root>lala</root></body></html>')

        xxs = self.xxs_cls(text='<root>lala</root>')
        self.assertEqual(xxs.extract(),
                         u'<root>lala</root>')

        xxs = self.xxs_cls(text='<root>lala</root>')
        self.assertEqual(xxs.select('.').extract(),
                         [u'<root>lala</root>'])

    @libxml2debug
    def test_selector_invalid_xpath(self):
        response = XmlResponse(url="http://example.com", body="<html></html>")
        x = self.hxs_cls(response)
        xpath = "//test[@foo='bar]"
        try:
            x.select(xpath)
        except ValueError, e:
            assert xpath in str(e), "Exception message does not contain invalid xpath"
        except Exception:
            raise AssertionError("A invalid XPath does not raise ValueError")
        else:
            raise AssertionError("A invalid XPath does not raise an exception")

    @libxml2debug
    def test_http_header_encoding_precedence(self):
        # The Content-Type HTTP header (utf-8) must win over the meta tag
        # embedded in the document (iso-8859-1).
        # u'\xa3' = pound symbol in unicode
        # u'\xc2\xa3' = pound symbol in utf-8
        # u'\xa3' = pound symbol in latin-1 (iso-8859-1)
        meta = u'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">'
        head = u'<head>' + meta + u'</head>'
        body_content = u'<span id="blank">\xa3</span>'
        body = u'<body>' + body_content + u'</body>'
        html = u'<html>' + head + body + u'</html>'
        encoding = 'utf-8'
        html_utf8 = html.encode(encoding)

        headers = {'Content-Type': ['text/html; charset=utf-8']}
        response = HtmlResponse(url="http://example.com", headers=headers, body=html_utf8)
        x = self.hxs_cls(response)
        self.assertEquals(x.select("//span[@id='blank']/text()").extract(),
                          [u'\xa3'])

    @libxml2debug
    def test_empty_bodies(self):
        r1 = TextResponse('http://www.example.com', body='')
        self.hxs_cls(r1)  # shouldn't raise error
        self.xxs_cls(r1)  # shouldn't raise error

    @libxml2debug
    def test_weakref_slots(self):
        """Check that classes are using slots and are weak-referenceable"""
        for cls in [self.xs_cls, self.hxs_cls, self.xxs_cls]:
            x = cls()
            weakref.ref(x)
            assert not hasattr(x, '__dict__'), "%s does not use __slots__" % \
                x.__class__.__name__
| agpl-3.0 |
funkyfuture/docker-py | tests/integration/regression_test.py | 4 | 2232 | import io
import random
import docker
import six
from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
class TestRegressions(BaseAPIIntegrationTest):
    """Regression tests, each named after the GitHub issue it guards against."""

    def test_443_handle_nonchunked_response_in_stream(self):
        # Building from an empty Dockerfile must surface an APIError, even
        # when the daemon replies without chunked transfer encoding.
        dfile = io.BytesIO()
        with pytest.raises(docker.errors.APIError) as exc:
            for line in self.client.build(fileobj=dfile, tag="a/b/c"):
                pass
        assert exc.value.is_error()
        dfile.close()

    def test_542_truncate_ids_client_side(self):
        # trunc=True must shorten container IDs to 12 characters client-side.
        self.client.start(
            self.client.create_container(TEST_IMG, ['true'])
        )
        result = self.client.containers(all=True, trunc=True)
        assert len(result[0]['Id']) == 12

    def test_647_support_doubleslash_in_image_names(self):
        # '//' in an image name should yield an APIError, not a client crash.
        with pytest.raises(docker.errors.APIError):
            self.client.inspect_image('gensokyo.jp//kirisame')

    def test_649_handle_timeout_value_none(self):
        # timeout=None must mean "no timeout", not a request error.
        self.client.timeout = None
        ctnr = self.client.create_container(TEST_IMG, ['sleep', '2'])
        self.client.start(ctnr)
        self.client.stop(ctnr)

    def test_715_handle_user_param_as_int_value(self):
        # user may be given as an int UID; the container should run as it.
        ctnr = self.client.create_container(TEST_IMG, ['id', '-u'], user=1000)
        self.client.start(ctnr)
        self.client.wait(ctnr)
        logs = self.client.logs(ctnr)
        if six.PY3:
            logs = logs.decode('utf-8')
        assert logs == '1000\n'

    def test_792_explicit_port_protocol(self):
        # Bindings keyed by '2000/tcp' and '2000/udp' must map independently,
        # and a bare port must default to tcp.
        tcp_port, udp_port = random.sample(range(9999, 32000), 2)
        ctnr = self.client.create_container(
            TEST_IMG, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
            host_config=self.client.create_host_config(
                port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
            )
        )
        self.tmp_containers.append(ctnr)
        self.client.start(ctnr)
        assert self.client.port(
            ctnr, 2000
        )[0]['HostPort'] == six.text_type(tcp_port)
        assert self.client.port(
            ctnr, '2000/tcp'
        )[0]['HostPort'] == six.text_type(tcp_port)
        assert self.client.port(
            ctnr, '2000/udp'
        )[0]['HostPort'] == six.text_type(udp_port)
| apache-2.0 |
janeen666/mi-instrument | mi/idk/package_driver.py | 11 | 14932 | """
@file coi-services/mi.idk/package_driver.py
@author Bill French
@brief Main script class for running the package_driver process
"""
import sys
import os.path
import zipfile
import subprocess
import shutil
import re
import yaml
from mi.core.log import get_logger ; log = get_logger()
from mi.idk import prompt
from mi.idk.metadata import Metadata
from mi.idk.nose_test import NoseTest
from mi.idk.driver_generator import DriverGenerator
from mi.idk.egg_generator import EggGenerator
from mi.idk.exceptions import ValidationFailure
from mi.idk.exceptions import InvalidParameters
from mi.idk.exceptions import GitCommandException
REPODIR = '/tmp/repoclone'
class PackageManifest(object):
    """
    Object to create and store a package file manifest
    """

    ###
    # Configuration
    ###
    def manifest_file(self):
        """Base name of the manifest file."""
        return "file.lst"

    def manifest_path(self):
        """Full path of the manifest file inside the driver's IDK directory."""
        return "%s/%s" % (self.metadata.idk_dir(), self.manifest_file())

    ###
    # Public Methods
    ###
    def __init__(self, metadata):
        """
        @brief ctor
        @param metadata driver metadata providing the IDK directory
        """
        self.metadata = metadata
        self.data = {}

    def add_file(self, source, description=None):
        """
        @brief Add a file to the file manifest and persist immediately
        @param source path to the file in the archive
        @param description one line description of the file
        """
        log.debug( " ++ Adding " + source + " to manifest")
        self.data[source] = description or ''
        self.save()

    def serialize(self):
        """
        @brief Serialize PackageManifest object data into a yaml string.
        @retval yaml string
        """
        return yaml.dump(self.data, default_flow_style=False)

    def save(self):
        """
        @brief Write YAML file with package manifest.
        """
        idk_dir = self.metadata.idk_dir()
        if not os.path.exists(idk_dir):
            os.makedirs(idk_dir)
        with open(self.manifest_path(), 'w') as handle:
            handle.write(self.serialize())
class PackageDriver(object):
"""
Main class for running the package driver process.
"""
###
# Configuration
###
    def string_file(self):
        # Name of the YAML string-resource file bundled with the package.
        return "strings.yml"
    def log_file(self):
        # Base name of the qualification-test log stored in the package.
        return "qualification.log"
    def log_path(self):
        # Absolute path of the qualification log inside the IDK directory.
        return "%s/%s" % (self.metadata.idk_dir(), self.log_file())
    def build_name(self):
        # Canonical '<make>_<model>_<name>' identifier for this driver build.
        return "%s_%s_%s" % (self.metadata.driver_make,
                             self.metadata.driver_model,
                             self.metadata.driver_name)
    def archive_file(self):
        # File name of the driver package zip: '<build>-<version>-driver.zip'.
        return "%s-%s-driver.zip" % (self.build_name(),
                                     self.metadata.version)
    def archive_path(self):
        # The archive is written into the current user's home directory.
        return os.path.join(os.path.expanduser("~"), self.archive_file())
    def get_metadata(self):
        """Load this driver's metadata from the cloned repository.

        Uses the currently selected driver (make/model/name) to locate the
        same driver inside the clone under REPODIR, caches the result on
        self.metadata and returns it.
        """
        # get which dataset agent is selected from the current metadata, use
        # this to get metadata from the cloned repo
        tmp_metadata = Metadata()
        # read metadata from the cloned repo
        self.metadata = Metadata(tmp_metadata.driver_make,
                                 tmp_metadata.driver_model,
                                 tmp_metadata.driver_name,
                                 REPODIR + '/marine-integrations')
        return self.metadata
    def get_nose_test(self):
        # Factory hook; requires self.metadata to be populated first.
        return NoseTest(self.metadata, log_file=self.log_path())
    def get_driver_generator(self):
        # Factory hook for the driver source generator.
        return DriverGenerator(self.metadata)
    def get_egg_generator(self):
        # Factory hook for the python egg generator.
        return EggGenerator(self.metadata)
###
# Public Methods
###
    def __init__(self):
        """
        @brief ctor
        """
        # Lazily-created zip archive handle and manifest.
        self._zipfile = None
        self._manifest = None
        self._compression = None

        # Set compression level
        # NOTE(review): zipfile_compression() is defined elsewhere in this
        # class (outside this view); it is expected to set self._compression.
        self.zipfile_compression()
def run_qualification_tests(self):
"""
@brief Run all qualification tests for the driver and store the results for packaging
"""
log.info("-- Running qualification tests")
test = self.get_nose_test(self.metadata, log_file=self.log_path())
test.report_header()
if(test.run_qualification()):
log.info(" ++ Qualification tests passed")
return True
else:
log.error("Qualification tests have fail! No package created.")
return False
    def clone_repo(self):
        """
        clone the ooici repository into a temp location and navigate to it

        Side effects: removes any previous clone under REPODIR and changes
        the process working directory into the fresh clone.
        @raise GitCommandException if the clone fails
        """
        # make a temp dir to put the clone in
        if not os.path.exists(REPODIR):
            os.mkdir(REPODIR)
        os.chdir(REPODIR)
        # remove an old clone if one exists, start clean
        if os.path.exists(REPODIR + '/marine-integrations'):
            shutil.rmtree(REPODIR + '/marine-integrations')

        # clone the ooici repository into a temporary location
        log.debug('Attempting to clone repository into %s, REPODIR set to %s',
                  os.getcwd(), REPODIR)
        # NOTE(review): os.system() returns the wait status, which is negative
        # only on signal death -- most git failures pass this check and are
        # caught by the directory-existence test below instead.
        ret = os.system('git clone git@github.com:ooici/marine-integrations.git')
        if ret < 0:
            raise GitCommandException("Bad return from git command")

        # if the directory doesn't exist, something went wrong with cloning
        if not os.path.exists(REPODIR + '/marine-integrations'):
            raise GitCommandException('Error creating ooici repository clone with base: %s' % REPODIR)

        # navigate into the cloned repository
        os.chdir(REPODIR + '/marine-integrations')
        log.debug('in cloned repository')
    def get_repackage_version(self, tag_base):
        """
        Get the driver version the user wants to repackage

        @param tag_base '<make>_<model>_<name>' base used to build the
            release tag that is looked up and checked out.
        @raise InvalidParameters if no tag exists for the chosen version.
        """
        # suggest the current driver version as default
        repkg_version = prompt.text( 'Driver Version to re-package', self.metadata.version )

        # confirm this version has the correct format
        self._verify_version(repkg_version)

        # check to make sure this driver version exists
        tag_name = 'release_' + tag_base + '_' + repkg_version.replace('.', '_')
        cmd = 'git tag -l ' + tag_name
        # find out if this tag name exists
        output = subprocess.check_output(cmd, shell=True)
        if len(output) > 0:
            # this tag exists, check it out
            os.system('git checkout tags/' + tag_name)
        else:
            log.error('No driver version %s found', tag_name)
            # NOTE(review): %-style args are passed to the exception
            # constructor and are never interpolated into the message.
            raise InvalidParameters('No driver version %s found', tag_name)
    def make_branch(self, base_name):
        """
        Make a new branch for this release and tag it with the same name so we
        can get back to it
        @param base_name - the base name for this instrument used to make the
        branch and tag names.  The base should have the form:
        '<driver_name>_<driver_version>', where the version has the format X_X_X.
        This is equal to the branch name, and the tag will have 'release_'
        prepended to the base name.
        """
        # create a new branch name and check it out
        cmd = 'git checkout -b ' + base_name
        output = subprocess.check_output(cmd, shell=True)
        log.debug('created new branch %s: %s', base_name, output)
        # tag the initial branch so that we can get back to it later
        cmd = 'git tag ' + 'release_' + base_name
        output = subprocess.check_output(cmd, shell=True)
        log.debug('created new tag %s: %s', 'release_' + base_name, output)
def update_version(self):
    """
    Update the driver version for this package. By default increment by one.
    After updating the metadata file, commit the change to git.

    @retval the new version string
    @raise InvalidParameters if a release tag already exists for the
           requested version (use --repackage in that case).
    """
    # suggest the current version with the last (micro) field incremented
    last_dot = self.metadata.version.rfind('.')
    last_version = int(self.metadata.version[last_dot+1:])
    suggest_version = self.metadata.version[:last_dot+1] + str(last_version + 1)
    new_version = prompt.text('Update Driver Version', suggest_version)
    # confirm this version has the correct format
    self._verify_version(new_version)
    if new_version != self.metadata.version:
        # search for the tag for this version, find out if it already exists
        cmd = 'git tag -l ' + 'release_' + self.build_name() + '_' + new_version.replace('.', '_')
        output = subprocess.check_output(cmd, shell=True)
        if len(output) > 0:
            # BUG FIX: format the version into the message with %; the
            # exception constructor does not apply printf-style
            # interpolation to extra positional arguments.
            raise InvalidParameters("Version %s already exists. To repackage, run package driver with the --repackage option" % new_version)
        # set the new driver version in the metadata
        self.metadata.set_driver_version(new_version)
        # commit the changed file to git
        cmd = 'git commit ' + str(self.metadata.metadata_path()) + ' -m \'Updated metadata driver version\''
        os.system(cmd)
    return new_version
def package_driver(self):
    """
    @brief Store driver files in a zip package

    Thin wrapper around _store_package_files() that logs the packaging step.
    """
    log.info("-- Building driver package")
    self._store_package_files()
def run(self):
    """
    Top-level packaging workflow: clone the repo, choose/bump a version (or
    pick one to re-package), optionally run qualification tests, build the
    package and optionally push the version-bump commit.
    """
    print "*** Starting Driver Packaging Process***"

    # store the original directory since we will be navigating away from it
    original_dir = os.getcwd()

    # first create a temporary clone of ooici to work with
    self.clone_repo()

    self.metadata = self.get_metadata()

    if "--repackage" in sys.argv:
        # re-package an already-released version: just check out its tag
        self.get_repackage_version(self.build_name())
    else:
        # normal path: bump the version, then branch + tag the release
        new_version = self.update_version()
        base_name = self.build_name() + '_' + new_version.replace('.', '_')
        self.make_branch(base_name)

    if "--no-test" in sys.argv:
        # record that tests were skipped so the packaged log file still exists
        f = open(self.log_path(), "w")
        f.write("Tests manually bypassed with --no-test option\n")
        f.close()
        self.package_driver()
    else:
        # only package if qualification tests pass
        if(self.run_qualification_tests()):
            self.package_driver()

    # pushing makes no sense when repackaging (nothing new was committed)
    if not "--no-push" in sys.argv and not "--repackage" in sys.argv:
        cmd = 'git push'
        output = subprocess.check_output(cmd, shell=True)
        if len(output) > 0:
            log.debug('git push returned: %s', output)

    # go back to the original directory
    os.chdir(original_dir)

    print "Package Created: " + self.archive_path()
def zipfile(self):
    """
    @brief Return the ZipFile object, lazily opening the archive for
           writing on first access.
    @retval ZipFile object
    """
    if self._zipfile:
        return self._zipfile
    # first access: create the archive in write mode
    self._zipfile = zipfile.ZipFile(self.archive_path(), mode="w")
    return self._zipfile
def zipfile_compression(self):
    """
    @brief What type of compression should we use for the package file.
           If the zlib module is available we compress (ZIP_DEFLATED),
           otherwise we fall back to plain storage (ZIP_STORED).
    @retval Compression type
    """
    if self._compression:
        return self._compression
    try:
        import zlib  # only probing for availability; name is unused
        self._compression = zipfile.ZIP_DEFLATED
        log.info("Setting compression level to deflated")
    except ImportError:
        # BUG FIX: narrowed the bare `except` to ImportError so real
        # errors are not silently swallowed.
        log.info("Setting compression level to store only")
        self._compression = zipfile.ZIP_STORED
    # BUG FIX: the first call previously returned None (no return after
    # computing the value), so ZipFile.write() received compress_type=None.
    return self._compression
def manifest(self):
    """
    @brief Return the PackageManifest object, creating it on first access.
    @retval PackageManifest object
    """
    if self._manifest:
        return self._manifest
    # first access: build the manifest from the current metadata
    self._manifest = PackageManifest(self.metadata)
    return self._manifest
###
# Private Methods
###
def _store_package_files(self):
    """
    @brief Store all files in zip archive and add them to the manifest file.

    Adds, in order: the python egg, the metadata file, the qualification
    test log, the optional driver string file, any developer resource
    files, and finally the manifest itself.
    """
    # make sure metadata is up to date
    self.metadata = self.get_metadata()
    self.generator = self.get_driver_generator()
    egg_generator = self.get_egg_generator()
    egg_file = egg_generator.save()

    # Add egg
    self._add_file(egg_file, 'egg', 'python driver egg package')

    # Add the package metadata file
    self._add_file(self.metadata.metadata_path(), description = 'package metadata')

    # Add the qualification test log
    self._add_file(self.log_path(), description = 'qualification tests results')

    # Store parameter/command string description file
    str_path = "%s/%s" % (self.generator.resource_dir(), self.string_file())
    if os.path.exists(str_path):
        self._add_file(str_path, 'resource', 'driver string file')

    # Store additional resource files
    self._store_resource_files()

    # Finally save the manifest file. This must be last of course:
    # every file added above has to be recorded in it before it is written.
    self._add_file(self.manifest().manifest_path(), description = 'package manifest file')
def _store_resource_files(self):
    """
    @brief Store additional files added by the driver developer. These
           files live in the driver resource dir; the driver string file
           is skipped because it is packaged separately.
    """
    resource_dir = os.path.join(self.metadata.relative_driver_path(), "resource")
    log.debug(" -- Searching for developer added resource files in dir: %s",
              resource_dir)
    stringfile = self.string_file()

    if not os.path.exists(resource_dir):
        log.debug(" --- No resource directory found, skipping...")
        return

    for fname in os.listdir(resource_dir):
        if fname == stringfile:
            continue
        log.debug(" ++ found: " + fname)
        desc = prompt.text('Describe ' + fname)
        self._add_file(resource_dir + "/" + fname, 'resource', desc)
def _add_file(self, source, destdir=None, description=None):
    """
    @brief Add a file to the zip package and record it in the manifest.

    @param source path of the file to archive
    @param destdir optional directory inside the archive to place it in
    @param description manifest description for the file
    """
    filename = os.path.basename(source)
    # place the file under destdir inside the archive when one is given
    dest = "%s/%s" % (destdir, filename) if destdir else filename
    log.debug("archive %s to %s" % (filename, dest))
    self.manifest().add_file(dest, description)
    self.zipfile().write(source, dest, self.zipfile_compression())
def _verify_version(self, version=None):
    """
    Ensure we have a well-formed x.x.x version number.

    @param version version string to check; defaults to the metadata version
    @raise ValidationFailure if the version is missing or malformed.
    """
    if version is None:
        version = self.metadata.version
    if not version:
        raise ValidationFailure("Driver version required in metadata")
    # anchored match: exactly three dot-separated numeric fields
    # (raw string avoids the invalid-escape pitfall of "\d" literals)
    if not re.match(r"^\d+\.\d+\.\d+$", "%s" % version):
        raise ValidationFailure("Version format incorrect '%s', should be x.x.x" % version)
if __name__ == '__main__':
    # Script entry point: build the packager and run the full workflow.
    app = PackageDriver()
    app.run()
| bsd-2-clause |
JohnVCS/ModifiedDosocsWithMavenDependency | dosocs2/mavenDepUtil.py | 1 | 3459 | #!/usr/bin/python
# Copyright (C) 2016 Jesse Moseman, and John Carlo B. Viernes IV
#
# This file is part of fossologyFunTime.
#
# fossologyFunTime is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# fossologyFunTime is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fossologyFunTime. If not, see <http://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: GPL-2.0+
import subprocess
import os

# Shared sink used to silence stdout of the mvn subprocess calls below.
# NOTE(review): opened at import time and never closed; POSIX-only path —
# consider os.devnull / subprocess.DEVNULL if portability matters.
devnull=open('/dev/null','w')
# creates the temporary directory to store the jar files
def createTempDirectoryIfDoesntExist():
    """Recreate /tmp/mydep as an empty directory, wiping any previous one."""
    import shutil
    target = r'/tmp/mydep'
    # start from a clean slate: drop any leftover contents first
    if os.path.exists(target):
        shutil.rmtree(target)
    os.makedirs(target)
# copies depedencies to folder
def copyDependencyToTempFolder(pom_path):
    """Copy the project's Maven dependencies (source jars) to /tmp/mydep.

    @param pom_path path to the project's pom.xml, passed to mvn -f
    Blocks until the mvn dependency:copy-dependencies plugin finishes;
    mvn's stdout is discarded via the module-level devnull.
    """
    copyDepCmd=["mvn","-f",pom_path,"dependency:copy-dependencies","-DoutputDirectory=/tmp/mydep","-Dclassifier=sources"]
    print copyDepCmd
    copyDepMvnPluginProcess=subprocess.call(copyDepCmd, stdout=devnull)
# creates the graphML
# graphML is one of the supported output type format of Maven
def createGraphMl(pom_path):
    """Write the project's Maven dependency tree to /tmp/test.graphml.

    @param pom_path path to the project's pom.xml, passed to mvn -f
    """
    cmd = ["mvn", "-f", pom_path, "dependency:tree",
           "-DoutputFile=/tmp/test.graphml", "-DoutputType=graphml"]
    # subprocess.call(...) blocks until mvn finishes; stdout is discarded
    subprocess.call(cmd, stdout=devnull)
# parses graphml file and returns dependency tuples
# basically it shows the parent-child relationship in a sort of like a pair
def parseGraphMl():
    """Parse /tmp/test.graphml and return (parent, child) dependency pairs.

    @return list of (parent-label, child-label) tuples, one per edge
    NOTE(review): requires the third-party networkx package and assumes
    createGraphMl() has already produced /tmp/test.graphml -- confirm the
    caller ordering before reuse.
    """
    import networkx
    import os.path
    import time
    graph=networkx.read_graphml("/tmp/test.graphml")
    # optional argument data="NodeLabel" since that is what we need
    # nodes grabs all the nodes and nodelabel, and then returns a set of tuples
    # dict changes the comma to colon so we can grab the id's and labels
    # essentially, the purpose is to map the node-id to package name
    nodesDict=dict(graph.nodes(data="NodeLabel"))
    # print(dict(graph.nodes(data="NodeLabel")))
    edgeLabels=[]
    for e1,e2 in graph.edges():
        # translate each edge's node ids into their human-readable labels
        edgeLabels.append((nodesDict[e1]['label'],nodesDict[e2]['label']))
    return edgeLabels
def createDocumentForArtifact(artifact):
    """Run 'dosocs2 oneshot' on a single artifact file (blocking)."""
    cmd = ["dosocs2", "oneshot", artifact]
    # subprocess.call(...) blocks until dosocs2 finishes
    subprocess.call(cmd)
def createDocumentsForDepedencies():
    """Run 'dosocs2 oneshot' for every dependency jar under /tmp/mydep."""
    for dep in os.listdir('/tmp/mydep'):
        # subprocess.call(...) blocks until each dosocs2 run completes
        subprocess.call(["dosocs2", "oneshot", "mydep/" + dep])
# getlastLine=subprocess.Popen(["sed" ,"1,4d"], stdin=nomosProcess.stdout,stdout=subprocess.PIPE)
# end_of_pipe=deleteFirst4LinesProcess.stdout
# return end_of_pipe
def getDepAndGenDocsForDeps(pom_path):
    """Reset the temp dir and fetch the project's Maven dependencies.

    @param pom_path path to the project's pom.xml
    NOTE(review): document generation is currently disabled (commented out).
    """
    createTempDirectoryIfDoesntExist()
    copyDependencyToTempFolder(pom_path)
    # createDocumentsForDepedencies()
# main method
if __name__ == '__main__':
    # get dependencies and generate documents
    # getDepAndGenDocsForDeps()
    #createGraphMl()
    # Print every (parent, child) dependency edge found in the graphml file.
    edgeLabels=parseGraphMl()
    for e in edgeLabels:
        print e
| gpl-2.0 |
sodafree/backend | django/views/decorators/cache.py | 83 | 3975 | from functools import wraps
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
    """
    Decorator for views that tries getting the page from the cache and
    populates the cache if the page isn't in the cache yet.

    The cache is keyed by the URL and some data from the headers.
    Additionally there is the key prefix that is used to distinguish different
    cache areas in a multi-site setup. You could use the
    sites.get_current().domain, for example, as that is unique across a Django
    project.

    Additionally, all headers from the response's Vary header will be taken
    into account on caching -- just like the middleware does.
    """
    # We need backwards compatibility with code which spells it this way:
    #   def my_view(): pass
    #   my_view = cache_page(my_view, 123)
    # and this way:
    #   my_view = cache_page(123)(my_view)
    # and this:
    #   my_view = cache_page(my_view, 123, key_prefix="foo")
    # and this:
    #   my_view = cache_page(123, key_prefix="foo")(my_view)
    # and possibly this way (?):
    #   my_view = cache_page(123, my_view)
    # and also this way:
    #   my_view = cache_page(my_view)
    # and also this way:
    #   my_view = cache_page()(my_view)

    # We also add some asserts to give better error messages in case people are
    # using other ways to call cache_page that no longer work.
    cache_alias = kwargs.pop('cache', None)
    key_prefix = kwargs.pop('key_prefix', None)
    assert not kwargs, "The only keyword arguments are cache and key_prefix"

    def warn():
        import warnings
        warnings.warn('The cache_page decorator must be called like: '
                      'cache_page(timeout, [cache=cache name], [key_prefix=key prefix]). '
                      'All other ways are deprecated.',
                      PendingDeprecationWarning,
                      stacklevel=3)

    # Dispatch on how many positional arguments were given; every branch
    # except "The One True Way" is a deprecated calling convention.
    if len(args) > 1:
        assert len(args) == 2, "cache_page accepts at most 2 arguments"
        warn()
        if callable(args[0]):
            # cache_page(my_view, 123, ...)
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[1], cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
        elif callable(args[1]):
            # cache_page(123, my_view, ...)
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)(args[1])
        else:
            assert False, "cache_page must be passed a view function if called with two arguments"
    elif len(args) == 1:
        if callable(args[0]):
            # cache_page(my_view): decorate directly with default timeout
            warn()
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
        else:
            # The One True Way: cache_page(timeout, ...)(my_view)
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)
    else:
        # cache_page()(my_view)
        warn()
        return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)
def cache_control(**kwargs):
    """
    Decorator factory: patch the given Cache-Control directives onto every
    response returned by the decorated view.
    """
    def _cache_controller(viewfunc):
        @wraps(viewfunc, assigned=available_attrs(viewfunc))
        def _cache_controlled(request, *args, **kw):
            # call the view first, then stamp the cache headers on its response
            resp = viewfunc(request, *args, **kw)
            patch_cache_control(resp, **kwargs)
            return resp
        return _cache_controlled
    return _cache_controller
def never_cache(view_func):
    """
    Decorator that adds headers to a response so that it will
    never be cached.
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view_func(request, *args, **kwargs):
        # mark the response as uncacheable before handing it back
        resp = view_func(request, *args, **kwargs)
        add_never_cache_headers(resp)
        return resp
    return _wrapped_view_func
| bsd-3-clause |
yanikou19/pymatgen | pymatgen/entries/tests/test_compatibility.py | 1 | 22208 | # coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Mar 19, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 19, 2012"
import os
import unittest
from pymatgen.entries.compatibility import MaterialsProjectCompatibility, \
MITCompatibility, AqueousCorrection, MITAqueousCompatibility, MaterialsProjectAqueousCompatibility
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from pymatgen import Composition, Lattice, Structure, Element
class MaterialsProjectCompatibilityTest(unittest.TestCase):
    """Tests for the Materials Project GGA/GGA+U energy-correction scheme."""

    def setUp(self):
        # Two GGA+U runs with the MP-standard Fe U value (5.3 eV) ...
        self.entry1 = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.entry2 = ComputedEntry(
            'Fe3O4', -2, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        # ... and one with a non-standard U (4.3) that the MP scheme should
        # reject (see test_process_entries).
        self.entry3 = ComputedEntry(
            'FeO', -2, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 4.3, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})

    def test_process_entry(self):
        compat = MaterialsProjectCompatibility()
        ggacompat = MaterialsProjectCompatibility("GGA")

        # Correct parameters: accepted by +U scheme, rejected by GGA-only
        self.assertIsNotNone(compat.process_entry(self.entry1))
        self.assertIsNone(ggacompat.process_entry(self.entry1))

        # Correct parameters for a plain GGA run: the reverse holds
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': False, "hubbards": {}, 'run_type': 'GGA',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))
        self.assertIsNotNone(ggacompat.process_entry(entry))

        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNotNone(compat.process_entry(entry))

        # Check actual correction: 2 Fe advanced corrections + 3 O gas
        # corrections (values are per-atom, in eV)
        self.assertAlmostEqual(compat.process_entry(entry).correction,
                               - 2.733 * 2 - 0.70229 * 3)

        entry = ComputedEntry(
            'FeF3', -2, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'F': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE F 08Apr2002']})
        self.assertIsNotNone(compat.process_entry(entry))

        # Check actual correction: only the single Fe correction applies
        self.assertAlmostEqual(compat.process_entry(entry).correction, -2.733)

        # Wrong U value
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 5.2, 'O': 0}, 'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))

        # GGA run of U
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': False, 'hubbards': None,
                        'run_type': 'GGA',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))

        # GGA+U run of non-U
        entry = ComputedEntry(
            'Al2O3', -1, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Al': 5.3, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Al 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))

        # Materials project should not have a U for sulfides
        entry = ComputedEntry(
            'FeS2', -2, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'S': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE S 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))

        # Wrong psp
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))

        # Testing processing of elements.
        entry = ComputedEntry(
            'O', -1, 0.0,
            parameters={'is_hubbard': False, 'hubbards': {},
                        'potcar_symbols': ['PAW_PBE O 08Apr2002'],
                        'run_type': 'GGA'})
        entry = compat.process_entry(entry)
        # self.assertEqual(entry.entry_id, -8)
        self.assertAlmostEqual(entry.energy, -1)
        self.assertAlmostEqual(ggacompat.process_entry(entry).energy,
                               -1)

    def test_get_corrections_dict(self):
        compat = MaterialsProjectCompatibility()
        ggacompat = MaterialsProjectCompatibility("GGA")

        # Correct parameters
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        c = compat.get_corrections_dict(entry)
        self.assertAlmostEqual(c["MP Gas Correction"], -2.10687)
        self.assertAlmostEqual(c["MP Advanced Correction"], -5.466)

        # Without the +U flag the advanced (hubbard) correction must vanish.
        entry.parameters["is_hubbard"] = False
        del entry.parameters["hubbards"]
        c = ggacompat.get_corrections_dict(entry)
        self.assertNotIn("MP Advanced Correction", c)

    def test_process_entries(self):
        compat = MaterialsProjectCompatibility()
        entries = compat.process_entries([self.entry1, self.entry2,
                                          self.entry3])
        # entry3 carries a non-standard U value and is filtered out.
        self.assertEqual(len(entries), 2)
class MITCompatibilityTest(unittest.TestCase):
    """Tests for the MIT correction scheme (different U values from MP)."""

    def test_process_entry(self):
        compat = MITCompatibility()

        # Correct parameters: MIT uses U=4.0 for Fe oxides
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNotNone(compat.process_entry(entry))
        # 2 Fe advanced corrections + 3 O gas corrections
        self.assertAlmostEqual(compat.process_entry(entry).correction,
                               - 1.723 * 2 -0.66975*3)

        entry = ComputedEntry(
            'FeF3', -2, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'F': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
                                           'PAW_PBE F 08Apr2002']})
        self.assertIsNotNone(compat.process_entry(entry))

        # Check actual correction: only the single Fe correction applies
        self.assertAlmostEqual(compat.process_entry(entry).correction, -1.723)

        # MIT should not have a U for sulfides
        # (unlike MP, MIT *does* apply a sulfide U of 1.9 and accepts this)
        entry = ComputedEntry(
            'FeS2', -2, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 1.9, 'S': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
                                           'PAW_PBE S 08Apr2002']})
        self.assertIsNotNone(compat.process_entry(entry))
        self.assertAlmostEqual(compat.process_entry(entry).correction, -1.113)

        # Wrong U value
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 5.2, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))

        # GGA run
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': False,
                        'hubbards': None,
                        'run_type': 'GGA',
                        'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))

        # Wrong psp (MIT expects the plain Fe potcar, not Fe_pv)
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))

        # Testing processing of elements.
        entry = ComputedEntry(
            'O', -1, 0.0,
            parameters={'is_hubbard': False, 'hubbards': {},
                        'potcar_symbols': ['PAW_PBE O 08Apr2002'],
                        'run_type': 'GGA'})
        entry = compat.process_entry(entry)
        self.assertAlmostEqual(entry.energy, -1)
class OxideTypeCorrectionTest(unittest.TestCase):
    """Tests that the oxide-type (oxide/peroxide/superoxide/ozonide)
    detection drives the right O correction under MITCompatibility."""

    def setUp(self):
        self.compat = MITCompatibility()

    def test_no_struct_compat(self):
        # Without a structure, the oxide type from the `data` dict is trusted.
        lio2_entry_nostruct = ComputedEntry(Composition("Li2O4"), -3,
                                            data={"oxide_type": "superoxide"},
                                            parameters={'is_hubbard': False,
                                                        'hubbards': None,
                                                        'run_type': 'GGA',
                                                        'potcar_symbols':
                                                        ['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
        lio2_entry_corrected = self.compat.process_entry(lio2_entry_nostruct)
        # superoxide correction: -0.13893 eV per O atom
        self.assertAlmostEqual(lio2_entry_corrected.energy, -3 - 0.13893*4, 4)

    def test_process_entry_superoxide(self):
        # LiO2 structure with O-O distances characteristic of a superoxide
        el_li = Element("Li")
        el_o = Element("O")
        latt = Lattice([[3.985034, 0.0, 0.0],
                        [0.0, 4.881506, 0.0],
                        [0.0, 0.0, 2.959824]])
        elts = [el_li, el_li, el_o, el_o, el_o, el_o]
        coords = list()
        coords.append([0.500000, 0.500000, 0.500000])
        coords.append([0.0, 0.0, 0.0])
        coords.append([0.632568, 0.085090, 0.500000])
        coords.append([0.367432, 0.914910, 0.500000])
        coords.append([0.132568, 0.414910, 0.000000])
        coords.append([0.867432, 0.585090, 0.000000])
        struct = Structure(latt, elts, coords)
        lio2_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                                        'hubbards': None,
                                                        'run_type': 'GGA',
                                                        'potcar_symbols':
                                                        ['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
        lio2_entry_corrected = self.compat.process_entry(lio2_entry)
        self.assertAlmostEqual(lio2_entry_corrected.energy, -3 -0.13893*4, 4)

    def test_process_entry_peroxide(self):
        # Li2O2 structure; peroxide correction is -0.44317 eV per O atom
        latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884, 89.999674, 60.000510)
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
        coords = [[0.666656, 0.666705, 0.750001],
                  [0.333342, 0.333378, 0.250001],
                  [0.000001, 0.000041, 0.500001],
                  [0.000001, 0.000021, 0.000001],
                  [0.333347, 0.333332, 0.649191],
                  [0.333322, 0.333353, 0.850803],
                  [0.666666, 0.666686, 0.350813],
                  [0.666665, 0.666684, 0.149189]]
        struct = Structure(latt, elts, coords)
        li2o2_entry = ComputedStructureEntry(struct, -3,
                                             parameters={'is_hubbard': False,
                                                         'hubbards': None,
                                                         'run_type': 'GGA',
                                                         'potcar_symbols':
                                                         ['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
        li2o2_entry_corrected = self.compat.process_entry(li2o2_entry)
        self.assertAlmostEqual(li2o2_entry_corrected.energy, -3 - 0.44317 * 4, 4)

    def test_process_entry_ozonide(self):
        # LiO3: ozonides receive no O correction at all
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_o, el_o, el_o]
        latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911,
                                       133.847504, 102.228244, 95.477342)
        coords = [[0.513004, 0.513004, 1.000000],
                  [0.017616, 0.017616, 0.000000],
                  [0.649993, 0.874790, 0.775203],
                  [0.099587, 0.874790, 0.224797]]
        struct = Structure(latt, elts, coords)
        lio3_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                                        'hubbards': None,
                                                        'run_type': 'GGA',
                                                        'potcar_symbols':
                                                        ['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
        lio3_entry_corrected = self.compat.process_entry(lio3_entry)
        self.assertAlmostEqual(lio3_entry_corrected.energy, -3.0)

    def test_process_entry_oxide(self):
        # Li2O: ordinary oxide, -0.66975 eV per O atom
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_li, el_o]
        latt = Lattice.from_parameters(3.278, 3.278, 3.278,
                                       60, 60, 60)
        coords = [[0.25, 0.25, 0.25],
                  [0.75, 0.75, 0.75],
                  [0.0, 0.0, 0.0]]
        struct = Structure(latt, elts, coords)
        li2o_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                                        'hubbards': None,
                                                        'run_type': 'GGA',
                                                        'potcar_symbols':
                                                        ['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
        li2o_entry_corrected = self.compat.process_entry(li2o_entry)
        self.assertAlmostEqual(li2o_entry_corrected.energy, -3.0 -0.66975, 4)
class OxideTypeCorrectionNoPeroxideCorrTest(unittest.TestCase):
    """With correct_peroxide=False, every O is treated as a plain oxide
    (-0.66975 eV per O) regardless of the structure's actual oxide type."""

    def setUp(self):
        self.compat = MITCompatibility(correct_peroxide=False)

    def test_oxide_energy_corr(self):
        # Plain oxide Li2O: identical to the corrected-peroxide behaviour
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_li, el_o]
        latt = Lattice.from_parameters(3.278, 3.278, 3.278,
                                       60, 60, 60)
        coords = [[0.25, 0.25, 0.25],
                  [0.75, 0.75, 0.75],
                  [0.0, 0.0, 0.0]]
        struct = Structure(latt, elts, coords)
        li2o_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                                        'hubbards': None,
                                                        'run_type': 'GGA',
                                                        'potcar_symbols':
                                                        ['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
        li2o_entry_corrected = self.compat.process_entry(li2o_entry)
        self.assertAlmostEqual(li2o_entry_corrected.energy, -3.0 -0.66975, 4)

    def test_peroxide_energy_corr(self):
        # Li2O2: the peroxide-specific correction must NOT be applied; the
        # oxide correction is used instead.
        latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884, 89.999674, 60.000510)
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
        coords = [[0.666656, 0.666705, 0.750001],
                  [0.333342, 0.333378, 0.250001],
                  [0.000001, 0.000041, 0.500001],
                  [0.000001, 0.000021, 0.000001],
                  [0.333347, 0.333332, 0.649191],
                  [0.333322, 0.333353, 0.850803],
                  [0.666666, 0.666686, 0.350813],
                  [0.666665, 0.666684, 0.149189]]
        struct = Structure(latt, elts, coords)
        li2o2_entry = ComputedStructureEntry(struct, -3,
                                             parameters={'is_hubbard': False,
                                                         'hubbards': None,
                                                         'run_type': 'GGA',
                                                         'potcar_symbols':
                                                         ['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
        li2o2_entry_corrected = self.compat.process_entry(li2o2_entry)
        # assertRaises(AssertionError, ...) is a double negative: it proves
        # the peroxide value is NOT what was applied.
        self.assertRaises(AssertionError, self.assertAlmostEqual,
                          *(li2o2_entry_corrected.energy, -3 - 0.44317 * 4, 4))
        self.assertAlmostEqual(li2o2_entry_corrected.energy, -3 - 0.66975 * 4, 4)

    def test_ozonide(self):
        # LiO3: with peroxide correction disabled even ozonide O atoms get
        # the plain oxide correction (3 O atoms).
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_o, el_o, el_o]
        latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911,
                                       133.847504, 102.228244, 95.477342)
        coords = [[0.513004, 0.513004, 1.000000],
                  [0.017616, 0.017616, 0.000000],
                  [0.649993, 0.874790, 0.775203],
                  [0.099587, 0.874790, 0.224797]]
        struct = Structure(latt, elts, coords)
        lio3_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                                        'hubbards': None,
                                                        'run_type': 'GGA',
                                                        'potcar_symbols':
                                                        ['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
        lio3_entry_corrected = self.compat.process_entry(lio3_entry)
        self.assertAlmostEqual(lio3_entry_corrected.energy, -3.0 - 3 * 0.66975)
class AqueousCorrectionTest(unittest.TestCase):
    """Tests for AqueousCorrection driven by the MITCompatibility.yaml config."""

    def setUp(self):
        # load the correction parameters that ship next to the test package
        module_dir = os.path.dirname(os.path.abspath(__file__))
        fp = os.path.join(module_dir, os.path.pardir, "MITCompatibility.yaml")
        self.corr = AqueousCorrection(fp)

    def test_compound_energy(self):
        O2_entry = self.corr.correct_entry(ComputedEntry(Composition("O2"),
                                                         -4.9355 * 2))
        H2_entry = self.corr.correct_entry(ComputedEntry(Composition("H2"), 3))
        H2O_entry = self.corr.correct_entry(ComputedEntry(Composition("H2O"), 3))
        # formation energy H2 + 1/2 O2 -> H2O should match the experimental
        # value the correction is fitted to
        H2O_formation_energy = H2O_entry.energy - (H2_entry.energy +
                                                   O2_entry.energy / 2.0)
        self.assertAlmostEqual(H2O_formation_energy, -2.46, 2)

        # H2O entries are pinned to the same corrected energy regardless of
        # the (unreliable) computed input energy
        entry = ComputedEntry(Composition("H2O"), -16)
        entry = self.corr.correct_entry(entry)
        self.assertAlmostEqual(entry.energy, -14.916, 4)

        entry = ComputedEntry(Composition("H2O"), -24)
        entry = self.corr.correct_entry(entry)
        self.assertAlmostEqual(entry.energy, -14.916, 4)

        entry = ComputedEntry(Composition("Cl"), -24)
        entry = self.corr.correct_entry(entry)
        self.assertAlmostEqual(entry.energy, -24.344373, 4)
class TestMITAqueousCompatibility(unittest.TestCase):
    """Checks that MITAqueousCompatibility equals MITCompatibility followed
    by a separate AqueousCorrection pass."""

    def setUp(self):
        self.compat = MITCompatibility()
        self.aqcompat = MITAqueousCompatibility()
        module_dir = os.path.dirname(os.path.abspath(__file__))
        fp = os.path.join(module_dir, os.path.pardir, "MITCompatibility.yaml")
        self.aqcorr = AqueousCorrection(fp)

    def test_aqueous_compat(self):
        # LiOH structure used as a representative aqueous-phase compound
        el_li = Element("Li")
        el_o = Element("O")
        el_h = Element("H")
        latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000, 90.000000, 90.000000)
        elts = [el_h, el_h, el_li, el_li, el_o, el_o]
        coords = [[0.000000, 0.500000, 0.413969],
                  [0.500000, 0.000000, 0.586031],
                  [0.000000, 0.000000, 0.000000],
                  [0.500000, 0.500000, 0.000000],
                  [0.000000, 0.500000, 0.192672],
                  [0.500000, 0.000000, 0.807328]]
        struct = Structure(latt, elts, coords)
        lioh_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                                        'hubbards': None,
                                                        'run_type': 'GGA',
                                                        'potcar_symbols':
                                                        ['PAW_PBE Fe 17Jan2003', 'PAW_PBE O 08Apr2002', 'PAW_PBE H 15Jun2001']})
        # two-step path: MIT compat, then aqueous correction ...
        lioh_entry_compat = self.compat.process_entry(lioh_entry)
        lioh_entry_compat_aqcorr = self.aqcorr.correct_entry(lioh_entry_compat)
        # ... must equal the one-step combined scheme
        lioh_entry_aqcompat = self.aqcompat.process_entry(lioh_entry)
        self.assertAlmostEqual(lioh_entry_compat_aqcorr.energy, lioh_entry_aqcompat.energy, 4)
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Run all test cases defined in this module.
    unittest.main()
| mit |
darshanthaker/nupic | src/nupic/research/fast_temporal_memory.py | 23 | 6473 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal Memory implementation in Python.
"""
from nupic.research.temporal_memory import TemporalMemory
from nupic.bindings.algorithms import Connections, ConnectionsCell
class FastTemporalMemory(TemporalMemory):
"""
Class implementing the Temporal Memory algorithm.
Uses C++ Connections data structure for optimization.
"""
def __init__(self, *args, **kwargs):
    """Initialize exactly like TemporalMemory, then replace the
    connections object with the optimized C++ Connections binding."""
    super(FastTemporalMemory, self).__init__(*args, **kwargs)
    # Overwrite the pure-Python structure set up by the base class.
    self.connections = Connections(self.numberOfCells())
def burstColumns(self,
                 activeColumns,
                 predictedColumns,
                 prevActiveCells,
                 prevWinnerCells,
                 connections):
    """
    Phase 2: Burst unpredicted columns.

    Pseudocode:

    - for each unpredicted active column
      - mark all cells as active
      - mark the best matching cell as winner cell
        - (learning)
          - if it has no matching segment
            - (optimization) if there are prev winner cells
              - add a segment to it
          - mark the segment as learning

    @param activeColumns    (set)         Indices of active columns in `t`
    @param predictedColumns (set)         Indices of predicted columns in `t`
    @param prevActiveCells  (set)         Indices of active cells in `t-1`
    @param prevWinnerCells  (set)         Indices of winner cells in `t-1`
    @param connections      (Connections) Connectivity of layer

    @return (tuple) Contains:
                    `activeCells`      (set),
                    `winnerCells`      (set),
                    `learningSegments` (set)
    """
    activeCells = set()
    winnerCells = set()
    learningSegments = set()

    unpredictedColumns = activeColumns - predictedColumns

    for column in unpredictedColumns:
        cells = self.cellsForColumn(column)
        activeCells.update(cells)

        # C++ helper: pick the segment with the most previous-timestep
        # activity among this column's cells, subject to minThreshold.
        bestSegment = connections.mostActiveSegmentForCells(
            list(cells), list(prevActiveCells), self.minThreshold)

        if bestSegment is None:
            bestCell = self.leastUsedCell(cells, connections)
            # only grow a new segment if there is something to learn from
            if len(prevWinnerCells):
                bestSegment = connections.createSegment(bestCell)
        else:
            # TODO: For some reason, bestSegment.cell is garbage-collected after
            # this function returns. So we have to use the below hack. Figure out
            # why and clean up.
            bestCell = ConnectionsCell(bestSegment.cell.idx)

        winnerCells.add(bestCell)

        if bestSegment:
            learningSegments.add(bestSegment)

    return activeCells, winnerCells, learningSegments
def computePredictiveCells(self, activeCells, connections):
"""
Phase 4: Compute predictive cells due to lateral input
on distal dendrites.
Pseudocode:
- for each distal dendrite segment with activity >= activationThreshold
- mark the segment as active
- mark the cell as predictive
- for each distal dendrite segment with unconnected
activity >= minThreshold
- mark the segment as matching
- mark the cell as matching
Forward propagates activity from active cells to the synapses that touch
them, to determine which synapses are active.
@param activeCells (set) Indices of active cells in `t`
@param connections (Connections) Connectivity of layer
@return (tuple) Contains:
`activeSegments` (set),
`predictiveCells` (set),
`matchingSegments` (set),
`matchingCells` (set)
"""
activity = connections.computeActivity(list(activeCells),
self.connectedPermanence,
self.activationThreshold)
activeSegments = set(connections.activeSegments(activity))
predictiveCells = set(connections.activeCells(activity))
if self.predictedSegmentDecrement > 0:
activity = connections.computeActivity(list(activeCells),
0,
self.minThreshold)
matchingSegments = set(connections.activeSegments(activity))
matchingCells = set(connections.activeCells(activity))
else:
matchingSegments = set()
matchingCells = set()
return activeSegments, predictiveCells, matchingSegments, matchingCells
@staticmethod
def getCellIndex(cell):
return cell.idx
# ==============================
# Helper functions
# ==============================
def columnForCell(self, cell):
"""
Returns the index of the column that a cell belongs to.
@param cell (int) Cell index
@return (int) Column index
"""
self._validateCell(cell)
return int(cell.idx / self.cellsPerColumn)
def cellsForColumn(self, column):
"""
Returns the indices of cells that belong to a column.
@param column (int) Column index
@return (set) Cell indices
"""
self._validateColumn(column)
start = self.cellsPerColumn * column
end = start + self.cellsPerColumn
return set([ConnectionsCell(idx) for idx in xrange(start, end)])
def _validateCell(self, cell):
"""
Raises an error if cell index is invalid.
@param cell (int) Cell index
"""
if cell.idx >= self.numberOfCells() or cell.idx < 0:
raise IndexError("Invalid cell")
| agpl-3.0 |
queirozfcom/titanic | src/models/gendermodel.py | 3 | 2605 | """ This simple code is desinged to teach a basic user to read in the files in python, simply find what proportion of males and females survived and make a predictive model based on this
Author : AstroDave
Date : 18th September, 2012
"""
import csv as csv
import numpy as np
csv_file_object = csv.reader(open('train.csv', 'rb')) #Load in the csv file
header = csv_file_object.next() #Skip the fist line as it is a header
data=[] #Creat a variable called 'data'
for row in csv_file_object: #Skip through each row in the csv file
data.append(row[1:]) #adding each row to the data variable
data = np.array(data) #Then convert from a list to an array
#Now I have an array of 11 columns and 891 rows
#I can access any element I want so the entire first column would
#be data[0::,0].astype(np.flaot) This means all of the columen and column 0
#I have to add the astype command
#as when reading in it thought it was a string so needed to convert
number_passengers = np.size(data[0::,0].astype(np.float))
number_survived = np.sum(data[0::,0].astype(np.float))
proportion_survivors = number_passengers / number_survived
# I can now find the stats of all the women on board
women_only_stats = data[0::,3] == "female" #This finds where all the women are
men_only_stats = data[0::,3] != "female" #This finds where all the men are
# != means not equal
#I can now find for example the ages of all the women by just placing
#women_only_stats in the '0::' part of the array index. You can test it by
#placing it in the 4 column and it should all read 'female'
women_onboard = data[women_only_stats,0].astype(np.float)
men_onboard = data[men_only_stats,0].astype(np.float)
proportion_women_survived = np.sum(women_onboard) / np.size(women_onboard)
proportion_men_survived = np.sum(men_onboard) / np.size(men_onboard)
print 'Proportion of women who survived is %s' % proportion_women_survived
print 'Proportion of men who survived is %s' % proportion_men_survived
#Now I have my indicator I can read in the test file and write out
#if a women then survived(1) if a man then did not survived (0)
#1st Read in test
test_file_object = csv.reader(open('test.csv', 'rb'))
header = test_file_object.next()
#Now also open the a new file so we can write to it call it something
#descriptive
predictions_file = csv.writer(open("gendermodel.csv", "wb"))
predictions_file.writerow(["PassengerId", "Survived"])
for row in test_file_object:
if row[3] == 'female':
predictions_file.writerow([row[0], "1"])
else:
predictions_file.writerow([row[0], "0"]) | mit |
illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_3/tests/regressiontests/i18n/commands/compilation.py | 52 | 1032 | import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.management import CommandError
from django.core.management.commands.compilemessages import compile_messages
from django.test import TestCase
LOCALE='es_AR'
class MessageCompilationTests(TestCase):
    """Base fixture for message-compilation tests.

    Remembers the working directory so tests may ``os.chdir`` into the
    directory holding the test catalogs and be restored afterwards.
    """
    # Path of the compiled catalog, relative to the test directory.
    MO_FILE='locale/%s/LC_MESSAGES/django.mo' % LOCALE
    def setUp(self):
        # Save the cwd for restoration and locate this test module's dir.
        self._cwd = os.getcwd()
        self.test_dir = os.path.abspath(os.path.dirname(__file__))
    def tearDown(self):
        # Undo any os.chdir() a test performed.
        os.chdir(self._cwd)
class PoFileTests(MessageCompilationTests):
    """Tests of .po file handling by compile_messages."""
    def test_bom_rejection(self):
        """A .po file with a BOM must be rejected and produce no .mo file."""
        os.chdir(self.test_dir)
        # We don't use the django.core.management infrastructure (call_command()
        # et al) because CommandError's cause exit(1) there. We test the
        # underlying compile_messages function instead.
        out = StringIO()
        self.assertRaises(CommandError, compile_messages, out, locale=LOCALE)
        self.assertFalse(os.path.exists(self.MO_FILE))
| apache-2.0 |
rodrigoasmacedo/l10n-brazil | __unported__/l10n_br_purchase/__init__.py | 2 | 1422 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import res_company
import purchase
| agpl-3.0 |
carljm/django | django/middleware/clickjacking.py | 118 | 2051 | """
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
class XFrameOptionsMiddleware(MiddlewareMixin):
    """
    Middleware that sets the X-Frame-Options HTTP header in HTTP responses.

    Does not set the header if it's already set or if the response contains
    a xframe_options_exempt value set to True.

    By default, sets the X-Frame-Options header to 'SAMEORIGIN', meaning the
    response can only be loaded on a frame within the same site. To prevent the
    response from being loaded in a frame in any site, set X_FRAME_OPTIONS in
    your project's Django settings to 'DENY'.

    Note: older browsers will quietly ignore this header, thus other
    clickjacking protection techniques should be used if protection in those
    browsers is required.

    https://en.wikipedia.org/wiki/Clickjacking#Server_and_client
    """
    def process_response(self, request, response):
        """Return ``response`` with X-Frame-Options set unless it opted out."""
        # Don't set it if it's already in the response
        if response.get('X-Frame-Options') is not None:
            return response
        # Don't set it if they used @xframe_options_exempt
        if getattr(response, 'xframe_options_exempt', False):
            return response
        response['X-Frame-Options'] = self.get_xframe_options_value(request,
                                                                    response)
        return response
    def get_xframe_options_value(self, request, response):
        """
        Gets the value to set for the X_FRAME_OPTIONS header.

        By default this uses the value from the X_FRAME_OPTIONS Django
        settings. If not found in settings, defaults to 'SAMEORIGIN'.

        This method can be overridden if needed, allowing it to vary based on
        the request or response.
        """
        # Upper-cased so the setting may be written in any case.
        return getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN').upper()
| bsd-3-clause |
cainmatt/django | tests/gis_tests/geoapp/test_functions.py | 262 | 21654 | from __future__ import unicode_literals
import re
from decimal import Decimal
from django.contrib.gis.db.models import functions
from django.contrib.gis.geos import (
LineString, Point, Polygon, fromstr, geos_version_info,
)
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from ..utils import mysql, oracle, postgis, spatialite
from .models import City, Country, State, Track
@skipUnlessDBFeature("gis_enabled")
class GISFunctionsTests(TestCase):
"""
Testing functions from django/contrib/gis/db/models/functions.py.
Several tests are taken and adapted from GeoQuerySetTest.
Area/Distance/Length/Perimeter are tested in distapp/tests.
Please keep the tests in function's alphabetic order.
"""
fixtures = ['initial']
def test_asgeojson(self):
# Only PostGIS and SpatiaLite 3.0+ support GeoJSON.
if not connection.ops.geojson:
with self.assertRaises(NotImplementedError):
list(Country.objects.annotate(json=functions.AsGeoJSON('mpoly')))
return
pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
houston_json = (
'{"type":"Point","crs":{"type":"name","properties":'
'{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
)
victoria_json = (
'{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
'"coordinates":[-123.305196,48.462611]}'
)
chicago_json = (
'{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
'"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
)
if spatialite:
victoria_json = (
'{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],'
'"coordinates":[-123.305196,48.462611]}'
)
# Precision argument should only be an integer
with self.assertRaises(TypeError):
City.objects.annotate(geojson=functions.AsGeoJSON('point', precision='foo'))
# Reference queries and values.
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
# FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
self.assertEqual(
pueblo_json,
City.objects.annotate(geojson=functions.AsGeoJSON('point')).get(name='Pueblo').geojson
)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we want to include the CRS by using the `crs` keyword.
self.assertEqual(
houston_json,
City.objects.annotate(json=functions.AsGeoJSON('point', crs=True)).get(name='Houston').json
)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we include the bounding box by using the `bbox` keyword.
self.assertEqual(
victoria_json,
City.objects.annotate(
geojson=functions.AsGeoJSON('point', bbox=True)
).get(name='Victoria').geojson
)
# SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Chicago';
# Finally, we set every available keyword.
self.assertEqual(
chicago_json,
City.objects.annotate(
geojson=functions.AsGeoJSON('point', bbox=True, crs=True, precision=5)
).get(name='Chicago').geojson
)
@skipUnlessDBFeature("has_AsGML_function")
def test_asgml(self):
# Should throw a TypeError when tyring to obtain GML from a
# non-geometry field.
qs = City.objects.all()
with self.assertRaises(TypeError):
qs.annotate(gml=functions.AsGML('name'))
ptown = City.objects.annotate(gml=functions.AsGML('point', precision=9)).get(name='Pueblo')
if oracle:
# No precision parameter for Oracle :-/
gml_regex = re.compile(
r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml">'
r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
r'</gml:coordinates></gml:Point>'
)
elif spatialite and connection.ops.spatial_version < (3, 0, 0):
# Spatialite before 3.0 has extra colon in SrsName
gml_regex = re.compile(
r'^<gml:Point SrsName="EPSG::4326"><gml:coordinates decimal="\." '
r'cs="," ts=" ">-104.609251\d+,38.255001</gml:coordinates></gml:Point>'
)
else:
gml_regex = re.compile(
r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
)
self.assertTrue(gml_regex.match(ptown.gml))
if postgis:
self.assertIn(
'<gml:pos srsDimension="2">',
City.objects.annotate(gml=functions.AsGML('point', version=3)).get(name='Pueblo').gml
)
@skipUnlessDBFeature("has_AsKML_function")
def test_askml(self):
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
with self.assertRaises(TypeError):
City.objects.annotate(kml=functions.AsKML('name'))
# Ensuring the KML is as expected.
ptown = City.objects.annotate(kml=functions.AsKML('point', precision=9)).get(name='Pueblo')
self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)
@skipUnlessDBFeature("has_AsSVG_function")
def test_assvg(self):
with self.assertRaises(TypeError):
City.objects.annotate(svg=functions.AsSVG('point', precision='foo'))
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
# Even though relative, only one point so it's practically the same except for
# the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.annotate(svg=functions.AsSVG('point')).get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.annotate(svg=functions.AsSVG('point', relative=5)).get(name='Pueblo').svg)
@skipUnlessDBFeature("has_BoundingCircle_function")
def test_bounding_circle(self):
qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly')).order_by('name')
self.assertAlmostEqual(qs[0].circle.area, 168.89, 2)
self.assertAlmostEqual(qs[1].circle.area, 135.95, 2)
qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly', num_seg=12)).order_by('name')
self.assertAlmostEqual(qs[0].circle.area, 168.44, 2)
self.assertAlmostEqual(qs[1].circle.area, 135.59, 2)
@skipUnlessDBFeature("has_Centroid_function")
def test_centroid(self):
qs = State.objects.exclude(poly__isnull=True).annotate(centroid=functions.Centroid('poly'))
tol = 1.8 if mysql else (0.1 if oracle else 0.00001)
for state in qs:
self.assertTrue(state.poly.centroid.equals_exact(state.centroid, tol))
@skipUnlessDBFeature("has_Difference_function")
def test_difference(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(diff=functions.Difference('mpoly', geom))
# For some reason SpatiaLite does something screwy with the Texas geometry here.
if spatialite:
qs = qs.exclude(name='Texas')
for c in qs:
self.assertEqual(c.mpoly.difference(geom), c.diff)
@skipUnlessDBFeature("has_Difference_function")
def test_difference_mixed_srid(self):
"""Testing with mixed SRID (Country has default 4326)."""
geom = Point(556597.4, 2632018.6, srid=3857) # Spherical mercator
qs = Country.objects.annotate(difference=functions.Difference('mpoly', geom))
# For some reason SpatiaLite does something screwy with the Texas geometry here.
if spatialite:
qs = qs.exclude(name='Texas')
for c in qs:
self.assertEqual(c.mpoly.difference(geom), c.difference)
@skipUnlessDBFeature("has_Envelope_function")
def test_envelope(self):
countries = Country.objects.annotate(envelope=functions.Envelope('mpoly'))
for country in countries:
self.assertIsInstance(country.envelope, Polygon)
@skipUnlessDBFeature("has_ForceRHR_function")
def test_force_rhr(self):
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
st = State.objects.annotate(force_rhr=functions.ForceRHR('poly')).get(name='Foo')
self.assertEqual(rhr_rings, st.force_rhr.coords)
@skipUnlessDBFeature("has_GeoHash_function")
def test_geohash(self):
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.annotate(geohash=functions.GeoHash('point')).get(name='Houston')
h2 = City.objects.annotate(geohash=functions.GeoHash('point', precision=5)).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
self.assertEqual(ref_hash[:5], h2.geohash)
@skipUnlessDBFeature("has_Intersection_function")
def test_intersection(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(inter=functions.Intersection('mpoly', geom))
for c in qs:
if spatialite:
# When the intersection is empty, Spatialite returns None
expected = None
else:
expected = c.mpoly.intersection(geom)
self.assertEqual(c.inter, expected)
@skipUnlessDBFeature("has_MemSize_function")
def test_memsize(self):
ptown = City.objects.annotate(size=functions.MemSize('point')).get(name='Pueblo')
self.assertTrue(20 <= ptown.size <= 40) # Exact value may depend on PostGIS version
@skipUnlessDBFeature("has_NumGeom_function")
def test_num_geom(self):
# Both 'countries' only have two geometries.
for c in Country.objects.annotate(num_geom=functions.NumGeometries('mpoly')):
self.assertEqual(2, c.num_geom)
qs = City.objects.filter(point__isnull=False).annotate(num_geom=functions.NumGeometries('point'))
for city in qs:
# Oracle and PostGIS 2.0+ will return 1 for the number of
# geometries on non-collections, whereas PostGIS < 2.0.0 and MySQL
# will return None.
if (postgis and connection.ops.spatial_version < (2, 0, 0)) or mysql:
self.assertIsNone(city.num_geom)
else:
self.assertEqual(1, city.num_geom)
@skipUnlessDBFeature("has_NumPoint_function")
def test_num_points(self):
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
qs = Track.objects.annotate(num_points=functions.NumPoints('line'))
self.assertEqual(qs.first().num_points, 2)
if spatialite or mysql:
# Spatialite and MySQL can only count points on LineStrings
return
for c in Country.objects.annotate(num_points=functions.NumPoints('mpoly')):
self.assertEqual(c.mpoly.num_points, c.num_points)
if not oracle:
# Oracle cannot count vertices in Point geometries.
for c in City.objects.annotate(num_points=functions.NumPoints('point')):
self.assertEqual(1, c.num_points)
@skipUnlessDBFeature("has_PointOnSurface_function")
def test_point_on_surface(self):
# Reference values.
if oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
# FROM GEOAPP_COUNTRY;
ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
}
else:
# Using GEOSGeometry to compute the reference point on surface values
# -- since PostGIS also uses GEOS these should be the same.
ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
}
qs = Country.objects.annotate(point_on_surface=functions.PointOnSurface('mpoly'))
for country in qs:
tol = 0.00001 # Spatialite might have WKT-translation-related precision issues
self.assertTrue(ref[country.name].equals_exact(country.point_on_surface, tol))
@skipUnlessDBFeature("has_Reverse_function")
def test_reverse_geom(self):
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
track = Track.objects.annotate(reverse_geom=functions.Reverse('line')).get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), track.reverse_geom.coords)
@skipUnlessDBFeature("has_Scale_function")
def test_scale(self):
xfac, yfac = 2, 3
tol = 5 # The low precision tolerance is for SpatiaLite
qs = Country.objects.annotate(scaled=functions.Scale('mpoly', xfac, yfac))
for country in qs:
for p1, p2 in zip(country.mpoly, country.scaled):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
# Test float/Decimal values
qs = Country.objects.annotate(scaled=functions.Scale('mpoly', 1.5, Decimal('2.5')))
self.assertGreater(qs[0].scaled.area, qs[0].mpoly.area)
@skipUnlessDBFeature("has_SnapToGrid_function")
def test_snap_to_grid(self):
# Let's try and break snap_to_grid() with bad combinations of arguments.
for bad_args in ((), range(3), range(5)):
with self.assertRaises(ValueError):
Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
with self.assertRaises(TypeError):
Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
# Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
# from the world borders dataset he provides.
wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
'12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
'12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
'12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
'12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
'12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
'12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
'12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
# Because floating-point arithmetic isn't exact, we set a tolerance
# to pass into GEOS `equals_exact`.
tol = 0.000000001
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.1)
).get(name='San Marino').snap,
tol
)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.05, 0.23)
).get(name='San Marino').snap,
tol
)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr(
'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
)
self.assertTrue(
ref.equals_exact(
Country.objects.annotate(
snap=functions.SnapToGrid('mpoly', 0.05, 0.23, 0.5, 0.17)
).get(name='San Marino').snap,
tol
)
)
@skipUnlessDBFeature("has_SymDifference_function")
def test_sym_difference(self):
if geos_version_info()['version'] < '3.3.0':
self.skipTest("GEOS >= 3.3 required")
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(sym_difference=functions.SymDifference('mpoly', geom))
for country in qs:
# Ordering might differ in collections
self.assertSetEqual(set(g.wkt for g in country.mpoly.sym_difference(geom)),
set(g.wkt for g in country.sym_difference))
@skipUnlessDBFeature("has_Transform_function")
def test_transform(self):
# Pre-transformed points for Houston and Pueblo.
ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
prec = 3 # Precision is low due to version variations in PROJ and GDAL.
# Asserting the result of the transform operation with the values in
# the pre-transformed points.
h = City.objects.annotate(pt=functions.Transform('point', ptown.srid)).get(name='Pueblo')
self.assertEqual(2774, h.pt.srid)
self.assertAlmostEqual(ptown.x, h.pt.x, prec)
self.assertAlmostEqual(ptown.y, h.pt.y, prec)
@skipUnlessDBFeature("has_Translate_function")
def test_translate(self):
xfac, yfac = 5, -23
qs = Country.objects.annotate(translated=functions.Translate('mpoly', xfac, yfac))
for c in qs:
for p1, p2 in zip(c.mpoly, c.translated):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
# The low precision is for SpatiaLite
self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
# Some combined function tests
@skipUnlessDBFeature(
"has_Difference_function", "has_Intersection_function",
"has_SymDifference_function", "has_Union_function")
def test_diff_intersection_union(self):
"Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
geom = Point(5, 23, srid=4326)
qs = Country.objects.all().annotate(
difference=functions.Difference('mpoly', geom),
sym_difference=functions.SymDifference('mpoly', geom),
union=functions.Union('mpoly', geom),
)
# For some reason SpatiaLite does something screwey with the Texas geometry here.
# Also, it doesn't like the null intersection.
if spatialite:
qs = qs.exclude(name='Texas')
else:
qs = qs.annotate(intersection=functions.Intersection('mpoly', geom))
if oracle:
# Should be able to execute the queries; however, they won't be the same
# as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
# SpatiaLite).
return
for c in qs:
self.assertEqual(c.mpoly.difference(geom), c.difference)
if not spatialite:
self.assertEqual(c.mpoly.intersection(geom), c.intersection)
# Ordering might differ in collections
self.assertSetEqual(set(g.wkt for g in c.mpoly.sym_difference(geom)),
set(g.wkt for g in c.sym_difference))
self.assertSetEqual(set(g.wkt for g in c.mpoly.union(geom)),
set(g.wkt for g in c.union))
@skipUnlessDBFeature("has_Union_function")
def test_union(self):
geom = Point(-95.363151, 29.763374, srid=4326)
ptown = City.objects.annotate(union=functions.Union('point', geom)).get(name='Dallas')
tol = 0.00001
# Undefined ordering
expected1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)', srid=4326)
expected2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)', srid=4326)
self.assertTrue(expected1.equals_exact(ptown.union, tol) or expected2.equals_exact(ptown.union, tol))
| bsd-3-clause |
morissette/devopsdays-hackathon-2016 | venv/lib/python2.7/site-packages/flask/_compat.py | 783 | 2164 | # -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
# True when running under Python 2; selects which set of shims is defined.
PY2 = sys.version_info[0] == 2

# No-op decorator used on Python 3, where no __str__ wrapping is needed.
_identity = lambda x: x

if not PY2:
    # Python 3: the native str is the text type; dict accessors already
    # return views, so the iter* helpers just wrap them in iterators.
    text_type = str
    string_types = (str,)
    integer_types = (int, )
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
    from io import StringIO
    def reraise(tp, value, tb=None):
        # Re-raise `value`, attaching traceback `tb` when it differs from
        # the one already carried by the exception.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    implements_to_string = _identity
else:
    # Python 2: unicode is the text type; long exists; dicts have native
    # iterator methods.
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
    from cStringIO import StringIO
    # The 3-argument `raise` form is a syntax error on Python 3, so it is
    # hidden inside exec() to keep this module importable there.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
    def implements_to_string(cls):
        # Class decorator: route __str__ through __unicode__, UTF-8 encoded.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
def with_metaclass(meta, *bases):
    """Create a base class with metaclass *meta* without leaving a dummy
    class in the final MRO.

    A throwaway metaclass is used for exactly one level of class
    instantiation; when the user's class body is executed it replaces
    itself with *meta* applied to the intended *bases*.  ``__call__`` and
    ``__init__`` are pinned back to ``type``'s versions so internal type
    checks keep working while the stand-in is alive.  This is the same
    trick as ``six.with_metaclass``, with the advantage over simpler
    approaches that no dummy class survives in the resulting MRO.
    """
    class _Shim(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            # First invocation (building the stand-in base itself):
            # this_bases is None, so emit a plain placeholder type.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            # Second invocation (the user's class statement): hand
            # construction over to the real metaclass with the real bases.
            return meta(name, bases, d)
    return _Shim('temporary_class', None, {})
| gpl-3.0 |
ehashman/oh-mainline | vendor/packages/docutils/docutils/parsers/rst/languages/da.py | 120 | 3765 | # -*- coding: utf-8 -*-
# $Id: da.py 7678 2013-07-03 09:57:36Z milde $
# Author: E D
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Danish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'giv agt': 'attention',
u'pas på': 'caution',
u'kode': 'code',
u'kode-blok': 'code',
u'kildekode': 'code',
u'fare': 'danger',
u'fejl': 'error',
u'vink': 'hint',
u'vigtigt': 'important',
u'bemærk': 'note',
u'tips': 'tip',
u'advarsel': 'warning',
u'formaning': 'admonition',
u'sidebjælke': 'sidebar',
u'emne': 'topic',
u'linje-blok': 'line-block',
u'linie-blok': 'line-block',
u'parset-literal': 'parsed-literal',
u'rubrik': 'rubric',
u'epigraf': 'epigraph',
u'fremhævninger': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'questions': 'questions',
u'tabel': 'table',
u'csv-tabel': 'csv-table',
u'liste-tabel': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
u'meta': 'meta',
u'math (translation required)': 'math',
#'imagemap': 'imagemap',
u'billede': 'image',
u'figur': 'figure',
u'inkludér': 'include',
u'inkluder': 'include',
u'rå': 'raw',
u'erstat': 'replace',
u'unicode': 'unicode',
u'dato': 'date',
u'klasse': 'class',
u'rolle': 'role',
u'forvalgt-rolle': 'default-role',
u'titel': 'title',
u'indhold': 'contents',
u'sektnum': 'sectnum',
u'sektions-nummerering': 'sectnum',
u'sidehovede': 'header',
u'sidefod': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'target-notes (translation required)': 'target-notes',
u'restructuredtext-test-direktiv': 'restructuredtext-test-directive'}
"""Danish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'forkortelse': 'abbreviation',
u'fork': 'abbreviation',
u'akronym': 'acronym',
u'ac (translation required)': 'acronym',
u'kode': 'code',
u'indeks': 'index',
u'i': 'index',
u'subscript (translation required)': 'subscript',
u'sub (translation required)': 'subscript',
u'superscript (translation required)': 'superscript',
u'sup (translation required)': 'superscript',
u'titel-reference': 'title-reference',
u'titel': 'title-reference',
u't': 'title-reference',
u'pep-reference': 'pep-reference',
u'pep': 'pep-reference',
u'rfc-reference': 'rfc-reference',
u'rfc': 'rfc-reference',
u'emfase': 'emphasis',
u'kraftig': 'strong',
u'literal': 'literal',
u'math (translation required)': 'math',
u'navngivet-reference': 'named-reference',
u'anonym-reference': 'anonymous-reference',
u'fodnote-reference': 'footnote-reference',
u'citation-reference (translation required)': 'citation-reference',
u'substitutions-reference': 'substitution-reference',
u'target (translation required)': 'target',
u'uri-reference': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
u'rå': 'raw',}
"""Mapping of Danish role names to canonical role names for interpreted text.
"""
| agpl-3.0 |
pi19404/mbed | workspace_tools/dev/dsp_fir.py | 120 | 2714 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from numpy import sin, arange, pi
from scipy.signal import lfilter, firwin
from pylab import figure, plot, grid, show
#------------------------------------------------
# Create a signal for demonstration.
#------------------------------------------------
# 320 samples of (1000Hz + 15000 Hz) at 48 kHz
sample_rate = 48000.
nsamples = 320
F_1KHz = 1000.
A_1KHz = 1.0
F_15KHz = 15000.
A_15KHz = 0.5
# Time axis in seconds for the 320-sample window.
t = arange(nsamples) / sample_rate
# Test input: a 1 kHz tone plus a weaker 15 kHz tone (the latter should be
# removed by the 6 kHz low-pass filter below).
signal = A_1KHz * sin(2*pi*F_1KHz*t) + A_15KHz*sin(2*pi*F_15KHz*t)
#------------------------------------------------
# Create a FIR filter and apply it to signal.
#------------------------------------------------
# The Nyquist rate of the signal.
nyq_rate = sample_rate / 2.
# The cutoff frequency of the filter: 6KHz
cutoff_hz = 6000.0
# Length of the filter (number of coefficients, i.e. the filter order + 1)
numtaps = 29
# Use firwin to create a lowpass FIR filter
# (cutoff is passed normalized to the Nyquist rate, as firwin expects).
fir_coeff = firwin(numtaps, cutoff_hz/nyq_rate)
# Use lfilter to filter the signal with the FIR filter
filtered_signal = lfilter(fir_coeff, 1.0, signal)
#------------------------------------------------
# Plot the original and filtered signals.
#------------------------------------------------
# The first N-1 samples are "corrupted" by the initial conditions
warmup = numtaps - 1
# The phase delay of the filtered signal
# (warmup is even, so warmup/2 is exact under both Py2 integer division
# and Py3 true division -- same value either way).
delay = (warmup / 2) / sample_rate
figure(1)
# Plot the original signal
plot(t, signal)
# Plot the filtered signal, shifted to compensate for the phase delay
plot(t-delay, filtered_signal, 'r-')
# Plot just the "good" part of the filtered signal.  The first N-1
# samples are "corrupted" by the initial conditions.
plot(t[warmup:]-delay, filtered_signal[warmup:], 'g', linewidth=4)
grid(True)
show()
#------------------------------------------------
# Print values
#------------------------------------------------
def print_values(label, values):
    """Print *values* as a CMSIS-DSP C array initializer.

    Emits one line of the form:
        float32_t <label>[<len>]          = {+v1, +v2, ...}
    with the declaration left-padded to 30 columns and each value
    formatted as a sign-prefixed float with 10 decimal places.
    """
    var = "float32_t %s[%d]" % (label, len(values))
    # Parenthesized print: with a single argument this behaves identically
    # under Python 2 (print statement) and Python 3 (print function),
    # unlike the original bare 'print' statement which is Py2-only syntax.
    print("%-30s = {%s}" % (var, ', '.join(["%+.10f" % x for x in values])))
# Dump the test vectors in a form that can be pasted into a C source file
# (input signal, filter coefficients, and expected filter output).
print_values('signal', signal)
print_values('fir_coeff', fir_coeff)
print_values('filtered_signal', filtered_signal)
| apache-2.0 |
numericillustration/sdc-headnode | deps/restdown/tools/cutarelease.py | 19 | 21581 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2012 Trent Mick
"""cutarelease -- Cut a release of your project.
A script that will help cut a release for a git-based project that follows
a few conventions. It'll update your changelog (CHANGES.md), add a git
tag, push those changes, update your version to the next patch level release
and create a new changelog section for that new version.
Conventions:
- XXX
"""
# cutarelease's own version, kept in the tuple form that its own
# _parse_version_file() knows how to bump.
__version_info__ = (1, 0, 6)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
from os.path import join, dirname, normpath, abspath, exists, basename, splitext
from glob import glob
from pprint import pprint
import re
import codecs
import logging
import optparse
import json
#---- globals and config
log = logging.getLogger("cutarelease")  # module-wide logger
class Error(Exception):
    """Base class for user-facing failures raised by this script."""
    pass
#---- main functionality
def cutarelease(project_name, version_files, dry_run=False):
    """Cut a release.

    Workflow: guess/parse the version file(s), confirm with the user,
    sanity-check CHANGES.md, commit the "prepare for release" change,
    git tag + push, optionally publish to npm/pypi, then bump the version
    and changelog for subsequent development and push that too.

    @param project_name {str}
    @param version_files {list} List of paths to files holding the version
        info for this project.
        If none are given it attempts to guess the version file:
        package.json or VERSION.txt or VERSION or $project_name.py
        or lib/$project_name.py or $project_name.js or lib/$project_name.js.
        The version file can be in one of the following forms:
        - A .py file, in which case the file is expect to have a top-level
          global called "__version_info__" as follows. [1]
            __version_info__ = (0, 7, 6)
          Note that I typically follow that with the following to get a
          string version attribute on my modules:
            __version__ = '.'.join(map(str, __version_info__))
        - A .js file, in which case the file is expected to have a top-level
          global called "VERSION" as follows:
            ver VERSION = "1.2.3";
        - A "package.json" file, typical of a node.js npm-using project.
          The package.json file must have a "version" field.
        - TODO: A simple version file whose only content is a "1.2.3"-style version
          string.
    @param dry_run {bool} If true, log what would happen but make no
        commits, tags, pushes or publishes.

    [1]: This is a convention I tend to follow in my projects.
        Granted it might not be your cup of tea. I should add support for
        just `__version__ = "1.2.3"`. I'm open to other suggestions too.
    """
    dry_run_str = dry_run and " (dry-run)" or ""
    # Guess the version file when the caller didn't name one.
    if not version_files:
        log.info("guessing version file")
        candidates = [
            "package.json",
            "VERSION.txt",
            "VERSION",
            "%s.py" % project_name,
            "lib/%s.py" % project_name,
            "%s.js" % project_name,
            "lib/%s.js" % project_name,
        ]
        for candidate in candidates:
            if exists(candidate):
                version_files = [candidate]
                break
        else:
            raise Error("could not find a version file: specify its path or "
                "add one of the following to your project: '%s'"
                % "', '".join(candidates))
        log.info("using '%s' as version file", version_files[0])
    # The *first* version file is authoritative for the release version.
    parsed_version_files = [_parse_version_file(f) for f in version_files]
    version_file_type, version_info = parsed_version_files[0]
    version = _version_from_version_info(version_info)
    # Confirm
    if not dry_run:
        answer = query_yes_no("* * *\n"
            "Are you sure you want cut a %s release?\n"
            "This will involved commits and a push." % version,
            default="no")
        print "* * *"
        if answer != "yes":
            log.info("user abort")
            return
    log.info("cutting a %s release%s", version, dry_run_str)
    # Checks: Ensure there is a section in changes for this version.
    changes_path = "CHANGES.md"
    changes_txt, changes, nyr = parse_changelog(changes_path)
    #pprint(changes)
    top_ver = changes[0]["version"]
    if top_ver != version:
        raise Error("changelog '%s' top section says "
            "version %r, expected version %r: aborting"
            % (changes_path, top_ver, version))
    top_verline = changes[0]["verline"]
    # Missing "(not yet released)" usually means this version already shipped;
    # let the user decide whether to continue (supports recovery from a
    # half-finished earlier run).
    if not top_verline.endswith(nyr):
        answer = query_yes_no("\n* * *\n"
            "The changelog '%s' top section doesn't have the expected\n"
            "'%s' marker. Has this been released already?"
            % (changes_path, nyr), default="yes")
        print "* * *"
        if answer != "no":
            log.info("abort")
            return
    top_body = changes[0]["body"]
    if top_body.strip() == "(nothing yet)":
        raise Error("top section body is `(nothing yet)': it looks like "
            "nothing has been added to this release")
    # Commits to prepare release.
    changes_txt_before = changes_txt
    changes_txt = changes_txt.replace(" (not yet released)", "", 1)
    if not dry_run and changes_txt != changes_txt_before:
        log.info("prepare `%s' for release", changes_path)
        f = codecs.open(changes_path, 'w', 'utf-8')
        f.write(changes_txt)
        f.close()
        run('git commit %s -m "prepare for %s release"'
            % (changes_path, version))
    # Tag version and push.
    # Skip tagging if the tag already exists (another half-run recovery path).
    curr_tags = set(t for t in _capture_stdout(["git", "tag", "-l"]).split('\n') if t)
    if not dry_run and version not in curr_tags:
        log.info("tag the release")
        run('git tag -a "%s" -m "version %s"' % (version, version))
        run('git push --tags')
    # Optionally release.
    if exists("package.json"):
        answer = query_yes_no("\n* * *\nPublish to npm?", default="yes")
        print "* * *"
        if answer == "yes":
            if dry_run:
                log.info("skipping npm publish (dry-run)")
            else:
                run('npm publish')
    elif exists("setup.py"):
        answer = query_yes_no("\n* * *\nPublish to pypi?", default="yes")
        print "* * *"
        if answer == "yes":
            if dry_run:
                log.info("skipping pypi publish (dry-run)")
            else:
                run("%spython setup.py sdist --formats zip upload"
                    % _setup_command_prefix())
    # Commits to prepare for future dev and push.
    # - update changelog file
    next_version_info = _get_next_version_info(version_info)
    next_version = _version_from_version_info(next_version_info)
    log.info("prepare for future dev (version %s)", next_version)
    marker = "## " + changes[0]["verline"]
    if marker.endswith(nyr):
        marker = marker[0:-len(nyr)]
    if marker not in changes_txt:
        raise Error("couldn't find `%s' marker in `%s' "
            "content: can't prep for subsequent dev" % (marker, changes_path))
    next_verline = "%s %s%s" % (marker.rsplit(None, 1)[0], next_version, nyr)
    # Insert a fresh "(nothing yet)" section for the next version above the
    # section that was just released.
    changes_txt = changes_txt.replace(marker + '\n',
        "%s\n\n(nothing yet)\n\n\n%s\n" % (next_verline, marker))
    if not dry_run:
        f = codecs.open(changes_path, 'w', 'utf-8')
        f.write(changes_txt)
        f.close()
    # - update version file
    next_version_tuple = _tuple_from_version(next_version)
    for i, ver_file in enumerate(version_files):
        ver_content = codecs.open(ver_file, 'r', 'utf-8').read()
        ver_file_type, ver_info = parsed_version_files[i]
        # Each file type has its own textual marker for the version; the
        # exact old-version marker must be present or we refuse to guess.
        if ver_file_type == "json":
            marker = '"version": "%s"' % version
            if marker not in ver_content:
                raise Error("couldn't find `%s' version marker in `%s' "
                    "content: can't prep for subsequent dev" % (marker, ver_file))
            ver_content = ver_content.replace(marker,
                '"version": "%s"' % next_version)
        elif ver_file_type == "javascript":
            marker = 'var VERSION = "%s";' % version
            if marker not in ver_content:
                raise Error("couldn't find `%s' version marker in `%s' "
                    "content: can't prep for subsequent dev" % (marker, ver_file))
            ver_content = ver_content.replace(marker,
                'var VERSION = "%s";' % next_version)
        elif ver_file_type == "python":
            marker = "__version_info__ = %r" % (version_info,)
            if marker not in ver_content:
                raise Error("couldn't find `%s' version marker in `%s' "
                    "content: can't prep for subsequent dev" % (marker, ver_file))
            ver_content = ver_content.replace(marker,
                "__version_info__ = %r" % (next_version_tuple,))
        elif ver_file_type == "version":
            ver_content = next_version
        else:
            raise Error("unknown ver_file_type: %r" % ver_file_type)
        if not dry_run:
            log.info("update version to '%s' in '%s'", next_version, ver_file)
            f = codecs.open(ver_file, 'w', 'utf-8')
            f.write(ver_content)
            f.close()
    if not dry_run:
        run('git commit %s %s -m "prep for future dev"' % (
            changes_path, ' '.join(version_files)))
        run('git push')
#---- internal support routines
def _indent(s, indent=' '):
return indent + indent.join(s.splitlines(True))
def _tuple_from_version(version):
def _intify(s):
try:
return int(s)
except ValueError:
return s
return tuple(_intify(b) for b in version.split('.'))
def _get_next_version_info(version_info):
next = list(version_info[:])
next[-1] += 1
return tuple(next)
def _version_from_version_info(version_info):
v = str(version_info[0])
state_dot_join = True
for i in version_info[1:]:
if state_dot_join:
try:
int(i)
except ValueError:
state_dot_join = False
else:
pass
if state_dot_join:
v += "." + str(i)
else:
v += str(i)
return v
_version_re = re.compile(r"^(\d+)\.(\d+)(?:\.(\d+)([abc](\d+)?)?)?$")
def _version_info_from_version(version):
m = _version_re.match(version)
if not m:
raise Error("could not convert '%s' version to version info" % version)
version_info = []
for g in m.groups():
if g is None:
break
try:
version_info.append(int(g))
except ValueError:
version_info.append(g)
return tuple(version_info)
def _parse_version_file(version_file):
    """Get version info from the given file.

    Supported version file types (i.e. types of files from which we know
    how to parse the version string/number -- often by some convention):
    - json: use the "version" key
    - javascript: look for a `var VERSION = "1.2.3";`
    - python: Python script/module with `__version_info__ = (1, 2, 3)`
    - version: a VERSION.txt or VERSION file where the whole contents are
      the version string

    @param version_file {str} Can be a path or "type:path", where "type"
        is one of the supported types.
    @returns {tuple} (version_file_type, version_info)
    @raises Error if an explicit "type:" prefix can't be honoured;
        RuntimeError if the type can't be guessed or is unsupported.
    """
    # Get version file *type*.
    version_file_type = None
    match = re.compile("^([a-z]+):(.*)$").search(version_file)
    if match:
        version_file = match.group(2)
        version_file_type = match.group(1)
        aliases = {
            "js": "javascript"
        }
        if version_file_type in aliases:
            version_file_type = aliases[version_file_type]
    f = codecs.open(version_file, 'r', 'utf-8')
    content = f.read()
    f.close()
    if not version_file_type:
        # Guess the type: first by extension, then by shebang, then by name.
        base = basename(version_file)
        ext = splitext(base)[1]
        if ext == ".json":
            version_file_type = "json"
        elif ext == ".py":
            version_file_type = "python"
        elif ext == ".js":
            version_file_type = "javascript"
        elif content.startswith("#!"):
            shebang = content.splitlines(False)[0]
            shebang_bits = re.split(r'[/ \t]', shebang)
            for name, typ in {"python": "python", "node": "javascript"}.items():
                if name in shebang_bits:
                    version_file_type = typ
                    break
        elif base in ("VERSION", "VERSION.txt"):
            version_file_type = "version"
    if not version_file_type:
        # Fix: original message read "what type of file it it".
        # NOTE(review): this raises RuntimeError rather than the module's
        # Error class -- kept for backward compatibility with any callers
        # catching RuntimeError.
        raise RuntimeError("can't extract version from '%s': no idea "
            "what type of file it is" % version_file)
    if version_file_type == "json":
        obj = json.loads(content)
        version_info = _version_info_from_version(obj["version"])
    elif version_file_type == "python":
        m = re.search(r'^__version_info__ = (.*?)$', content, re.M)
        # NOTE(review): eval of file content -- only run this tool on
        # trusted project files.
        version_info = eval(m.group(1))
    elif version_file_type == "javascript":
        m = re.search(r'^var VERSION = "(.*?)";$', content, re.M)
        version_info = _version_info_from_version(m.group(1))
    elif version_file_type == "version":
        version_info = _version_info_from_version(content.strip())
    else:
        raise RuntimeError("unexpected version_file_type: %r"
            % version_file_type)
    return version_file_type, version_info
def parse_changelog(changes_path):
    """Parse the given changelog path and return `(content, parsed, nyr)`
    where `nyr` is the ' (not yet released)' marker and `parsed` looks like:

        [{'body': u'\n(nothing yet)\n\n',
          'verline': u'restify 1.0.1 (not yet released)',
          'version': u'1.0.1'},  # version is parsed out for top section only
         {'body': u'...',
          'verline': u'1.0.0'},
         {'body': u'...',
          'verline': u'1.0.0-rc2'},
         {'body': u'...',
          'verline': u'1.0.0-rc1'}]

    A changelog (CHANGES.md) is expected to look like this:

        # $project Changelog
        ## $next_version (not yet released)
        ...
        ## $version1
        ...
        ## $version2
        ... and so on

    The version lines are enforced as follows:
    - The top entry should have a " (not yet released)" suffix. "Should"
      because recovery from half-cutarelease failures is supported.
    - A version string must be extractable from there, but it tries to
      be loose (though strict "X.Y.Z" versioning is preferred). Allowed
        ## 1.0.0
        ## my project 1.0.1
        ## foo 1.2.3-rc2
      Basically, (a) the " (not yet released)" is stripped, (b) the
      last token is the version, and (c) that version must start with
      a digit (sanity check).
    """
    if not exists(changes_path):
        raise Error("changelog file '%s' not found" % changes_path)
    content = codecs.open(changes_path, 'r', 'utf-8').read()
    # Split into "## <verline>" sections; DOTALL body runs to the next "##"
    # heading or end of file.
    parser = re.compile(
        r'^##\s*(?P<verline>[^\n]*?)\s*$(?P<body>.*?)(?=^##|\Z)',
        re.M | re.S)
    sections = parser.findall(content)
    # Sanity checks on changelog format.
    if not sections:
        template = "## 1.0.0 (not yet released)\n\n(nothing yet)\n"
        raise Error("changelog '%s' must have at least one section, "
            "suggestion:\n\n%s" % (changes_path, _indent(template)))
    first_section_verline = sections[0][0]
    nyr = ' (not yet released)'
    #if not first_section_verline.endswith(nyr):
    #    eg = "## %s%s" % (first_section_verline, nyr)
    #    raise Error("changelog '%s' top section must end with %r, "
    #        "naive e.g.: '%s'" % (changes_path, nyr, eg))
    items = []
    for i, section in enumerate(sections):
        item = {
            "verline": section[0],
            "body": section[1]
        }
        if i == 0:
            # We only bother to pull out 'version' for the top section.
            verline = section[0]
            if verline.endswith(nyr):
                verline = verline[0:-len(nyr)]
            # Last whitespace-separated token of the heading is the version.
            version = verline.split()[-1]
            try:
                int(version[0])
            except ValueError:
                msg = ''
                if version.endswith(')'):
                    # A trailing ')' suggests a misspelled nyr marker.
                    msg = " (cutarelease is picky about the trailing %r " \
                        "on the top version line. Perhaps you misspelled " \
                        "that?)" % nyr
                raise Error("changelog '%s' top section version '%s' is "
                    "invalid: first char isn't a number%s"
                    % (changes_path, version, msg))
            item["version"] = version
        items.append(item)
    return content, items, nyr
## {{{ http://code.activestate.com/recipes/577058/ (r2)
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning
        an answer is required of the user).

    The "answer" return value is one of "yes" or "no".
    """
    valid = {"yes": "yes", "y": "yes", "ye": "yes",
             "no": "no", "n": "no"}
    # Identity comparison for None ('is', not '==') per PEP 8.
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # Loop until an acceptable answer is typed ('while True', not 'while 1').
    while True:
        sys.stdout.write(question + prompt)
        choice = raw_input().lower()
        if default is not None and choice == '':
            return default
        elif choice in valid:  # membership tests the dict directly; .keys() was redundant
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "\
                             "(or 'y' or 'n').\n")
## end of http://code.activestate.com/recipes/577058/ }}}
def _capture_stdout(argv):
import subprocess
p = subprocess.Popen(argv, stdout=subprocess.PIPE)
return p.communicate()[0]
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def run(cmd):
    """Run the given command (a shell string) via os.system.

    Raises OSError if the command returns a non-zero exit status.
    """
    log.debug("running '%s'", cmd)
    fixed_cmd = cmd
    if sys.platform == "win32" and cmd.count('"') > 2:
        # NOTE(review): Windows-specific quoting workaround -- presumably
        # os.system/cmd.exe mishandles command lines with multiple quoted
        # sections unless the whole line is re-wrapped in quotes; confirm.
        fixed_cmd = '"' + cmd + '"'
    retval = os.system(fixed_cmd)
    if hasattr(os, "WEXITSTATUS"):
        # POSIX: os.system returns a wait status; extract the exit code.
        status = os.WEXITSTATUS(retval)
    else:
        status = retval
    if status:
        raise OSError(status, "error running '%s'" % cmd)
def _setup_command_prefix():
prefix = ""
if sys.platform == "darwin":
# http://forums.macosxhints.com/archive/index.php/t-43243.html
# This is an Apple customization to `tar` to avoid creating
# '._foo' files for extended-attributes for archived files.
prefix = "COPY_EXTENDED_ATTRIBUTES_DISABLE=1 "
return prefix
#---- mainline
def main(argv):
    """Command-line entry point: parse options and run cutarelease()."""
    logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s")
    log.setLevel(logging.INFO)
    # Parse options.
    parser = optparse.OptionParser(prog="cutarelease", usage='',
        version="%prog " + __version__, description=__doc__,
        formatter=_NoReflowFormatter())
    parser.add_option("-v", "--verbose", dest="log_level",
        action="store_const", const=logging.DEBUG,
        help="more verbose output")
    parser.add_option("-q", "--quiet", dest="log_level",
        action="store_const", const=logging.WARNING,
        help="quieter output (just warnings and errors)")
    parser.set_default("log_level", logging.INFO)
    # NOTE(review): "--test" is accepted but never acted on below --
    # apparently a vestige of the template this script came from.
    parser.add_option("--test", action="store_true",
        help="run self-test and exit (use 'eol.py -v --test' for verbose test output)")
    parser.add_option("-p", "--project-name", metavar="NAME",
        help='the name of this project (default is the base dir name)',
        default=basename(os.getcwd()))
    parser.add_option("-f", "--version-file", metavar="[TYPE:]PATH",
        action='append', dest="version_files",
        help='The path to the project file holding the version info. Can be '
            'specified multiple times if more than one file should be updated '
            'with new version info. If excluded, it will be guessed.')
    parser.add_option("-n", "--dry-run", action="store_true",
        help='Do a dry-run', default=False)
    opts, args = parser.parse_args()
    log.setLevel(opts.log_level)
    cutarelease(opts.project_name, opts.version_files, dry_run=opts.dry_run)
## {{{ http://code.activestate.com/recipes/577258/ (r5+)
# Top-level error handler: log unexpected exceptions tersely (full
# traceback only at DEBUG level) instead of dumping a raw traceback.
if __name__ == "__main__":
    try:
        retval = main(sys.argv)
    except KeyboardInterrupt:
        sys.exit(1)
    except SystemExit:
        raise
    except:
        import traceback, logging
        if not log.handlers and not logging.root.handlers:
            logging.basicConfig()
        skip_it = False
        exc_info = sys.exc_info()
        if hasattr(exc_info[0], "__name__"):
            exc_class, exc, tb = exc_info
            if isinstance(exc, IOError) and exc.args[0] == 32:
                # Skip 'IOError: [Errno 32] Broken pipe': often a cancelling of `less`.
                skip_it = True
            if not skip_it:
                # Report just the innermost frame of the traceback.
                tb_path, tb_lineno, tb_func = traceback.extract_tb(tb)[-1][:3]
                log.error("%s (%s:%s in %s)", exc_info[1], tb_path,
                    tb_lineno, tb_func)
        else:  # string exception (legacy Python 2 'raise "..."' form)
            log.error(exc_info[0])
        if not skip_it:
            if log.isEnabledFor(logging.DEBUG):
                traceback.print_exception(*exc_info)
            sys.exit(1)
    else:
        sys.exit(retval)
| mpl-2.0 |
darktears/chromium-crosswalk | chrome/common/extensions/docs/server2/blob_reference_store.py | 146 | 1119 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from appengine_wrappers import db
from appengine_wrappers import BlobReferenceProperty
# Well-known namespace name for blobstore-backed references.
BLOB_REFERENCE_BLOBSTORE = 'BlobReferenceBlobstore'
class _Model(db.Model):
    # Datastore row mapping a "namespace.key" string to a blob reference.
    # NOTE(review): the trailing underscore on key_ is presumably to avoid
    # shadowing a db.Model attribute -- confirm against appengine_wrappers.
    key_ = db.StringProperty()
    value = BlobReferenceProperty()
class BlobReferenceStore(object):
  """A wrapper around the datastore API that can store blob keys.

  Entries are keyed by (namespace, key), flattened into a single
  "namespace.key" string column on _Model.
  """
  def _Query(self, namespace, key):
    # Returns the first matching _Model entity, or None.
    return _Model.gql('WHERE key_ = :1', self._MakeKey(namespace, key)).get()
  def _MakeKey(self, namespace, key):
    # Flatten (namespace, key) into the stored string key.
    return '.'.join((namespace, key))
  def Set(self, namespace, key, value):
    # Write the blob reference. NOTE(review): always creates a new entity;
    # whether this overwrites or duplicates an existing (namespace, key)
    # depends on appengine_wrappers' put() semantics -- confirm.
    _Model(key_=self._MakeKey(namespace, key), value=value).put()
  def Get(self, namespace, key):
    # Return the stored blob reference, or None if absent.
    result = self._Query(namespace, key)
    if not result:
      return None
    return result.value
  def Delete(self, namespace, key):
    # Remove the entry and return its blob key (None if absent) so the
    # caller can also delete the underlying blob.
    result = self._Query(namespace, key)
    if not result:
      return None
    blob_key = result.value
    result.delete()
    return blob_key
| bsd-3-clause |
alex/bcrypt | bcrypt/__about__.py | 4 | 1242 | # Author:: Donald Stufft (<donald@stufft.io>)
# Copyright:: Copyright (c) 2013 Donald Stufft
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# Explicit public API of this metadata module.
__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]
# Package metadata constants (single source of truth for the dunder values).
__title__ = "bcrypt"
__summary__ = "Modern password hashing for your software and your servers"
__uri__ = "https://github.com/dstufft/bcrypt/"
__version__ = "1.0.2"
__author__ = "Donald Stufft"
__email__ = "donald@stufft.io"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2013 Donald Stufft"
| apache-2.0 |
DiptoDas8/Biponi | lib/python2.7/site-packages/PIL/SunImagePlugin.py | 26 | 1965 | #
# The Python Imaging Library.
# $Id$
#
# Sun image file handling
#
# History:
# 1995-09-10 fl Created
# 1996-05-28 fl Fixed 32-bit alignment
# 1998-12-29 fl Import ImagePalette module
# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault)
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995-1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.3"
from PIL import Image, ImageFile, ImagePalette, _binary
# Big-endian 16/32-bit integer readers (Sun raster headers use the
# big-endian "be" variants from PIL._binary).
i16 = _binary.i16be
i32 = _binary.i32be
def _accept(prefix):
    # Sniff the 4-byte Sun raster magic number 0x59a66a95.
    return len(prefix) >= 4 and i32(prefix) == 0x59a66a95
##
# Image plugin for Sun raster files.
class SunImageFile(ImageFile.ImageFile):
    """Image plugin for Sun raster (.ras) files."""
    format = "SUN"
    format_description = "Sun Raster File"
    def _open(self):
        # HEAD: fixed 32-byte big-endian header, magic first.
        s = self.fp.read(32)
        if i32(s) != 0x59a66a95:
            raise SyntaxError("not an SUN raster file")
        offset = 32
        # Header words: width, height (bytes 4-12), then bit depth.
        self.size = i32(s[4:8]), i32(s[8:12])
        depth = i32(s[12:16])
        if depth == 1:
            self.mode, rawmode = "1", "1;I"  # 1-bit, inverted
        elif depth == 8:
            self.mode = rawmode = "L"  # 8-bit grayscale (or palette, below)
        elif depth == 24:
            self.mode, rawmode = "RGB", "BGR"  # 24-bit, BGR byte order
        else:
            raise SyntaxError("unsupported mode")
        compression = i32(s[20:24])
        if i32(s[24:28]) != 0:
            # Non-zero colormap type: a palette of the given byte length
            # follows the header; image data starts after it.
            length = i32(s[28:32])
            offset = offset + length
            self.palette = ImagePalette.raw("RGB;L", self.fp.read(length))
            if self.mode == "L":
                self.mode = rawmode = "P"
        # Rows are padded to 32-bit boundaries.
        stride = (((self.size[0] * depth + 7) // 8) + 3) & (~3)
        if compression == 1:
            self.tile = [("raw", (0, 0)+self.size, offset, (rawmode, stride))]
        elif compression == 2:
            self.tile = [("sun_rle", (0, 0)+self.size, offset, rawmode)]
        # NOTE(review): any other compression value silently leaves
        # self.tile unset -- presumably yielding an empty image rather
        # than an error; confirm whether that is intended.
#
# registry
# Register the decoder with its magic-number sniffer and the conventional
# Sun raster file extension.
Image.register_open("SUN", SunImageFile, _accept)
Image.register_extension("SUN", ".ras")
| mit |
chengjf/database-interface-doc-management | flask-demo/flask/Lib/site-packages/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
# 256-entry table: maps each ISO-8859-5 (Latin-5) byte value to a
# frequency-order index used by the language model (see the legend above:
# 255/254/253/252 are control/CR-LF/symbol/digit classes).
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,  # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,  # 90
 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238,  # a0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # b0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56,  # c0
  1, 18,  9, 20, 11,  3, 23, 15,  2, 26, 12, 10, 14,  6,  4, 13,  # d0
  7,  8,  5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16,  # e0
 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253,  # f0
)
# Same frequency-order table, but for the windows-1251 encoding; only the
# upper half (>= 0x80) differs from the Latin-5 table.
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220,  # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229,  # 90
 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240,  # a0
 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250,  # b0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # c0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56,  # d0
  1, 18,  9, 20, 11,  3, 23, 15,  2, 26, 12, 10, 14,  6,  4, 13,  # e0
  7,  8,  5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16,  # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Language model for Bulgarian text encoded as ISO-8859-5 (Latin/Cyrillic).
Latin5BulgarianModel = dict(
    charToOrderMap=Latin5_BulgarianCharToOrderMap,
    precedenceMatrix=BulgarianLangModel,
    mTypicalPositiveRatio=0.969392,
    keepEnglishLetter=False,
    charsetName="ISO-8859-5",
)
# Language model for Bulgarian text encoded as windows-1251.
Win1251BulgarianModel = dict(
    charToOrderMap=win1251BulgarianCharToOrderMap,
    precedenceMatrix=BulgarianLangModel,
    mTypicalPositiveRatio=0.969392,
    keepEnglishLetter=False,
    charsetName="windows-1251",
)
# flake8: noqa
| apache-2.0 |
facelessuser/SublimeRandomCrap | toggle_white_space.py | 1 | 1912 | """
ToggleWhiteSpace Sublime Plugin.
Toggle showing white space in Sublime Text.
```
//////////////////////////////////
// Toggle White Space Command
//////////////////////////////////
{
"caption": "Toggle White Space",
"command": "toggle_white_space"
},
```
Licensed under MIT
Copyright (c) 2013-2019 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import sublime
import sublime_plugin
class ToggleWhiteSpaceCommand(sublime_plugin.ApplicationCommand):
    """Toggle the showing of whitespace in Sublime."""

    def run(self):
        """Flip `draw_white_space` between "selection" and "all" and persist it."""
        prefs = sublime.load_settings("Preferences.sublime-settings")
        # Unset or "selection" (the default) toggles to "all"; anything else
        # toggles back to "selection".
        if prefs.get("draw_white_space", "selection") == "selection":
            new_value = "all"
        else:
            new_value = "selection"
        prefs.set("draw_white_space", new_value)
        sublime.save_settings("Preferences.sublime-settings")
| mit |
Saluev/cocos2d-gui | cocosgui/editors.py | 1 | 7325 | # built-in
from __future__ import print_function
# PyOpenGL
from OpenGL import GL
# pyglet
import pyglet
from pyglet.window import key
# gui
from .node import GUINode
# css
from css.color import Color
class Caret(GUINode):
  """Text cursor drawn as a '|' glyph inside its parent TextEdit."""
  def __init__(self):
    super(Caret, self).__init__()
    self.__glyph = None  # lazily fetched from the parent's font
  @property
  def glyph(self):
    # Fetch (and cache) the '|' glyph from the parent widget's font.
    if self.__glyph is None:
      font = self.parent.font
      self.__glyph = font.get_glyphs('|')[0]
    return self.__glyph
  def get_content_size(self):
    g = self.glyph
    return (g.width, g.height)
  def draw(self, *args, **kwargs):
    # Only visible when the parent is focused and the selection is empty.
    if not self.parent.has_state('focus'):
      return
    sel_left, sel_right = self.parent.selection
    if sel_left != sel_right:
      return
    GL.glPushMatrix()
    self.transform()
    self.glyph.draw()
    GL.glPopMatrix()
  def apply_style(self, **options):
    super(Caret, self).apply_style(**options)
    # Invalidate the cached glyph; it is re-fetched lazily on the next draw.
    self.__glyph = None
class TextEdit(GUINode):
  """Single-line editable text widget.

  Selection state is kept as a pair ``__selection = (left, right)`` of glyph
  indices (equal when the selection is empty / a bare caret) plus
  ``__selection_focus``, the anchor end from which Shift-extension grows.
  """
  def __init__(self, *args, **kwargs):
    # *args/**kwargs are forwarded verbatim to pyglet.text.Label.
    super(TextEdit, self).__init__()
    self.text_label = pyglet.text.Label(*args, **kwargs)
    self.text_objects = (self.text_label,)
    self.__selection = (0, 0)
    self.__selection_focus = 0
    self.update_glyphs()
    self.caret = Caret()
    self.add(self.caret) # styles can be applied to it now
  ## utility functions ##
  @property
  def font(self):
    return self.text_label.document.get_font()
  @property
  def text(self):
    return self.text_label.text
  @property
  def text_len(self):
    # the fastest possible len (assuming glyphs are correct)
    return len(self.glyphs)
  @property
  def selection(self):
    # Returned as a fresh tuple so callers cannot mutate internal state.
    return tuple(self.__selection)
  def __selection_edge(self):
    # Return the selection end opposite to the anchor (the "moving" end).
    sel_left, sel_right = self.__selection
    if sel_left == self.__selection_focus:
      return sel_right
    else:
      return sel_left
  ## public api ##
  def replace_selection(self, by_what):
    """Replace the selected span with *by_what* and collapse the caret after it."""
    sel_left, sel_right = self.selection
    new_text = self.text[:sel_left] + by_what + self.text[sel_right:]
    self.text_label.text = new_text
    self.__selection = (sel_left + len(by_what),) * 2
    self.update_glyphs()
    self.update_caret()
  ## inner api ##
  def update_glyphs(self):
    # Rebuild per-glyph pixel offsets; offsets has len(glyphs)+1 entries so
    # that offsets[i] is the x position *before* glyph i.
    self.glyphs = self.font.get_glyphs(self.text)
    self.offsets = []
    curr_offset = 0
    for glyph in self.glyphs:
      self.offsets.append(curr_offset)
      curr_offset += glyph.advance
    self.offsets.append(curr_offset)
  def update_caret(self):
    # Reposition the caret node at the collapsed-selection point.
    sel_left, sel_right = self.__selection
    if sel_left == sel_right:
      self.caret.x = self.content_box[0] + self.offsets[sel_left]
    else:
      self.caret.x = self.content_box[0] # TODO hide at all
    self.caret.x -= self.caret.glyph.advance // 3 + 1
    self.caret.y = self.content_box[1]
  def get_caret_pos(self, x):
    # Map a local x coordinate (content-box relative) to a glyph index.
    for i, offset in enumerate(self.offsets):
      if x < offset:
        return max(0, i - 1)
    return len(self.glyphs)
  def apply_style(self, **options):
    super(TextEdit, self).apply_style(**options)
    tl = self.text_label
    tl.x, tl.y, tl.width, tl.height = self.content_box
    self.update_caret()
  def draw(self, *args, **kwargs):
    super(TextEdit, self).draw(*args, **kwargs)
    # Clip the label to the padding box so overlong text does not spill out.
    GL.glPushMatrix()
    GL.glPushAttrib(GL.GL_SCISSOR_BIT)
    self.transform()
    GL.glEnable(GL.GL_SCISSOR_TEST) # TODO move this to style['overflow'] = 'hidden'
    left, bottom = map(int, self.point_to_world(self.padding_box[:2]))
    GL.glScissor(left, bottom, *self.padding_box[2:])
    self.text_label.draw()
    GL.glPopAttrib()
    GL.glPopMatrix()
  ## event handlers ##
  def key_press(self, button, modifiers):
    # NOTE(review): editing/navigation keys are handled by the if/elif chain
    # below, but control flow still falls through to the chr(button) insertion
    # attempt afterwards; for special keys chr() raises ValueError, which is
    # caught and only printed — confirm this fallthrough is intentional.
    if button == key.BACKSPACE:
      # TODO Ctrl+Backspace erases a word
      sel_left, sel_right = self.__selection
      if sel_left == sel_right > 0:
        self.__selection = (sel_left - 1, sel_right)
      self.replace_selection('')
    elif button == key.DELETE:
      # TODO Ctrl+Delete erases a word
      sel_left, sel_right = self.__selection
      if sel_left == sel_right < self.text_len:
        self.__selection = (sel_left, sel_right + 1)
      self.replace_selection('')
    elif button == key.HOME:
      if modifiers & key.MOD_SHIFT:
        self.__selection = (0, self.__selection_focus)
      else:
        self.__selection = (0, 0)
      self.update_caret()
    elif button == key.END:
      if modifiers & key.MOD_SHIFT:
        self.__selection = (self.__selection_focus, self.text_len)
      else:
        self.__selection = (self.text_len,) * 2
      self.update_caret()
    elif button == key.LEFT:
      # TODO Ctrl+Left moves a word back
      sel_left, sel_right = self.__selection
      if modifiers & key.MOD_SHIFT:
        # Move the non-anchor edge one glyph left, keeping the anchor fixed.
        new_sel_1 = max(0, self.__selection_edge() - 1)
        new_sel_2 = self.__selection_focus
        self.__selection = sorted((new_sel_1, new_sel_2))
      else:
        self.__selection_focus = max(0, self.__selection_edge() - 1)
        self.__selection = (self.__selection_focus,) * 2
      self.update_caret()
    elif button == key.RIGHT:
      # TODO Ctrl+Right moves a word right
      sel_left, sel_right = self.__selection
      if modifiers & key.MOD_SHIFT:
        new_sel_1 = min(self.text_len, self.__selection_edge() + 1)
        new_sel_2 = self.__selection_focus
        self.__selection = sorted((new_sel_1, new_sel_2))
      else:
        self.__selection_focus = min(self.text_len, self.__selection_edge() + 1)
        self.__selection = (self.__selection_focus,) * 2
      self.update_caret()
    try:
      # TODO how to handle Cyrillic?..
      letter = chr(button)
      self.replace_selection(letter)
    except ValueError:
      print("Unrecognized key:", button, modifiers)
    super(TextEdit, self).key_press(button, modifiers)
  def mouse_press(self, x, y, button, modifiers):
    # Collapse the selection to the glyph under the click and re-anchor there.
    x, y = self.point_to_local((x, y))
    x = x - self.content_box[0]
    # TODO handle Shift button here
    curr_caret_pos = self.get_caret_pos(x)
    new_selection = (curr_caret_pos,) * 2
    old_selection = self.__selection
    self.__selection = new_selection
    self.__selection_focus = curr_caret_pos
    super(TextEdit, self).mouse_press(x, y, button, modifiers)
    self.selection_change(old_selection, new_selection)
  def mouse_drag(self, x, y, dx, dy, button, modifiers):
    # Extend the selection between the press anchor and the drag position.
    x, y = self.point_to_local((x, y))
    x = x - self.content_box[0]
    curr_caret_pos = self.get_caret_pos(x)
    sel_focus = self.__selection_focus
    new_selection = sorted([sel_focus, curr_caret_pos])
    old_selection = self.__selection
    self.__selection = new_selection
    # NOTE(review): this forwards to mouse_motion rather than mouse_drag on
    # the superclass — confirm against GUINode whether that is intended.
    super(TextEdit, self).mouse_motion(x, y, dx, dy)
    self.selection_change(old_selection, new_selection)
  def mouse_release(self, x, y, button, modifiers):
    super(TextEdit, self).mouse_release(x, y, button, modifiers)
  def selection_change(self, old_selection, new_selection):
    # Fires the 'on_selection_change' event only on an actual change.
    if old_selection != new_selection:
      self.update_caret()
      self.dispatch_event('on_selection_change', old_selection, new_selection)
# Custom event fired whenever the text selection changes (see
# TextEdit.selection_change).
TextEdit.register_event_type('on_selection_change')
# Default widget styling (CSS-like properties interpreted by the gui layer).
TextEdit.style['padding-bottom'] = 4
TextEdit.style['border'] = (2, 'inset', 'gray')
TextEdit.style['background-color'] = 'gray'
TextEdit.pseudostyle('focus')['background-color'] = 'lightgray'
TextEdit.pseudostyle('hover')['background-color'] = 'darkgray' | mit |
MinimalOS/external_skia | gm/rebaseline_server/download_actuals_test.py | 66 | 1478 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Test download.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
1. examine the results in self._output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
3. mv self._output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
# System-level imports
import os
import shutil
import tempfile
import urllib
# Imports from within Skia
import fix_pythonpath # must do this first
from pyutils import url_utils
import base_unittest
import download_actuals
class DownloadTest(base_unittest.TestCase):

  def test_fetch(self):
    """Tests fetch() of GM results from actual-results.json ."""
    # Build file:// URLs pointing at the checked-in test fixtures.
    actuals_url = url_utils.create_filepath_url(
        os.path.join(self._input_dir, 'gm-actuals'))
    imagefiles_url = url_utils.create_filepath_url(
        os.path.join(self._input_dir, 'fake-gm-imagefiles'))
    downloader = download_actuals.Download(
        actuals_base_url=actuals_url,
        gm_actuals_root_url=imagefiles_url)
    downloader.fetch(
        builder_name='Test-Android-GalaxyNexus-SGX540-Arm7-Release',
        dest_dir=self._output_dir_actual)
def main():
  # Run DownloadTest through the shared Skia base_unittest harness.
  base_unittest.main(DownloadTest)
if __name__ == '__main__':
  main()
| bsd-3-clause |
izadorozhna/dashboard_integration_tests | openstack_dashboard/test/integration_tests/tests/test_login.py | 65 | 1212 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.pages import loginpage
class TestLogin(helpers.BaseTestCase):
    """Basic login smoke scenario.

    * checks that the login page is available
    * logs in as a regular user
    * checks that the user home page loads without error
    """
    def test_login(self):
        login_page = loginpage.LoginPage(self.driver, self.CONFIG)
        login_page.go_to_login_page()
        home_page = login_page.login()
        # Bail out explicitly if the post-login state cannot be verified.
        if not home_page.is_logged_in:
            self.fail("Could not determine if logged in")
        home_page.log_out()
| apache-2.0 |
pein0119/girl-atlas-crawler | get_image.py | 1 | 2925 | # -*- coding: utf-8 -*-
# 非gevent版本
import os
import requests
import time
from lxml import html
def get_response(url):
    """Fetch *url* with a desktop-browser User-Agent and return the response."""
    # Bug fix: the UA string used to be stored under the bogus key "headers",
    # so no User-Agent header was ever sent; "User-Agent" is the correct key.
    headers = {
        "User-Agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.94 Safari/537.36"
    }
    response = requests.get(url, headers = headers)
    return response
# 获得每个页面的url
# 起始url 为 http://girl-atlas.com/
# 我爬的时候网站一共有92个页面
def get_page_urls():
start_url = 'http://girl-atlas.com/'
response = get_response(start_url)
page_urls = []
page_urls.append(start_url)
while True:
parsed_body = html.fromstring(response.text)
# Xpath 提取访问下个页面的url
next_url = parsed_body.xpath('//a[@class="btn-form next"]/@href')
if not next_url:
break
next_url = start_url + next_url[0]
page_urls.append(next_url)
response = get_response(next_url)
print "get_page_urls done!!!"
return page_urls
# Collect the URL of every girl album.
def get_girl_urls(page_urls):
    """Return the album URLs found on every listing page."""
    girl_urls = []
    for page_url in page_urls:
        tree = html.fromstring(get_response(page_url).text)
        # XPath: album links inside each grid title.
        girl_urls += tree.xpath('//div[@class="grid_title"]/a/@href')
    return girl_urls
def get_image_urls(girl_urls):
girl_list = []
for url in girl_urls:
# print "in get_image_urls" + url[0]
response = get_response(url)
parsed_body = html.fromstring(response.text)
# 专辑名
girl_title = parsed_body.xpath('//title/text()')
image_urls = parsed_body.xpath('//li[@class="slide "]/img/@src | //li[@class="slide "]/img/@delay')
girl_dict = {girl_title[0] : image_urls}
girl_list.append(girl_dict)
print "get_girl_urls done!!!"
return girl_list
# Start downloading the images.
def get_images(girl_list):
    """Download every album in *girl_list* to disk.

    Each element is a one-entry dict mapping album title -> list of image
    URLs; one sub-directory per album title is created under ``start_dir``.
    """
    count = 1
    # Default destination folder for downloaded images.
    start_dir = '/home/pein/temp/'
    for girl in girl_list:
        # NOTE: indexing dict.keys()/values() like this is Python 2 only.
        dir_name = start_dir + girl.keys()[0]
        urls = girl.values()[0]
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
        for url in urls:
            print url
            # File name is the last path component of the image URL.
            with open(dir_name + '/' + url.split('/')[-1], 'wb') as f:
                r = get_response(url)
                f.write(r.content)
        print
        print count, girl.keys()[0] + " done!!!"
        count += 1
        print
if __name__ == '__main__':
    # Crawl the listing pages first; note this step is deliberately excluded
    # from the elapsed-time measurement below.
    page_urls = get_page_urls()
    start_time = time.time()
    girl_urls = get_girl_urls(page_urls)
    girl_list = get_image_urls(girl_urls)
    print "girl %s" % len(girl_urls)
    get_images(girl_list)
    elapsed_time = time.time() - start_time
    print
    # NOTE(review): "elasped" is a typo in the output text; left as-is here
    # since this is a documentation-only pass.
    print "elasped %s seconds!!!!" % elapsed_time
| bsd-2-clause |
petrjasek/superdesk-server | superdesk/utc.py | 2 | 1232 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import arrow
import datetime
from pytz import utc, timezone # flake8: noqa
tzinfo = getattr(datetime, 'tzinfo', object)
def utcnow():
    """Get tz aware datetime object.

    Remove microseconds which can't be persisted by mongo so we have
    the values consistent in both mongo and elastic.
    """
    if hasattr(datetime.datetime, 'now'):
        current = datetime.datetime.now(tz=utc)
    else:
        # Fallback for datetime implementations lacking ``now``.
        current = datetime.datetime.utcnow()
    return current.replace(microsecond=0)
def get_date(date_or_string):
    """Parse *date_or_string* into a datetime; falsy input yields None."""
    if not date_or_string:
        return None
    return arrow.get(date_or_string).datetime
def get_expiry_date(minutes, offset=None):
    """Compute an expiry timestamp *minutes* minutes from *offset*.

    :param minutes: number of minutes until expiry
    :param offset: optional datetime.date/datetime.datetime to offset from;
        when falsy, the current UTC time (``utcnow()``) is used instead
    :raises TypeError: if a truthy *offset* is not a date/datetime
    """
    delta = datetime.timedelta(minutes=minutes)
    if offset:
        # Bug fix: the original check was ``type(offset) is not datetime``,
        # which compares a class against the *module* ``datetime`` and is
        # therefore always True — the TypeError branch was dead and invalid
        # offsets were never rejected. isinstance against datetime.date also
        # accepts datetime.datetime (its subclass), matching the message.
        if isinstance(offset, datetime.date):
            return offset + delta
        raise TypeError('offset must be a datetime.date, not a %s' % type(offset))
    else:
        return utcnow() + delta
| agpl-3.0 |
rjshade/grpc | src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py | 9 | 2409 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Insecure client-server interoperability as a unit test."""
from concurrent import futures
import unittest
import grpc
from src.proto.grpc.testing import test_pb2
from tests.interop import _intraop_test_case
from tests.interop import methods
from tests.interop import server
class InsecureIntraopTest(_intraop_test_case.IntraopTestCase,
                          unittest.TestCase):
    """Runs the interop test cases over an insecure local channel."""
    def setUp(self):
        # Serve the interop TestService on an ephemeral insecure port and
        # point self.stub (used by the inherited test cases) at it.
        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        test_pb2.add_TestServiceServicer_to_server(methods.TestService(),
                                                   self.server)
        port = self.server.add_insecure_port('[::]:0')
        self.server.start()
        self.stub = test_pb2.TestServiceStub(
            grpc.insecure_channel('localhost:{}'.format(port)))
if __name__ == '__main__':
    # verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
| bsd-3-clause |
googlearchive/big-rig | app/src/thirdparty/telemetry/internal/browser/browser_options.py | 3 | 15413 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import optparse
import os
import shlex
import socket
import sys
from telemetry.core import platform
from telemetry.core import util
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_finder_exceptions
from telemetry.internal.browser import profile_types
from telemetry.internal.platform import device_finder
from telemetry.internal.platform.profiler import profiler_finder
from telemetry.util import wpr_modes
util.AddDirToPythonPath(
util.GetChromiumSrcDir(), 'third_party', 'webpagereplay')
import net_configs # pylint: disable=F0401
class BrowserFinderOptions(optparse.Values):
  """Options to be used for discovering a browser."""
  def __init__(self, browser_type=None):
    optparse.Values.__init__(self)
    self.browser_type = browser_type
    self.browser_executable = None
    self.chrome_root = None
    self.device = None
    self.cros_ssh_identity = None
    self.extensions_to_load = []
    # If set, copy the generated profile to this path on exit.
    self.output_profile_path = None
    self.cros_remote = None
    self.profiler = None
    self.verbosity = 0
    # Browser-launch options; populated from the parsed command line by
    # BrowserOptions.UpdateFromParseResults.
    self.browser_options = BrowserOptions()
    self.output_file = None
    self.android_rndis = False
    self.no_performance_mode = False
  def __repr__(self):
    return str(sorted(self.__dict__.items()))
  def Copy(self):
    # Deep copy so callers can mutate the clone independently.
    return copy.deepcopy(self)
  def CreateParser(self, *args, **kwargs):
    """Build an OptionParser whose parse_args fills in this object.

    The returned parser's ``parse_args`` is monkey-patched (see ParseArgs
    below) so that parsing writes results onto ``self``, configures logging,
    and handles the special ``list`` values for --device/--browser.
    """
    parser = optparse.OptionParser(*args, **kwargs)
    # Selection group
    group = optparse.OptionGroup(parser, 'Which browser to use')
    group.add_option('--browser',
        dest='browser_type',
        default=None,
        help='Browser type to run, '
             'in order of priority. Supported values: list,%s' %
             ','.join(browser_finder.FindAllBrowserTypes(self)))
    group.add_option('--browser-executable',
        dest='browser_executable',
        help='The exact browser to run.')
    group.add_option('--chrome-root',
        dest='chrome_root',
        help='Where to look for chrome builds.'
             'Defaults to searching parent dirs by default.')
    group.add_option('--device',
        dest='device',
        help='The device ID to use.'
             'If not specified, only 0 or 1 connected devices are supported. If'
             'specified as "android", all available Android devices are used.')
    group.add_option('--target-arch',
        dest='target_arch',
        help='The target architecture of the browser. Options available are: '
             'x64, x86_64, arm, arm64 and mips. '
             'Defaults to the default architecture of the platform if omitted.')
    group.add_option(
        '--remote',
        dest='cros_remote',
        help='The hostname of a remote ChromeOS device to use.')
    group.add_option(
        '--remote-ssh-port',
        type=int,
        default=socket.getservbyname('ssh'),
        dest='cros_remote_ssh_port',
        help='The SSH port of the remote ChromeOS device (requires --remote).')
    # Default the SSH identity to chromite's well-known testing key when the
    # Chromium checkout provides it.
    identity = None
    testing_rsa = os.path.join(
        util.GetChromiumSrcDir(),
        'third_party', 'chromite', 'ssh_keys', 'testing_rsa')
    if os.path.exists(testing_rsa):
      identity = testing_rsa
    group.add_option('--identity',
        dest='cros_ssh_identity',
        default=identity,
        help='The identity file to use when ssh\'ing into the ChromeOS device')
    parser.add_option_group(group)
    # Debugging options
    group = optparse.OptionGroup(parser, 'When things go wrong')
    profiler_choices = profiler_finder.GetAllAvailableProfilers()
    group.add_option(
        '--profiler', default=None, type='choice',
        choices=profiler_choices,
        help='Record profiling data using this tool. Supported values: ' +
             ', '.join(profiler_choices))
    group.add_option(
        '-v', '--verbose', action='count', dest='verbosity',
        help='Increase verbosity level (repeat as needed)')
    group.add_option('--print-bootstrap-deps',
        action='store_true',
        help='Output bootstrap deps list.')
    parser.add_option_group(group)
    # Platform options
    group = optparse.OptionGroup(parser, 'Platform options')
    group.add_option('--no-performance-mode', action='store_true',
        help='Some platforms run on "full performance mode" where the '
        'test is executed at maximum CPU speed in order to minimize noise '
        '(specially important for dashboards / continuous builds). '
        'This option prevents Telemetry from tweaking such platform settings.')
    group.add_option('--android-rndis', dest='android_rndis', default=False,
        action='store_true', help='Use RNDIS forwarding on Android.')
    group.add_option('--no-android-rndis', dest='android_rndis',
        action='store_false', help='Do not use RNDIS forwarding on Android.'
        ' [default]')
    parser.add_option_group(group)
    # Browser options.
    self.browser_options.AddCommandLineArgs(parser)
    real_parse = parser.parse_args
    def ParseArgs(args=None):
      # Seed self with parser defaults without clobbering values that were
      # already set on this object before parsing.
      defaults = parser.get_default_values()
      for k, v in defaults.__dict__.items():
        if k in self.__dict__ and self.__dict__[k] != None:
          continue
        self.__dict__[k] = v
      # Parse directly into self (optparse accepts a values object).
      ret = real_parse(args, self) # pylint: disable=E1121
      # Map -v / -vv onto logging levels.
      if self.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
      elif self.verbosity:
        logging.getLogger().setLevel(logging.INFO)
      else:
        logging.getLogger().setLevel(logging.WARNING)
      logging.basicConfig(
          format='%(levelname)s:%(name)s:%(asctime)s:%(message)s')
      # --device=list: print discovered devices and exit.
      if self.device == 'list':
        devices = device_finder.GetDevicesMatchingOptions(self)
        print 'Available devices:'
        for device in devices:
          print ' ', device.name
        sys.exit(0)
      if self.browser_executable and not self.browser_type:
        self.browser_type = 'exact'
      # --browser=list: print browsers available per device and exit.
      if self.browser_type == 'list':
        devices = device_finder.GetDevicesMatchingOptions(self)
        if not devices:
          sys.exit(0)
        browser_types = {}
        for device in devices:
          try:
            possible_browsers = browser_finder.GetAllAvailableBrowsers(self,
                device)
            browser_types[device.name] = sorted(
                [browser.browser_type for browser in possible_browsers])
          except browser_finder_exceptions.BrowserFinderException as ex:
            print >> sys.stderr, 'ERROR: ', ex
            sys.exit(1)
        print 'Available browsers:'
        if len(browser_types) == 0:
          print ' No devices were found.'
        for device_name in sorted(browser_types.keys()):
          print ' ', device_name
          for browser_type in browser_types[device_name]:
            print ' ', browser_type
          sys.exit(0)
      # Parse browser options.
      self.browser_options.UpdateFromParseResults(self)
      return ret
    parser.parse_args = ParseArgs
    return parser
  def AppendExtraBrowserArgs(self, args):
    self.browser_options.AppendExtraBrowserArgs(args)
  def MergeDefaultValues(self, defaults):
    # Copy attributes from *defaults* only where self has no value yet.
    for k, v in defaults.__dict__.items():
      self.ensure_value(k, v)
class BrowserOptions(object):
"""Options to be used for launching a browser."""
def __init__(self):
self.browser_type = None
self.show_stdout = False
# When set to True, the browser will use the default profile. Telemetry
# will not provide an alternate profile directory.
self.dont_override_profile = False
self.profile_dir = None
self.profile_type = None
self._extra_browser_args = set()
self.extra_wpr_args = []
self.wpr_mode = wpr_modes.WPR_OFF
self.netsim = None
self.full_performance_mode = True
# The amount of time Telemetry should wait for the browser to start.
# This property is not exposed as a command line option.
self._browser_startup_timeout = 60
self.disable_background_networking = True
self.no_proxy_server = False
self.browser_user_agent_type = None
self.clear_sytem_cache_for_browser_and_profile_on_start = False
self.startup_url = 'about:blank'
# Background pages of built-in component extensions can interfere with
# performance measurements.
self.disable_component_extensions_with_background_pages = True
# Disable default apps.
self.disable_default_apps = True
# Whether to use the new code path for choosing an ephemeral port for
# DevTools. The bots set this to true. When Chrome 37 reaches stable,
# remove this setting and the old code path. http://crbug.com/379980
self.use_devtools_active_port = False
# TODO(danduong): Find a way to store target_os here instead of
# finder_options.
self._finder_options = None
def __repr__(self):
# This works around the infinite loop caused by the introduction of a
# circular reference with _finder_options.
obj = self.__dict__.copy()
del obj['_finder_options']
return str(sorted(obj.items()))
def IsCrosBrowserOptions(self):
return False
@classmethod
def AddCommandLineArgs(cls, parser):
############################################################################
# Please do not add any more options here without first discussing with #
# a telemetry owner. This is not the right place for platform-specific #
# options. #
############################################################################
group = optparse.OptionGroup(parser, 'Browser options')
profile_choices = profile_types.GetProfileTypes()
group.add_option('--profile-type',
dest='profile_type',
type='choice',
default='clean',
choices=profile_choices,
help=('The user profile to use. A clean profile is used by default. '
'Supported values: ' + ', '.join(profile_choices)))
group.add_option('--profile-dir',
dest='profile_dir',
help='Profile directory to launch the browser with. '
'A clean profile is used by default')
group.add_option('--extra-browser-args',
dest='extra_browser_args_as_string',
help='Additional arguments to pass to the browser when it starts')
group.add_option('--extra-wpr-args',
dest='extra_wpr_args_as_string',
help=('Additional arguments to pass to Web Page Replay. '
'See third_party/webpagereplay/replay.py for usage.'))
group.add_option('--netsim', default=None, type='choice',
choices=net_configs.NET_CONFIG_NAMES,
help=('Run benchmark under simulated network conditions. '
'Will prompt for sudo. Supported values: ' +
', '.join(net_configs.NET_CONFIG_NAMES)))
group.add_option('--show-stdout',
action='store_true',
help='When possible, will display the stdout of the process')
# This hidden option is to be removed, and the older code path deleted,
# once Chrome 37 reaches Stable. http://crbug.com/379980
group.add_option('--use-devtools-active-port',
action='store_true',
help=optparse.SUPPRESS_HELP)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Compatibility options')
group.add_option('--gtest_output',
help='Ignored argument for compatibility with runtest.py harness')
parser.add_option_group(group)
def UpdateFromParseResults(self, finder_options):
    """Copy browser-specific options from *finder_options* onto self.

    Each copied attribute is deleted from *finder_options* afterwards, so
    every option lives in exactly one place. Also validates and resolves
    the profile directory (exiting the process on an invalid combination)
    and replaces finder_options.browser_options with a Chrome-specific
    instance when appropriate.
    """
    browser_options_list = [
        'extra_browser_args_as_string',
        'extra_wpr_args_as_string',
        'netsim',
        'profile_dir',
        'profile_type',
        'show_stdout',
        'use_devtools_active_port',
    ]
    # Move (not copy) each parsed option from finder_options to self.
    for o in browser_options_list:
        a = getattr(finder_options, o, None)
        if a is not None:
            setattr(self, o, a)
            delattr(finder_options, o)
    self.browser_type = finder_options.browser_type
    self._finder_options = finder_options
    # Split the raw --extra-browser-args string into individual arguments
    # and drop the temporary *_as_string attribute.
    if hasattr(self, 'extra_browser_args_as_string'):  # pylint: disable=E1101
        tmp = shlex.split(
            self.extra_browser_args_as_string)  # pylint: disable=E1101
        self.AppendExtraBrowserArgs(tmp)
        delattr(self, 'extra_browser_args_as_string')
    # Likewise for the raw --extra-wpr-args string.
    if hasattr(self, 'extra_wpr_args_as_string'):  # pylint: disable=E1101
        tmp = shlex.split(
            self.extra_wpr_args_as_string)  # pylint: disable=E1101
        self.extra_wpr_args.extend(tmp)
        delattr(self, 'extra_wpr_args_as_string')
    if self.profile_type == 'default':
        self.dont_override_profile = True
    # --profile-dir and a non-clean --profile-type are mutually exclusive.
    if self.profile_dir and self.profile_type != 'clean':
        logging.critical(
            "It's illegal to specify both --profile-type and --profile-dir.\n"
            "For more information see: http://goo.gl/ngdGD5")
        sys.exit(1)
    if self.profile_dir and not os.path.isdir(self.profile_dir):
        logging.critical(
            "Directory specified by --profile-dir (%s) doesn't exist "
            "or isn't a directory.\n"
            "For more information see: http://goo.gl/ngdGD5" % self.profile_dir)
        sys.exit(1)
    if not self.profile_dir:
        self.profile_dir = profile_types.GetProfileDir(self.profile_type)
    # This deferred import is necessary because browser_options is imported in
    # telemetry/telemetry/__init__.py.
    finder_options.browser_options = CreateChromeBrowserOptions(self)
@property
def finder_options(self):
    # The finder options object this instance was populated from.
    return self._finder_options
@property
def extra_browser_args(self):
    # Extra command line arguments queued for the browser launch
    # (read-only view; add via AppendExtraBrowserArgs).
    return self._extra_browser_args
@property
def browser_startup_timeout(self):
    # How long to wait for the browser to come up before giving up.
    return self._browser_startup_timeout

@browser_startup_timeout.setter
def browser_startup_timeout(self, value):
    self._browser_startup_timeout = value
def AppendExtraBrowserArgs(self, args):
    """Queue extra command line argument(s) for the browser launch.

    *args* may be a single argument or a list of arguments. Duplicates
    are collapsed because the backing store is a set.
    """
    new_args = args if isinstance(args, list) else [args]
    self._extra_browser_args.update(new_args)
def CreateChromeBrowserOptions(br_options):
    """Wrap *br_options* in the platform-appropriate options class.

    Returns a CrosBrowserOptions copy when the host OS is ChromeOS or a
    'cros*' browser type was requested; otherwise returns *br_options*
    unchanged.
    """
    browser_type = br_options.browser_type
    on_cros_host = platform.GetHostPlatform().GetOSName() == 'chromeos'
    wants_cros_browser = bool(browser_type) and browser_type.startswith('cros')
    if on_cros_host or wants_cros_browser:
        return CrosBrowserOptions(br_options)
    return br_options
class ChromeBrowserOptions(BrowserOptions):
    """Chrome-specific browser options."""

    def __init__(self, br_options):
        super(ChromeBrowserOptions, self).__init__()
        # Shallow-copy every attribute from the generic BrowserOptions
        # instance so this object starts out as an exact clone of it.
        self.__dict__.update(br_options.__dict__)
class CrosBrowserOptions(ChromeBrowserOptions):
    """ChromeOS-specific browser options.

    Extends the Chrome options with ChromeOS login/OOBE defaults suitable
    for automated testing (local test account, no GAIA).
    """

    def __init__(self, br_options):
        super(CrosBrowserOptions, self).__init__(br_options)
        # Create a browser with oobe property.
        self.create_browser_with_oobe = False
        # Clear enterprise policy before logging in.
        self.clear_enterprise_policy = True
        # Disable GAIA/enterprise services.
        self.disable_gaia_services = True
        # Log in automatically with a local (non-GAIA) test account.
        self.auto_login = True
        self.gaia_login = False
        self.username = 'test@test.test'
        self.password = ''
| apache-2.0 |
qldhpc/eb_local | ebfiles/r/RLibs/getCRANpackages.py | 1 | 1537 | import os
import re
def myreadlines(f, newline):
    """Yield chunks of file-like *f* split on the delimiter *newline*.

    Reads in 4 KiB blocks so arbitrarily large files are streamed rather
    than loaded whole. The final (possibly empty) trailing chunk after the
    last delimiter is also yielded, mirroring str.split semantics.
    """
    pending = ""
    while True:
        pos = pending.find(newline)
        while pos != -1:
            yield pending[:pos]
            pending = pending[pos + len(newline):]
            pos = pending.find(newline)
        data = f.read(4096)
        if not data:
            # EOF: emit whatever is left (may be the empty string).
            yield pending
            return
        pending += data
webIndex='available_packages_by_name.html'
webCRAN='https://cran.r-project.org'
cranPackages='./CRAN-packages'
if not os.access('./'+cranPackages,os.R_OK):
cP = open(cranPackages,'w+');
if not os.access('./'+webIndex,os.R_OK):
os.system('wget -q '+webCRAN+'/web/packages/'+webIndex+' >/dev/null');
try:
with open(webIndex,'r') as wI:
i=0
for line in myreadlines(wI,'<tr>'):
if line.find("/web/") != -1:
pName=line.split('/')[4]
pDesc=(re.split('\<\/td\>|\n',line))
pDesc1=pDesc[2][4:]+' '+re.split('<\/tr>',pDesc[3])[0]
pVersion='0.0.0'
try:
os.system('wget -q -O ./r-pack/'+pName+'.html '+webCRAN+'/web/packages/'+pName+'/index.html')
with open('./r-pack/'+pName+'.html','r') as pN:
for v in myreadlines(pN,'<tr>'):
if v.find("Version:") != -1:
pVersion=re.split('[</td>]|\n|[<td>]',v)[15]
pN.close();
os.system('rm ./r-pack/'+pName+'.html')
except:
print "Couldn't get the version information for "+pName
output=pName+','+pVersion+','+pDesc1
cP.write(output+'\n')
except:
print "Error!! not really a helpful message"
cP.close()
cP = open(cranPackages,'r')
eb_rlibs=open('./rlibs_auto',w)
eb_rlibs.write('\
\
\
\
\
\
\
\
')
for line in cP:
| apache-2.0 |
riccardomc/moto | tests/test_opsworks/test_layers.py | 5 | 1812 | from __future__ import unicode_literals
import boto3
import sure # noqa
import re
from moto import mock_opsworks
@mock_opsworks
def test_create_layer_response():
    """create_layer returns a LayerId and rejects duplicate names/shortnames."""
    client = boto3.client('opsworks', region_name='us-east-1')
    stack_kwargs = {
        'Name': "test_stack_1",
        'Region': "us-east-1",
        'ServiceRoleArn': "service_arn",
        'DefaultInstanceProfileArn': "profile_arn",
    }
    stack_id = client.create_stack(**stack_kwargs)['StackId']
    created = client.create_layer(
        StackId=stack_id,
        Type="custom",
        Name="TestLayer",
        Shortname="TestLayerShortName"
    )
    created.should.contain("LayerId")
    # Re-using the layer Name must raise a ClientError.
    duplicate_name = client.create_layer.when.called_with(
        StackId=stack_id,
        Type="custom",
        Name="TestLayer",
        Shortname="_"
    )
    duplicate_name.should.throw(
        Exception, re.compile(r'already a layer named "TestLayer"')
    )
    # Re-using the layer Shortname must raise a ClientError as well.
    duplicate_shortname = client.create_layer.when.called_with(
        StackId=stack_id,
        Type="custom",
        Name="_",
        Shortname="TestLayerShortName"
    )
    duplicate_shortname.should.throw(
        Exception, re.compile(r'already a layer with shortname "TestLayerShortName"')
    )
@mock_opsworks
def test_describe_layers():
    """describe_layers by StackId and by LayerIds return identical data."""
    client = boto3.client('opsworks', region_name='us-east-1')
    stack_kwargs = {
        'Name': "test_stack_1",
        'Region': "us-east-1",
        'ServiceRoleArn': "service_arn",
        'DefaultInstanceProfileArn': "profile_arn",
    }
    stack_id = client.create_stack(**stack_kwargs)['StackId']
    layer_id = client.create_layer(
        StackId=stack_id,
        Type="custom",
        Name="TestLayer",
        Shortname="TestLayerShortName"
    )['LayerId']
    by_stack = client.describe_layers(StackId=stack_id)
    by_layer = client.describe_layers(LayerIds=[layer_id])
    by_stack.should.equal(by_layer)
    by_stack['Layers'][0]['Name'].should.equal("TestLayer")
| apache-2.0 |
fbalak/usmqe-tests | usmqe_tests/api/ceph/test_ceph_cluster.py | 2 | 3315 | """
REST API test suite - ceph cluster import
"""
from json.decoder import JSONDecodeError
import pytest
from usmqe.api.tendrlapi import cephapi
LOGGER = pytest.get_logger('test_cluster', module=True)
"""@pylatest default
Setup
=====
"""
"""@pylatest default
Teardown
========
"""
"""@pylatest api/ceph.cluster_import
API-ceph: cluster_import
***************************
.. test_metadata:: author mkudlej@redhat.com
Description
===========
Positive import Ceph cluster.
"""
def test_cluster_import_valid(valid_session_credentials):
    """@pylatest api/ceph.cluster_import
    .. test_step:: 1

        Get list of ids of availible nodes.

    .. test_result:: 1

        Server should return response in JSON format:

            {
            ...
              {
              "fqdn": hostname,
              "machine_id": some_id,
              "node_id": node_id
              },
            ...
            }

        Return code should be **200** with data ``{"message": "OK"}``.
    """
    api = cephapi.TendrlApiCeph(auth=valid_session_credentials)
    nodes = api.get_nodes()
    # Only nodes tagged "ceph" are eligible for a Ceph cluster import.
    ceph_nodes = [node["node_id"] for node in nodes["nodes"]
                  if "ceph" in node["tags"]]
    LOGGER.debug("Nodes for importing: {}".format(ceph_nodes))
    """@pylatest api/ceph.cluster_import
    .. test_step:: 2

        Send POST request to Tendrl API ``APIURL/CephImportCluster

    .. test_result:: 2

        Server should return response in JSON format:

            {
            "job_id": job_id
            }

        Return code should be **202**
        with data ``{"message": "Accepted"}``.
    """
    job_id = api.import_cluster(ceph_nodes)["job_id"]
    """@pylatest api/ceph.cluster_import
    .. test_step:: 3

        Wait till job is finished.

    .. test_result:: 3

        Job is succesfully finished.
    """
    api.wait_for_job_status(job_id)
    """@pylatest api/ceph.cluster_import
    .. test_step:: 4

        Check if cluster import status.

    .. test_result:: 4

        Cluster is properly imported and can be found in cluster list.
    """
    # The integration_id recorded on the import job links it to the
    # resulting entry in the cluster list.
    integration_id = api.get_job_attribute(
        job_id=job_id,
        attribute="TendrlContext.integration_id",
        section="parameters")
    LOGGER.debug("integration_id: %s" % integration_id)
    try:
        cl_list_id = [x for x in api.get_cluster_list()
                      if x.get("integration_id", "") == integration_id]
        pytest.check(
            "There should be only one integration_id '{}'".format(integration_id),
            len(cl_list_id) == 1)
        # NOTE(review): cl_list_id holds cluster *dicts*, so the membership
        # test `integration_id in cl_list_id` looks like it should compare
        # against the extracted ids instead -- verify intent.
        pytest.check(
            "Job list integration_id '{}' should be present in cluster list.".format(
                integration_id),
            integration_id in cl_list_id,
            issue="https://github.com/Tendrl/api/issues/154")
    except JSONDecodeError:
        # Known API bug: the cluster-list endpoint may return invalid JSON;
        # record an explicit failure referencing the upstream issue.
        pytest.check(False,
                     "Job list integration_id '{}' should be present in cluster list.".format(
                         integration_id),
                     issue="https://github.com/Tendrl/api/issues/166")
# TODO add test case for checking imported machines
| gpl-3.0 |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Foursquare/Users/Leaderboard.py | 5 | 3552 | # -*- coding: utf-8 -*-
###############################################################################
#
# Leaderboard
# Return the user's Leaderboard
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Leaderboard(Choreography):
    """Choreo wrapper for the Foursquare Users/Leaderboard endpoint.

    Auto-generated Temboo binding; the base Choreography class drives
    execution, this subclass only supplies the endpoint path and factories.
    """

    def __init__(self, temboo_session):
        """
        Create a new instance of the Leaderboard Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Leaderboard, self).__init__(temboo_session, '/Library/Foursquare/Users/Leaderboard')

    def new_input_set(self):
        # Factory for the input container accepted by this Choreo.
        return LeaderboardInputSet()

    def _make_result_set(self, result, path):
        # Factory for the result container produced by an execution.
        return LeaderboardResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Factory for the asynchronous execution handle of this Choreo.
        return LeaderboardChoreographyExecution(session, exec_id, path)
class LeaderboardInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Leaderboard
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_Neighbors(self, value):
        """
        Set the value of the Neighbors input for this Choreo. ((optional, integer) The number of friends' scores to return that are adjacent to your score, in ranked order.)
        """
        super(LeaderboardInputSet, self)._set_input('Neighbors', value)

    def set_OauthToken(self, value):
        """
        Set the value of the OauthToken input for this Choreo. ((required, string) The Foursquare API OAuth token string.)
        """
        super(LeaderboardInputSet, self)._set_input('OauthToken', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to json.)
        """
        super(LeaderboardInputSet, self)._set_input('ResponseFormat', value)
class LeaderboardResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Leaderboard Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE: the parameter name shadows the builtin `str`; kept as-is
        # because this is auto-generated code.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
        """
        return self._output.get('Response', None)
class LeaderboardChoreographyExecution(ChoreographyExecution):
    """Execution handle for the Leaderboard Choreo (auto-generated)."""

    def _make_result_set(self, response, path):
        # Wrap the raw response in the Leaderboard-specific ResultSet.
        return LeaderboardResultSet(response, path)
| gpl-2.0 |
ManuSchmi88/landlab | landlab/grid/tests/test_raster_funcs/test_gradients_across_cell_corners.py | 6 | 2146 | import numpy as np
from numpy.testing import assert_array_equal
from nose import with_setup
from nose.tools import assert_equal
try:
from nose.tools import assert_is
except ImportError:
from landlab.testing.tools import assert_is
from landlab.grid.raster_gradients import (
calc_grad_across_cell_corners)
def setup_unit_grid():
    """Set up a test grid with unit spacing."""
    from landlab import RasterModelGrid
    # Inject the fixtures into module globals so the @with_setup-decorated
    # tests below can reference `rmg` and `values_at_nodes` directly.
    globals().update({
        'rmg': RasterModelGrid(4, 5),
        'values_at_nodes': np.arange(20.),
    })
def setup_grid():
    """Set up a test grid."""
    from landlab import RasterModelGrid
    # Same fixtures as setup_unit_grid but with a node spacing of 2.
    globals().update({
        'rmg': RasterModelGrid(4, 5, 2.),
        'values_at_nodes': np.arange(20.),
    })
@with_setup(setup_unit_grid)
def test_scalar_arg():
    """Test using a scalar for cell arg."""
    grads = calc_grad_across_cell_corners(
        rmg, values_at_nodes, 0)
    # Corner gradients are divided by the diagonal node spacing, sqrt(2).
    assert_array_equal(grads, np.array([[6., 4., -6., -4.]]) / np.sqrt(2.))
@with_setup(setup_unit_grid)
def test_iterable():
    """Test using an iterable for cell arg."""
    grads = rmg.calc_grad_across_cell_corners(values_at_nodes, [0, 4])
    # One row of corner gradients per requested cell.
    assert_array_equal(grads, np.array([[6., 4., -6., -4.],
                                        [6., 4., -6., -4.]]) / np.sqrt(2.))
@with_setup(setup_unit_grid)
def test_with_no_cell_id_arg():
    """Test without using an arg for cell id."""
    values = np.array([0, 1, 3, 6, 10,
                       0, 1, 3, 6, 10,
                       0, 1, 3, 5, 10,
                       0, 1, -3, 6, 10], dtype=float)
    # Omitting the cell argument returns gradients for every core cell.
    grads = rmg.calc_grad_across_cell_corners(values)
    assert_array_equal(grads, (1. / np.sqrt(2.)) * np.array([
        [2., -1., -1., 2.], [2., -2., -2., 3.], [4., -3., -3., 4.],
        [-4., -1., -1., 2.], [3., -2., -2., 3.], [5., -8., -2., 5.]]))
@with_setup(setup_unit_grid)
def test_with_out_keyword():
    """Test with out keyword."""
    out = np.empty((1, 4))
    rtn = rmg.calc_grad_across_cell_corners(
        values_at_nodes, 5, out=out)
    # The supplied buffer is filled in place and returned unchanged.
    assert_is(rtn, out)
    assert_array_equal(out, np.array([[6., 4., -6., -4.]]) / np.sqrt(2))
| mit |
camilonova/django | tests/delete/tests.py | 27 | 19444 | from math import ceil
from django.db import IntegrityError, connection, models
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models import (
MR, A, Avatar, Base, Child, HiddenUser, HiddenUserProfile, M, M2MFrom,
M2MTo, MRNull, Parent, R, RChild, S, T, User, create_a, get_default_r,
)
class OnDeleteTests(TestCase):
    """Exercise each ForeignKey on_delete behavior declared on model A."""

    def setUp(self):
        # Default R instance used by the SET_DEFAULT / SET(...) fields.
        self.DEFAULT = get_default_r()

    def test_auto(self):
        # CASCADE (the default): deleting the target removes the referrer.
        a = create_a('auto')
        a.auto.delete()
        self.assertFalse(A.objects.filter(name='auto').exists())

    def test_auto_nullable(self):
        a = create_a('auto_nullable')
        a.auto_nullable.delete()
        self.assertFalse(A.objects.filter(name='auto_nullable').exists())

    def test_setvalue(self):
        # SET(...): the FK is re-pointed at the default R instance.
        a = create_a('setvalue')
        a.setvalue.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setvalue.pk)

    def test_setnull(self):
        a = create_a('setnull')
        a.setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setnull)

    def test_setdefault(self):
        a = create_a('setdefault')
        a.setdefault.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setdefault.pk)

    def test_setdefault_none(self):
        a = create_a('setdefault_none')
        a.setdefault_none.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setdefault_none)

    def test_cascade(self):
        a = create_a('cascade')
        a.cascade.delete()
        self.assertFalse(A.objects.filter(name='cascade').exists())

    def test_cascade_nullable(self):
        a = create_a('cascade_nullable')
        a.cascade_nullable.delete()
        self.assertFalse(A.objects.filter(name='cascade_nullable').exists())

    def test_protect(self):
        # PROTECT: deleting a referenced object must fail.
        a = create_a('protect')
        with self.assertRaises(IntegrityError):
            a.protect.delete()

    def test_do_nothing(self):
        # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
        # so we connect to pre_delete and set the fk to a known value.
        replacement_r = R.objects.create()

        def check_do_nothing(sender, **kwargs):
            obj = kwargs['instance']
            obj.donothing_set.update(donothing=replacement_r)
        models.signals.pre_delete.connect(check_do_nothing)
        a = create_a('do_nothing')
        a.donothing.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(replacement_r, a.donothing)
        models.signals.pre_delete.disconnect(check_do_nothing)

    def test_do_nothing_qscount(self):
        """
        A models.DO_NOTHING relation doesn't trigger a query.
        """
        b = Base.objects.create()
        with self.assertNumQueries(1):
            # RelToBase should not be queried.
            b.delete()
        self.assertEqual(Base.objects.count(), 0)

    def test_inheritance_cascade_up(self):
        # Deleting a child MTI row cascades to its implicit parent row.
        child = RChild.objects.create()
        child.delete()
        self.assertFalse(R.objects.filter(pk=child.pk).exists())

    def test_inheritance_cascade_down(self):
        # Deleting the parent row removes the child row sharing its pk.
        child = RChild.objects.create()
        parent = child.r_ptr
        parent.delete()
        self.assertFalse(RChild.objects.filter(pk=child.pk).exists())

    def test_cascade_from_child(self):
        a = create_a('child')
        a.child.delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(R.objects.filter(pk=a.child_id).exists())

    def test_cascade_from_parent(self):
        a = create_a('child')
        R.objects.get(pk=a.child_id).delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())

    def test_setnull_from_child(self):
        a = create_a('child_setnull')
        a.child_setnull.delete()
        self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)

    def test_setnull_from_parent(self):
        a = create_a('child_setnull')
        R.objects.get(pk=a.child_setnull_id).delete()
        self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)

    def test_o2o_setnull(self):
        a = create_a('o2o_setnull')
        a.o2o_setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.o2o_setnull)
class DeletionTests(TestCase):
    """Tests for cascade collection, signal ordering, and delete() counts."""

    def test_m2m(self):
        # Deleting either side of an explicit through model removes the rows.
        m = M.objects.create()
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        r.delete()
        self.assertFalse(MR.objects.exists())

        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        m.delete()
        self.assertFalse(MR.objects.exists())

        # Same for the auto-created through table of a plain M2M.
        m = M.objects.create()
        r = R.objects.create()
        m.m2m.add(r)
        r.delete()
        through = M._meta.get_field('m2m').remote_field.through
        self.assertFalse(through.objects.exists())

        r = R.objects.create()
        m.m2m.add(r)
        m.delete()
        self.assertFalse(through.objects.exists())

        # A nullable through FK keeps the row (with the FK nulled) when the
        # target is deleted.
        m = M.objects.create()
        r = R.objects.create()
        MRNull.objects.create(m=m, r=r)
        r.delete()
        # Fixed double negative: assertFalse(not X) is just assertTrue(X).
        self.assertTrue(MRNull.objects.exists())
        self.assertFalse(m.m2m_through_null.exists())

    def test_bulk(self):
        s = S.objects.create(r=R.objects.create())
        for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
            T.objects.create(s=s)
        # 1 (select related `T` instances)
        # + 1 (select related `U` instances)
        # + 2 (delete `T` instances in batches)
        # + 1 (delete `s`)
        self.assertNumQueries(5, s.delete)
        self.assertFalse(S.objects.exists())

    def test_instance_update(self):
        # Deleted instances get pk=None, and SET_NULL related objects are
        # nulled in the database.
        deleted = []
        related_setnull_sets = []

        def pre_delete(sender, **kwargs):
            obj = kwargs['instance']
            deleted.append(obj)
            if isinstance(obj, R):
                related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))

        models.signals.pre_delete.connect(pre_delete)
        a = create_a('update_setnull')
        a.setnull.delete()

        a = create_a('update_cascade')
        a.cascade.delete()

        for obj in deleted:
            self.assertIsNone(obj.pk)

        for pk_list in related_setnull_sets:
            for a in A.objects.filter(id__in=pk_list):
                self.assertIsNone(a.setnull)

        models.signals.pre_delete.disconnect(pre_delete)

    def test_deletion_order(self):
        # NOTE(review): the handler *names* appear swapped relative to the
        # lists they append to (log_post_delete fills pre_delete_order and
        # vice versa); kept as-is since the assertions depend on it -- verify
        # against upstream history before renaming.
        pre_delete_order = []
        post_delete_order = []

        def log_post_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs['instance'].pk))

        def log_pre_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs['instance'].pk))

        models.signals.post_delete.connect(log_post_delete)
        models.signals.pre_delete.connect(log_pre_delete)

        r = R.objects.create(pk=1)
        s1 = S.objects.create(pk=1, r=r)
        s2 = S.objects.create(pk=2, r=r)
        T.objects.create(pk=1, s=s1)
        T.objects.create(pk=2, s=s2)
        RChild.objects.create(r_ptr=r)
        r.delete()
        self.assertEqual(
            pre_delete_order, [(T, 2), (T, 1), (RChild, 1), (S, 2), (S, 1), (R, 1)]
        )
        self.assertEqual(
            post_delete_order, [(T, 1), (T, 2), (RChild, 1), (S, 1), (S, 2), (R, 1)]
        )

        models.signals.post_delete.disconnect(log_post_delete)
        models.signals.pre_delete.disconnect(log_pre_delete)

    def test_relational_post_delete_signals_happen_before_parent_object(self):
        deletions = []

        def log_post_delete(instance, **kwargs):
            # The parent R row must still exist while the child signal fires.
            self.assertTrue(R.objects.filter(pk=instance.r_id))
            self.assertIs(type(instance), S)
            deletions.append(instance.id)

        r = R.objects.create(pk=1)
        S.objects.create(pk=1, r=r)

        models.signals.post_delete.connect(log_post_delete, sender=S)

        try:
            r.delete()
        finally:
            models.signals.post_delete.disconnect(log_post_delete)

        self.assertEqual(len(deletions), 1)
        self.assertEqual(deletions[0], 1)

    @skipUnlessDBFeature("can_defer_constraint_checks")
    def test_can_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to delete the avatar
        # The important thing is that when we can defer constraint checks there
        # is no need to do an UPDATE on User.avatar to null it out.

        # Attach a signal to make sure we will not do fast_deletes.
        calls = []

        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)

        self.assertNumQueries(3, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    @skipIfDBFeature("can_defer_constraint_checks")
    def test_cannot_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []

        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)

        a = Avatar.objects.get(pk=u.avatar_id)
        # The below doesn't make sense... Why do we need to null out
        # user.avatar if we are going to delete the user immediately after it,
        # and there are no more cascades.
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to null out user.avatar, because we can't defer the constraint
        # 1 query to delete the avatar
        self.assertNumQueries(4, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    def test_hidden_related(self):
        r = R.objects.create()
        h = HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h)

        r.delete()
        self.assertEqual(HiddenUserProfile.objects.count(), 0)

    def test_large_delete(self):
        TEST_SIZE = 2000
        objs = [Avatar() for i in range(0, TEST_SIZE)]
        Avatar.objects.bulk_create(objs)
        # Calculate the number of queries needed.
        batch_size = connection.ops.bulk_batch_size(['pk'], objs)
        # The related fetches are done in batches.
        batches = int(ceil(float(len(objs)) / batch_size))
        # One query for Avatar.objects.all() and then one related fast delete for
        # each batch.
        fetches_to_mem = 1 + batches
        # The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
        queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
        self.assertNumQueries(queries, Avatar.objects.all().delete)
        self.assertFalse(Avatar.objects.exists())

    def test_large_delete_related(self):
        TEST_SIZE = 2000
        s = S.objects.create(r=R.objects.create())
        for i in range(TEST_SIZE):
            T.objects.create(s=s)

        batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)

        # TEST_SIZE // batch_size (select related `T` instances)
        # + 1 (select related `U` instances)
        # + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
        # + 1 (delete `s`)
        # NOTE(review): ceil() over `//` (floor division) is a no-op; with the
        # values used here both divisions happen to be exact, so the count is
        # still correct -- verify before "fixing" to true ceiling division.
        expected_num_queries = (ceil(TEST_SIZE // batch_size) +
                                ceil(TEST_SIZE // GET_ITERATOR_CHUNK_SIZE) + 2)
        self.assertNumQueries(expected_num_queries, s.delete)
        self.assertFalse(S.objects.exists())
        self.assertFalse(T.objects.exists())

    def test_delete_with_keeping_parents(self):
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())

    def test_delete_with_keeping_parents_relationships(self):
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        parent_referent_id = S.objects.create(r=child.r_ptr).pk
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())
        self.assertTrue(S.objects.filter(pk=parent_referent_id).exists())

    def test_queryset_delete_returns_num_rows(self):
        """
        QuerySet.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
        avatars_count = Avatar.objects.count()
        deleted, rows_count = Avatar.objects.all().delete()
        self.assertEqual(deleted, avatars_count)

        # more complex example with multiple object types
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
        }
        deleted, deleted_objs = R.objects.all().delete()
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)

    def test_model_delete_returns_num_rows(self):
        """
        Model.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        h2 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        HiddenUserProfile.objects.create(user=h2)
        m1 = M.objects.create()
        m2 = M.objects.create()
        MR.objects.create(r=r, m=m1)
        r.m_set.add(m1)
        r.m_set.add(m2)
        r.save()
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
            M.m2m.through._meta.label: M.m2m.through.objects.count(),
        }
        deleted, deleted_objs = r.delete()
        self.assertEqual(deleted, sum(existed_objs.values()))
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)

    def test_proxied_model_duplicate_queries(self):
        """
        #25685 - Deleting instances of a model with existing proxy
        classes should not issue multiple queries during cascade
        deletion of referring models.
        """
        avatar = Avatar.objects.create()
        # One query for the Avatar table and a second for the User one.
        with self.assertNumQueries(2):
            avatar.delete()
class FastDeleteTests(TestCase):
    """Verify that deletes which need no signals/cascade fetches are done
    with a single bulk query ("fast delete")."""

    def test_fast_delete_fk(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to fast-delete the user
        # 1 query to delete the avatar
        self.assertNumQueries(2, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())

    def test_fast_delete_m2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete f, 1 to fast-delete m2m for f
        self.assertNumQueries(2, f.delete)

    def test_fast_delete_revm2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete t, 1 to fast-delete t's m_set
        # NOTE(review): the comment above says t but the code deletes f,
        # duplicating test_fast_delete_m2m -- verify against upstream.
        self.assertNumQueries(2, f.delete)

    def test_fast_delete_qs(self):
        u1 = User.objects.create()
        u2 = User.objects.create()
        self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_joined_qs(self):
        a = Avatar.objects.create(desc='a')
        User.objects.create(avatar=a)
        u2 = User.objects.create()
        # Backends that can't self-select in UPDATE/DELETE need an extra
        # pre-fetch query.
        expected_queries = 1 if connection.features.update_can_self_select else 2
        self.assertNumQueries(expected_queries,
                              User.objects.filter(avatar__desc='a').delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_inheritance(self):
        c = Child.objects.create()
        p = Parent.objects.create()
        # 1 for self, 1 for parent
        self.assertNumQueries(2, c.delete)
        self.assertFalse(Child.objects.exists())
        self.assertEqual(Parent.objects.count(), 1)
        self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        c = Child.objects.create()
        p = c.parent_ptr
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        self.assertFalse(Child.objects.exists())

    def test_fast_delete_large_batch(self):
        User.objects.bulk_create(User() for i in range(0, 2000))
        # No problems here - we aren't going to cascade, so we will fast
        # delete the objects in a single query.
        self.assertNumQueries(1, User.objects.all().delete)
        a = Avatar.objects.create(desc='a')
        User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
        # We don't hit parameter amount limits for a, so just one query for
        # that + fast delete of the related objs.
        self.assertNumQueries(2, a.delete)
        self.assertEqual(User.objects.count(), 0)

    def test_fast_delete_empty_no_update_can_self_select(self):
        """
        #25932 - Fast deleting on backends that don't have the
        `no_update_can_self_select` feature should work even if the specified
        filter doesn't match any row.
        """
        with self.assertNumQueries(1):
            self.assertEqual(
                User.objects.filter(avatar__desc='missing').delete(),
                (0, {'delete.User': 0})
            )
| bsd-3-clause |
emetsger/osf.io | website/addons/mendeley/tests/test_api.py | 42 | 1292 | from nose.tools import *
import mock
import mendeley
import time
import datetime
from tests.base import OsfTestCase
from website.util import web_url_for
from website.addons.mendeley import model
from website.addons.mendeley.api import APISession
class MendeleyApiTestCase(OsfTestCase):
    """Tests for the Mendeley APISession wrapper."""

    def setUp(self):
        super(MendeleyApiTestCase, self).setUp()
        self.provider = model.Mendeley()
        # Partially-configured Mendeley OAuth client (no live credentials).
        self.mock_partial = mendeley.Mendeley(
            client_id='1234567890',
            client_secret='1a2s3d4f5g',
            redirect_uri='/api/v1/some/fake/url/mendeley'
        )
        self.mock_credentials = {
            'access_token': '1234567890987654321',
            'refresh_token': 'asdfghjklkjhgfdsa',
            # Expires 10 days in the future so the session will not try to
            # refresh the token during the test run.
            'expires_at': time.mktime((datetime.datetime.utcnow() + datetime.timedelta(days=10)).timetuple()),
            'token_type': 'bearer',
        }

    @mock.patch('website.addons.mendeley.api.MendeleySession.request')
    def test_request_params(self, mock_request):
        # All GET requests to Mendeley should have the param "view=all"
        client = APISession(self.mock_partial, self.mock_credentials)
        client.request()
        args, kwargs = mock_request.call_args
        assert_equal(kwargs['params'], {'view': 'all', 'limit': '500'})
| apache-2.0 |
oli-kester/advanced-av-examples | amp-osc-lv2/.waf-1.8.5-3556be08f33a5066528395b11fed89fa/waflib/Tools/fc_scan.py | 183 | 1883 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re
from waflib import Utils,Task,TaskGen,Logs
from waflib.TaskGen import feature,before_method,after_method,extension
from waflib.Configure import conf
# Matches Fortran INCLUDE statements (plain and preprocessor-style),
# capturing the included file name.
INC_REGEX = """(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
# Matches USE statements, capturing the referenced module name.
USE_REGEX = """(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"""
# Matches MODULE definitions (but not MODULE PROCEDURE), capturing the name.
MOD_REGEX = """(?:^|;)\s*MODULE(?!\s*PROCEDURE)(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"""
# Pre-compiled, case-insensitive (Fortran keywords are case-insensitive).
re_inc = re.compile(INC_REGEX, re.I)
re_use = re.compile(USE_REGEX, re.I)
re_mod = re.compile(MOD_REGEX, re.I)
class fortran_parser(object):
def __init__(self,incpaths):
self.seen=[]
self.nodes=[]
self.names=[]
self.incpaths=incpaths
def find_deps(self,node):
txt=node.read()
incs=[]
uses=[]
mods=[]
for line in txt.splitlines():
m=re_inc.search(line)
if m:
incs.append(m.group(1))
m=re_use.search(line)
if m:
uses.append(m.group(1))
m=re_mod.search(line)
if m:
mods.append(m.group(1))
return(incs,uses,mods)
def start(self,node):
self.waiting=[node]
while self.waiting:
nd=self.waiting.pop(0)
self.iter(nd)
def iter(self,node):
path=node.abspath()
incs,uses,mods=self.find_deps(node)
for x in incs:
if x in self.seen:
continue
self.seen.append(x)
self.tryfind_header(x)
for x in uses:
name="USE@%s"%x
if not name in self.names:
self.names.append(name)
for x in mods:
name="MOD@%s"%x
if not name in self.names:
self.names.append(name)
def tryfind_header(self,filename):
found=None
for n in self.incpaths:
found=n.find_resource(filename)
if found:
self.nodes.append(found)
self.waiting.append(found)
break
if not found:
if not filename in self.names:
self.names.append(filename)
| gpl-2.0 |
TiVoMaker/boto | boto/https_connection.py | 143 | 5135 | # Copyright 2007,2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is derived from
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
"""Extensions to allow HTTPS requests with SSL certificate validation."""
import re
import socket
import ssl
import boto
from boto.compat import six, http_client
class InvalidCertificateException(http_client.HTTPException):
    """Raised when a host presents a certificate that does not match it."""

    def __init__(self, host, cert, reason):
        """Record the offending host, its certificate and the failure reason.

        Args:
          host: The hostname the connection was made to.
          cert: The SSL certificate (as a dictionary) the host returned.
          reason: Human-readable description of why validation failed.
        """
        http_client.HTTPException.__init__(self)
        self.host = host
        self.cert = cert
        self.reason = reason

    def __str__(self):
        return 'Host {0} returned an invalid certificate ({1}): {2}'.format(
            self.host, self.reason, self.cert)
def GetValidHostsForCert(cert):
    """Return the list of host globs an SSL certificate is valid for.

    dNSName entries from subjectAltName take precedence; otherwise the
    commonName components of the subject are used.

    Args:
      cert: A dictionary representing an SSL certificate.
    Returns:
      list: A list of valid host globs.
    """
    if 'subjectAltName' in cert:
        return [value for (kind, value) in cert['subjectAltName']
                if kind.lower() == 'dns']
    hosts = []
    for rdn in cert['subject']:
        key, value = rdn[0]
        if key.lower() == 'commonname':
            hosts.append(value)
    return hosts
def ValidateCertificateHostname(cert, hostname):
    """Validates that a given hostname is valid for an SSL certificate.

    Each host glob from the certificate is converted into a regex ('.'
    becomes literal, '*' matches a single DNS label) and matched
    case-insensitively against the whole hostname.

    Args:
      cert: A dictionary representing an SSL certificate.
      hostname: The hostname to test.
    Returns:
      bool: Whether or not the hostname is valid for this certificate.
    """
    hosts = GetValidHostsForCert(cert)
    boto.log.debug(
        "validating server certificate: hostname=%s, certificate hosts=%s",
        hostname, hosts)
    for host in hosts:
        # Raw string so '\.' is a literal backslash-dot in the regex; the
        # original non-raw '\.' is an invalid escape sequence that warns on
        # modern Python (and only worked by accident).
        host_re = host.replace('.', r'\.').replace('*', '[^.]*')
        if re.search('^%s$' % (host_re,), hostname, re.I):
            return True
    return False
class CertValidatingHTTPSConnection(http_client.HTTPConnection):
    """An HTTPConnection that connects over SSL and validates certificates."""

    default_port = http_client.HTTPS_PORT

    def __init__(self, host, port=default_port, key_file=None, cert_file=None,
                 ca_certs=None, strict=None, **kwargs):
        """Constructor.

        Args:
          host: The hostname. Can be in 'host:port' form.
          port: The port. Defaults to 443.
          key_file: A file containing the client's private key
          cert_file: A file containing the client's certificates
          ca_certs: A file containing a set of concatenated certificate authority
              certs for validating the server against.
          strict: When true, causes BadStatusLine to be raised if the status line
              can't be parsed as a valid HTTP/1.0 or 1.1 status line.
        """
        if six.PY2:
            # Python 3.2 and newer have deprecated and removed the strict
            # parameter. Since the params are supported as keyword arguments
            # we conditionally add it here.
            kwargs['strict'] = strict
        http_client.HTTPConnection.__init__(self, host=host, port=port, **kwargs)
        self.key_file = key_file
        self.cert_file = cert_file
        self.ca_certs = ca_certs

    def connect(self):
        "Connect to a host on a given (SSL) port and validate its certificate."
        if hasattr(self, "timeout"):
            sock = socket.create_connection((self.host, self.port), self.timeout)
        else:
            sock = socket.create_connection((self.host, self.port))
        msg = "wrapping ssl socket; "
        if self.ca_certs:
            msg += "CA certificate file=%s" % self.ca_certs
        else:
            msg += "using system provided SSL certs"
        boto.log.debug(msg)
        self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,
                                    certfile=self.cert_file,
                                    cert_reqs=ssl.CERT_REQUIRED,
                                    ca_certs=self.ca_certs)
        cert = self.sock.getpeercert()
        # BUG FIX: split(':', 0) performs *no* split (maxsplit=0), so a
        # 'host:port' value was compared verbatim against the certificate
        # and always failed. maxsplit=1 strips an optional ':port' suffix.
        hostname = self.host.split(':', 1)[0]
        if not ValidateCertificateHostname(cert, hostname):
            raise InvalidCertificateException(hostname,
                                              cert,
                                              'remote hostname "%s" does not match '
                                              'certificate' % hostname)
| mit |
esikachev/scenario | sahara/tests/integration/tests/gating/test_spark_gating.py | 1 | 6192 | # Copyright 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testtools import testcase
from sahara.tests.integration.configs import config as cfg
from sahara.tests.integration.tests import base as b
from sahara.tests.integration.tests import edp
from sahara.tests.integration.tests import scaling
from sahara.tests.integration.tests import swift
from sahara.utils import edp as utils_edp
class SparkGatingTest(swift.SwiftTest, scaling.ScalingTest,
                      edp.EDPTest):
    """End-to-end gating test for the Spark plugin.

    Creates the node group templates and cluster template, boots a
    cluster, runs a Spark EDP job against it and (eventually) exercises
    scaling. All created objects are removed via addCleanup.
    """

    # Evaluated at import time from the integration-test config file.
    config = cfg.ITConfig().spark_config
    SKIP_EDP_TEST = config.SKIP_EDP_TEST

    def setUp(self):
        super(SparkGatingTest, self).setUp()
        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []

    def get_plugin_config(self):
        return cfg.ITConfig().spark_config

    @b.errormsg("Failure while 'm-nn' node group template creation: ")
    def _create_m_nn_ng_template(self):
        # Master (namenode) node group template.
        template = {
            'name': 'test-node-group-template-spark-m-nn',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for Spark plugin',
            'node_processes': self.plugin_config.MASTER_NODE_PROCESSES,
            'floating_ip_pool': self.floating_ip_pool,
            'auto_security_group': True,
            'node_configs': {}
        }
        self.ng_tmpl_m_nn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_m_nn_id)
        self.addCleanup(self.delete_objects,
                        node_group_template_id_list=[self.ng_tmpl_m_nn_id])

    @b.errormsg("Failure while 's-dn' node group template creation: ")
    def _create_s_dn_ng_template(self):
        # Worker (datanode) node group template.
        template = {
            'name': 'test-node-group-template-spark-s-dn',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for Spark plugin',
            'node_processes': self.plugin_config.WORKER_NODE_PROCESSES,
            'floating_ip_pool': self.floating_ip_pool,
            'auto_security_group': True,
            'node_configs': {}
        }
        self.ng_tmpl_s_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_s_dn_id)
        self.addCleanup(self.delete_objects,
                        node_group_template_id_list=[self.ng_tmpl_s_dn_id])

    @b.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        template = {
            'name': 'test-cluster-template-spark',
            'plugin_config': self.plugin_config,
            'description': 'test cluster template for Spark plugin',
            'cluster_configs': {'HDFS': {'dfs.replication': 1}},
            'node_groups': [
                {
                    'name': 'master-node',
                    'node_group_template_id': self.ng_tmpl_m_nn_id,
                    'count': 1
                },
                {
                    'name': 'worker-node',
                    'node_group_template_id': self.ng_tmpl_s_dn_id,
                    'count': 1
                }
            ],
            'net_id': self.internal_neutron_net
        }
        self.cluster_template_id = self.create_cluster_template(**template)
        self.addCleanup(self.delete_objects,
                        cluster_template_id=self.cluster_template_id)

    @b.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s' % (self.common_config.CLUSTER_NAME,
                                  self.plugin_config.PLUGIN_NAME)
        cluster = {
            'name': cluster_name,
            'plugin_config': self.plugin_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {}
        }
        cluster_id = self.create_cluster(**cluster)
        # BUG FIX: remember the cluster id on the instance. It was previously
        # left as the None assigned in setUp, so test_spark_plugin_gating
        # passed cluster_id=None to _test_event_log.
        self.cluster_id = cluster_id
        self.addCleanup(self.delete_objects, cluster_id=cluster_id)
        self.poll_cluster_state(cluster_id)
        self.cluster_info = self.get_cluster_info(self.plugin_config)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.plugin_config)

    @b.errormsg("Failure while EDP testing: ")
    def _check_edp(self):
        self._edp_test()

    def _edp_test(self):
        # check spark
        spark_jar = self.edp_info.read_spark_example_jar()
        spark_configs = self.edp_info.spark_example_configs()
        job_id = self.edp_testing(
            utils_edp.JOB_TYPE_SPARK,
            job_data_list=[{'jar': spark_jar}],
            lib_data_list=[],
            configs=spark_configs)
        self.poll_jobs_status([job_id])

    @b.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
        # Scaling is not implemented for Spark yet.
        pass

    @b.errormsg("Failure while EDP testing after cluster scaling: ")
    def _check_edp_after_scaling(self):
        # Leave this blank until scaling is implemented
        pass

    @testcase.attr('spark')
    @testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     'All tests for Spark plugin were skipped')
    def test_spark_plugin_gating(self):
        self._create_m_nn_ng_template()
        self._create_s_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()
        self._test_event_log(self.cluster_id)
        self._check_edp()
        if not self.plugin_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._test_event_log(self.cluster_id)
            self._check_edp_after_scaling()

    def tearDown(self):
        super(SparkGatingTest, self).tearDown()
| apache-2.0 |
grburgess/astromodels | astromodels/tests/test_functions.py | 2 | 20709 | import pytest
import os
import astropy.units as u
import numpy as np
import pickle
from astromodels.functions.function import FunctionMeta, Function1D, Function2D, FunctionDefinitionError, \
UnknownParameter, DesignViolation, get_function, get_function_class, UnknownFunction, list_functions
from astromodels.functions.functions import Powerlaw, Line
from astromodels.functions.functions_2D import Gaussian_on_sphere, SpatialTemplate_2D
from astromodels.functions.functions_3D import Continuous_injection_diffusion
from astromodels.functions import function as function_module
from astropy.io import fits
__author__ = 'giacomov'
def get_a_function_class():
    """Return a fresh linear test-function class built via FunctionMeta.

    NOTE: the docstring of the inner class is not documentation --
    FunctionMeta parses it (description/latex/parameters) to build the
    class, so its content must not be altered.
    """
    # Try to create a function inheriting from Function with meta FunctionMeta
    class Test_function(Function1D):
        r"""
        description :
            A test function
        latex : $ a * x + b $
        parameters :
            a :
                desc : linear coefficient
                initial value : 1
            b :
                desc : intercept
                initial value : 1
        """

        __metaclass__ = FunctionMeta

        def _set_units(self, x_unit, y_unit):
            # a has units of y_unit / x_unit, so that a*x has units of y_unit
            self.a.unit = y_unit / x_unit
            # b has units of y
            self.b.unit = y_unit

        def evaluate(self, x, a, b):
            return a * x + b

    return Test_function
def test_function_meta():
    """FunctionMeta must reject ill-formed function classes at creation time.

    Each Wrong_test_functionN below violates exactly one rule (missing
    evaluate/_set_units, bad signatures, docstring/parameter mismatches).
    The class docstrings are *parsed by the metaclass* and are therefore
    part of the fixture, not documentation.
    """
    with pytest.raises(AttributeError):
        # .evaluate is lacking, ._set_units is lacking, docstring is lacking
        class Wrong_test_function1():
            __metaclass__ = FunctionMeta

    with pytest.raises(AttributeError):
        # .evaluate is lacking, ._set_units is lacking
        class Wrong_test_function2(Function1D):
            r"""
            description :
                A test function
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

    with pytest.raises(AttributeError):
        # _set_units is lacking
        class Wrong_test_function3(Function1D):
            r"""
            description :
                A test function
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def evaluate(self, x, a, b):
                return a * x + b

    with pytest.raises(AssertionError):
        # Signature of evaluate is wrong
        class Wrong_test_function4(Function1D):
            r"""
            description :
                A test function
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self):
                return self.a * self.x + self.b

    with pytest.raises(FunctionDefinitionError):
        # Signature of evaluate is wrong
        class Wrong_test_function5(Function1D):
            r"""
            description :
                A test function
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self, x):
                return self.a * x + self.b

    with pytest.raises(FunctionDefinitionError):
        # Signature of evaluate is wrong
        class Wrong_test_function6(Function1D):
            r"""
            description :
                A test function
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self, x, a):
                return a * x + self.b

    with pytest.raises(FunctionDefinitionError):
        # Signature of evaluate does not match docstring
        class Wrong_test_function7(Function1D):
            r"""
            description :
                A test function
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                c :
                    desc : intercept
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self, x, a, b):
                return a * x + b

    with pytest.raises(FunctionDefinitionError):
        # Definition of parameter b is not legal
        class Wrong_test_function8(Function1D):
            r"""
            description :
                A test function
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                b :
                    desc : intercept
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self, x, a, b):
                return a * x + b

    with pytest.raises(FunctionDefinitionError):
        # Parameter c declared but not used
        class Wrong_test_function9(Function1D):
            r"""
            description :
                A test function
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
                c :
                    desc : dumb
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self, x, a, b):
                return a * x + b

    with pytest.raises(FunctionDefinitionError):
        # Parameter c used but not declared
        class Wrong_test_function10(Function1D):
            r"""
            description :
                A test function
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self, x, a, b, c):
                return a * x + b + c

    with pytest.raises(AssertionError):
        # Docstring lacking description
        class Wrong_test_function11(Function1D):
            r"""
            latex : $ a * x + b $
            parameters :
                a :
                    desc : linear coefficient
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
                c :
                    desc : dumb
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self, x, a, b):
                return a * x + b

    with pytest.raises(FunctionDefinitionError):
        # Parameter lacking description
        class Wrong_test_function12(Function1D):
            r"""
            description: useless
            latex : $ a * x + b $
            parameters :
                a :
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
                c :
                    desc : dumb
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self, x, a, b):
                return a * x + b

    with pytest.raises(AssertionError):
        # Parameters out of order in evaluate
        class Wrong_test_function13(Function2D):
            r"""
            description: useless
            latex : $ a * x + b $
            parameters :
                a :
                    desc : blah
                    initial value : 1
                b :
                    desc : intercept
                    initial value : 1
                c :
                    desc : dumb
                    initial value : 1
            """
            __metaclass__ = FunctionMeta

            def _set_units(self, x_unit, y_unit, z_unit):
                self.a.unit = y_unit / x_unit
                self.b.unit = y_unit

            def evaluate(self, y, x, a, b, c):
                return a * x + b

    # A function with no latex formula (which is optional)
    class NoLatex_test_function11(Function1D):
        r"""
        description:
            A function without latex (should be fine)
        parameters :
            a :
                desc : linear coefficient
                initial value : 1
            b :
                desc : intercept
                initial value : 1
        """
        __metaclass__ = FunctionMeta

        def _set_units(self, x_unit, y_unit):
            self.a.unit = y_unit / x_unit
            self.b.unit = y_unit

        def evaluate(self, x, a, b):
            return a * x + b
def test_function_constructor():
    """Check default/keyword construction, info() display and error paths."""
    Test_function = get_a_function_class()

    # Instance with default parameters
    my_function = Test_function()

    assert my_function.a.value == 1.0
    assert my_function.b.value == 1.0

    # Instance with explicit parameters' values
    my_function = Test_function(b=3.2, a=-2.5)

    assert my_function.a.value == -2.5
    assert my_function.b.value == 3.2

    Test_function.info()

    # Exercise the plain-text fallback of info() as well; this mutates a
    # module-level flag, so later display calls in this process are affected.
    function_module.has_ipython = False

    Test_function.info()

    print(my_function.free_parameters)

    # Unknown constructor keyword must be rejected
    with pytest.raises(UnknownParameter):
        f = Test_function(d=3.5)

    f = Test_function()

    print(f.description)
    print(f.latex)

    # This function has no fixed units and no boundaries
    assert f.fixed_units is None
    assert f.has_fixed_units() == False

    with pytest.raises(DesignViolation):
        _ = f.get_boundaries()
def test_function_values():
    """Evaluate the linear test function on scalars, lists and arrays."""
    # Get a function class, and test the various exceptions
    Test_function = get_a_function_class()

    # Try to instance it
    my_function = Test_function()

    # Test basic functionalities
    assert my_function(1.0) == 2

    my_function.a = 2.5

    assert my_function(10.0) == 26.0

    my_function.b = -1.0

    assert my_function(10.0) == 24.0

    # Now test with list and np.array
    my_function.a.value = 2.0
    my_function.b.value = 1.0

    assert np.all(my_function([1,2,3]) == np.array([3.0, 5.0, 7.0]))
    assert np.all(my_function(np.array([3, 4, 5])) == np.array([7.0, 9.0, 11.0]))
def test_function_values_units():
    """Same as test_function_values but with astropy units attached."""
    # Test units functionality

    # Get a function class, and test the various exceptions
    Test_function = get_a_function_class()

    # Try to instance it
    my_function = Test_function()

    # Differential photon flux units: 1 / (keV cm^2 s)
    diff_flux = 1.0 / (u.keV * u.cm**2 * u.s)

    my_function.set_units(u.keV, diff_flux)

    # Test basic functionalities
    assert my_function(1.0 * u.keV) == 2 * diff_flux

    my_function.a = 2.5 * diff_flux / u.keV

    assert my_function(10.0 * u.keV) == 26.0 * diff_flux

    my_function.b = -1.0 * diff_flux

    assert my_function(10.0 * u.keV) == 24.0 * diff_flux

    # Now test with list and np.array
    my_function.a.value = 2.0 * diff_flux / u.keV
    my_function.b.value = 1.0 * diff_flux

    assert np.all(my_function([1, 2, 3] * u.keV) == np.array([3.0, 5.0, 7.0]) * diff_flux)

    # Using one unit for each element will fail
    # (depending on the version of astropy, it might raise ValueError or TypeError)
    with pytest.raises((ValueError, TypeError)):
        _ = my_function([1 * u.keV, 2 * u.keV, 3 * u.keV])

    assert np.all(my_function(np.array([3, 4, 5]) * u.keV) == np.array([7.0, 9.0, 11.0]) * diff_flux)

    # Now test that an error is raised if units are not intelligible
    with pytest.raises(TypeError):
        _ = my_function.set_units("non_existent","non_existent")
def test_function_composition():
    """Exercise every composition operator (+, -, *, /, **, .of, unary ops)."""
    Test_function = get_a_function_class()

    line = Test_function()
    powerlaw = Powerlaw()

    composite = powerlaw + line

    composite.set_units(u.keV, 1.0 / (u.keV * u.cm**2 * u.s))

    # The composite must agree with the sum of its parts for every input kind
    for x in ([1,2,3,4],[1,2,3,4] * u.keV, 1.0, np.array([1.0, 2.0, 3.0, 4.0])):

        assert np.all(composite(x) == line(x) + powerlaw(x))

    # Test -
    po = Powerlaw()
    li = Line()

    composite = po - li

    assert composite(1.0) == (po(1.0) - li(1.0))

    # test *
    composite = po * li

    assert composite(2.25) == po(2.25) * li(2.25)

    # test /
    composite = po / li

    assert composite(2.25) == po(2.25) / li(2.25)

    # test .of (function composition po(li(x)))
    composite = po.of(li)

    assert composite(2.25) == po(li(2.25))

    # test power
    composite = po**li

    assert composite(2.25) == po(2.25)**li(2.25)

    # test negation
    neg_po = -po

    assert neg_po(2.25) == -po(2.25)

    # test abs
    new_li = Line()
    new_li.b = -10.0

    abs_new_li = abs(new_li)

    assert new_li(1.0) < 0
    assert abs_new_li(1.0) == abs(new_li(1.0))

    # test rpower (number raised to a function)
    composite = 2.0**new_li

    assert composite(2.25) == 2.0**(new_li(2.25))

    # test multiplication by a number
    composite = 2.0 * po

    assert composite(2.25) == 2.0 * po(2.25)

    # Number divided by
    composite = 1.0 / li

    assert composite(2.25) == 1.0 / li(2.25)

    # Composite of composite
    composite = po*li + po - li + 2*po / li

    assert composite(2.25) == po(2.25) * li(2.25) + po(2.25) - li(2.25) + 2*po(2.25) / li(2.25)

    print(composite)
def test_duplicate():
    """duplicate() must copy values but decouple the parameters."""
    instance = Powerlaw()
    instance.index = -2.25
    instance.K = 0.5

    # Duplicate it
    duplicate = instance.duplicate()

    # Check that we have the same results
    assert duplicate(2.25) == instance(2.25)

    # Check that the parameters are not linked anymore
    instance.index = -1.12

    assert instance.index.value != duplicate.index.value

    print(instance)
    print(duplicate)
def test_pickling_unpickling():
    """1D, 2D, 3D and composite functions must survive a pickle round-trip."""
    # 1d function
    po = Powerlaw()

    po.K = 5.35

    new_po = pickle.loads(pickle.dumps(po))

    assert new_po.K.value == po.K.value

    # 2d function
    gs = Gaussian_on_sphere()

    _ = pickle.loads(pickle.dumps(gs))

    # 3d function
    c = Continuous_injection_diffusion()

    _ = pickle.loads(pickle.dumps(c))

    # composite function
    po2 = Powerlaw()
    li = Line()

    composite = po2*li + po2 - li + 2*po2 / li  # type: Function1D

    # Change some parameter
    composite.K_1 = 3.2
    composite.a_2 = 1.56

    dump = pickle.dumps(composite)

    new_composite = pickle.loads(dump)

    # Parameter values must be preserved through the round-trip
    assert new_composite.K_1.value == composite.K_1.value
    assert new_composite.a_2.value == composite.a_2.value
def test_get_function():
    """get_function returns a callable instance; unknown names must raise."""
    po = get_function("Powerlaw")

    _ = po(1.0)

    with pytest.raises(UnknownFunction):
        _ = get_function("not_existant")
def test_get_function_class():
    """get_function_class returns the class object; unknown names must raise."""
    po_class = get_function_class("Powerlaw")

    assert po_class == Powerlaw

    with pytest.raises(UnknownFunction):
        _ = get_function_class("not_existant")
def test_list_functions():
    """Smoke test: listing the available functions must not raise."""
    # Use print as a function (not the Python-2-only print statement) so the
    # module also parses under Python 3; output is identical for one argument.
    print(list_functions())
def test_function2D():
    """Evaluate a 2D function on scalars/arrays, with and without units."""
    c = Gaussian_on_sphere()

    # Reference values below are regression pins, not analytic results
    f1 = c(1, 1)
    assert np.isclose( f1, 38.285617800653434, rtol=1e-10)

    a = np.array([1.0, 2.0])
    fa = c(a, a)
    assert np.isclose( fa, [3.82856178e+01, 2.35952748e-04], rtol=1e-10).all()

    c.set_units(u.deg, u.deg, 1.0 / u.deg**2)

    f1d = c(1 * u.deg, 1.0 * u.deg)
    assert np.isclose( f1d.value, 38.285617800653434, rtol=1e-10)
    assert f1d.unit == u.deg**-2

    assert c.x_unit == u.deg
    assert c.y_unit == u.deg
    assert c.z_unit == u.deg**-2

    # A spatial shape must integrate to unity over the sphere
    assert c.get_total_spatial_integral( 1 ) == 1
    assert np.isclose( c.get_total_spatial_integral( [1,1] ) , [1,1], rtol=1e-10).all()

    with pytest.raises(TypeError):
        c.set_units("not existent", u.deg, u.keV)
def test_function3D():
    """Evaluate a 3D function on scalars/arrays, with and without units."""
    c = Continuous_injection_diffusion()

    # Reference values below are regression pins, not analytic results
    f1 = c(1, 1, 1)
    assert np.isclose(f1, 134.95394313247866, rtol = 1e-10)

    a = np.array([1.0, 2.0])
    fa = c(a, a, a)
    assert np.isclose( fa, [[134.95394313, 132.19796573], [ 25.40751507, 27.321443 ]], rtol=1e-10).all()

    c.set_units(u.deg, u.deg, u.keV, 1.0 / u.deg**2)

    f1d = c(1 * u.deg, 1.0 * u.deg, 1.0 * u.keV)
    assert np.isclose(f1d.value, 134.95394313247866, rtol = 1e-10)
    assert f1d.unit == u.deg**-2

    assert c.x_unit == u.deg
    assert c.y_unit == u.deg
    assert c.z_unit == u.keV
    assert c.w_unit == u.deg**-2

    # A spatial shape must integrate to unity
    assert c.get_total_spatial_integral( 1 ) == 1
    assert np.isclose( c.get_total_spatial_integral( [1,1] ) , [1,1], rtol=1e-10).all()

    with pytest.raises(TypeError):
        c.set_units("not existent", u.deg, u.keV, 1.0 / (u.keV * u.s * u.deg**2 * u.cm**2))
def test_spatial_template_2D():
    """Build two FITS templates on disk and check SpatialTemplate_2D evaluation.

    The temporary files are always removed, even when an assertion fails
    (previously a failing assert leaked test1.fits/test2.fits in the CWD).
    """
    # make the fits files with templates to test.
    # NOTE(review): NAXIS1/NAXIS2 say 360 but the data array is 400x400;
    # astropy rewrites NAXIS from the data on write -- confirm intended.
    cards = {
        "SIMPLE": "T",
        "BITPIX": -32,
        "NAXIS" : 2,
        "NAXIS1": 360,
        "NAXIS2": 360,
        "DATE": '2018-06-15',
        "CUNIT1": 'deg',
        "CRVAL1": 83,
        "CRPIX1": 0,
        "CDELT1": -0.0166667,
        "CUNIT2": 'deg',
        "CRVAL2": -2.0,
        "CRPIX2": 0,
        "CDELT2": 0.0166667,
        "CTYPE1": 'GLON-CAR',
        "CTYPE2": 'GLAT-CAR' }

    try:
        # Template 1: unit square in the lower-left corner
        data = np.zeros([400,400])
        data[0:100,0:100] = 1
        hdu = fits.PrimaryHDU(data=data, header=fits.Header(cards))
        hdu.writeto("test1.fits", overwrite=True)

        # Template 2: unit square in the middle
        data[:,:] = 0
        data[200:300,200:300] = 1
        hdu = fits.PrimaryHDU(data=data, header=fits.Header(cards))
        hdu.writeto("test2.fits", overwrite=True)

        # Now load template files and test their evaluation
        shape1 = SpatialTemplate_2D()
        shape1.load_file("test1.fits")
        shape1.K = 1

        shape2 = SpatialTemplate_2D()
        shape2.load_file("test2.fits")
        shape2.K = 1

        # Different files must yield different hashes
        assert shape1.hash != shape2.hash

        # Each template is 1 inside its own square and 0 in the other's
        assert np.all ( shape1.evaluate( [312, 306], [41, 41], [1,1], [40, 2]) == [1., 0.] )
        assert np.all ( shape2.evaluate( [312, 306], [41, 41], [1,1], [40, 2]) == [0., 1.] )

        # K scales the template value
        assert np.all ( shape1.evaluate( [312, 306], [41, 41], [1,10], [40, 2]) == [1., 0.] )
        assert np.all ( shape2.evaluate( [312, 306], [41, 41], [1,10], [40, 2]) == [0., 10.] )

        shape1.K = 1
        shape2.K = 1
        assert np.all ( shape1( [312, 306], [41, 41]) == [1., 0.] )
        assert np.all ( shape2( [312, 306], [41, 41]) == [0., 1.] )

        shape1.K = 1
        shape2.K = 10
        assert np.all ( shape1( [312, 306], [41, 41]) == [1., 0.] )
        assert np.all ( shape2( [312, 306], [41, 41]) == [0., 10.] )
    finally:
        # Clean up regardless of test outcome; guard against a file that
        # was never created because an earlier step failed.
        for fname in ("test1.fits", "test2.fits"):
            if os.path.exists(fname):
                os.remove(fname)
| bsd-3-clause |
Titulacion-Sistemas/PythonTitulacion-EV | Lib/site-packages/pywin32-219-py2.7-win32.egg/win32comext/axdebug/documents.py | 18 | 4415 | """ Management of documents for AXDebugging.
"""
import axdebug, gateways
import pythoncom
from util import _wrap, _wrap_remove, RaiseNotImpl, trace
from win32com.server.util import unwrap
import codecontainer
import contexts
from win32com.server.exception import Exception
import win32api, winerror, os, string, sys
#def trace(*args):
# pass
def GetGoodFileName(fname):
    """Normalize a debugger file name.

    Pseudo-names such as "<string>" (anything starting with "<") are
    returned untouched; real paths are expanded to their full form.
    """
    if fname[0] == "<":
        return fname
    return win32api.GetFullPathName(fname)
class DebugDocumentProvider(gateways.DebugDocumentProvider):
    """IDebugDocumentProvider implementation that forwards to a wrapped document."""

    def __init__(self, doc):
        # doc: the DebugDocument(Text) this provider hands out.
        self.doc = doc

    def GetName(self, dnt):
        # dnt (DOCUMENTNAMETYPE) selects which flavour of name is wanted.
        return self.doc.GetName(dnt)

    def GetDocumentClassId(self):
        return self.doc.GetDocumentClassId()

    def GetDocument(self):
        return self.doc
class DebugDocumentText(gateways.DebugDocumentInfo, gateways.DebugDocumentText, gateways.DebugDocument):
    """IDebugDocumentText implementation backed by a code container.

    All text/position queries are delegated to the codeContainer object.
    """

    _com_interfaces_ = gateways.DebugDocumentInfo._com_interfaces_ + \
                       gateways.DebugDocumentText._com_interfaces_ + \
                       gateways.DebugDocument._com_interfaces_
    _public_methods_ = gateways.DebugDocumentInfo._public_methods_ + \
                       gateways.DebugDocumentText._public_methods_ + \
                       gateways.DebugDocument._public_methods_

    # A class which implements a DebugDocumentText, using the functionality
    # provided by a codeContainer
    def __init__(self, codeContainer):
        gateways.DebugDocumentText.__init__(self)
        gateways.DebugDocumentInfo.__init__(self)
        gateways.DebugDocument.__init__(self)
        self.codeContainer = codeContainer

    def _Close(self):
        # Drop references so the container/document cycle can be collected;
        # the object is unusable afterwards.
        self.docContexts = None
        # self.codeContainer._Close()
        self.codeContainer = None

    # IDebugDocumentInfo
    def GetName(self, dnt):
        return self.codeContainer.GetName(dnt)

    def GetDocumentClassId(self):
        # CLSID identifying this debug document text implementation.
        return "{DF630910-1C1D-11d0-AE36-8C0F5E000000}"

    # IDebugDocument has no methods!
    #
    # IDebugDocumentText methods.
    # def GetDocumentAttributes
    def GetSize(self):
        # trace("GetSize")
        # Returns (number of lines, number of characters).
        return self.codeContainer.GetNumLines(), self.codeContainer.GetNumChars()

    def GetPositionOfLine(self, cLineNumber):
        return self.codeContainer.GetPositionOfLine(cLineNumber)

    def GetLineOfPosition(self, charPos):
        return self.codeContainer.GetLineOfPosition(charPos)

    def GetText(self, charPos, maxChars, wantAttr):
        # Get all the attributes, else the tokenizer will get upset.
        # XXX - not yet!
        # trace("GetText", charPos, maxChars, wantAttr)
        cont = self.codeContainer
        attr = cont.GetSyntaxColorAttributes()
        return cont.GetText(), attr

    def GetPositionOfContext(self, context):
        trace("GetPositionOfContext", context)
        # NOTE(review): unwrap assumes the context is one of our wrapped
        # Python objects exposing .offset/.length -- confirm callers.
        context = unwrap(context)
        return context.offset, context.length

    # Return a DebugDocumentContext.
    def GetContextOfPosition(self, charPos, maxChars):
        # Make one
        doc = _wrap(self, axdebug.IID_IDebugDocument)
        rc = self.codeContainer.GetCodeContextAtPosition(charPos)
        return rc.QueryInterface(axdebug.IID_IDebugDocumentContext)
class CodeContainerProvider:
    """An abstract Python class which provides code containers!

    Given a Python file name (as the debugger knows it by) this will
    return a CodeContainer interface suitable for use.

    This provides a simple base implementation that simply supports
    a dictionary of nodes and providers.
    """

    def __init__(self):
        # Maps normalized file name -> (code container, debugger node or None)
        self.ccsAndNodes = {}

    def AddCodeContainer(self, cc, node=None):
        """Register *cc* (and optionally its debugger node) under its file name."""
        fname = GetGoodFileName(cc.fileName)
        self.ccsAndNodes[fname] = cc, node

    def FromFileName(self, fname):
        """Return the code container registered for *fname*, or None."""
        cc, node = self.ccsAndNodes.get(GetGoodFileName(fname), (None, None))
        # if cc is None:
        #     print "FromFileName for %s returning None" % fname
        return cc

    def Close(self):
        """Close all registered nodes/containers and forget them."""
        # values() instead of the Python-2-only itervalues() so this module
        # also runs under Python 3; behaviour is identical.
        for cc, node in self.ccsAndNodes.values():
            try:
                # Must close the node before closing the provider
                # as node may make calls on provider (eg Reset breakpoints etc)
                if node is not None:
                    node.Close()
                cc._Close()
            except pythoncom.com_error:
                pass
        self.ccsAndNodes = {}
| mit |
geekboxzone/lollipop_external_chromium_org_third_party_WebKit | Source/bindings/scripts/compute_interfaces_info_overall.py | 9 | 14098 | #!/usr/bin/python
#
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Compute global interface information, including public information, dependencies, and inheritance.
Computed data is stored in a global variable, |interfaces_info|, and written as
output (concretely, exported as a pickle). This is then used by the IDL compiler
itself, so it does not need to compute global information itself, and so that
inter-IDL dependencies are clear, since they are all computed here.
The |interfaces_info| pickle is a *global* dependency: any changes cause a full
rebuild. This is to avoid having to compute which public data is visible by
which IDL files on a file-by-file basis, which is very complex for little
benefit.
|interfaces_info| should thus only contain data about an interface that
contains paths or is needed by *other* interfaces, e.g., path data (to abstract
the compiler from OS-specific file paths) or public data (to avoid having to
read other interfaces unnecessarily).
It should *not* contain full information about an interface (e.g., all
extended attributes), as this would cause unnecessary rebuilds.
|interfaces_info| is a dict, keyed by |interface_name|.
Current keys are:
* dependencies:
'implements_interfaces': targets of 'implements' statements
'referenced_interfaces': reference interfaces that are introspected
(currently just targets of [PutForwards])
* inheritance:
'ancestors': all ancestor interfaces
'inherited_extended_attributes': inherited extended attributes
(all controlling memory management)
* public:
'is_callback_interface': bool, callback interface or not
'implemented_as': value of [ImplementedAs=...] on interface (C++ class name)
* paths:
'full_path': path to the IDL file, so can lookup an IDL by interface name
'include_path': path for use in C++ #include directives
'dependencies_full_paths': paths to dependencies (for merging into main)
'dependencies_include_paths': paths for use in C++ #include directives
Note that all of these are stable information, unlikely to change without
moving or deleting files (hence requiring a full rebuild anyway) or significant
code changes (for inherited extended attributes).
Design doc: http://www.chromium.org/developers/design-documents/idl-build
"""
from collections import defaultdict
import cPickle as pickle
import optparse
import sys
from utilities import idl_filename_to_component, read_pickle_files, write_pickle_file
# Extended attributes that propagate down the interface inheritance chain.
# These all control memory management, so a derived interface must pick up
# its ancestors' values (see compute_inheritance_info).
INHERITED_EXTENDED_ATTRIBUTES = set([
    'ActiveDOMObject',
    'DependentLifetime',
    'GarbageCollected',
    'NotScriptWrappable',
    'WillBeGarbageCollected',
])

# Main variable (filled in and exported)
interfaces_info = {}

# Auxiliary variables (not visible to future build steps)
# Paths of partial-interface files, keyed by the name of the interface they
# extend; merged into that interface's dependency lists later.
partial_interface_files = defaultdict(lambda: {
    'full_paths': [],
    'include_paths': [],
})
# interface name -> parent interface name (only interfaces that have a parent)
parent_interfaces = {}
inherited_extended_attributes_by_interface = {}  # interface name -> extended attributes
class IdlInterfaceFileNotFoundError(Exception):
    """Raised when no IDL file defining a referenced interface can be located."""
def parse_options():
    """Parse command-line options.

    Returns an (options, args) pair. args holds the positional pickle paths
    (inputs followed by the output file). The mandatory
    --write-file-only-if-changed integer flag is normalized to a bool.
    """
    option_parser = optparse.OptionParser(
        usage='Usage: %prog [InfoIndividual.pickle]... [Info.pickle]')
    option_parser.add_option(
        '--write-file-only-if-changed',
        type='int',
        help='if true, do not write an output file if it would be identical to the existing one, which avoids unnecessary rebuilds in ninja')
    options, args = option_parser.parse_args()
    if options.write_file_only_if_changed is None:
        option_parser.error('Must specify whether file is only written if changed using --write-file-only-if-changed.')
    options.write_file_only_if_changed = bool(options.write_file_only_if_changed)
    return options, args
def dict_of_dicts_of_lists_update_or_append(existing, other):
    """Update an existing dict of dicts of lists in place.

    Keys new to |existing| are inserted (sharing |other|'s inner dict object,
    as before); for keys already present, each inner list is extended with the
    corresponding entries from |other|.

    Needed for merging partial_interface_files across components.

    Note: uses dict.items() rather than the Python 2-only dict.iteritems();
    behavior is identical in Python 2 but this keeps the helper portable to
    Python 3.
    """
    for key, value in other.items():
        if key not in existing:
            existing[key] = value
            continue
        existing_value = existing[key]
        for inner_key, inner_value in value.items():
            existing_value[inner_key].extend(inner_value)
################################################################################
# Computations
################################################################################
def compute_inheritance_info(interface_name):
    """Compute ancestors and inherited extended attributes for one interface."""
    # Walk the parent chain, collecting ancestors nearest-first.
    ancestors = []
    ancestor_name = interface_name
    while ancestor_name in parent_interfaces:
        ancestor_name = parent_interfaces[ancestor_name]
        ancestors.append(ancestor_name)

    # Fold each ancestor's inherited extended attributes into this
    # interface's own dict (mutating the shared per-interface dict, exactly
    # as before).
    inherited_extended_attributes = inherited_extended_attributes_by_interface[interface_name]
    for ancestor in ancestors:
        # Ancestors may not be present, notably if an ancestor is a generated
        # IDL file and we are running this script from run-bindings-tests,
        # where we don't generate these files.
        inherited_extended_attributes.update(
            inherited_extended_attributes_by_interface.get(ancestor, {}))

    interfaces_info[interface_name].update({
        'ancestors': ancestors,
        'inherited_extended_attributes': inherited_extended_attributes,
    })
def compute_global_type_info():
    """Aggregate per-interface data into global lookup tables.

    The tables are stored directly in interfaces_info (under reserved keys
    such as 'ancestors' and 'callback_interfaces') so the IDL compiler does
    not need to recompute them in its __init__.
    """
    ancestors = {}
    dictionaries = {}
    component_dirs = {}
    implemented_as_interfaces = {}
    will_be_garbage_collected_interfaces = set()
    garbage_collected_interfaces = set()
    callback_interfaces = set()
    # dict.items() rather than the Python 2-only iteritems(): identical
    # behavior in Python 2, portable to Python 3.
    for interface_name, interface_info in interfaces_info.items():
        component_dirs[interface_name] = idl_filename_to_component(interface_info['full_path'])
        if interface_info['ancestors']:
            ancestors[interface_name] = interface_info['ancestors']
        if interface_info['is_callback_interface']:
            callback_interfaces.add(interface_name)
        if interface_info['is_dictionary']:
            dictionaries[interface_name] = interface_info['is_dictionary']
        if interface_info['implemented_as']:
            implemented_as_interfaces[interface_name] = interface_info['implemented_as']
        inherited_extended_attributes = interface_info['inherited_extended_attributes']
        if 'WillBeGarbageCollected' in inherited_extended_attributes:
            will_be_garbage_collected_interfaces.add(interface_name)
        if 'GarbageCollected' in inherited_extended_attributes:
            garbage_collected_interfaces.add(interface_name)
    interfaces_info['ancestors'] = ancestors
    interfaces_info['callback_interfaces'] = callback_interfaces
    interfaces_info['dictionaries'] = dictionaries
    interfaces_info['implemented_as_interfaces'] = implemented_as_interfaces
    interfaces_info['garbage_collected_interfaces'] = garbage_collected_interfaces
    interfaces_info['will_be_garbage_collected_interfaces'] = will_be_garbage_collected_interfaces
    interfaces_info['component_dirs'] = component_dirs
def compute_interfaces_info_overall(info_individuals):
    """Compute information about IDL files.

    Information is stored in global interfaces_info.

    |info_individuals| is a sequence of dicts produced by the per-file step,
    each carrying 'interfaces_info' and 'partial_interface_files' entries.

    Portability fix: dict.items()/values() are used instead of the
    Python 2-only iteritems()/itervalues(); behavior is identical.
    """
    for info in info_individuals:
        # No overlap between interface names, so ok to use dict.update
        interfaces_info.update(info['interfaces_info'])
        # Interfaces in one component may have partial interfaces in
        # another component. This is ok (not a layering violation), since
        # partial interfaces are used to *extend* interfaces.
        # We thus need to update or append if already present
        dict_of_dicts_of_lists_update_or_append(
            partial_interface_files, info['partial_interface_files'])

    # Record inheritance information individually
    for interface_name, interface_info in interfaces_info.items():
        extended_attributes = interface_info['extended_attributes']
        inherited_extended_attributes_by_interface[interface_name] = dict(
            (key, value)
            for key, value in extended_attributes.items()
            if key in INHERITED_EXTENDED_ATTRIBUTES)
        parent = interface_info['parent']
        if parent:
            parent_interfaces[interface_name] = parent

    # Once all individual files handled, can compute inheritance information
    # and dependencies

    # Compute inheritance info
    for interface_name in interfaces_info:
        compute_inheritance_info(interface_name)

    # Compute dependencies
    # Move implements info from implement*ed* interface (rhs of 'implements')
    # to implement*ing* interface (lhs of 'implements').
    # Note that moving an 'implements' statement between implementing and
    # implemented files does not change the info (or hence cause a rebuild)!
    # (Only inner dicts are mutated here, so iterating items() is safe.)
    for right_interface_name, interface_info in interfaces_info.items():
        for left_interface_name in interface_info['implemented_by_interfaces']:
            interfaces_info[left_interface_name]['implements_interfaces'].append(right_interface_name)
        del interface_info['implemented_by_interfaces']

    # An IDL file's dependencies are partial interface files that extend it,
    # and files for other interfaces that this interfaces implements.
    for interface_name, interface_info in interfaces_info.items():
        partial_interface_paths = partial_interface_files[interface_name]
        partial_interfaces_full_paths = partial_interface_paths['full_paths']
        # Partial interface definitions each need an include, as they are
        # implemented in separate classes from the main interface.
        partial_interfaces_include_paths = partial_interface_paths['include_paths']

        implemented_interfaces = interface_info['implements_interfaces']
        try:
            implemented_interfaces_info = [
                interfaces_info[interface]
                for interface in implemented_interfaces]
        except KeyError as key_name:
            raise IdlInterfaceFileNotFoundError('Could not find the IDL file where the following implemented interface is defined: %s' % key_name)
        implemented_interfaces_full_paths = [
            implemented_interface_info['full_path']
            for implemented_interface_info in implemented_interfaces_info]
        # Implemented interfaces don't need includes, as this is handled in
        # the Blink implementation (they are implemented on |impl| itself,
        # hence header is included in implementing class).
        # However, they are needed for legacy implemented interfaces that
        # are being treated as partial interfaces, until we remove these.
        # http://crbug.com/360435
        implemented_interfaces_include_paths = []
        for implemented_interface_info in implemented_interfaces_info:
            if (implemented_interface_info['is_legacy_treat_as_partial_interface'] and
                    implemented_interface_info['include_path']):
                implemented_interfaces_include_paths.append(implemented_interface_info['include_path'])

        interface_info.update({
            'dependencies_full_paths': (partial_interfaces_full_paths +
                                        implemented_interfaces_full_paths),
            'dependencies_include_paths': (partial_interfaces_include_paths +
                                           implemented_interfaces_include_paths),
        })

    # Clean up temporary private information
    for interface_info in interfaces_info.values():
        del interface_info['extended_attributes']
        del interface_info['is_legacy_treat_as_partial_interface']
        del interface_info['parent']

    # Compute global_type_info to interfaces_info so that idl_compiler does
    # not need to always calculate the info in __init__.
    compute_global_type_info()
################################################################################
def main():
    """Merge per-file info pickles (all argv inputs) and write the overall
    interfaces-info pickle (the last positional argument)."""
    options, args = parse_options()
    # args = Input1, Input2, ..., Output
    interfaces_info_filename = args[-1]
    info_individuals = read_pickle_files(args[:-1])
    compute_interfaces_info_overall(info_individuals)
    write_pickle_file(interfaces_info_filename,
                      interfaces_info,
                      options.write_file_only_if_changed)


if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause |
oihane/odoo | addons/report_webkit/ir_report.py | 382 | 3807 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import openerp
from openerp.osv import fields, orm
from webkit_report import WebKitParser
class ir_actions_report_xml(orm.Model):
    """Extend ir.actions.report.xml with webkit (wkhtmltopdf) report fields
    and a webkit-aware report lookup."""
    _inherit = 'ir.actions.report.xml'
    _columns = {
        # Company-dependent property pointing at the webkit header record.
        'webkit_header': fields.property(
            type='many2one', relation='ir.header_webkit',
            string='Webkit Header', help="The header linked to the report",
            required=True),
        'webkit_debug': fields.boolean('Webkit debug',
            help="Enable the webkit engine debugger"),
        'report_webkit_data': fields.text('Webkit Template',
            help="This template will be used if the main report file is not found"),
        'precise_mode': fields.boolean('Precise Mode',
            help="This mode allow more precise element position as each object"
            " is printed on a separate HTML but memory and disk usage are wider.")
    }

    def _lookup_report(self, cr, name):
        """
        Look up a report definition.

        Returns a WebKitParser for webkit-type reports; any other report
        type is deferred to the parent implementation.

        :param cr: database cursor
        :param name: report service name (without the 'report.' prefix)
        """
        import operator
        import os
        opj = os.path.join
        # First lookup in the deprecated place, because if the report definition
        # has not been updated, it is more likely the correct definition is there.
        # Only reports with custom parser specified in Python are still there.
        if 'report.' + name in openerp.report.interface.report_int._reports:
            new_report = openerp.report.interface.report_int._reports['report.' + name]
            if not isinstance(new_report, WebKitParser):
                # A registered report of another type: not ours to handle.
                new_report = None
        else:
            # Otherwise read the (webkit-only) definition from the database.
            cr.execute("SELECT * FROM ir_act_report_xml WHERE report_name=%s and report_type=%s", (name, 'webkit'))
            r = cr.dictfetchone()
            if r:
                if r['parser']:
                    # Resolve the dotted parser path inside openerp.addons.
                    parser = operator.attrgetter(r['parser'])(openerp.addons)
                    kwargs = { 'parser': parser }
                else:
                    kwargs = {}
                new_report = WebKitParser('report.'+r['report_name'],
                    r['model'], opj('addons',r['report_rml'] or '/'),
                    header=r['header'], register=False, **kwargs)
            else:
                new_report = None
        if new_report:
            return new_report
        else:
            # Not a webkit report: fall back to the standard lookup.
            return super(ir_actions_report_xml, self)._lookup_report(cr, name)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cwill747/rtc2git | tests/test_configuration.py | 2 | 10200 | import unittest
import os
from configuration import Builder
import configuration
import shell
from tests import testhelper
class ConfigurationTestCase(unittest.TestCase):
    """Tests for configuration.Builder and configuration.read().

    setUp resets the module-global state in ``shell`` and ``configuration``
    so each test starts from a clean slate.
    """

    def setUp(self):
        self.workdirectory = os.path.dirname(os.path.realpath(__file__))
        # reset global shell variables
        shell.logcommands = False
        shell.encoding = None
        configuration.setconfigfile(None)
        configuration.setUser(None)
        configuration.setPassword(None)

    def test_DeletionOfFolder(self):
        # getlogpath() implicitly creates the log folder ...
        config = Builder().setworkdirectory(self.workdirectory).build()
        samplepath = os.path.dirname(config.getlogpath("anyPath"))
        self.assertTrue(os.path.exists(samplepath))
        # ... and deletelogfolder() removes it again.
        config.deletelogfolder()
        self.assertFalse(os.path.exists(samplepath))

    def test_ReaddingLogFolderAfterDeletion(self):
        config = Builder().setworkdirectory(self.workdirectory).build()
        samplepath = os.path.dirname(config.getlogpath("anyPath"))
        self.assertTrue(os.path.exists(samplepath))
        config.deletelogfolder()
        self.assertFalse(os.path.exists(samplepath))
        # Requesting a log path after deletion recreates the folder.
        samplepath = os.path.dirname(config.getlogpath("anyPath"))
        self.assertTrue(os.path.exists(samplepath))

    # Boolean config entries arrive as strings and must be parsed.
    def test_sampleBoolConfigEntrySetToFalse_ShouldBeFalse(self):
        config = Builder().setuseautomaticconflictresolution("False").build()
        self.assertFalse(config.useautomaticconflictresolution)

    def test_sampleBoolConfigEntrySetToTrue_ShouldBeTrue(self):
        config = Builder().setuseautomaticconflictresolution("True").build()
        self.assertTrue(config.useautomaticconflictresolution)

    def test_getSampleConfig_ExpectInitializedConfigWithDefaultValues(self):
        config = configuration.read(testhelper.getrelativefilename("../config.ini.sample"))
        self.assertEqual("lscm", config.scmcommand)
        # read() also stores the result as the global config.
        self.assertEqual(config, configuration.get())

    # --- ignorefileextensions parsing -------------------------------------
    def test_fileExtensionsToBeIgnored_ShouldBeEmpty_FromNone(self):
        config = Builder().setignorefileextensions(configuration.parsesplittedproperty(None)).build()
        self.assertEqual(0, len(config.ignorefileextensions))

    def test_fileExtensionsToBeIgnored_ShouldBeEmpty_FromEmpty(self):
        config = Builder().setignorefileextensions("").build()
        self.assertEqual(0, len(config.ignorefileextensions))

    def test_fileExtensionsToBeIgnored_SingleExtension(self):
        # Surrounding whitespace is expected to be stripped.
        config = Builder().setignorefileextensions(configuration.parsesplittedproperty(" .zip ")).build()
        self.assertEqual(1, len(config.ignorefileextensions))
        self.assertEqual(['.zip'], config.ignorefileextensions)

    def test_fileExtensionsToBeIgnored_MultipleExtensions(self):
        # Entries are separated by ';'.
        config = Builder().setignorefileextensions(configuration.parsesplittedproperty(".zip; .jar; .exe")) \
            .build()
        self.assertEqual(3, len(config.ignorefileextensions))
        self.assertEqual(['.zip', '.jar', '.exe'], config.ignorefileextensions)

    # --- ignoredirectories parsing ----------------------------------------
    def test_directoriesToBeIgnored_ShouldBeEmpty_FromNone(self):
        config = Builder().setignoredirectories(configuration.parsesplittedproperty(None)).build()
        self.assertEqual(0, len(config.ignoredirectories))

    def test_directoriesToBeIgnored_ShouldBeEmpty_FromEmpty(self):
        config = Builder().setignoredirectories("").build()
        self.assertEqual(0, len(config.ignoredirectories))

    def test_directoriesToBeIgnored_SingleExtension(self):
        config = Builder().setignoredirectories(configuration.parsesplittedproperty(" project/dist ")).build()
        self.assertEqual(1, len(config.ignoredirectories))
        self.assertEqual(['project/dist'], config.ignoredirectories)

    def test_directoriesToBeIgnored_MultipleExtensions(self):
        config = Builder().setignoredirectories(configuration.parsesplittedproperty(" project/dist ; project/lib ; out ")) \
            .build()
        self.assertEqual(3, len(config.ignoredirectories))
        self.assertEqual(['project/dist', 'project/lib', 'out'], config.ignoredirectories)

    # --- gitattributes parsing --------------------------------------------
    def test_gitattributes_ShouldBeEmpty_FromNone(self):
        config = Builder().setgitattributes(configuration.parsesplittedproperty(None)).build()
        self.assertEqual(0, len(config.gitattributes))

    def test_gitattributes_ShouldBeEmpty_FromEmpty(self):
        config = Builder().setgitattributes(configuration.parsesplittedproperty("")).build()
        self.assertEqual(0, len(config.gitattributes))

    def test_gitattributes__SingleProperty(self):
        config = Builder().setgitattributes(configuration.parsesplittedproperty(" * text=auto ")).build()
        self.assertEqual(1, len(config.gitattributes))
        self.assertEqual(['* text=auto'], config.gitattributes)

    def test_gitattributes__MultipleProperties(self):
        config = Builder().setgitattributes(configuration.parsesplittedproperty(" # some comment ; * text=auto ; *.sql text ")).build()
        self.assertEqual(3, len(config.gitattributes))
        self.assertEqual(['# some comment', '* text=auto', '*.sql text'], config.gitattributes)

    # --- reading whole config files ---------------------------------------
    def test_read_passedin_configfile(self):
        self._assertTestConfig(configuration.read(testhelper.getrelativefilename('resources/test_config.ini')))

    def test_read_passedin_configfile_expect_override_user_password(self):
        # Credentials set programmatically take precedence over the file.
        configuration.setUser('newUser')
        configuration.setPassword('newPassword')
        self._assertTestConfig(configuration.read(testhelper.getrelativefilename('resources/test_config.ini')),
                               user='newUser', password='newPassword')

    def test_read_configfile_from_configuration(self):
        configuration.setconfigfile(testhelper.getrelativefilename('resources/test_config.ini'))
        self._assertTestConfig(configuration.read())

    def test_read_minimumconfigfile_shouldrelyonfallbackvalues(self):
        configuration.setconfigfile(testhelper.getrelativefilename('resources/test_minimum_config.ini'))
        self._assertDefaultConfig(configuration.read())

    def _assertTestConfig(self, config, user=None, password=None):
        # Asserts every value from resources/test_config.ini; |user| and
        # |password| override the expected credentials when provided.
        # [General]
        self.assertEqual('https://rtc.supercompany.com/ccm/', config.repo)
        if not user:
            self.assertEqual('superuser', config.user)
        else:
            self.assertEqual(user, config.user)
        if not password:
            self.assertEqual('supersecret', config.password)
        else:
            self.assertEqual(password, config.password)
        self.assertEqual('super.git', config.gitRepoName)
        self.assertEqual('Superworkspace', config.workspace)
        self.assertEqual('/tmp/migration', config.workDirectory)
        self.assertTrue(config.useexistingworkspace)
        self.assertEqual('scm', config.scmcommand)
        self.assertEqual('UTF-8', shell.encoding)  # passed straight through to the shell module
        # [Migration]
        self.assertEqual('Superstream', config.streamname)
        self.assertEqual('Previousstream', config.previousstreamname)
        initialcomponentbaselines = config.initialcomponentbaselines
        self.assertEqual(2, len(initialcomponentbaselines))
        initialcomponentbaseline = initialcomponentbaselines[0]
        self.assertEqual('Component1', initialcomponentbaseline.componentname)
        self.assertEqual('Baseline1', initialcomponentbaseline.baselinename)
        initialcomponentbaseline = initialcomponentbaselines[1]
        self.assertEqual('Component2', initialcomponentbaseline.componentname)
        self.assertEqual('Baseline2', initialcomponentbaseline.baselinename)
        self.assertTrue(config.useprovidedhistory)
        self.assertTrue(config.useautomaticconflictresolution)
        self.assertEqual(100, config.maxchangesetstoaccepttogether)
        self.assertEqual("UP-", config.commitmessageprefix)
        gitattributes = config.gitattributes
        self.assertEqual(4, len(gitattributes))
        self.assertEqual('# Handle line endings automatically for text files', gitattributes[0])
        self.assertEqual('# and leave binary files untouched', gitattributes[1])
        self.assertEqual('* text=auto', gitattributes[2])
        self.assertEqual('*.sql text', gitattributes[3])
        # [Miscellaneous]
        self.assertTrue(shell.logcommands)  # passed straight through to the shell module
        ignorefileextensions = config.ignorefileextensions
        self.assertEqual(2, len(ignorefileextensions))
        self.assertEqual('.zip', ignorefileextensions[0])
        self.assertEqual('.jar', ignorefileextensions[1])
        self.assertTrue(config.includecomponentroots)
        ignoredirectories = config.ignoredirectories
        self.assertEqual(2, len(ignoredirectories))
        self.assertEqual('projectX/WebContent/node_modules', ignoredirectories[0])
        self.assertEqual('projectY/distribution', ignoredirectories[1])

    def _assertDefaultConfig(self, config):
        # Asserts the fallback values used when only the minimum
        # configuration is supplied.
        # [General]
        self.assertEqual('https://rtc.minicompany.com/ccm/', config.repo)
        self.assertEqual('miniuser', config.user)
        self.assertEqual('minisecret', config.password)
        self.assertEqual('mini.git', config.gitRepoName)
        self.assertEqual('Miniworkspace', config.workspace)
        self.assertEqual(os.getcwd(), config.workDirectory)
        self.assertFalse(config.useexistingworkspace)
        self.assertEqual('lscm', config.scmcommand)
        self.assertEqual(None, shell.encoding)  # passed straight through to the shell module
        # [Migration]
        self.assertEqual('Ministream', config.streamname)
        self.assertEqual('', config.previousstreamname)
        self.assertEqual(0, len(config.initialcomponentbaselines))
        self.assertFalse(config.useprovidedhistory)
        self.assertFalse(config.useautomaticconflictresolution)
        self.assertEqual(10, config.maxchangesetstoaccepttogether)
        self.assertEqual("", config.commitmessageprefix)
        self.assertEqual(0, len(config.gitattributes))
        # [Miscellaneous]
        self.assertFalse(shell.logcommands)  # passed straight through to the shell module
        self.assertEqual(0, len(config.ignorefileextensions))
        self.assertFalse(config.includecomponentroots)
| mit |
calliope-project/calliope | calliope/backend/pyomo/objective.py | 1 | 3227 | """
Copyright (C) since 2013 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
objective.py
~~~~~~~~~~~~
Objective functions.
"""
import pyomo.core as po
from calliope.core.util.tools import load_function
def minmax_cost_optimization(backend_model):
    """
    Minimize or maximise total system cost for specified cost class or a set of cost classes.
    cost_class is a string or dictionary. If a string, it is automatically converted to a
    dictionary with a single key:value pair where value == 1. The dictionary provides a weight
    for each cost class of interest: {cost_1: weight_1, cost_2: weight_2, etc.}.

    If unmet_demand is in use, then the calculated cost of unmet_demand is
    added or subtracted from the total cost in the opposite sense to the
    objective.

    .. container:: scrolling-wrapper

        .. math::

            min: z = \\sum_{loc::tech_{cost},k} (cost(loc::tech, cost=cost_{k}) \\times weight_{k}) +
             \\sum_{loc::carrier,timestep} (unmet\\_demand(loc::carrier, timestep) \\times bigM)

            max: z = \\sum_{loc::tech_{cost},k} (cost(loc::tech, cost=cost_{k}) \\times weight_{k}) -
             \\sum_{loc::carrier,timestep} (unmet\\_demand(loc::carrier, timestep) \\times bigM)

    """

    def obj_rule(backend_model):
        # With 'ensure_feasibility' enabled, the model has unmet_demand /
        # unused_supply variables; weight their net value by timestep weight
        # and scale by bigM so any slack is heavily penalised.
        if backend_model.__calliope_run_config.get("ensure_feasibility", False):
            unmet_demand = (
                po.quicksum(
                    (
                        backend_model.unmet_demand[carrier, node, timestep]
                        - backend_model.unused_supply[carrier, node, timestep]
                    )
                    * backend_model.timestep_weights[timestep]
                    # Only (carrier, node, timestep) combinations for which
                    # the variable actually exists are summed.
                    for [carrier, node, timestep] in backend_model.carriers
                    * backend_model.nodes
                    * backend_model.timesteps
                    if [carrier, node, timestep] in backend_model.unmet_demand._index
                )
                * backend_model.bigM
            )
            # For a maximisation objective the penalty must act in the
            # opposite sense, so its sign is flipped.
            if backend_model.objective_sense == "maximize":
                unmet_demand *= -1
        else:
            unmet_demand = 0

        # Weighted sum of costs over all requested cost classes, plus the
        # (signed) unmet-demand penalty.
        return (
            po.quicksum(
                po.quicksum(
                    backend_model.cost[class_name, node, tech]
                    for [node, tech] in backend_model.nodes * backend_model.techs
                    if [class_name, node, tech] in backend_model.cost._index
                )
                * weight
                for class_name, weight in backend_model.objective_cost_class.items()
            )
            + unmet_demand
        )

    # objective_sense is 'minimize' or 'maximize'; resolve it to the
    # corresponding pyomo.core function.
    backend_model.obj = po.Objective(
        sense=load_function("pyomo.core." + backend_model.objective_sense),
        rule=obj_rule,
    )
    backend_model.obj.domain = po.Reals
def check_feasibility(backend_model):
    """
    Dummy objective, to check that there are no conflicting constraints.

    .. container:: scrolling-wrapper

        .. math::

            min: z = 1

    """
    # A constant objective: the solve then only tests constraint feasibility.
    backend_model.obj = po.Objective(
        sense=po.minimize,
        rule=lambda backend_model: 1,
    )
    backend_model.obj.domain = po.Reals
| apache-2.0 |
detiber/lib_openshift | test/test_v1_handler.py | 2 | 1236 | # coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_handler import V1Handler
class TestV1Handler(unittest.TestCase):
    """Unit test stubs for the V1Handler model."""

    def setUp(self):
        """No fixture setup is needed for these stubs."""
        pass

    def tearDown(self):
        """No fixture teardown is needed for these stubs."""
        pass

    def testV1Handler(self):
        """
        Test V1Handler
        """
        # Smoke test: constructing the model must not raise.
        lib_openshift.models.v1_handler.V1Handler()


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
lowitty/server | libsLinux/twisted/python/test/test_versions.py | 8 | 10681 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.versions}.
"""
from __future__ import division, absolute_import
import sys
import operator
from io import BytesIO
from twisted.python.versions import getVersionString, IncomparableVersions
from twisted.python.versions import Version, _inf
from twisted.python.filepath import FilePath
from twisted.trial.unittest import SynchronousTestCase as TestCase
VERSION_4_ENTRIES = b"""\
<?xml version="1.0" encoding="utf-8"?>
<wc-entries
xmlns="svn:">
<entry
committed-rev="18210"
name=""
committed-date="2006-09-21T04:43:09.542953Z"
url="svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk/twisted"
last-author="exarkun"
kind="dir"
uuid="bbbe8e31-12d6-0310-92fd-ac37d47ddeeb"
repos="svn+ssh://svn.twistedmatrix.com/svn/Twisted"
revision="18211"/>
</wc-entries>
"""
VERSION_8_ENTRIES = b"""\
8
dir
22715
svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk
"""
VERSION_9_ENTRIES = b"""\
9
dir
22715
svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk
"""
VERSION_10_ENTRIES = b"""\
10
dir
22715
svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk
"""
class VersionsTests(TestCase):
    """
    Tests for L{Version} comparison, formatting and SVN entries parsing.

    Fix: C{assertNotEquals} is a deprecated alias (removed in Python 3.12)
    and is replaced by C{assertNotEqual}.
    """

    def test_versionComparison(self):
        """
        Versions can be compared for equality and order.
        """
        va = Version("dummy", 1, 0, 0)
        vb = Version("dummy", 0, 1, 0)
        self.assertTrue(va > vb)
        self.assertTrue(vb < va)
        self.assertTrue(va >= vb)
        self.assertTrue(vb <= va)
        self.assertTrue(va != vb)
        self.assertTrue(vb == Version("dummy", 0, 1, 0))
        self.assertTrue(vb == vb)

    def test_comparingPrereleasesWithReleases(self):
        """
        Prereleases are always less than versions without prereleases.
        """
        va = Version("whatever", 1, 0, 0, prerelease=1)
        vb = Version("whatever", 1, 0, 0)
        self.assertTrue(va < vb)
        self.assertFalse(va > vb)
        self.assertNotEqual(vb, va)

    def test_comparingPrereleases(self):
        """
        The value specified as the prerelease is used in version comparisons.
        """
        va = Version("whatever", 1, 0, 0, prerelease=1)
        vb = Version("whatever", 1, 0, 0, prerelease=2)
        self.assertTrue(va < vb)
        self.assertTrue(vb > va)
        self.assertTrue(va <= vb)
        self.assertTrue(vb >= va)
        self.assertTrue(va != vb)
        self.assertTrue(vb == Version("whatever", 1, 0, 0, prerelease=2))
        self.assertTrue(va == va)

    def test_infComparison(self):
        """
        L{_inf} is equal to L{_inf}.

        This is a regression test.
        """
        self.assertEqual(_inf, _inf)

    def test_disallowBuggyComparisons(self):
        """
        The package names of the Version objects need to be the same,
        """
        # "dumym" is deliberately a different package name.
        self.assertRaises(IncomparableVersions,
                          operator.eq,
                          Version("dummy", 1, 0, 0),
                          Version("dumym", 1, 0, 0))

    def test_notImplementedComparisons(self):
        """
        Comparing a L{Version} to some other object type results in
        C{NotImplemented}.
        """
        va = Version("dummy", 1, 0, 0)
        vb = ("dummy", 1, 0, 0)  # a tuple is not a Version object
        self.assertEqual(va.__cmp__(vb), NotImplemented)

    def test_repr(self):
        """
        Calling C{repr} on a version returns a human-readable string
        representation of the version.
        """
        self.assertEqual(repr(Version("dummy", 1, 2, 3)),
                         "Version('dummy', 1, 2, 3)")

    def test_reprWithPrerelease(self):
        """
        Calling C{repr} on a version with a prerelease returns a human-readable
        string representation of the version including the prerelease.
        """
        self.assertEqual(repr(Version("dummy", 1, 2, 3, prerelease=4)),
                         "Version('dummy', 1, 2, 3, prerelease=4)")

    def test_str(self):
        """
        Calling C{str} on a version returns a human-readable string
        representation of the version.
        """
        self.assertEqual(str(Version("dummy", 1, 2, 3)),
                         "[dummy, version 1.2.3]")

    def test_strWithPrerelease(self):
        """
        Calling C{str} on a version with a prerelease includes the prerelease.
        """
        self.assertEqual(str(Version("dummy", 1, 0, 0, prerelease=1)),
                         "[dummy, version 1.0.0pre1]")

    def testShort(self):
        """
        The C{short} method returns just the dotted version number.
        """
        self.assertEqual(Version('dummy', 1, 2, 3).short(), '1.2.3')

    def test_goodSVNEntries_4(self):
        """
        Version should be able to parse an SVN format 4 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntries_4(BytesIO(VERSION_4_ENTRIES)), b'18211')

    def test_goodSVNEntries_8(self):
        """
        Version should be able to parse an SVN format 8 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntries_8(BytesIO(VERSION_8_ENTRIES)), b'22715')

    def test_goodSVNEntries_9(self):
        """
        Version should be able to parse an SVN format 9 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntries_9(BytesIO(VERSION_9_ENTRIES)), b'22715')

    def test_goodSVNEntriesTenPlus(self):
        """
        Version should be able to parse an SVN format 10 entries file.
        """
        version = Version("dummy", 1, 0, 0)
        self.assertEqual(
            version._parseSVNEntriesTenPlus(BytesIO(VERSION_10_ENTRIES)), b'22715')

    def test_getVersionString(self):
        """
        L{getVersionString} returns a string with the package name and the
        short version number.
        """
        self.assertEqual(
            'Twisted 8.0.0', getVersionString(Version('Twisted', 8, 0, 0)))

    def test_getVersionStringWithPrerelease(self):
        """
        L{getVersionString} includes the prerelease, if any.
        """
        self.assertEqual(
            getVersionString(Version("whatever", 8, 0, 0, prerelease=1)),
            "whatever 8.0.0pre1")

    def test_base(self):
        """
        The L{base} method returns a very simple representation of the version.
        """
        self.assertEqual(Version("foo", 1, 0, 0).base(), "1.0.0")

    def test_baseWithPrerelease(self):
        """
        The base version includes 'preX' for versions with prereleases.
        """
        self.assertEqual(Version("foo", 1, 0, 0, prerelease=8).base(),
                         "1.0.0pre8")
class FormatDiscoveryTests(TestCase):
    """
    Tests which discover the parsing method based on the imported module name.
    """

    def mktemp(self):
        # FilePath here is used with bytes paths, so encode the temp name.
        return TestCase.mktemp(self).encode("utf-8")

    def setUp(self):
        """
        Create a temporary directory with a package structure in it.
        """
        self.entry = FilePath(self.mktemp())
        self.preTestModules = sys.modules.copy()
        sys.path.append(self.entry.path.decode('utf-8'))
        pkg = self.entry.child(b"twisted_python_versions_package")
        pkg.makedirs()
        # The generated package exposes a Version instance as 'version'.
        pkg.child(b"__init__.py").setContent(
            b"from twisted.python.versions import Version\n"
            b"version = Version('twisted_python_versions_package', 1, 0, 0)\n")
        self.svnEntries = pkg.child(b".svn")
        self.svnEntries.makedirs()

    def tearDown(self):
        """
        Remove the imported modules and sys.path modifications.
        """
        sys.modules.clear()
        sys.modules.update(self.preTestModules)
        sys.path.remove(self.entry.path.decode('utf-8'))

    def checkSVNFormat(self, formatVersion, entriesText, expectedRevision):
        """
        Check for the given revision being detected after setting the SVN
        entries text and format version of the test directory structure.
        """
        self.svnEntries.child(b"format").setContent(formatVersion + b"\n")
        self.svnEntries.child(b"entries").setContent(entriesText)
        self.assertEqual(self.getVersion()._getSVNVersion(), expectedRevision)

    def getVersion(self):
        """
        Import and retrieve the Version object from our dynamically created
        package.
        """
        import twisted_python_versions_package
        return twisted_python_versions_package.version

    def test_detectVersion4(self):
        """
        Verify that version 4 format file will be properly detected and parsed.
        """
        self.checkSVNFormat(b"4", VERSION_4_ENTRIES, b'18211')

    def test_detectVersion8(self):
        """
        Verify that version 8 format files will be properly detected and
        parsed.
        """
        self.checkSVNFormat(b"8", VERSION_8_ENTRIES, b'22715')

    def test_detectVersion9(self):
        """
        Verify that version 9 format files will be properly detected and
        parsed.
        """
        self.checkSVNFormat(b"9", VERSION_9_ENTRIES, b'22715')

    def test_unparseableEntries(self):
        """
        Verify that the result is C{b"Unknown"} for an apparently supported
        version for which parsing of the entries file fails.
        """
        self.checkSVNFormat(b"4", b"some unsupported stuff", b"Unknown")

    def test_detectVersion10(self):
        """
        Verify that version 10 format files will be properly detected and
        parsed.

        Differing from previous formats, the version 10 format lacks a
        I{format} file and B{only} has the version information on the first
        line of the I{entries} file.
        """
        self.svnEntries.child(b"entries").setContent(VERSION_10_ENTRIES)
        self.assertEqual(self.getVersion()._getSVNVersion(), b'22715')

    def test_detectUnknownVersion(self):
        """
        Verify that a new version of SVN will result in the revision 'Unknown'.
        """
        self.checkSVNFormat(b"some-random-new-version", b"ooga booga!", b'Unknown')

    def test_getVersionStringWithRevision(self):
        """
        L{getVersionString} includes the discovered revision number.
        """
        self.svnEntries.child(b"format").setContent(b"9\n")
        self.svnEntries.child(b"entries").setContent(VERSION_10_ENTRIES)
        version = getVersionString(self.getVersion())
        self.assertEqual(
            "twisted_python_versions_package 1.0.0+r22715",
            version)
        self.assertTrue(isinstance(version, type("")))
| mit |
andrius-preimantas/odoo | openerp/report/render/rml2html/utils.py | 438 | 2386 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2005, Fabien Pinckaers, UCL, FSA
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import re
import reportlab
import reportlab.lib.units
# Recognised size suffixes mapped to a points-conversion factor.  "px" uses a
# fixed 0.7 factor and a bare number is treated as already being in points.
# Raw strings are used so that "\." and "\s" are not interpreted as (invalid)
# string escape sequences; the pattern values are unchanged.
units = [
    (re.compile(r'^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
    (re.compile(r'^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
    (re.compile(r'^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
    (re.compile(r'^(-?[0-9\.]+)\s*px$'), 0.7),
    (re.compile(r'^(-?[0-9\.]+)\s*$'), 1)
]
def unit_get(size):
    """Convert a size string such as '2cm', '10px' or '12' to integer points.

    Each (pattern, factor) pair in the module-level ``units`` table is tried
    in order; the first match wins.  Returns False when *size* matches no
    known unit pattern.
    """
    # ``units`` is only read here, so the original ``global units``
    # declaration was a no-op and has been dropped.
    for pattern, factor in units:
        match = pattern.search(size, 0)
        if match:
            # 1.3 is an empirical scaling factor used by the rml2html
            # renderer to map document units onto HTML pixels.
            return int(factor * float(match.group(1)) * 1.3)
    return False
def tuple_int_get(node, attr_name, default=None):
    """Parse a comma-separated node attribute into a list of ints.

    Falls back to *default* when the attribute is absent or empty.
    """
    raw = node.get(attr_name)
    if not raw:
        return default
    return [int(piece) for piece in raw.split(',')]
def bool_get(value):
    """Interpret *value* as a boolean: true for "1" or (any-case) "yes"."""
    if str(value) == "1":
        return True
    return value.lower() == 'yes'
def attr_get(node, attrs, dict=None):
    """Collect converted attribute values from *node*.

    Names in *attrs* are converted with unit_get(); *dict* maps additional
    attribute names to a conversion kind ('str', 'bool' or 'int').  Unknown
    kinds and missing/empty attributes are silently skipped.

    Note: the parameter name ``dict`` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    if dict is None:
        dict = {}
    res = {}
    for name in attrs:
        value = node.get(name)
        if value:
            res[name] = unit_get(value)
    converters = {'str': str, 'bool': bool_get, 'int': int}
    for key in dict:
        value = node.get(key)
        conv = converters.get(dict[key])
        if value and conv is not None:
            res[key] = conv(value)
    return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
charlesvdv/servo | tests/wpt/css-tests/css-text-decor-3_dev/html/reference/support/generate-text-emphasis-style-property-tests.py | 841 | 3434 | #!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-style-property-011 ~ 020 which
cover all possible values of text-emphasis-style property, except none
and <string>, with horizontal writing mode. It outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-style-property-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis-style: {title}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-style-property">
<meta name="assert" content="'text-emphasis-style: {value}' produces {code} as emphasis marks.">
<link rel="match" href="text-emphasis-style-property-{index:03}-ref.html">
<p>Pass if there is a '{char}' above every character below:</p>
<div style="line-height: 5; text-emphasis-style: {value}">試験テスト</div>
'''
REF_FILE = 'text-emphasis-style-property-{:03}-ref.html'
REF_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis-style: {0}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if there is a '{1}' above every character below:</p>
<div style="line-height: 5;"><ruby>試<rt>{1}</rt>験<rt>{1}</rt>テ<rt>{1}</rt>ス<rt>{1}</rt>ト<rt>{1}</rt></ruby></div>
'''
DATA_SET = [
('dot', 0x2022, 0x25e6),
('circle', 0x25cf, 0x25cb),
('double-circle', 0x25c9, 0x25ce),
('triangle', 0x25b2, 0x25b3),
('sesame', 0xfe45, 0xfe46),
]
SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e']
def get_html_entity(code):
    """Return the HTML numeric character reference for code point *code*."""
    return '&#x%04X;' % code
def write_file(filename, content):
    """Write *content* to *filename* encoded as UTF-8."""
    with open(filename, 'wb') as out:
        out.write(content.encode('UTF-8'))
def write_test_file(idx, suffix, style, code, name=None):
    """Emit one test HTML file and print its reftest manifest line."""
    title = name if name else style
    filename = TEST_FILE.format(idx, suffix)
    content = TEST_TEMPLATE.format(index=idx, value=style,
                                   char=get_html_entity(code),
                                   code='U+{:04X}'.format(code),
                                   title=title)
    write_file(filename, content)
    print("== {} {}".format(filename, REF_FILE.format(idx)))
idx = 10
def write_files(style, code):
    """Generate the reference file and every test variant for one style.

    Advances the module-level ``idx`` counter so each style family gets a
    fresh test number.
    """
    global idx
    idx += 1
    fill, shape = style
    basic_style = "{} {}".format(fill, shape)
    write_file(REF_FILE.format(idx),
               REF_TEMPLATE.format(basic_style, get_html_entity(code)))
    suffixes = iter(SUFFIXES)
    # Canonical "fill shape" order, then the reversed "shape fill" order.
    write_test_file(idx, next(suffixes), basic_style, code)
    write_test_file(idx, next(suffixes), "{} {}".format(shape, fill), code)
    # 'filled' is the default fill, so a bare shape must also match.
    if fill == 'filled':
        write_test_file(idx, next(suffixes), shape, code)
    # 'circle' is the default shape (horizontal), so a bare fill must match.
    if shape == 'circle':
        write_test_file(idx, next(suffixes), fill, code, fill + ', horizontal')
# Emit the manifest header, then all filled-mark tests followed by all
# open-mark tests (DATA_SET rows are (name, filled_codepoint, open_codepoint)).
print("# START tests from {}".format(__file__))
for name, code, _ in DATA_SET:
    write_files(('filled', name), code)
for name, _, code in DATA_SET:
    write_files(('open', name), code)
print("# END tests from {}".format(__file__))
| mpl-2.0 |
frnhr/django-cms | cms/test_utils/project/objectpermissionsapp/backends.py | 76 | 1632 | # -*- coding: utf-8 -*-
from django.db.models import Model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
class ObjectPermissionBackend(object):
    """Django authentication backend granting per-object permissions backed
    by ``UserObjectPermission`` rows."""

    def has_perm(self, user_obj, perm, obj=None):
        # Superusers implicitly hold every permission.
        if user_obj and user_obj.is_superuser:
            return True
        # Object-level checks require a real model instance and an active,
        # authenticated user; anything else is denied outright.
        elif obj is None or not isinstance(obj, Model) or \
                not user_obj.is_authenticated() or not user_obj.is_active:
            return False
        # A dotted perm ("app_label.codename") must match the object's app.
        if len(perm.split('.')) > 1:
            app_label, perm = perm.split('.')
            if app_label != obj._meta.app_label:
                raise Exception("Passed perm has app label of '%s' and "
                                "given obj has '%s'" % (app_label, obj._meta.app_label))
        perm = perm.split('.')[-1]
        return perm in self.get_perms(user_obj, obj)

    def get_perms(self, user_obj, obj):
        """
        Returns list of ``codename``'s of all permissions for given ``obj``.
        """
        # Imported lazily to avoid app-loading order problems at import time.
        from cms.test_utils.project.objectpermissionsapp.models import UserObjectPermission
        ctype = ContentType.objects.get_for_model(obj)
        related_name = UserObjectPermission.permission.field.related_query_name()
        # Filter Permission rows that have a matching per-user, per-object
        # UserObjectPermission entry for this exact object.
        user_filters = {
            '%s__user' % related_name: user_obj,
            '%s__content_type' % related_name: ctype,
            '%s__object_pk' % related_name: obj.pk,
        }
        return Permission.objects.filter(content_type=ctype) \
            .filter(**user_filters) \
            .values_list("codename", flat=True)

    def authenticate(self):
        # This backend never authenticates credentials itself.
        # NOTE(review): returning True (not a user object) looks intentional
        # for the test app, but differs from Django's backend contract.
        return True
| bsd-3-clause |
akrherz/iem | htdocs/plotting/auto/scripts/p8.py | 1 | 3318 | """ Monthly precip reliability"""
import calendar
import datetime
import psycopg2.extras
import numpy as np
import pandas as pd
from pyiem import network
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
def get_description():
    """ Return a dict describing how to call this plotter """
    this_year = datetime.date.today().year
    start_year = this_year - 20
    arguments = [
        dict(
            type="station",
            name="station",
            default="IA0200",
            label="Select Station:",
            network="IACLIMATE",
        ),
        dict(type="year", name="syear", default=start_year,
             label="Enter Start Year:"),
        dict(
            type="year",
            name="eyear",
            default=this_year,
            label="Enter End Year (inclusive):",
        ),
        dict(
            type="int",
            name="threshold",
            default="80",
            label="Threshold Percentage [%]:",
        ),
    ]
    description = """This plot presents the frequency of having
    a month's preciptation at or above some threshold. This threshold
    is compared against the long term climatology for the site and month. This
    plot is designed to answer the question about reliability of monthly
    precipitation for a period of your choice. """
    return {
        "arguments": arguments,
        "data": True,
        "description": description,
    }
def plotter(fdict):
    """ Go """
    coop = get_dbconn("coop")
    cursor = coop.cursor(cursor_factory=psycopg2.extras.DictCursor)
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["station"]
    syear = ctx["syear"]
    eyear = ctx["eyear"]
    threshold = ctx["threshold"]
    # Per-state climate table, e.g. "alldata_ia" for an Iowa station id.
    # NOTE(review): the table name is interpolated into the SQL f-string
    # below; safe only if station ids are validated upstream -- confirm.
    table = "alldata_%s" % (station[:2],)
    nt = network.Table("%sCLIMATE" % (station[:2],))
    # For each month: count years whose monthly precip total exceeded
    # ``threshold`` percent of that month's long-term average.
    cursor.execute(
        f"""
    with months as (
        select year, month, p, avg(p) OVER (PARTITION by month) from (
            select year, month, sum(precip) as p from {table}
            where station = %s and year < extract(year from now())
            GROUP by year, month) as foo)

    SELECT month, sum(case when p > (avg * %s / 100.0) then 1 else 0 end)
    from months WHERE year >= %s and year < %s
    GROUP by month ORDER by month ASC
    """,
        (station, threshold, syear, eyear),
    )
    vals = []
    # NOTE(review): the SQL uses ``year < eyear`` while the divisor counts
    # eyear inclusively (1 + eyear - syear) -- possible off-by-one; confirm
    # against the "End Year (inclusive)" label before changing either side.
    years = float(1 + eyear - syear)
    for row in cursor:
        vals.append(row[1] / years * 100.0)
    if not vals:
        raise NoDataFound("No Data Found!")
    df = pd.DataFrame(
        dict(freq=pd.Series(vals, index=range(1, 13))),
        index=pd.Series(range(1, 13), name="month"),
    )
    (fig, ax) = plt.subplots(1, 1)
    ax.bar(np.arange(1, 13), vals, align="center")
    ax.set_xticks(np.arange(1, 13))
    ax.set_ylim(0, 100)
    ax.set_yticks(np.arange(0, 101, 10))
    ax.set_xticklabels(calendar.month_abbr[1:])
    ax.grid(True)
    ax.set_xlim(0.5, 12.5)
    ax.set_ylabel("Percentage of Months, n=%.0f years" % (years,))
    ax.set_title(
        (
            "%s [%s] Monthly Precipitation Reliability\n"
            "Period: %s-%s, %% of Months above %s%% of Long Term Avg"
        )
        % (nt.sts[station]["name"], station, syear, eyear, threshold)
    )
    return fig, df
if __name__ == "__main__":
plotter(dict())
| mit |
Sciumo/PDAL | vendor/gtest-1.7.0/test/gtest_xml_outfiles_test.py | 2526 | 5340 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single file
    # for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    # Best-effort cleanup: each removal is attempted independently and
    # failures (e.g. the file was never created) are deliberately ignored.
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

  def _TestOutFile(self, test_name, expected_xml):
    """Runs test_name with --gtest_output pointing at the output directory
    and compares the produced XML file against expected_xml."""
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)

    # TODO(wan@google.com): libtool causes the built test binary to be
    # named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_.  To account for this possibility, we
    # allow both names in the following code.  We should remove this
    # hack when Chandler Carruth's libtool replacement tool is ready.
    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                 output_file1)

    # Normalize volatile attributes (timestamps, durations) before comparing.
    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| bsd-3-clause |
viggates/nova | nova/tests/api/openstack/compute/contrib/test_consoles.py | 4 | 13601 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
# Stub implementations substituted for compute_api.API methods in the tests
# below.  Each vnc/spice/rdp trio models one outcome: success, invalid
# console type (400), instance not ready (409), or instance not found (404).


def fake_get_vnc_console(self, _context, _instance, _console_type):
    return {'url': 'http://fake'}


def fake_get_spice_console(self, _context, _instance, _console_type):
    return {'url': 'http://fake'}


def fake_get_rdp_console(self, _context, _instance, _console_type):
    return {'url': 'http://fake'}


def fake_get_vnc_console_invalid_type(self, _context,
                                      _instance, _console_type):
    raise exception.ConsoleTypeInvalid(console_type=_console_type)


def fake_get_spice_console_invalid_type(self, _context,
                                        _instance, _console_type):
    raise exception.ConsoleTypeInvalid(console_type=_console_type)


def fake_get_rdp_console_invalid_type(self, _context,
                                      _instance, _console_type):
    raise exception.ConsoleTypeInvalid(console_type=_console_type)


def fake_get_vnc_console_not_ready(self, _context, instance, _console_type):
    raise exception.InstanceNotReady(instance_id=instance["uuid"])


def fake_get_spice_console_not_ready(self, _context, instance, _console_type):
    raise exception.InstanceNotReady(instance_id=instance["uuid"])


def fake_get_rdp_console_not_ready(self, _context, instance, _console_type):
    raise exception.InstanceNotReady(instance_id=instance["uuid"])


def fake_get_vnc_console_not_found(self, _context, instance, _console_type):
    raise exception.InstanceNotFound(instance_id=instance["uuid"])


def fake_get_spice_console_not_found(self, _context, instance, _console_type):
    raise exception.InstanceNotFound(instance_id=instance["uuid"])


def fake_get_rdp_console_not_found(self, _context, instance, _console_type):
    raise exception.InstanceNotFound(instance_id=instance["uuid"])


# Stubs for compute_api.API.get itself (instance lookup).
def fake_get(self, context, instance_uuid, want_objects=False):
    return {'uuid': instance_uuid}


def fake_get_not_found(self, context, instance_uuid, want_objects=False):
    raise exception.InstanceNotFound(instance_id=instance_uuid)
class ConsolesExtensionTest(test.NoDBTestCase):
    """Exercise the os-getVNCConsole / os-getSPICEConsole / os-getRDPConsole
    server actions provided by the Consoles API extension.

    Every test posts an action body to the same fake server endpoint; the
    shared request plumbing lives in the _do_request() helper so each test
    only states the stubbed API behavior and the expected HTTP status.
    """

    def setUp(self):
        super(ConsolesExtensionTest, self).setUp()
        self.stubs.Set(compute_api.API, 'get_vnc_console',
                       fake_get_vnc_console)
        self.stubs.Set(compute_api.API, 'get_spice_console',
                       fake_get_spice_console)
        self.stubs.Set(compute_api.API, 'get_rdp_console',
                       fake_get_rdp_console)
        self.stubs.Set(compute_api.API, 'get', fake_get)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Consoles'])
        self.app = fakes.wsgi_app(init_only=('servers',))

    def _do_request(self, body):
        """POST the JSON action *body* to the fake server and return the
        WSGI response."""
        req = webob.Request.blank('/v2/fake/servers/1/action')
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        return req.get_response(self.app)

    def test_get_vnc_console(self):
        res = self._do_request({'os-getVNCConsole': {'type': 'novnc'}})
        output = jsonutils.loads(res.body)
        self.assertEqual(res.status_int, 200)
        self.assertEqual(output,
            {u'console': {u'url': u'http://fake', u'type': u'novnc'}})

    def test_get_vnc_console_not_ready(self):
        self.stubs.Set(compute_api.API, 'get_vnc_console',
                       fake_get_vnc_console_not_ready)
        res = self._do_request({'os-getVNCConsole': {'type': 'novnc'}})
        # The fault body must still be valid JSON.
        jsonutils.loads(res.body)
        self.assertEqual(res.status_int, 409)

    def test_get_vnc_console_no_type(self):
        self.stubs.Set(compute_api.API, 'get_vnc_console',
                       fake_get_vnc_console_invalid_type)
        res = self._do_request({'os-getVNCConsole': {}})
        self.assertEqual(res.status_int, 400)

    def test_get_vnc_console_no_instance(self):
        self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
        res = self._do_request({'os-getVNCConsole': {'type': 'novnc'}})
        self.assertEqual(res.status_int, 404)

    def test_get_vnc_console_no_instance_on_console_get(self):
        self.stubs.Set(compute_api.API, 'get_vnc_console',
                       fake_get_vnc_console_not_found)
        res = self._do_request({'os-getVNCConsole': {'type': 'novnc'}})
        self.assertEqual(res.status_int, 404)

    def test_get_vnc_console_invalid_type(self):
        self.stubs.Set(compute_api.API, 'get_vnc_console',
                       fake_get_vnc_console_invalid_type)
        res = self._do_request({'os-getVNCConsole': {'type': 'invalid'}})
        self.assertEqual(res.status_int, 400)

    def test_get_vnc_console_not_implemented(self):
        self.stubs.Set(compute_api.API, 'get_vnc_console',
                       fakes.fake_not_implemented)
        res = self._do_request({'os-getVNCConsole': {'type': 'novnc'}})
        self.assertEqual(res.status_int, 501)

    def test_get_spice_console(self):
        res = self._do_request({'os-getSPICEConsole': {'type': 'spice-html5'}})
        output = jsonutils.loads(res.body)
        self.assertEqual(res.status_int, 200)
        self.assertEqual(output,
            {u'console': {u'url': u'http://fake', u'type': u'spice-html5'}})

    def test_get_spice_console_not_ready(self):
        self.stubs.Set(compute_api.API, 'get_spice_console',
                       fake_get_spice_console_not_ready)
        res = self._do_request({'os-getSPICEConsole': {'type': 'spice-html5'}})
        # The fault body must still be valid JSON.
        jsonutils.loads(res.body)
        self.assertEqual(res.status_int, 409)

    def test_get_spice_console_no_type(self):
        self.stubs.Set(compute_api.API, 'get_spice_console',
                       fake_get_spice_console_invalid_type)
        res = self._do_request({'os-getSPICEConsole': {}})
        self.assertEqual(res.status_int, 400)

    def test_get_spice_console_no_instance(self):
        self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
        res = self._do_request({'os-getSPICEConsole': {'type': 'spice-html5'}})
        self.assertEqual(res.status_int, 404)

    def test_get_spice_console_no_instance_on_console_get(self):
        self.stubs.Set(compute_api.API, 'get_spice_console',
                       fake_get_spice_console_not_found)
        res = self._do_request({'os-getSPICEConsole': {'type': 'spice-html5'}})
        self.assertEqual(res.status_int, 404)

    def test_get_spice_console_invalid_type(self):
        self.stubs.Set(compute_api.API, 'get_spice_console',
                       fake_get_spice_console_invalid_type)
        res = self._do_request({'os-getSPICEConsole': {'type': 'invalid'}})
        self.assertEqual(res.status_int, 400)

    def test_get_spice_console_not_implemented(self):
        self.stubs.Set(compute_api.API, 'get_spice_console',
                       fakes.fake_not_implemented)
        res = self._do_request({'os-getSPICEConsole': {'type': 'spice-html5'}})
        self.assertEqual(res.status_int, 501)

    def test_get_rdp_console(self):
        res = self._do_request({'os-getRDPConsole': {'type': 'rdp-html5'}})
        output = jsonutils.loads(res.body)
        self.assertEqual(res.status_int, 200)
        self.assertEqual(output,
            {u'console': {u'url': u'http://fake', u'type': u'rdp-html5'}})

    def test_get_rdp_console_not_ready(self):
        self.stubs.Set(compute_api.API, 'get_rdp_console',
                       fake_get_rdp_console_not_ready)
        res = self._do_request({'os-getRDPConsole': {'type': 'rdp-html5'}})
        # The fault body must still be valid JSON.
        jsonutils.loads(res.body)
        self.assertEqual(res.status_int, 409)

    def test_get_rdp_console_no_type(self):
        self.stubs.Set(compute_api.API, 'get_rdp_console',
                       fake_get_rdp_console_invalid_type)
        res = self._do_request({'os-getRDPConsole': {}})
        self.assertEqual(res.status_int, 400)

    def test_get_rdp_console_no_instance(self):
        self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
        res = self._do_request({'os-getRDPConsole': {'type': 'rdp-html5'}})
        self.assertEqual(res.status_int, 404)

    def test_get_rdp_console_no_instance_on_console_get(self):
        self.stubs.Set(compute_api.API, 'get_rdp_console',
                       fake_get_rdp_console_not_found)
        res = self._do_request({'os-getRDPConsole': {'type': 'rdp-html5'}})
        self.assertEqual(res.status_int, 404)

    def test_get_rdp_console_invalid_type(self):
        self.stubs.Set(compute_api.API, 'get_rdp_console',
                       fake_get_rdp_console_invalid_type)
        res = self._do_request({'os-getRDPConsole': {'type': 'invalid'}})
        self.assertEqual(res.status_int, 400)
| apache-2.0 |
sestrella/ansible | test/units/modules/source_control/gitlab/test_gitlab_user.py | 13 | 6080 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import
import pytest
from ansible.modules.source_control.gitlab.gitlab_user import GitLabUser
def _dummy(x):
"""Dummy function. Only used as a placeholder for toplevel definitions when the test is going
to be skipped anyway"""
return x
pytestmark = []
try:
from .gitlab import (GitlabModuleTestCase,
python_version_match_requirement,
resp_find_user, resp_get_user, resp_get_user_keys,
resp_create_user_keys, resp_create_user, resp_delete_user,
resp_get_member, resp_get_group, resp_add_member,
resp_update_member, resp_get_member)
# GitLab module requirements
if python_version_match_requirement():
from gitlab.v4.objects import User
except ImportError:
pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
# Need to set these to something so that we don't fail when parsing
GitlabModuleTestCase = object
resp_find_user = _dummy
resp_get_user = _dummy
resp_get_user_keys = _dummy
resp_create_user_keys = _dummy
resp_create_user = _dummy
resp_delete_user = _dummy
resp_get_member = _dummy
resp_get_group = _dummy
resp_add_member = _dummy
resp_update_member = _dummy
resp_get_member = _dummy
# Unit tests requirements
try:
from httmock import with_httmock # noqa
except ImportError:
pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
with_httmock = _dummy
class TestGitlabUser(GitlabModuleTestCase):
    # Tests for the GitLabUser module helper; HTTP traffic is intercepted by
    # the httmock responders (resp_*) declared in the shared gitlab fixture.

    def setUp(self):
        super(TestGitlabUser, self).setUp()

        self.moduleUtil = GitLabUser(module=self.mock_module, gitlab_instance=self.gitlab_instance)

    @with_httmock(resp_find_user)
    def test_exist_user(self):
        # Known username resolves; unknown one does not.
        rvalue = self.moduleUtil.existsUser("john_smith")
        self.assertEqual(rvalue, True)

        rvalue = self.moduleUtil.existsUser("paul_smith")
        self.assertEqual(rvalue, False)

    @with_httmock(resp_find_user)
    def test_find_user(self):
        user = self.moduleUtil.findUser("john_smith")
        self.assertEqual(type(user), User)

        self.assertEqual(user.name, "John Smith")
        self.assertEqual(user.id, 1)

    @with_httmock(resp_create_user)
    def test_create_user(self):
        user = self.moduleUtil.createUser({'email': 'john@example.com', 'password': 's3cur3s3cr3T',
                                           'username': 'john_smith', 'name': 'John Smith'})
        self.assertEqual(type(user), User)
        self.assertEqual(user.name, "John Smith")
        self.assertEqual(user.id, 1)

    @with_httmock(resp_get_user)
    def test_update_user(self):
        user = self.gitlab_instance.users.get(1)

        # Changed attributes are applied and reported; identical input is a no-op.
        changed, newUser = self.moduleUtil.updateUser(user, {'name': "Jack Smith", "is_admin": "true"})
        self.assertEqual(changed, True)
        self.assertEqual(newUser.name, "Jack Smith")
        self.assertEqual(newUser.is_admin, "true")

        changed, newUser = self.moduleUtil.updateUser(user, {'name': "Jack Smith"})
        self.assertEqual(changed, False)

    @with_httmock(resp_find_user)
    @with_httmock(resp_delete_user)
    def test_delete_user(self):
        # findUser/existsUser caches the user the delete operates on.
        self.moduleUtil.existsUser("john_smith")

        rvalue = self.moduleUtil.deleteUser()
        self.assertEqual(rvalue, None)

    @with_httmock(resp_get_user)
    @with_httmock(resp_get_user_keys)
    def test_sshkey_exist(self):
        user = self.gitlab_instance.users.get(1)

        exist = self.moduleUtil.sshKeyExists(user, "Public key")
        self.assertEqual(exist, True)

        notExist = self.moduleUtil.sshKeyExists(user, "Private key")
        self.assertEqual(notExist, False)

    @with_httmock(resp_get_user)
    @with_httmock(resp_create_user_keys)
    @with_httmock(resp_get_user_keys)
    def test_create_sshkey(self):
        user = self.gitlab_instance.users.get(1)

        # Adding a key that already exists reports no change (False);
        # adding a new one reports a change (True).
        rvalue = self.moduleUtil.addSshKeyToUser(user, {
            'name': "Public key",
            'file': "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJe"
                    "jgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4"
                    "soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0="})
        self.assertEqual(rvalue, False)

        rvalue = self.moduleUtil.addSshKeyToUser(user, {
            'name': "Private key",
            'file': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcU"
                    "dRuSuA5zszUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+"
                    "xawxKWmI7hJ5S0tOv6MJ+IxyTa4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2j"
                    "TiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH2WOKBw6za0az6XoG75obUdFVdW3qcD0x"
                    "c809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF"})
        self.assertEqual(rvalue, True)

    @with_httmock(resp_get_group)
    @with_httmock(resp_get_member)
    def test_find_member(self):
        group = self.gitlab_instance.groups.get(1)

        user = self.moduleUtil.findMember(group, 1)
        self.assertEqual(user.username, "raymond_smith")

    @with_httmock(resp_get_user)
    @with_httmock(resp_get_group)
    @with_httmock(resp_get_group)
    @with_httmock(resp_get_member)
    @with_httmock(resp_add_member)
    @with_httmock(resp_update_member)
    def test_assign_user_to_group(self):
        group = self.gitlab_instance.groups.get(1)
        user = self.gitlab_instance.users.get(1)

        # Assigning with the member's current access level is a no-op;
        # a different level results in an update (True).
        rvalue = self.moduleUtil.assignUserToGroup(user, group.id, "developer")
        self.assertEqual(rvalue, False)

        rvalue = self.moduleUtil.assignUserToGroup(user, group.id, "guest")
        self.assertEqual(rvalue, True)
| gpl-3.0 |
jkburges/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py | 124 | 2557 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import unittest2 as unittest
from webkitpy.common.watchlist.filenamepattern import FilenamePattern
class FileNamePatternTest(unittest.TestCase):
    """Tests for FilenamePattern matching of changed-file names."""

    def test_filename_pattern_literal(self):
        filename_pattern = FilenamePattern(re.compile(r'MyFileName\.cpp'))

        # Note the follow filenames are not regex.
        # Matching is anchored at the start but not at the end, as the
        # assertions below demonstrate ('...cppa' matches, 'a...' does not).
        self.assertTrue(filename_pattern.match('MyFileName.cpp', None))
        self.assertTrue(filename_pattern.match('MyFileName.cppa', None))
        self.assertFalse(filename_pattern.match('aMyFileName.cpp', None))
        self.assertFalse(filename_pattern.match('MyFileNamebcpp', None))

    def test_filename_pattern_substring(self):
        # Leading '.*' lets the pattern match anywhere in a backslash path.
        filename_pattern = FilenamePattern(re.compile(r'.*\\MyFileName\..*'))

        # Note the follow filenames are not regex.
        self.assertTrue(filename_pattern.match(r'\\MyFileName.cpp', None))
        self.assertTrue(filename_pattern.match(r'a\\MyFileName.h', None))
        self.assertFalse(filename_pattern.match(r'\\aMyFileName.cpp', None))
| bsd-3-clause |
yodalee/servo | tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/support/generate-text-emphasis-style-property-tests.py | 841 | 3434 | #!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-style-property-011 ~ 020 which
cover all possible values of text-emphasis-style property, except none
and <string>, with horizontal writing mode. It outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-style-property-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis-style: {title}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-style-property">
<meta name="assert" content="'text-emphasis-style: {value}' produces {code} as emphasis marks.">
<link rel="match" href="text-emphasis-style-property-{index:03}-ref.html">
<p>Pass if there is a '{char}' above every character below:</p>
<div style="line-height: 5; text-emphasis-style: {value}">試験テスト</div>
'''
REF_FILE = 'text-emphasis-style-property-{:03}-ref.html'
REF_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis-style: {0}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if there is a '{1}' above every character below:</p>
<div style="line-height: 5;"><ruby>試<rt>{1}</rt>験<rt>{1}</rt>テ<rt>{1}</rt>ス<rt>{1}</rt>ト<rt>{1}</rt></ruby></div>
'''
DATA_SET = [
('dot', 0x2022, 0x25e6),
('circle', 0x25cf, 0x25cb),
('double-circle', 0x25c9, 0x25ce),
('triangle', 0x25b2, 0x25b3),
('sesame', 0xfe45, 0xfe46),
]
SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e']
def get_html_entity(code):
    """Return the HTML numeric character reference for code point *code*,
    zero-padded to four uppercase hex digits (e.g. 0x25CF -> '&#x25CF;')."""
    return '&#x%04X;' % (code,)
def write_file(filename, content):
    """Serialize *content* to *filename* as UTF-8 encoded bytes."""
    encoded = content.encode('UTF-8')
    with open(filename, 'wb') as out:
        out.write(encoded)
def write_test_file(idx, suffix, style, code, name=None):
    """Generate one test HTML file for emphasis style *style* rendering the
    mark code point *code*, and print its reftest.list line.

    *name* overrides the title shown in the test (defaults to the style)."""
    if not name:
        name = style
    filename = TEST_FILE.format(idx, suffix)
    write_file(filename, TEST_TEMPLATE.format(index=idx, value=style,
                                              char=get_html_entity(code),
                                              code='U+{:04X}'.format(code),
                                              title=name))
    # All suffix variants of one index share a single reference file.
    print("== {} {}".format(filename, REF_FILE.format(idx)))
# Test numbering starts at 011 (idx is pre-incremented in write_files).
idx = 10

def write_files(style, code):
    """Write the reference file plus every equivalent test serialization
    for one (fill, shape) emphasis style, bumping the global test index."""
    global idx
    idx += 1
    fill, shape = style
    basic_style = "{} {}".format(fill, shape)
    write_file(REF_FILE.format(idx),
               REF_TEMPLATE.format(basic_style, get_html_entity(code)))
    suffix = iter(SUFFIXES)
    # 'fill shape' and 'shape fill' are equivalent orderings.
    write_test_file(idx, next(suffix), basic_style, code)
    write_test_file(idx, next(suffix), "{} {}".format(shape, fill), code)
    if fill == 'filled':
        # 'filled' is the initial fill value per css-text-decor-3, so the
        # shape keyword alone means the same thing.
        write_test_file(idx, next(suffix), shape, code)
    if shape == 'circle':
        # 'circle' is the default shape in horizontal writing mode, so the
        # fill keyword alone is also equivalent.
        write_test_file(idx, next(suffix), fill, code, fill + ', horizontal')
# Bracket the emitted reftest.list entries with markers so the generated
# section can be identified in the output.
print("# START tests from {}".format(__file__))
# Filled marks use the second code point of each DATA_SET row, open marks
# the third (see the tuple unpacking below).
for name, code, _ in DATA_SET:
    write_files(('filled', name), code)
for name, _, code in DATA_SET:
    write_files(('open', name), code)
print("# END tests from {}".format(__file__))
| mpl-2.0 |
Sodki/ansible | test/runner/lib/changes.py | 11 | 5897 | """Detect changes in Ansible code."""
from __future__ import absolute_import, print_function
import re
import os
from lib.util import (
ApplicationError,
SubprocessError,
MissingEnvironmentVariable,
CommonConfig,
display,
)
from lib.http import (
HttpClient,
urlencode,
)
from lib.git import (
Git,
)
class InvalidBranch(ApplicationError):
    """Raised when the current git branch is unsuitable for the operation."""
    def __init__(self, branch, reason):
        """
        :type branch: str
        :type reason: str
        """
        super(InvalidBranch, self).__init__('Invalid branch: %s\n%s' % (branch, reason))

        self.branch = branch
class ChangeDetectionNotSupported(ApplicationError):
    """Exception for cases where change detection is not supported."""
    # Raised e.g. for tag builds, where there is no branch to diff against
    # (see ShippableChanges.__init__).
    def __init__(self, message):
        """
        :type message: str
        """
        super(ChangeDetectionNotSupported, self).__init__(message)
class ShippableChanges(object):
    """Change information for Shippable build."""
    def __init__(self, args, git):
        """
        :type args: CommonConfig
        :type git: Git
        """
        self.args = args

        # These variables are provided by the Shippable CI environment.
        try:
            self.branch = os.environ['BRANCH']
            self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
            self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
            self.commit = os.environ['COMMIT']
            self.project_id = os.environ['PROJECT_ID']
        except KeyError as ex:
            raise MissingEnvironmentVariable(name=ex.args[0])

        if self.is_tag:
            raise ChangeDetectionNotSupported('Change detection is not supported for tags.')

        if self.is_pr:
            # Pull request build: diff against the PR's target branch.
            self.paths = sorted(git.get_diff_names(['origin/%s' % self.branch, '--']))
            self.diff = git.get_diff(['origin/%s' % self.branch, '--'])
        else:
            # Merge/branch build: diff against the last commit that passed CI.
            merge_runs = self.get_merge_runs(self.project_id, self.branch)
            last_successful_commit = self.get_last_successful_commit(git, merge_runs)
            if last_successful_commit:
                self.paths = sorted(git.get_diff_names([last_successful_commit, self.commit]))
                self.diff = git.get_diff([last_successful_commit, self.commit])
            else:
                # No baseline found: treat every tracked file as changed.
                # tracked files (including unchanged)
                self.paths = sorted(git.get_file_names(['--cached']))
                self.diff = []

    def get_merge_runs(self, project_id, branch):
        """Fetch the non-PR CI runs for *branch* from the Shippable API.

        :type project_id: str
        :type branch: str
        :rtype: list[dict]
        """
        params = dict(
            isPullRequest='false',
            projectIds=project_id,
            branch=branch,
        )
        client = HttpClient(self.args, always=True)
        response = client.get('https://api.shippable.com/runs?%s' % urlencode(params))
        return response.json()

    @staticmethod
    def get_last_successful_commit(git, merge_runs):
        """Return the sha of the most recent commit with a successful CI run,
        or None when no such commit can be determined.

        :type git: Git
        :type merge_runs: dict | list[dict]
        :rtype: str
        """
        # An error payload with id 4004 means the project was not found.
        if 'id' in merge_runs and merge_runs['id'] == 4004:
            display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
            return None

        merge_runs = sorted(merge_runs, key=lambda r: r['createdAt'])
        known_commits = set()
        last_successful_commit = None
        for merge_run in merge_runs:
            commit_sha = merge_run['commitSha']
            if commit_sha not in known_commits:
                known_commits.add(commit_sha)
                # statusCode 30 presumably marks a successful run -- confirm
                # against the Shippable API documentation.
                if merge_run['statusCode'] == 30:
                    if git.is_valid_ref(commit_sha):
                        last_successful_commit = commit_sha
        return last_successful_commit
class LocalChanges(object):
    """Change information for local work."""
    def __init__(self, args, git):
        """Detect the fork point of the current feature branch and collect
        the tracked/untracked/committed/staged/unstaged change sets.

        :type args: CommonConfig
        :type git: Git
        """
        self.args = args
        self.current_branch = git.get_branch()

        # Change detection only makes sense on a feature branch.
        if self.is_official_branch(self.current_branch):
            raise InvalidBranch(branch=self.current_branch,
                                reason='Current branch is not a feature branch.')

        self.fork_branch = None
        self.fork_point = None
        self.local_branches = sorted(git.get_branches())
        self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])

        # Use the first official branch for which a fork point can be found.
        for self.fork_branch in self.official_branches:
            try:
                self.fork_point = git.get_branch_fork_point(self.fork_branch)
                break
            except SubprocessError:
                pass

        if self.fork_point is None:
            raise ApplicationError('Unable to auto-detect fork branch and fork point.')

        # tracked files (including unchanged)
        self.tracked = sorted(git.get_file_names(['--cached']))
        # untracked files (except ignored)
        self.untracked = sorted(git.get_file_names(['--others', '--exclude-standard']))
        # tracked changes (including deletions) committed since the branch was forked
        self.committed = sorted(git.get_diff_names([self.fork_point, 'HEAD']))
        # tracked changes (including deletions) which are staged
        self.staged = sorted(git.get_diff_names(['--cached']))
        # tracked changes (including deletions) which are not staged
        self.unstaged = sorted(git.get_diff_names([]))
        # diff of all tracked files from fork point to working copy
        self.diff = git.get_diff([self.fork_point])

    @staticmethod
    def is_official_branch(name):
        """Return True for 'devel' and 'stable-X.Y' branches.

        :type name: str
        :rtype: bool
        """
        if name == 'devel':
            return True
        if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
            return True
        return False
| gpl-3.0 |
danielkza/dnf | dnf/cli/commands/erase.py | 2 | 2884 | # erase_command.py
# Erase CLI command.
#
# Copyright (C) 2012-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from .. import commands
from dnf.i18n import _
import dnf.exceptions
import logging
logger = logging.getLogger("dnf")
class EraseCommand(commands.Command):
    """Erase command."""

    aliases = ('erase', 'remove')
    summary = _("Remove a package or packages from your system")
    usage = "%s..." % _('PACKAGE')

    def configure(self, _):
        # NOTE: the unused parameter '_' shadows the gettext alias within
        # this scope only; the alias is not needed here.
        demands = self.cli.demands
        demands.allow_erasing = True
        demands.available_repos = True
        demands.resolving = True
        demands.root_user = True
        demands.sack_activation = True

    def doCheck(self, basecmd, extcmds):
        """Verify that conditions are met so that this command can
        run. These include that the program is being run by the root
        user, and that this command is called with appropriate
        arguments.

        :param basecmd: the name of the command
        :param extcmds: the command line arguments passed to *basecmd*
        """
        commands.checkPackageArg(self.cli, basecmd, extcmds)

    def run(self, extcmds):
        """Remove the packages and groups named in *extcmds*; raise
        dnf.exceptions.Error if nothing ended up marked for removal."""
        pkg_specs, grp_specs, filenames = commands.parse_spec_group_file(
            extcmds)
        pkg_specs += filenames # local pkgs not supported in erase command
        done = False

        # Remove groups.
        if grp_specs:
            self.base.read_comps()
            if self.base.env_group_remove(grp_specs):
                done = True

        for pkg_spec in pkg_specs:
            try:
                self.base.remove(pkg_spec)
            except dnf.exceptions.MarkingError:
                # An unmatched spec is reported but does not abort the rest.
                logger.info(_('No match for argument: %s'),
                            pkg_spec)
            else:
                done = True

        if not done:
            raise dnf.exceptions.Error(_('No packages marked for removal.'))
| gpl-2.0 |
schechter/pykoans | python3/koans/about_scoring_project.py | 107 | 2207 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
def score(dice):
    """Score a single Greed roll.

    Rules (see the comment block above):
      * three 1s           -> 1000 points
      * three of any other -> 100 * face value
      * each leftover 1    -> 100 points
      * each leftover 5    -> 50 points
      * everything else    -> 0 points

    :param dice: iterable of die faces (ints 1-6); may be empty
    :return: total score as an int
    """
    if not dice:
        return 0

    # Tally how many of each face was rolled.
    counts = {}
    for die in dice:
        counts[die] = counts.get(die, 0) + 1

    total = 0
    for value, count in counts.items():
        if count >= 3:
            # One triple per face value (max 5 dice, so at most one).
            total += 1000 if value == 1 else value * 100
            count -= 3
        # Leftover 1s and 5s score individually.
        if value == 1:
            total += count * 100
        elif value == 5:
            total += count * 50
    return total
class AboutScoringProject(Koan):
    """Koan exercising the score() implementation above; each test pins one
    rule of the Greed scoring table."""

    def test_score_of_an_empty_list_is_zero(self):
        self.assertEqual(0, score([]))

    def test_score_of_a_single_roll_of_5_is_50(self):
        self.assertEqual(50, score([5]))

    def test_score_of_a_single_roll_of_1_is_100(self):
        self.assertEqual(100, score([1]))

    def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
        self.assertEqual(300, score([1,5,5,1]))

    def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
        self.assertEqual(0, score([2,3,4,6]))

    def test_score_of_a_triple_1_is_1000(self):
        self.assertEqual(1000, score([1,1,1]))

    def test_score_of_other_triples_is_100x(self):
        self.assertEqual(200, score([2,2,2]))
        self.assertEqual(300, score([3,3,3]))
        self.assertEqual(400, score([4,4,4]))
        self.assertEqual(500, score([5,5,5]))
        self.assertEqual(600, score([6,6,6]))

    def test_score_of_mixed_is_sum(self):
        self.assertEqual(250, score([2,5,2,2,3]))
        self.assertEqual(550, score([5,5,5,5]))
        self.assertEqual(1150, score([1,1,1,5,1]))

    def test_ones_not_left_out(self):
        # Leftover 1s beyond a triple still score 100 each.
        self.assertEqual(300, score([1,2,2,2]))
        self.assertEqual(350, score([1,5,2,2,2]))
smunaut/gnuradio | gr-analog/python/analog/qa_dpll.py | 10 | 2027 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks
class test_dpll_bb(gr_unittest.TestCase):
    """QA tests for the analog.dpll_bb digital phase-locked loop block."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_dpll_bb_001(self):
        # Test set/gets

        period = 1.0
        gain = 0.1
        op = analog.dpll_bb(period, gain)

        op.set_gain(0.2)
        g = op.gain()
        self.assertAlmostEqual(g, 0.2)

        # Frequency is the reciprocal of the configured period.
        f = op.freq()
        self.assertEqual(1/period, f)

        d0 = 1.0 - 0.5*f;
        d1 = op.decision_threshold()
        self.assertAlmostEqual(d0, d1)

        p = op.phase()
        self.assertEqual(0, p)

    def test_dpll_bb_002(self):
        period = 4
        gain = 0.1

        # A pulse train at the configured period should pass through
        # unchanged once the PLL is locked.
        src_data = 10*((period-1)*[0,] + [1,])
        expected_result = src_data

        src = blocks.vector_source_b(src_data)
        op = analog.dpll_bb(period, gain)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()

        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 4)

if __name__ == '__main__':
    gr_unittest.run(test_dpll_bb, "test_dpll_bb.xml")
| gpl-3.0 |
feliperfranca/django-nonrel-example | django/utils/simplejson/decoder.py | 409 | 11952 | """Implementation of JSONDecoder
"""
import re
import sys
import struct
from django.utils.simplejson.scanner import make_scanner
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
    """Translate character offset *pos* within *doc* into a
    (line, column) pair.  Lines are 1-based; the column on line 1 is the
    raw offset, on later lines it is the offset past the last newline."""
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        return lineno, pos
    return lineno, pos - doc.rindex('\n', 0, pos)

def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message with line/column context; *end*
    extends the report to a character range."""
    # Note that this function is called from _speedups
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
    return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote.

    NOTE: Python 2 only (uses ``unicode``/``unichr``).  The default
    arguments ``_b``/``_m`` are local-binding speed hacks."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append  # local alias for speed inside the scan loop
    begin = end - 1
    while 1:
        # Consume a run of plain characters plus the terminator that
        # stopped it (closing quote, control char, or backslash).
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content is contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                # NOTE(review): other errors go through errmsg(); this one
                # passes the raw tuple -- possibly intended errmsg(msg, s, end).
                raise ValueError(msg, s, end)
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                raise ValueError(
                    errmsg("Invalid \\escape: %r" % (esc,), s, end))
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                # Combine the high and low surrogates into one code point.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    # Parse a JSON object starting just after its opening '{'; return
    # (dict, index-after-closing-brace).
    # NOTE: tuple-parameter unpacking makes this Python 2 only syntax.
    pairs = {}
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)

        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting : delimiter", s, end))

        end += 1

        # Skip whitespace after the ':' (fast paths for 0/1 spaces).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs[key] = value

        # Next significant char decides: '}' ends the object, ',' continues.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1

        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))

        # After a ',', skip whitespace and require the next property name.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''

        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))

    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    # Parse a JSON array starting just after its opening '['; return
    # (list, index-after-closing-bracket).
    # NOTE: tuple-parameter unpacking makes this Python 2 only syntax.
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append  # local alias for speed in the loop
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        # Next significant char decides: ']' ends the array, ',' continues.
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))

        # Skip whitespace after the ',' (fast paths for 0/1 spaces).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True):
        """``encoding`` determines the encoding used to interpret any ``str``
        objects decoded by this instance (utf-8 by default).  It has no
        effect when decoding ``unicode`` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as ``unicode``.

        ``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded.  By default this is equivalent to
        float(num_str).  This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).

        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded.  By default this is equivalent to
        int(num_str).  This can be used to use another datatype or parser
        for JSON integers (e.g. float).

        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.

        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # The scanner closure captures the parse_* hooks configured above.
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        # Unlike raw_decode, reject any trailing non-whitespace data.
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
        with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.

        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return obj, end
| bsd-3-clause |
onecloud/neutron | neutron/plugins/hyperv/model.py | 8 | 2073 | # Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Alessandro Pilotti, Cloudbase Solutions Srl
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from neutron.db import model_base
class VlanAllocation(model_base.BASEV2):
    """Represents allocation state of vlan_id on physical network."""
    __tablename__ = 'hyperv_vlan_allocations'

    # Composite primary key: a VLAN id is unique per physical network.
    physical_network = Column(String(64), nullable=False, primary_key=True)
    vlan_id = Column(Integer, nullable=False, primary_key=True,
                     autoincrement=False)
    # True once this (network, vlan) pair has been handed out.
    allocated = Column(Boolean, nullable=False)

    def __init__(self, physical_network, vlan_id):
        # New allocations always start out unallocated.
        self.physical_network = physical_network
        self.vlan_id = vlan_id
        self.allocated = False
class NetworkBinding(model_base.BASEV2):
    """Represents binding of virtual network to physical realization."""
    __tablename__ = 'hyperv_network_bindings'

    # One binding row per network; deleted along with the network.
    network_id = Column(String(36),
                        ForeignKey('networks.id', ondelete="CASCADE"),
                        primary_key=True)
    network_type = Column(String(32), nullable=False)
    # physical_network/segmentation_id are nullable: not every network
    # type requires them (e.g. flat vs. vlan).
    physical_network = Column(String(64))
    segmentation_id = Column(Integer)

    def __init__(self, network_id, network_type, physical_network,
                 segmentation_id):
        self.network_id = network_id
        self.network_type = network_type
        self.physical_network = physical_network
        self.segmentation_id = segmentation_id
| apache-2.0 |
tumluliu/mmspa | demo/python/benchmark.py | 1 | 2555 | #!/usr/bin/env python
import sys
import time
from termcolor import colored
from pymmspa4pg import *
source_list = []
target_list = []
def load_routing_options(
        sources_file_path,
        targets_file_path,
        options_file_path):
    """Load source/target vertex ids and routing options from disk.

    Bug fix: the original bound the parsed lists to *local* variables that
    shadowed the module-level ``source_list``/``target_list`` and discarded
    ``options``, so calling it had no observable effect.  The parsed data
    is now returned to the caller instead (callers that ignored the old
    ``None`` return are unaffected).

    :param sources_file_path: file with one source vertex id per line
    :param targets_file_path: file with one target vertex id per line
    :param options_file_path: path handed to read_routing_options()
    :returns: tuple ``(sources, targets, options)``
    """
    with open(sources_file_path) as sources_file:
        sources = [line.strip() for line in sources_file.readlines()]
    with open(targets_file_path) as targets_file:
        targets = [line.strip() for line in targets_file.readlines()]
    options = read_routing_options(options_file_path)
    return sources, targets, options
def read_routing_options(options_file_path):
    # Stub: options are not parsed yet; the path itself is returned
    # unchanged as a placeholder.
    return options_file_path
def do_benchmarking(sources, targets, options):
    """Time MultimodalTwoQ path calculation over all *sources*.

    Connects to the PostgreSQL network database, loads the multimodal
    graph once, then averages the per-source calculation time.
    NOTE: *targets* and *options* are currently unused.  Python 2 script
    (print statements).
    """
    print "Connecting to database... ",
    # Credentials are hard coded for the Munich test database.
    if connect_db("dbname = 'mmrp_munich' user = 'liulu' password = 'workhard'") != 0:
        print colored("failed!", "red")
        exit()
    print colored("done!", "green")
    print "Preparing routing plan... ",
    #create_routing_plan(1, 1)
    create_routing_plan(1, 0)
    #set_mode(0, 1900)
    # Mode 1002 corresponds to "by foot" per the print statement below
    # (1003 would be underground trains) -- confirm against the mode table.
    set_mode(0, 1002)
    #set_public_transit_mode(0, 1003)
    set_cost_factor("speed")
    set_target_constraint(None)
    print colored("done!", "green")
    print "Loading multimodal transportation networks in Munich... ",
    t1 = time.time()
    if parse() != 0:
        print colored("failed!", "red")
        exit()
    t2 = time.time()
    print colored("done!", "green")
    print "Time consumed: ",
    print colored(str(t2 - t1), "red"),
    print " seconds"
    print "Test for randomly selected " + str(len(sources)) + " source vertices"
    print "Routing plan: by foot"
    #print "Routing plan: by underground trains"
    print "Start benchmarking multimodal path calculation by MultimodalTwoQ... ",
    t1 = time.time()
    for s in sources:
        multimodal_twoq(long(s))
    t2 = time.time()
    print colored("done!", "green")
    print "Average calculation time: ",
    print colored(str((t2 - t1) / len(sources)), "red"),
    print " seconds"
    print "Post processing... ",
    dispose()
    print colored("done!", "green")
    print "Disconnecting database... ",
    disconnect_db()
    print colored("done!", "green")
if __name__ == "__main__":
    # argv[1]: file with one source vertex id per line
    # argv[2]: file with one target vertex id per line
    with open(sys.argv[1]) as f_sources:
        source_list = [source.strip() for source in f_sources.readlines()]
    with open(sys.argv[2]) as f_targets:
        target_list = [target.strip() for target in f_targets.readlines()]
    #options = read_routing_options(options_file_path)
    options = ""
    do_benchmarking(source_list, target_list, options)
| mit |
tianx/mysql-5.6 | xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/bug606981_test.py | 19 | 5429 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
import tarfile
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
# Globals populated by the kewpie test harness before tests run --
# presumably injected by the runner; confirm against the framework docs.
server_requirements = [[]]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
    """Round-trip test of a streamed (tar) xtrabackup with
    innodb_flush_method=O_DIRECT: backup, extract, prepare, restore,
    restart, verify row count. Python 2 module (see the 100L literal)."""

    def setUp(self):
        master_server = servers[0] # assumption that this is 'master'
        # NOTE(review): local variable -- it shadows the module-level
        # backup_path global, which stays None.
        backup_path = os.path.join(master_server.vardir, '_xtrabackup')
        # remove backup paths
        for del_path in [backup_path]:
            if os.path.exists(del_path):
                shutil.rmtree(del_path)

    def test_ib_stream(self):
        self.servers = servers
        innobackupex = test_executor.system_manager.innobackupex_path
        xtrabackup = test_executor.system_manager.xtrabackup_path
        master_server = servers[0] # assumption that this is 'master'
        backup_path = os.path.join(master_server.vardir, '_xtrabackup')
        tar_file_path = os.path.join(backup_path,'out.tar')
        output_path = os.path.join(master_server.vardir, 'innobackupex.out')
        exec_path = os.path.dirname(innobackupex)
        # populate our server with a test bed
        test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
        retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)
        # Add desired option to config file
        config_file = open(master_server.cnf_file,'a')
        config_file.write("innodb_flush_method=O_DIRECT\n")
        config_file.close()
        # take a backup
        try:
            os.mkdir(backup_path)
        except OSError:
            # directory already present from an earlier run
            pass
        cmd = [ innobackupex
              , "--defaults-file=%s" %master_server.cnf_file
              , "--stream=tar"
              , "--user=root"
              , "--port=%d" %master_server.master_port
              , "--host=127.0.0.1"
              , "--no-timestamp"
              , "--ibbackup=%s" %xtrabackup
              , "%s > %s" %(backup_path,tar_file_path)
              ]
        cmd = " ".join(cmd)
        retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
        self.assertTrue(retcode==0,output)
        # O_DIRECT from the cnf edit above must show up in the tool output.
        expected_output = "xtrabackup: using O_DIRECT"
        self.assertTrue(expected_output in output, msg=output)
        # stop the server
        master_server.stop()
        # extract our backup tarball
        cmd = "tar -ivxf %s" %tar_file_path
        retcode, output = self.execute_cmd(cmd, output_path, backup_path, True)
        self.assertEqual(retcode,0,output)
        # Check for Bug 723318 - seems quicker than separate test case
        self.assertTrue('xtrabackup_binary' in os.listdir(backup_path)
                        , msg = "Bug723318: xtrabackup_binary not included in tar archive when streaming")
        # do prepare on backup
        cmd = [ innobackupex
              , "--apply-log"
              , "--no-timestamp"
              , "--use-memory=500M"
              , "--ibbackup=%s" %xtrabackup
              , backup_path
              ]
        cmd = " ".join(cmd)
        retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
        self.assertEqual(retcode,0,output)
        # remove old datadir
        shutil.rmtree(master_server.datadir)
        os.mkdir(master_server.datadir)
        # restore from backup
        cmd = [ innobackupex
              , "--defaults-file=%s" %master_server.cnf_file
              , "--copy-back"
              , "--ibbackup=%s" %(xtrabackup)
              , backup_path
              ]
        cmd = " ".join(cmd)
        retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
        self.assertEqual(retcode,0, output)
        # restart server (and ensure it doesn't crash)
        master_server.start()
        self.assertEqual(master_server.status,1, 'Server failed restart from restored datadir...')
        # Check the server is ok
        query = "SELECT COUNT(*) FROM test.DD"
        expected_output = ((100L,),)
        retcode, output = self.execute_query(query, master_server)
        self.assertEqual(output, expected_output, msg = "%s || %s" %(output, expected_output))
| gpl-2.0 |
chutzimir/kedpm | kedpm/plugins/pdb_figaro.py | 2 | 13465 | # Copyright (C) 2003-2005 Andrey Lebedev <andrey@micro.lt>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id: pdb_figaro.py,v 1.21 2006/09/06 03:29:41 gyepi Exp $
""" Figaro password manager database plugin """
import os
from xml.dom import minidom
from string import strip
from Crypto.Cipher import Blowfish
from Crypto.Hash import MD5
from random import randint
from kedpm.exceptions import WrongPassword
from kedpm.passdb import PasswordDatabase, DatabaseNotExist
from kedpm.password_tree import PasswordTree
from kedpm.password import Password, TYPE_STRING, TYPE_TEXT, TYPE_PASSWORD
FPM_PASSWORD_LEN = 24
class FigaroPasswordTooLongError(ValueError):
    """Raised when a password exceeds FPM_PASSWORD_LEN characters."""
    pass
class FigaroPassword (Password):
    # Field layout mirroring fpm's PasswordItem XML elements.
    fields_type_info = [
        ('title', {'title': 'Title', 'type': TYPE_STRING}),
        ('user', {'title': 'Username', 'type': TYPE_STRING}),
        ('password', {'title': 'Password', 'type': TYPE_PASSWORD}),
        ('url', {'title': 'URL', 'type': TYPE_STRING}),
        ('notes', {'title': 'Notes', 'type': TYPE_TEXT}),
    ]
    # fpm-compatibility attributes carried through save/load round trips.
    default = 0
    store_long_password = 0
    launcher = ""

    def __init__(self, **kw):
        Password.__init__(self, **kw)
        self.default = kw.get('default', 0)
        self.launcher = kw.get('launcher', '')

    def __setitem__(self, key, value):
        # Reject over-long passwords unless explicitly allowed; Python 2
        # raise syntax (this module predates Python 3).
        if key=='password' and len(value) > FPM_PASSWORD_LEN and not self.store_long_password:
            raise FigaroPasswordTooLongError, "Password is too long"
        Password.__setitem__(self, key, value)
class PDBFigaro (PasswordDatabase):
    # Default location of the fpm database file (~/.fpm/fpm).
    default_db_filename = os.path.join(os.path.expanduser("~"),
        '.fpm', 'fpm')
    # LauncherList DOM node preserved verbatim for fpm compatibility.
    launcherlist = None
    filename = None
    # Set to 1 when the database was generated by kedpm itself.
    native = 0
    #default_db_filename = 'test/fpm.sample'
    # default versions
    FULL_VERSION = "00.53.00"
    DISPLAY_VERSION = "0.53"
    MIN_VERSION = "00.50.00"
def __init__(self, **args):
self._pass_tree = PasswordTree()
if args.has_key('filename'):
self.default_db_filename = args['filename']
    def open(self, password, fname=""):
        """ Open figaro password database and construct password tree """
        self._password = password
        self.filename = fname or self.default_db_filename
        # Check existance of database file
        if not os.access(self.filename, os.F_OK):
            raise DatabaseNotExist, 'File %s is not found' % self.filename
        fpm = minidom.parse(self.filename)
        generator = fpm.documentElement.getAttribute('generator')
        # Databases written by kedpm itself are flagged "native".
        if generator.startswith('kedpm'):
            self.native=1
        self.convDomToTree(fpm)
    def changePassword(self, password):
        """Change password for database.

        Database will be saved and reopened with new password."""
        # save() re-encrypts everything with the new password/salt; the
        # reopen verifies the freshly written file decrypts correctly.
        self._password = password
        self.save()
        self.open(password, fname = self.filename)
    def convDomToTree(self, fpm):
        'Read figaro xml database and create password tree from it'
        root = fpm.documentElement
        # Save version information
        self.FULL_VERSION = root.getAttribute('full_version')
        self.MIN_VERSION = root.getAttribute('min_version')
        self.DISPLAY_VERSION = root.getAttribute('display_version')
        # Support long passwords of fpm-0.58
        # NOTE(review): lexicographic string compare -- relies on the
        # zero-padded "MM.mm.pp" version format; confirm.
        if self.MIN_VERSION >="00.58.00":
            global FPM_PASSWORD_LEN
            FPM_PASSWORD_LEN = 256
        keyinfo = fpm.documentElement.getElementsByTagName("KeyInfo")[0]
        self._salt = keyinfo.getAttribute('salt')
        vstring = keyinfo.getAttribute('vstring')
        # Decrypting the magic vstring must yield "FIGARO"; anything else
        # means the supplied master password is wrong.
        if self.decrypt(vstring) != "FIGARO":
            raise WrongPassword, "Wrong password"
        # Save LauncherList xml element. Although kedpm don't use launchers
        # yet, this list will be inserted into saved database to preserve
        # compatibility with fpm.
        nodes = fpm.documentElement.getElementsByTagName("LauncherList")
        if nodes:
            assert len(nodes) == 1
            self.launcherlist = nodes[0]
        nodes = fpm.documentElement.getElementsByTagName("PasswordItem")
        for node in nodes:
            category = self._getTagData(node, "category")
            if category=="":
                branch = self._pass_tree
            else:
                # Walk (and create as needed) the category path "a/b/c".
                branch = self._pass_tree
                path = category.split('/')
                for pelem in path:
                    subbranch = branch.get(pelem)
                    if not subbranch:
                        branch = branch.addBranch(pelem)
                    else:
                        branch = subbranch
            branch.addNode(self._getPasswordFromNode(node))
    def save(self, fname=""):
        """Save figaro password database"""
        # Create new salt for each save
        self._salt = self.generateSalt()
        doc = self.convTreeToDom()
        filename = fname or self.filename or self.default_db_filename
        f = open(filename, 'w')
        f.write(doc.toxml())
        f.close()
        # Owner-only permissions: the file holds (encrypted) passwords.
        # 0600 is Python 2 octal syntax.
        os.chmod(filename, 0600)
def generateSalt(self):
"""Generate salt, that consists of 8 small latin characters"""
salt = ""
for i in range(8):
salt += chr(randint(ord('a'), ord('z')))
return salt
    def convTreeToDom(self):
        """Build and return DOM document from current password tree"""
        domimpl = minidom.getDOMImplementation()
        document= domimpl.createDocument("http://kedpm.sourceforge.net/xml/fpm", "FPM", None)
        root = document.documentElement
        root.setAttribute('full_version', self.FULL_VERSION)
        root.setAttribute('min_version', self.MIN_VERSION)
        root.setAttribute('display_version', self.DISPLAY_VERSION)
        root.setAttribute('generator', 'kedpm')
        # KeyInfo tag
        keyinfo = document.createElement('KeyInfo')
        keyinfo.setAttribute('salt', self._salt)
        # Encrypted magic marker; open()/convDomToTree verifies it.
        keyinfo.setAttribute('vstring', self.encrypt('FIGARO'))
        root.appendChild(keyinfo)
        # Add LauncherList for fpm compatibility
        if self.launcherlist:
            root.appendChild(self.launcherlist)
        # PasswordList tag
        passwordlist = document.createElement('PasswordList')
        props = ['title', 'user', 'url', 'notes']
        # Depth-first walk of the tree; iterator yields None when done.
        iter = self._pass_tree.getIterator()
        while 1:
            pwd = iter.next()
            if pwd is None:
                break
            pwitem = document.createElement('PasswordItem')
            for prop in props:
                pr_node_text = document.createTextNode(self.encrypt(pwd[prop]))
                pr_node = document.createElement(prop)
                pr_node.appendChild(pr_node_text)
                pwitem.appendChild(pr_node)
            # Password field is padded to fixed-size blocks (second arg).
            password = document.createElement('password')
            text = document.createTextNode(self.encrypt(pwd['password'], 1))
            password.appendChild(text)
            pwitem.appendChild(password)
            category = document.createElement('category')
            text = document.createTextNode(self.encrypt('/'.join(iter.getCurrentCategory())))
            category.appendChild(text)
            pwitem.appendChild(category)
            # Following launcher and default tags for fpm compatibility
            launcher = document.createElement('launcher')
            text = document.createTextNode(self.encrypt(pwd.launcher))
            launcher.appendChild(text)
            pwitem.appendChild(launcher)
            if pwd.default:
                pwitem.appendChild(document.createElement('default'))
            passwordlist.appendChild(pwitem)
        root.appendChild(passwordlist)
        return document
    def create(self, password, fname=""):
        # Create a brand-new empty database file (and its directory, with
        # owner-only 0700 permissions -- Python 2 octal syntax).
        filename = fname or self.default_db_filename
        dirname, fname = os.path.split(filename)
        if not os.access(dirname, os.F_OK):
            print "Creating directory %s" % dirname
            os.mkdir(dirname, 0700)
        newdb = PDBFigaro()
        newdb._password = password
        newdb.save(filename)
    def _getPasswordFromNode(self, node):
        """ Create password instance from given fpm node """
        fields = ["title", "user", "url", "notes", "password"]
        params = {}
        for field in fields:
            params[field] = self._getTagData(node, field)
        # save default and launcher fields for fpm compatibility
        # (presence of an empty <default/> element marks the default item)
        chnode = node.getElementsByTagName('default')
        if len(chnode)==1:
            params['default'] = 1
        params['launcher'] = self._getTagData(node, 'launcher')
        return FigaroPassword(**params)
    def _getTagData(self, node, tag):
        # Return the decrypted text content of child element <tag>,
        # or "" when the element is absent or empty.
        chnode = node.getElementsByTagName(tag)
        # NOTE(review): hasChildNodes() is queried on the parent node, not
        # on the matched child element -- confirm this is intentional.
        if chnode and node.hasChildNodes():
            datanode= chnode.pop()
            encrypted = ""
            # datanode can have more than one text chunk
            for child in datanode.childNodes:
                encrypted += child.data
            # Hex-encoded Blowfish ciphertext: 2 chars/byte, 8-byte blocks.
            assert len(encrypted) % 8 == 0
            return self.decrypt(encrypted)
        else: return ""
def encrypt(self, field, field_is_password=0):
""" Encrypt FPM encoded field """
hash=MD5.new()
hash.update(self._salt + self._password)
key = hash.digest()
bf = Blowfish.new(key)
# Allow passwords that are longer than 24 characters. Unfortunately
# this will break fpm compatibility somewhat - fpm will not be able to
# handle such long password correctly.
noised = self._addNoise(field, field_is_password and
(len(field) / FPM_PASSWORD_LEN + 1) * FPM_PASSWORD_LEN)
rotated = self._rotate(noised)
encrypted = bf.encrypt(rotated)
hexstr = self._bin_to_hex(encrypted)
return hexstr
    def decrypt(self, field):
        """ Decrypt FPM encoded field """
        # Key = MD5(salt + master password), per the FPM file format.
        hash=MD5.new()
        hash.update(self._salt + self._password)
        key = hash.digest()
        bf = Blowfish.new(key)
        binstr = self._hex_to_bin(field)
        rotated = bf.decrypt(binstr)
        # Undo the block interleaving and strip noise after the first NUL.
        plaintext = self._unrotate(rotated)
        return plaintext
def _bin_to_hex(self, strin):
"""Used in encrypt"""
strout = ""
for i in range(len(strin)):
data = strin[i]
high = ord(data) / 16
low = ord(data) % 16
strout += chr(ord('a')+high) + chr(ord('a')+low)
assert (2*len(strin) == len(strout))
return strout
def _hex_to_bin(self, strin):
"""Used in decrypt"""
strout = ""
for i in range(len(strin) / 2):
high = ord(strin[i * 2]) - ord('a')
low = ord(strin[i * 2 + 1]) - ord('a')
data = high * 16 + low
assert data < 256
strout = strout + chr(data)
return strout
def _addNoise(self, field, reslen = 0):
"""If we have a short string, I add noise after the first null prior to
encrypting. This prevents empty blocks from looking identical to
eachother in the encrypted file."""
block_size = Blowfish.block_size
field += '\x00'
reslen = reslen or (len(field) / block_size + 1) * block_size
while len(field) < reslen:
rchar = chr(randint(0, 255))
field += rchar
return field
def _rotate(self, field):
"""After we use _addNoise (above) we ensure blocks don't look identical
unless all 8 chars in the block are part of the password. This routine
makes us use all three blocks equally rather than fill the first, then
the second, etc. This makes it so none of the blocks in the password
will remain constant from save to save, even if the password is from
7-20 characters long. Note that passwords from 21-24 characters start
to fill blocks, and so will be constant. """
plaintext = ""
tmp = {}
block_size = Blowfish.block_size
num_blocks = len(field)/block_size
for b in range(num_blocks):
for i in range(block_size):
tmp[b*block_size+i] = field[i*num_blocks+b]
for c in range(len(tmp)):
plaintext = plaintext + tmp[c]
return str(plaintext)
def _unrotate(self, field):
plaintext = ""
tmp = {}
block_size = Blowfish.block_size
num_blocks = len(field)/block_size
for b in range(num_blocks):
for i in range(block_size):
tmp[i*num_blocks+b] = field[b*block_size+i]
for c in range(len(tmp)):
if tmp[c] == chr(0):
break
plaintext = plaintext + tmp[c]
return str(plaintext)
| gpl-2.0 |
WoLpH/EventGhost | eg/Classes/UndoHandler/NewFolder.py | 1 | 2163 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
# Local imports
import eg
from NewItem import NewItem
class NewFolder(NewItem):
    """
    Create a new FolderItem if the user has choosen to do so from the menu
    or toolbar.
    """
    name = eg.text.MainFrame.Menu.AddFolder.replace("&", "")

    @eg.AssertInMainThread
    @eg.LogIt
    def Do(self, selection):
        # Undoable action: create a folder near the current selection and
        # select it. The tree mutation itself runs on the action thread.
        document = self.document

        def ProcessInActionThread():
            # Insert position: right after a macro (or Autostart) sibling...
            if isinstance(
                selection,
                (document.MacroItem, document.AutostartItem)
            ):
                parent = selection.parent
                pos = parent.childs.index(selection) + 1
                if pos >= len(parent.childs):
                    pos = -1
            # ...or after the macro containing the selected action/event/
            # plugin item...
            elif isinstance(
                selection,
                (document.ActionItem, document.EventItem, document.PluginItem)
            ):
                parent = selection.parent.parent
                pos = parent.childs.index(selection.parent) + 1
                if pos >= len(parent.childs):
                    pos = -1
            # ...otherwise append inside the selected container itself.
            else:
                parent = selection
                pos = -1
            return document.FolderItem.Create(
                parent,
                pos,
                name=eg.text.General.unnamedFolder
            )
        item = eg.actionThread.Func(ProcessInActionThread)()
        # StoreItem registers the new item with the undo system.
        self.StoreItem(item)
        item.Select()
        return item
| gpl-2.0 |
traveloka/ansible | lib/ansible/modules/source_control/github_key.py | 39 | 7753 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: github_key
short_description: Manage GitHub access keys.
description:
- Creates, removes, or updates GitHub access keys.
version_added: "2.2"
options:
token:
description:
- GitHub Access Token with permission to list and create public keys.
required: true
name:
description:
- SSH key name
required: true
pubkey:
description:
- SSH public key value. Required when C(state=present).
required: false
default: none
state:
description:
- Whether to remove a key, ensure that it exists, or update its value.
choices: ['present', 'absent']
default: 'present'
required: false
force:
description:
- The default is C(yes), which will replace the existing remote key
if it's different than C(pubkey). If C(no), the key will only be
set if no key with the given C(name) exists.
required: false
choices: ['yes', 'no']
default: 'yes'
author: Robert Estelle (@erydo)
'''
RETURN = '''
deleted_keys:
description: An array of key objects that were deleted. Only present on state=absent
type: list
returned: When state=absent
sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
matching_keys:
description: An array of keys matching the specified name. Only present on state=present
type: list
returned: When state=present
sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
key:
description: Metadata about the key just created. Only present on state=present
type: dict
returned: success
sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}
'''
EXAMPLES = '''
- name: Read SSH public key to authorize
shell: cat /home/foo/.ssh/id_rsa.pub
register: ssh_pub_key
- name: Authorize key with GitHub
local_action:
module: github_key
name: Access Key for Some Machine
token: '{{ github_access_token }}'
pubkey: '{{ ssh_pub_key.stdout }}'
'''
import sys # noqa
import json
import re
API_BASE = 'https://api.github.com'
class GitHubResponse(object):
    """Lightweight wrapper over a fetch_url response.

    Eagerly reads the body so it can be parsed repeatedly; exposes the
    JSON payload and RFC-5988 pagination links.
    """
    def __init__(self, response, info):
        self.content = response.read()
        self.info = info

    def json(self):
        """Return the response body parsed as JSON."""
        return json.loads(self.content)

    def links(self):
        """Return {rel: url} parsed from the Link header (empty if none).

        BUG FIX: the original read ``re.info['link']`` -- an attribute of
        the ``re`` module, which does not exist -- instead of
        ``self.info['link']``, so every paginated response raised
        AttributeError.
        """
        links = {}
        if 'link' in self.info:
            link_header = self.info['link']
            matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
            for url, rel in matches:
                links[rel] = url
        return links
class GitHubSession(object):
    # Thin wrapper around fetch_url that adds GitHub token auth headers
    # and fails the Ansible module on any non-2xx/3xx status.
    def __init__(self, module, token):
        self.module = module
        self.token = token

    def request(self, method, url, data=None):
        # Send one API request; returns a GitHubResponse on success and
        # never returns on failure (fail_json exits the module).
        headers = {
            'Authorization': 'token %s' % self.token,
            'Content-Type': 'application/json',
            'Accept': 'application/vnd.github.v3+json',
        }
        response, info = fetch_url(
            self.module, url, method=method, data=data, headers=headers)
        if not (200 <= info['status'] < 400):
            self.module.fail_json(
                msg=(" failed to send request %s to %s: %s"
                     % (method, url, info['msg'])))
        return GitHubResponse(response, info)
def get_all_keys(session):
    """Yield every public key on the authenticated user's account,
    following Link-header pagination until exhausted."""
    url = API_BASE + '/user/keys'
    while url:
        page = session.request('GET', url)
        for item in page.json():
            yield item
        url = page.links().get('next')
def create_key(session, name, pubkey, check_mode):
    # Create the key via the API, or -- in check mode -- fabricate a
    # plausible key object without touching GitHub.
    if check_mode:
        from datetime import datetime
        now = datetime.utcnow()
        return {
            'id': 0,
            'key': pubkey,
            'title': name,
            'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
            'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
            'read_only': False,
            'verified': False
        }
    else:
        return session.request(
            'POST',
            API_BASE + '/user/keys',
            data=json.dumps({'title': name, 'key': pubkey})).json()
def delete_keys(session, to_delete, check_mode):
    """Delete the given key objects via the GitHub API.

    ``to_delete`` is an iterable of key dicts as returned by the API.
    No-op when ``check_mode`` is true.
    """
    if check_mode:
        return
    for key in to_delete:
        # BUG FIX: the original indexed with the *builtin* ``id`` function
        # (``key[id]``), raising KeyError on every delete; the numeric key
        # id lives under the string key 'id'.
        session.request('DELETE', API_BASE + '/user/keys/%s' % key['id'])
def ensure_key_absent(session, name, check_mode):
    # Delete every key titled *name*; report whether anything changed and
    # exactly which key objects were removed.
    to_delete = [key for key in get_all_keys(session) if key['title'] == name]
    delete_keys(session, to_delete, check_mode=check_mode)
    return {'changed': bool(to_delete),
            'deleted_keys': to_delete}
def ensure_key_present(session, name, pubkey, force, check_mode):
    # Ensure a key titled *name* exists. With force=True an existing key
    # whose value differs from *pubkey* is deleted and re-created; with
    # force=False an existing key is left untouched whatever its value.
    matching_keys = [k for k in get_all_keys(session) if k['title'] == name]
    deleted_keys = []
    if matching_keys and force and matching_keys[0]['key'] != pubkey:
        delete_keys(session, matching_keys, check_mode=check_mode)
        (deleted_keys, matching_keys) = (matching_keys, [])
    if not matching_keys:
        key = create_key(session, name, pubkey, check_mode=check_mode)
    else:
        key = matching_keys[0]
    return {
        'changed': bool(deleted_keys or not matching_keys),
        'deleted_keys': deleted_keys,
        'matching_keys': matching_keys,
        'key': key
    }
def main():
    # Ansible module entry point: validate parameters, normalise the
    # public key, and converge to the requested state.
    argument_spec = {
        'token': {'required': True, 'no_log': True},
        'name': {'required': True},
        'pubkey': {},
        'state': {'choices': ['present', 'absent'], 'default': 'present'},
        'force': {'default': True, 'type': 'bool'},
    }
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    token = module.params['token']
    name = module.params['name']
    state = module.params['state']
    force = module.params['force']
    pubkey = module.params.get('pubkey')
    if pubkey:
        pubkey_parts = pubkey.split(' ')
        # Keys consist of a protocol, the key data, and an optional comment.
        if len(pubkey_parts) < 2:
            module.fail_json(msg='"pubkey" parameter has an invalid format')
        # Strip out comment so we can compare to the keys GitHub returns.
        pubkey = ' '.join(pubkey_parts[:2])
    elif state == 'present':
        module.fail_json(msg='"pubkey" is required when state=present')
    session = GitHubSession(module, token)
    if state == 'present':
        result = ensure_key_present(session, name, pubkey, force=force,
                                    check_mode=module.check_mode)
    elif state == 'absent':
        result = ensure_key_absent(session, name, check_mode=module.check_mode)
    module.exit_json(**result)
from ansible.module_utils.basic import * # noqa
from ansible.module_utils.urls import * # noqa
if __name__ == '__main__':
main()
| gpl-3.0 |
tarex/satellizer | examples/server/python/app.py | 1 | 9946 | from datetime import datetime, timedelta
import os
import jwt
import json
import requests
from functools import wraps
from urlparse import parse_qs, parse_qsl
from urllib import urlencode
from flask import Flask, g, send_file, request, redirect, url_for, jsonify
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from requests_oauthlib import OAuth1
# Configuration
current_path = os.path.dirname(__file__)
client_path = os.path.abspath(os.path.join(current_path, '..', '..', 'client'))
app = Flask(__name__, static_url_path='', static_folder=client_path)
app.config.from_object('config')
db = SQLAlchemy(app)
class User(db.Model):
    # Local credentials plus one column per linked OAuth provider id.
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True)
    password = db.Column(db.String(120))
    display_name = db.Column(db.String(120))
    facebook = db.Column(db.String(120))
    google = db.Column(db.String(120))
    linkedin = db.Column(db.String(120))
    twitter = db.Column(db.String(120))

    def __init__(self, email=None, password=None, display_name=None,
                 facebook=None, google=None, linkedin=None, twitter=None):
        # Only assign what was supplied; emails are normalised to lower
        # case and passwords stored as werkzeug hashes, never plaintext.
        if email:
            self.email = email.lower()
        if password:
            self.set_password(password)
        if display_name:
            self.display_name = display_name
        if facebook:
            self.facebook = facebook
        if google:
            self.google = google
        if linkedin:
            self.linkedin = linkedin
        if twitter:
            self.twitter = twitter

    def set_password(self, password):
        # Hash-and-store (werkzeug salted hash).
        self.password = generate_password_hash(password)

    def check_password(self, password):
        # Constant-time hash comparison via werkzeug.
        return check_password_hash(self.password, password)

    def to_json(self):
        # Serializable profile; password hash deliberately excluded.
        return dict(id=self.id, email=self.email, displayName=self.display_name,
                    facebook=self.facebook, google=self.google,
                    linkedin=self.linkedin, twitter=self.twitter)
db.create_all()
def create_jwt_token(user):
    # Issue a JWT for *user*, valid for 14 days; subject is the user id.
    payload = {
        'iss': 'localhost',
        'sub': user.id,
        'iat': datetime.now(),
        'exp': datetime.now() + timedelta(days=14)
    }
    token = jwt.encode(payload, app.config['TOKEN_SECRET'])
    return token.decode('unicode_escape')
def parse_token(req):
    # Expects "Authorization: <scheme> <token>"; take the token after the
    # scheme and verify/decode it with the server secret.
    token = req.headers.get('Authorization').split()[1]
    return jwt.decode(token, app.config['TOKEN_SECRET'])
def login_required(f):
    # Decorator: reject requests lacking a valid, unexpired JWT with 401.
    # On success stores the token subject (user id) in flask.g.user_id.
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if not request.headers.get('Authorization'):
            response = jsonify(message='Missing authorization header')
            response.status_code = 401
            return response
        payload = parse_token(request)
        if datetime.fromtimestamp(payload['exp']) < datetime.now():
            response = jsonify(message='Token has expired')
            response.status_code = 401
            return response
        g.user_id = payload['sub']
        return f(*args, **kwargs)
    return decorated_function
# Routes
@app.route('/')
def index():
    # Serve the single-page client entry point.
    return send_file('../../client/index.html')
@app.route('/api/me')
@login_required
def me():
    # Return the authenticated user's profile (g.user_id set by decorator).
    user = User.query.filter_by(id=g.user_id).first()
    return jsonify(user.to_json())
@app.route('/auth/login', methods=['POST'])
def login():
    user = User.query.filter_by(email=request.json['email']).first()
    # Same 401 for unknown email and bad password: don't leak which failed.
    if not user or not user.check_password(request.json['password']):
        response = jsonify(message='Wrong Email or Password')
        response.status_code = 401
        return response
    token = create_jwt_token(user)
    return jsonify(token=token)
@app.route('/auth/signup', methods=['POST'])
def signup():
    # NOTE(review): no duplicate-email handling -- signing up twice with
    # the same address hits the unique constraint and raises.
    user = User(email=request.json['email'], password=request.json['password'])
    db.session.add(user)
    db.session.commit()
    token = create_jwt_token(user)
    return jsonify(token=token)
@app.route('/auth/facebook', methods=['POST'])
def facebook():
    # OAuth2 code-exchange flow for Facebook login / account linking.
    access_token_url = 'https://graph.facebook.com/oauth/access_token'
    graph_api_url = 'https://graph.facebook.com/me'
    params = {
        'client_id': request.json['clientId'],
        'redirect_uri': request.json['redirectUri'],
        'client_secret': app.config['FACEBOOK_SECRET'],
        'code': request.json['code']
    }
    # Step 1. Exchange authorization code for access token.
    r = requests.get(access_token_url, params=params)
    access_token = dict(parse_qsl(r.text))
    # Step 2. Retrieve information about the current user.
    r = requests.get(graph_api_url, params=access_token)
    profile = json.loads(r.text)
    # Step 3. (optional) Link accounts.
    if request.headers.get('Authorization'):
        user = User.query.filter_by(facebook=profile['id']).first()
        if user:
            response = jsonify(message='There is already a Facebook account that belongs to you')
            response.status_code = 409
            return response
        payload = parse_token(request)
        user = User.query.filter_by(id=payload['sub']).first()
        if not user:
            response = jsonify(message='User not found')
            response.status_code = 400
            return response
        # NOTE(review): this creates a brand-new user instead of setting
        # `user.facebook` on the account just looked up -- the "link"
        # apparently never happens; confirm intended behaviour.
        u = User(facebook=profile['id'], display_name=profile['name'])
        db.session.add(u)
        db.session.commit()
        token = create_jwt_token(u)
        return jsonify(token=token)
    # Step 4. Create a new account or return an existing one.
    user = User.query.filter_by(facebook=profile['id']).first()
    if user:
        token = create_jwt_token(user)
        return jsonify(token=token)
    u = User(facebook=profile['id'], display_name=profile['name'])
    db.session.add(u)
    db.session.commit()
    token = create_jwt_token(u)
    return jsonify(token=token)
@app.route('/auth/google', methods=['POST'])
def google():
    # OAuth2 code-exchange flow for Google sign-in (no linking branch).
    access_token_url = 'https://accounts.google.com/o/oauth2/token'
    people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'
    payload = dict(client_id=request.json['clientId'],
                   redirect_uri=request.json['redirectUri'],
                   client_secret=app.config['GOOGLE_SECRET'],
                   code=request.json['code'],
                   grant_type='authorization_code')
    # Step 1. Exchange authorization code for access token.
    r = requests.post(access_token_url, data=payload)
    token = json.loads(r.text)
    headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])}
    # Step 2. Retrieve information about the current user.
    r = requests.get(people_api_url, headers=headers)
    profile = json.loads(r.text)
    # Return existing account or create a new one keyed on the OIDC sub.
    user = User.query.filter_by(google=profile['sub']).first()
    if user:
        token = create_jwt_token(user)
        return jsonify(token=token)
    u = User(google=profile['sub'],
             display_name=profile['displayName'])
    db.session.add(u)
    db.session.commit()
    token = create_jwt_token(u)
    return jsonify(token=token)
@app.route('/auth/linkedin', methods=['POST'])
def linkedin():
    # OAuth2 code-exchange flow for LinkedIn sign-in.
    access_token_url = 'https://www.linkedin.com/uas/oauth2/accessToken'
    people_api_url = 'https://api.linkedin.com/v1/people/~:(id,first-name,last-name,email-address)'
    payload = dict(client_id=request.json['clientId'],
                   redirect_uri=request.json['redirectUri'],
                   client_secret=app.config['LINKEDIN_SECRET'],
                   code=request.json['code'],
                   grant_type='authorization_code')
    # Step 1. Exchange authorization code for access token.
    r = requests.post(access_token_url, data=payload)
    access_token = json.loads(r.text)
    params = dict(oauth2_access_token=access_token['access_token'],
                  format='json')
    # Step 2. Retrieve information about the current user.
    r = requests.get(people_api_url, params=params)
    profile = json.loads(r.text)
    # Return existing account or create a new one keyed on the member id.
    user = User.query.filter_by(linkedin=profile['id']).first()
    if user:
        token = create_jwt_token(user)
        return jsonify(token=token)
    u = User(linkedin=profile['id'],
             display_name=profile['firstName'] + ' ' + profile['lastName'])
    db.session.add(u)
    db.session.commit()
    token = create_jwt_token(u)
    return jsonify(token=token)
@app.route('/auth/twitter')
def twitter():
    # OAuth 1.0a three-legged flow: the same endpoint both starts the
    # dance (redirect to Twitter) and handles the callback.
    request_token_url = 'https://api.twitter.com/oauth/request_token'
    access_token_url = 'https://api.twitter.com/oauth/access_token'
    authenticate_url = 'https://api.twitter.com/oauth/authenticate'
    if request.args.get('oauth_token') and request.args.get('oauth_verifier'):
        # Callback leg: trade the verifier for an access token.
        auth = OAuth1(app.config['TWITTER_CONSUMER_KEY'],
                      client_secret=app.config['TWITTER_CONSUMER_SECRET'],
                      resource_owner_key=request.args.get('oauth_token'),
                      verifier=request.args.get('oauth_verifier'))
        r = requests.post(access_token_url, auth=auth)
        profile = dict(parse_qsl(r.text))
        user = User.query.filter_by(twitter=profile['user_id']).first()
        if user:
            token = create_jwt_token(user)
            return jsonify(token=token)
        u = User(twitter=profile['user_id'],
                 display_name=profile['screen_name'])
        db.session.add(u)
        db.session.commit()
        token = create_jwt_token(u)
        return jsonify(token=token)
    else:
        # First leg: obtain a request token and send the user to Twitter.
        oauth = OAuth1(app.config['TWITTER_CONSUMER_KEY'],
                       client_secret=app.config['TWITTER_CONSUMER_SECRET'],
                       callback_uri=app.config['TWITTER_CALLBACK_URL'])
        r = requests.post(request_token_url, auth=oauth)
        oauth_token = dict(parse_qsl(r.text))
        qs = urlencode(dict(oauth_token=oauth_token['oauth_token']))
        return redirect(authenticate_url + '?' + qs)
if __name__ == '__main__':
    # Development server only; run behind a real WSGI server in production.
    app.run(port=3000)
| mit |
rjshade/grpc | src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py | 901 | 1528 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
janhahne/nest-simulator | pynest/examples/spatial/grid_iaf.py | 20 | 1437 | # -*- coding: utf-8 -*-
#
# grid_iaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create a population of iaf_psc_alpha neurons on a 4x3 grid
-----------------------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt

nest.ResetKernel()

# 4 columns x 3 rows of iaf_psc_alpha neurons on a grid of extent 2.0 x 1.5,
# i.e. node spacing 0.5 in both directions, centered on the origin.
l1 = nest.Create('iaf_psc_alpha',
                 positions=nest.spatial.grid(shape=[4, 3], extent=[2., 1.5]))

nest.PrintNodes()

nest.PlotLayer(l1, nodesize=50)

# beautify
plt.axis([-1.0, 1.0, -0.75, 0.75])
# FIX: repeated plt.axes() calls are deprecated (and in current matplotlib
# each call creates a brand-new empty Axes, so ticks/aspect never applied
# to the plotted layer); operate on the current axes instead.
ax = plt.gca()
ax.set_aspect('equal', 'box')
# Tick marks at the grid-cell boundaries between node positions.
ax.set_xticks((-0.75, -0.25, 0.25, 0.75))
ax.set_yticks((-0.5, 0, 0.5))
plt.grid(True)
# FIX: labels now match the layer actually created above
# (shape=[4, 3], extent=[2.0, 1.5]).
plt.xlabel('4 Columns, Extent: 2.0')
plt.ylabel('3 Rows, Extent: 1.5')
plt.show()

# plt.savefig('grid_iaf.png')
| gpl-2.0 |
angelapper/edx-platform | common/djangoapps/student/management/tests/test_transfer_students.py | 122 | 6240 | """
Tests the transfer student management command
"""
from django.conf import settings
from mock import patch, call
from opaque_keys.edx import locator
import unittest
import ddt
from shoppingcart.models import Order, CertificateItem # pylint: disable=import-error
from course_modes.models import CourseMode
from student.management.commands import transfer_students
from student.models import CourseEnrollment, UNENROLL_DONE, EVENT_NAME_ENROLLMENT_DEACTIVATED, \
EVENT_NAME_ENROLLMENT_ACTIVATED, EVENT_NAME_ENROLLMENT_MODE_CHANGED
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.ddt
class TestTransferStudents(ModuleStoreTestCase):
    """Tests for transferring students between courses."""
    PASSWORD = 'test'
    # Class-level default; assert_unenroll_signal sets an instance attribute
    # that shadows it, so each test starts from False.
    signal_fired = False

    def setUp(self, **kwargs):
        """Connect a stub receiver, and analytics event tracking."""
        super(TestTransferStudents, self).setUp()
        UNENROLL_DONE.connect(self.assert_unenroll_signal)
        # Patch the analytics tracker so emitted events can be asserted on.
        patcher = patch('student.models.tracker')
        self.mock_tracker = patcher.start()
        self.addCleanup(patcher.stop)
        self.addCleanup(UNENROLL_DONE.disconnect, self.assert_unenroll_signal)

    def assert_unenroll_signal(self, skip_refund=False, **kwargs):  # pylint: disable=unused-argument
        """ Signal Receiver stub for testing that the unenroll signal was fired. """
        # Must fire exactly once, and always with skip_refund=True so the
        # transfer does not trigger a certificate refund.
        self.assertFalse(self.signal_fired)
        self.assertTrue(skip_refund)
        self.signal_fired = True

    def test_transfer_students(self):
        """ Verify the transfer student command works as intended. """
        student = UserFactory.create()
        student.set_password(self.PASSWORD)
        student.save()
        mode = 'verified'

        # Original Course
        original_course_location = locator.CourseLocator('Org0', 'Course0', 'Run0')
        course = self._create_course(original_course_location)
        # Enroll the student in 'verified'
        CourseEnrollment.enroll(student, course.id, mode="verified")

        # Create and purchase a verified cert for the original course.
        self._create_and_purchase_verified(student, course.id)

        # New Course 1
        course_location_one = locator.CourseLocator('Org1', 'Course1', 'Run1')
        new_course_one = self._create_course(course_location_one)

        # New Course 2
        course_location_two = locator.CourseLocator('Org2', 'Course2', 'Run2')
        new_course_two = self._create_course(course_location_two)

        original_key = unicode(course.id)
        new_key_one = unicode(new_course_one.id)
        new_key_two = unicode(new_course_two.id)

        # Run the actual management command
        transfer_students.Command().handle(
            source_course=original_key, dest_course_list=new_key_one + "," + new_key_two
        )
        # The unenroll signal must have fired for the source course.
        self.assertTrue(self.signal_fired)

        # Confirm the analytics event was emitted.
        # Expected sequence: mode change + deactivation on the source course,
        # then activation + mode change on each destination course.
        self.mock_tracker.emit.assert_has_calls(  # pylint: disable=maybe-no-member
            [
                call(
                    EVENT_NAME_ENROLLMENT_ACTIVATED,
                    {'course_id': original_key, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_MODE_CHANGED,
                    {'course_id': original_key, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_DEACTIVATED,
                    {'course_id': original_key, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_ACTIVATED,
                    {'course_id': new_key_one, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_MODE_CHANGED,
                    {'course_id': new_key_one, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_ACTIVATED,
                    {'course_id': new_key_two, 'user_id': student.id, 'mode': mode}
                ),
                call(
                    EVENT_NAME_ENROLLMENT_MODE_CHANGED,
                    {'course_id': new_key_two, 'user_id': student.id, 'mode': mode}
                )
            ]
        )
        self.mock_tracker.reset_mock()

        # Confirm the enrollment mode is verified on the new courses, and enrollment is enabled as appropriate.
        self.assertEquals((mode, False), CourseEnrollment.enrollment_mode_for_user(student, course.id))
        self.assertEquals((mode, True), CourseEnrollment.enrollment_mode_for_user(student, new_course_one.id))
        self.assertEquals((mode, True), CourseEnrollment.enrollment_mode_for_user(student, new_course_two.id))

        # Confirm the student has not be refunded.
        target_certs = CertificateItem.objects.filter(
            course_id=course.id, user_id=student, status='purchased', mode=mode
        )
        self.assertTrue(target_certs[0])
        self.assertFalse(target_certs[0].refund_requested_time)
        self.assertEquals(target_certs[0].order.status, 'purchased')

    def _create_course(self, course_location):
        """ Creates a course """
        return CourseFactory.create(
            org=course_location.org,
            number=course_location.course,
            run=course_location.run
        )

    def _create_and_purchase_verified(self, student, course_id):
        """ Creates a verified mode for the course and purchases it for the student. """
        course_mode = CourseMode(course_id=course_id,
                                 mode_slug="verified",
                                 mode_display_name="verified cert",
                                 min_price=50)
        course_mode.save()
        # When there is no expiration date on a verified mode, the user can always get a refund
        cart = Order.get_cart_for_user(user=student)
        CertificateItem.add_to_order(cart, course_id, 50, 'verified')
        cart.purchase()
| agpl-3.0 |
tsabi/Odoo-tsabi-fixes | addons/hw_scale/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CFDEMproject/LAMMPS | tools/eff/cfg2lammps.py | 28 | 11563 | #!/usr/local/bin/python-2.5/bin/python
Info="""
Module name: cfg2lammps.py
Author: (c) Andres Jaramillo-Botero
California Institute of Technology
ajaramil@wag.caltech.edu
Project: pEFF
Version: August 2009
Reads in an eff .cfg file and produces the corresponding lammps data and input files
NOTE: Unsupported functions will be reported in the output log
12/2010: Added support for fixed-core and pseudo-core structures
"""
# import essentials:
import sys, os
from math import log10
from shutil import rmtree
from getopt import gnu_getopt as getopt
import numpy
def printHelp():
    # Print the module banner (Info) and a one-line usage string.
    # Python 2 print statements: this whole script targets Python 2.
    print Info
    print "Usage: python cfg2lammps cfgfile\n"
    return
# Template for the common header of the generated LAMMPS input script.
# Placeholders: %(date, name, boundary, cutoff, thermo period).
general="""
# Created %s
# General parameters
variable sname index %s
log ${sname}.log
units electron
newton on
boundary %s
atom_style electron
read_data data.${sname}
pair_style eff/cut %s
pair_coeff * *
compute energies all pair eff/cut
variable eke equal c_energies[1]
variable epauli equal c_energies[2]
variable estatics equal c_energies[3]
variable errestrain equal c_energies[4]
communicate single vel yes
compute peratom all stress/atom
compute p all reduce sum c_peratom[1] c_peratom[2] c_peratom[3]
variable press equal -(c_p[1]+c_p[2]+c_p[3])/(3*vol)
compute effTemp all temp/eff
compute effPress all pressure effTemp
thermo %s
thermo_style custom step etotal pe ke v_eke v_epauli v_estatics v_errestrain temp press v_press
thermo_modify temp effTemp press effPress
"""
#%(date,name,boundary,cutoff,period)

# Template for a conjugate-gradient minimization stage.
minimize="""
# Minimization
min_style cg
dump 1 %s xyz %s ${sname}.min.xyz
dump 2 %s custom %s ${sname}.min.lammpstrj id type q spin eradius x y z fx fy fz erforce
min_modify line quadratic
minimize 0 1.0e-5 %s %s
undump 1
undump 2
"""
#%(group,period,group,period,iterations,fcalls)

# Template for a single-point energy evaluation (no placeholders).
single_pt="""
# Single point energy
run 0
"""

# Template for an MD stage; the third placeholder receives a fully
# formatted 'fix' argument string from the ensemble table below.
dynamics="""
# %s Dynamics
timestep %s
fix %s
dump 1 %s custom %s ${sname}.%s.lammpstrj id type q spin eradius x y z
dump 2 %s custom %s ${sname}.%s.xyz
run %s
unfix 1
undump 1
undump 2
"""

# Dispatch table from the .cfg 'calc' keyword to its script template.
task={'single_pt':single_pt,'minimize':minimize,'dynamics':dynamics}

# Atomic number -> standard atomic mass (amu), H (1) through Ar (18).
q2m={1:'1.007940',2:'4.002602',3:'6.941000',4:'9.012182',5:'10.811000',6:'12.010700',7:'14.006700',8:'15.999400',
     9:'18.9984032',10:'20.179700',11:'22.98976928',12:'24.305000',13:'26.9815386',14:'28.085500',15:'30.973762',
     16:'32.065000',17:'35.453000',18:'39.948000'}
def generate_lammps_input(infile):
# Defaults values
ensemble={"nve":"1 %s nve/eff",'nvt':"1 %s nvt/eff %s %s %s %s",'npt':"1 %s npt/eff %s %s %s %s %s %s"}
boundary="f f f"
xbound="-1000.000 1000.0 xlo xhi\n"
ybound="-1000.000 1000.0 ylo yhi\n"
zbound="-1000.000 1000.0 zlo zhi\n"
cutoff=1000.0
period="1"
emass=0
vels=""
datafile=open("data."+infile[:-4],'w')
scriptfile=open("in."+infile[:-4],'w')
print "Reading %s ... [WAIT]"%infile,
fin = open(infile,'r')
lines = fin.xreadlines()
print 7*"\b"+"[DONE]"
numcores=0
numnuclei=0
numelec=0
cores={}
nuclei={}
electrons={}
masses=[]
massstr="Masses\n\n"
types=1
q2type={}
Tflag=False # Default ensemble is NVE
steps='1000'
print "Extracting run parameters from %s ... "%(infile),
for line in lines:
# 1st level keywords
if line.find("@params")==0:
flag='params'
continue
elif line.find("@cores")==0:
flag='cores'
continue
elif line.find("@nuclei")==0:
flag='nuclei'
continue
elif line.find("@electrons")==0:
flag='electrons'
continue
elif line.find("@nuc_velocities")==0:
flag='n_vels'
continue
elif line.find("@elec_velocities")==0:
flag='e_vels'
continue
elif line.find("@nuc_masses")==0:
flag='n_mass'
continue
elif line.find("@elec_masses")==0:
flag='e_mass'
continue
elif line.find("@restraints")==0:
flag='restraints'
continue
# 2nd level keywords
if flag=='params':
if line.find("calc")>=0:
op=line.split()[2]
if line.find("print_every")>=0:
period=line.split()[2]
if line.find("num_steps")>=0:
steps=line.split()[2]
if line.find("min_freeze")>=0:
setforce="velocity\t% set 0.0 0.0 0.0\nfix\tfreeze %s setforce 0.0 0.0 0.0"%(line.split()[2],line.split()[2])
if line.find("thermostat")>=0:
tflag=True
#ensemble="fix\t1 all nvt/eff "
if line.find("start_temperature")>=0:
Tstart=line.split()[2]
#ensemble+=Tstart
if line.find("end_temperature")>=0:
Tstop=line.split()[2]
#ensemble+=Tstop
if line.find("andersen_coupling")>=0 or line.find("nose_hoover_coupling")>=0:
Tdamp=line.split()[2]
#ensemble+=Tdamp
if line.find("dt")>=0:
dt=line.split()[2]
if line.find("electron_mass")>=0:
emass=line.split()[2]
if line.find("adaptive_step_size")>=0:
continue
if line.find("adaptive_energy")>=0:
continue
if line.find("e_field_freq")>=0:
continue
if line.find("e_field_packet_duration")>=0:
continue
if line.find("e_field")>=0:
field=line.split()[2:5]
efield="fix\field all efield %s %s %s"%(field[0],field[1],field[2])
if line.find("e_field_packet_duration")>=0:
continue
if line.find("set_limit")>=0:
continue # need to add this contraint
if line.find("set_limit_stiffness")>=0:
continue
if line.find("output_position")>=0:
dump_pos="dump\t1 all custom %s ${sname}.lammpstrj id type q spin eradius x y z "%(period)
if line.find("output_velocities")>=0:
dump_pos+="vx vy vz "
if line.find("output_energy_forces")>=0:
dump_pos="compute\tenergy all pe/atom\n"+dump_pos
dump_pos+="c_energy fx fy fz\n"
if line.find("output_restart")>=0:
restart="restart\t%s ${sname}.restart1 ${sname}.restart2"%(period)
if line.find("output_restraints")>=0:
continue
if line.find("ewald_re_cutoff")>=0 or line.find("ewald_autoset")>=0 or line.find("ewald_log_precision")>=0 or line.find("ewald_max_re")>=0 or \
line.find("ewald_r_cutoff")>=0 or line.find("ewald_k_cutoff")>=0 or line.find("ewald_nuc_r")>=0:
continue
if line.find("periodic")>=0:
bounds=line.split()[2]
if bounds=="True": boundary="p p p"
elif bounds=="minimage_x": boundary="p f f"
elif bounds=="minimage_xy": boundary="p p f"
elif bounds=="minimage_y": boundary="f p f"
elif bounds=="minimage_xyz": boundary="p p p"
elif bounds=="minimage_z": boundary="f f p"
if line.find("x_bound")>=0:
xbnds=line.split()[2:4]
xbound="%s %s xlo xhi\n"%(xbnds[0],xbnds[1])
if line.find("y_bound")>=0:
ybnds=line.split()[2:4]
ybound="%s %s ylo yhi\n"%(ybnds[0],ybnds[1])
if line.find("z_bound")>=0:
zbnds=line.split()[2:4]
zbound="%s %s zlo zhi\n"%(zbnds[0],zbnds[1])
if line.find("taper_cutoff")>=0:
cutoff=line.split()[2]
continue
if flag=='cores' and len(line)>1:
numcores+=1
ln=line.split()
nc=' '.join(ln[0:3])
q=ln[3]
spin='3'
radius=ln[4]
m=q2m[int(float(q))]
if m not in masses:
masses.append(m)
massstr+="%d %s\n"%(types,m)
q2type[q]=types
types+=1
cores[numcores]=[nc,q,spin,radius]
continue
if flag=='nuclei' and len(line)>1:
numnuclei+=1
ln=line.split()
np=' '.join(ln[0:3])
q=ln[3]
m=q2m[int(float(q))]
if m not in masses:
masses.append(m)
massstr+="%d %s\n"%(types,m)
q2type[q]=types
types+=1
nuclei[numnuclei]=[np,q]
continue
if flag=='electrons' and len(line)>1:
numelec+=1
ln=line.split()
ep=' '.join(ln[0:3])
spin=ln[3]
radius=ln[4]
electrons[numelec]=[ep,spin,radius]
if numelec==1:
if emass!=0: massstr+="%d %s\n\n"%(types,emass) # electron mass=1
else: massstr+="%d 1.000000\n\n"%(types)
continue
if flag=='n_vels' and len(line)>1:
vels+=line+" 0.0"
continue
if flag=='e_vels' and len(line)>1:
ln=line.split()
ln[0]=ln[0]+numnuclei
vels+=ln[0]+" "+ln[1]+" "+ln[2]+" "+ln[3]+" "+ln[4]+"\n"
continue
if flag=='n_mass' and len(line)>1:
print "Setting nuclear masses is unsupported\n"
continue
if flag=='e_mass' and len(line)>1:
print "Setting electron masses is unsupported\n"
continue
print "\bDone"
# Build data file
print "Writing datafile to %s ... "%('data.'+infile),
sys.stdout.flush()
print "\b"*19+"General section ",
datafile.writelines("Created using cfg2lammps (c) AJB-2009\n\n%d atoms\n%d atom types\n\n%s%s%s\n"%(numcores+numnuclei+numelec,types,xbound,ybound,zbound))
print "\b"*19+"Masses section ",
datafile.writelines(massstr)
print "\b"*19+"Atoms section ",
datafile.writelines("Atoms\n\n")
for n in range(numcores):
datafile.writelines("%d %d %2.2f %s %s %s\n"%(n+1,q2type[cores[n+1][1]],float(cores[n+1][1]),cores[n+1][2],cores[n+1][3],cores[n+1][0]))
for n in range(numnuclei):
datafile.writelines("%d %d %2.2f 0 0.0 %s\n"%(n+numcores+1,q2type[nuclei[n+1][1]],float(nuclei[n+1][1]),nuclei[n+1][0]))
for e in range(numelec):
datafile.write("%d %d 0.0 %s %s %s\n"%(e+numnuclei+numcores+1,types,electrons[e+1][1],electrons[e+1][2],electrons[e+1][0]))
print "\b"*19+"Velocities section\n",
datafile.writelines(vels)
datafile.writelines("\n")
print "DONE .... GOODBYE !!"
datafile.close()
# Build input script
import datetime
scriptfile.writelines(general%(datetime.date.today(),infile[:-4],boundary,cutoff,period))
if op=='minimize':
scriptfile.writelines(minimize%('all',period,'all',period,steps,'10000'))
#%(group,period,group,period,iterations,fcalls)
elif op=='single_pt':
scriptfile.writelines(single_pt%())
elif op=='dynamics':
if Tflag==True:
scriptfile.writelines(dynamics%('NVT',dt,ensemble['nvt']%('all',Tstart,Tstop,Tdamp,''),'all',period,'nvt','all',period,'nve',steps))
#%(ensemble,dt,group,ensemble%(group,tstart,tstop,tdamp,options))
else:
scriptfile.writelines(dynamics%('NVE',dt,ensemble['nve']%('all'),'all',period,'nve','all',period,'nve',steps))
#%(ensemble,dt,group,ensemble%(group))
scriptfile.writelines("\n")
if __name__ == '__main__':
    # set defaults
    # check for input:
    opts, argv = getopt(sys.argv[1:], 'h')
    # if no input, print help and exit
    if len(argv) != 1:
        printHelp()
        sys.exit(1)
    else:
        infile=argv[0]
    # read options
    for opt, arg in opts:
        if opt == '-h': # -h: print help
            printHelp()
            # FIX: stop after printing help instead of falling through
            # and running the conversion anyway.
            sys.exit(0)
    generate_lammps_input(infile)
| gpl-2.0 |
psigen/ledgerlemur | ledger.py | 1 | 1932 | import jinja2
import os
import webapp2
from google.appengine.api import users
from google.appengine.ext import ndb
# We set a parent key on the 'Transactions' to ensure that they are all in the same
# entity group. Queries across the single entity group will be consistent.
# However, the write rate should be limited to ~1/second.
def ledger_key(ledger_name='default_ledger'):
    """Constructs a Datastore ancestor key for the ledger named ledger_name."""
    return ndb.Key('Ledger', ledger_name)
# Jinja2 environment rooted at this module's directory; autoescape guards
# rendered templates against injected markup in transaction content.
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'])
class Transaction(ndb.Model):
    """A single ledger entry, stored under a ledger ancestor key."""
    # User who posted the entry; left unset for anonymous posts.
    author = ndb.UserProperty()
    # Free-form entry text; not indexed since it is never queried on.
    content = ndb.StringProperty(indexed=False)
    # Creation timestamp, assigned automatically on first put().
    date = ndb.DateTimeProperty(auto_now_add=True)
class MainPage(webapp2.RequestHandler):
    """Renders the landing page with the ten most recent transactions."""

    def get(self):
        # Ancestor query: strongly consistent within the ledger entity group,
        # newest entries first.
        recent = (Transaction.query(ancestor=ledger_key())
                  .order(-Transaction.date)
                  .fetch(10))

        # Offer a login or logout link depending on the visitor's state.
        if users.get_current_user():
            auth_url = users.create_logout_url(self.request.uri)
            auth_label = 'Logout'
        else:
            auth_url = users.create_login_url(self.request.uri)
            auth_label = 'Login'

        page = jinja_environment.get_template('index.html')
        self.response.out.write(page.render(transactions=recent,
                                            url=auth_url,
                                            url_linktext=auth_label))
class Ledger(webapp2.RequestHandler):
    """Handles POSTs that append a new transaction to the ledger."""

    def post(self):
        entry = Transaction(parent=ledger_key())
        # Record the author only when someone is signed in.
        current_user = users.get_current_user()
        if current_user:
            entry.author = current_user
        entry.content = self.request.get('content')
        entry.put()
        self.redirect('/')
# WSGI application: '/' renders the ledger, '/update' appends an entry.
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/update', Ledger),
], debug=True)
| apache-2.0 |
mdjurfeldt/nest-simulator | examples/neuronview/neuronview.py | 13 | 10676 | # -*- coding: utf-8 -*-
#
# neuronview.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk # noqa
import pango # noqa
import gobject # noqa
from matplotlib.figure import Figure # noqa
from matplotlib.backends.backend_gtkagg import \
FigureCanvasGTKAgg as FigureCanvas # noqa
import matplotlib.gridspec as gridspec # noqa
import os # noqa
import nest # noqa
# Models preselected in the combo boxes when the GUI starts.
default_neuron = "iaf_psc_alpha"
default_stimulator = "dc_generator"
class Main():
    """Glade-based GTK window: pick a neuron and a stimulator, edit their
    parameters, simulate with NEST, and plot spikes and membrane potential."""

    def __init__(self):
        # Load the UI definition and wire its signals to methods of self.
        self._gladefile = "neuronview.glade"
        self._builder = gtk.Builder()
        self._builder.add_from_file(self._gladefile)
        self._builder.connect_signals(self)
        self._win = self._builder.get_object("mainwindow")
        self._win.resize(900, 700)
        # Editable parameter tables for the stimulator and the neuron.
        self._stimulatordictview = DictView()
        self._builder.get_object("scrolledwindow2").add(
            self._stimulatordictview)
        self._neurondictview = DictView()
        self._builder.get_object("scrolledwindow3").add(self._neurondictview)
        self.populate_comboboxes()
        # Embed a matplotlib canvas for the result plots.
        self._figure = Figure(figsize=(5, 4), dpi=100)
        canvas = FigureCanvas(self._figure)
        canvas.set_size_request(200, 250)
        canvas.show()
        box = self._builder.get_object("box3")
        # Match the figure background to the window background color.
        bg_style = box.get_style().bg[gtk.STATE_NORMAL]
        gtk_color = (bg_style.red_float, bg_style.green_float,
                     bg_style.blue_float)
        self._figure.set_facecolor(gtk_color)
        box.pack_start(canvas)
        self._win.show()
        gtk.main()

    def update_figure(self, spikes, potentials):
        """Redraw spike raster (top) and membrane potential (bottom)."""
        # Only plot after a simulation has actually advanced the kernel clock.
        if nest.GetKernelStatus("time") != 0.0:
            self._figure.clear()
            gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])
            ax0 = self._figure.add_subplot(gs[0])
            ax0.plot(spikes[0]["times"], [1] * len(spikes[0]["times"]), ".")
            ax0.set_yticks([])
            ax0.set_xticks([])
            ax1 = self._figure.add_subplot(gs[1])
            ax1.plot(potentials[0]["times"], potentials[0]["V_m"], "r-")
            ax1.set_ylabel("$V_m$ (mV)")
            ax1.set_xlabel("time (s)")
            # plt.tight_layout()
            self._figure.canvas.draw()

    def filter_statusdict(self, params):
        """Remove read-only/bookkeeping entries so only editable keys remain."""
        for key in ["archiver_length", "available", "capacity",
                    "elementsize", "frozen", "global_id",
                    "instantiations", "is_refractory", "local",
                    "model", "element_type", "offset", "origin",
                    "receptor_types", "recordables",
                    "refractory_input", "rmax", "state", "t_spike",
                    "thread", "tlast", "tspike", "type_id", "vp",
                    "ymod"]:
            if key in params.keys():
                params.pop(key)

    def populate_comboboxes(self):
        """Fill the model selectors with NEST's neuron and stimulator models."""
        neuronmodels = self._builder.get_object("neuronmodels")
        neuronmodelsliststore = neuronmodels.get_model()
        stimulatormodels = self._builder.get_object("stimulatormodels")
        stimulatormodelsliststore = stimulatormodels.get_model()
        neuron_it = None
        stimulator_it = None
        models = nest.Models("nodes")
        # Models that are known not to work with this simple GUI.
        models = [x for x in models if
                  x not in ["correlation_detector", "sli_neuron",
                            "iaf_psc_alpha_norec", "parrot_neuron",
                            "parrot_neuron_ps"]]
        for entry in models:
            try:
                entrytype = nest.GetDefaults(entry)["element_type"]
            # FIX: was a bare 'except:' (also caught KeyboardInterrupt etc.).
            except Exception:
                entrytype = "unknown"
            if entrytype == "neuron":
                it = neuronmodelsliststore.append([entry])
                if entry == default_neuron:
                    neuron_it = it
            elif entrytype == "stimulator":
                it = stimulatormodelsliststore.append([entry])
                if entry == default_stimulator:
                    stimulator_it = it
        cell = gtk.CellRendererText()
        neuronmodels.pack_start(cell, True)
        neuronmodels.add_attribute(cell, 'text', 0)
        neuronmodels.set_active_iter(neuron_it)
        stimulatormodels.pack_start(cell, True)
        stimulatormodels.add_attribute(cell, 'text', 0)
        stimulatormodels.set_active_iter(stimulator_it)
        docviewcombo = self._builder.get_object("docviewcombo")
        docviewcomboliststore = docviewcombo.get_model()
        docviewcomboliststore.append(["Stimulating device"])
        it = docviewcomboliststore.append(["Neuron"])
        docviewcombo.pack_start(cell, True)
        docviewcombo.add_attribute(cell, 'text', 0)
        docviewcombo.set_active_iter(it)

    def get_help_text(self, name):
        """Return the installed NEST help text for a model, if present."""
        nest.sli_run("statusdict /prgdocdir get")
        docdir = nest.sli_pop()
        helptext = "No documentation available"
        for subdir in ["cc", "sli"]:
            filename = os.path.join(docdir, "help", subdir, name + ".hlp")
            if os.path.isfile(filename):
                helptext = open(filename, 'r').read()
        return helptext

    def on_model_selected(self, widget):
        """Show the defaults of the newly selected model in its table."""
        liststore = widget.get_model()
        model = liststore.get_value(widget.get_active_iter(), 0)
        statusdict = nest.GetDefaults(model)
        self.filter_statusdict(statusdict)
        if widget == self._builder.get_object("neuronmodels"):
            self._neurondictview.set_params(statusdict)
        if widget == self._builder.get_object("stimulatormodels"):
            self._stimulatordictview.set_params(statusdict)
        self.on_doc_selected(self._builder.get_object("docviewcombo"))

    def on_doc_selected(self, widget):
        """Display help for whichever model the doc selector points at."""
        liststore = widget.get_model()
        doc = liststore.get_value(widget.get_active_iter(), 0)
        docview = self._builder.get_object("docview")
        docbuffer = gtk.TextBuffer()
        if doc == "Neuron":
            combobox = self._builder.get_object("neuronmodels")
        if doc == "Stimulating device":
            combobox = self._builder.get_object("stimulatormodels")
        liststore = combobox.get_model()
        model = liststore.get_value(combobox.get_active_iter(), 0)
        docbuffer.set_text(self.get_help_text(model))
        docview.set_buffer(docbuffer)
        docview.modify_font(pango.FontDescription("monospace 10"))

    def on_simulate_clicked(self, widget):
        """Build stimulator -> neuron -> recorders, simulate, and plot."""
        nest.ResetKernel()
        combobox = self._builder.get_object("stimulatormodels")
        liststore = combobox.get_model()
        stimulatormodel = liststore.get_value(combobox.get_active_iter(), 0)
        params = self._stimulatordictview.get_params()
        stimulator = nest.Create(stimulatormodel, params=params)
        combobox = self._builder.get_object("neuronmodels")
        liststore = combobox.get_model()
        neuronmodel = liststore.get_value(combobox.get_active_iter(), 0)
        neuron = nest.Create(neuronmodel,
                             params=self._neurondictview.get_params())
        weight = self._builder.get_object("weight").get_value()
        delay = self._builder.get_object("delay").get_value()
        nest.Connect(stimulator, neuron, weight, delay)
        sd = nest.Create("spike_detector", params={"record_to": ["memory"]})
        nest.Connect(neuron, sd)
        vm = nest.Create("voltmeter", params={"record_to": ["memory"],
                                              "interval": 0.1})
        nest.Connect(vm, neuron)
        simtime = self._builder.get_object("simtime").get_value()
        nest.Simulate(simtime)
        self.update_figure(nest.GetStatus(sd, "events"),
                           nest.GetStatus(vm, "events"))

    def on_delete_event(self, widget, event):
        """Window-close handler; returning True stops default destruction."""
        self.on_quit(widget)
        return True

    def on_quit(self, project):
        """Hide the window and leave the GTK main loop."""
        self._builder.get_object("mainwindow").hide()
        gtk.main_quit()
class DictView(gtk.TreeView):
    """Editable two-column (name/value) tree view over a parameter dict.

    Column 0 of the backing model holds a hidden payload dict with the
    original key and the value's Python type, used to convert edits back.
    """

    def __init__(self, params=None):
        gtk.TreeView.__init__(self)
        if params:
            self.params = params
            self.repopulate()
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn("Name", renderer, text=1)
        self.append_column(column)
        # The value column is editable; edits are validated in check_value.
        renderer = gtk.CellRendererText()
        renderer.set_property("mode", gtk.CELL_RENDERER_MODE_EDITABLE)
        renderer.set_property("editable", True)
        column = gtk.TreeViewColumn("Value", renderer, text=2)
        self.append_column(column)
        self.set_size_request(200, 150)
        renderer.connect("edited", self.check_value)
        self.show()

    def repopulate(self):
        """Rebuild the tree model from self.params (sorted by key)."""
        model = gtk.TreeStore(gobject.TYPE_PYOBJECT, gobject.TYPE_STRING,
                              gobject.TYPE_STRING)
        for key in sorted(self.params.keys()):
            pos = model.insert_after(None, None)
            data = {"key": key, "element_type": type(self.params[key])}
            model.set_value(pos, 0, data)
            model.set_value(pos, 1, str(key))
            model.set_value(pos, 2, str(self.params[key]))
        self.set_model(model)

    def check_value(self, widget, path, new_text):
        """Validate an edited cell; keep the old value if conversion fails."""
        model = self.get_model()
        data = model[path][0]
        try:
            value_type = data["element_type"]
            # FIX: call the stored type directly instead of eval()-ing a
            # string built from user input -- the old form allowed arbitrary
            # code injection and crashed (uncaught SyntaxError) on quotes.
            new_value = value_type(new_text)
            # bool('False') is True, so handle false-y spellings explicitly.
            if value_type is bool and new_text.lower() in ["false", "0"]:
                new_value = False
            self.params[data["key"]] = new_value
            model[path][2] = str(new_value)
        except ValueError:
            old_value = self.params[data["key"]]
            model[path][2] = str(old_value)

    def get_params(self):
        """Return the (possibly edited) parameter dict."""
        return self.params

    def set_params(self, params):
        """Replace the parameter dict and refresh the view."""
        self.params = params
        self.repopulate()
if __name__ == "__main__":
Main()
| gpl-2.0 |
ppries/tensorflow | tensorflow/contrib/framework/python/framework/checkpoint_utils_test.py | 10 | 9682 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpoints tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
def _create_checkpoints(sess, checkpoint_dir):
  """Creates var1..var4, initializes them, and saves a checkpoint.

  Variable creation order matters: it defines the graph this helper builds,
  and var4 lives under the 'useful_scope' variable scope.
  Returns the four initialized values as numpy arrays.
  """
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  v1 = tf.get_variable("var1", [1, 10])
  v2 = tf.get_variable("var2", [10, 10])
  v3 = tf.get_variable("var3", [100, 100])
  with tf.variable_scope("useful_scope"):
    v4 = tf.get_variable("var4", [9, 9])
  sess.run(tf.global_variables_initializer())
  v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
  saver = tf.train.Saver()
  # global_step=0 makes the checkpoint files 'model-0.*'.
  saver.save(sess, checkpoint_prefix, global_step=0,
             latest_filename=checkpoint_state_name)
  return v1_value, v2_value, v3_value, v4_value
def _create_partition_checkpoints(sess, checkpoint_dir):
  """Create one partitioned variable, checkpoint it, return partition values.

  Args:
    sess: an active `tf.Session`.
    checkpoint_dir: directory in which the checkpoint files are written.

  Returns:
    List of numpy arrays, one per partition of `var1`, as saved.
  """
  prefix = os.path.join(checkpoint_dir, "model")
  v1 = tf.get_variable(
      name="var1",
      shape=[100, 100],
      initializer=tf.truncated_normal_initializer(0.5),
      partitioner=tf.min_max_variable_partitioner(max_partitions=5, axis=0,
                                                  min_slice_size=8 << 10))
  sess.run(tf.global_variables_initializer())
  # NOTE(review): reaches into the private partitioned-variable API, as the
  # tests below do as well.
  values = sess.run(v1._get_variable_list())
  tf.train.Saver().save(sess, prefix, global_step=0,
                        latest_filename="checkpoint")
  return values
class CheckpointsTest(tf.test.TestCase):
  """Exercises the tf.contrib.framework checkpoint utilities."""

  def testNoCheckpoints(self):
    # A directory without any checkpoint must raise an OpError on load.
    ckpt_dir = self.get_temp_dir() + "/no_checkpoints"
    with self.assertRaises(tf.errors.OpError):
      self.assertAllEqual(tf.contrib.framework.load_variable(
          ckpt_dir, "var1"), [])

  def testNoTensor(self):
    # Loading a variable absent from the checkpoint must raise an OpError.
    ckpt_dir = self.get_temp_dir()
    with self.test_session() as sess:
      _, _, _, _ = _create_checkpoints(sess, ckpt_dir)
    with self.assertRaises(tf.errors.OpError):
      self.assertAllEqual(tf.contrib.framework.load_variable(
          ckpt_dir, "var5"), [])

  def testGetTensor(self):
    # Every saved variable round-trips through load_variable.
    ckpt_dir = self.get_temp_dir()
    with self.test_session() as sess:
      v1, v2, v3, v4 = _create_checkpoints(sess, ckpt_dir)
    self.assertAllEqual(
        tf.contrib.framework.load_variable(ckpt_dir, "var1"), v1)
    self.assertAllEqual(
        tf.contrib.framework.load_variable(ckpt_dir, "var2"), v2)
    self.assertAllEqual(
        tf.contrib.framework.load_variable(ckpt_dir, "var3"), v3)
    self.assertAllEqual(
        tf.contrib.framework.load_variable(ckpt_dir, "useful_scope/var4"), v4)

  def testGetAllVariables(self):
    # list_variables reports (name, shape) pairs, sorted by name.
    ckpt_dir = self.get_temp_dir()
    with self.test_session() as sess:
      _create_checkpoints(sess, ckpt_dir)
    self.assertEqual(tf.contrib.framework.list_variables(ckpt_dir),
                     [("useful_scope/var4", [9, 9]),
                      ("var1", [1, 10]),
                      ("var2", [10, 10]),
                      ("var3", [100, 100])])

  def testInitFromCheckpoint(self):
    ckpt_dir = self.get_temp_dir()
    with self.test_session() as sess:
      v1, v2, v3, v4 = _create_checkpoints(sess, ckpt_dir)

    # New graph and session.
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        with tf.variable_scope("some_scope"):
          my1 = tf.get_variable("my1", [1, 10])
          with tf.variable_scope("some_other_scope"):
            my2 = tf.get_variable("my2", [10, 10])
            with tf.variable_scope("other_useful_scope"):
              my4 = tf.get_variable("var4", [9, 9])
        my3 = tf.get_variable("my3", [100, 100])

        # Map checkpoint names (and a whole scope) onto the new variables.
        tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
            "var1": "some_scope/my1",
            "useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
        })
        tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
            "var2": "some_scope/some_other_scope/my2",
            "var3": my3,
        })

        sess.run(tf.global_variables_initializer())
        self.assertAllEqual(my1.eval(sess), v1)
        self.assertAllEqual(my2.eval(sess), v2)
        self.assertAllEqual(my3.eval(sess), v3)
        self.assertAllEqual(my4.eval(sess), v4)

        # Check that tensors are not explicitly in the graph.
        self.assertLess(len(str(sess.graph.as_graph_def())), 27000)

  def testInitFromRootCheckpoint(self):
    ckpt_dir = self.get_temp_dir()
    with self.test_session() as sess:
      v1, v2, v3, v4 = _create_checkpoints(sess, ckpt_dir)

    # New graph and session.
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        with tf.variable_scope("some_scope"):
          my1 = tf.get_variable("var1", [1, 10])
          my2 = tf.get_variable("var2", [10, 10])
          my3 = tf.get_variable("var3", [100, 100])
          with tf.variable_scope("useful_scope"):
            my4 = tf.get_variable("var4", [9, 9])

        # Remap the whole checkpoint root onto "some_scope/".
        tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
            "/": "some_scope/",
        })

        sess.run(tf.global_variables_initializer())
        self.assertAllEqual(my1.eval(sess), v1)
        self.assertAllEqual(my2.eval(sess), v2)
        self.assertAllEqual(my3.eval(sess), v3)
        self.assertAllEqual(my4.eval(sess), v4)

  def testInitFromPartitionVar(self):
    ckpt_dir = self.get_temp_dir()
    with self.test_session() as sess:
      v1 = _create_partition_checkpoints(sess, ckpt_dir)

    # New graph and session: initialize by checkpoint name.
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        with tf.variable_scope("some_scope"):
          my1 = tf.get_variable(
              name="my1",
              shape=[100, 100],
              initializer=tf.truncated_normal_initializer(0.5),
              partitioner=tf.min_max_variable_partitioner(
                  max_partitions=5, axis=0, min_slice_size=8 << 10))
          my1_var_list = my1._get_variable_list()

        tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
            "var1": "some_scope/my1",
        })

        sess.run(tf.global_variables_initializer())
        self.assertAllEqual(sess.run(my1_var_list), v1)

    # New graph and session: initialize via the partition variable list.
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        with tf.variable_scope("some_scope"):
          my1 = tf.get_variable(
              name="my1",
              shape=[100, 100],
              initializer=tf.truncated_normal_initializer(0.5),
              partitioner=tf.min_max_variable_partitioner(
                  max_partitions=5, axis=0, min_slice_size=8 << 10))
          my1_var_list = my1._get_variable_list()

        tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
            "var1": my1_var_list,
        })

        sess.run(tf.global_variables_initializer())
        self.assertAllEqual(sess.run(my1_var_list), v1)

  def testInitFromCheckpointMissing(self):
    ckpt_dir = self.get_temp_dir()
    with self.test_session() as sess:
      _, _, _, _ = _create_checkpoints(sess, ckpt_dir)

    # New graph and session.
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        with tf.variable_scope("some_scope"):
          _ = tf.get_variable("my1", [10, 10])
          _ = tf.get_variable("my2", [1, 10],
                              dtype=tf.int64, initializer=tf.zeros_initializer)

        # No directory.
        with self.assertRaises(tf.errors.OpError):
          tf.contrib.framework.init_from_checkpoint("no_dir", {
              "var1": "some_scope/my1"})

        # No variable in checkpoint.
        with self.assertRaises(ValueError):
          tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
              "no_var": "some_scope/my1"})

        # No variable in the graph.
        with self.assertRaises(ValueError):
          tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
              "var3": "some_scope/no_var"})

        # Shape mismatch.
        with self.assertRaises(ValueError):
          tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
              "var1": "some_scope/my1"})

        # Variable 'my1' and 'my2' are missing in given checkpoint scope.
        with self.assertRaises(ValueError):
          tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
              "useful_scope/": "some_scope/"})

        # Mapping is not to scope name.
        with self.assertRaises(ValueError):
          tf.contrib.framework.init_from_checkpoint(ckpt_dir, {
              "useful_scope": "some_scope/"})
# Run the TensorFlow test runner when executed directly.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
soumyajitpaul/Soumyajit-Github-Byte-3 | lib/flask/ctx.py | 776 | 14266 | # -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
class _AppCtxGlobals(object):
    """Namespace object backing ``flask.g``; attributes live in ``__dict__``."""

    def get(self, name, default=None):
        """Dict-style attribute lookup with a fallback value."""
        return self.__dict__.get(name, default)

    def __contains__(self, item):
        return item in self.__dict__

    def __iter__(self):
        return iter(self.__dict__)

    def __repr__(self):
        ctx = _app_ctx_stack.top
        if ctx is None:
            return object.__repr__(self)
        return '<flask.g of %r>' % ctx.app.name
def after_this_request(f):
    """Register a function to run on the response of the current request.

    The registered function receives the response object and must return
    the same or a replacement response.

    Example::

        @app.route('/')
        def index():
            @after_this_request
            def add_header(response):
                response.headers['X-Foo'] = 'Parachute'
                return response
            return 'Hello World!'

    This is most useful when code other than the view function needs to
    modify a response — for instance a decorator that wants to add some
    headers without converting the return value into a response object.

    .. versionadded:: 0.9
    """
    _request_ctx_stack.top._after_request_functions.append(f)
    return f
def copy_current_request_context(f):
    """Decorate ``f`` to run under a copy of the current request context.

    Useful when working with greenlets: a copy of the request context is
    taken at decoration time and pushed each time the function is called.

    Example::

        import gevent
        from flask import copy_current_request_context

        @app.route('/')
        def index():
            @copy_current_request_context
            def do_some_work():
                # do some work here, it can access flask.request like you
                # would otherwise in the view function.
                ...
            gevent.spawn(do_some_work)
            return 'Regular response'

    .. versionadded:: 0.10
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        raise RuntimeError('This decorator can only be used at local scopes '
            'when a request context is on the stack. For instance within '
            'view functions.')
    reqctx = ctx.copy()

    def wrapper(*args, **kwargs):
        with reqctx:
            return f(*args, **kwargs)

    return update_wrapper(wrapper, f)
def has_request_context():
    """Return ``True`` if a request context is currently on the stack.

    Lets code take advantage of request information when available while
    failing silently otherwise::

        class User(db.Model):

            def __init__(self, username, remote_addr=None):
                self.username = username
                if remote_addr is None and has_request_context():
                    remote_addr = request.remote_addr
                self.remote_addr = remote_addr

    Alternatively you can also just test any of the context bound objects
    (such as :class:`request` or :class:`g` for truthness)::

        class User(db.Model):

            def __init__(self, username, remote_addr=None):
                self.username = username
                if remote_addr is None and request:
                    remote_addr = request.remote_addr
                self.remote_addr = remote_addr

    .. versionadded:: 0.7
    """
    top = _request_ctx_stack.top
    return top is not None
def has_app_context():
    """Return ``True`` if an application context is currently on the stack.

    Works like :func:`has_request_context` but for the application
    context.  A boolean check on :data:`current_app` is equivalent.

    .. versionadded:: 0.9
    """
    top = _app_ctx_stack.top
    return top is not None
class AppContext(object):
    """Implicitly binds an application object to the current thread or
    greenlet, much like :class:`RequestContext` binds request
    information.  An application context is also created implicitly when
    a request context is pushed while the application is not already on
    top of the individual application context stack.
    """

    def __init__(self, app):
        self.app = app
        self.url_adapter = app.create_url_adapter(None)
        self.g = app.app_ctx_globals_class()

        # App contexts, like request contexts, may be pushed more than
        # once; a simple reference count suffices to track nesting.
        self._refcnt = 0

    def push(self):
        """Binds the app context to the current context."""
        self._refcnt += 1
        _app_ctx_stack.push(self)
        appcontext_pushed.send(self.app)

    def pop(self, exc=None):
        """Pops the app context."""
        self._refcnt -= 1
        # Teardown handlers only fire once the outermost push is undone.
        if self._refcnt <= 0:
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_appcontext(exc)
        popped = _app_ctx_stack.pop()
        assert popped is self, 'Popped wrong app context. (%r instead of %r)' \
            % (popped, self)
        appcontext_popped.send(self.app)

    def __enter__(self):
        self.push()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.pop(exc_value)
class RequestContext(object):
    """The request context contains all request relevant information. It is
    created at the beginning of the request and pushed to the
    `_request_ctx_stack` and removed at the end of it. It will create the
    URL adapter and request object for the WSGI environment provided.

    Do not attempt to use this class directly, instead use
    :meth:`~flask.Flask.test_request_context` and
    :meth:`~flask.Flask.request_context` to create this object.

    When the request context is popped, it will evaluate all the
    functions registered on the application for teardown execution
    (:meth:`~flask.Flask.teardown_request`).

    The request context is automatically popped at the end of the request
    for you. In debug mode the request context is kept around if
    exceptions happen so that interactive debuggers have a chance to
    introspect the data. With 0.4 this can also be forced for requests
    that did not fail and outside of `DEBUG` mode. By setting
    ``'flask._preserve_context'`` to `True` on the WSGI environment the
    context will not pop itself at the end of the request. This is used by
    the :meth:`~flask.Flask.test_client` for example to implement the
    deferred cleanup functionality.

    You might find this helpful for unittests where you need the
    information from the context local around for a little longer. Make
    sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
    that situation, otherwise your unittests will leak memory.
    """

    def __init__(self, app, environ, request=None):
        self.app = app
        if request is None:
            request = app.request_class(environ)
        self.request = request
        self.url_adapter = app.create_url_adapter(self.request)
        self.flashes = None
        self.session = None

        # Request contexts can be pushed multiple times and interleaved with
        # other request contexts. Now only if the last level is popped we
        # get rid of them. Additionally if an application context is missing
        # one is created implicitly so for each level we add this information
        self._implicit_app_ctx_stack = []

        # indicator if the context was preserved. Next time another context
        # is pushed the preserved context is popped.
        self.preserved = False

        # remembers the exception for pop if there is one in case the context
        # preservation kicks in.
        self._preserved_exc = None

        # Functions that should be executed after the request on the response
        # object. These will be called before the regular "after_request"
        # functions.
        self._after_request_functions = []

        self.match_request()

        # XXX: Support for deprecated functionality. This is going away with
        # Flask 1.0
        blueprint = self.request.blueprint
        if blueprint is not None:
            # better safe than sorry, we don't want to break code that
            # already worked
            bp = app.blueprints.get(blueprint)
            if bp is not None and blueprint_is_module(bp):
                self.request._is_old_module = True

    # ``g`` proxies the globals object of the topmost application context,
    # so ``ctx.g`` and ``flask.g`` refer to the same namespace while this
    # request context is active.
    def _get_g(self):
        return _app_ctx_stack.top.g
    def _set_g(self, value):
        _app_ctx_stack.top.g = value
    g = property(_get_g, _set_g)
    del _get_g, _set_g

    def copy(self):
        """Creates a copy of this request context with the same request object.
        This can be used to move a request context to a different greenlet.
        Because the actual request object is the same this cannot be used to
        move a request context to a different thread unless access to the
        request object is locked.

        .. versionadded:: 0.10
        """
        return self.__class__(self.app,
            environ=self.request.environ,
            request=self.request
        )

    def match_request(self):
        """Can be overridden by a subclass to hook into the matching
        of the request.
        """
        try:
            # On a match the rule and view args are attached to the request;
            # on failure the HTTPException is stored for later re-raising.
            url_rule, self.request.view_args = \
                self.url_adapter.match(return_rule=True)
            self.request.url_rule = url_rule
        except HTTPException as e:
            self.request.routing_exception = e

    def push(self):
        """Binds the request context to the current context."""
        # If an exception occurs in debug mode or if context preservation is
        # activated under exception situations exactly one context stays
        # on the stack. The rationale is that you want to access that
        # information under debug situations. However if someone forgets to
        # pop that context again we want to make sure that on the next push
        # it's invalidated, otherwise we run at risk that something leaks
        # memory. This is usually only a problem in testsuite since this
        # functionality is not active in production environments.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop(top._preserved_exc)

        # Before we push the request context we have to ensure that there
        # is an application context.
        app_ctx = _app_ctx_stack.top
        if app_ctx is None or app_ctx.app != self.app:
            app_ctx = self.app.app_context()
            app_ctx.push()
            self._implicit_app_ctx_stack.append(app_ctx)
        else:
            self._implicit_app_ctx_stack.append(None)

        _request_ctx_stack.push(self)

        # Open the session at the moment that the request context is
        # available. This allows a custom open_session method to use the
        # request context (e.g. code that access database information
        # stored on `g` instead of the appcontext).
        self.session = self.app.open_session(self.request)
        if self.session is None:
            self.session = self.app.make_null_session()

    def pop(self, exc=None):
        """Pops the request context and unbinds it by doing that. This will
        also trigger the execution of functions registered by the
        :meth:`~flask.Flask.teardown_request` decorator.

        .. versionchanged:: 0.9
           Added the `exc` argument.
        """
        app_ctx = self._implicit_app_ctx_stack.pop()

        clear_request = False
        # Teardown and request cleanup only run when the outermost push of
        # this context is being undone.
        if not self._implicit_app_ctx_stack:
            self.preserved = False
            self._preserved_exc = None
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_request(exc)

            # If this interpreter supports clearing the exception information
            # we do that now. This will only go into effect on Python 2.x,
            # on 3.x it disappears automatically at the end of the exception
            # stack.
            if hasattr(sys, 'exc_clear'):
                sys.exc_clear()

            request_close = getattr(self.request, 'close', None)
            if request_close is not None:
                request_close()
            clear_request = True

        rv = _request_ctx_stack.pop()
        assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
            % (rv, self)

        # get rid of circular dependencies at the end of the request
        # so that we don't require the GC to be active.
        if clear_request:
            rv.request.environ['werkzeug.request'] = None

        # Get rid of the app as well if necessary.
        if app_ctx is not None:
            app_ctx.pop(exc)

    def auto_pop(self, exc):
        """Pops the context unless context preservation was requested
        (test client, or debug mode with an exception), in which case the
        context is kept alive for later inspection.
        """
        if self.request.environ.get('flask._preserve_context') or \
           (exc is not None and self.app.preserve_context_on_exception):
            self.preserved = True
            self._preserved_exc = exc
        else:
            self.pop(exc)

    def __enter__(self):
        self.push()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # do not pop the request stack if we are in debug mode and an
        # exception happened. This will allow the debugger to still
        # access the request object in the interactive shell. Furthermore
        # the context can be force kept alive for the test client.
        # See flask.testing for how this works.
        self.auto_pop(exc_value)

    def __repr__(self):
        # e.g. <RequestContext 'http://localhost/' [GET] of app_name>
        return '<%s \'%s\' [%s] of %s>' % (
            self.__class__.__name__,
            self.request.url,
            self.request.method,
            self.app.name,
        )
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.