repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Lembed/uClinux-Cortex | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
	""" Tree representation of a kernel function-call stack.

	A node whose caller is unknown in the kernel (interrupt, syscall,
	kernel thread...) is attached to a shared virtual parent,
	CallTree.ROOT.
	"""

	ROOT = None

	def __init__(self, func, time = None, parent = None):
		self._func = func
		self._time = time
		self._parent = CallTree.ROOT if parent is None else parent
		self._children = []

	def calls(self, func, calltime):
		""" Record that this function called func at calltime, inserting
		the callee at the appropriate place in the tree.
		@return: A reference to the newly created child node.
		"""
		node = CallTree(func, calltime, self)
		self._children.append(node)
		return node

	def getParent(self, func):
		""" Walk up from this node to the nearest ancestor (including
		self) whose name is func. If no such ancestor exists, a fresh
		child of ROOT is created instead.
		@return: A reference to the matching (or newly created) node.
		"""
		node = self
		while node != CallTree.ROOT and node._func != func:
			node = node._parent
		if node == CallTree.ROOT:
			return CallTree.ROOT.calls(func, None)
		return node

	def __repr__(self):
		return self.__toString("", True)

	def __toString(self, branch, lastChild):
		# Render this node, then recurse into the children; only the
		# time-stamped nodes carry the "(time)" suffix.
		if self._time is None:
			s = "%s----%s\n" % (branch, self._func)
		else:
			s = "%s----%s (%s)\n" % (branch, self._func, self._time)
		if lastChild:
			# Drop the trailing pipe of the branch under a last child.
			branch = branch[:-1] + " "
		last = len(self._children) - 1
		for i, child in enumerate(self._children):
			s += "%s" % child.__toString(branch + " |", i == last)
		return s
class BrokenLineException(Exception):
	""" Raised for a trace line left incomplete by the pipe breakage;
	processing stops and the line is ignored.
	"""
class CommentLineException(Exception):
	""" Raised for a comment line (as found at the beginning of the
	trace file); the line is simply skipped.
	"""
def parseLine(line):
	""" Split one ftrace line into a (calltime, callee, caller) tuple.

	Raises CommentLineException for '#' comment lines and
	BrokenLineException for lines that do not match the trace format.
	"""
	stripped = line.strip()
	if stripped.startswith("#"):
		raise CommentLineException
	match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
	if match is None:
		raise BrokenLineException
	return match.groups()
def main():
	""" Read an ftrace function-trace stream from stdin and print it
	as an indented call tree rooted at CallTree.ROOT.
	"""
	CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
	tree = CallTree.ROOT
	for line in sys.stdin:
		try:
			calltime, callee, caller = parseLine(line)
		except BrokenLineException:
			# The last line may be truncated when the pipe was broken.
			break
		except CommentLineException:
			continue
		# Re-anchor on the caller, then descend into the callee.
		tree = tree.getParent(caller)
		tree = tree.calls(callee, calltime)
	# Fix: parenthesized print works under both Python 2 and Python 3
	# (the bare "print CallTree.ROOT" statement is a syntax error on 3).
	print(CallTree.ROOT)


if __name__ == "__main__":
	main()
| gpl-2.0 |
jeongarmy/TizenRT | os/tools/discover.py | 10 | 4155 | #!/usr/bin/env python
############################################################################
#
# Copyright 2016 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
############################################################################
############################################################################
# tools/discover.py
#
# Copyright (C) 2012 Max Holtzberg. All rights reserved.
# Author: Max Holtzberg <mh@uvc.de>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name NuttX nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
from __future__ import print_function
import array
from socket import *
# UDP port used by the discovery protocol.
PORT = 96
# First byte of every discovery datagram, identifying the protocol.
DISCOVER_PROTO_ID = 0x99
DISCOVER_ALL = 0xff # 0xff means all devices
DISCOVER_REQUEST = 0x01  # command byte: discovery request
DISCOVER_RESPONSE = 0x02  # command byte: discovery response
# Request: id, command, target, checksum.
DISCOVER_REQUEST_SIZE = 4
# Response: id, command, description bytes, trailing checksum
# (see read_responses(), which parses response[2:-1] as the description).
DISCOVER_RESPONSE_SIZE = 35
def check_sum(data):
    """Return True when the last byte of *data* equals the
    two's-complement checksum of all preceding bytes."""
    expected = (-sum(data[:-1])) & 0xff
    return expected == data[-1]
def send_discover(socket):
    """Broadcast a single discovery request datagram on *socket*."""
    cmd = array.array('B', [0] * DISCOVER_REQUEST_SIZE)
    cmd[0] = DISCOVER_PROTO_ID  # Tag for identification of the protocol
    cmd[1] = DISCOVER_REQUEST   # Request command
    cmd[2] = DISCOVER_ALL       # Target: every device
    # Trailing byte is the two's-complement checksum of the first three.
    cmd[3] = (-sum(cmd[:3])) & 0xff
    socket.sendto(cmd, ('<broadcast>', PORT))
def read_responses(socket):
    """Collect discovery responses until the socket read times out.

    Returns a list of dicts with keys 'addr' (the sender address) and
    'descr' (the NUL-stripped device description bytes).
    """
    res = []
    response = array.array('B', [0] * DISCOVER_RESPONSE_SIZE)
    try:
        while 1:
            size, src = socket.recvfrom_into(response)
            # Accept only well-formed responses: exact size, protocol
            # tag, response command byte, and a valid trailing checksum.
            if (size == DISCOVER_RESPONSE_SIZE
                and response[0] == DISCOVER_PROTO_ID
                and response[1] == DISCOVER_RESPONSE
                and check_sum(response)):
                dev = {}
                dev['addr'] = src[0]
                # Bytes 2..-2 hold the description text.
                # NOTE(review): array.tostring() + str.rstrip('\0') is
                # Python 2 usage; on Python 3 this would need
                # tobytes()/decode() instead — confirm target interpreter.
                dev['descr'] = response[2:-1].tostring().rstrip('\0')
                res.append(dev)
    except timeout:
        # socket.timeout (star-imported above) ends the collection loop.
        return res
if __name__ == '__main__':
    # Broadcast one discovery request, then print every device that
    # answers within the 1-second receive timeout.
    print('Sending discover...')

    s = socket(AF_INET, SOCK_DGRAM)
    s.bind(('0.0.0.0', PORT))
    s.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
    s.settimeout(1.0)  # recvfrom_into() raises timeout after 1 second

    send_discover(s)
    devices = read_responses(s)

    # Fix: close the socket with an idiomatic instance call rather than
    # the unbound-method form "socket.close(s)".
    s.close()

    print(devices)
| apache-2.0 |
CasataliaLabs/biscuit_drishtiman | Pmw-2.0.0/build/lib.linux-x86_64-2.7/Pmw/Pmw_1_3_3/bin/bundlepmw.py | 2 | 4443 | #!/usr/bin/env python
# Helper script when freezing Pmw applications. It concatenates all
# Pmw megawidget files into a single file, 'Pmw.py', in the current
# directory. The script must be called with one argument, being the
# path to the 'lib' directory of the required version of Pmw.
# To freeze a Pmw application, you will also need to copy the
# following files to the application directory before freezing:
#
# PmwBlt.py PmwColor.py
import os
import re
import string
import sys
# The order of these files is significant. Files which reference
# other files must appear later. Files may be deleted if they are not
# used.
# The order of these files is significant. Files which reference
# other files must appear later. Files may be deleted if they are not
# used.
# Each entry is a megawidget name N, loaded from the source file
# 'PmwN.py' by mungeFile() below.
files = [
    'Dialog', 'TimeFuncs', 'Balloon', 'ButtonBox', 'EntryField',
    'Group', 'LabeledWidget', 'MainMenuBar', 'MenuBar', 'MessageBar',
    'MessageDialog', 'NoteBook', 'OptionMenu', 'PanedWidget', 'PromptDialog',
    'RadioSelect', 'ScrolledCanvas', 'ScrolledField', 'ScrolledFrame',
    'ScrolledListBox', 'ScrolledText', 'HistoryText', 'SelectionDialog',
    'TextDialog', 'TimeCounter', 'AboutDialog', 'ComboBox', 'ComboBoxDialog',
    'Counter', 'CounterDialog',
]

# Set this to 0 if you do not use any of the Pmw.Color functions:
needColor = 1

# Set this to 0 if you do not use any of the Pmw.Blt functions:
needBlt = 1
def expandLinks(path):
    """Return *path* made absolute (relative to the current directory),
    with any chain of symbolic links at the final component resolved."""
    if not os.path.isabs(path):
        path = os.path.join(os.getcwd(), path)
    while os.path.islink(path):
        parent = os.path.dirname(path)
        path = os.path.join(parent, os.readlink(path))
    return path
def mungeFile(file):
    # Read the file and modify it so that it can be bundled with the
    # other Pmw files.
    #
    # The substitution order matters: the "import Pmw" statement and the
    # INITOPT alias are removed first, then every remaining "Pmw."
    # prefix is stripped so the code refers to the bundled names
    # directly.  *srcdir* is a module global assigned from sys.argv
    # later in this script.
    file = 'Pmw' + file + '.py'
    text = open(os.path.join(srcdir, file)).read()
    text = re.sub(r'import Pmw\b', '', text)
    text = re.sub('INITOPT = Pmw.INITOPT', '', text)
    text = re.sub(r'\bPmw\.', '', text)
    # Prepend a banner so each bundled section is identifiable.
    text = '\n' + ('#' * 70) + '\n' + '### File: ' + file + '\n' + text
    return text
# Work out which version is being bundled.
# The script lives in .../Pmw_X_X_X/bin/bundlepmw.py, so walk two
# directory levels up (resolving symlinks at each step) and decode the
# version from the "Pmw_X_X_X" directory name.
file = sys.argv[0]
file = os.path.normpath(file)
file = expandLinks(file)

dir = os.path.dirname(file)
dir = expandLinks(dir)
dir = os.path.dirname(dir)
dir = expandLinks(dir)
dir = os.path.basename(dir)
version = string.replace(dir[4:], '_', '.')  # "Pmw_1_3_3" -> "1.3.3"
# Code to import the Color module.
colorCode = """
import PmwColor
Color = PmwColor
del PmwColor
"""
# Code to import the Blt module.
bltCode = """
import PmwBlt
Blt = PmwBlt
del PmwBlt
"""
# Code used when not linking with PmwBlt.py.
ignoreBltCode = """
_bltImported = 1
_bltbusyOK = 0
"""
# Code to define the functions normally supplied by the dynamic loader.
extraCode = """
### Loader functions:
_VERSION = '%s'
def setversion(version):
if version != _VERSION:
raise ValueError, 'Dynamic versioning not available'
def setalphaversions(*alpha_versions):
if alpha_versions != ():
raise ValueError, 'Dynamic versioning not available'
def version(alpha = 0):
if alpha:
return ()
else:
return _VERSION
def installedversions(alpha = 0):
if alpha:
return ()
else:
return (_VERSION,)
"""
# Command-line handling: optional -noblt / -nocolor flags, then exactly
# one positional argument, the path to the Pmw lib directory to bundle.
if '-noblt' in sys.argv:
    sys.argv.remove('-noblt')
    needBlt = 0

if '-nocolor' in sys.argv:
    sys.argv.remove('-nocolor')
    needColor = 0

if len(sys.argv) != 2:
    print 'usage: bundlepmw.py [-noblt] [-nocolor] /path/to/Pmw/Pmw_X_X_X/lib'
    sys.exit()

srcdir = sys.argv[1]

# Refuse to clobber an existing bundle in the current directory.
if os.path.exists('Pmw.py'):
    print 'Pmw.py already exists. Remove it and try again.'
    sys.exit()

outfile = open('Pmw.py', 'w')

# Optional loader shims for the Color/Blt helper modules, followed by
# the replacement for the dynamic-loader API.
if needColor:
    outfile.write(colorCode)

if needBlt:
    outfile.write(bltCode)

outfile.write(extraCode % version)

# Specially handle PmwBase.py file: its references to PmwLogicalFont
# are rewritten because that module is bundled (appended below) rather
# than imported.
text = mungeFile('Base')
text = re.sub('import PmwLogicalFont', '', text)
text = re.sub('PmwLogicalFont._font_initialise', '_font_initialise', text)
outfile.write(text)

if not needBlt:
    outfile.write(ignoreBltCode)

files.append('LogicalFont')
for file in files:
    text = mungeFile(file)
    outfile.write(text)

print ''
print ' Pmw.py has been created.'

# Remind the user which support files must accompany the bundle when
# freezing (they are imported, not bundled).
if needColor or needBlt:
    print ' Before running freeze, also copy the following file(s):'
    if needBlt:
        print ' ' + os.path.join(srcdir, 'PmwBlt.py')
    if needColor:
        print ' ' + os.path.join(srcdir, 'PmwColor.py')
| gpl-3.0 |
longmen21/edx-platform | common/djangoapps/third_party_auth/tests/test_views.py | 23 | 6360 | """
Test the views served by third_party_auth.
"""
import ddt
from lxml import etree
from onelogin.saml2.errors import OneLogin_Saml2_Error
import unittest
from .testutil import AUTH_FEATURE_ENABLED, SAMLTestCase
# Define some XML namespaces:
from third_party_auth.tasks import SAML_XML_NS
XMLDSIG_XML_NS = 'http://www.w3.org/2000/09/xmldsig#'
@unittest.skipUnless(AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
@ddt.ddt
class SAMLMetadataTest(SAMLTestCase):
    """
    Test the SAML metadata view
    """
    METADATA_URL = '/auth/saml/metadata.xml'

    def test_saml_disabled(self):
        """ When SAML is not enabled, the metadata view should return 404 """
        self.enable_saml(enabled=False)
        response = self.client.get(self.METADATA_URL)
        self.assertEqual(response.status_code, 404)

    def test_metadata(self):
        """ The metadata advertises this SP's AssertionConsumerService URL """
        self.enable_saml()
        doc = self._fetch_metadata()
        # Check the ACS URL:
        acs_node = doc.find(".//{}".format(etree.QName(SAML_XML_NS, 'AssertionConsumerService')))
        self.assertIsNotNone(acs_node)
        self.assertEqual(acs_node.attrib['Location'], 'http://example.none/auth/complete/tpa-saml/')

    def test_default_contact_info(self):
        """ Without explicit configuration, the default edX contacts are published """
        self.enable_saml()
        self.check_metadata_contacts(
            xml=self._fetch_metadata(),
            tech_name="edX Support",
            tech_email="technical@example.com",
            support_name="edX Support",
            support_email="technical@example.com"
        )

    def test_custom_contact_info(self):
        """ TECHNICAL_CONTACT / SUPPORT_CONTACT in other_config override the defaults """
        self.enable_saml(
            other_config_str=(
                '{'
                '"TECHNICAL_CONTACT": {"givenName": "Jane Tech", "emailAddress": "jane@example.com"},'
                '"SUPPORT_CONTACT": {"givenName": "Joe Support", "emailAddress": "joe@example.com"}'
                '}'
            )
        )
        self.check_metadata_contacts(
            xml=self._fetch_metadata(),
            tech_name="Jane Tech",
            tech_email="jane@example.com",
            support_name="Joe Support",
            support_email="joe@example.com"
        )

    @ddt.data(
        # Test two slightly different key pair export formats
        ('saml_key', 'MIICsDCCAhmgAw'),
        ('saml_key_alt', 'MIICWDCCAcGgAw'),
    )
    @ddt.unpack
    def test_signed_metadata(self, key_name, pub_key_starts_with):
        """ Metadata is signed with the configured key pair when signMetadata is set """
        self.enable_saml(
            private_key=self._get_private_key(key_name),
            public_key=self._get_public_key(key_name),
            other_config_str='{"SECURITY_CONFIG": {"signMetadata": true} }',
        )
        self._validate_signed_metadata(pub_key_starts_with=pub_key_starts_with)

    def test_secure_key_configuration(self):
        """ Test that the SAML private key can be stored in Django settings and not the DB """
        self.enable_saml(
            public_key='',
            private_key='',
            other_config_str='{"SECURITY_CONFIG": {"signMetadata": true} }',
        )
        # With no key anywhere, signing the metadata must fail:
        with self.assertRaises(OneLogin_Saml2_Error):
            self._fetch_metadata()  # OneLogin_Saml2_Error: Cannot sign metadata: missing SP private key.
        # The key pair supplied via Django settings is picked up instead:
        with self.settings(
            SOCIAL_AUTH_SAML_SP_PRIVATE_KEY=self._get_private_key('saml_key'),
            SOCIAL_AUTH_SAML_SP_PUBLIC_CERT=self._get_public_key('saml_key'),
        ):
            self._validate_signed_metadata()

    def _validate_signed_metadata(self, pub_key_starts_with='MIICsDCCAhmgAw'):
        """ Fetch the SAML metadata and do some validation """
        doc = self._fetch_metadata()
        sig_node = doc.find(".//{}".format(etree.QName(XMLDSIG_XML_NS, 'SignatureValue')))
        self.assertIsNotNone(sig_node)
        # Check that the right public key was used:
        pub_key_node = doc.find(".//{}".format(etree.QName(XMLDSIG_XML_NS, 'X509Certificate')))
        self.assertIsNotNone(pub_key_node)
        self.assertIn(pub_key_starts_with, pub_key_node.text)

    def _fetch_metadata(self):
        """ Fetch and parse the metadata XML at self.METADATA_URL """
        response = self.client.get(self.METADATA_URL)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/xml')
        # The result should be valid XML:
        try:
            metadata_doc = etree.fromstring(response.content)
        except etree.LxmlError:
            self.fail('SAML metadata must be valid XML')
        self.assertEqual(metadata_doc.tag, etree.QName(SAML_XML_NS, 'EntityDescriptor'))
        return metadata_doc

    def check_metadata_contacts(self, xml, tech_name, tech_email, support_name, support_email):
        """ Validate that the contact info in the metadata has the expected values """
        technical_node = xml.find(".//{}[@contactType='technical']".format(etree.QName(SAML_XML_NS, 'ContactPerson')))
        self.assertIsNotNone(technical_node)
        tech_name_node = technical_node.find(etree.QName(SAML_XML_NS, 'GivenName'))
        self.assertEqual(tech_name_node.text, tech_name)
        tech_email_node = technical_node.find(etree.QName(SAML_XML_NS, 'EmailAddress'))
        self.assertEqual(tech_email_node.text, tech_email)
        support_node = xml.find(".//{}[@contactType='support']".format(etree.QName(SAML_XML_NS, 'ContactPerson')))
        self.assertIsNotNone(support_node)
        support_name_node = support_node.find(etree.QName(SAML_XML_NS, 'GivenName'))
        self.assertEqual(support_name_node.text, support_name)
        support_email_node = support_node.find(etree.QName(SAML_XML_NS, 'EmailAddress'))
        self.assertEqual(support_email_node.text, support_email)
@unittest.skipUnless(AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class SAMLAuthTest(SAMLTestCase):
    """
    Test the SAML auth views
    """
    LOGIN_URL = '/auth/login/tpa-saml/'

    def test_login_without_idp(self):
        """ Accessing the login endpoint without an idp query param should return 302 """
        self.enable_saml()
        response = self.client.get(self.LOGIN_URL)
        # A redirect, not an error page, is expected in this case:
        self.assertEqual(response.status_code, 302)

    def test_login_disabled(self):
        """ When SAML is not enabled, the login view should return 404 """
        self.enable_saml(enabled=False)
        response = self.client.get(self.LOGIN_URL)
        self.assertEqual(response.status_code, 404)
| agpl-3.0 |
yrizk/django-blog | blogvenv/lib/python3.4/site-packages/django/contrib/gis/db/backends/mysql/operations.py | 101 | 2042 | from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.db.backends.mysql.operations import DatabaseOperations
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
    """GeoDjango database operations for the MySQL backend.

    Every spatial lookup below maps to an MBR* (minimum bounding
    rectangle) function, i.e. comparisons are made on bounding boxes.
    """
    mysql = True
    name = 'mysql'
    select = 'AsText(%s)'      # wraps geometry columns so they are fetched as WKT
    from_wkb = 'GeomFromWKB'   # constructor from well-known binary
    from_text = 'GeomFromText' # constructor from well-known text

    Adapter = WKTAdapter
    Adaptor = Adapter  # Backwards-compatibility alias.

    gis_operators = {
        'bbcontains': SpatialOperator(func='MBRContains'),  # For consistency w/PostGIS API
        'bboverlaps': SpatialOperator(func='MBROverlaps'),  # .. ..
        'contained': SpatialOperator(func='MBRWithin'),  # .. ..
        'contains': SpatialOperator(func='MBRContains'),
        'disjoint': SpatialOperator(func='MBRDisjoint'),
        'equals': SpatialOperator(func='MBREqual'),
        'exact': SpatialOperator(func='MBREqual'),
        'intersects': SpatialOperator(func='MBRIntersects'),
        'overlaps': SpatialOperator(func='MBROverlaps'),
        'same_as': SpatialOperator(func='MBREqual'),
        'touches': SpatialOperator(func='MBRTouches'),
        'within': SpatialOperator(func='MBRWithin'),
    }

    # Aggregates that require full geometry support are not available
    # on this backend.
    disallowed_aggregates = (aggregates.Collect, aggregates.Extent, aggregates.Extent3D, aggregates.MakeLine, aggregates.Union)

    def geo_db_type(self, f):
        # The field's geometry type name doubles as the column type.
        return f.geom_type

    def get_geom_placeholder(self, f, value, compiler):
        """
        The placeholder here has to include MySQL's WKT constructor. Because
        MySQL does not support spatial transformations, there is no need to
        modify the placeholder based on the contents of the given value.
        """
        if hasattr(value, 'as_sql'):
            # Expressions compile to SQL themselves; no constructor needed.
            placeholder, _ = compiler.compile(value)
        else:
            placeholder = '%s(%%s)' % self.from_text
        return placeholder
| apache-2.0 |
dani-i/bachelor-project | graphics/output/data_augmentation_output_f.py | 1 | 3130 | from graphics.widgets.checkbox_item_output_f import CheckboxItemOutputF
from utils.train.data_augmentation import DataAugmentation
import constants.output_constants as const
import tkinter as tk
class DataAugmentationOutputF(tk.Frame):
    # Read-only frame showing the data-augmentation options as a titled
    # list of checkbox output items.

    def __init__(self,
                 parent,
                 disabled=False):
        """
        :param parent: Parent.
        :param disabled: - Default: False;
                         - If True all the widgets will be disabled.
        """
        tk.Frame.__init__(self,
                          parent,
                          relief=const.DAO_FRAME_RELIEF,
                          padx=const.DAO_FRAME_PADX,
                          pady=const.DAO_FRAME_PADY,
                          bd=const.DAO_FRAME_BD)

        # One CheckboxItemOutputF per augmentation option, rebuilt on
        # every update_status() call.
        self._cio_output = []

        # Widget creation
        self._lbl_title = tk.Label(
            self,
            font=const.DAO_TITLE_FONT,
            text=const.DAO_TITLE_TEXT,
            padx=const.DAO_TITLE_PADX,
            pady=const.DAO_TITLE_PADY,
        )
        self._f_option = tk.Frame(
            self,
            relief=const.DAO_SUBFRAME_RELIEF,
            padx=const.DAO_SUBFRAME_PADX,
            pady=const.DAO_SUBFRAME_PADY,
            bd=const.DAO_SUBFRAME_BD
        )

        # Widget placement
        self._lbl_title.pack(side='top',
                             fill='both',
                             expand=True)
        self._f_option.pack(side='top',
                            fill='both',
                            expand=True)

        # Initial output: display the default option set.
        initial_output = DataAugmentation()
        self.update_status(initial_output)

        if disabled:
            self.disable()

    #########################################################################
    # Public methods

    def update_status(
            self,
            data_augmentation_options: DataAugmentation):
        """
        - Updates the option's state.

        :param data_augmentation_options: DataAugmentation list.
        """
        options_list = data_augmentation_options.get_options_list()

        # Drop the previously displayed checkbox items...
        for index in range(len(self._cio_output)):
            self._cio_output[index].destroy()
        self._cio_output = []

        # ...and rebuild one item per (text, checked) option pair.
        for index in range(len(options_list)):
            self._cio_output.append(
                CheckboxItemOutputF(
                    parent=self._f_option,
                    item_text=options_list[index][0],
                    checked=options_list[index][1],
                )
            )
            self._cio_output[index].pack(side='top')

    def enable(self):
        """ Enables all the widgets."""
        for index in range(len(self._cio_output)):
            self._cio_output[index].enable()
        self._lbl_title.config(state='normal')

    def disable(self):
        """ Disables all the widgets."""
        for index in range(len(self._cio_output)):
            self._cio_output[index].disable()
        self._lbl_title.config(state='disabled')

    #########################################################################
| apache-2.0 |
wainersm/buildbot | master/buildbot/db/pool.py | 9 | 8738 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import time
import traceback
import sqlalchemy as sa
from twisted.internet import threads
from twisted.python import log
from twisted.python import threadpool
from buildbot.db.buildrequests import AlreadyClaimedError
from buildbot.db.changesources import ChangeSourceAlreadyClaimedError
from buildbot.db.schedulers import SchedulerAlreadyClaimedError
from buildbot.process import metrics
# set this to True for *very* verbose query debugging output; this can
# be monkey-patched from master.cfg, too:
#     from buildbot.db import pool
#     pool.debug = True
debug = False
# Monotonically increasing counter used by timed_do_fn() to give each
# logged invocation a unique id.
_debug_id = 1
def timed_do_fn(f):
    """Decorate a do function to log before, after, and elapsed time,
    with the name of the calling function. This is not speedy!"""
    def wrap(callable, *args, **kwargs):
        global _debug_id

        # get a description of the function that called us
        st = traceback.extract_stack(limit=2)
        file, line, name, _ = st[0]

        # and its locals
        frame = inspect.currentframe()
        locals = frame.f_locals

        # invent a unique ID for the description
        id, _debug_id = _debug_id, _debug_id + 1
        descr = "%s-%08x" % (name, id)

        start_time = time.time()
        log.msg("%s - before ('%s' line %d)" % (descr, file, line))
        # Log every local of the caller except the usual noise.
        for name in locals:
            if name in ('self', 'thd'):
                continue
            log.msg("%s - %s = %r" % (descr, name, locals[name]))

        # wrap the callable to log the begin and end of the actual thread
        # function
        # NOTE(review): callable_wrap accepts *args/**kargs but forwards
        # the *enclosing* values instead: 'args' here shadows wrap()'s
        # args, while 'kwargs' (note the spelling) still comes from the
        # closure.  Confirm this asymmetry is intentional.
        def callable_wrap(*args, **kargs):
            log.msg("%s - thd start" % (descr,))
            try:
                return callable(*args, **kwargs)
            finally:
                log.msg("%s - thd end" % (descr,))
        d = f(callable_wrap, *args, **kwargs)

        # Log completion (and elapsed wall time) whether the Deferred
        # fires with a result or a failure; pass the value through.
        @d.addBoth
        def after(x):
            end_time = time.time()
            elapsed = (end_time - start_time) * 1000
            log.msg("%s - after (%0.2f ms elapsed)" % (descr, elapsed))
            return x

        return d
    # Preserve the wrapped function's identity for callers/logs.
    wrap.__name__ = f.__name__
    wrap.__doc__ = f.__doc__
    return wrap
class DBThreadPool(object):
    # Runs blocking SQLAlchemy callables on a private Twisted thread
    # pool, retrying transient OperationalErrors with exponential
    # backoff.

    running = False

    def __init__(self, engine, reactor, verbose=False):
        # verbose is used by upgrade scripts, and if it is set we should print
        # messages about versions and other warnings
        log_msg = log.msg
        if verbose:
            def _log_msg(m):
                print(m)
            log_msg = _log_msg

        self.reactor = reactor

        pool_size = 5

        # If the engine has an C{optimal_thread_pool_size} attribute, then the
        # maxthreads of the thread pool will be set to that value.  This is
        # most useful for SQLite in-memory connections, where exactly one
        # connection (and thus thread) should be used.
        if hasattr(engine, 'optimal_thread_pool_size'):
            pool_size = engine.optimal_thread_pool_size

        self._pool = threadpool.ThreadPool(minthreads=1,
                                           maxthreads=pool_size,
                                           name='DBThreadPool')
        self.engine = engine
        if engine.dialect.name == 'sqlite':
            vers = self.get_sqlite_version()
            if vers < (3, 7):
                log_msg("Using SQLite Version %s" % (vers,))
                log_msg("NOTE: this old version of SQLite does not support "
                        "WAL journal mode; a busy master may encounter "
                        "'Database is locked' errors. Consider upgrading.")
            if vers < (3, 6, 19):
                log_msg("NOTE: this old version of SQLite is not "
                        "supported.")
                raise RuntimeError("unsupported SQLite version")

        # Pool startup is deferred until the reactor is running.
        self._start_evt = self.reactor.callWhenRunning(self._start)

        # patch the do methods to do verbose logging if necessary
        if debug:
            self.do = timed_do_fn(self.do)
            self.do_with_engine = timed_do_fn(self.do_with_engine)

    def _start(self):
        self._start_evt = None
        if not self.running:
            self._pool.start()
            # Arrange for cleanup when the reactor shuts down.
            self._stop_evt = self.reactor.addSystemEventTrigger(
                'during', 'shutdown', self._stop)
            self.running = True

    def _stop(self):
        self._stop_evt = None
        # Dispose of the engine on a pool thread (it may block), then
        # stop the pool itself.
        threads.deferToThreadPool(
            self.reactor, self._pool, self.engine.dispose)
        self._pool.stop()
        self.running = False

    def shutdown(self):
        """Manually stop the pool.  This is only necessary from tests, as the
        pool will stop itself when the reactor stops under normal
        circumstances."""
        if not self._stop_evt:
            return  # pool is already stopped
        self.reactor.removeSystemEventTrigger(self._stop_evt)
        self._stop()

    # Try about 170 times over the space of a day, with the last few tries
    # being about an hour apart.  This is designed to span a reasonable amount
    # of time for repairing a broken database server, while still failing
    # actual problematic queries eventually
    BACKOFF_START = 1.0
    BACKOFF_MULT = 1.05
    MAX_OPERATIONALERROR_TIME = 3600 * 24  # one day

    def __thd(self, with_engine, callable, args, kwargs):
        # try to call callable(arg, *args, **kwargs) repeatedly until no
        # OperationalErrors occur, where arg is either the engine (with_engine)
        # or a connection (not with_engine)
        backoff = self.BACKOFF_START
        start = time.time()
        while True:
            if with_engine:
                arg = self.engine
            else:
                arg = self.engine.contextual_connect()
            try:
                try:
                    rv = callable(arg, *args, **kwargs)
                    assert not isinstance(rv, sa.engine.ResultProxy), \
                        "do not return ResultProxy objects!"
                except sa.exc.OperationalError as e:
                    if not self.engine.should_retry(e):
                        log.err(e, 'Got fatal OperationalError on DB')
                        raise
                    elapsed = time.time() - start
                    if elapsed > self.MAX_OPERATIONALERROR_TIME:
                        log.err(e, ('Raising due to {0} seconds delay on DB '
                                    'query retries'.format(self.MAX_OPERATIONALERROR_TIME)))
                        raise
                    metrics.MetricCountEvent.log(
                        "DBThreadPool.retry-on-OperationalError")
                    # sleep (remember, we're in a thread..)
                    time.sleep(backoff)
                    backoff *= self.BACKOFF_MULT
                    # and re-try; note that 'continue' still executes the
                    # finally: block below, so a per-attempt connection is
                    # closed before the next attempt.
                    log.err(e, 'retrying {} after sql error {}'.format(callable, e))
                    continue
                # AlreadyClaimedError are normal especially in a multimaster
                # configuration
                except (AlreadyClaimedError, ChangeSourceAlreadyClaimedError, SchedulerAlreadyClaimedError):
                    raise
                except Exception as e:
                    log.err(e, 'Got fatal Exception on DB')
                    raise
            finally:
                if not with_engine:
                    arg.close()
            break
        return rv

    def do(self, callable, *args, **kwargs):
        # Run callable(connection, *args, **kwargs) on a pool thread;
        # returns a Deferred firing with its result.
        return threads.deferToThreadPool(self.reactor, self._pool,
                                         self.__thd, False, callable, args, kwargs)

    def do_with_engine(self, callable, *args, **kwargs):
        # As do(), but the callable receives the engine itself.
        return threads.deferToThreadPool(self.reactor, self._pool,
                                         self.__thd, True, callable, args, kwargs)

    def get_sqlite_version(self):
        # Imported lazily; only needed for the sqlite dialect.
        import sqlite3
        return sqlite3.sqlite_version_info
| gpl-2.0 |
LuminateWireless/bazel | third_party/py/mock/setup.py | 91 | 2134 | #! /usr/bin/env python
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
# Distutils/setuptools installer for the mock library.
from mock import __version__

import os


NAME = 'mock'
MODULES = ['mock']
DESCRIPTION = 'A Python Mocking and Patching Library for Testing'
URL = "http://www.voidspace.org.uk/python/mock/"

# The long description is the README that lives next to this script.
readme = os.path.join(os.path.dirname(__file__), 'README.txt')
# Fix: close the file handle instead of leaking it via open(...).read()
# (try/finally rather than "with" to stay compatible with the oldest
# interpreters listed in the classifiers).
handle = open(readme)
try:
    LONG_DESCRIPTION = handle.read()
finally:
    handle.close()

CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 2.5',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.1',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: Implementation :: CPython',
    'Programming Language :: Python :: Implementation :: PyPy',
    'Programming Language :: Python :: Implementation :: Jython',
    'Operating System :: OS Independent',
    'Topic :: Software Development :: Libraries',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Software Development :: Testing',
]

AUTHOR = 'Michael Foord'
AUTHOR_EMAIL = 'michael@voidspace.org.uk'
KEYWORDS = ("testing test mock mocking unittest patching "
            "stubs fakes doubles").split(' ')

params = dict(
    name=NAME,
    version=__version__,
    py_modules=MODULES,

    # metadata for upload to PyPI
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    keywords=KEYWORDS,
    url=URL,
    classifiers=CLASSIFIERS,
)

# Prefer setuptools (which adds test-runner integration); fall back to
# plain distutils when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
else:
    params['tests_require'] = ['unittest2']
    params['test_suite'] = 'unittest2.collector'

setup(**params)
| apache-2.0 |
pyfa-org/Pyfa | graphs/calc.py | 2 | 1322 | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
from service.settings import GraphSettings
def checkLockRange(src, distance):
    """Return True when *src* can lock a target at *distance*.

    A distance of None, or the 'ignoreLockRange' graph setting,
    bypasses the check entirely.
    """
    if distance is None or GraphSettings.getInstance().get('ignoreLockRange'):
        return True
    return distance <= src.item.maxTargetRange
def checkDroneControlRange(src, distance):
    """Return True when *distance* is within *src*'s drone control range.

    A distance of None, or the 'ignoreDCR' graph setting, bypasses the
    check entirely.
    """
    if distance is None or GraphSettings.getInstance().get('ignoreDCR'):
        return True
    return distance <= src.item.extraAttributes['droneControlRange']
| gpl-3.0 |
bjoshua/ansible | lib/ansible/plugins/lookup/random_choice.py | 253 | 1226 | # (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import random
from ansible.plugins.lookup import LookupBase
# useful for introducing chaos ... or just somewhat reasonably fair selection
# amongst available mirrors
#
# tasks:
# - debug: msg=$item
# with_random_choice:
# - one
# - two
# - three
class LookupModule(LookupBase):

    def run(self, terms, inject=None, **kwargs):
        """Pick one entry from *terms* uniformly at random.

        Returns a single-element list, as the lookup API expects.
        """
        chosen = random.choice(terms)
        return [chosen]
| gpl-3.0 |
axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/site-packages/rsa/bigfile.py | 82 | 5185 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Large file support
.. deprecated:: 3.4
The VARBLOCK format is NOT recommended for general use, has been deprecated since
Python-RSA 3.4, and will be removed in a future release. It's vulnerable to a
number of attacks:
1. decrypt/encrypt_bigfile() does not implement `Authenticated encryption`_ nor
uses MACs to verify messages before decrypting public key encrypted messages.
2. decrypt/encrypt_bigfile() does not use hybrid encryption (it uses plain RSA)
and has no method for chaining, so block reordering is possible.
See `issue #19 on Github`_ for more information.
.. _Authenticated encryption: https://en.wikipedia.org/wiki/Authenticated_encryption
.. _issue #19 on Github: https://github.com/sybrenstuvel/python-rsa/issues/13
This module contains functions to:
- break a file into smaller blocks, and encrypt them, and store the
encrypted blocks in another file.
- take such an encrypted files, decrypt its blocks, and reconstruct the
original file.
The encrypted file format is as follows, where || denotes byte concatenation:
FILE := VERSION || BLOCK || BLOCK ...
BLOCK := LENGTH || DATA
LENGTH := varint-encoded length of the subsequent data. Varint comes from
Google Protobuf, and encodes an integer into a variable number of bytes.
Each byte uses the 7 lowest bits to encode the value. The highest bit set
to 1 indicates the next byte is also part of the varint. The last byte will
have this bit set to 0.
This file format is called the VARBLOCK format, in line with the varint format
used to denote the block sizes.
"""
import warnings
from rsa import key, common, pkcs1, varblock
from rsa._compat import byte
def encrypt_bigfile(infile, outfile, pub_key):
    """Encrypt *infile* block by block, writing VARBLOCK output to *outfile*.

    .. deprecated:: 3.4
        The VARBLOCK container has known security problems; see the
        Python-RSA documentation on working with big files.

    :param infile: file-like object to read the cleartext from
    :param outfile: file-like object to write the crypto in VARBLOCK format to
    :param pub_key: :py:class:`rsa.PublicKey` to encrypt with
    """
    warnings.warn("The 'rsa.bigfile.encrypt_bigfile' function was deprecated in Python-RSA version "
                  "3.4 due to security issues in the VARBLOCK format. See "
                  "https://stuvel.eu/python-rsa-doc/usage.html#working-with-big-files "
                  "for more information.",
                  DeprecationWarning, stacklevel=2)
    if not isinstance(pub_key, key.PublicKey):
        raise TypeError('Public key required, but got %r' % pub_key)
    # PKCS#1 v1.5 padding costs 11 bytes of every key-sized block.
    blocksize = common.bit_size(pub_key.n) // 8 - 11
    # A VARBLOCK file starts with a one-byte format version.
    outfile.write(byte(varblock.VARBLOCK_VERSION))
    # Encrypt each fixed-size block, prefixing it with its varint length.
    for cleartext_block in varblock.yield_fixedblocks(infile, blocksize):
        ciphertext = pkcs1.encrypt(cleartext_block, pub_key)
        varblock.write_varint(outfile, len(ciphertext))
        outfile.write(ciphertext)
def decrypt_bigfile(infile, outfile, priv_key):
    """Decrypt a VARBLOCK-format *infile*, writing the cleartext to *outfile*.

    .. deprecated:: 3.4
        The VARBLOCK container has known security problems; see the
        Python-RSA documentation on working with big files.

    :param infile: file-like object to read the crypto in VARBLOCK format from
    :param outfile: file-like object to write the cleartext to
    :param priv_key: :py:class:`rsa.PrivateKey` to decrypt with
    """
    warnings.warn("The 'rsa.bigfile.decrypt_bigfile' function was deprecated in Python-RSA version "
                  "3.4 due to security issues in the VARBLOCK format. See "
                  "https://stuvel.eu/python-rsa-doc/usage.html#working-with-big-files "
                  "for more information.",
                  DeprecationWarning, stacklevel=2)
    if not isinstance(priv_key, key.PrivateKey):
        raise TypeError('Private key required, but got %r' % priv_key)
    # Each varblock is one RSA-encrypted chunk; decrypt and emit in order.
    for crypto_block in varblock.yield_varblocks(infile):
        outfile.write(pkcs1.decrypt(crypto_block, priv_key))
__all__ = ['encrypt_bigfile', 'decrypt_bigfile']
| apache-2.0 |
geekaia/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/persistent_factories.py | 6 | 2965 | from xmodule.course_module import CourseDescriptor
from xmodule.x_module import XModuleDescriptor
import factory
from factory.helpers import lazy_attribute
class SplitFactory(factory.Factory):
    """
    Shared base for split-modulestore factories. Supplies a default
    `modulestore` attribute so that callers who pass their own store via
    kwargs never pull in django.
    """
    @lazy_attribute
    def modulestore(self):
        # Import lazily: django is needed only when the caller did not
        # provide a modulestore of their own.
        from xmodule.modulestore.django import modulestore as _load_store
        return _load_store('split')
class PersistentCourseFactory(SplitFactory):
    """
    Create a brand-new course index entry (not merely a new version of an
    existing course).

    keywords: any xblock field plus (note, the below are filtered out; so, if they
    become legitimate xblock fields, they won't be settable via this factory)
    * org: defaults to textX
    * master_branch: (optional) defaults to 'draft'
    * user_id: (optional) defaults to 'test_user'
    * display_name (xblock field): will default to 'Robot Super Course' unless provided
    """
    FACTORY_FOR = CourseDescriptor

    # pylint: disable=W0613
    @classmethod
    def _create(cls, target_class, offering='999', org='testX', user_id='test_user',
                master_branch='draft', **kwargs):
        store = kwargs.pop('modulestore')
        root_block_id = kwargs.pop('root_block_id', 'course')
        # Persist the new course in the backing datastore; remaining kwargs
        # become xblock field values.
        return store.create_course(
            org, offering, user_id, fields=kwargs,
            master_branch=master_branch, root_block_id=root_block_id
        )

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        raise NotImplementedError()
class ItemFactory(SplitFactory):
    FACTORY_FOR = XModuleDescriptor

    display_name = factory.LazyAttributeSequence(lambda o, n: "{} {}".format(o.category, n))

    # pylint: disable=W0613
    @classmethod
    def _create(cls, target_class, parent_location, category='chapter',
                user_id='test_user', block_id=None, definition_locator=None, force=False,
                continue_version=False, **kwargs):
        """
        Create and persist a new xblock; *kwargs* become its field values.

        :param parent_location: (required) the location of the course & possibly parent
        :param category: (defaults to 'chapter')
        :param definition_locator (optional): the DescriptorLocator for the definition this uses or branches
        """
        store = kwargs.pop('modulestore')
        return store.create_item(
            parent_location, category, user_id, definition_locator=definition_locator,
            block_id=block_id, force=force, continue_version=continue_version, fields=kwargs
        )

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        raise NotImplementedError()
| agpl-3.0 |
levigross/pyscanner | mytests/django/utils/decorators.py | 86 | 4708 | "Functions that help with dynamically creating decorators for views."
from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS
class classonlymethod(classmethod):
    """A classmethod that refuses to be accessed through an instance."""

    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access: behave exactly like classmethod.
            return super(classonlymethod, self).__get__(instance, owner)
        raise AttributeError("This method is available only on the view class.")
def method_decorator(decorator):
    """
    Converts a function decorator into a method decorator
    """
    # 'func' is a plain function at the time it reaches _wrap_method, but it
    # will eventually be bound as a method of the class defining it.
    def _wrap_method(func):
        # 'decorator' may attach attributes to whatever it decorates; capture
        # them by decorating a throwaway function, then copy them onto the
        # wrapper below.
        @decorator
        def dummy(*args, **kwargs):
            pass

        def _wrapper(self, *args, **kwargs):
            @decorator
            def bound_func(*args2, **kwargs2):
                return func(self, *args2, **kwargs2)
            # bound_func matches the signature 'decorator' expects (i.e. no
            # 'self' argument), yet closes over self so it can invoke 'func'.
            return bound_func(*args, **kwargs)

        update_wrapper(_wrapper, dummy)
        # Preserve 'func's own attributes, including its name.
        update_wrapper(_wrapper, func)
        return _wrapper

    update_wrapper(_wrap_method, decorator)
    # Rename to make tracebacks easier to read.
    _wrap_method.__name__ = 'method_decorator(%s)' % decorator.__name__
    return _wrap_method
def decorator_from_middleware_with_args(middleware_class):
    """
    Like decorator_from_middleware, but returns a function
    that accepts the arguments to be passed to the middleware_class.
    Use like::
        cache_page = decorator_from_middleware_with_args(CacheMiddleware)
        # ...
        @cache_page(3600)
        def my_view(request):
            # ...
    """
    decorator_factory = make_middleware_decorator(middleware_class)
    return decorator_factory
def decorator_from_middleware(middleware_class):
    """
    Given a middleware class (not an instance), returns a view decorator. This
    lets you use middleware functionality on a per-view basis. The middleware
    is created with no params passed.
    """
    decorator_factory = make_middleware_decorator(middleware_class)
    return decorator_factory()
def available_attrs(fn):
    """
    Return the list of functools-wrappable attributes on a callable.
    This is required as a workaround for http://bugs.python.org/issue3445.
    """
    return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))

def make_middleware_decorator(middleware_class):
    """
    Build a decorator factory from ``middleware_class``.

    The returned callable accepts the middleware's constructor arguments and
    returns a view decorator that runs the middleware's process_request /
    process_view / process_exception / process_template_response /
    process_response hooks (whichever are defined) around the wrapped view.
    """
    def _make_decorator(*m_args, **m_kwargs):
        middleware = middleware_class(*m_args, **m_kwargs)
        def _decorator(view_func):
            @wraps(view_func, assigned=available_attrs(view_func))
            def _wrapped_view(request, *args, **kwargs):
                if hasattr(middleware, 'process_request'):
                    result = middleware.process_request(request)
                    if result is not None:
                        return result
                if hasattr(middleware, 'process_view'):
                    result = middleware.process_view(request, view_func, args, kwargs)
                    if result is not None:
                        return result
                try:
                    response = view_func(request, *args, **kwargs)
                # Fixed: "except Exception, e" is Python-2-only syntax and a
                # SyntaxError on Python 3; "as" works on Python 2.6+ and 3.
                except Exception as e:
                    if hasattr(middleware, 'process_exception'):
                        result = middleware.process_exception(request, e)
                        if result is not None:
                            return result
                    raise
                if hasattr(response, 'render') and callable(response.render):
                    if hasattr(middleware, 'process_template_response'):
                        response = middleware.process_template_response(request, response)
                    # Defer running of process_response until after the template
                    # has been rendered:
                    if hasattr(middleware, 'process_response'):
                        callback = lambda response: middleware.process_response(request, response)
                        response.add_post_render_callback(callback)
                else:
                    if hasattr(middleware, 'process_response'):
                        return middleware.process_response(request, response)
                return response
            return _wrapped_view
        return _decorator
    return _make_decorator
| mit |
ryfeus/lambda-packs | Tensorflow/source/tensorflow/contrib/boosted_trees/python/ops/gen_split_handler_ops.py | 2 | 22455 | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: gen_split_handler_ops_py.cc
"""
import collections as _collections
from tensorflow.python.eager import execute as _execute
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
# Output names and namedtuple wrapper for the generated op below.
_build_categorical_equality_splits_outputs = ["output_partition_ids", "gains",
                                              "split_infos"]
_BuildCategoricalEqualitySplitsOutput = _collections.namedtuple(
    "BuildCategoricalEqualitySplits",
    _build_categorical_equality_splits_outputs)
def build_categorical_equality_splits(num_minibatches, partition_ids, feature_ids, gradients, hessians, class_id, feature_column_group_id, bias_feature_id, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, multiclass_strategy, name=None):
  r"""Find the split that has the best gain for the accumulated stats.
  Args:
    num_minibatches: A `Tensor` of type `int64`.
      A scalar, the number of times per example gradients & hessians
      were accumulated. The stats are divided by this to get per example stats.
    partition_ids: A `Tensor` of type `int32`.
      A rank 1 tensor of partition IDs.
    feature_ids: A `Tensor` of type `int64`. A rank 1 tensor of feature IDs.
    gradients: A `Tensor` of type `float32`. A rank 1 tensor of gradients.
    hessians: A `Tensor` of type `float32`. A rank 1 tensor of hessians.
    class_id: A `Tensor` of type `int32`.
    feature_column_group_id: An `int`.
    bias_feature_id: An `int`.
    l1_regularization: A `float`.
    l2_regularization: A `float`.
    tree_complexity_regularization: A `float`.
    min_node_weight: A `float`.
    multiclass_strategy: An `int`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output_partition_ids, gains, split_infos).
    output_partition_ids: A `Tensor` of type `int32`. A rank 1 tensor, the partition IDs that we created splits
      for.
    gains: A `Tensor` of type `float32`. A rank 1 tensor, for the computed gain for the created splits.
    split_infos: A `Tensor` of type `string`. A rank 1 tensor of serialized protos which contains the
      `SplitInfo`s.
  """
  # Coerce Python attr arguments to the types the op declares.
  feature_column_group_id = _execute.make_int(feature_column_group_id, "feature_column_group_id")
  bias_feature_id = _execute.make_int(bias_feature_id, "bias_feature_id")
  l1_regularization = _execute.make_float(l1_regularization, "l1_regularization")
  l2_regularization = _execute.make_float(l2_regularization, "l2_regularization")
  tree_complexity_regularization = _execute.make_float(tree_complexity_regularization, "tree_complexity_regularization")
  min_node_weight = _execute.make_float(min_node_weight, "min_node_weight")
  multiclass_strategy = _execute.make_int(multiclass_strategy, "multiclass_strategy")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "BuildCategoricalEqualitySplits", num_minibatches=num_minibatches,
        partition_ids=partition_ids, feature_ids=feature_ids,
        gradients=gradients, hessians=hessians, class_id=class_id,
        feature_column_group_id=feature_column_group_id,
        bias_feature_id=bias_feature_id, l1_regularization=l1_regularization,
        l2_regularization=l2_regularization,
        tree_complexity_regularization=tree_complexity_regularization,
        min_node_weight=min_node_weight,
        multiclass_strategy=multiclass_strategy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("feature_column_group_id",
              _op.get_attr("feature_column_group_id"), "bias_feature_id",
              _op.get_attr("bias_feature_id"), "l1_regularization",
              _op.get_attr("l1_regularization"), "l2_regularization",
              _op.get_attr("l2_regularization"),
              "tree_complexity_regularization",
              _op.get_attr("tree_complexity_regularization"),
              "min_node_weight", _op.get_attr("min_node_weight"),
              "multiclass_strategy", _op.get_attr("multiclass_strategy"))
  else:
    # Eager mode: convert inputs to tensors and run the kernel immediately.
    num_minibatches = _ops.convert_to_tensor(num_minibatches, _dtypes.int64)
    partition_ids = _ops.convert_to_tensor(partition_ids, _dtypes.int32)
    feature_ids = _ops.convert_to_tensor(feature_ids, _dtypes.int64)
    gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
    hessians = _ops.convert_to_tensor(hessians, _dtypes.float32)
    class_id = _ops.convert_to_tensor(class_id, _dtypes.int32)
    _inputs_flat = [num_minibatches, partition_ids, feature_ids, gradients, hessians, class_id]
    _attrs = ("feature_column_group_id", feature_column_group_id,
              "bias_feature_id", bias_feature_id, "l1_regularization",
              l1_regularization, "l2_regularization", l2_regularization,
              "tree_complexity_regularization",
              tree_complexity_regularization, "min_node_weight",
              min_node_weight, "multiclass_strategy", multiclass_strategy)
    _result = _execute.execute(b"BuildCategoricalEqualitySplits", 3,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
  _execute.record_gradient(
      "BuildCategoricalEqualitySplits", _inputs_flat, _attrs, _result, name)
  # Re-pack the flat result list as a namedtuple for named access.
  _result = _BuildCategoricalEqualitySplitsOutput._make(_result)
  return _result
_ops.RegisterShape("BuildCategoricalEqualitySplits")(None)
# Output names and namedtuple wrapper for the generated op below.
_build_dense_inequality_splits_outputs = ["output_partition_ids", "gains",
                                          "split_infos"]
_BuildDenseInequalitySplitsOutput = _collections.namedtuple(
    "BuildDenseInequalitySplits", _build_dense_inequality_splits_outputs)
def build_dense_inequality_splits(num_minibatches, partition_ids, bucket_ids, gradients, hessians, bucket_boundaries, class_id, feature_column_group_id, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, multiclass_strategy, name=None):
  r"""Find the split that has the best gain for the accumulated stats.
  Args:
    num_minibatches: A `Tensor` of type `int64`.
      A scalar, the number of times per example gradients & hessians
      were accumulated. The stats are divided by this to get per example stats.
    partition_ids: A `Tensor` of type `int32`.
      A rank 1 tensor of partition IDs.
    bucket_ids: A `Tensor` of type `int64`. A rank 1 tensor of buckets IDs.
    gradients: A `Tensor` of type `float32`. A rank 1 tensor of gradients.
    hessians: A `Tensor` of type `float32`. A rank 1 tensor of hessians.
    bucket_boundaries: A `Tensor` of type `float32`.
      A rank 1 tensor, thresholds that were used for bucketization.
    class_id: A `Tensor` of type `int32`.
    feature_column_group_id: An `int`.
    l1_regularization: A `float`.
    l2_regularization: A `float`.
    tree_complexity_regularization: A `float`.
    min_node_weight: A `float`.
    multiclass_strategy: An `int`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output_partition_ids, gains, split_infos).
    output_partition_ids: A `Tensor` of type `int32`. A rank 1 tensor, the partition IDs that we created splits
      for.
    gains: A `Tensor` of type `float32`. A rank 1 tensor, for the computed gain for the created splits.
    split_infos: A `Tensor` of type `string`. A rank 1 tensor of serialized protos which contains the
      `SplitInfo`s.
  """
  # Coerce Python attr arguments to the types the op declares.
  feature_column_group_id = _execute.make_int(feature_column_group_id, "feature_column_group_id")
  l1_regularization = _execute.make_float(l1_regularization, "l1_regularization")
  l2_regularization = _execute.make_float(l2_regularization, "l2_regularization")
  tree_complexity_regularization = _execute.make_float(tree_complexity_regularization, "tree_complexity_regularization")
  min_node_weight = _execute.make_float(min_node_weight, "min_node_weight")
  multiclass_strategy = _execute.make_int(multiclass_strategy, "multiclass_strategy")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "BuildDenseInequalitySplits", num_minibatches=num_minibatches,
        partition_ids=partition_ids, bucket_ids=bucket_ids,
        gradients=gradients, hessians=hessians,
        bucket_boundaries=bucket_boundaries, class_id=class_id,
        feature_column_group_id=feature_column_group_id,
        l1_regularization=l1_regularization,
        l2_regularization=l2_regularization,
        tree_complexity_regularization=tree_complexity_regularization,
        min_node_weight=min_node_weight,
        multiclass_strategy=multiclass_strategy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("feature_column_group_id",
              _op.get_attr("feature_column_group_id"), "l1_regularization",
              _op.get_attr("l1_regularization"), "l2_regularization",
              _op.get_attr("l2_regularization"),
              "tree_complexity_regularization",
              _op.get_attr("tree_complexity_regularization"),
              "min_node_weight", _op.get_attr("min_node_weight"),
              "multiclass_strategy", _op.get_attr("multiclass_strategy"))
  else:
    # Eager mode: convert inputs to tensors and run the kernel immediately.
    num_minibatches = _ops.convert_to_tensor(num_minibatches, _dtypes.int64)
    partition_ids = _ops.convert_to_tensor(partition_ids, _dtypes.int32)
    bucket_ids = _ops.convert_to_tensor(bucket_ids, _dtypes.int64)
    gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
    hessians = _ops.convert_to_tensor(hessians, _dtypes.float32)
    bucket_boundaries = _ops.convert_to_tensor(bucket_boundaries, _dtypes.float32)
    class_id = _ops.convert_to_tensor(class_id, _dtypes.int32)
    _inputs_flat = [num_minibatches, partition_ids, bucket_ids, gradients, hessians, bucket_boundaries, class_id]
    _attrs = ("feature_column_group_id", feature_column_group_id,
              "l1_regularization", l1_regularization, "l2_regularization",
              l2_regularization, "tree_complexity_regularization",
              tree_complexity_regularization, "min_node_weight",
              min_node_weight, "multiclass_strategy", multiclass_strategy)
    _result = _execute.execute(b"BuildDenseInequalitySplits", 3,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
  _execute.record_gradient(
      "BuildDenseInequalitySplits", _inputs_flat, _attrs, _result, name)
  # Re-pack the flat result list as a namedtuple for named access.
  _result = _BuildDenseInequalitySplitsOutput._make(_result)
  return _result
_ops.RegisterShape("BuildDenseInequalitySplits")(None)
# Output names and namedtuple wrapper for the generated op below.
_build_sparse_inequality_splits_outputs = ["output_partition_ids", "gains",
                                           "split_infos"]
_BuildSparseInequalitySplitsOutput = _collections.namedtuple(
    "BuildSparseInequalitySplits", _build_sparse_inequality_splits_outputs)
def build_sparse_inequality_splits(num_minibatches, partition_ids, bucket_ids, gradients, hessians, bucket_boundaries, class_id, feature_column_group_id, bias_feature_id, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, multiclass_strategy, name=None):
  r"""Find the split that has the best gain for the accumulated stats.
  Args:
    num_minibatches: A `Tensor` of type `int64`.
      A scalar, the number of times per example gradients & hessians
      were accumulated. The stats are divided by this to get per example stats.
    partition_ids: A `Tensor` of type `int32`.
      A rank 1 tensor of partition IDs.
    bucket_ids: A `Tensor` of type `int64`. A rank 1 tensor of buckets IDs.
    gradients: A `Tensor` of type `float32`. A rank 1 tensor of gradients.
    hessians: A `Tensor` of type `float32`. A rank 1 tensor of hessians.
    bucket_boundaries: A `Tensor` of type `float32`.
      A rank 1 tensor, thresholds that were used for bucketization.
    class_id: A `Tensor` of type `int32`.
    feature_column_group_id: An `int`.
    bias_feature_id: An `int`.
    l1_regularization: A `float`.
    l2_regularization: A `float`.
    tree_complexity_regularization: A `float`.
    min_node_weight: A `float`.
    multiclass_strategy: An `int`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output_partition_ids, gains, split_infos).
    output_partition_ids: A `Tensor` of type `int32`. A rank 1 tensor, the partition IDs that we created splits
      for.
    gains: A `Tensor` of type `float32`. A rank 1 tensor, for the computed gain for the created splits.
    split_infos: A `Tensor` of type `string`. A rank 1 tensor of serialized protos which contains the
      `SplitInfo`s.
  """
  # Coerce Python attr arguments to the types the op declares.
  feature_column_group_id = _execute.make_int(feature_column_group_id, "feature_column_group_id")
  bias_feature_id = _execute.make_int(bias_feature_id, "bias_feature_id")
  l1_regularization = _execute.make_float(l1_regularization, "l1_regularization")
  l2_regularization = _execute.make_float(l2_regularization, "l2_regularization")
  tree_complexity_regularization = _execute.make_float(tree_complexity_regularization, "tree_complexity_regularization")
  min_node_weight = _execute.make_float(min_node_weight, "min_node_weight")
  multiclass_strategy = _execute.make_int(multiclass_strategy, "multiclass_strategy")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add the op to the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "BuildSparseInequalitySplits", num_minibatches=num_minibatches,
        partition_ids=partition_ids, bucket_ids=bucket_ids,
        gradients=gradients, hessians=hessians,
        bucket_boundaries=bucket_boundaries, class_id=class_id,
        feature_column_group_id=feature_column_group_id,
        bias_feature_id=bias_feature_id, l1_regularization=l1_regularization,
        l2_regularization=l2_regularization,
        tree_complexity_regularization=tree_complexity_regularization,
        min_node_weight=min_node_weight,
        multiclass_strategy=multiclass_strategy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("feature_column_group_id",
              _op.get_attr("feature_column_group_id"), "bias_feature_id",
              _op.get_attr("bias_feature_id"), "l1_regularization",
              _op.get_attr("l1_regularization"), "l2_regularization",
              _op.get_attr("l2_regularization"),
              "tree_complexity_regularization",
              _op.get_attr("tree_complexity_regularization"),
              "min_node_weight", _op.get_attr("min_node_weight"),
              "multiclass_strategy", _op.get_attr("multiclass_strategy"))
  else:
    # Eager mode: convert inputs to tensors and run the kernel immediately.
    num_minibatches = _ops.convert_to_tensor(num_minibatches, _dtypes.int64)
    partition_ids = _ops.convert_to_tensor(partition_ids, _dtypes.int32)
    bucket_ids = _ops.convert_to_tensor(bucket_ids, _dtypes.int64)
    gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
    hessians = _ops.convert_to_tensor(hessians, _dtypes.float32)
    bucket_boundaries = _ops.convert_to_tensor(bucket_boundaries, _dtypes.float32)
    class_id = _ops.convert_to_tensor(class_id, _dtypes.int32)
    _inputs_flat = [num_minibatches, partition_ids, bucket_ids, gradients, hessians, bucket_boundaries, class_id]
    _attrs = ("feature_column_group_id", feature_column_group_id,
              "bias_feature_id", bias_feature_id, "l1_regularization",
              l1_regularization, "l2_regularization", l2_regularization,
              "tree_complexity_regularization",
              tree_complexity_regularization, "min_node_weight",
              min_node_weight, "multiclass_strategy", multiclass_strategy)
    _result = _execute.execute(b"BuildSparseInequalitySplits", 3,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
  _execute.record_gradient(
      "BuildSparseInequalitySplits", _inputs_flat, _attrs, _result, name)
  # Re-pack the flat result list as a namedtuple for named access.
  _result = _BuildSparseInequalitySplitsOutput._make(_result)
  return _result
_ops.RegisterShape("BuildSparseInequalitySplits")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
  """Parse a serialized OpList, register its ops, and return an OpDefLibrary."""
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
# op {
# name: "BuildCategoricalEqualitySplits"
# input_arg {
# name: "num_minibatches"
# type: DT_INT64
# }
# input_arg {
# name: "partition_ids"
# type: DT_INT32
# }
# input_arg {
# name: "feature_ids"
# type: DT_INT64
# }
# input_arg {
# name: "gradients"
# type: DT_FLOAT
# }
# input_arg {
# name: "hessians"
# type: DT_FLOAT
# }
# input_arg {
# name: "class_id"
# type: DT_INT32
# }
# output_arg {
# name: "output_partition_ids"
# type: DT_INT32
# }
# output_arg {
# name: "gains"
# type: DT_FLOAT
# }
# output_arg {
# name: "split_infos"
# type: DT_STRING
# }
# attr {
# name: "feature_column_group_id"
# type: "int"
# }
# attr {
# name: "bias_feature_id"
# type: "int"
# }
# attr {
# name: "l1_regularization"
# type: "float"
# }
# attr {
# name: "l2_regularization"
# type: "float"
# }
# attr {
# name: "tree_complexity_regularization"
# type: "float"
# }
# attr {
# name: "min_node_weight"
# type: "float"
# }
# attr {
# name: "multiclass_strategy"
# type: "int"
# }
# }
# op {
# name: "BuildDenseInequalitySplits"
# input_arg {
# name: "num_minibatches"
# type: DT_INT64
# }
# input_arg {
# name: "partition_ids"
# type: DT_INT32
# }
# input_arg {
# name: "bucket_ids"
# type: DT_INT64
# }
# input_arg {
# name: "gradients"
# type: DT_FLOAT
# }
# input_arg {
# name: "hessians"
# type: DT_FLOAT
# }
# input_arg {
# name: "bucket_boundaries"
# type: DT_FLOAT
# }
# input_arg {
# name: "class_id"
# type: DT_INT32
# }
# output_arg {
# name: "output_partition_ids"
# type: DT_INT32
# }
# output_arg {
# name: "gains"
# type: DT_FLOAT
# }
# output_arg {
# name: "split_infos"
# type: DT_STRING
# }
# attr {
# name: "feature_column_group_id"
# type: "int"
# }
# attr {
# name: "l1_regularization"
# type: "float"
# }
# attr {
# name: "l2_regularization"
# type: "float"
# }
# attr {
# name: "tree_complexity_regularization"
# type: "float"
# }
# attr {
# name: "min_node_weight"
# type: "float"
# }
# attr {
# name: "multiclass_strategy"
# type: "int"
# }
# }
# op {
# name: "BuildSparseInequalitySplits"
# input_arg {
# name: "num_minibatches"
# type: DT_INT64
# }
# input_arg {
# name: "partition_ids"
# type: DT_INT32
# }
# input_arg {
# name: "bucket_ids"
# type: DT_INT64
# }
# input_arg {
# name: "gradients"
# type: DT_FLOAT
# }
# input_arg {
# name: "hessians"
# type: DT_FLOAT
# }
# input_arg {
# name: "bucket_boundaries"
# type: DT_FLOAT
# }
# input_arg {
# name: "class_id"
# type: DT_INT32
# }
# output_arg {
# name: "output_partition_ids"
# type: DT_INT32
# }
# output_arg {
# name: "gains"
# type: DT_FLOAT
# }
# output_arg {
# name: "split_infos"
# type: DT_STRING
# }
# attr {
# name: "feature_column_group_id"
# type: "int"
# }
# attr {
# name: "bias_feature_id"
# type: "int"
# }
# attr {
# name: "l1_regularization"
# type: "float"
# }
# attr {
# name: "l2_regularization"
# type: "float"
# }
# attr {
# name: "tree_complexity_regularization"
# type: "float"
# }
# attr {
# name: "min_node_weight"
# type: "float"
# }
# attr {
# name: "multiclass_strategy"
# type: "int"
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\211\003\n\036BuildCategoricalEqualitySplits\022\023\n\017num_minibatches\030\t\022\021\n\rpartition_ids\030\003\022\017\n\013feature_ids\030\t\022\r\n\tgradients\030\001\022\014\n\010hessians\030\001\022\014\n\010class_id\030\003\032\030\n\024output_partition_ids\030\003\032\t\n\005gains\030\001\032\017\n\013split_infos\030\007\"\036\n\027feature_column_group_id\022\003int\"\026\n\017bias_feature_id\022\003int\"\032\n\021l1_regularization\022\005float\"\032\n\021l2_regularization\022\005float\"\'\n\036tree_complexity_regularization\022\005float\"\030\n\017min_node_weight\022\005float\"\032\n\023multiclass_strategy\022\003int\n\203\003\n\032BuildDenseInequalitySplits\022\023\n\017num_minibatches\030\t\022\021\n\rpartition_ids\030\003\022\016\n\nbucket_ids\030\t\022\r\n\tgradients\030\001\022\014\n\010hessians\030\001\022\025\n\021bucket_boundaries\030\001\022\014\n\010class_id\030\003\032\030\n\024output_partition_ids\030\003\032\t\n\005gains\030\001\032\017\n\013split_infos\030\007\"\036\n\027feature_column_group_id\022\003int\"\032\n\021l1_regularization\022\005float\"\032\n\021l2_regularization\022\005float\"\'\n\036tree_complexity_regularization\022\005float\"\030\n\017min_node_weight\022\005float\"\032\n\023multiclass_strategy\022\003int\n\234\003\n\033BuildSparseInequalitySplits\022\023\n\017num_minibatches\030\t\022\021\n\rpartition_ids\030\003\022\016\n\nbucket_ids\030\t\022\r\n\tgradients\030\001\022\014\n\010hessians\030\001\022\025\n\021bucket_boundaries\030\001\022\014\n\010class_id\030\003\032\030\n\024output_partition_ids\030\003\032\t\n\005gains\030\001\032\017\n\013split_infos\030\007\"\036\n\027feature_column_group_id\022\003int\"\026\n\017bias_feature_id\022\003int\"\032\n\021l1_regularization\022\005float\"\032\n\021l2_regularization\022\005float\"\'\n\036tree_complexity_regularization\022\005float\"\030\n\017min_node_weight\022\005float\"\032\n\023multiclass_strategy\022\003int")
| mit |
mikeycattell/benefit-housing-rebuild | node_modules/grunt-sass/node_modules/node-sass/node_modules/node-gyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
  """Compare function between 2 tuple.

  Python 2 cmp-style callable that orders (name, value) pairs by their
  first element; used to sort attribute lists deterministically.
  """
  def __call__(self, x, y):
    # cmp() is Python 2 only: returns -1/0/1.
    return cmp(x[0], y[0])
class CmpNode(object):
  """Compare function between 2 xml nodes.

  Builds a canonical string for each node (tag name, value, 'Name'
  attribute, then all attributes sorted by name) and compares those
  strings, giving a stable ordering for sibling nodes.
  """
  def __call__(self, x, y):
    def get_string(node):
      # Prefix guarantees non-empty keys even for anonymous nodes.
      node_string = "node"
      node_string += node.nodeName
      if node.nodeValue:
        node_string += node.nodeValue

      if node.attributes:
        # We first sort by name, if present.
        node_string += node.getAttribute("Name")

        all_nodes = []
        for (name, value) in node.attributes.items():
          all_nodes.append((name, value))

        all_nodes.sort(CmpTuple())
        for (name, value) in all_nodes:
          node_string += name
          node_string += value

      return node_string

    # cmp() is Python 2 only.
    return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
  """Print *node* and its subtree to stdout, one attribute per line.

  Text nodes are stripped; element attributes are emitted sorted so the
  output is deterministic. *indent* is the current indentation in spaces.
  """
  if node.nodeType == Node.TEXT_NODE:
    if node.data.strip():
      print '%s%s' % (' '*indent, node.data.strip())
    return

  if node.childNodes:
    # Merge adjacent text nodes so they print as one line.
    node.normalize()

  # Get the number of attributes
  attr_count = 0
  if node.attributes:
    attr_count = node.attributes.length

  # Print the main tag
  if attr_count == 0:
    print '%s<%s>' % (' '*indent, node.nodeName)
  else:
    print '%s<%s' % (' '*indent, node.nodeName)

    all_attributes = []
    for (name, value) in node.attributes.items():
      all_attributes.append((name, value))
      all_attributes.sort(CmpTuple())
    for (name, value) in all_attributes:
      print '%s %s="%s"' % (' '*indent, name, value)
    print '%s>' % (' '*indent)

  if node.nodeValue:
    print '%s %s' % (' '*indent, node.nodeValue)

  # Recurse into children with two extra spaces, then close the tag.
  for sub_node in node.childNodes:
    PrettyPrintNode(sub_node, indent=indent+2)
  print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
  """Return node's children with any nested <Filter> wrappers removed.

  The special filter named '_excluded_files' (and everything beneath it)
  is dropped entirely.
  """
  if (node.attributes and
      node.getAttribute('Name') == '_excluded_files'):
    # We don't add the "_excluded_files" filter.
    return []

  flattened = []
  for child in node.childNodes:
    if child.nodeName == 'Filter':
      flattened.extend(FlattenFilter(child))
    else:
      flattened.append(child)
  return flattened
def FixFilenames(filenames, current_directory):
  """Normalize a list of path strings relative to *current_directory*.

  Applies the global key->value REPLACEMENTS to each entry, strips quotes
  and spaces, and converts to an absolute path unless the entry starts
  with '$' (an MSBuild/vsprops macro left for the build system to expand).
  Empty entries are dropped.

  NOTE: this changes the process working directory via os.chdir() so that
  os.path.abspath resolves against *current_directory* — a side effect
  visible to the whole process.
  """
  new_list = []
  for filename in filenames:
    if filename:
      for key in REPLACEMENTS:
        filename = filename.replace(key, REPLACEMENTS[key])
      os.chdir(current_directory)
      filename = filename.strip('"\' ')
      if filename.startswith('$'):
        new_list.append(filename)
      else:
        new_list.append(os.path.abspath(filename))
  return new_list
def AbsoluteNode(node):
  """Makes all the properties we know about in this node absolute.

  Rewrites the known path-carrying attributes in place, resolving each
  semicolon-separated entry against the directory of the input vcproj
  (ARGUMENTS[1]). Attributes whose original value was empty are removed.
  """
  if node.attributes:
    for (name, value) in node.attributes.items():
      if name in ['InheritedPropertySheets', 'RelativePath',
                  'AdditionalIncludeDirectories',
                  'IntermediateDirectory', 'OutputDirectory',
                  'AdditionalLibraryDirectories']:
        # We want to fix up these paths
        path_list = value.split(';')
        new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
        node.setAttribute(name, ';'.join(new_list))
      if not value:
        node.removeAttribute(name)
def CleanupVcproj(node):
  """For each sub node, we call recursively this function.

  Normalizes the subtree in place: makes paths absolute, strips
  whitespace from text nodes, sorts and de-duplicates semicolon-separated
  attribute values, flattens <Filter> wrappers, and re-inserts children
  in a deterministic sorted order (dropping empty <Tool> and all
  <UserMacro> nodes).
  """
  for sub_node in node.childNodes:
    AbsoluteNode(sub_node)
    CleanupVcproj(sub_node)

  # Normalize the node, and remove all extranous whitespaces.
  for sub_node in node.childNodes:
    if sub_node.nodeType == Node.TEXT_NODE:
      sub_node.data = sub_node.data.replace("\r", "")
      sub_node.data = sub_node.data.replace("\n", "")
      sub_node.data = sub_node.data.rstrip()

  # Fix all the semicolon separated attributes to be sorted, and we also
  # remove the dups.
  if node.attributes:
    for (name, value) in node.attributes.items():
      sorted_list = sorted(value.split(';'))
      unique_list = []
      for i in sorted_list:
        if not unique_list.count(i):
          unique_list.append(i)
      node.setAttribute(name, ';'.join(unique_list))
      if not value:
        node.removeAttribute(name)

  if node.childNodes:
    node.normalize()

  # For each node, take a copy, and remove it from the list.
  node_array = []
  while node.childNodes and node.childNodes[0]:
    # Take a copy of the node and remove it from the list.
    current = node.childNodes[0]
    node.removeChild(current)

    # If the child is a filter, we want to append all its children
    # to this same list.
    if current.nodeName == 'Filter':
      node_array.extend(FlattenFilter(current))
    else:
      node_array.append(current)

  # Sort the list.
  node_array.sort(CmpNode())

  # Insert the nodes in the correct order.
  for new_node in node_array:
    # But don't append empty tool node.
    if new_node.nodeName == 'Tool':
      if new_node.attributes and new_node.attributes.length == 1:
        # This one was empty.
        continue
    if new_node.nodeName == 'UserMacro':
      continue
    node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
  """Collect every <Configuration> element found under the project's
  <Configurations> section(s)."""
  # TODO(nsylvain): Find a better way to navigate the xml.
  return [config
          for section in vcproj.childNodes
          if section.nodeName == "Configurations"
          for config in section.childNodes
          if config.nodeName == "Configuration"]
def GetChildrenVsprops(filename):
  """Return the absolute paths of the property sheets inherited by the
  vsprops file *filename* (its 'InheritedPropertySheets' attribute),
  or an empty list when it inherits nothing.
  """
  dom = parse(filename)
  if dom.documentElement.attributes:
    vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
    return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
  return []
def SeekToNode(node1, child2):
  """Find the child of *node1* that matches *child2*'s element name and
  'Name' attribute.

  Returns the matching child node, or None when *child2* is a text node,
  has no 'Name' attribute, or no sibling of node1 matches.
  """
  # A text node does not have properties.
  if child2.nodeType == Node.TEXT_NODE:
    return None

  wanted_name = child2.getAttribute("Name")
  if not wanted_name:
    # There is no name. We don't know how to merge.
    return None

  # Scan node1's children for a node with the same tag and Name.
  for candidate in node1.childNodes:
    if (candidate.nodeName == child2.nodeName and
        candidate.getAttribute("Name") == wanted_name):
      return candidate

  # No match. We give up.
  return None
def MergeAttributes(node1, node2):
  """Merge node2's attributes onto node1.

  'Name' is never merged. New attributes are adopted as-is; attributes
  present on both with different values are concatenated with ';'.
  'InheritedPropertySheets' is dropped from node1 afterwards, since the
  sheets have already been merged in.
  """
  if not node2.attributes:
    # No attributes to merge?
    return

  for (name, incoming) in node2.attributes.items():
    # Don't merge the 'Name' attribute.
    if name == 'Name':
      continue

    existing = node1.getAttribute(name)
    if not existing:
      # The attribute does not exist in the main node: adopt it.
      node1.setAttribute(name, incoming)
    elif existing != incoming:
      # Exists on both with different values: keep both, joined.
      node1.setAttribute(name, ';'.join([existing, incoming]))

    # Property sheet references are useless once merged; remove them.
    if name == 'InheritedPropertySheets':
      node1.removeAttribute(name)
def MergeProperties(node1, node2):
  """Recursively merge node2's attributes and subtree into node1.

  Children of node2 that have a matching (tag, Name) child in node1 are
  merged in place; the rest are deep-copied and appended.
  """
  MergeAttributes(node1, node2)
  for child2 in node2.childNodes:
    child1 = SeekToNode(node1, child2)
    if child1:
      MergeProperties(child1, child2)
    else:
      node1.appendChild(child2.cloneNode(True))
def main(argv):
  """Main function of this vcproj prettifier.

  argv[1] is the vcproj path; remaining arguments are key=value pairs
  recorded in the global REPLACEMENTS used by FixFilenames. Returns a
  process exit status (0 on success, 1 on usage error).
  """
  global ARGUMENTS
  ARGUMENTS = argv

  # check if we have exactly 1 parameter.
  if len(argv) < 2:
    print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
           '[key2=value2]' % argv[0])
    return 1

  # Parse the keys
  for i in range(2, len(argv)):
    (key, value) = argv[i].split('=')
    REPLACEMENTS[key] = value

  # Open the vcproj and parse the xml.
  dom = parse(argv[1])

  # First thing we need to do is find the Configuration Node and merge them
  # with the vsprops they include.
  for configuration_node in GetConfiguationNodes(dom.documentElement):
    # Get the property sheets associated with this configuration.
    vsprops = configuration_node.getAttribute('InheritedPropertySheets')

    # Fix the filenames to be absolute.
    vsprops_list = FixFilenames(vsprops.strip().split(';'),
                                os.path.dirname(argv[1]))

    # Extend the list of vsprops with all vsprops contained in the current
    # vsprops.  (The list grows while we iterate it; extend() makes the
    # loop also visit the newly appended, transitively included sheets.)
    for current_vsprops in vsprops_list:
      vsprops_list.extend(GetChildrenVsprops(current_vsprops))

    # Now that we have all the vsprops, we need to merge them.
    for current_vsprops in vsprops_list:
      MergeProperties(configuration_node,
                      parse(current_vsprops).documentElement)

  # Now that everything is merged, we need to cleanup the xml.
  CleanupVcproj(dom.documentElement)

  # Finally, we use the prett xml function to print the vcproj back to the
  # user.
  #print dom.toprettyxml(newl="\n")
  PrettyPrintNode(dom.documentElement)
  return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
libscie/liberator | liberator/lib/python3.6/site-packages/django/conf/locale/ml/formats.py | 1007 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Locale formats for 'ml' (Malayalam).  These values mirror Django's
# en/US defaults; no Malayalam-specific formatting is applied here.
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0  # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    # '%b %d %Y', '%b %d, %Y',            # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',            # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',            # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',            # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%Y',              # '10/25/2006'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
    '%m/%d/%y',              # '10/25/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| cc0-1.0 |
cherez/youtube-dl | youtube_dl/extractor/ccc.py | 107 | 3890 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
unified_strdate,
)
class CCCIE(InfoExtractor):
    """Extractor for talk recordings hosted on media.ccc.de."""
    IE_NAME = 'media.ccc.de'
    _VALID_URL = r'https?://(?:www\.)?media\.ccc\.de/[^?#]+/[^?#/]*?_(?P<id>[0-9]{8,})._[^?#/]*\.html'

    _TEST = {
        'url': 'http://media.ccc.de/browse/congress/2013/30C3_-_5443_-_en_-_saal_g_-_201312281830_-_introduction_to_processor_design_-_byterazor.html#video',
        'md5': '3a1eda8f3a29515d27f5adb967d7e740',
        'info_dict': {
            'id': '20131228183',
            'ext': 'mp4',
            'title': 'Introduction to Processor Design',
            'description': 'md5:5ddbf8c734800267f2cee4eab187bc1b',
            'thumbnail': 're:^https?://.*\.jpg$',
            'view_count': int,
            'upload_date': '20131229',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        if self._downloader.params.get('prefer_free_formats'):
            preference = qualities(['mp3', 'opus', 'mp4-lq', 'webm-lq', 'h264-sd', 'mp4-sd', 'webm-sd', 'mp4', 'webm', 'mp4-hd', 'h264-hd', 'webm-hd'])
        else:
            preference = qualities(['opus', 'mp3', 'webm-lq', 'mp4-lq', 'webm-sd', 'h264-sd', 'mp4-sd', 'webm', 'mp4', 'webm-hd', 'mp4-hd', 'h264-hd'])

        title = self._html_search_regex(
            r'(?s)<h1>(.*?)</h1>', webpage, 'title')
        description = self._html_search_regex(
            r"(?s)<p class='description'>(.*?)</p>",
            webpage, 'description', fatal=False)
        upload_date = unified_strdate(self._html_search_regex(
            r"(?s)<span class='[^']*fa-calendar-o'></span>(.*?)</li>",
            webpage, 'upload date', fatal=False))
        view_count = int_or_none(self._html_search_regex(
            r"(?s)<span class='[^']*fa-eye'></span>(.*?)</li>",
            webpage, 'view count', fatal=False))

        matches = re.finditer(r'''(?xs)
            <(?:span|div)\s+class='label\s+filetype'>(?P<format>.*?)</(?:span|div)>\s*
            <a\s+download\s+href='(?P<http_url>[^']+)'>\s*
            (?:
                .*?
                <a\s+href='(?P<torrent_url>[^']+\.torrent)'
            )?''', webpage)
        formats = []
        for m in matches:
            format = m.group('format')
            format_id = self._search_regex(
                r'.*/([a-z0-9_-]+)/[^/]*$',
                m.group('http_url'), 'format id', default=None)
            # BUG FIX: format_id may be None (default above) when the URL
            # does not match; testing `'h264' in None` raised TypeError.
            if format_id is None:
                vcodec = None
            elif 'h264' in format_id:
                vcodec = 'h264'
            elif format_id in ('mp3', 'opus'):
                # Audio-only downloads.
                vcodec = 'none'
            else:
                vcodec = None
            formats.append({
                'format_id': format_id,
                'format': format,
                'url': m.group('http_url'),
                'vcodec': vcodec,
                'preference': preference(format_id),
            })

            if m.group('torrent_url'):
                formats.append({
                    'format_id': 'torrent-%s' % (format if format_id is None else format_id),
                    'format': '%s (torrent)' % format,
                    'proto': 'torrent',
                    'format_note': '(unsupported; will just download the .torrent file)',
                    'vcodec': vcodec,
                    'preference': -100 + preference(format_id),
                    'url': m.group('torrent_url'),
                })
        self._sort_formats(formats)

        thumbnail = self._html_search_regex(
            r"<video.*?poster='([^']+)'", webpage, 'thumbnail', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'view_count': view_count,
            'upload_date': upload_date,
            'formats': formats,
        }
| unlicense |
Hiyorimi/scikit-image | doc/gh-pages.py | 42 | 4518 | #!/usr/bin/env python
"""Script to commit the doc build outputs into the github-pages repo.
Use:
gh-pages.py [tag]
If no tag is given, the current output of 'git describe' is used. If given,
that is how the resulting directory will be named.
In practice, you should use either actual clean tags from a current build or
something like 'current' as a stable URL for the most current version of the docs."""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import shutil
import sys
from os import chdir as cd
from subprocess import Popen, PIPE, CalledProcessError, check_call
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
pages_dir = 'gh-pages'
html_dir = 'build/html'
pdf_dir = 'build/latex'
pages_repo = 'https://github.com/scikit-image/docs.git'
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def sh(cmd):
    """Run *cmd* through a subshell and return its exit status (always 0).

    A non-zero exit raises CalledProcessError, exactly like check_call.
    """
    status = check_call(cmd, shell=True)
    return status
def sh2(cmd):
    """Run *cmd* in a subshell and return its stripped stdout (bytes).

    On a non-zero exit the captured output is printed and
    CalledProcessError is raised.  Stderr is not captured.
    """
    proc = Popen(cmd, stdout=PIPE, shell=True)
    output = proc.communicate()[0].rstrip()
    if proc.returncode:
        print(output)
        raise CalledProcessError(proc.returncode, cmd)
    return output
def sh3(cmd):
    """Run *cmd* in a subshell, capturing both streams.

    Returns a (stdout, stderr) tuple of stripped bytes; raises
    CalledProcessError on a non-zero exit status.
    """
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
    out, err = proc.communicate()
    if proc.returncode:
        raise CalledProcessError(proc.returncode, cmd)
    return out.rstrip(), err.rstrip()
def init_repo(path):
    """clone the gh-pages repo if we haven't already.

    Clones the docs repository into *path* and checks out its
    'gh-pages' branch, restoring the caller's working directory after.
    """
    sh("git clone %s %s"%(pages_repo, path))
    here = os.getcwd()
    cd(path)
    sh('git checkout gh-pages')
    # Return to where we started so callers are unaffected.
    cd(here)
#-----------------------------------------------------------------------------
# Script starts
#-----------------------------------------------------------------------------
if __name__ == '__main__':
    # find the version number from skimage/__init__.py
    # NOTE(review): the file handle is never closed; harmless in a short
    # script but worth tidying.
    setup_lines = open('../skimage/__init__.py').readlines()
    tag = 'vUndefined'
    for l in setup_lines:
        if l.startswith('__version__'):
            tag = l.split("'")[1]
            if "dev" in tag:
                tag = "dev"
            else:
                # Rename e.g. 0.9.0 to 0.9.x
                tag = '.'.join(tag.split('.')[:-1] + ['x'])
            break

    startdir = os.getcwd()
    if not os.path.exists(pages_dir):
        # init the repo
        init_repo(pages_dir)
    else:
        # ensure up-to-date before operating
        cd(pages_dir)
        sh('git checkout gh-pages')
        sh('git pull')
        cd(startdir)

    dest = os.path.join(pages_dir, tag)

    # This is pretty unforgiving: we unconditionally nuke the destination
    # directory, and then copy the html tree in there
    shutil.rmtree(dest, ignore_errors=True)
    shutil.copytree(html_dir, dest)

    # copy pdf file into tree
    #shutil.copy(pjoin(pdf_dir, 'scikits.image.pdf'), pjoin(dest, 'scikits.image.pdf'))

    try:
        cd(pages_dir)

        # Safety check: refuse to commit anywhere but gh-pages.
        status = sh2('git status | head -1')
        branch = re.match(b'On branch (.*)$', status).group(1)
        if branch != b'gh-pages':
            e = 'On %r, git branch is %r, MUST be "gh-pages"' % (pages_dir,
                                                                 branch)
            raise RuntimeError(e)

        sh("touch .nojekyll")
        sh('git add .nojekyll')
        sh('git add index.html')
        sh('git add --all %s' % tag)

        status = sh2('git status | tail -1')
        if not re.match(b'nothing to commit', status):
            sh2('git commit -m"Updated doc release: %s"' % tag)
        else:
            print('\n! Note: no changes to commit\n')

        print('Most recent commit:')
        sys.stdout.flush()
        sh('git --no-pager log --oneline HEAD~1..')
    finally:
        # Always restore the original working directory.
        cd(startdir)

    print('')
    print('Now verify the build in: %r' % dest)
    print("If everything looks good, run 'git push' inside doc/gh-pages.")
daeseokyoun/youtube-dl | youtube_dl/extractor/dhm.py | 110 | 2089 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import parse_duration
class DHMIE(InfoExtractor):
    """Extractor for the film archive of the Deutsches Historisches
    Museum (www.dhm.de/filmarchiv)."""
    IE_DESC = 'Filmarchiv - Deutsches Historisches Museum'
    _VALID_URL = r'https?://(?:www\.)?dhm\.de/filmarchiv/(?:[^/]+/)+(?P<id>[^/]+)'

    _TESTS = [{
        'url': 'http://www.dhm.de/filmarchiv/die-filme/the-marshallplan-at-work-in-west-germany/',
        'md5': '11c475f670209bf6acca0b2b7ef51827',
        'info_dict': {
            'id': 'the-marshallplan-at-work-in-west-germany',
            'ext': 'flv',
            'title': 'MARSHALL PLAN AT WORK IN WESTERN GERMANY, THE',
            'description': 'md5:1fabd480c153f97b07add61c44407c82',
            'duration': 660,
            'thumbnail': 're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://www.dhm.de/filmarchiv/02-mapping-the-wall/peter-g/rolle-1/',
        'md5': '09890226332476a3e3f6f2cb74734aa5',
        'info_dict': {
            'id': 'rolle-1',
            'ext': 'flv',
            'title': 'ROLLE 1',
            'thumbnail': 're:^https?://.*\.jpg$',
        },
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        # The page embeds an XSPF playlist; the actual media entries
        # come from there, while title/description/duration are scraped
        # from the surrounding HTML.
        playlist_url = self._search_regex(
            r"file\s*:\s*'([^']+)'", webpage, 'playlist url')

        entries = self._extract_xspf_playlist(playlist_url, playlist_id)

        title = self._search_regex(
            [r'dc:title="([^"]+)"', r'<title> &raquo;([^<]+)</title>'],
            webpage, 'title').strip()
        description = self._html_search_regex(
            r'<p><strong>Description:</strong>(.+?)</p>',
            webpage, 'description', default=None)
        duration = parse_duration(self._search_regex(
            r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)',
            webpage, 'duration', default=None))

        # Enrich the first playlist entry with the scraped metadata.
        entries[0].update({
            'title': title,
            'description': description,
            'duration': duration,
        })

        return self.playlist_result(entries, playlist_id)
| unlicense |
sole/three.js | utils/exporters/blender/addons/io_three/exporter/scene.py | 24 | 8788 | import os
from .. import constants, logger
from . import (
base_classes,
texture,
material,
geometry,
object as object_,
utilities,
io,
api
)
from bpy import context
class Scene(base_classes.BaseScene):
    """Class that handles the construction of a Three scene."""

    def __init__(self, filepath, options=None):
        logger.debug("Scene().__init__(%s, %s)", filepath, options)
        # Default top-level sections of the exported scene document.
        self._defaults = {
            constants.METADATA: constants.DEFAULT_METADATA.copy(),
            constants.GEOMETRIES: [],
            constants.MATERIALS: [],
            constants.IMAGES: [],
            constants.TEXTURES: [],
            constants.ANIMATION: []
        }
        base_classes.BaseScene.__init__(self, filepath, options or {})

        source_file = api.scene_name()
        if source_file:
            self[constants.METADATA][constants.SOURCE_FILE] = source_file
        self.__init_animation()

    def __init_animation(self):
        # Seed a single "default" animation track at the blend file's fps.
        self[constants.ANIMATION].append({
            constants.NAME: "default",
            constants.FPS : context.scene.render.fps,
            constants.KEYFRAMES: []
        });
        pass

    @property
    def valid_types(self):
        """
        :return: list of valid node types
        """
        valid_types = [api.constants.MESH]

        if self.options.get(constants.HIERARCHY, False):
            valid_types.append(api.constants.EMPTY)

        if self.options.get(constants.CAMERAS):
            logger.info("Adding cameras to valid object types")
            valid_types.append(api.constants.CAMERA)

        if self.options.get(constants.LIGHTS):
            logger.info("Adding lights to valid object types")
            valid_types.append(api.constants.LAMP)

        return valid_types

    def geometry(self, value):
        """Find a geometry node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str

        """
        logger.debug("Scene().geometry(%s)", value)
        return _find_node(value, self[constants.GEOMETRIES])

    def image(self, value):
        """Find a image node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str

        """
        logger.debug("Scene().image%s)", value)
        return _find_node(value, self[constants.IMAGES])

    def material(self, value):
        """Find a material node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str

        """
        logger.debug("Scene().material(%s)", value)
        return _find_node(value, self[constants.MATERIALS])

    def parse(self):
        """Execute the parsing of the scene.

        Textures and materials are only parsed when the corresponding
        export options are enabled; geometries and objects always are.
        """
        logger.debug("Scene().parse()")
        if self.options.get(constants.MAPS):
            self._parse_textures()

        if self.options.get(constants.MATERIALS):
            self._parse_materials()

        self._parse_geometries()
        self._parse_objects()

    def texture(self, value):
        """Find a texture node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str

        """
        logger.debug("Scene().texture(%s)", value)
        return _find_node(value, self[constants.TEXTURES])

    def write(self):
        """Write the parsed scene to disk.

        Depending on the export options, geometry data and animations
        are either embedded in the scene file or written alongside it as
        separate files referenced by URL.
        """
        logger.debug("Scene().write()")
        data = {}

        embed_anim = self.options.get(constants.EMBED_ANIMATION, True)
        embed = self.options.get(constants.EMBED_GEOMETRY, True)

        compression = self.options.get(constants.COMPRESSION)
        extension = constants.EXTENSIONS.get(
            compression,
            constants.EXTENSIONS[constants.JSON])

        export_dir = os.path.dirname(self.filepath)
        for key, value in self.items():
            if key == constants.GEOMETRIES:
                geometries = []
                for geom in value:

                    if not embed_anim:
                        geom.write_animation(export_dir)

                    geom_data = geom.copy()

                    if embed:
                        geometries.append(geom_data)
                        continue

                    # External geometry: strip the bulky payload and
                    # replace it with a URL to the sidecar file.
                    geo_type = geom_data[constants.TYPE].lower()
                    if geo_type == constants.GEOMETRY.lower():
                        geom_data.pop(constants.DATA)
                    elif geo_type == constants.BUFFER_GEOMETRY.lower():
                        geom_data.pop(constants.ATTRIBUTES)
                        geom_data.pop(constants.METADATA)

                    url = 'geometry.%s%s' % (geom.node, extension)
                    geometry_file = os.path.join(export_dir, url)

                    geom.write(filepath=geometry_file)
                    geom_data[constants.URL] = os.path.basename(url)

                    geometries.append(geom_data)

                data[key] = geometries
            elif isinstance(value, list):
                data[key] = []
                for each in value:
                    data[key].append(each.copy())
            elif isinstance(value, dict):
                data[key] = value.copy()

        io.dump(self.filepath, data, options=self.options)

        if self.options.get(constants.EXPORT_TEXTURES) and not self.options.get(constants.EMBED_TEXTURES):
            texture_folder = self.options.get(constants.TEXTURE_FOLDER)
            for geo in self[constants.GEOMETRIES]:
                logger.info("Copying textures from %s", geo.node)
                geo.copy_textures(texture_folder)

    def _parse_geometries(self):
        """Locate all geometry nodes and parse them"""
        logger.debug("Scene()._parse_geometries()")

        # this is an important step. please refer to the doc string
        # on the function for more information
        api.object.prep_meshes(self.options)
        geometries = []

        # now iterate over all the extracted mesh nodes and parse each one
        for mesh in api.object.extracted_meshes():
            logger.info("Parsing geometry %s", mesh)
            geo = geometry.Geometry(mesh, self)
            geo.parse()
            geometries.append(geo)

        logger.info("Added %d geometry nodes", len(geometries))
        self[constants.GEOMETRIES] = geometries

    def _parse_materials(self):
        """Locate all non-orphaned materials and parse them"""
        logger.debug("Scene()._parse_materials()")
        materials = []

        for material_name in api.material.used_materials():
            logger.info("Parsing material %s", material_name)
            materials.append(material.Material(material_name, parent=self))

        logger.info("Added %d material nodes", len(materials))
        self[constants.MATERIALS] = materials

    def _parse_objects(self):
        """Locate all valid objects in the scene and parse them"""
        logger.debug("Scene()._parse_objects()")
        try:
            scene_name = self[constants.METADATA][constants.SOURCE_FILE]
        except KeyError:
            scene_name = constants.SCENE

        # The root object represents the scene itself.
        self[constants.OBJECT] = object_.Object(None, parent=self)
        self[constants.OBJECT][constants.TYPE] = constants.SCENE.title()
        self[constants.UUID] = utilities.id_from_name(scene_name)

        objects = []
        if self.options.get(constants.HIERARCHY, False):
            nodes = api.object.assemblies(self.valid_types, self.options)
        else:
            nodes = api.object.nodes(self.valid_types, self.options)

        for node in nodes:
            logger.info("Parsing object %s", node)
            obj = object_.Object(node, parent=self[constants.OBJECT])
            objects.append(obj)

        logger.info("Added %d object nodes", len(objects))
        self[constants.OBJECT][constants.CHILDREN] = objects

    def _parse_textures(self):
        """Locate all non-orphaned textures and parse them"""
        logger.debug("Scene()._parse_textures()")
        textures = []

        for texture_name in api.texture.textures():
            logger.info("Parsing texture %s", texture_name)
            tex_inst = texture.Texture(texture_name, self)
            textures.append(tex_inst)

        logger.info("Added %d texture nodes", len(textures))
        self[constants.TEXTURES] = textures
def _find_node(value, manifest):
    """Find the node in *manifest* whose uuid or name equals *value*.

    :param value: name or uuid
    :param manifest: manifest of nodes to search
    :type value: str
    :type manifest: list
    :returns: the matching node, or None (after logging) if none match
    """
    for candidate in manifest:
        matches_uuid = candidate.get(constants.UUID) == value
        matches_name = candidate.node == value
        if matches_uuid or matches_name:
            return candidate
    logger.debug("No matching node for %s", value)
| mit |
sunny-wyb/xen-4.1.2 | dist/install/usr/lib/python2.7/site-packages/xen/xend/XendTaskManager.py | 47 | 3074 | #===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2007 XenSource Ltd
#============================================================================
"""
Task Manager for Xen API asynchronous tasks.
Stores all tasks in a simple dictionary in module's own local storage to
avoid the 'instance()' methods.
Tasks are indexed by UUID.
"""
from xen.xend.XendTask import XendTask
from xen.xend import uuid
import threading
tasks = {}
tasks_lock = threading.Lock()
def create_task(func, args, func_name, return_type, label, session):
    """Creates a new Task and registers it with the XendTaskManager.

    @param func: callable object XMLRPC method
    @type func: callable object
    @param args: tuple or list of arguments
    @type args: tuple or list
    @param func_name: XMLRPC method name, so we can estimate the progress
    @type func_name: string
    @param return_type: expected type of the task's result
    @param label: human readable task label
    @param session: Xen API session the task belongs to
    @return: Task UUID
    @rtype: string.
    """
    task_uuid = uuid.createString()
    try:
        # Register under the lock so concurrent lookups see a
        # consistent table.
        tasks_lock.acquire()
        task = XendTask(task_uuid, func, args, func_name, return_type, label,
                        '', session)
        tasks[task_uuid] = task
    finally:
        tasks_lock.release()

    # Start the task only after it is registered.
    task.start()

    return task_uuid
def destroy_task(task_uuid):
    """Destroys a task.  Unknown UUIDs are silently ignored.

    @param task_uuid: Task UUID
    @type task_uuid: string.
    """
    tasks_lock.acquire()
    try:
        # pop with a default is a no-op for untracked UUIDs.
        tasks.pop(task_uuid, None)
    finally:
        tasks_lock.release()
def get_all_tasks():
    """Returns the UUID of every tracked task, completed or pending.

    @returns: list of UUIDs
    @rtype: list of strings
    """
    tasks_lock.acquire()
    try:
        return list(tasks.keys())
    finally:
        tasks_lock.release()
def get_task(task_uuid):
    """Retrieves a task by UUID.

    @rtype: XendTask or None
    @return: Task denoted by UUID, or None when unknown.
    """
    tasks_lock.acquire()
    try:
        return tasks.get(task_uuid)
    finally:
        tasks_lock.release()
def get_tasks_by_name(task_name):
    """Retrieves the UUIDs of all tasks whose name_label matches.

    @param task_name: label to match against each task's name_label
    @type task_name: string
    @rtype: list of strings
    @return: UUIDs of the matching tasks.
    """
    try:
        tasks_lock.acquire()
        # BUG FIX: the original referenced an undefined 'name' and
        # iterated the dict's keys (UUID strings, which have no
        # name_label/uuid attributes) instead of the task objects.
        return [t.uuid for t in tasks.values() if t.name_label == task_name]
    finally:
        tasks_lock.release()
| gpl-2.0 |
lujinda/gale | gale/ipc.py | 1 | 6057 | #!/usr/bin/env python
#coding:utf-8
# Author : tuxpy
# Email : q8886888@qq.com.com
# Last modified : 2015-09-15 08:31:23
# Filename : ipc.py
# Description :
from __future__ import print_function
from gale.utils import set_close_exec, ObjectDict, single_pattern
from gale.e import IPCError
from gevent import socket
import gevent
import json
from multiprocessing import Process
import os
import tempfile
import time
import struct
from functools import partial
import signal
__all__ = ['IPCServer','IPCDict']
ALL_OPERA = {'set': False, 'get': True, 'items': True, 'has_key': True,
'pop': True, 'popitem': True, 'update': False, 'values': True,
'setdefault': True, 'keys': True, 'clear': False, 'del': False,
'incr': True}
class FRAME():
    """Wire-format constants for the IPC framing protocol."""
    DATA_LENGTH = 8 # byte width of the length prefix (matches struct '!Q')
def genearte_sock_path(is_sub = False):
    """Return the filesystem path of the gale IPC unix socket.

    The *is_sub* flag is accepted for API compatibility but does not
    currently influence the returned path.
    """
    return '/tmp/gale.sock'
class _Memory(dict):
def _set(self, name, value):
self[name] = value
def _del(self, name):
try:
del self[name]
except KeyError:
pass
def _incr(self, name, increment = 1):
if not isinstance(increment, int):
raise TypeError('increment type must be int')
current = self.setdefault(name, 0)
self[name] = current + increment
return self[name]
@single_pattern
class IPCMemory(object):
    """Singleton store mapping a namespace name to its _Memory dict.

    _memory_block is shared at class level, so every instance (and the
    single_pattern singleton) sees the same namespaces.
    """
    _memory_block = {}
    def __getitem__(self, name):
        # Lazily create the namespace on first access.
        return self._memory_block.setdefault(name, _Memory())
@single_pattern
class IPCServer(object):
    """Unix-domain-socket server that hosts the shared IPC memory.

    serve_forever() forks a child process that accepts client
    connections and handles each one on a gevent greenlet.
    """
    def __init__(self):
        self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        set_close_exec(self._socket.fileno())
        sock_path = genearte_sock_path()
        try:
            # Remove a stale socket file from a previous run.
            os.remove(sock_path)
        except OSError as e:
            if e.errno != 2: # errno 2: no such file or directory
                raise e

        self._socket.bind(genearte_sock_path())
        self._socket.listen(50)

    def serve_forever(self):
        """Run the accept loop in a dedicated child process."""
        Process(target = self._serve_forever).start()

    def _serve_forever(self):
        # Close the listening socket cleanly on Ctrl-C.
        signal.signal(signal.SIGINT, self.close)
        while not self._socket.closed:
            try:
                conn, addr = self._socket.accept()
            except socket.error:
                self.close()
                break

            connection = IPCConnection(conn)
            gevent.spawn(connection.start_work)

    def close(self, *args):
        """Close the listener and remove its socket file (idempotent)."""
        if self._socket.closed:
            return
        os.remove(self._socket.getsockname())
        self._socket.close()
class Connection(object):
    """Length-prefixed framing over a connected socket.

    Each frame is an 8-byte big-endian unsigned length (struct '!Q')
    followed by that many payload bytes.
    """
    def __init__(self, _socket):
        self._socket = _socket

    def recv_parse_frame(self):
        """Read one frame; returns an ObjectDict with 'data_length' and
        'data', or None (after closing) when the peer has gone away."""
        frame = ObjectDict()
        self.on_frame_header(frame)
        if not frame:
            self.close()
            return

        self.on_frame_data(frame)
        return frame

    def close(self):
        self._socket.close()

    def on_frame_header(self, frame):
        _header = ObjectDict()
        header = self._socket.recv(FRAME.DATA_LENGTH) # read the fixed-size length prefix first
        if not header:
            # Peer closed; leave frame empty so the caller notices.
            return

        data_length = struct.unpack(b'!Q', header)[0]
        _header.data_length = data_length
        frame.update(_header)

    def on_frame_data(self, frame):
        data_length = frame.data_length
        data = self._recv_data(data_length)
        frame.data = data

    def _recv_data(self, data_length):
        # NOTE(review): if the peer disconnects mid-frame, recv()
        # returns b'' and this loop spins forever; consider raising.
        assert data_length > 0
        chunk = []
        while True:
            _data = self._socket.recv(data_length)
            _len = len(_data)
            assert _len <= data_length
            chunk.append(_data)
            if len(_data) == data_length:
                break
            data_length -= _len

        return b''.join(chunk)

    def _send_data(self, data):
        """Frame and send *data* (already serialized to bytes)."""
        frame = b''
        data_length = len(data)
        frame += struct.pack(b'!Q', data_length)
        frame += data

        self._socket.sendall(frame)
class IPCConnection(Connection):
    """Server-side connection: decodes client frames and executes the
    requested operation against the shared IPCMemory."""

    def start_work(self):
        # Serve frames until the client disconnects.
        while True:
            frame = self.recv_parse_frame()
            if not frame:
                break
            self.exec_ipc(json.loads(frame.data))

    def exec_ipc(self, data):
        """Run one request: data is [namespace, command, args, kwargs]."""
        ipc_memory = IPCMemory()
        name, command, args, kwargs = data
        need_return = ALL_OPERA[command]
        # Commands that are not plain dict methods map to the
        # _-prefixed helpers on _Memory (set/del/incr).
        if command not in dir({}):
            command = '_' + command
        exec_func = getattr(ipc_memory[name], command)
        result = exec_func(*args, **kwargs)
        if not need_return: # no return value needed, so we are done here
            return

        self._send_data(json.dumps(result))
class IPCClient(Connection):
    """Client-side connection bound to one namespace *name* on the
    IPC server."""

    def __init__(self, name):
        _socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock_path = genearte_sock_path(True)
        _socket.connect(sock_path)
        self.name = name
        Connection.__init__(self, _socket)

    def _exec_ipc(self, command, *args, **kwargs):
        """Send one command; returns the decoded reply for commands
        that produce one (per ALL_OPERA), else None."""
        need_return = ALL_OPERA[command]
        frame_data = self.generate_frame_data(command, *args, **kwargs)
        self._send_data(frame_data)
        if not need_return:
            return

        return self.__get_ipc_return()

    def __get_ipc_return(self):
        frame = self.recv_parse_frame()
        if not frame:
            # Server went away before replying.
            return
        data = frame.data
        return json.loads(data)

    def generate_frame_data(self, command, *args, **kwargs):
        """Serialize a request as [namespace, command, args, kwargs]."""
        return json.dumps([self.name, command, args, kwargs])
class IPCDict(IPCClient):
    """dict-like proxy whose operations execute in the IPC server process."""

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails.
        if name.startswith('_'):
            # Surface missing private attributes as AttributeError (not the
            # KeyError a bare __dict__ lookup raises) so hasattr(), copy and
            # pickle protocols behave correctly.
            try:
                return self.__dict__[name]
            except KeyError:
                raise AttributeError(name)
        if name not in ALL_OPERA:
            # Unknown operation names fall back to a keyed 'get'.
            name = 'get'
        return partial(self._exec_ipc, name)

    def __getitem__(self, name):
        return self.get(name)

    def __setitem__(self, name, value):
        self.set(name, value)

    def __delitem__(self, name):
        return self._exec_ipc('del', name)
| mit |
samboiki/smap-data | python/smap/ssl.py | 6 | 3923 | """
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Configure client and server SSL contexts for use in various servers
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
import os
from twisted.python import log
from smap.util import to_bool
try:
    from twisted.internet.ssl import ClientContextFactory
    from twisted.internet.ssl import DefaultOpenSSLContextFactory
    from OpenSSL import SSL
except ImportError:
    # SSL support is optional; without twisted/pyOpenSSL these factories
    # are simply not defined.
    pass
else:
    def defaultVerifyCallback(connection, x509, errnum, errdepth, okay):
        """Reject any certificate that failed OpenSSL's own verification."""
        if not okay:
            log.err("Invalid cert from subject: " + str(x509.get_subject()))
            return False
        return True

    class SslServerContextFactory(DefaultOpenSSLContextFactory):
        """A server context factory for validating client connections.

        ``opts`` must contain 'key' and 'cert' paths; optional keys are
        'verify' (require a valid client certificate) and 'ca' (CA bundle).
        """
        def __init__(self, opts, verifyCallback=defaultVerifyCallback):
            if not 'key' in opts or not 'cert' in opts:
                raise ValueError("Cannot create ssl context without key and certificate files")
            DefaultOpenSSLContextFactory.__init__(self,
                                                  os.path.expanduser(opts["key"]),
                                                  os.path.expanduser(opts["cert"]))
            ctx = self.getContext()
            if 'verify' in opts and to_bool(opts['verify']):
                # Require and validate a client certificate.
                ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
                               verifyCallback)
            if 'ca' in opts:
                ctx.load_verify_locations(os.path.expanduser(opts["ca"]))

    class SslClientContextFactory(ClientContextFactory):
        """Make a client context factory for delivering data.

        Accepts the same ``opts`` keys as the server factory: 'cert'/'key'
        for client auth, 'verify' to validate the server, 'ca' for the CA
        bundle used in that validation.
        """
        def __init__(self, opts, verifyCallback=defaultVerifyCallback):
            self.ssl_opts = opts
            self.verifyCallback = verifyCallback

        def getContext(self, hostname, port):
            self.method = SSL.SSLv23_METHOD
            ctx = ClientContextFactory.getContext(self)
            if 'cert' in self.ssl_opts and 'key' in self.ssl_opts:
                ctx.use_certificate_file(os.path.expanduser(self.ssl_opts['cert']))
                ctx.use_privatekey_file(os.path.expanduser(self.ssl_opts['key']))
            if 'verify' in self.ssl_opts and to_bool(self.ssl_opts['verify']):
                ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
                               self.verifyCallback)
            if 'ca' in self.ssl_opts:
                # BUG FIX: the guard checks 'ca' but the original read the
                # nonexistent 'CAFile' key, raising KeyError whenever a CA
                # file was actually configured.
                ctx.load_verify_locations(os.path.expanduser(self.ssl_opts['ca']))
            return ctx
| bsd-2-clause |
dneg/cortex | test/IECoreHoudini/procedurals/deformationBlur/deformationBlur-1.py | 12 | 4815 | #=====
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#=====
#
# Deformation Blur
#
# This cookbook example injects two different geometries at two different
# time samples, creating deformation blur.
#
# Motion blur is defined in a very similar fashion to RenderMan. First we call
# motionBegin() with a list of time samples. You can have as many motion samples
# as your renderer will allow. For each time sample we then inject some
# geometry. It is important that the topology remain consistent between time
# samples, otherwise the renderer will complain. We finish by calling
# motionEnd(). Remember not to inject anything but geometry samples between
# motionBegin() and motionEnd().
#
# Don't forget to turn on motion blur in your renderer
#
# In OpenGL all samples will be rendered simultaneously. Refer to the
# RenderSwitch example for code that can differentiate based on which renderer
# is currently rendering.
#
# In general the code will look like:
#
# renderer.motionBegin( [ sample1, sample2, ... ] )
# sample1_geometry.render( renderer )
# sample2_geometry.render( renderer )
# ...
# renderer.motionEnd()
#
#=====
from IECore import *
class deformationBlur(ParameterisedProcedural):

    #=====
    # Construction: declare the two geometry-path parameters.
    def __init__(self) :
        ParameterisedProcedural.__init__( self, "DeformationBlur procedural." )
        self.parameters().addParameters( [
            PathParameter( name="geo1", description="Geometry #1",
                           defaultValue="test_data/deform1.cob" ),
            PathParameter( name="geo2", description="Geometry #2",
                           defaultValue="test_data/deform2.cob" ),
        ] )

    #=====
    # The bound must enclose BOTH geometry samples.
    def doBound(self, args) :
        box = Box3f()
        for key in ( 'geo1', 'geo2' ) :
            box.extendBy( Reader.create( args[key].value ).read().bound() )
        return box

    #=====
    # No render state required.
    def doRenderState(self, renderer, args) :
        pass

    #=====
    # Emit both geometry samples between motionBegin/motionEnd.
    def doRender(self, renderer, args):
        # load both motion samples
        sample1 = Reader.create( args['geo1'].value ).read()
        sample2 = Reader.create( args['geo2'].value ).read()
        # shutter open/close from the renderer (a V2f); both values are
        # zero when motion blur is disabled
        shutter = renderer.getOption('shutter').value
        do_moblur = ( shutter.length() > 0 )
        # hand one sample per shutter time to the renderer
        renderer.motionBegin( [ shutter[0], shutter[1] ] )
        sample1.render( renderer )
        sample2.render( renderer )
        renderer.motionEnd()

#=====
# Register our procedural
registerRunTimeTyped( deformationBlur )
| bsd-3-clause |
andyzsf/django | tests/view_tests/tests/test_specials.py | 66 | 1428 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase, override_settings
@override_settings(ROOT_URLCONF='view_tests.generic_urls')
class URLHandling(TestCase):
    """
    Checks how views and responses handle non-ASCII URLs and redirects.
    """
    redirect_target = "/%E4%B8%AD%E6%96%87/target/"

    def test_combining_redirect(self):
        """
        Redirecting to an IRI that requires encoding before use in an HTTP
        response works. The argument to HttpRedirect is ASCII here, but the
        current request path contains non-ASCII characters, so building the
        full path from a non-ASCII base must be handled correctly.
        """
        self.assertRedirects(self.client.get('/中文/'), self.redirect_target)

    def test_nonascii_redirect(self):
        """
        A non-ASCII argument to HttpRedirect is handled properly.
        """
        self.assertRedirects(self.client.get('/nonascii_redirect/'),
                             self.redirect_target)

    def test_permanent_nonascii_redirect(self):
        """
        A non-ASCII argument to HttpPermanentRedirect is handled properly.
        """
        self.assertRedirects(self.client.get('/permanent_nonascii_redirect/'),
                             self.redirect_target, status_code=301)
| bsd-3-clause |
playm2mboy/edx-platform | lms/djangoapps/instructor_task/tasks_helper.py | 26 | 65045 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
import re
from collections import OrderedDict
from datetime import datetime
from django.conf import settings
from eventtracking import tracker
from itertools import chain
from time import time
import unicodecsv
import logging
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import transaction, reset_queries
from django.db.models import Q
import dogstats_wrapper as dog_stats_api
from pytz import UTC
from StringIO import StringIO
from edxmako.shortcuts import render_to_string
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from shoppingcart.models import (
PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from track.views import task_track
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xblock.runtime import KvsFieldData
from xmodule.modulestore.django import modulestore
from xmodule.split_test_module import get_split_user_partitions
from django.utils.translation import ugettext as _
from certificates.models import (
CertificateWhitelist,
certificate_info_for_user,
CertificateStatuses
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import (
enrolled_students_features,
get_proctored_exam_results,
list_may_enroll,
list_problem_responses
)
from instructor_analytics.csvs import format_dictlist
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from verify_student.models import SoftwareSecurePhotoVerification
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']
# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'
# The setting name used for events when "settings" (account settings, preferences, profile information) change.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
class BaseInstructorTask(Task):
    """
    Base task class for use with InstructorTask models.

    Permits updating information about task in corresponding InstructorTask for monitoring purposes.

    Assumes that the entry_id of the InstructorTask model is the first argument to the task.

    The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
    updates the entry on success and failure of the task it wraps. It is setting the entry's value
    for task_state based on what Celery would set it to once the task returns to Celery:
    FAILURE if an exception is encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to perform_module_state_update, and documented there.
    """
    # Celery: abstract base tasks are not registered as runnable tasks themselves.
    abstract = True

    def on_success(self, task_progress, task_id, args, kwargs):
        """
        Update InstructorTask object corresponding to this task with info about success.

        Updates task_output and task_state. But it shouldn't actually do anything
        if the task is only creating subtasks to actually do the work.

        Assumes `task_progress` is a dict containing the task's result, with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages. Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

        This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
        """
        TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
        # We should be able to find the InstructorTask object to update
        # based on the task_id here, without having to dig into the
        # original args to the task. On the other hand, the entry_id
        # is the first value passed to all such args, so we'll use that.
        # And we assume that it exists, else we would already have had a failure.
        entry_id = args[0]
        entry = InstructorTask.objects.get(pk=entry_id)
        # Check to see if any subtasks had been defined as part of this task.
        # If not, then we know that we're done. (If so, let the subtasks
        # handle updating task_state themselves.)
        if len(entry.subtasks) == 0:
            entry.task_output = InstructorTask.create_output_for_success(task_progress)
            entry.task_state = SUCCESS
            entry.save_now()

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """
        Update InstructorTask object corresponding to this task with info about failure.

        Fetches and updates exception and traceback information on failure.

        If an exception is raised internal to the task, it is caught by celery and provided here.
        The information is recorded in the InstructorTask object as a JSON-serialized dict
        stored in the task_output column. It contains the following keys:

               'exception':  type of exception object
               'message': error message from exception object
               'traceback': traceback information (truncated if necessary)

        Note that there is no way to record progress made within the task (e.g. attempted,
        succeeded, etc.) when such failures occur.
        """
        TASK_LOG.debug(u'Task %s: failure returned', task_id)
        entry_id = args[0]
        try:
            entry = InstructorTask.objects.get(pk=entry_id)
        except InstructorTask.DoesNotExist:
            # if the InstructorTask object does not exist, then there's no point
            # trying to update it.
            TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
        else:
            TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
            entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
            entry.task_state = FAILURE
            entry.save_now()
class UpdateProblemModuleStateError(Exception):
    """
    Raised for fatal conditions while updating problem modules.

    Signals that the current module could not be processed and that no
    further modules should be attempted.
    """
def _get_current_task():
    """
    Return Celery's `current_task`, via a hook that tests can mock.

    `celery.current_task` exposes the top of Celery's task stack, but
    mocking it directly does not work well under test; production code
    therefore calls this thin wrapper, which tests patch instead.
    """
    return current_task
class TaskProgress(object):
    """
    Tracks a task's progress counters ('attempted', 'succeeded', 'skipped',
    'failed', 'total'), plus its 'action_name' and running 'duration_ms'.
    """

    def __init__(self, action_name, total, start_time):
        self.action_name = action_name
        self.total = total
        self.start_time = start_time
        self.attempted = 0
        self.succeeded = 0
        self.skipped = 0
        self.failed = 0

    def update_task_state(self, extra_meta=None):
        """
        Push this object's current progress into the celery task's state.

        Arguments:
            extra_meta (dict): Extra metadata merged into the progress dict

        Returns:
            dict: The progress dict, as handed to `update_state` -- also
            consumed by `run_main_task` and `BaseInstructorTask.on_success`.
        """
        elapsed_ms = int((time() - self.start_time) * 1000)
        progress_dict = {
            'action_name': self.action_name,
            'attempted': self.attempted,
            'succeeded': self.succeeded,
            'skipped': self.skipped,
            'failed': self.failed,
            'total': self.total,
            'duration_ms': elapsed_ms,
        }
        if extra_meta is not None:
            progress_dict.update(extra_meta)
        _get_current_task().update_state(state=PROGRESS, meta=progress_dict)
        return progress_dict
def run_main_task(entry_id, task_fcn, action_name):
    """
    Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.

    Arguments passed to `task_fcn` are:

     `entry_id` : the primary key for the InstructorTask entry representing the task.
     `course_id` : the id for the course.
     `task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
     `action_name` : past-tense verb to use for constructing status messages.

    If no exceptions are raised, the `task_fcn` should return a dict containing
    the task's result with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.
              Should be past-tense.  Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    Raises ValueError if the task_id recorded in the InstructorTask entry does
    not match the celery task actually executing this function.
    """
    # Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
    # There's no point in catching it here.
    entry = InstructorTask.objects.get(pk=entry_id)
    # Mark the entry as in-progress before doing any work.
    entry.task_state = PROGRESS
    entry.save_now()

    # Get inputs to use in this task from the entry
    task_id = entry.task_id
    course_id = entry.course_id
    task_input = json.loads(entry.task_input)

    # Construct log message
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
    TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)

    # Check that the task_id submitted in the InstructorTask matches the current task
    # that is running.
    request_task_id = _get_current_task().request.id
    if task_id != request_task_id:
        fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
        message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
        TASK_LOG.error(message)
        raise ValueError(message)

    # Now do the work, timing it for monitoring purposes.
    with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
        task_progress = task_fcn(entry_id, course_id, task_input, action_name)

    # Release any queries that the connection has been hanging onto
    reset_queries()

    # Log and exit, returning task_progress info as task result
    TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
    return task_progress
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
    """
    Performs generic update by visiting StudentModule instances with the update_fcn provided.

    StudentModule instances are those that match the specified `course_id` and `module_state_key`.
    If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
    to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.

    If a `filter_fcn` is not None, it is applied to the query that has been constructed.  It takes one
    argument, which is the query being filtered, and returns the filtered version of the query.

    The `update_fcn` is called on each StudentModule that passes the resulting filtering.
    It is passed three arguments:  the module_descriptor for the module pointed to by the
    module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
    passed through.  If the value returned by the update function evaluates to a boolean True,
    the update is successful; False indicates the update on the particular student module failed.
    A raised exception indicates a fatal condition -- that no other student modules should be considered.

    The return value is a dict containing the task's results, with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible updates to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    Because this is run internal to a task, it does not catch exceptions.  These are allowed to pass up to the
    next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
    result object.
    """
    start_time = time()
    usage_keys = []
    problem_url = task_input.get('problem_url')
    entrance_exam_url = task_input.get('entrance_exam_url')
    student_identifier = task_input.get('student')
    problems = {}

    # if problem_url is present make a usage key from it
    if problem_url:
        usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
        usage_keys.append(usage_key)

        # find the problem descriptor:
        problem_descriptor = modulestore().get_item(usage_key)
        problems[unicode(usage_key)] = problem_descriptor

    # if entrance_exam is present grab all problems in it
    if entrance_exam_url:
        problems = get_problems_in_section(entrance_exam_url)
        usage_keys = [UsageKey.from_string(location) for location in problems.keys()]

    # find the modules in question
    modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)

    # give the option of updating an individual student. If not specified,
    # then updates all students who have responded to a problem so far
    student = None
    if student_identifier is not None:
        # if an identifier is supplied, then look for the student,
        # and let it throw an exception if none is found.
        if "@" in student_identifier:
            student = User.objects.get(email=student_identifier)
        elif student_identifier is not None:
            # NOTE(review): this elif guard is redundant -- the enclosing `if`
            # already established student_identifier is not None.
            student = User.objects.get(username=student_identifier)

    if student is not None:
        modules_to_update = modules_to_update.filter(student_id=student.id)

    if filter_fcn is not None:
        modules_to_update = filter_fcn(modules_to_update)

    task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
    task_progress.update_task_state()

    for module_to_update in modules_to_update:
        task_progress.attempted += 1
        module_descriptor = problems[unicode(module_to_update.module_state_key)]
        # There is no try here:  if there's an error, we let it throw, and the task will
        # be marked as FAILED, with a stack trace.
        with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
            update_status = update_fcn(module_descriptor, module_to_update)
            if update_status == UPDATE_STATUS_SUCCEEDED:
                # If the update_fcn returns true, then it performed some kind of work.
                # Logging of failures is left to the update_fcn itself.
                task_progress.succeeded += 1
            elif update_status == UPDATE_STATUS_FAILED:
                task_progress.failed += 1
            elif update_status == UPDATE_STATUS_SKIPPED:
                task_progress.skipped += 1
            else:
                raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))

    return task_progress.update_task_state()
def _get_task_id_from_xmodule_args(xmodule_instance_args):
    """Pull 'task_id' out of `xmodule_instance_args`, defaulting to UNKNOWN_TASK_ID."""
    if xmodule_instance_args is None:
        return UNKNOWN_TASK_ID
    return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID)
def _get_xqueue_callback_url_prefix(xmodule_instance_args):
"""Gets prefix to use when constructing xqueue_callback_url."""
return xmodule_instance_args.get('xqueue_callback_url_prefix', '') if xmodule_instance_args is not None else ''
def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
    """
    Build a tracking function that logs what happened.

    For insertion into ModuleSystem, and used by CapaModule, which supplies
    the event_type (as string) and event (as dict) arguments; the
    request_info, task_info and page are bound in here.
    """
    # Request-related context travels through xmodule_instance_args; add
    # task-specific context on top of it.
    if xmodule_instance_args is not None:
        request_info = xmodule_instance_args.get('request_info', {})
    else:
        request_info = {}
    task_info = {
        'student': student.username,
        'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args),
    }

    def track_function(event_type, event):
        return task_track(request_info, task_info, event_type, event, page=source_page)

    return track_function
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
                                  grade_bucket_type=None, course=None):
    """
    Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.

    `xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
    These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
    the need for a Request object when instantiating an xmodule instance.
    """
    # reconstitute the problem's corresponding XModule:
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
    student_data = KvsFieldData(DjangoKeyValueStore(field_data_cache))

    # get request-related tracking information from args passthrough, and supplement with task-specific
    # information:
    request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
    task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}

    def make_track_function():
        '''
        Make a tracking function that logs what happened.

        For insertion into ModuleSystem, and used by CapaModule, which will
        provide the event_type (as string) and event (as dict) as arguments.
        The request_info and task_info (and page) are provided here.
        '''
        return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')

    xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
        if xmodule_instance_args is not None else ''

    return get_module_for_descriptor_internal(
        user=student,
        descriptor=module_descriptor,
        student_data=student_data,
        course_id=course_id,
        track_function=make_track_function(),
        xqueue_callback_url_prefix=xqueue_callback_url_prefix,
        grade_bucket_type=grade_bucket_type,
        # This module isn't being used for front-end rendering
        request_token=None,
        # pass in a loaded course for override enabling
        course=course
    )
@transaction.autocommit
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs rescoring on the student's problem submission.

    Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
    In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
    or if the module doesn't support rescoring.

    Returns True if problem was successfully rescored for the given student, and False
    if problem encountered some kind of error in rescoring.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    usage_key = student_module.module_state_key

    with modulestore().bulk_operations(course_id):
        course = get_course_by_id(course_id)
        # TODO: Here is a call site where we could pass in a loaded course.  I
        # think we certainly need it since grading is happening here, and field
        # overrides would be important in handling that correctly
        instance = _get_module_instance_for_task(
            course_id,
            student,
            module_descriptor,
            xmodule_instance_args,
            grade_bucket_type='rescore',
            course=course
        )

        if instance is None:
            # Either permissions just changed, or someone is trying to be clever
            # and load something they shouldn't have access to.
            msg = "No module {loc} for student {student}--access denied?".format(
                loc=usage_key,
                student=student
            )
            TASK_LOG.debug(msg)
            raise UpdateProblemModuleStateError(msg)

        if not hasattr(instance, 'rescore_problem'):
            # This should also not happen, since it should be already checked in the caller,
            # but check here to be sure.
            msg = "Specified problem does not support rescoring."
            raise UpdateProblemModuleStateError(msg)

        # Perform the rescore and persist any state changes it made.
        result = instance.rescore_problem()
        instance.save()
        if 'success' not in result:
            # don't consider these fatal, but false means that the individual call didn't complete:
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: unexpected response %(msg)s",
                dict(
                    msg=result,
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED
        elif result['success'] not in ['correct', 'incorrect']:
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result['success'],
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED
        else:
            TASK_LOG.debug(
                u"successfully processed rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result['success'],
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
    """
    Reset problem attempts to zero for the specified `student_module`.

    Returns UPDATE_STATUS_SUCCEEDED when a non-zero attempt count was
    actually reset, and UPDATE_STATUS_SKIPPED otherwise.
    """
    problem_state = json.loads(student_module.state) if student_module.state else {}
    if 'attempts' not in problem_state:
        return UPDATE_STATUS_SKIPPED
    old_number_of_attempts = problem_state["attempts"]
    if not old_number_of_attempts > 0:
        # Nothing to reset -- already at (or below) zero.
        return UPDATE_STATUS_SKIPPED
    problem_state["attempts"] = 0
    # convert back to json and save
    student_module.state = json.dumps(problem_state)
    student_module.save()
    # get request-related tracking information from args passthrough,
    # and supplement with task-specific information:
    track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
    track_function('problem_reset_attempts',
                   {"old_attempts": old_number_of_attempts, "new_attempts": 0})
    return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
    """
    Delete the StudentModule entry.

    Always returns UPDATE_STATUS_SUCCEEDED, indicating success, if it doesn't
    raise an exception due to database error.
    """
    # Capture the student before deleting the row so the tracking event can
    # still be attributed to them.
    student = student_module.student
    student_module.delete()
    # Emit a tracking event, supplementing request-related info from the
    # args passthrough with task-specific information.
    track_function = _get_track_function_for_task(student, xmodule_instance_args)
    track_function('problem_delete_state', {})
    return UPDATE_STATUS_SUCCEEDED
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Upload data as a CSV using ReportStore.

    Arguments:
        rows: CSV data in the following format (first column may be a
            header):
            [
                [row1_colum1, row1_colum2, ...],
                ...
            ]
        csv_name: Name of the resulting CSV
        course_id: ID of the course
        timestamp: datetime used to stamp the report filename
        config_name: settings key selecting which ReportStore backend to use
    """
    # Build the report filename first, then hand the rows to the store.
    report_filename = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
    )
    ReportStore.from_config(config_name).store_rows(course_id, report_filename, rows)
    # Emit an analytics event noting that the report was requested.
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, })
def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
    """
    Upload Executive Summary Html file using ReportStore.

    Arguments:
        data_dict: containing executive report data.
        report_name: Name of the resulting Html File.
        course_id: ID of the course
        generated_at: datetime used to stamp the report filename
        config_name: settings key selecting which ReportStore backend to use
    """
    # Render the HTML template into an in-memory buffer for upload.
    html_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))
    report_filename = u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
        course_prefix=course_filename_prefix_generator(course_id),
        report_name=report_name,
        timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
    )
    ReportStore.from_config(config_name).store(
        course_id,
        report_filename,
        html_buffer,
        config={'content_type': 'text/html', 'content_encoding': None}
    )
    # Emit an analytics event noting that the report was requested.
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.

    Returns the final TaskProgress state dict.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # flush task state to the cache every N students
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    # Issue the enrollment COUNT query exactly once and reuse the value; the
    # previous implementation ran the same query a second time for logging.
    total_enrolled_students = enrolled_students.count()
    task_progress = TaskProgress(action_name, total_enrolled_students, start_time)
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]
    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students
    )
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1
        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students
        )
        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                # The header is built lazily from the first graded student's
                # section breakdown and then shared by every subsequent row.
                header = [section['label'] for section in gradeset[u'section_breakdown']]
                rows.append(
                    ["id", "email", "username", "grade"] + header + cohorts_header +
                    group_configs_header + ['Enrollment Track', 'Verification Status'] + certificate_info_header
                )
            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }
            cohorts_group_name = []
            if course_is_cohorted:
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else '')
            group_configs_group_names = []
            for partition in experiment_partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else '')
            enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
            verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
                student,
                course_id,
                enrollment_mode
            )
            certificate_info = certificate_info_for_user(
                student,
                course_id,
                gradeset['grade'],
                student.id in whitelisted_user_ids
            )
            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append(
                [student.id, student.email, student.username, gradeset['percent']] +
                row_percents + cohorts_group_name + group_configs_group_names +
                [enrollment_mode] + [verification_status] + certificate_info
            )
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )
    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)
    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def _order_problems(blocks):
"""
Sort the problems by the assignment type and assignment that it belongs to.
Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).
Returns:
an OrderedDict that maps a problem id to its headers in the final report.
"""
problems = OrderedDict()
assignments = dict()
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == 'sequential':
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the sequential block
while blocks[current]['block_type'] != 'sequential':
current = blocks[current]['parent']
current_format = blocks[current]['format']
assignments[current_format][current].append(block)
# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]
return problems
def upload_problem_responses_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing
    all student answers to a given problem, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    task_progress = TaskProgress(action_name, 1, start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating students answers to problem'})
    # Gather each student's stored state for the requested problem and shape
    # it into a header plus data rows.
    problem_location = task_input.get('problem_location')
    student_data = list_problem_responses(course_id, problem_location)
    csv_header, csv_rows = format_dictlist(student_data, ['username', 'state'])
    row_count = len(csv_rows)
    task_progress.attempted = task_progress.succeeded = row_count
    task_progress.skipped = task_progress.total - row_count
    csv_rows.insert(0, csv_header)
    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)
    # Sanitize the block location so it can be embedded in a filename.
    safe_location = re.sub(r'[:/]', '_', problem_location)
    upload_csv_to_report_store(csv_rows, 'student_state_from_{}'.format(safe_location), course_id, start_date)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    Generate a CSV containing all students' problem grades within a given
    `course_id`.

    One row is produced per successfully-graded student: the static identity
    columns, the final course grade, then an (Earned, Possible) pair for each
    graded problem in the cached course structure. Students whose gradeset is
    missing 'percent' or 'raw_scores' are written to a separate error CSV.
    Returns the final TaskProgress state dict.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # flush task state to the cache every N attempts
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    # This struct encapsulates both the display names of each static item in the
    # header row as values as well as the django User field names of those items
    # as the keys. It is structured in this way to keep the values related.
    header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
    try:
        course_structure = CourseStructure.objects.get(course_id=course_id)
        blocks = course_structure.ordered_blocks
        problems = _order_problems(blocks)
    except CourseStructure.DoesNotExist:
        # The cached course structure has not been generated yet; report that
        # as the task's final state instead of failing outright.
        return task_progress.update_task_state(
            extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
        )
    # Just generate the static fields for now.
    rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
    error_rows = [list(header_row.values()) + ['error_msg']]
    current_step = {'step': 'Calculating Grades'}
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
        # Pull the static identity values in the same order as header_row's keys.
        student_fields = [getattr(student, field_name) for field_name in header_row]
        task_progress.attempted += 1
        if 'percent' not in gradeset or 'raw_scores' not in gradeset:
            # There was an error grading this student.
            # Generally there will be a non-empty err_msg, but that is not always the case.
            if not err_msg:
                err_msg = u"Unknown error"
            error_rows.append(student_fields + [err_msg])
            task_progress.failed += 1
            continue
        final_grade = gradeset['percent']
        # Only consider graded problems
        problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
        earned_possible_values = list()
        for problem_id in problems:
            try:
                problem_score = problem_scores[problem_id]
                earned_possible_values.append([problem_score.earned, problem_score.possible])
            except KeyError:
                # The student has not been graded on this problem. For example,
                # iterate_grades_for skips problems that students have never
                # seen in order to speed up report generation. It could also be
                # the case that the student does not have access to it (e.g. A/B
                # test or cohorted courseware).
                earned_possible_values.append(['N/A', 'N/A'])
        rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))
        task_progress.succeeded += 1
        # Periodically update task status (this is a cache write).
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
    # Perform the upload if any students have been successfully graded
    if len(rows) > 1:
        upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
    # If there are any error rows, write them out as well
    if len(error_rows) > 1:
        upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
    return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating Profile Info'})
    # Build the requested profile-feature table for every enrolled student.
    requested_features = task_input.get('features')
    profile_data = enrolled_students_features(course_id, requested_features)
    csv_header, csv_rows = format_dictlist(profile_data, requested_features)
    row_count = len(csv_rows)
    task_progress.attempted = task_progress.succeeded = row_count
    task_progress.skipped = task_progress.total - row_count
    csv_rows.insert(0, csv_header)
    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)
    # Hand the assembled rows off to the report store.
    upload_csv_to_report_store(csv_rows, 'student_profile_info', course_id, start_date)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.

    Returns the final TaskProgress state dict.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # flush task state to the cache every N students
    students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
    # Issue the COUNT query exactly once and reuse the value; the previous
    # implementation ran it twice (for TaskProgress and again for logging).
    total_students = students_in_course.count()
    task_progress = TaskProgress(action_name, total_students, start_time)
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    # display name map for the column headers. Hoisted out of the per-student
    # loop: it is loop-invariant and was previously rebuilt for every student.
    enrollment_report_headers = {
        'User ID': _('User ID'),
        'Username': _('Username'),
        'Full Name': _('Full Name'),
        'First Name': _('First Name'),
        'Last Name': _('Last Name'),
        'Company Name': _('Company Name'),
        'Title': _('Title'),
        'Language': _('Language'),
        'Year of Birth': _('Year of Birth'),
        'Gender': _('Gender'),
        'Level of Education': _('Level of Education'),
        'Mailing Address': _('Mailing Address'),
        'Goals': _('Goals'),
        'City': _('City'),
        'Country': _('Country'),
        'Enrollment Date': _('Enrollment Date'),
        'Currently Enrolled': _('Currently Enrolled'),
        'Enrollment Source': _('Enrollment Source'),
        'Enrollment Role': _('Enrollment Role'),
        'List Price': _('List Price'),
        'Payment Amount': _('Payment Amount'),
        'Coupon Codes Used': _('Coupon Codes Used'),
        'Registration Code Used': _('Registration Code Used'),
        'Payment Status': _('Payment Status'),
        'Transaction Reference Number': _('Transaction Reference Number')
    }
    # Loop over all our students and build our CSV lists in memory
    rows = []
    header = None
    current_step = {'step': 'Gathering Profile Information'}
    enrollment_report_provider = PaidCourseEnrollmentReportProvider()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_students
    )
    for student in students_in_course:
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1
        # Now add a log entry after certain intervals to get a hint that task is in progress
        student_counter += 1
        if student_counter % 100 == 0:
            TASK_LOG.info(
                u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
                task_info_string,
                action_name,
                current_step,
                student_counter,
                total_students
            )
        user_data = enrollment_report_provider.get_user_profile(student.id)
        course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
        payment_data = enrollment_report_provider.get_payment_info(student, course_id)
        if not header:
            # Build the localized header row lazily from the first student's
            # combined data keys.
            header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
            display_headers = []
            for header_element in header:
                # translate header into a localizable display string
                display_headers.append(enrollment_report_headers.get(header_element, header_element))
            rows.append(display_headers)
        rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
        task_progress.succeeded += 1
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_students
    )
    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
    # Perform the actual upload
    upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing
    information about students who may enroll but have not done so
    yet, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    task_progress = TaskProgress(action_name, 1, start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating info about students who may enroll'})
    # Collect the requested features for every student who could enroll.
    requested_features = task_input.get('features')
    candidate_data = list_may_enroll(course_id, requested_features)
    csv_header, csv_rows = format_dictlist(candidate_data, requested_features)
    row_count = len(csv_rows)
    task_progress.attempted = task_progress.succeeded = row_count
    task_progress.skipped = task_progress.total - row_count
    csv_rows.insert(0, csv_header)
    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)
    # Hand the assembled rows off to the report store.
    upload_csv_to_report_store(csv_rows, 'may_enroll_info', course_id, start_date)
    return task_progress.update_task_state(extra_meta=current_step)
def get_executive_report(course_id):
    """
    Returns dict containing information about the course executive summary.

    Aggregates revenue, refund, seat-count, and discount-code figures for the
    course from the e-commerce models, plus basic course metadata (display
    name, start/end dates, currency).
    """
    # Gross paid revenue: single-seat purchases + bulk (reg-code) purchases +
    # invoices that have actually been paid.
    single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
    bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
    paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
    gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total
    # Pending revenue: everything invoiced minus what has been paid.
    # NOTE(review): the targeted float() coercions suggest these totals mix
    # Decimal and float types -- confirm before changing any of this arithmetic.
    all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
    gross_pending_revenue = all_invoices_total - float(paid_invoices_total)
    gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)
    # Refund counts and amounts, split by purchase channel (self vs bulk).
    refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
        course_id, status='refunded'
    )
    refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
        course_id, status='refunded'
    )
    total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats
    self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
        course_id,
        status='refunded'
    )
    bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
    total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds
    # Discount-code usage statistics.
    top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
    total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)
    # Count bulk-purchased registration codes that were never redeemed.
    bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)
    unused_registration_codes = 0
    for registration_code in bulk_purchased_codes:
        if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
            unused_registration_codes += 1
    # Seat counts by purchase channel, and the derived percentage breakdown.
    self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
    bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
    total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()
    total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats
    # Default all ratios to 0.0 to guard against division by zero when the
    # course has no purchased seats.
    self_purchases_percentage = 0.0
    bulk_purchases_percentage = 0.0
    invoice_purchases_percentage = 0.0
    avg_price_paid = 0.0
    if total_seats != 0:
        self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
        bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
        invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
        avg_price_paid = gross_revenue / total_seats
    course = get_course_by_id(course_id, depth=0)
    # presumably PAID_COURSE_REGISTRATION_CURRENCY is a (code, display) pair
    # and index 1 is the display value -- confirm against settings.
    currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
    return {
        'display_name': course.display_name,
        'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
        'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
        'total_seats': total_seats,
        'currency': currency,
        'gross_revenue': float(gross_revenue),
        'gross_paid_revenue': float(gross_paid_revenue),
        'gross_pending_revenue': gross_pending_revenue,
        'total_seats_refunded': total_seats_refunded,
        'total_amount_refunded': float(total_amount_refunded),
        'average_paid_price': float(avg_price_paid),
        'discount_codes_data': top_discounted_codes,
        'total_seats_using_discount_codes': total_coupon_codes_purchases,
        'total_self_purchase_seats': self_purchased_seat_count,
        'total_bulk_purchase_seats': bulk_purchased_seat_count,
        'total_invoiced_seats': total_invoiced_seats,
        'unused_bulk_purchase_code_count': unused_registration_codes,
        'self_purchases_percentage': self_purchases_percentage,
        'bulk_purchases_percentage': bulk_purchases_percentage,
        'invoice_purchases_percentage': invoice_purchases_percentage,
    }
def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=invalid-name
    """
    For a given `course_id`, generate a html report containing information,
    which provides a snapshot of how the course is doing.
    """
    start_time = time()
    report_generation_date = datetime.now(UTC)
    status_interval = 100
    enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
    # Count enrollments, excluding staff and anyone holding a filtered-out
    # course role. The staff check runs first to avoid a needless role query.
    true_enrollment_count = 0
    for user in enrolled_users:
        if user.is_staff:
            continue
        if CourseAccessRole.objects.filter(
                user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
        ).exists():
            continue
        true_enrollment_count += 1
    task_progress = TaskProgress(action_name, true_enrollment_count, start_time)
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    current_step = {'step': 'Gathering executive summary report information'}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating executive summary report',
        task_info_string,
        action_name,
        current_step
    )
    if task_progress.attempted % status_interval == 0:
        task_progress.update_task_state(extra_meta=current_step)
    task_progress.attempted += 1
    # Assemble the executive summary data and stamp it with the enrollment
    # count and generation date.
    data_dict = get_executive_report(course_id)
    data_dict.update({
        'total_enrollments': true_enrollment_count,
        'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
    })
    current_step = {'step': 'Uploading executive summary report HTML file'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
    # Render and store the HTML report.
    upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
    task_progress.succeeded += 1
    TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_proctored_exam_results_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=invalid-name
    """
    For a given `course_id`, generate a CSV file containing
    information about proctored exam results, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    task_progress = TaskProgress(action_name, 1, start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating info about proctored exam results in a course'})
    # Fetch the proctored exam results and shape them into header + rows.
    requested_features = _task_input.get('features')
    exam_results = get_proctored_exam_results(course_id, requested_features)
    csv_header, csv_rows = format_dictlist(exam_results, requested_features)
    row_count = len(csv_rows)
    task_progress.attempted = task_progress.succeeded = row_count
    task_progress.skipped = task_progress.total - row_count
    csv_rows.insert(0, csv_header)
    current_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)
    # Hand the assembled rows off to the report store.
    upload_csv_to_report_store(csv_rows, 'proctored_exam_results_report', course_id, start_date)
    return task_progress.update_task_state(extra_meta=current_step)
def generate_students_certificates(
        _xmodule_instance_args, _entry_id, course_id, task_input, action_name):  # pylint: disable=unused-argument
    """
    For a given `course_id`, generate certificates for all students
    that are enrolled.
    """
    start_time = time()
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating students already have certificates'})
    # Anyone who already holds a certificate is counted as skipped.
    students_require_certs = students_require_certificate(course_id, enrolled_students)
    task_progress.skipped = task_progress.total - len(students_require_certs)
    current_step = {'step': 'Generating Certificates'}
    task_progress.update_task_state(extra_meta=current_step)
    course = modulestore().get_course(course_id, depth=0)
    # Generate a certificate for each remaining student, tallying outcomes.
    success_statuses = (CertificateStatuses.generating, CertificateStatuses.downloadable)
    for student in students_require_certs:
        task_progress.attempted += 1
        status = generate_user_certificates(student, course_id, course=course)
        if status in success_statuses:
            task_progress.succeeded += 1
        else:
            task_progress.failed += 1
    return task_progress.update_task_state(extra_meta=current_step)
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Within a given course, cohort students in bulk, then upload the results
    using a `ReportStore`.

    `task_input['file_name']` names a CSV (read through `DefaultStorage`)
    with 'email'/'username' and 'cohort' columns.  The CSV is read twice:
    once to count rows for progress reporting, once to do the work.
    Returns the final `TaskProgress` state.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    # Iterate through rows to get total assignments for task progress
    with DefaultStorage().open(task_input['file_name']) as f:
        total_assignments = 0
        for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
            total_assignments += 1
    task_progress = TaskProgress(action_name, total_assignments, start_time)
    current_step = {'step': 'Cohorting Students'}
    task_progress.update_task_state(extra_meta=current_step)
    # cohorts_status is a mapping from cohort_name to metadata about
    # that cohort. The metadata will include information about users
    # successfully added to the cohort, users not found, and a cached
    # reference to the corresponding cohort object to prevent
    # redundant cohort queries.
    cohorts_status = {}
    with DefaultStorage().open(task_input['file_name']) as f:
        for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
            # Try to use the 'email' field to identify the user. If it's not present, use 'username'.
            username_or_email = row.get('email') or row.get('username')
            cohort_name = row.get('cohort') or ''
            task_progress.attempted += 1
            # First time we see this cohort name: look it up once and cache
            # both the object and whether it exists.
            if not cohorts_status.get(cohort_name):
                cohorts_status[cohort_name] = {
                    'Cohort Name': cohort_name,
                    'Students Added': 0,
                    'Students Not Found': set()
                }
                try:
                    cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
                        course_id=course_id,
                        group_type=CourseUserGroup.COHORT,
                        name=cohort_name
                    )
                    cohorts_status[cohort_name]["Exists"] = True
                except CourseUserGroup.DoesNotExist:
                    cohorts_status[cohort_name]["Exists"] = False
            # Rows naming a nonexistent cohort are counted as failures.
            if not cohorts_status[cohort_name]['Exists']:
                task_progress.failed += 1
                continue
            try:
                # NOTE(review): commit_on_success is the pre-Django-1.8
                # transaction API; each add is committed atomically.
                with transaction.commit_on_success():
                    add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
                cohorts_status[cohort_name]['Students Added'] += 1
                task_progress.succeeded += 1
            except User.DoesNotExist:
                cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
                task_progress.failed += 1
            except ValueError:
                # Raised when the user is already in the given cohort
                task_progress.skipped += 1
            # Progress is persisted after every row so the UI can poll it.
            task_progress.update_task_state(extra_meta=current_step)
    current_step['step'] = 'Uploading CSV'
    task_progress.update_task_state(extra_meta=current_step)
    # Filter the output of `add_users_to_cohorts` in order to upload the result.
    output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
    output_rows = [
        [
            # The "not found" set is flattened to a comma-separated cell.
            ','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
            else status_dict[column_name]
            for column_name in output_header
        ]
        for _cohort_name, status_dict in cohorts_status.iteritems()
    ]
    output_rows.insert(0, output_header)
    upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
    return task_progress.update_task_state(extra_meta=current_step)
def students_require_certificate(course_id, enrolled_students):
    """Return the subset of `enrolled_students` that still need a certificate.

    A student is excluded when they already have a generated certificate for
    `course_id` whose status is anything other than `unavailable`.

    :param course_id: course key whose certificates are checked
    :param enrolled_students: iterable/queryset of User objects
    :returns: list of User objects (order not preserved; computed via set
        difference)
    """
    # compute those students where certificates already generated
    # ~Q(... unavailable): any status except "unavailable" counts as an
    # already-generated certificate for this course.
    students_already_have_certs = User.objects.filter(
        ~Q(generatedcertificate__status=CertificateStatuses.unavailable),
        generatedcertificate__course_id=course_id)
    return list(set(enrolled_students) - set(students_already_have_certs))
| agpl-3.0 |
greenpau/PyIdGen | pyidgen/useraccount.py | 1 | 5391 | # PyIdGen - User Profile Generation Library for Quality Assurance and Information Security Testing
# Copyright (C) 2013 Paul Greenberg <paul@greenberg.pro>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import string;
import hashlib;
class UserAccount:
    """Represents a computer user account.

    Generates a username (from a person's name plus a format template), a
    random password with md5/sha1/sha512 hex digests of it, and an e-mail
    address at a random free-mail domain.

    NOTE: md5/sha1 are exposed because this library produces *test* data;
    they are not suitable for real password storage.
    """

    def __init__(self, uid_fname="John", uid_lname="Doe", uid_format="LLLLLFFN",
                 pwd_length=7, pwd_complexity="High", email_format="F.L",
                 uid=None, pwd=None):
        """Initialize a user account based on the provided criteria.

        `uid_format` is consumed one character at a time:
          'L' -> next unused last-name letter (falls back to first-name
                 letters, then a random digit)
          'F' -> next unused first-name letter (falls back to last-name
                 letters, then a random digit)
          'N' -> a random digit
        Any other template character is ignored.

        If `uid` / `pwd` are supplied they are used verbatim instead of
        being generated.  (Previously the `pwd` argument was accepted but
        silently ignored; it is now honored — backward compatible, since
        the default `None` keeps the old generate-a-password behavior.)
        """
        self.FirstName = uid_fname
        self.LastName = uid_lname
        self.UIDFormat = uid_format
        self.EmailFormat = email_format
        self.PWDLength = pwd_length
        self.PWDStrength = pwd_complexity
        if uid is not None:
            self.uid = uid
        else:
            self._get_uid()
        if pwd is not None:
            # Bug fix: honor an explicitly supplied password.
            self.pwd = pwd
            self._compute_digests()
        else:
            self._get_password()
        # Must run after uid generation: 'U'/'u' e-mail formats use self.uid.
        self._get_email()

    def _get_uid(self):
        """Generate `self.uid` from the name according to `self.UIDFormat`."""
        first = list(self.FirstName)
        last = list(self.LastName)
        last_idx = 0   # next unused character of the last name
        first_idx = 0  # next unused character of the first name
        uid_chars = []
        for spec in self.UIDFormat:
            if spec == 'L':
                if last_idx < len(last):
                    uid_chars.append(last[last_idx])
                    last_idx += 1
                elif first_idx < len(first):
                    uid_chars.append(first[first_idx])
                    first_idx += 1
                else:
                    uid_chars.append(str(random.randint(0, 9)))
            elif spec == 'F':
                if first_idx < len(first):
                    uid_chars.append(first[first_idx])
                    first_idx += 1
                elif last_idx < len(last):
                    uid_chars.append(last[last_idx])
                    last_idx += 1
                else:
                    uid_chars.append(str(random.randint(0, 9)))
            elif spec == 'N':
                uid_chars.append(str(random.randint(0, 9)))
            # Any other template character is ignored (legacy behavior).
        self.uid = ''.join(uid_chars).lower()

    def _get_password(self):
        """Generate `self.pwd` and its digests.

        Complexity levels:
          High   = upper, lower, digits and symbols; length forced to 8-20
          Medium = upper, lower and digits; length forced to 8-20
          any other value = upper, lower and digits; length used as given
        """
        chars_up = string.ascii_uppercase
        chars_low = string.ascii_lowercase
        chars_dgt = string.digits
        chars_sym = '~!@#$%^&*()_-+={}[]\|:;<>?/'
        if self.PWDStrength == 'High':
            chars = chars_up + chars_low + chars_dgt + chars_sym
            if self.PWDLength < 8:
                self.PWDLength = random.randint(8, 20)
        elif self.PWDStrength == 'Medium':
            chars = chars_up + chars_low + chars_dgt
            if self.PWDLength < 8:
                self.PWDLength = random.randint(8, 20)
        else:
            chars = chars_up + chars_low + chars_dgt
        self.pwd = ''.join(random.choice(chars) for _ in range(self.PWDLength))
        self._compute_digests()

    def _compute_digests(self):
        """Compute md5/sha1/sha512 hex digests of `self.pwd`.

        Always encodes to UTF-8 first, which works on both Python 2 and 3
        and replaces the old bare try/except fallbacks.
        """
        encoded = self.pwd.encode('utf-8')
        self.md5 = hashlib.md5(encoded).hexdigest()
        self.sha1 = hashlib.sha1(encoded).hexdigest()
        self.sha512 = hashlib.sha512(encoded).hexdigest()

    def _get_email(self):
        """Generate `self.email` according to `self.EmailFormat`.

        Formats: 'F.L' title-cased First.Last, 'f.l' lower-cased,
        'U'/'u' upper/lower-cased uid, anything else Last.First verbatim.
        """
        email_domains = ['ymail.com','gmail.com','aol.com','icloud.com','outlook.com','hotmail.com']
        domain = random.choice(email_domains)
        if self.EmailFormat == 'F.L':
            self.email = self.FirstName.title() + "." + self.LastName.title() + "@" + domain
        elif self.EmailFormat == 'f.l':
            self.email = self.FirstName.lower() + "." + self.LastName.lower() + "@" + domain
        elif self.EmailFormat == 'U':
            self.email = self.uid.upper() + "@" + domain
        elif self.EmailFormat == 'u':
            self.email = self.uid.lower() + "@" + domain
        else:
            self.email = self.LastName + "." + self.FirstName + "@" + domain
| gpl-3.0 |
olivierverdier/sfepy | tests/test_lcbc_3d.py | 1 | 2359 | # 05.10.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
# Whole domain $Y$.
region_1000 = {
    'name' : 'Y',
    'select' : 'all',
}
# Domain $Y_1$.
region_1 = {
    'name' : 'Y1',
    'select' : 'elements of group 1',
}
# Domain $Y_2$.
region_2 = {
    'name' : 'Y2',
    'select' : 'elements of group 2',
}
# Bottom face of the cube (z ~ -0.5): fixed boundary.
region_10 = {
    'name' : 'Bottom',
    'select' : 'nodes in (z < %f)' % -0.499,
}
# Top face of the cube (z ~ 0.5): loaded boundary.
region_11 = {
    'name' : 'Top',
    'select' : 'nodes in (z > %f)' % 0.499,
}
# Isotropic linear elastic material (Lame parameters lam, mu).
material_1 = {
    'name' : 'solid',
    'values' : {
        'lam' : 1e1,
        'mu' : 1e0,
        'density' : 1e-1,
    },
}
# Vector displacement field, P1 approximation over the whole domain.
field_1 = {
    'name' : '3_displacement',
    'dtype' : 'real',
    'shape' : 'vector',
    'region' : 'Y',
    'approx_order' : 1,
}
variable_1 = {
    'name' : 'u',
    'kind' : 'unknown field',
    'field' : '3_displacement',
    'order' : 0,
}
variable_2 = {
    'name' : 'v',
    'kind' : 'test field',
    'field' : '3_displacement',
    'dual' : 'u',
}
# Essential (Dirichlet) BCs: clamp the bottom, prescribe displacement on top.
ebc_1 = {
    'name' : 'Fix',
    'region' : 'Bottom',
    'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
    'name' : 'Load',
    'region' : 'Top',
    'dofs' : {'u.[0,1]' : 0.2, 'u.2' : 0.5},
}
# Linear combination BC: the x/y DOFs of Y2 move as a rigid body.
lcbc_1 = {
    'name' : 'rigid1',
    'region' : 'Y2',
    'dofs' : {'u.[0,1]' : 'rigid'},
}
## lcbc_2 = {
##     'name' : 'rigid1',
##     'region' : 'Y3',
##     'dofs' : {'u.all' : 'rigid'},
## }
integral_1 = {
    'name' : 'i1',
    'kind' : 'v',
    'quadrature' : 'gauss_o2_d3',
}
# Linear elasticity balance equation in weak form.
equations = {
    'balance' : """dw_lin_elastic_iso.i1.Y( solid.lam, solid.mu, v, u ) = 0""",
}
# Linear solver: direct sparse solve via SciPy.
solver_0 = {
    'name' : 'ls',
    'kind' : 'ls.scipy_direct',
}
# Nonlinear solver: single Newton iteration (the problem is linear).
solver_1 = {
    'name' : 'newton',
    'kind' : 'nls.newton',
    'i_max' : 1,
    'eps_a' : 1e-10,
    'eps_r' : 1.0,
    'macheps' : 1e-16,
    'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
    'ls_red' : 0.1,
    'ls_red_warp' : 0.001,
    'ls_on' : 1.1,
    'ls_min' : 1e-5,
    'check' : 0,
    'delta' : 1e-6,
    'is_plot' : False,
    'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
##
# FE assembling parameters.
fe = {
    'chunk_size' : 1000
}
from testsBasic import TestLCBC
output_name = 'test_lcbc_3d.vtk'
##
# 03.10.2007, c
# The actual test logic lives in TestLCBC; this module only supplies the
# problem definition above.
class Test( TestLCBC ):
    pass
| bsd-3-clause |
jelly/calibre | src/calibre/utils/podofo/__init__.py | 2 | 8030 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, shutil
from calibre.constants import plugins, preferred_encoding
from calibre.ebooks.metadata import authors_to_string
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.ipc.simple_worker import fork_job, WorkerError
def get_podofo():
    """Return the loaded podofo plugin, raising RuntimeError if it is unavailable."""
    plugin, load_error = plugins['podofo']
    if plugin is None:
        raise RuntimeError('Failed to load podofo: %s' % load_error)
    return plugin
def prep(val):
    """Normalize a metadata value to a stripped unicode string.

    Falsy input (None, '', 0) yields the empty unicode string; byte strings
    are decoded using the preferred encoding with replacement.
    """
    if not val:
        return u''
    if isinstance(val, unicode):
        return val.strip()
    return val.decode(preferred_encoding, 'replace').strip()
def set_metadata(stream, mi):
    """Write the metadata in `mi` into the PDF open in `stream` (in place).

    The PDF is copied to a temporary directory and the actual work is done
    by `set_metadata_` in a forked worker (podofo can crash on malformed
    PDFs, so it is kept out of the main process).  On success the worker's
    output.pdf replaces the contents of `stream`.
    """
    with TemporaryDirectory(u'_podofo_set_metadata') as tdir:
        with open(os.path.join(tdir, u'input.pdf'), 'wb') as f:
            shutil.copyfileobj(stream, f)
        from calibre.ebooks.metadata.xmp import metadata_to_xmp_packet
        xmp_packet = metadata_to_xmp_packet(mi)
        try:
            result = fork_job('calibre.utils.podofo', 'set_metadata_', (tdir,
                mi.title, mi.authors, mi.book_producer, mi.tags, xmp_packet))
            touched = result['result']
        except WorkerError as e:
            raise Exception('Failed to set PDF metadata in (%s): %s'%(mi.title, e.orig_tb))
        if touched:
            with open(os.path.join(tdir, u'output.pdf'), 'rb') as f:
                # Sanity check: only accept the worker's output if it is
                # larger than 100 bytes (guards against a truncated write).
                f.seek(0, 2)
                if f.tell() > 100:
                    f.seek(0)
                    stream.seek(0)
                    stream.truncate()
                    shutil.copyfileobj(f, stream)
                    stream.flush()
                    stream.seek(0)
def set_metadata_(tdir, title, authors, bkp, tags, xmp_packet):
    """Worker body for `set_metadata` (runs in a forked process).

    Opens tdir/input.pdf, updates the Info dictionary fields (title, author,
    creator, keywords) and the XMP packet, and writes tdir/output.pdf only
    if something actually changed.  Returns True when output.pdf was written.
    """
    podofo = get_podofo()
    os.chdir(tdir)
    p = podofo.PDFDoc()
    p.open(u'input.pdf')
    title = prep(title)
    touched = False
    # Each field is only written when it differs, so an unchanged PDF is
    # left untouched (touched stays False and no output.pdf is produced).
    if title and title != p.title:
        p.title = title
        touched = True
    author = prep(authors_to_string(authors))
    if author and author != p.author:
        p.author = author
        touched = True
    bkp = prep(bkp)
    if bkp and bkp != p.creator:
        p.creator = bkp
        touched = True
    # NOTE(review): the bare excepts below deliberately make keyword/XMP
    # updates best-effort — a failure there must not abort the Info update.
    try:
        tags = prep(u', '.join([x.strip() for x in tags if x.strip()]))
        if tags != p.keywords:
            p.keywords = tags
            touched = True
    except:
        pass
    try:
        current_xmp_packet = p.get_xmp_metadata()
        if current_xmp_packet:
            # Merge with the existing packet so unrelated XMP data survives.
            from calibre.ebooks.metadata.xmp import merge_xmp_packet
            xmp_packet = merge_xmp_packet(current_xmp_packet, xmp_packet)
        p.set_xmp_metadata(xmp_packet)
        touched = True
    except:
        pass
    if touched:
        p.save(u'output.pdf')
    return touched
def delete_all_but(path, pages):
    ''' Delete all the pages in the pdf except for the specified ones. Negative
    numbers are counted from the end of the PDF. '''
    podofo = get_podofo()
    p = podofo.PDFDoc()
    with open(path, 'rb') as f:
        raw = f.read()
    p.load(raw)
    total = p.page_count()
    # Normalize negative indices to absolute page numbers.
    pages = {total + x if x < 0 else x for x in pages}
    # Iterate backwards so deletions do not shift the indices of pages
    # that have not been visited yet.
    for page in xrange(total-1, -1, -1):
        if page not in pages:
            p.delete_page(page)
    # Bug fix: the document must be saved via the PDFDoc —
    # p.save_to_fileobj(f) — not via the plain file object, which has no
    # such method (the old code would raise AttributeError after having
    # already truncated the file, destroying it).
    with open(path, 'wb') as f:
        p.save_to_fileobj(f)
def get_xmp_metadata(path):
    """Return the raw XMP metadata packet of the PDF at `path`."""
    doc = get_podofo().PDFDoc()
    with open(path, 'rb') as pdf_file:
        doc.load(pdf_file.read())
    return doc.get_xmp_metadata()
def get_image_count(path):
    """Return the number of images in the PDF at `path`."""
    doc = get_podofo().PDFDoc()
    with open(path, 'rb') as pdf_file:
        doc.load(pdf_file.read())
    return doc.image_count()
def test_outline(src):
    """Manual smoke test: add a one-entry-per-page outline to the PDF `src`
    and write the result to /tmp/outlined.pdf."""
    podofo = get_podofo()
    p = podofo.PDFDoc()
    with open(src, 'rb') as f:
        raw = f.read()
    p.load(raw)
    total = p.page_count()
    root = p.create_outline(u'Table of Contents')
    for i in xrange(0, total):
        root.create(u'Page %d'%i, i, True)
    raw = p.write()
    out = '/tmp/outlined.pdf'
    with open(out, 'wb') as f:
        f.write(raw)
    print 'Outlined PDF:', out
def test_save_to(src, dest):
    """Manual smoke test: round-trip `src` through podofo and save to `dest`."""
    podofo = get_podofo()
    p = podofo.PDFDoc()
    with open(src, 'rb') as f:
        raw = f.read()
    p.load(raw)
    with open(dest, 'wb') as out:
        p.save_to_fileobj(out)
    print ('Wrote PDF of size:', out.tell())
def test_podofo():
    """Self-test: set title/author and an XMP packet on an embedded minimal
    PDF, write it out, reload it, and verify the metadata survived."""
    from io import BytesIO
    from calibre.ebooks.metadata.book.base import Metadata
    from calibre.ebooks.metadata.xmp import metadata_to_xmp_packet
    # Minimal one-page PDF (with Info dict and XMP stream) used as fixture.
    raw = b"%PDF-1.1\n%\xe2\xe3\xcf\xd3\n1 0 obj<</Type/Catalog/Metadata 6 0 R/Pages 2 0 R>>\nendobj\n2 0 obj<</Type/Pages/Count 1/Kids[ 3 0 R]/MediaBox[ 0 0 300 144]>>\nendobj\n3 0 obj<</Type/Page/Contents 4 0 R/Parent 2 0 R/Resources<</Font<</F1<</Type/Font/BaseFont/Times-Roman/Subtype/Type1>>>>>>>>\nendobj\n4 0 obj<</Length 55>>\nstream\n BT\n /F1 18 Tf\n 0 0 Td\n (Hello World) Tj\n ET\nendstream\nendobj\n5 0 obj<</Author(\xfe\xff\x00U\x00n\x00k\x00n\x00o\x00w\x00n)/CreationDate(D:20140919134038+05'00')/Producer(PoDoFo - http://podofo.sf.net)/Title(\xfe\xff\x00n\x00e\x00w\x00t)>>\nendobj\n6 0 obj<</Type/Metadata/Filter/FlateDecode/Length 584/Subtype/XML>>\nstream\nx\x9c\xed\x98\xcd\xb2\x930\x14\xc7\xf7}\n&.\x1d\x1ahoGa\x80\x8e\xb6\xe3x\x17ua\xaf\xe3\xd2\t\xc9i\x1b\x0b\x81&a\xc0\xfbj.|$_\xc1\xd0r\xe9\xb7V\x9d\xbb\x83\x15\x9c\x9c\xff\xff\x97\x8fs\xb2 \x18W9\xa1k\xd0V\x0cK.B\xf4\xf3\xfb\x0fdq\x16\xa2\xcf\xa3\x993\xcb'\xb0\xe2\xef\x1f%\xcc\x1f?<\xd0\xc75\xf5\x18\x1aG\xbd\xa0\xf2\xab4OA\x13\xabJ\x13\xa1\xfc*D\x84e1\xf8\xe6\xbd\x0ec\x14\xf5,+\x90l\xe1\x7f\x9c\xbek\x92\xccW\x88VZ\xe7>\xc6eY\xf6\xcba?\x93K\xecz\x9e\x87\x9d\x01\x1e\x0cl\x93a\xaboB\x93\xca\x16\xea\xc5\xd6\xa3q\x99\x82\xa2\x92\xe7\x9ag\xa2qc\xb45\xcb\x0b\x99l\xad\x18\xc5\x90@\nB+\xec\xf6]\x8c\xacZK\xe2\xac\xd0!j\xec\x8c!\xa3>\xdb\xfb=\x85\x1b\xd2\x9bD\xef#M,\xe15\xd4O\x88X\x86\xa8\xb2\x19,H\x91h\x14\x05x7z`\x81O<\x02|\x99VOBs\x9d\xc0\x7f\xe0\x05\x94\xfa\xd6)\x1c\xb1jx^\xc4\tW+\x90'\x13xK\x96\xf8Hy\x96X\xabU\x11\x7f\x05\xaa\xff\xa4=I\xab\x95T\x02\xd1\xd9)u\x0e\x9b\x0b\xcb\x8e>\x89\xb5\xc8Jqm\x91\x07\xaa-\xee\xc8{\x972=\xdd\xfa+\xe5d\xea\xb9\xad'\xa1\xfa\xdbj\xee\xd3,\xc5\x15\xc9M-9\xa6\x96\xdaD\xce6Wr\xd3\x1c\xdf3S~|\xc1A\xe2MA\x92F{\xb1\x0eM\xba?3\xdd\xc2\x88&S\xa2!\x1a8\xee\x9d\xedx\xb6\xeb=\xb8C\xff\xce\xf1\x87\xaf\xfb\xde\xe0\xd5\xc8\xf3^:#\x7f\xe8\x04\xf8L\xf2\x0fK\xcd%W\xe9\xbey\xea/\xa5\x89`D\xb2m\x17\t\x92\x822\xb7\x02(\x1c\x13\xc5)\x1e\x9c-\x01\xff\x1e\xc0\x16\xd5\xe5\r\xaaG\xcc\x8e\x0c\xff\xca\x8e\x92\
x84\xc7\x12&\x93\xd6\xb3\x89\xd8\x10g\xd9\xfai\xe7\xedv\xde6-\x94\xceR\x9bfI\x91\n\x85\x8e}nu9\x91\xcd\xefo\xc6+\x90\x1c\x94\xcd\x05\x83\xea\xca\xd17\x16\xbb\xb6\xfc\xa22\xa9\x9bn\xbe0p\xfd\x88wAs\xc3\x9a+\x19\xb7w\xf2a#=\xdf\xd3A:H\x07\xe9 \x1d\xa4\x83t\x90\x0e\xd2A:H\x07yNH/h\x7f\xd6\x80`!*\xd18\xfa\x05\x94\x80P\xb0\nendstream\nendobj\nxref\n0 7\n0000000000 65535 f \n0000000015 00000 n \n0000000074 00000 n \n0000000148 00000 n \n0000000280 00000 n \n0000000382 00000 n \n0000000522 00000 n \ntrailer\n<</ID[<4D028D512DEBEFD964756764AD8FF726><4D028D512DEBEFD964756764AD8FF726>]/Info 5 0 R/Root 1 0 R/Size 7>>\nstartxref\n1199\n%%EOF\n" # noqa
    mi = Metadata(u'title1', [u'author1'])
    xmp_packet = metadata_to_xmp_packet(mi)
    podofo = get_podofo()
    p = podofo.PDFDoc()
    p.load(raw)
    p.title = mi.title
    p.author = mi.authors[0]
    p.set_xmp_metadata(xmp_packet)
    buf = BytesIO()
    p.save_to_fileobj(buf)
    raw = buf.getvalue()
    # Reload the written bytes and verify both Info dict and XMP round-trip.
    p = podofo.PDFDoc()
    p.load(raw)
    if (p.title, p.author) != (mi.title, mi.authors[0]):
        raise ValueError('podofo failed to set title and author in Info dict')
    if not p.get_xmp_metadata():
        raise ValueError('podofo failed to write XMP packet')
if __name__ == '__main__':
    # CLI helper: dump nothing, just exercise XMP extraction on the given PDF
    # (the return value is discarded; useful only for crash-testing).
    import sys
    get_xmp_metadata(sys.argv[-1])
| gpl-3.0 |
jhsenjaliya/incubator-airflow | airflow/migrations/versions/4446e08588_dagrun_start_end.py | 60 | 1151 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dagrun start end
Revision ID: 4446e08588
Revises: 561833c1c74b
Create Date: 2015-12-10 11:26:18.439223
"""
# revision identifiers, used by Alembic.
revision = '4446e08588'
down_revision = '561833c1c74b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add nullable start_date/end_date datetime columns to dag_run."""
    op.add_column('dag_run', sa.Column('end_date', sa.DateTime(), nullable=True))
    op.add_column('dag_run', sa.Column('start_date', sa.DateTime(), nullable=True))
def downgrade():
    """Drop the start_date/end_date columns added by upgrade()."""
    op.drop_column('dag_run', 'start_date')
    op.drop_column('dag_run', 'end_date')
| apache-2.0 |
PorthTechnolegauIaith/moses-smt | scripts/python-server.py | 1 | 11166 | #!/usr/bin/env python
import os
import sys
import threading
import subprocess
import cherrypy
import json
import itertools
import logging
import time
import re
import xmlrpclib
import math
from threading import Timer
def popen(cmd):
    """Split `cmd` into argv, log it, and launch it with piped stdin/stdout."""
    argv = cmd.split()
    log = logging.getLogger('translation_log.popen')
    log.info("executing: %s" % (" ".join(argv)))
    return subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def pclose(pipe):
    """Terminate `pipe`, with a 5 second watchdog that kills it if
    termination does not complete in time."""
    watchdog = Timer(5., pipe.kill)
    watchdog.start()
    pipe.terminate()
    watchdog.cancel()
def init_log(filename):
    """Attach a DEBUG-level file handler (timestamp/thread/location format)
    to the 'translation_log' logger."""
    logger = logging.getLogger('translation_log')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename)
    handler.setLevel(logging.DEBUG)
    logformat = '%(asctime)s %(thread)d - %(filename)s:%(lineno)s: %(message)s'
    handler.setFormatter(logging.Formatter(logformat))
    logger.addHandler(handler)
class Filter(object):
    """Chain of text clean-up filters for translation requests/responses.

    Optionally flattens newlines to spaces and collapses whitespace runs
    (also removing a space left before ' , or .).
    """

    # Precompiled patterns (raw strings: the old non-raw '\s' literals are
    # invalid escape sequences on modern Python; behavior is unchanged).
    _MULTISPACE_RE = re.compile(r'\s\s+')
    _SPACE_BEFORE_PUNCT_RE = re.compile(r"\s([',.])")

    def __init__(self, remove_newlines=True, collapse_spaces=True):
        """Build the filter chain; each flag enables one transformation."""
        self.filters = []
        if remove_newlines:
            self.filters.append(self.__remove_newlines)
        if collapse_spaces:
            self.filters.append(self.__collapse_spaces)

    def filter(self, s):
        """Apply the configured filters to `s` in order and return the result."""
        for f in self.filters:
            s = f(s)
        return s

    def __remove_newlines(self, s):
        # CRLF first, so a "\r\n" pair becomes a single space.
        s = s.replace('\r\n', ' ')
        s = s.replace('\n', ' ')
        return s

    def __collapse_spaces(self, s):
        s = self._MULTISPACE_RE.sub(' ', s)
        s = self._SPACE_BEFORE_PUNCT_RE.sub(r'\1', s)
        return s
def json_error(status, message, traceback, version):
    """CherryPy error-page hook: render the error details as pretty JSON."""
    payload = {
        "status": status,
        "message": message,
        "traceback": traceback,
        "version": version,
    }
    return json.dumps(payload, sort_keys=True, indent=4)
class ExternalProcessor(object):
    """Thread-safe wrapper around an external line-filter process.

    Writes one utf-8 encoded line to the child's stdin and reads one line
    back from its stdout.  A `None` command makes process() a pass-through.
    """

    def __init__(self, cmd):
        self.cmd = cmd
        if self.cmd is not None:
            self.proc = popen(cmd)
        self._lock = threading.Lock()

    def process(self, line):
        """Pipe `line` through the child process; return its stripped reply."""
        if self.cmd is None:
            return line
        encoded = (u"%s\n" % line).encode("utf-8")
        result = encoded  # fallback: return input
        # One request/response pair at a time, so replies cannot interleave.
        with self._lock:
            self.proc.stdin.write(encoded)
            self.proc.stdin.flush()
            result = self.proc.stdout.readline()
        return result.decode("utf-8").strip()
        # should be rstrip but normalize_punctiation.perl inserts space
        # for lines starting with '('
class Root(object):
    """CherryPy application exposing a Moses translation pipeline.

    Pipeline per request: filter -> lowercase -> tokenize (perl script) ->
    Moses XML-RPC translate -> recase XML-RPC -> detokenize -> detruecase ->
    filter, returned as Google-Translate-style JSON (optionally JSONP).
    """
    def __init__(self, moses_home, moses_url, recaser_url, slang, tlang, pretty=False, verbose=0, timeout=-1):
        self.filter = Filter(remove_newlines=True, collapse_spaces=True)
        self.moses_url = moses_url
        self.recaser_url = recaser_url
        self.pretty = bool(pretty)
        self.timeout = timeout
        self.verbose = verbose
        # Each perl helper runs in line-buffered mode (-b) as a co-process.
        tokenizer = ['perl',os.path.join(moses_home,"mosesdecoder","scripts","tokenizer","tokenizer.perl"),"-b","-X","-l",slang,'-a']
        detokenizer = ['perl',os.path.join(moses_home,"mosesdecoder","scripts","tokenizer","detokenizer.perl"),"-b","-l",tlang]
        detruecaser = ['perl',os.path.join(moses_home,"mosesdecoder","scripts","recaser","detruecase.perl"),"-b"]
        self._tokenizer = map(ExternalProcessor, [u' '.join(tokenizer)])
        self._detokenizer = map(ExternalProcessor,[u' '.join(detokenizer)])
        self._detruecaser = map(ExternalProcessor,[u' '.join(detruecaser)])
        # NOTE(review): these instance attributes shadow the tokenize() and
        # detokenize() methods defined below — the methods are dead code;
        # all calls resolve to these _exec closures.
        self.tokenize = self._exec(self._tokenizer)
        self.detokenize = self._exec(self._detokenizer)
        self.detruecase = self._exec(self._detruecaser)
    def _exec(self, procs):
        """Return a closure that pipes a line through `procs` in order."""
        def f(line):
            for proc in procs:
                line = proc.process(line)
            return line
        return f
    def _timeout_error(self, q, location):
        """Build the JSON error body returned when the MT engine times out."""
        errors = [{"originalquery":q, "location" : location}]
        message = "Timeout after %ss" %self.timeout
        return {"error": {"errors":errors, "code":400, "message":message}}
    def _dump_json(self, data):
        # pretty flag controls indentation; a trailing newline is always added.
        if self.pretty:
            return json.dumps(data, indent=2) + "\n"
        return json.dumps(data) + "\n"
    def _load_json(self, string):
        return json.loads(string)
    # NOTE(review): shadowed by the instance attribute set in __init__ —
    # never actually called (and would recurse infinitely if it were).
    def tokenize(self, sentence):
        sentence_tokenized = self.tokenize(sentence)
        return sentence_tokenized
    # NOTE(review): shadowed by the instance attribute set in __init__.
    def detokenize(self, sentence):
        sentence_detokenized = self.detokenize(sentence)
        return sentence_detokenized
    def _translate(self, source):
        """ wraps the actual translate call to mosesserver via XMLPRC """
        proxy = xmlrpclib.ServerProxy(self.moses_url)
        params = {"text":source}
        return proxy.translate(params)
    def _recaser(self, sentence):
        """Call the recaser XML-RPC service on a translated sentence."""
        proxy=xmlrpclib.ServerProxy(self.recaser_url)
        params = {"text":sentence}
        return proxy.translate(params)
    @cherrypy.expose
    def translate(self, **kwargs):
        """HTTP endpoint: translate kwargs['q'], wrapping the JSON reply in
        the JSONP callback named by kwargs['callback'] when provided."""
        response = cherrypy.response
        response.headers['Content-Type'] = 'application/json'
        q = self.filter.filter(kwargs["q"])
        callback = kwargs["callback"]
        raw_src = q
        self.log("The server is working on: %s" %repr(raw_src))
        self.log_info("Request before preprocessing: %s" %repr(raw_src))
        translationDict = {"sourceText":raw_src.strip()}
        lower_src = raw_src.lower()
        tokenized_src = self.tokenize(lower_src)
        translation = ''
        # query MT engine
        self.log_info("Requesting translation for %s" % repr(tokenized_src))
        result = self._translate(tokenized_src)
        if 'text' in result:
            translation = result['text']
        else:
            # No 'text' key is treated as an engine timeout.
            return self._timeout_error(tokenized_src, 'translation')
        self.log_info("Received translation: %s" % repr(translation))
        #
        # Recase the translation; fall back to the uncased text on failure.
        recased_result = self._recaser(translation)
        if 'text' in recased_result:
            recased_trans=recased_result['text']
        else:
            recased_trans=translation
        detokenized_trans = self.detokenize(recased_trans)
        detruecased_trans = self.detruecase(detokenized_trans)
        translatedText = self.filter.filter(detruecased_trans)
        translationDict = {"translatedText":translatedText}
        data = {"data" : {"translations" : [translationDict]}}
        self.log("The server is returning: %s" %self._dump_json(data))
        if callback:
            return callback + "(" + self._dump_json(data) + ");"
        else:
            return self._dump_json(data)
    def log_info(self, message):
        # Verbosity-gated logging; self.verbose 0 silences INFO details.
        if self.verbose > 0:
            self.log(message, level=logging.INFO)
    def log(self, message, level=logging.INFO):
        # NOTE(review): `level` is accepted but ignored — always logs INFO.
        logger = logging.getLogger('translation_log.info')
        logger.info(message)
    @cherrypy.expose
    def index(self):
        """HTTP endpoint: static AngularJS demo page that calls /translate."""
        return """
<html>
  <head>
    <script src="http://ajax.googleapis.com/ajax/libs/angularjs/1.4.2/angular.js"></script>
    <script src="http://ajax.googleapis.com/ajax/libs/angularjs/1.4.2/angular-resource.js"></script>
    <script>
     var app = angular.module("MosesApp", ["ngResource"]);
     app.controller('MosesCtrl', function($scope, $http) {
        $scope.loading = false;
        $scope.translate = function(engine, sourcelanguage, targetlanguage) {
           $scope.loading=true;
           $translatedText="";
           $http.jsonp('http://localhost:' + location.port + '/translate?callback=JSON_CALLBACK', {
              params: {
                 q:$scope.sourceText
              }
           })
           .success(function(response){
               $scope.translatedText=response.data.translations[0].translatedText;
               $scope.loading=false;
           })
           .error(function(response){
               $scope.loading=false;
               console.log("Error");
           });
        };
     });
    </script>
    <style>
     .logos {
         background-color: #333333;
         height: 90px;
     }
     .uti {
         float: left;
         padding-left: 32px;
         padding-top: 12px;
     }
     .pb {
         float: right;
         padding-right: 24px;
         padding-top: 12px;
     }
     h1, p, textarea {
        font-family: "Vectora W02_55 Roman","Voces";
     }
    </style>
  </head>
  <body>
   <div class="logos">
    <div class="uti"><a href="http://www.bangor.ac.uk"><img src="http://geiriadur.bangor.ac.uk/skins/GeiriadurBangor/images/pb.jpg"></a></div>
    <div class="pb"><a href="http://techiaith.bangor.ac.uk"><img src="http://geiriadur.bangor.ac.uk/skins/GeiriadurBangor/images/uti.jpg"></a></div>
   </div>
   <h1>DEMO CYFIEITHU PEIRIANYDDOL ~ MACHINE TRANSLATION DEMO</h1>
   <div ng-app="MosesApp">
    <div ng-controller="MosesCtrl">
     <table width="100%" style="margin:auto"><tr>
     <td width="45%">
      <textarea ng-model="sourceText" style="width:100%" rows=5 placeholder="Testun i'w gyfieithu"></textarea>
     </td>
     <td width="10%" style="vertical-align:top">
pre      <button ng-click="translate()">Cyfieithu ~ Translate >> </button>
      <img ng-show="loading" src="http://techiaith.cymru/wp-content/uploads/2015/11/ripple.gif"/>
     </td>
     <td width="45%" style="vertical-align:top">
      <p ng-bind="translatedText" style="font-size:2em;"></p>
     </td></tr></table>
    </div>
   </div>
"""
if __name__ == "__main__":
    # Command line front-end: parse options, configure logging and CherryPy,
    # then serve the Root application.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-ip', help='server ip to bind to, default: localhost', default="127.0.0.1")
    parser.add_argument('-port', action='store', help='server port to bind to, default: 8080', type=int, default=8080)
    parser.add_argument('-nthreads', help='number of server threads, default: 8', type=int, default=8)
    parser.add_argument('-mosesurl', dest="moses_url", action='store', help='url of mosesserver', required=True)
    parser.add_argument('-recaserurl', dest="recaser_url", action='store', help='url of moses recaser', required=True)
    parser.add_argument('-moseshome', dest="moses_home", action='store', help='path to mosesdecoder installation', required=True)
    parser.add_argument('-timeout', help='timeout for call to translation engine, default: unlimited', type=int)
    parser.add_argument('-pretty', action='store_true', help='pretty print json')
    parser.add_argument('-slang', help='source language code')
    parser.add_argument('-tlang', help='target language code')
    parser.add_argument('-logprefix', help='logfile prefix, default: write to stderr')
    parser.add_argument('-verbose', help='verbosity level, default: 0', type=int, default=0)
    args = parser.parse_args(sys.argv[1:])
    if args.logprefix:
        init_log("%s.trans.log" %args.logprefix)
    cherrypy.config.update({'server.request_queue_size' : 1000,
                            'server.socket_port': args.port,
                            'server.thread_pool': args.nthreads,
                            'server.socket_host': args.ip})
    # All error pages are rendered as JSON (see json_error above).
    cherrypy.config.update({'error_page.default': json_error})
    cherrypy.config.update({'log.screen': True})
    if args.logprefix:
        cherrypy.config.update({'log.access_file': "%s.access.log" %args.logprefix,
                                'log.error_file': "%s.error.log" %args.logprefix})
    # NOTE(review): args.timeout is never forwarded to Root, so the -timeout
    # option is currently ineffective (Root keeps its default of -1).
    cherrypy.quickstart(Root(args.moses_home,
                             args.moses_url, args.recaser_url,
                             slang = args.slang, tlang = args.tlang,
                             pretty = args.pretty,
                             verbose = args.verbose))
| mit |
heidsoft/VirtualBox | src/libs/libxml2-2.6.31/python/tests/pushSAX.py | 87 | 1490 | #!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
log = ""
class callback:
    """SAX handler that appends a trace of every event to the global `log`."""

    def startDocument(self):
        global log
        log += "startDocument:"

    def endDocument(self):
        global log
        log += "endDocument:"

    def startElement(self, tag, attrs):
        global log
        log += "startElement %s %s:" % (tag, attrs)

    def endElement(self, tag):
        global log
        log += "endElement %s:" % (tag)

    def characters(self, data):
        global log
        log += "characters: %s:" % (data)

    def warning(self, msg):
        global log
        log += "warning: %s:" % (msg)

    def error(self, msg):
        global log
        log += "error: %s:" % (msg)

    def fatalError(self, msg):
        global log
        log += "fatalError: %s:" % (msg)
# Drive the push parser in two chunks to exercise incremental SAX parsing,
# then compare the recorded event trace against the expected reference.
handler = callback()
ctxt = libxml2.createPushParser(handler, "<foo", 4, "test.xml")
chunk = " url='tst'>b"
ctxt.parseChunk(chunk, len(chunk), 0)
chunk = "ar</foo>"
# terminate=1: this is the final chunk.
ctxt.parseChunk(chunk, len(chunk), 1)
ctxt=None
reference = "startDocument:startElement foo {'url': 'tst'}:characters: bar:endElement foo:endDocument:"
if log != reference:
    print "Error got: %s" % log
    print "Exprected: %s" % reference
    sys.exit(1)
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print "OK"
else:
    print "Memory leak %d bytes" % (libxml2.debugMemory(1))
    libxml2.dumpMemory()
| gpl-2.0 |
ds-hwang/chromium-crosswalk | tools/cr/cr/base/host.py | 89 | 5957 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module for build host support."""
import os
import pipes
import signal
import subprocess
import cr
# Controls what verbosity level turns on command trail logging
_TRAIL_VERBOSITY = 2
def PrintTrail(trail):
  """Print the (key, value) pairs of context variables a command expanded.

  `trail` is a sequence of (name, value) tuples; None values print as ''.
  """
  print 'Command expanded the following variables:'
  for key, value in trail:
    if value == None:
      value = ''
    print '  ', key, '=', value
class Host(cr.Plugin, cr.Plugin.Type):
  """Base class for implementing cr hosts.
  The host is the main access point to services provided by the machine cr
  is running on. It exposes information about the machine, and runs external
  commands on behalf of the actions.
  """
  def __init__(self):
    super(Host, self).__init__()
  def Matches(self):
    """Detects whether this is the correct host implementation.
    This method is overridden by the concrete implementations.
    Returns:
      true if the plugin matches the machine it is running on.
    """
    return False
  @classmethod
  def Select(cls):
    # Returns the first registered host plugin that matches this machine,
    # or None implicitly if none match.
    for host in cls.Plugins():
      if host.Matches():
        return host
  def _Execute(self, command,
               shell=False, capture=False, silent=False,
               ignore_dry_run=False, return_status=False,
               ignore_interrupt_signal=False):
    """This is the only method that launches external programs.
    It is a thin wrapper around subprocess.Popen that handles cr specific
    issues. The command is expanded in the active context so that variables
    are substituted.
    Args:
      command: the command to run.
      shell: whether to run the command using the shell.
      capture: controls wether the output of the command is captured.
      ignore_dry_run: Normally, if the context is in dry run mode the command is
        printed but not executed. This flag overrides that behaviour, causing
        the command to be run anyway.
      return_status: switches the function to returning the status code rather
        the output.
      ignore_interrupt_signal: Ignore the interrupt signal (i.e., Ctrl-C) while
        the command is running. Useful for letting interactive programs manage
        Ctrl-C by themselves.
    Returns:
      the status if return_status is true, or the output if capture is true,
      otherwise nothing.
    """
    # Substitute context variables in every argument, recording which
    # variables were expanded (the "trail") for diagnostics.
    with cr.context.Trace():
      command = [cr.context.Substitute(arg) for arg in command if arg]
      trail = cr.context.trail
    if not command:
      print 'Empty command passed to execute'
      exit(1)
    if cr.context.verbose:
      print ' '.join(command)
      if cr.context.verbose >= _TRAIL_VERBOSITY:
        PrintTrail(trail)
    # In dry-run mode the command was only printed above, unless the caller
    # explicitly asked for real execution.
    if ignore_dry_run or not cr.context.dry_run:
      out = None
      if capture:
        out = subprocess.PIPE
      elif silent:
        # Discard the child's stdout entirely.
        out = open(os.devnull, "w")
      try:
        p = subprocess.Popen(
            command, shell=shell,
            env={k: str(v) for k, v in cr.context.exported.items()},
            stdout=out)
      except OSError:
        print 'Failed to exec', command
        # Don't log the trail if we already have
        if cr.context.verbose < _TRAIL_VERBOSITY:
          PrintTrail(trail)
        exit(1)
      try:
        # Optionally let the child own Ctrl-C while it runs; the finally
        # block always restores the default handler.
        if ignore_interrupt_signal:
          signal.signal(signal.SIGINT, signal.SIG_IGN)
        output, _ = p.communicate()
      finally:
        if ignore_interrupt_signal:
          signal.signal(signal.SIGINT, signal.SIG_DFL)
        if silent:
          out.close()
      if return_status:
        return p.returncode
      if p.returncode != 0:
        print 'Error {0} executing command {1}'.format(p.returncode, command)
        exit(p.returncode)
      return output or ''
    return ''
  @cr.Plugin.activemethod
  def Shell(self, *command):
    # Quote each argument, then run the joined string through the shell.
    command = ' '.join([pipes.quote(arg) for arg in command])
    return self._Execute([command], shell=True, ignore_interrupt_signal=True)
  @cr.Plugin.activemethod
  def Execute(self, *command):
    return self._Execute(command, shell=False)
  @cr.Plugin.activemethod
  def ExecuteSilently(self, *command):
    return self._Execute(command, shell=False, silent=True)
  @cr.Plugin.activemethod
  def CaptureShell(self, *command):
    return self._Execute(command,
                         shell=True, capture=True, ignore_dry_run=True)
  @cr.Plugin.activemethod
  def Capture(self, *command):
    return self._Execute(command, capture=True, ignore_dry_run=True)
  @cr.Plugin.activemethod
  def ExecuteStatus(self, *command):
    return self._Execute(command,
                         ignore_dry_run=True, return_status=True)
  @cr.Plugin.activemethod
  def YesNo(self, question, default=True):
    """Ask the user a yes no question
    This blocks until the user responds.
    Args:
      question: The question string to show the user
      default: True if the default response is Yes
    Returns:
      True if the response was yes.
    """
    options = 'Y/n' if default else 'y/N'
    result = raw_input(question + ' [' + options + '] ').lower()
    if result == '':
      return default
    return result in ['y', 'yes']
  @classmethod
  def SearchPath(cls, name, paths=[]):
    """Searches the PATH for an executable.
    Args:
      name: the name of the binary to search for.
    Returns:
      the set of executables found, or an empty list if none.
    """
    # NOTE(review): paths=[] is a mutable default, but it is only read here,
    # never mutated, so sharing across calls is harmless.
    result = []
    extensions = ['']
    # On Windows, PATHEXT lists executable suffixes (.exe, .bat, ...).
    extensions.extend(os.environ.get('PATHEXT', '').split(os.pathsep))
    paths = [cr.context.Substitute(path) for path in paths if path]
    paths = paths + os.environ.get('PATH', '').split(os.pathsep)
    for path in paths:
      partial = os.path.join(path, name)
      for extension in extensions:
        filename = partial + extension
        # Deduplicate while preserving search order.
        if os.path.exists(filename) and filename not in result:
          result.append(filename)
    return result
| bsd-3-clause |
Juniper/nova | nova/tests/functional/regressions/test_bug_1541691.py | 11 | 2126 | # Copyright 2016 HPE, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.scheduler.utils
import nova.servicegroup
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
class TestServerValidation(test.TestCase):
    """Regression tests for bug #1541691.

    jsonschema validation failures used to spit a giant wall of regex
    (about 500k characters) at the user, which is useless for working out
    why a request actually failed. These tests ensure the fix does not
    regress.
    """

    REQUIRES_LOCKING = True
    microversion = None

    def setUp(self):
        super(TestServerValidation, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        # The fake image backend is needed for image discovery.
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.api = api_fixture.api
        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

    def test_name_validation(self):
        """A server name with trailing whitespace must be rejected."""
        server_args = {
            'server': {
                'name': 'server1 ',
                'imageRef': self.image_id,
                'flavorRef': self.flavor_id,
            },
        }
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, server_args)
| apache-2.0 |
xpansa/hr | hr_contract_default_trial_length/models/hr_contract.py | 13 | 1530 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2015 Salton Massally (<smassally@idtlabs.sl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from dateutil.relativedelta import relativedelta
from openerp import fields, models, api
class HrContract(models.Model):
    _inherit = 'hr.contract'

    @api.onchange('trial_date_start', 'type_id')
    @api.multi
    def onchange_trial_date_start(self):
        """Default the trial end date from the contract type.

        When a trial start date is set and the selected contract type
        defines a trial length (in days), fill in trial_date_end as
        start + length.
        """
        self.ensure_one()
        if not (self.trial_date_start and len(self.type_id)):
            return
        length = self.type_id.trial_length
        if not length:
            return
        start = fields.Date.from_string(self.trial_date_start)
        self.trial_date_end = fields.Date.to_string(
            start + relativedelta(days=length))
| agpl-3.0 |
jherico/qtwebkit | Tools/Scripts/webkitpy/port/builders_unittest.py | 124 | 1909 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import builders
import unittest2 as unittest
class BuildersTest(unittest.TestCase):
    def test_path_from_name(self):
        """Builder names are flattened to filesystem-safe path names."""
        cases = [
            ('test', 'test'),
            ('Mac 10.6 (dbg)(1)', 'Mac_10_6__dbg__1_'),
            ('(.) ', '____'),
        ]
        for name, expected in cases:
            self.assertEqual(expected, builders.builder_path_from_name(name))
| lgpl-3.0 |
charlesccychen/beam | sdks/python/apache_beam/io/gcp/internal/clients/bigquery/bigquery_v2_client.py | 5 | 28371 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated client library for bigquery version v2."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from apache_beam.io.gcp.internal.clients.bigquery import \
bigquery_v2_messages as messages
class BigqueryV2(base_api.BaseApiClient):
  """Generated client library for service bigquery version v2."""
  # NOTE: generated by apitools ("autogenerated and should not be edited
  # by hand" per the file header) — regenerate rather than patch.
  MESSAGES_MODULE = messages
  _PACKAGE = u'bigquery'
  # OAuth2 scopes requested when authorizing this client.
  _SCOPES = [u'https://www.googleapis.com/auth/bigquery', u'https://www.googleapis.com/auth/bigquery.insertdata', u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/devstorage.full_control', u'https://www.googleapis.com/auth/devstorage.read_only', u'https://www.googleapis.com/auth/devstorage.read_write']
  _VERSION = u'v2'
  # NOTE(review): these look like the generator's stock installed-app OAuth
  # client values (the "secret" is not actually secret for installed apps).
  _CLIENT_ID = '1042881264118.apps.googleusercontent.com'
  _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _CLIENT_CLASS_NAME = u'BigqueryV2'
  _URL_VERSION = u'v2'
  _API_KEY = None
  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               additional_http_headers=None):
    """Create a new bigquery handle.

    Args:
      url: base URL for the service; defaults to the public endpoint.
      credentials, get_credentials, http, model, log_request,
      log_response, credentials_args, default_global_params,
      additional_http_headers: passed straight through to
        base_api.BaseApiClient.__init__.
    """
    url = url or u'https://www.googleapis.com/bigquery/v2/'
    super(BigqueryV2, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers)
    # One service stub per top-level BigQuery resource.
    self.datasets = self.DatasetsService(self)
    self.jobs = self.JobsService(self)
    self.projects = self.ProjectsService(self)
    self.tabledata = self.TabledataService(self)
    self.tables = self.TablesService(self)
  class DatasetsService(base_api.BaseApiService):
    """Service class for the datasets resource."""
    _NAME = u'datasets'
    def __init__(self, client):
      super(BigqueryV2.DatasetsService, self).__init__(client)
      # Declarative wire-format description of each API method; consumed
      # by base_api when the public methods below call _RunMethod.
      self._method_configs = {
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'bigquery.datasets.delete',
              ordered_params=[u'projectId', u'datasetId'],
              path_params=[u'datasetId', u'projectId'],
              query_params=[u'deleteContents'],
              relative_path=u'projects/{projectId}/datasets/{datasetId}',
              request_field='',
              request_type_name=u'BigqueryDatasetsDeleteRequest',
              response_type_name=u'BigqueryDatasetsDeleteResponse',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'bigquery.datasets.get',
              ordered_params=[u'projectId', u'datasetId'],
              path_params=[u'datasetId', u'projectId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets/{datasetId}',
              request_field='',
              request_type_name=u'BigqueryDatasetsGetRequest',
              response_type_name=u'Dataset',
              supports_download=False,
          ),
          'Insert': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'bigquery.datasets.insert',
              ordered_params=[u'projectId'],
              path_params=[u'projectId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets',
              request_field=u'dataset',
              request_type_name=u'BigqueryDatasetsInsertRequest',
              response_type_name=u'Dataset',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'bigquery.datasets.list',
              ordered_params=[u'projectId'],
              path_params=[u'projectId'],
              query_params=[u'all', u'maxResults', u'pageToken'],
              relative_path=u'projects/{projectId}/datasets',
              request_field='',
              request_type_name=u'BigqueryDatasetsListRequest',
              response_type_name=u'DatasetList',
              supports_download=False,
          ),
          'Patch': base_api.ApiMethodInfo(
              http_method=u'PATCH',
              method_id=u'bigquery.datasets.patch',
              ordered_params=[u'projectId', u'datasetId'],
              path_params=[u'datasetId', u'projectId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets/{datasetId}',
              request_field=u'dataset',
              request_type_name=u'BigqueryDatasetsPatchRequest',
              response_type_name=u'Dataset',
              supports_download=False,
          ),
          'Update': base_api.ApiMethodInfo(
              http_method=u'PUT',
              method_id=u'bigquery.datasets.update',
              ordered_params=[u'projectId', u'datasetId'],
              path_params=[u'datasetId', u'projectId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets/{datasetId}',
              request_field=u'dataset',
              request_type_name=u'BigqueryDatasetsUpdateRequest',
              response_type_name=u'Dataset',
              supports_download=False,
          ),
          }
      # No media-upload methods on this resource.
      self._upload_configs = {
          }
    def Delete(self, request, global_params=None):
      """Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.
      Args:
        request: (BigqueryDatasetsDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BigqueryDatasetsDeleteResponse) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Get(self, request, global_params=None):
      """Returns the dataset specified by datasetID.
      Args:
        request: (BigqueryDatasetsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Dataset) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Insert(self, request, global_params=None):
      """Creates a new empty dataset.
      Args:
        request: (BigqueryDatasetsInsertRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Dataset) The response message.
      """
      config = self.GetMethodConfig('Insert')
      return self._RunMethod(
          config, request, global_params=global_params)
    def List(self, request, global_params=None):
      """Lists all datasets in the specified project to which you have been granted the READER dataset role.
      Args:
        request: (BigqueryDatasetsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (DatasetList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Patch(self, request, global_params=None):
      """Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.
      Args:
        request: (BigqueryDatasetsPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Dataset) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Update(self, request, global_params=None):
      """Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.
      Args:
        request: (BigqueryDatasetsUpdateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Dataset) The response message.
      """
      config = self.GetMethodConfig('Update')
      return self._RunMethod(
          config, request, global_params=global_params)
  class JobsService(base_api.BaseApiService):
    """Service class for the jobs resource."""
    _NAME = u'jobs'
    def __init__(self, client):
      super(BigqueryV2.JobsService, self).__init__(client)
      # Declarative wire-format description of each API method; consumed
      # by base_api when the public methods below call _RunMethod.
      self._method_configs = {
          'Cancel': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'bigquery.jobs.cancel',
              ordered_params=[u'projectId', u'jobId'],
              path_params=[u'jobId', u'projectId'],
              query_params=[],
              # NOTE(review): 'project/' (singular) matches the discovery
              # document this client was generated from — verify against
              # the current BigQuery REST API before changing.
              relative_path=u'project/{projectId}/jobs/{jobId}/cancel',
              request_field='',
              request_type_name=u'BigqueryJobsCancelRequest',
              response_type_name=u'JobCancelResponse',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'bigquery.jobs.get',
              ordered_params=[u'projectId', u'jobId'],
              path_params=[u'jobId', u'projectId'],
              query_params=[],
              relative_path=u'projects/{projectId}/jobs/{jobId}',
              request_field='',
              request_type_name=u'BigqueryJobsGetRequest',
              response_type_name=u'Job',
              supports_download=False,
          ),
          'GetQueryResults': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'bigquery.jobs.getQueryResults',
              ordered_params=[u'projectId', u'jobId'],
              path_params=[u'jobId', u'projectId'],
              query_params=[u'maxResults', u'pageToken', u'startIndex', u'timeoutMs'],
              relative_path=u'projects/{projectId}/queries/{jobId}',
              request_field='',
              request_type_name=u'BigqueryJobsGetQueryResultsRequest',
              response_type_name=u'GetQueryResultsResponse',
              supports_download=False,
          ),
          'Insert': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'bigquery.jobs.insert',
              ordered_params=[u'projectId'],
              path_params=[u'projectId'],
              query_params=[],
              relative_path=u'projects/{projectId}/jobs',
              request_field=u'job',
              request_type_name=u'BigqueryJobsInsertRequest',
              response_type_name=u'Job',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'bigquery.jobs.list',
              ordered_params=[u'projectId'],
              path_params=[u'projectId'],
              query_params=[u'allUsers', u'maxResults', u'pageToken', u'projection', u'stateFilter'],
              relative_path=u'projects/{projectId}/jobs',
              request_field='',
              request_type_name=u'BigqueryJobsListRequest',
              response_type_name=u'JobList',
              supports_download=False,
          ),
          'Query': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'bigquery.jobs.query',
              ordered_params=[u'projectId'],
              path_params=[u'projectId'],
              query_params=[],
              relative_path=u'projects/{projectId}/queries',
              request_field=u'queryRequest',
              request_type_name=u'BigqueryJobsQueryRequest',
              response_type_name=u'QueryResponse',
              supports_download=False,
          ),
          }
      # Insert is the only method that accepts a media upload (job data).
      self._upload_configs = {
          'Insert': base_api.ApiUploadInfo(
              accept=['*/*'],
              max_size=None,
              resumable_multipart=True,
              resumable_path=u'/resumable/upload/bigquery/v2/projects/{projectId}/jobs',
              simple_multipart=True,
              simple_path=u'/upload/bigquery/v2/projects/{projectId}/jobs',
          ),
          }
    def Cancel(self, request, global_params=None):
      """Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully. Cancelled jobs may still incur costs.
      Args:
        request: (BigqueryJobsCancelRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (JobCancelResponse) The response message.
      """
      config = self.GetMethodConfig('Cancel')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Get(self, request, global_params=None):
      """Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.
      Args:
        request: (BigqueryJobsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Job) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    def GetQueryResults(self, request, global_params=None):
      """Retrieves the results of a query job.
      Args:
        request: (BigqueryJobsGetQueryResultsRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GetQueryResultsResponse) The response message.
      """
      config = self.GetMethodConfig('GetQueryResults')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Insert(self, request, global_params=None, upload=None):
      """Starts a new asynchronous job. Requires the Can View project role.
      Args:
        request: (BigqueryJobsInsertRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
        upload: (Upload, default: None) If present, upload
            this stream with the request.
      Returns:
        (Job) The response message.
      """
      config = self.GetMethodConfig('Insert')
      upload_config = self.GetUploadConfig('Insert')
      return self._RunMethod(
          config, request, global_params=global_params,
          upload=upload, upload_config=upload_config)
    def List(self, request, global_params=None):
      """Lists all jobs that you started in the specified project. Job information is available for a six month period after creation. The job list is sorted in reverse chronological order, by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.
      Args:
        request: (BigqueryJobsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (JobList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Query(self, request, global_params=None):
      """Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.
      Args:
        request: (BigqueryJobsQueryRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (QueryResponse) The response message.
      """
      config = self.GetMethodConfig('Query')
      return self._RunMethod(
          config, request, global_params=global_params)
  class ProjectsService(base_api.BaseApiService):
    """Service class for the projects resource."""
    _NAME = u'projects'
    def __init__(self, client):
      super(BigqueryV2.ProjectsService, self).__init__(client)
      # Declarative wire-format description of each API method; consumed
      # by base_api when the public methods below call _RunMethod.
      self._method_configs = {
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'bigquery.projects.list',
              ordered_params=[],
              path_params=[],
              query_params=[u'maxResults', u'pageToken'],
              relative_path=u'projects',
              request_field='',
              request_type_name=u'BigqueryProjectsListRequest',
              response_type_name=u'ProjectList',
              supports_download=False,
          ),
          }
      # No media-upload methods on this resource.
      self._upload_configs = {
          }
    def List(self, request, global_params=None):
      """Lists all projects to which you have been granted any project role.
      Args:
        request: (BigqueryProjectsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ProjectList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
  class TabledataService(base_api.BaseApiService):
    """Service class for the tabledata resource."""
    _NAME = u'tabledata'
    def __init__(self, client):
      super(BigqueryV2.TabledataService, self).__init__(client)
      # Declarative wire-format description of each API method; consumed
      # by base_api when the public methods below call _RunMethod.
      self._method_configs = {
          'InsertAll': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'bigquery.tabledata.insertAll',
              ordered_params=[u'projectId', u'datasetId', u'tableId'],
              path_params=[u'datasetId', u'projectId', u'tableId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll',
              request_field=u'tableDataInsertAllRequest',
              request_type_name=u'BigqueryTabledataInsertAllRequest',
              response_type_name=u'TableDataInsertAllResponse',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'bigquery.tabledata.list',
              ordered_params=[u'projectId', u'datasetId', u'tableId'],
              path_params=[u'datasetId', u'projectId', u'tableId'],
              query_params=[u'maxResults', u'pageToken', u'startIndex'],
              relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data',
              request_field='',
              request_type_name=u'BigqueryTabledataListRequest',
              response_type_name=u'TableDataList',
              supports_download=False,
          ),
          }
      # No media-upload methods on this resource.
      self._upload_configs = {
          }
    def InsertAll(self, request, global_params=None):
      """Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.
      Args:
        request: (BigqueryTabledataInsertAllRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (TableDataInsertAllResponse) The response message.
      """
      config = self.GetMethodConfig('InsertAll')
      return self._RunMethod(
          config, request, global_params=global_params)
    def List(self, request, global_params=None):
      """Retrieves table data from a specified set of rows. Requires the READER dataset role.
      Args:
        request: (BigqueryTabledataListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (TableDataList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
  class TablesService(base_api.BaseApiService):
    """Service class for the tables resource."""
    _NAME = u'tables'
    def __init__(self, client):
      super(BigqueryV2.TablesService, self).__init__(client)
      # Declarative wire-format description of each API method; consumed
      # by base_api when the public methods below call _RunMethod.
      self._method_configs = {
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'bigquery.tables.delete',
              ordered_params=[u'projectId', u'datasetId', u'tableId'],
              path_params=[u'datasetId', u'projectId', u'tableId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}',
              request_field='',
              request_type_name=u'BigqueryTablesDeleteRequest',
              response_type_name=u'BigqueryTablesDeleteResponse',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'bigquery.tables.get',
              ordered_params=[u'projectId', u'datasetId', u'tableId'],
              path_params=[u'datasetId', u'projectId', u'tableId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}',
              request_field='',
              request_type_name=u'BigqueryTablesGetRequest',
              response_type_name=u'Table',
              supports_download=False,
          ),
          'Insert': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'bigquery.tables.insert',
              ordered_params=[u'projectId', u'datasetId'],
              path_params=[u'datasetId', u'projectId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets/{datasetId}/tables',
              request_field=u'table',
              request_type_name=u'BigqueryTablesInsertRequest',
              response_type_name=u'Table',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'bigquery.tables.list',
              ordered_params=[u'projectId', u'datasetId'],
              path_params=[u'datasetId', u'projectId'],
              query_params=[u'maxResults', u'pageToken'],
              relative_path=u'projects/{projectId}/datasets/{datasetId}/tables',
              request_field='',
              request_type_name=u'BigqueryTablesListRequest',
              response_type_name=u'TableList',
              supports_download=False,
          ),
          'Patch': base_api.ApiMethodInfo(
              http_method=u'PATCH',
              method_id=u'bigquery.tables.patch',
              ordered_params=[u'projectId', u'datasetId', u'tableId'],
              path_params=[u'datasetId', u'projectId', u'tableId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}',
              request_field=u'table',
              request_type_name=u'BigqueryTablesPatchRequest',
              response_type_name=u'Table',
              supports_download=False,
          ),
          'Update': base_api.ApiMethodInfo(
              http_method=u'PUT',
              method_id=u'bigquery.tables.update',
              ordered_params=[u'projectId', u'datasetId', u'tableId'],
              path_params=[u'datasetId', u'projectId', u'tableId'],
              query_params=[],
              relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}',
              request_field=u'table',
              request_type_name=u'BigqueryTablesUpdateRequest',
              response_type_name=u'Table',
              supports_download=False,
          ),
          }
      # No media-upload methods on this resource.
      self._upload_configs = {
          }
    def Delete(self, request, global_params=None):
      """Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.
      Args:
        request: (BigqueryTablesDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BigqueryTablesDeleteResponse) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Get(self, request, global_params=None):
      """Gets the specified table resource by table ID. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.
      Args:
        request: (BigqueryTablesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Table) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Insert(self, request, global_params=None):
      """Creates a new, empty table in the dataset.
      Args:
        request: (BigqueryTablesInsertRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Table) The response message.
      """
      config = self.GetMethodConfig('Insert')
      return self._RunMethod(
          config, request, global_params=global_params)
    def List(self, request, global_params=None):
      """Lists all tables in the specified dataset. Requires the READER dataset role.
      Args:
        request: (BigqueryTablesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (TableList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Patch(self, request, global_params=None):
      """Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.
      Args:
        request: (BigqueryTablesPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Table) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Update(self, request, global_params=None):
      """Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.
      Args:
        request: (BigqueryTablesUpdateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Table) The response message.
      """
      config = self.GetMethodConfig('Update')
      return self._RunMethod(
          config, request, global_params=global_params)
| apache-2.0 |
alda519/GreenTea | apps/core/migrations/0004_auto_20160105_1533.py | 5 | 1679 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds db_index=True (plus adjusted
    # defaults) to several frequently-filtered columns. Do not edit the
    # applied operations; create a follow-up migration instead.
    dependencies = [
        ('core', '0003_auto_20151215_1537'),
    ]
    operations = [
        migrations.AlterField(
            model_name='author',
            name='email',
            # NOTE(review): 'unknow@redhat.com' looks like a typo, but
            # fixing it requires a new migration, not an edit here.
            field=models.EmailField(
                default=b'unknow@redhat.com', max_length=254, db_index=True),
        ),
        migrations.AlterField(
            model_name='author',
            name='name',
            field=models.CharField(
                default=b'Unknown', max_length=255, db_index=True),
        ),
        migrations.AlterField(
            model_name='job',
            name='date',
            field=models.DateTimeField(
                default=django.utils.timezone.now, db_index=True),
        ),
        migrations.AlterField(
            model_name='system',
            name='hostname',
            field=models.CharField(db_index=True, max_length=255, blank=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='folder',
            field=models.CharField(
                db_index=True, max_length=256, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='test',
            name='is_enable',
            field=models.BooleanField(
                default=True, db_index=True, verbose_name=b'enable'),
        ),
        migrations.AlterField(
            model_name='test',
            name='name',
            field=models.CharField(unique=True, max_length=255, db_index=True),
        ),
    ]
| gpl-2.0 |
eflowbeach/draw-your-taf | Pmw/Pmw_1_3/lib/PmwCounter.py | 6 | 11809 | import string
import sys
import types
import Tkinter
import Pmw
class Counter(Pmw.MegaWidget):
    """Entry field flanked by two arrow buttons that increment/decrement
    the displayed value.

    The entry is a Pmw.EntryField component; the arrows are small
    Tkinter.Canvas widgets.  Holding mouse button 1 on an arrow
    auto-repeats the count (controlled by the 'autorepeat', 'initwait'
    and 'repeatrate' options).  The 'datatype' option selects the
    increment function: one of the keys of _counterCommands, a callable,
    or a dict with a 'counter' key plus extra keyword arguments for the
    increment function.
    """

    def __init__(self, parent = None, **kw):

        # Define the megawidget options.
        INITOPT = Pmw.INITOPT
        optiondefs = (
            ('autorepeat', 1, None),
            ('buttonaspect', 1.0, INITOPT),
            ('datatype', 'numeric', self._datatype),
            ('increment', 1, None),
            ('initwait', 300, None),
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('orient', 'horizontal', INITOPT),
            ('padx', 0, INITOPT),
            ('pady', 0, INITOPT),
            ('repeatrate', 50, None),
            ('sticky', 'ew', INITOPT),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        Pmw.MegaWidget.__init__(self, parent)

        # Initialise instance variables.
        self._timerId = None        # pending after() id while auto-repeating
        self._normalRelief = None   # (button, relief) to restore on release

        # Create the components.
        interior = self.interior()

        # If there is no label, put the arrows and the entry directly
        # into the interior, otherwise create a frame for them.  In
        # either case the border around the arrows and the entry will
        # be raised (but not around the label).
        if self['labelpos'] is None:
            frame = interior
            if not kw.has_key('hull_relief'):
                frame.configure(relief = 'raised')
            if not kw.has_key('hull_borderwidth'):
                frame.configure(borderwidth = 1)
        else:
            frame = self.createcomponent('frame',
                    (), None,
                    Tkinter.Frame, (interior,),
                    relief = 'raised', borderwidth = 1)
            frame.grid(column=2, row=2, sticky=self['sticky'])
            interior.grid_columnconfigure(2, weight=1)
            interior.grid_rowconfigure(2, weight=1)

        # Create the down arrow.
        self._downArrowBtn = self.createcomponent('downarrow',
                (), 'Arrow',
                Tkinter.Canvas, (frame,),
                width = 16, height = 16, relief = 'raised', borderwidth = 2)

        # Create the entry field.
        self._counterEntry = self.createcomponent('entryfield',
                (('entry', 'entryfield_entry'),), None,
                Pmw.EntryField, (frame,))

        # Create the up arrow.
        self._upArrowBtn = self.createcomponent('uparrow',
                (), 'Arrow',
                Tkinter.Canvas, (frame,),
                width = 16, height = 16, relief = 'raised', borderwidth = 2)

        padx = self['padx']
        pady = self['pady']
        orient = self['orient']
        if orient == 'horizontal':
            # Layout: [down] [entry] [up], entry stretches horizontally.
            self._downArrowBtn.grid(column = 0, row = 0)
            self._counterEntry.grid(column = 1, row = 0,
                    sticky = self['sticky'])
            self._upArrowBtn.grid(column = 2, row = 0)
            frame.grid_columnconfigure(1, weight = 1)
            frame.grid_rowconfigure(0, weight = 1)
            # grid's 'pad' option only exists from Tk 4.2 onwards.
            if Tkinter.TkVersion >= 4.2:
                frame.grid_columnconfigure(0, pad = padx)
                frame.grid_columnconfigure(2, pad = padx)
                frame.grid_rowconfigure(0, pad = pady)
        elif orient == 'vertical':
            # Layout: up arrow above the entry, down arrow below it.
            self._upArrowBtn.grid(column = 0, row = 0, sticky = 's')
            self._counterEntry.grid(column = 0, row = 1,
                    sticky = self['sticky'])
            self._downArrowBtn.grid(column = 0, row = 2, sticky = 'n')
            frame.grid_columnconfigure(0, weight = 1)
            frame.grid_rowconfigure(0, weight = 1)
            frame.grid_rowconfigure(2, weight = 1)
            if Tkinter.TkVersion >= 4.2:
                frame.grid_rowconfigure(0, pad = pady)
                frame.grid_rowconfigure(2, pad = pady)
                frame.grid_columnconfigure(0, pad = padx)
        else:
            raise ValueError, 'bad orient option ' + repr(orient) + \
                ': must be either \'horizontal\' or \'vertical\''

        self.createlabel(interior)

        # Redraw arrows on resize; count while button 1 is held down.
        self._upArrowBtn.bind('<Configure>', self._drawUpArrow)
        self._upArrowBtn.bind('<1>', self._countUp)
        self._upArrowBtn.bind('<Any-ButtonRelease-1>', self._stopCounting)
        self._downArrowBtn.bind('<Configure>', self._drawDownArrow)
        self._downArrowBtn.bind('<1>', self._countDown)
        self._downArrowBtn.bind('<Any-ButtonRelease-1>', self._stopCounting)
        self._counterEntry.bind('<Configure>', self._resizeArrow)
        entry = self._counterEntry.component('entry')
        # Keyboard arrows perform a single (non-repeating) step.
        entry.bind('<Down>', lambda event, s = self: s._key_decrement(event))
        entry.bind('<Up>', lambda event, s = self: s._key_increment(event))

        # Need to cancel the timer if an arrow button is unmapped (eg:
        # its toplevel window is withdrawn) while the mouse button is
        # held down.  The canvas will not get the ButtonRelease event
        # if it is not mapped, since the implicit grab is cancelled.
        self._upArrowBtn.bind('<Unmap>', self._stopCounting)
        self._downArrowBtn.bind('<Unmap>', self._stopCounting)

        # Check keywords and initialise options.
        self.initialiseoptions()

    def _resizeArrow(self, event):
        # Keep the arrow buttons sized relative to the entry's requested
        # height, scaled horizontally by the 'buttonaspect' option.
        for btn in (self._upArrowBtn, self._downArrowBtn):
            bw = (string.atoi(btn['borderwidth']) +
                    string.atoi(btn['highlightthickness']))
            newHeight = self._counterEntry.winfo_reqheight() - 2 * bw
            newWidth = int(newHeight * self['buttonaspect'])
            btn.configure(width=newWidth, height=newHeight)
            self._drawArrow(btn)

    def _drawUpArrow(self, event):
        # <Configure> callback for the up arrow canvas.
        self._drawArrow(self._upArrowBtn)

    def _drawDownArrow(self, event):
        # <Configure> callback for the down arrow canvas.
        self._drawArrow(self._downArrowBtn)

    def _drawArrow(self, arrow):
        # Arrow direction depends on orientation: vertical counters use
        # up/down arrow heads, horizontal ones use right/left.
        if self['orient'] == 'vertical':
            if arrow == self._upArrowBtn:
                direction = 'up'
            else:
                direction = 'down'
        else:
            if arrow == self._upArrowBtn:
                direction = 'right'
            else:
                direction = 'left'
        Pmw.drawarrow(arrow, self['entry_foreground'], direction, 'arrow')

    def _stopCounting(self, event = None):
        # Cancel any pending auto-repeat timer and restore the pressed
        # arrow button's original relief.
        if self._timerId is not None:
            self.after_cancel(self._timerId)
            self._timerId = None
        if self._normalRelief is not None:
            button, relief = self._normalRelief
            button.configure(relief=relief)
            self._normalRelief = None

    def _countUp(self, event):
        # Button-1 press on the up arrow: sink the button and start counting.
        self._normalRelief = (self._upArrowBtn, self._upArrowBtn.cget('relief'))
        self._upArrowBtn.configure(relief='sunken')
        # Force arrow down (it may come up immediately, if increment fails).
        self._upArrowBtn.update_idletasks()
        self._count(1, 1)

    def _countDown(self, event):
        # Button-1 press on the down arrow: sink the button and start counting.
        self._normalRelief = (self._downArrowBtn, self._downArrowBtn.cget('relief'))
        self._downArrowBtn.configure(relief='sunken')
        # Force arrow down (it may come up immediately, if increment fails).
        self._downArrowBtn.update_idletasks()
        self._count(-1, 1)

    def increment(self):
        """Increment the displayed value by one step."""
        self._forceCount(1)

    def decrement(self):
        """Decrement the displayed value by one step."""
        self._forceCount(-1)

    def _key_increment(self, event):
        # <Up> key: single step, no auto-repeat.
        self._forceCount(1)
        self.update_idletasks()

    def _key_decrement(self, event):
        # <Down> key: single step, no auto-repeat.
        self._forceCount(-1)
        self.update_idletasks()

    def _datatype(self):
        # Configuration callback for the 'datatype' option: resolve it to
        # a counting function (self._counterCommand) plus optional extra
        # keyword arguments (self._counterArgs).
        datatype = self['datatype']
        if type(datatype) is types.DictionaryType:
            # Dict form: 'counter' key names the function, the remaining
            # items are passed to it as keyword arguments.
            self._counterArgs = datatype.copy()
            if self._counterArgs.has_key('counter'):
                datatype = self._counterArgs['counter']
                del self._counterArgs['counter']
            else:
                datatype = 'numeric'
        else:
            self._counterArgs = {}

        if _counterCommands.has_key(datatype):
            self._counterCommand = _counterCommands[datatype]
        elif callable(datatype):
            self._counterCommand = datatype
        else:
            validValues = _counterCommands.keys()
            validValues.sort()
            raise ValueError, ('bad datatype value "%s": must be a' +
                ' function or one of %s') % (datatype, validValues)

    def _forceCount(self, factor):
        # Apply one increment (factor=1) or decrement (factor=-1)
        # immediately, with no auto-repeat.  Rings the bell on failure.
        if not self.valid():
            self.bell()
            return
        text = self._counterEntry.get()
        try:
            value = apply(self._counterCommand,
                    (text, factor, self['increment']), self._counterArgs)
        except ValueError:
            self.bell()
            return
        previousICursor = self._counterEntry.index('insert')
        if self._counterEntry.setentry(value) == Pmw.OK:
            self._counterEntry.xview('end')
            self._counterEntry.icursor(previousICursor)

    def _count(self, factor, first):
        # One step of the (possibly auto-repeating) count.  'first' is
        # true for the initial step, which waits 'initwait' ms before
        # repeating; subsequent steps repeat every 'repeatrate' ms.
        if not self.valid():
            self.bell()
            return
        self._timerId = None
        origtext = self._counterEntry.get()
        try:
            value = apply(self._counterCommand,
                    (origtext, factor, self['increment']), self._counterArgs)
        except ValueError:
            # If text is invalid, stop counting.
            self._stopCounting()
            self.bell()
            return

        # If incrementing produces an invalid value, restore previous
        # text and stop counting.
        previousICursor = self._counterEntry.index('insert')
        valid = self._counterEntry.setentry(value)
        if valid != Pmw.OK:
            self._stopCounting()
            self._counterEntry.setentry(origtext)
            if valid == Pmw.PARTIAL:
                self.bell()
            return
        self._counterEntry.xview('end')
        self._counterEntry.icursor(previousICursor)

        if self['autorepeat']:
            if first:
                delay = self['initwait']
            else:
                delay = self['repeatrate']
            self._timerId = self.after(delay,
                lambda self=self, factor=factor: self._count(factor, 0))

    def destroy(self):
        # Make sure no timer callback can fire after the widget is gone.
        self._stopCounting()
        Pmw.MegaWidget.destroy(self)
# Expose Pmw.EntryField methods on Counter by forwarding them to the
# '_counterEntry' component (get, setentry, valid, etc.).
Pmw.forwardmethods(Counter, Pmw.EntryField, '_counterEntry')
def _changeNumber(text, factor, increment):
    """Return text stepped to the next (factor > 0) or previous
    (factor <= 0) multiple of increment, as a string."""
    # string.atol accepts arbitrarily large (long) integers.
    value = string.atol(text)
    if factor > 0:
        # Round down to a multiple of increment, then step up one.
        value = (value / increment) * increment + increment
    else:
        # Step down to the previous multiple of increment.
        value = ((value - 1) / increment) * increment

    # Get rid of the 'L' at the end of longs (in python up to 1.5.2).
    rtn = str(value)
    if rtn[-1] == 'L':
        return rtn[:-1]
    else:
        return rtn
def _changeReal(text, factor, increment, separator = '.'):
    """Return text stepped up/down to the next multiple of increment
    (a real number), using 'separator' as the decimal point."""
    value = Pmw.stringtoreal(text, separator)
    div = value / increment

    # Compare reals using str() to avoid problems caused by binary
    # numbers being only approximations to decimal numbers.
    # For example, if value is -0.3 and increment is 0.1, then
    # int(value/increment) = -2, not -3 as one would expect.
    if str(div)[-2:] == '.0':
        # value is an even multiple of increment.
        div = round(div) + factor
    else:
        # value is not an even multiple of increment.
        div = int(div) * 1.0
        if value < 0:
            div = div - 1
        if factor > 0:
            div = (div + 1)
    value = div * increment

    text = str(value)
    if separator != '.':
        # Re-insert the caller's decimal separator.
        index = string.find(text, '.')
        if index >= 0:
            text = text[:index] + separator + text[index + 1:]
    return text
def _changeDate(value, factor, increment, format = 'ymd',
        separator = '/', yyyy = 0):
    """Return the date 'increment' days after (factor > 0) or before
    (factor < 0) value, formatted per 'format' (a permutation of 'ymd')
    with fields joined by 'separator'.  yyyy selects 4-digit years."""
    # Convert to a julian day number, step by whole days, convert back.
    jdn = Pmw.datestringtojdn(value, format, separator) + factor * increment

    y, m, d = Pmw.jdntoymd(jdn)
    result = ''
    for index in range(3):
        if index > 0:
            result = result + separator
        f = format[index]
        if f == 'y':
            if yyyy:
                result = result + '%02d' % y
            else:
                # Two-digit year.
                result = result + '%02d' % (y % 100)
        elif f == 'm':
            result = result + '%02d' % m
        elif f == 'd':
            result = result + '%02d' % d

    return result
# Number of seconds in one day, used to wrap times in 24-hour mode.
_SECSPERDAY = 24 * 60 * 60

def _changeTime(value, factor, increment, separator = ':', time24 = 0):
    """Return the time stepped to the next (factor > 0) or previous
    multiple of 'increment' seconds, formatted as [-]HH:MM:SS using
    'separator'.  With time24 true, the result wraps into [0, 24h)."""
    unixTime = Pmw.timestringtoseconds(value, separator)
    if factor > 0:
        # Step up to the next multiple of increment.
        chunks = unixTime / increment + 1
    else:
        # Step down to the previous multiple of increment.
        chunks = (unixTime - 1) / increment
    unixTime = chunks * increment
    if time24:
        # Wrap around midnight in both directions.
        while unixTime < 0:
            unixTime = unixTime + _SECSPERDAY
        while unixTime >= _SECSPERDAY:
            unixTime = unixTime - _SECSPERDAY
    if unixTime < 0:
        # Negative times are rendered with an explicit leading minus.
        unixTime = -unixTime
        sign = '-'
    else:
        sign = ''
    secs = unixTime % 60
    unixTime = unixTime / 60
    mins = unixTime % 60
    hours = unixTime / 60
    return '%s%02d%s%02d%s%02d' % (sign, hours, separator, mins, separator, secs)
# hexadecimal, alphabetic, alphanumeric not implemented
# Maps the Counter 'datatype' option strings to their increment functions.
_counterCommands = {
    'numeric'   : _changeNumber,      # } integer
    'integer'   : _changeNumber,      # } these two use the same function
    'real'      : _changeReal,        # real number
    'time'      : _changeTime,
    'date'      : _changeDate,
}
| mit |
simonlynen/yaml-cpp.new-api | test/gmock-1.7.0/test/gmock_leak_test.py | 779 | 4384 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gmock_test_utils
# Path of the helper binary that deliberately leaks mock objects.
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')

# Command lines selecting specific leaking test cases via --gtest_filter.
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']

environ = gmock_test_utils.environ
SetEnvVar = gmock_test_utils.SetEnvVar

# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely.  Therefore they are incompatible with
# the premature-exit-file protocol by design.  Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gmock_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
class GMockLeakTest(gmock_test_utils.TestCase):
    """End-to-end tests for Google Mock's leaked-mock detection.

    Each test runs the gmock_leak_test_ binary (which deliberately leaks
    mock objects) and inspects its exit code: non-zero when leak
    detection fires, zero when it is disabled via
    --gmock_catch_leaked_mocks=0.
    """

    def testCatchesLeakedMockByDefault(self):
        """Leak detection is on by default for EXPECT_CALL and ON_CALL."""
        self.assertNotEqual(
            0,
            gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL,
                                        env=environ).exit_code)
        self.assertNotEqual(
            0,
            gmock_test_utils.Subprocess(TEST_WITH_ON_CALL,
                                        env=environ).exit_code)

    def testDoesNotCatchLeakedMockWhenDisabled(self):
        """--gmock_catch_leaked_mocks=0 disables detection (exit code 0)."""
        # Use assertEqual, not the deprecated assertEquals alias
        # (removed in Python 3.12); behavior is identical.
        self.assertEqual(
            0,
            gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                        ['--gmock_catch_leaked_mocks=0'],
                                        env=environ).exit_code)
        self.assertEqual(
            0,
            gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                        ['--gmock_catch_leaked_mocks=0'],
                                        env=environ).exit_code)

    def testCatchesLeakedMockWhenEnabled(self):
        """Passing the bare flag (no value) enables detection."""
        self.assertNotEqual(
            0,
            gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                        ['--gmock_catch_leaked_mocks'],
                                        env=environ).exit_code)
        self.assertNotEqual(
            0,
            gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                        ['--gmock_catch_leaked_mocks'],
                                        env=environ).exit_code)

    def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
        """An explicit =1 flag value also enables detection."""
        self.assertNotEqual(
            0,
            gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                        ['--gmock_catch_leaked_mocks=1'],
                                        env=environ).exit_code)

    def testCatchesMultipleLeakedMocks(self):
        """Several leaked mocks in one run are still reported."""
        self.assertNotEqual(
            0,
            gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
                                        ['--gmock_catch_leaked_mocks'],
                                        env=environ).exit_code)
if __name__ == '__main__':
    # Delegate to the shared gmock test runner.
    gmock_test_utils.Main()
| mit |
teto/i3pystatus | tests/test_core_util.py | 6 | 8115 | #!/usr/bin/env python
import unittest
from unittest.mock import MagicMock
import string
import random
import types
import pytest
from i3pystatus.core.exceptions import ConfigAmbigiousClassesError, ConfigInvalidModuleError
from i3pystatus.core import util, ClassFinder
def test_lchop_prefixed():
    """lchop removes a matching prefix."""
    assert util.lchop("12345", "12") == "345"
def test_lchop_no_prefix():
    """An empty prefix leaves the string untouched."""
    assert util.lchop("345", "") == "345"
def test_lchop_unmatched():
    """A substring that is not a prefix is not removed."""
    assert util.lchop("12345", "345") == "12345"
@pytest.mark.parametrize("iterable, limit, assrt", [
    ([1, 2, 3, 4], 3, [[1, 2], [3], [4]]),
    ([2, 1, 3, 4], 3, [[1, 2], [3], [4]]),
    ([0.33, 0.45, 0.89], 1, [[0.33, 0.45, 0.89]]),
    ([], 10, []),
])
def test_partition(iterable, limit, assrt):
    """partition splits the iterable into the expected groups."""
    partitions = util.partition(iterable, limit)
    # Sort each partition so the comparison is order-insensitive.
    partitions = [sorted(partition) for partition in partitions]
    for item in assrt:
        assert sorted(item) in partitions
@pytest.mark.parametrize("iterable, predicate, assrt", [
    ([1, 2, 3, 4], lambda x: x < 2, []),
    ([1, 2, 3, 4], lambda x: x < 5 and x > 2, [4, 3]),
    ([1, 2, 3, 4], lambda x: x == 4, [4]),
    ([1, 2, 3, 4], lambda x: True, [4, 3, 2, 1]),
    ([1, 2], lambda x: False, []),
])
def test_popwhile(iterable, predicate, assrt):
    """popwhile yields items popped from the end while predicate holds.

    The expected values show right-to-left order ([4, 3, 2, 1] for an
    always-true predicate on [1, 2, 3, 4]).
    """
    assert list(util.popwhile(predicate, iterable)) == assrt
@pytest.mark.parametrize("valid, required, feed, missing", [
    # ( valid, required, feed, missing )
    (("foo", "bar", "baz"), ("foo",), ("bar",), ("foo",)),
    (("foo", "bar", "baz"), ("foo",), tuple(), ("foo",)),
    (("foo", "bar", "baz"), ("bar", "baz"), ("bar", "baz"), tuple()),
    (("foo", "bar", "baz"), ("bar", "baz"), ("bar", "foo", "baz"), tuple()),
])
def test_keyconstraintdict_missing(valid, required, feed, missing):
    """missing() reports required keys that were not fed in."""
    kcd = util.KeyConstraintDict(valid_keys=valid, required_keys=required)
    kcd.update(dict.fromkeys(feed))
    assert kcd.missing() == set(missing)
class ModuleListTests(unittest.TestCase):
    """Tests for util.ModuleList: appending instances, classes and
    Python modules, with ClassFinder resolving module contents."""

    class ModuleBase:
        # Marker base class the ClassFinder searches for.
        pass

    def setUp(self):
        self.status_handler = MagicMock()
        self.ml = util.ModuleList(self.status_handler, ClassFinder(self.ModuleBase))

    def test_append_simple(self):
        """Appending an instance registers it with the status handler."""
        module = self.ModuleBase()
        module.registered = MagicMock()
        self.ml.append(module)
        module.registered.assert_called_with(self.status_handler)

    def _create_module_class(self, name, bases=None):
        # Helper: build a fake module class with stubbed
        # __init__/registered so calls can be asserted on.
        if not bases:
            bases = (self.ModuleBase,)
        return type(name, bases, {
            "registered": MagicMock(),
            "__init__": MagicMock(return_value=None),
        })

    def test_append_class_instanciation(self):
        """Appending a class instantiates it and registers the instance."""
        module_class = self._create_module_class("module_class")
        self.ml.append(module_class)
        module_class.__init__.assert_called_with()
        module_class.registered.assert_called_with(self.status_handler)

    def test_append_module(self):
        """Appending a module finds its class, instantiates, registers."""
        pymod = types.ModuleType("test_mod")
        pymod.some_class = self._create_module_class("some_class")
        pymod.some_class.__module__ = "test_mod"
        self.ml.append(pymod)
        pymod.some_class.__init__.assert_called_with()
        pymod.some_class.registered.assert_called_with(self.status_handler)

    def test_append_module2(self):
        # Here we test if imported classes are ignored as they should
        # (__module__ differs from the module's own name).
        pymod = types.ModuleType("test_mod")
        pymod.some_class = self._create_module_class("some_class")
        pymod.some_class.__module__ = "other_module"
        with self.assertRaises(ConfigInvalidModuleError):
            self.ml.append(pymod)
        assert not pymod.some_class.__init__.called
        assert not pymod.some_class.registered.called

    def test_ambigious_classdef(self):
        """Two candidate classes in one module is an error."""
        pymod = types.ModuleType("test_mod")
        pymod.some_class = self._create_module_class("some_class")
        pymod.some_class.__module__ = "test_mod"
        pymod.some_other_class = self._create_module_class("some_other_class")
        pymod.some_other_class.__module__ = "test_mod"
        with self.assertRaises(ConfigAmbigiousClassesError):
            self.ml.append(pymod)

    def test_invalid_module(self):
        """A module containing no candidate class is an error."""
        pymod = types.ModuleType("test_mod")
        with self.assertRaises(ConfigInvalidModuleError):
            self.ml.append(pymod)

    def test_append_class_inheritance(self):
        """Indirect subclasses of ModuleBase are accepted too."""
        in_between = self._create_module_class("in_between")
        cls = self._create_module_class("cls", (in_between,))
        self.ml.append(cls)
        cls.__init__.assert_called_with()
        cls.registered.assert_called_with(self.status_handler)
class KeyConstraintDictAdvancedTests(unittest.TestCase):
    """Behavioral tests for util.KeyConstraintDict beyond missing()."""

    def test_invalid_1(self):
        """Setting any key fails when no keys are valid."""
        kcd = util.KeyConstraintDict(valid_keys=tuple(), required_keys=tuple())
        with self.assertRaises(KeyError):
            kcd["invalid"] = True

    def test_invalid_2(self):
        """Setting a key outside valid_keys raises KeyError."""
        kcd = util.KeyConstraintDict(
            valid_keys=("foo", "bar"), required_keys=tuple())
        with self.assertRaises(KeyError):
            kcd["invalid"] = True

    def test_incomplete_iteration(self):
        """Iterating while required keys are missing raises MissingKeys."""
        kcd = util.KeyConstraintDict(
            valid_keys=("foo", "bar"), required_keys=("foo",))
        with self.assertRaises(util.KeyConstraintDict.MissingKeys):
            for x in kcd:
                pass

    def test_completeness(self):
        """Once all required keys are set, iteration succeeds.

        The falsy value (False) shows presence is what counts, not truth.
        """
        kcd = util.KeyConstraintDict(
            valid_keys=("foo", "bar"), required_keys=("foo",))
        kcd["foo"] = False
        for x in kcd:
            pass
        assert kcd.missing() == set()

    def test_remove_required(self):
        """Deleting a required key makes it 'missing' again."""
        kcd = util.KeyConstraintDict(
            valid_keys=("foo", "bar"), required_keys=("foo",))
        kcd["foo"] = None
        assert kcd.missing() == set()
        del kcd["foo"]
        assert kcd.missing() == {"foo"}

    def test_set_twice(self):
        """Overwriting a key keeps it satisfied until it is deleted."""
        kcd = util.KeyConstraintDict(
            valid_keys=("foo", "bar"), required_keys=("foo",))
        kcd["foo"] = 1
        kcd["foo"] = 2
        assert kcd.missing() == set()
        del kcd["foo"]
        assert kcd.missing() == {"foo"}
class FormatPTests(unittest.TestCase):
    """Tests for util.formatp: format strings with [optional sections]
    that collapse when the fields inside them are empty/falsy."""

    def test_escaping(self):
        """Backslash-escaped brackets are emitted literally."""
        assert util.formatp(r"[razamba \[ mabe \]]") == "razamba [ mabe ]"

    def test_numerical(self):
        """Sections collapse when a numeric field is 0."""
        assert util.formatp("[{t} - [schmuh {x}]]", t=1, x=2) == "1 - schmuh 2"
        assert util.formatp("[{t} - [schmuh {x}]]", t=1, x=0) == "1 - "
        assert util.formatp("[{t} - [schmuh {x}]]", t=0, x=0) == ""

    def test_nesting(self):
        """Nested sections drop independently when their fields are empty."""
        s = "[[{artist} - ]{album} - ]{title}"
        assert util.formatp(s, title="Black rose") == "Black rose"
        assert util.formatp(
            s, artist="In Flames", title="Gyroscope") == "Gyroscope"
        assert util.formatp(
            s, artist="SOAD", album="Toxicity", title="Science") == "SOAD - Toxicity - Science"
        assert util.formatp(
            s, album="Toxicity", title="Science") == "Toxicity - Science"

    def test_bare(self):
        """Plain fields without sections behave like str.format."""
        assert util.formatp("{foo} blar", foo="bar") == "bar blar"

    def test_presuffix(self):
        """Literal text before/after a collapsed section survives."""
        assert util.formatp(
            "ALINA[{title} schnacke]KOMMAHER", title="") == "ALINAKOMMAHER"
        assert util.formatp("grml[{title}]") == "grml"
        assert util.formatp("[{t}]grml") == "grml"

    def test_side_by_side(self):
        """Adjacent sections are evaluated independently."""
        s = "{status} [{artist} / [{album} / ]]{title}[ {song_elapsed}/{song_length}]"
        assert util.formatp(s, status="▷", title="Only For The Weak",
                            song_elapsed="1:41", song_length="4:55") == "▷ Only For The Weak 1:41/4:55"
        assert util.formatp(
            s, status="", album="Foo", title="Die, Die, Crucified", song_elapsed="2:52") == " Die, Die, Crucified"
        assert util.formatp("[[{a}][{b}]]", b=1) == "1"

    def test_complex_field(self):
        """Format specs and attribute access work inside sections."""
        class NS:
            pass

        obj = NS()
        obj.attr = "bar"
        s = "[{a:.3f} m]{obj.attr}"
        assert util.formatp(s, a=3.14123456789, obj=obj) == "3.141 mbar"
        assert util.formatp(s, a=0.0, obj=obj) == "bar"
| mit |
orgito/ansible | lib/ansible/modules/network/f5/bigip_profile_dns.py | 14 | 24745 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata (read by ansible-doc tooling).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_profile_dns
short_description: Manage DNS profiles on a BIG-IP
description:
- Manage DNS profiles on a BIG-IP. There are many DNS profiles, each with its
  own adjustments to the standard C(dns) profile. Users of this module should be aware
that many of the adjustable knobs have no module default. Instead, the default is
assigned by the BIG-IP system itself which, in most cases, is acceptable.
version_added: 2.6
options:
name:
description:
- Specifies the name of the DNS profile.
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(dns) profile.
enable_dns_express:
description:
- Specifies whether the DNS Express engine is enabled.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
- The DNS Express engine receives zone transfers from the authoritative DNS server
for the zone. If the C(enable_zone_transfer) setting is also C(yes) on this profile,
the DNS Express engine also responds to zone transfer requests made by the nameservers
configured as zone transfer clients for the DNS Express zone.
type: bool
enable_zone_transfer:
description:
- Specifies whether the system answers zone transfer requests for a DNS zone created
on the system.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
- The C(enable_dns_express) and C(enable_zone_transfer) settings on a DNS profile
affect how the system responds to zone transfer requests.
- When the C(enable_dns_express) and C(enable_zone_transfer) settings are both C(yes),
if a zone transfer request matches a DNS Express zone, then DNS Express answers the
request.
- When the C(enable_dns_express) setting is C(no) and the C(enable_zone_transfer)
setting is C(yes), the BIG-IP system processes zone transfer requests based on the
last action and answers the request from local BIND or a pool member.
type: bool
enable_dnssec:
description:
- Specifies whether the system signs responses with DNSSEC keys and replies to DNSSEC
specific queries (e.g., DNSKEY query type).
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: bool
enable_gtm:
description:
- Specifies whether the system uses Global Traffic Manager to manage the response.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: bool
process_recursion_desired:
description:
- Specifies whether to process client-side DNS packets with Recursion Desired set in
the header.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
- If set to C(no), processing of the packet is subject to the unhandled-query-action
option.
type: bool
use_local_bind:
description:
- Specifies whether the system forwards non-wide IP queries to the local BIND server
on the BIG-IP system.
- For best performance, disable this setting when using a DNS cache.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: bool
enable_dns_firewall:
description:
- Specifies whether DNS firewall capability is enabled.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: bool
enable_cache:
description:
- Specifies whether the system caches DNS responses.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
- When C(yes), the BIG-IP system caches DNS responses handled by the virtual
servers associated with this profile. When you enable this setting, you must
also specify a value for C(cache_name).
- When C(no), the BIG-IP system does not cache DNS responses handled by the
virtual servers associated with this profile. However, the profile retains
the association with the DNS cache in the C(cache_name) parameter. Disable
this setting when you want to debug the system.
type: bool
version_added: 2.7
cache_name:
description:
- Specifies the user-created cache that the system uses to cache DNS responses.
- When you select a cache for the system to use, you must also set C(enable_dns_cache)
to C(yes)
version_added: 2.7
unhandled_query_action:
description:
- Specifies the action to take when a query does not match a Wide IP or a DNS Express Zone.
- When C(allow), the BIG-IP system forwards queries to a DNS server or pool member.
If a pool is not associated with a listener and the Use BIND Server on BIG-IP setting
is set to Enabled, requests are forwarded to the local BIND server.
- When C(drop), the BIG-IP system does not respond to the query.
- When C(reject), the BIG-IP system returns the query with the REFUSED return code.
- When C(hint), the BIG-IP system returns the query with a list of root name servers.
- When C(no-error), the BIG-IP system returns the query with the NOERROR return code.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
choices:
- allow
- drop
- reject
- hint
- no-error
version_added: 2.7
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a DNS profile
bigip_profile_dns:
name: foo
enable_dns_express: no
enable_dnssec: no
enable_gtm: no
process_recursion_desired: no
use_local_bind: no
enable_dns_firewall: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
enable_dns_express:
description: Whether DNS Express is enabled on the resource or not.
returned: changed
type: bool
sample: yes
enable_zone_transfer:
description: Whether zone transfer are enabled on the resource or not.
returned: changed
type: bool
sample: no
enable_dnssec:
description: Whether DNSSEC is enabled on the resource or not.
returned: changed
type: bool
sample: no
enable_gtm:
description: Whether GTM is used to manage the resource or not.
returned: changed
type: bool
sample: yes
process_recursion_desired:
description: Whether client-side DNS packets are processed with Recursion Desired set.
returned: changed
type: bool
sample: yes
use_local_bind:
description: Whether non-wide IP queries are forwarded to the local BIND server or not.
returned: changed
type: bool
sample: no
enable_dns_firewall:
description: Whether DNS firewall capability is enabled or not.
returned: changed
type: bool
sample: no
enable_cache:
description: Whether DNS caching is enabled or not.
returned: changed
type: bool
sample: no
cache_name:
description: Name of the cache used by DNS.
returned: changed
type: str
sample: /Common/cache1
unhandled_query_action:
description: What to do with unhandled queries
returned: changed
type: str
sample: allow
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    # Maps the camelCase attribute names used by the BIG-IP REST API to
    # the snake_case parameter names used by this module.
    api_map = {
        'enableDnsFirewall': 'enable_dns_firewall',
        'useLocalBind': 'use_local_bind',
        'processRd': 'process_recursion_desired',
        'enableGtm': 'enable_gtm',
        'enableDnssec': 'enable_dnssec',
        'processXfr': 'enable_zone_transfer',
        'enableDnsExpress': 'enable_dns_express',
        'defaultsFrom': 'parent',
        'enableCache': 'enable_cache',
        'cache': 'cache_name',
        'unhandledQueryAction': 'unhandled_query_action',
    }

    # API-side attribute names sent when creating/updating the profile.
    api_attributes = [
        'enableDnsFirewall',
        'useLocalBind',
        'processRd',
        'enableGtm',
        'enableDnssec',
        'processXfr',
        'enableDnsExpress',
        'defaultsFrom',
        'cache',
        'enableCache',
        'unhandledQueryAction',
    ]

    # Parameter names included in the module result (consumed by
    # Changes.to_return below).
    returnables = [
        'enable_dns_firewall',
        'use_local_bind',
        'process_recursion_desired',
        'enable_gtm',
        'enable_dnssec',
        'enable_zone_transfer',
        'enable_dns_express',
        'cache_name',
        'enable_cache',
        'unhandled_query_action',
    ]

    # Parameter names compared to detect whether a device update is
    # needed (presumably consumed by a Difference engine later in this
    # module, outside this chunk).
    updatables = [
        'enable_dns_firewall',
        'use_local_bind',
        'process_recursion_desired',
        'enable_gtm',
        'enable_dnssec',
        'enable_zone_transfer',
        'enable_dns_express',
        'cache_name',
        'enable_cache',
        'unhandled_query_action',
    ]
class ApiParameters(Parameters):
    """Parameter view over values as reported by the BIG-IP REST API.

    The API encodes boolean-ish profile options as the strings 'yes'
    and 'no'; each boolean property below normalizes that encoding to
    True/False, returning None when the value is unset.
    """

    def _flag(self, key):
        # Normalize the API's 'yes'/'no' strings to True/False
        # (None when the value was never set).
        value = self._values[key]
        if value is None:
            return None
        return value == 'yes'

    @property
    def enable_dns_firewall(self):
        return self._flag('enable_dns_firewall')

    @property
    def use_local_bind(self):
        return self._flag('use_local_bind')

    @property
    def process_recursion_desired(self):
        return self._flag('process_recursion_desired')

    @property
    def enable_gtm(self):
        return self._flag('enable_gtm')

    @property
    def enable_cache(self):
        return self._flag('enable_cache')

    @property
    def enable_dnssec(self):
        return self._flag('enable_dnssec')

    @property
    def enable_zone_transfer(self):
        return self._flag('enable_zone_transfer')

    @property
    def enable_dns_express(self):
        return self._flag('enable_dns_express')

    @property
    def unhandled_query_action(self):
        # The API spells this value 'noerror'; the module uses 'no-error'.
        value = self._values['unhandled_query_action']
        if value is None:
            return None
        if value == 'noerror':
            return 'no-error'
        return value
class ModuleParameters(Parameters):
    """Parameters supplied by the user to the Ansible module."""

    @property
    def parent(self):
        """Fully-qualified name of the parent profile, or None if unset."""
        parent = self._values['parent']
        if parent is None:
            return None
        return fq_name(self.partition, parent)

    @property
    def cache_name(self):
        """Fully-qualified cache name; None and '' pass through unchanged."""
        name = self._values['cache_name']
        if name is None:
            return None
        if name == '':
            return ''
        return fq_name(self.partition, name)
class Changes(Parameters):
    """Base class for the sets of changed parameters."""

    def to_return(self):
        """Gather every returnable attribute into a filtered dict.

        Deliberately best-effort: if collecting or filtering raises, the
        values accumulated so far are returned unfiltered (possibly {}).
        """
        result = {}
        try:
            for name in self.returnables:
                result[name] = getattr(self, name)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    """Changes serialized into the format the BIG-IP API expects.

    Boolean module options are sent to the API as the strings 'yes'/'no'.
    The previous implementation repeated the same conversion in eight
    properties; it is consolidated behind a single helper.
    """

    @staticmethod
    def _to_api_bool(value):
        # True -> 'yes', any other non-None value -> 'no', None -> None.
        if value is None:
            return None
        return 'yes' if value else 'no'

    @property
    def enable_dns_firewall(self):
        return self._to_api_bool(self._values['enable_dns_firewall'])

    @property
    def use_local_bind(self):
        return self._to_api_bool(self._values['use_local_bind'])

    @property
    def process_recursion_desired(self):
        return self._to_api_bool(self._values['process_recursion_desired'])

    @property
    def enable_gtm(self):
        return self._to_api_bool(self._values['enable_gtm'])

    @property
    def enable_cache(self):
        return self._to_api_bool(self._values['enable_cache'])

    @property
    def enable_dnssec(self):
        return self._to_api_bool(self._values['enable_dnssec'])

    @property
    def enable_zone_transfer(self):
        return self._to_api_bool(self._values['enable_zone_transfer'])

    @property
    def enable_dns_express(self):
        return self._to_api_bool(self._values['enable_dns_express'])

    @property
    def unhandled_query_action(self):
        """Map the module's 'no-error' spelling back to the API's 'noerror'."""
        if self._values['unhandled_query_action'] is None:
            return None
        elif self._values['unhandled_query_action'] == 'no-error':
            return 'noerror'
        return self._values['unhandled_query_action']
class ReportableChanges(Changes):
    """Changes as reported back to the user; no remapping is needed, so
    this inherits to_return() from Changes unchanged."""
    pass
class Difference(object):
    """Computes which parameters differ between desired and current state."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for ``param``, or None when unchanged.

        A property named after ``param`` on this class takes precedence;
        otherwise a plain attribute comparison is performed.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Returns the desired value when it differs from (or is missing
        # on) the current state; falls through to None when equal.
        desired = getattr(self.want, param)
        try:
            if desired != getattr(self.have, param):
                return desired
        except AttributeError:
            return desired
class ModuleManager(object):
    """Drives create/update/delete of the DNS profile on the BIG-IP via
    its REST API, based on the module's desired state."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        # want: desired state from the playbook; have: current device
        # state (populated lazily by update()); changes: what to send.
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create, every user-supplied returnable counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # Diff desired against current state; returns True when anything
        # differs (and records the differences in self.changes).
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # A dict result lets a custom Difference property report
                # several changed keys at once.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point: apply the desired state and return the result dict."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Forward any queued deprecation warnings to Ansible.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Return True when the profile exists on the device.

        NOTE(review): any response other than a 404 (including other API
        errors) is treated as "exists" — confirm this is intentional.
        """
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # NOTE(review): self.changes is a UsableChanges, whose
        # enable_cache property yields 'yes'/'no' strings, so
        # `self.changes.enable_cache is True` can never hold; the guard
        # effectively depends on self.have.enable_cache — confirm intent.
        if self.changes.enable_cache is True or self.have.enable_cache is True:
            if not self.have.cache_name or self.changes.cache_name == '':
                raise F5ModuleError(
                    "To enable DNS cache, a DNS cache must be specified."
                )
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        self._set_changed_options()
        # A cache name is mandatory whenever caching is being enabled.
        if self.want.enable_cache is True and not self.want.cache_name:
            raise F5ModuleError(
                "You must specify a 'cache_name' when creating a DNS profile that sets 'enable_cache' to 'yes'."
            )
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        """POST the new profile to the device, raising F5ModuleError on failure."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update_on_device(self):
        """PATCH only the changed parameters onto the existing profile."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        """GET the profile's current state and wrap it in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the Ansible argument spec for the bigip_profile_dns module."""

    def __init__(self):
        self.supports_check_mode = True
        module_args = {
            'name': {'required': True},
            'parent': {},
            'enable_dns_express': {'type': 'bool'},
            'enable_zone_transfer': {'type': 'bool'},
            'enable_dnssec': {'type': 'bool'},
            'enable_gtm': {'type': 'bool'},
            'process_recursion_desired': {'type': 'bool'},
            'use_local_bind': {'type': 'bool'},
            'enable_dns_firewall': {'type': 'bool'},
            'enable_cache': {'type': 'bool'},
            'unhandled_query_action': {
                'choices': ['allow', 'drop', 'reject', 'hint', 'no-error']
            },
            'cache_name': {},
            'state': {
                'default': 'present',
                'choices': ['present', 'absent']
            },
            'partition': {
                'default': 'Common',
                'fallback': (env_fallback, ['F5_PARTITION'])
            }
        }
        # Start from the common F5 options, then layer on this module's own.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: build the spec, run the manager, report back."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    client = F5RestClient(**module.params)
    try:
        manager = ModuleManager(module=module, client=client)
        results = manager.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as ex:
        # Always release the auth token, then surface the failure.
        cleanup_tokens(client)
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
| gpl-3.0 |
jaruba/chromium.src | chrome/tools/inconsistent-eol.py | 185 | 4330 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Find and fix files with inconsistent line endings.
This script requires 'dos2unix.exe' and 'unix2dos.exe' from Cygwin; they
must be in the user's PATH.
Arg: Either one or more files to examine, or (with --file-list) one or more
files that themselves contain lists of files. The argument(s) passed to
this script, as well as the paths in the file if any, may be relative or
absolute Windows-style paths (with either type of slash). The list might
be generated with 'find -type f' or extracted from a gcl change listing,
for example.
"""
import errno
import logging
import optparse
import subprocess
import sys
# Whether to produce excessive debugging output for each file in the list.
DEBUGGING = False
class Error(Exception):
  """Base exception for errors raised by this script."""
  pass
def CountChars(text, substring):
  """Count non-overlapping occurrences of |substring| in |text|.

  Behaviorally identical to the old len(text.split(...)) - 1 version, but
  avoids building a throwaway list and no longer names its parameter after
  the builtin 'str' (all call sites in this script pass positionally).
  """
  count = text.count(substring)
  logging.debug(count)
  return count
def PrevailingEOLName(crlf, cr, lf):
  """Describe the most common line ending.

  Args:
    crlf: Number of CRLF (\r\n) sequences in the file.
    cr: Number of lone CR (\r) characters, excluding CRLF sequences.
    lf: Number of lone LF (\n) characters, excluding CRLF sequences.

  Returns:
    'cr', 'crlf' or 'lf' for the most common ending; ties favor CR,
    then CRLF.
  """
  winner = max(crlf, cr, lf)
  if winner == cr:
    return 'cr'
  return 'crlf' if winner == crlf else 'lf'
def FixEndings(file, crlf, cr, lf):
  """Convert |file| to CRLF or LF, whichever ending is more common.

  Runs Cygwin's unix2dos.exe / dos2unix.exe via the shell and raises
  Error if the conversion tool fails.
  """
  tool = 'unix2dos.exe' if max(crlf, cr, lf) == crlf else 'dos2unix.exe'
  if subprocess.call('%s %s' % (tool, file), shell=True):
    raise Error('Error running %s %s' % (tool, file))
def ProcessFiles(filelist):
  """Fix line endings in each file in the filelist list.

  NOTE(review): relies on the module-level name |options| (with
  .force_lf) being bound before this runs — confirm it is published as
  a global and not just a local of main().
  """
  for filename in filelist:
    filename = filename.strip()
    logging.debug(filename)
    try:
      # Open in binary mode to preserve existing line endings.
      text = open(filename, 'rb').read()
    except IOError, e:
      # Missing files are only a warning; any other I/O error propagates.
      if e.errno != errno.ENOENT:
        raise
      logging.warning('File %s not found.' % filename)
      continue
    # Count CRLF pairs first so lone \r and lone \n can be separated out.
    crlf = CountChars(text, '\r\n')
    cr = CountChars(text, '\r') - crlf
    lf = CountChars(text, '\n') - crlf
    if options.force_lf:
      if crlf > 0 or cr > 0:
        print '%s: forcing to LF' % filename
        # Fudge the counts to force switching to LF.
        FixEndings(filename, 0, 0, 1)
    else:
      # Only rewrite files that mix more than one ending style.
      if ((crlf > 0 and cr > 0) or
          (crlf > 0 and lf > 0) or
          (  lf > 0 and cr > 0)):
        print '%s: mostly %s' % (filename, PrevailingEOLName(crlf, cr, lf))
        FixEndings(filename, crlf, cr, lf)
def process(options, args):
  """Fix the line endings of the given files.

  With --file-list, each argument names a file whose lines list the
  files to process; otherwise the arguments are the files themselves.
  Returns 0 on success; raises Error when no arguments were given.
  """
  if not args:
    raise Error('No files given.')

  if options.file_list:
    for list_path in args:
      ProcessFiles(open(list_path, 'r').readlines())
  else:
    ProcessFiles(args)
  return 0
def main():
  """Parse command-line options and fix line endings of the given files.

  Returns the exit status from process() for sys.exit().
  """
  # Bug fix: ProcessFiles() reads |options| as a module global, but it
  # used to be bound only as a local of main(), so any run that reached
  # ProcessFiles() died with NameError. Publish it as a module global.
  global options

  if DEBUGGING:
    debug_level = logging.DEBUG
  else:
    debug_level = logging.INFO
  logging.basicConfig(level=debug_level,
                      format='%(asctime)s %(levelname)-7s: %(message)s',
                      datefmt='%H:%M:%S')

  option_parser = optparse.OptionParser()
  option_parser.add_option("", "--file-list", action="store_true",
                           default=False,
                           help="Treat the arguments as files containing "
                                "lists of files to examine, rather than as "
                                "the files to be checked.")
  option_parser.add_option("", "--force-lf", action="store_true",
                           default=False,
                           help="Force any files with CRLF to LF instead.")
  options, args = option_parser.parse_args()

  return process(options, args)


if '__main__' == __name__:
  sys.exit(main())
| bsd-3-clause |
dreamsxin/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/colorama/win32.py | 196 | 4911 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
import ctypes
from ctypes import LibraryLoader
try:
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort, POINTER
)
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
    """struct in wincon.h."""
    # Field layout must match the Win32 CONSOLE_SCREEN_BUFFER_INFO struct
    # exactly: it is filled in by-reference by GetConsoleScreenBufferInfo.
    _fields_ = [
        ("dwSize", wintypes._COORD),
        ("dwCursorPosition", wintypes._COORD),
        ("wAttributes", wintypes.WORD),
        ("srWindow", wintypes.SMALL_RECT),
        ("dwMaximumWindowSize", wintypes._COORD),
    ]

    def __str__(self):
        # Compact one-line debug dump of all struct fields.
        return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
            self.dwSize.Y, self.dwSize.X
            , self.dwCursorPosition.Y, self.dwCursorPosition.X
            , self.wAttributes
            , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
            , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
        )
# ctypes prototypes (argtypes/restype) for the kernel32 console API
# functions used below. Declaring them lets ctypes validate arguments
# and convert return values correctly.
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
    wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE

_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
    wintypes.HANDLE,
    POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL

_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
    wintypes.HANDLE,
    wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL

_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
    wintypes.HANDLE,
    wintypes._COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL

_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
    wintypes.HANDLE,
    c_char,
    wintypes.DWORD,
    wintypes._COORD,
    POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL

_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
    wintypes.HANDLE,
    wintypes.WORD,
    wintypes.DWORD,
    wintypes._COORD,
    POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL

# Std handles are resolved once at import time and reused for every call.
handles = {
    STDOUT: _GetStdHandle(STDOUT),
    STDERR: _GetStdHandle(STDERR),
}
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
    """Return a CONSOLE_SCREEN_BUFFER_INFO for the given console stream.

    The Win32 call's success flag is deliberately ignored (as before);
    on failure the returned struct is simply zero-filled.
    """
    csbi = CONSOLE_SCREEN_BUFFER_INFO()
    # Previously the BOOL result was stored in an unused local; drop it.
    _GetConsoleScreenBufferInfo(handles[stream_id], byref(csbi))
    return csbi
def SetConsoleTextAttribute(stream_id, attrs):
    """Set the console text attributes (colors) for the given stream."""
    return _SetConsoleTextAttribute(handles[stream_id], attrs)
def SetConsoleCursorPosition(stream_id, position):
    """Move the console cursor of the given stream to ``position``, a
    (row, column) pair in 1-based ANSI coordinates."""
    position = wintypes._COORD(*position)
    # If the position is out of range, do nothing.
    if position.Y <= 0 or position.X <= 0:
        return
    # Adjust for Windows' SetConsoleCursorPosition:
    #    1. being 0-based, while ANSI is 1-based.
    #    2. expecting (x,y), while ANSI uses (y,x).
    adjusted_position = wintypes._COORD(position.Y - 1, position.X - 1)
    # Adjust for viewport's scroll position
    sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
    adjusted_position.Y += sr.Top
    adjusted_position.X += sr.Left
    # Resume normal processing
    handle = handles[stream_id]
    return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
    """Write ``char`` ``length`` times starting at COORD ``start`` and
    return the number of characters actually written."""
    handle = handles[stream_id]
    # NOTE(review): c_char() requires a bytes value on Python 3; this
    # presumably expects a single-byte (ANSI) character — confirm what
    # callers pass under Python 3.
    char = c_char(char)
    length = wintypes.DWORD(length)
    num_written = wintypes.DWORD(0)
    # Note that this is hard-coded for ANSI (vs wide) bytes.
    success = _FillConsoleOutputCharacterA(
        handle, char, length, start, byref(num_written))
    return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
    """Set text attribute ``attr`` on ``length`` cells starting at COORD
    ``start``; mirrors the classic Win32 usage
    FillConsoleOutputAttribute(hConsole, csbi.wAttributes, dwConSize,
    coordScreen, &cCharsWritten)."""
    handle = handles[stream_id]
    attribute = wintypes.WORD(attr)
    length = wintypes.DWORD(length)
    num_written = wintypes.DWORD(0)
    # Note that this is hard-coded for ANSI (vs wide) bytes.
    return _FillConsoleOutputAttribute(
        handle, attribute, length, start, byref(num_written))
| lgpl-3.0 |
miaojianxin/client_mjx | libs/jsoncpp-src-0.5.0/scons-tools/srcdist.py | 264 | 5224 | import os
import os.path
from fnmatch import fnmatch
import targz
##def DoxyfileParse(file_contents):
## """
## Parse a Doxygen source file and return a dictionary of all the values.
## Values will be strings and lists of strings.
## """
## data = {}
##
## import shlex
## lex = shlex.shlex(instream = file_contents, posix = True)
## lex.wordchars += "*+./-:"
## lex.whitespace = lex.whitespace.replace("\n", "")
## lex.escape = ""
##
## lineno = lex.lineno
## last_backslash_lineno = lineno
## token = lex.get_token()
## key = token # the first token should be a key
## last_token = ""
## key_token = False
## next_key = False
## new_data = True
##
## def append_data(data, key, new_data, token):
## if new_data or len(data[key]) == 0:
## data[key].append(token)
## else:
## data[key][-1] += token
##
## while token:
## if token in ['\n']:
## if last_token not in ['\\']:
## key_token = True
## elif token in ['\\']:
## pass
## elif key_token:
## key = token
## key_token = False
## else:
## if token == "+=":
## if not data.has_key(key):
## data[key] = list()
## elif token == "=":
## data[key] = list()
## else:
## append_data( data, key, new_data, token )
## new_data = True
##
## last_token = token
## token = lex.get_token()
##
## if last_token == '\\' and token != '\n':
## new_data = False
## append_data( data, key, new_data, '\\' )
##
## # compress lists of len 1 into single strings
## for (k, v) in data.items():
## if len(v) == 0:
## data.pop(k)
##
## # items in the following list will be kept as lists and not converted to strings
## if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
## continue
##
## if len(v) == 1:
## data[k] = v[0]
##
## return data
##
##def DoxySourceScan(node, env, path):
## """
## Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
## any files used to generate docs to the list of source files.
## """
## default_file_patterns = [
## '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
## '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
## '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
## '*.py',
## ]
##
## default_exclude_patterns = [
## '*~',
## ]
##
## sources = []
##
## data = DoxyfileParse(node.get_contents())
##
## if data.get("RECURSIVE", "NO") == "YES":
## recursive = True
## else:
## recursive = False
##
## file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
## exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
##
## for node in data.get("INPUT", []):
## if os.path.isfile(node):
## sources.add(node)
## elif os.path.isdir(node):
## if recursive:
## for root, dirs, files in os.walk(node):
## for f in files:
## filename = os.path.join(root, f)
##
## pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
## exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
##
## if pattern_check and not exclude_check:
## sources.append(filename)
## else:
## for pattern in file_patterns:
## sources.extend(glob.glob("/".join([node, pattern])))
## sources = map( lambda path: env.File(path), sources )
## return sources
##
##
##def DoxySourceScanCheck(node, env):
## """Check if we should scan this file"""
## return os.path.isfile(node.path)
def srcDistEmitter(source, target, env):
    """SCons emitter for the SrcDist builder.

    The Doxygen-derived target/source rewriting that once lived here has
    long been disabled; the emitter is a pure pass-through. The dead
    commented-out implementation has been removed.

    Args:
      source: list of source nodes.
      target: list of target nodes.
      env: SCons construction environment (unused).

    Returns:
      The (target, source) pair, unmodified.
    """
    return (target, source)
def generate(env):
    """Add the SrcDist builder (and its construction variables) to the
    environment when tar/gzip support is available; otherwise leave the
    environment untouched."""
    if not targz.exists(env):
        return
    env['BUILDERS']['SrcDist'] = targz.makeBuilder(srcDistEmitter)
def exists(env):
    """Report whether the srcdist tool can run (delegates to targz)."""
    return targz.exists(env)
| apache-2.0 |
ONSdigital/eq-survey-runner | tests/app/submitter/test_convert_payload_0_0_1.py | 1 | 31260 | from app.data_model.answer_store import AnswerStore
from app.questionnaire.location import Location
from app.questionnaire.questionnaire_schema import QuestionnaireSchema
from app.submitter.convert_payload_0_0_1 import convert_answers_to_payload_0_0_1
from app.submitter.converter import convert_answers
from tests.app.submitter.schema import make_schema
from tests.app.submitter.test_converter import TestConverter, create_answer
class TestConvertPayload001(TestConverter): # pylint: disable=too-many-public-methods
def test_convert_answers_to_payload_0_0_1_with_key_error(self):
    """Answers whose ids do not appear in the schema are dropped; only
    the schema-matched answer ('DEF' -> q_code '002') is emitted."""
    with self._app.test_request_context():
        user_answer = [create_answer('ABC', '2016-01-01', group_id='group-1', block_id='block-1'),
                       create_answer('DEF', '2016-03-30', group_id='group-1', block_id='block-1'),
                       create_answer('GHI', '2016-05-30', group_id='group-1', block_id='block-1')]
        questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
            {
                'id': 'question-1',
                'answers': [
                    {
                        'id': 'LMN',
                        'type': 'TextField',
                        'q_code': '001'
                    },
                    {
                        'id': 'DEF',
                        'type': 'TextField',
                        'q_code': '002'
                    },
                    {
                        'id': 'JKL',
                        'type': 'TextField',
                        'q_code': '003'
                    }
                ]
            }
        ])
        routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
        answer_object = (convert_answers_to_payload_0_0_1(AnswerStore(user_answer), QuestionnaireSchema(questionnaire), routing_path))
        # Only 'DEF' exists in both the store and the schema.
        self.assertEqual(answer_object['002'], '2016-03-30')
        self.assertEqual(len(answer_object), 1)
def test_answer_with_zero(self):
    """Integer 0 is serialized as the string '0' in the payload."""
    with self._app.test_request_context():
        user_answer = [create_answer('GHI', 0, group_id='group-1', block_id='block-1')]
        questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
            {
                'id': 'question-2',
                'answers': [
                    {
                        'id': 'GHI',
                        'type': 'TextField',
                        'q_code': '003'
                    }
                ]
            }
        ])
        routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
        answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
        # Check the converter stringifies the value correctly.
        self.assertEqual('0', answer_object['data']['003'])
def test_answer_with_float(self):
    """Float answers are serialized as their decimal string form."""
    with self._app.test_request_context():
        user_answer = [create_answer('GHI', 10.02, group_id='group-1', block_id='block-1')]
        questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
            {
                'id': 'question-2',
                'answers': [
                    {
                        'id': 'GHI',
                        'type': 'TextField',
                        'q_code': '003'
                    }
                ]
            }
        ])
        routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
        answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
        # Check the converter stringifies the value correctly.
        self.assertEqual('10.02', answer_object['data']['003'])
def test_answer_with_string(self):
    """String answers pass through to the payload unchanged."""
    with self._app.test_request_context():
        user_answer = [create_answer('GHI', 'String test + !', group_id='group-1', block_id='block-1')]
        questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
            {
                'id': 'question-2',
                'answers': [
                    {
                        'id': 'GHI',
                        'type': 'TextField',
                        'q_code': '003'
                    }
                ]
            }
        ])
        routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
        answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
        # Check the converter passes the string through verbatim.
        self.assertEqual('String test + !', answer_object['data']['003'])
def test_answer_with_multiple_instances(self):
    """Repeating answer instances are collected into a list of strings,
    ordered by answer_instance."""
    with self._app.test_request_context():
        user_answer = [create_answer('GHI', 0, group_id='group-1', block_id='block-1'),
                       create_answer('GHI', value=1, answer_instance=1, group_id='group-1', block_id='block-1'),
                       create_answer('GHI', value=2, answer_instance=2, group_id='group-1', block_id='block-1')]
        questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
            {
                'id': 'question-2',
                'answers': [
                    {
                        'id': 'GHI',
                        'type': 'TextField',
                        'q_code': '003'
                    }
                ]
            }
        ])
        routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
        answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
        # Check the converter emits one list entry per instance.
        self.assertEqual(answer_object['data']['003'], ['0', '1', '2'])
def test_answer_without_qcode(self):
    """Answers whose schema entry lacks a q_code are omitted from the payload."""
    with self._app.test_request_context():
        user_answer = [create_answer('GHI', 'String test + !', group_id='group-1', block_id='block-1')]
        questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
            {
                'id': 'question-2',
                'answers': [
                    {
                        'id': 'GHI',
                        'type': 'TextField'
                    }
                ]
            }
        ])
        routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
        answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
        self.assertEqual(len(answer_object['data']), 0)
def test_get_checkbox_answer_with_duplicate_detail_answer_ids(self):
    """Two stored answers sharing the detail-answer id make the converter
    raise a 'Multiple answers found' error."""
    with self._app.test_request_context():
        routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
        answers = [create_answer('crisps-answer', [
            'Ready salted',
            'Other'
        ], group_id='favourite-food', block_id='crisps')]
        # Deliberately store the same detail answer twice.
        answers += [create_answer('other-answer-mandatory', 'Other', group_id='favourite-food', block_id='crisps',
                                  group_instance=1)]
        answers += [create_answer('other-answer-mandatory', 'Other', group_id='favourite-food', block_id='crisps',
                                  group_instance=1)]
        questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
            {
                'id': 'crisps-question',
                'answers': [
                    {
                        'id': 'crisps-answer',
                        'type': 'Checkbox',
                        'options': [
                            {
                                'label': 'Other',
                                'q_code': '4',
                                'description': 'Choose any other flavour',
                                'value': 'Other',
                                'detail_answer': {'id': 'other-answer-mandatory'}
                            }
                        ]
                    }
                ]
            }
        ])
        with self.assertRaises(Exception) as err:
            convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
        self.assertEqual('Multiple answers found for {}'.format('other-answer-mandatory'), str(err.exception))
def test_converter_checkboxes_with_q_codes(self):
    """Each ticked checkbox option is keyed by its own q_code in the payload."""
    with self._app.test_request_context():
        routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
        answers = [create_answer('crisps-answer', [
            'Ready salted',
            'Sweet chilli'
        ], group_id='favourite-food', block_id='crisps')]
        questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
            {
                'id': 'crisps-question',
                'answers': [
                    {
                        'id': 'crisps-answer',
                        'type': 'Checkbox',
                        'options': [
                            {
                                'label': 'Ready salted',
                                'value': 'Ready salted',
                                'q_code': '1'
                            },
                            {
                                'label': 'Sweet chilli',
                                'value': 'Sweet chilli',
                                'q_code': '2'
                            },
                            {
                                'label': 'Cheese and onion',
                                'value': 'Cheese and onion',
                                'q_code': '3'
                            },
                            {
                                'label': 'Other',
                                'q_code': '4',
                                'description': 'Choose any other flavour',
                                'value': 'Other',
                                'detail_answer': {
                                    'mandatory': True,
                                    'id': 'other-answer-mandatory',
                                    'label': 'Please specify other',
                                    'type': 'TextField'
                                }
                            }
                        ]
                    }
                ]
            }
        ])
        # When
        answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
        # Then
        self.assertEqual(len(answer_object['data']), 2)
        self.assertEqual(answer_object['data']['1'], 'Ready salted')
        self.assertEqual(answer_object['data']['2'], 'Sweet chilli')
def test_converter_checkboxes_with_q_codes_and_other_value(self):
    """A filled-in 'Other' detail answer replaces the literal 'Other'
    under the option's q_code."""
    with self._app.test_request_context():
        routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
        answers = [create_answer('crisps-answer', [
            'Ready salted',
            'Other'
        ], group_id='favourite-food', block_id='crisps')]
        answers += [create_answer('other-answer-mandatory', 'Bacon', group_id='favourite-food', block_id='crisps')]
        questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
            {
                'id': 'crisps-question',
                'answers': [
                    {
                        'id': 'crisps-answer',
                        'type': 'Checkbox',
                        'options': [
                            {
                                'label': 'Ready salted',
                                'value': 'Ready salted',
                                'q_code': '1'
                            },
                            {
                                'label': 'Sweet chilli',
                                'value': 'Sweet chilli',
                                'q_code': '2'
                            },
                            {
                                'label': 'Cheese and onion',
                                'value': 'Cheese and onion',
                                'q_code': '3'
                            },
                            {
                                'label': 'Other',
                                'q_code': '4',
                                'description': 'Choose any other flavour',
                                'value': 'Other',
                                'detail_answer': {
                                    'mandatory': True,
                                    'id': 'other-answer-mandatory',
                                    'label': 'Please specify other',
                                    'type': 'TextField'
                                }
                            }
                        ]
                    }
                ]
            }
        ])
        # When
        answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
        # Then
        self.assertEqual(len(answer_object['data']), 2)
        self.assertEqual(answer_object['data']['1'], 'Ready salted')
        self.assertEqual(answer_object['data']['4'], 'Bacon')
def test_converter_checkboxes_with_q_codes_and_empty_other_value(self):
    """An empty 'Other' detail answer falls back to the literal 'Other'
    under the option's q_code."""
    with self._app.test_request_context():
        routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
        answers = [create_answer('crisps-answer', [
            'Ready salted',
            'Other'
        ], group_id='favourite-food', block_id='crisps')]
        answers += [create_answer('other-answer-mandatory', '', group_id='favourite-food', block_id='crisps')]
        questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
            {
                'id': 'crisps-question',
                'answers': [
                    {
                        'id': 'crisps-answer',
                        'type': 'Checkbox',
                        'options': [
                            {
                                'label': 'Ready salted',
                                'value': 'Ready salted',
                                'q_code': '1'
                            },
                            {
                                'label': 'Sweet chilli',
                                'value': 'Sweet chilli',
                                'q_code': '2'
                            },
                            {
                                'label': 'Cheese and onion',
                                'value': 'Cheese and onion',
                                'q_code': '3'
                            },
                            {
                                'label': 'Other',
                                'q_code': '4',
                                'description': 'Choose any other flavour',
                                'value': 'Other',
                                'detail_answer': {
                                    'mandatory': True,
                                    'id': 'other-answer-mandatory',
                                    'label': 'Please specify other',
                                    'type': 'TextField'
                                }
                            }
                        ]
                    }
                ]
            }
        ])
        # When
        answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
        # Then
        self.assertEqual(len(answer_object['data']), 2)
        self.assertEqual(answer_object['data']['1'], 'Ready salted')
        self.assertEqual(answer_object['data']['4'], 'Other')
    def test_converter_q_codes_for_empty_strings(self):
        """An answer whose value is an empty string is omitted from the payload;
        only the non-empty answer appears under its q_code."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
            answers = [create_answer('crisps-answer', '', group_id='favourite-food', block_id='crisps')]
            answers += [
                create_answer('other-crisps-answer', 'Ready salted', group_id='favourite-food', block_id='crisps')]
            questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
                {
                    'id': 'crisps-question',
                    'answers': [
                        {
                            'id': 'crisps-answer',
                            'type': 'TextArea',
                            'options': [],
                            'q_code': '1'
                        },
                        {
                            'id': 'other-crisps-answer',
                            'type': 'TextArea',
                            'options': [],
                            'q_code': '2'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['2'], 'Ready salted')
    def test_radio_answer(self):
        """A Radio answer is exported as the selected option's value."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='radio-group', group_instance=0, block_id='radio-block')]
            user_answer = [create_answer('radio-answer', 'Coffee', group_id='radio-group', block_id='radio-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'radio-block', 'radio-block', [
                {
                    'id': 'radio-question',
                    'answers': [
                        {
                            'type': 'Radio',
                            'id': 'radio-answer',
                            'q_code': '1',
                            'options': [
                                {
                                    'label': 'Coffee',
                                    'value': 'Coffee'
                                },
                                {
                                    'label': 'Tea',
                                    'value': 'Tea'
                                }
                            ]
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], 'Coffee')
    def test_number_answer(self):
        """A Number answer is stringified without rounding."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='number-group', group_instance=0, block_id='number-block')]
            # NOTE(review): group_id here is 'number-block' while routing_path
            # uses 'number-group' -- confirm this mismatch is intentional.
            user_answer = [create_answer('number-answer', 0.9999, group_id='number-block', block_id='number-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'number-block', 'number-block', [
                {
                    'id': 'number-question',
                    'answers': [
                        {
                            'id': 'number-answer',
                            'type': 'Number',
                            'q_code': '1'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], '0.9999')
    def test_percentage_answer(self):
        """A Percentage answer is exported as its string representation."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='percentage-group', group_instance=0, block_id='percentage-block')]
            user_answer = [create_answer('percentage-answer', 100, group_id='percentage-group', block_id='percentage-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'percentage-block', 'percentage-block', [
                {
                    'id': 'percentage-question',
                    'answers': [
                        {
                            'id': 'percentage-answer',
                            'type': 'Percentage',
                            'q_code': '1'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], '100')
    def test_textarea_answer(self):
        """A TextArea answer is exported verbatim under its q_code."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='textarea-group', group_instance=0, block_id='textarea-block')]
            user_answer = [create_answer('textarea-answer', 'example text.', group_id='textarea-group', block_id='textarea-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'textarea-block', 'textarea-block', [
                {
                    'id': 'textarea-question',
                    'answers': [
                        {
                            'id': 'textarea-answer',
                            'q_code': '1',
                            'type': 'TextArea'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], 'example text.')
    def test_currency_answer(self):
        """A Currency answer is stringified preserving decimal places."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='currency-group', group_instance=0, block_id='currency-block')]
            user_answer = [create_answer('currency-answer', 99.99, group_id='currency-group', block_id='currency-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'currency-block', 'currency-block', [
                {
                    'id': 'currency-question',
                    'answers': [
                        {
                            'id': 'currency-answer',
                            'type': 'Currency',
                            'q_code': '1'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], '99.99')
    def test_dropdown_answer(self):
        """A Dropdown answer is exported as the selected option's value."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='dropdown-group', group_instance=0, block_id='dropdown-block')]
            user_answer = [create_answer('dropdown-answer', 'Liverpool', group_id='dropdown-group', block_id='dropdown-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'dropdown-block', 'dropdown-block', [
                {
                    'id': 'dropdown-question',
                    'answers': [
                        {
                            'id': 'dropdown-answer',
                            'type': 'Dropdown',
                            'q_code': '1',
                            'options': [
                                {
                                    'label': 'Liverpool',
                                    'value': 'Liverpool'
                                },
                                {
                                    'label': 'Chelsea',
                                    'value': 'Chelsea'
                                },
                                {
                                    'label': 'Rugby is better!',
                                    'value': 'Rugby is better!'
                                }
                            ]
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], 'Liverpool')
    def test_date_answer(self):
        """Date answers are reformatted: full ISO dates to dd/mm/yyyy and
        month-year dates to mm/yyyy."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='date-group', group_instance=0, block_id='date-block')]
            user_answer = [create_answer('single-date-answer', '1990-02-01', group_id='date-group', block_id='date-block'),
                           create_answer('month-year-answer', '1990-01', group_id='date-group', block_id='date-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'date-block', 'date-block', [
                {
                    'id': 'single-date-question',
                    'answers': [
                        {
                            'id': 'single-date-answer',
                            'type': 'Date',
                            'q_code': '1'
                        }
                    ]
                },
                {
                    'id': 'month-year-question',
                    'answers': [
                        {
                            'id': 'month-year-answer',
                            'type': 'MonthYearDate',
                            'q_code': '2'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 2)
            self.assertEqual(answer_object['data']['1'], '01/02/1990')
            self.assertEqual(answer_object['data']['2'], '01/1990')
    def test_unit_answer(self):
        """A Unit answer is exported as its string representation."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='unit-group', group_instance=0, block_id='unit-block')]
            user_answer = [create_answer('unit-answer', 10, group_id='unit-group', block_id='unit-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'unit-block', 'unit-block', [
                {
                    'id': 'unit-question',
                    'answers': [
                        {
                            'id': 'unit-answer',
                            'type': 'Unit',
                            'q_code': '1'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], '10')
    def test_relationship_answer(self):
        """Repeating Relationship answers collapse into one list under a single
        q_code, preserving answer_instance order."""
        with self._app.test_request_context():
            routing_path = [Location(group_id='relationship-group', group_instance=0, block_id='relationship-block')]
            user_answer = [create_answer('relationship-answer', 'Unrelated', group_id='relationship-group', block_id='relationship-block'),
                           create_answer('relationship-answer', 'Partner', group_id='relationship-group', block_id='relationship-block',
                                         answer_instance=1),
                           create_answer('relationship-answer', 'Husband or wife', group_id='relationship-group', block_id='relationship-block',
                                         answer_instance=2)]
            questionnaire = make_schema('0.0.1', 'section-1', 'relationship-block', 'relationship-block', [
                {
                    'id': 'relationship-question',
                    'type': 'Relationship',
                    'answers': [
                        {
                            'id': 'relationship-answer',
                            'q_code': '1',
                            'type': 'Relationship',
                            'options': [
                                {
                                    'label': 'Husband or wife',
                                    'value': 'Husband or wife'
                                },
                                {
                                    'label': 'Partner',
                                    'value': 'Partner'
                                },
                                {
                                    'label': 'Unrelated',
                                    'value': 'Unrelated'
                                }
                            ]
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            # Instances 0..2 are kept in order in a single list.
            self.assertEqual(answer_object['data']['1'], ['Unrelated', 'Partner', 'Husband or wife'])
| mit |
JohnOrlando/gnuradio-bitshark | gnuradio-core/src/python/gnuradio/blks2impl/pfb_decimator.py | 5 | 1791 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
class pfb_decimator_ccf(gr.hier_block2):
    '''
    Make a Polyphase Filter decimator (complex in, complex out, floating-point taps)

    This simplifies the interface by allowing a single input stream to connect to this block.
    It will then output a stream that is the decimated output stream.
    '''
    def __init__(self, decim, taps, channel=0):
        """
        Args:
            decim: integer decimation rate (number of polyphase branches).
            taps: prototype FIR filter taps shared across the branches.
            channel: which of the decim output channels to emit (default 0).
        """
        gr.hier_block2.__init__(self, "pfb_decimator_ccf",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
        self._decim = decim
        self._taps = taps
        self._channel = channel
        # Deinterleave the single input into decim parallel streams (one per
        # polyphase branch) feeding the underlying PFB decimator.
        self.s2ss = gr.stream_to_streams(gr.sizeof_gr_complex, self._decim)
        self.pfb = gr.pfb_decimator_ccf(self._decim, self._taps, self._channel)
        self.connect(self, self.s2ss)
        # Fix: use range() instead of the Python-2-only xrange() so the module
        # stays importable under Python 3 (behavior identical on Python 2).
        for i in range(self._decim):
            self.connect((self.s2ss, i), (self.pfb, i))
        self.connect(self.pfb, self)
| gpl-3.0 |
dmitrijus/hltd | test/testRunStop.py | 2 | 3495 | #!/bin/env python
import os,sys,shutil,time
def lineSearch(logfile, *args):
    """Poll *logfile* until a line containing every substring in *args* appears.

    Returns the matching line. If roughly 10 seconds pass without the log
    producing new data, prints an error and aborts via sys.exit(-1).
    """
    searchTimeout = 10.
    searchTimeAccum = 0.
    while True:
        line = logfile.readline()
        if not line:
            # Bug fix: the original 'continue' skipped the timeout check on
            # the empty-read path, so a silent log file spun this loop forever.
            # Check the accumulated wait *before* sleeping again.
            if searchTimeAccum >= searchTimeout:
                print("Error: 10s timeout waiting hlt to log a line containing all of: " + str(args))
                sys.exit(-1)
            time.sleep(0.1)
            searchTimeAccum += 0.1
            continue
        # A line arrived: succeed if it contains every requested substring.
        if all(line.find(arg) != -1 for arg in args):
            return line
        # Preserve the original behavior of also checking the timeout after a
        # non-matching line (the accumulator only grows while waiting).
        if searchTimeAccum >= searchTimeout:
            print("Error: 10s timeout waiting hlt to log a line containing all of: " + str(args))
            sys.exit(-1)
# --- Fixture constants for the hltd run start/stop test ----------------------
testBuDir = '/fff/BU/ramdisk'
testDataDir = '/fff/data'
testCMSSWcfg = 'testEmptySource.py'
cmssw_version = 'CMSSW_7_0_0'
scram_arch = 'slc6_amd64_gcc481'
testRunNumber = 999998
testRunNumber2 = 999999
RunDirPrefix = 'run'
EoRDirPrefix = 'end'
menu_dir = testBuDir+'/hlt'
cmssw_config = menu_dir+'/HltConfig.py'
# Kill any leftover daemons/processes and restart hltd from a clean slate.
print "test script 1! killing all cmssw and restarting hltd"
os.system("killall hltd")
os.system("killall cmsRun")
time.sleep(1)
os.system("/etc/init.d/hltd restart")
# Recreate the HLT menu directory containing the test CMSSW configuration.
try:
    shutil.rmtree(menu_dir)
except OSError as oserror:
    print "no old dir to delete.OK"
os.mkdir(menu_dir)
shutil.copy(testCMSSWcfg,cmssw_config)
fcmssw = open(menu_dir+'/CMSSW_VERSION','w')
fcmssw.write(cmssw_version)
fcmssw.close()
fscram = open(menu_dir+'/SCRAM_ARCH','w')
fscram.write(scram_arch)
fscram.close()
#create input run dir (empty)
try:
    shutil.rmtree(testBuDir+'/'+RunDirPrefix+str(testRunNumber))
except OSError as oserror:
    print "no old dir to delete.OK"
try:
    shutil.rmtree(testBuDir+'/'+RunDirPrefix+str(testRunNumber2))
except OSError as oserror:
    print "no old dir to delete.OK"
try:
    os.remove(testDataDir+'/'+EoRDirPrefix+str(testRunNumber))
except OSError as oserror:
    print "no old file to delete.OK"
try:
    os.remove(testDataDir+'/'+EoRDirPrefix+str(testRunNumber2))
except OSError as oserror:
    print "no old file to delete.OK"
os.mkdir(testBuDir+'/'+RunDirPrefix+str(testRunNumber))
#open hltd log for reading
logfile = open("/var/log/hltd.log","rt")
logfile.seek(0,2)#goto file end
#while True:
#    line = logfile.readline()
#    if not line:
#        break
#fire up a new run by creating a data dir (avoiding cgi here)
print "starting run for hltd"
try:
    shutil.rmtree(testDataDir+'/'+RunDirPrefix+str(testRunNumber))
except OSError as oserror:
    print "no old dir to delete.OK"
time.sleep(0.5)
os.mkdir(testDataDir+'/'+RunDirPrefix+str(testRunNumber))
#wait until hltd reacts
time.sleep(0.5)
print "created data dir"
#look at the hltd log
retval = lineSearch(logfile,"started process")
print "hltd printed line: " + retval
time.sleep(1)
#signal EoR
print "writing EoR file"
eorFile = open(testDataDir+'/'+EoRDirPrefix+str(testRunNumber),"w")
eorFile.close()
time.sleep(0.3)
retval = lineSearch(logfile,"end run")
print "hltd printed line: " + retval
time.sleep(2)
#second set of test run dirs
os.mkdir(testBuDir+'/'+RunDirPrefix+str(testRunNumber2))
print "starting run " + str(testRunNumber2) +" for hltd"
try:
    shutil.rmtree(testDataDir+'/'+RunDirPrefix+str(testRunNumber2))
except OSError as oserror:
    print "no old dir to delete.OK"
time.sleep(0.1)
os.mkdir(testDataDir+'/'+RunDirPrefix+str(testRunNumber2))
time.sleep(1)
logfile.seek(0,2)
# Kill the running cmsRun and verify hltd respawns a replacement process.
print "running killall cmsRun"
os.system("killall cmsRun")
time.sleep(1)
print "waiting for next..."
retval = lineSearch(logfile,"started process")
print "hltd printed line: " + retval
############################
sys.exit(0)
| lgpl-3.0 |
dwysocki/ASTP-601-602 | presentations/fourier-teaser/py/plot.py | 2 | 1520 | #!/usr/bin/env python3
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
def main():
    """Parse CLI args, load the photometry file, and plot its light curve.

    Usage: plot [phot file] [period] [star name] [output file]

    Returns a nonzero status on a usage error (the original called bare
    exit(), which exits with status 0 even though the run failed).
    """
    if len(sys.argv) != 5:
        print("Usage: plot [phot file] [period] [star name] [output file]",
              file=sys.stderr)
        return 1  # bug fix: signal the usage error to the shell
    filename, period, name, output = sys.argv[1:]
    period = float(period)
    data = np.loadtxt(filename)
    plot_lightcurve(name, data, period, output)
def plot_lightcurve(name, data, period, output):
    """Render a light curve (phase-folded when *period* > 0) and save it.

    *data* is an (N, 3) array of columns (time, magnitude, error); the figure
    is written to *output* and closed afterwards.
    """
    figure, axes = plt.subplots(figsize=(6.97, 4.31))

    # Magnitudes grow downward, so flip the y-axis; folded plots show two
    # full phases side by side.
    axes.invert_yaxis()
    folded = period > 0
    if folded:
        axes.set_xlim(0, 2)

    time, mag, error = data.T
    if folded:
        time = (time / period) % 1.0
        time = np.hstack((time, 1 + time))
        mag = np.hstack((mag, mag))
        error = np.hstack((error, error))

    axes.errorbar(time, mag, yerr=error,
                  color="darkblue",
                  ls='None',
                  ms=.01, mew=.01, capsize=0)

    if folded:
        axes.set_xlabel('Phase ({0:0.7} day period)'.format(period))
    else:
        axes.set_xlabel('Time (days)')
    axes.set_ylabel('Magnitude')
    axes.xaxis.set_minor_locator(AutoMinorLocator(5))
    axes.yaxis.set_minor_locator(AutoMinorLocator(5))
    axes.set_title(name)

    figure.tight_layout(pad=0.1)
    figure.savefig(output)
    plt.close(figure)
if __name__ == "__main__":
exit(main())
| mit |
hgl888/chromium-crosswalk | chrome/test/mini_installer/uninstall_chrome.py | 123 | 2753 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Uninstalls Chrome.
This script reads the uninstall command from registry, calls it, and verifies
the output status code.
"""
import _winreg
import optparse
import subprocess
import sys
def main():
    """Read Chrome's uninstall command from the Windows registry, run it, and
    verify the installer's exit status.

    Returns 0 on success (or when the key is absent and --no-error-if-absent
    was passed), 1 if the user aborts interactively; raises KeyError when the
    registry key is missing and an Exception when the uninstaller fails.
    """
    parser = optparse.OptionParser(description='Uninstall Chrome.')
    parser.add_option('--system-level', action='store_true', dest='system_level',
                      default=False, help='Uninstall Chrome at system level.')
    parser.add_option('--chrome-long-name', default='Google Chrome',
                      help='Google Chrome or Chromium)')
    parser.add_option('--interactive', action='store_true', dest='interactive',
                      default=False, help='Ask before uninstalling Chrome.')
    parser.add_option('--no-error-if-absent', action='store_true',
                      dest='no_error_if_absent', default=False,
                      help='No error if the registry key for uninstalling Chrome '
                           'is absent.')
    options, _ = parser.parse_args()
    # TODO(sukolsak): Add support for uninstalling MSI-based Chrome installs when
    # we support testing MSIs.
    # System-level installs register under HKLM; user-level under HKCU.
    if options.system_level:
        root_key = _winreg.HKEY_LOCAL_MACHINE
    else:
        root_key = _winreg.HKEY_CURRENT_USER
    sub_key = ('SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\%s' %
               options.chrome_long_name)
    # Query the key. It will throw a WindowsError if the key doesn't exist.
    try:
        key = _winreg.OpenKey(root_key, sub_key, 0, _winreg.KEY_QUERY_VALUE)
    except WindowsError:
        if options.no_error_if_absent:
            return 0
        raise KeyError('Registry key %s\\%s is missing' % (
            'HKEY_LOCAL_MACHINE' if options.system_level else 'HKEY_CURRENT_USER',
            sub_key))
    if options.interactive:
        prompt = ('Warning: This will uninstall %s at %s. Do you want to continue? '
                  '(y/N) ' % (options.chrome_long_name,
                              'system-level' if
                              options.system_level else 'user-level'))
        if raw_input(prompt).strip() != 'y':
            print >> sys.stderr, 'User aborted'
            return 1
    uninstall_string, _ = _winreg.QueryValueEx(key, 'UninstallString')
    # --force-uninstall makes the Chrome installer run non-interactively.
    exit_status = subprocess.call(uninstall_string + ' --force-uninstall',
                                  shell=True)
    # The exit status for successful uninstallation of Chrome is 19 (see
    # chrome/installer/util/util_constants.h).
    if exit_status != 19:
        raise Exception('Could not uninstall Chrome. The installer exited with '
                        'status %d.' % exit_status)
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause |
edgarRd/incubator-airflow | airflow/contrib/operators/sftp_operator.py | 2 | 4578 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.ssh_hook import SSHHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SFTPOperation(object):
    """Enumeration of the transfer directions supported by SFTPOperator."""
    PUT = 'put'  # upload: local file -> remote host
    GET = 'get'  # download: remote host -> local file
class SFTPOperator(BaseOperator):
    """
    SFTPOperator for transferring files from remote host to local or vice versa.
    This operator uses ssh_hook to open an sftp transport channel that serves
    as the basis for the file transfer.

    :param ssh_hook: predefined ssh_hook to use for remote execution
    :type ssh_hook: :class:`SSHHook`
    :param ssh_conn_id: connection id from airflow Connections
    :type ssh_conn_id: str
    :param remote_host: remote host to connect (templated)
    :type remote_host: str
    :param local_filepath: local file path to get or put. (templated)
    :type local_filepath: str
    :param remote_filepath: remote file path to get or put. (templated)
    :type remote_filepath: str
    :param operation: specify operation 'get' or 'put', defaults to put
    :type operation: str
    :param confirm: specify if the SFTP operation should be confirmed, defaults to True
    :type confirm: bool
    """
    template_fields = ('local_filepath', 'remote_filepath', 'remote_host')

    @apply_defaults
    def __init__(self,
                 ssh_hook=None,
                 ssh_conn_id=None,
                 remote_host=None,
                 local_filepath=None,
                 remote_filepath=None,
                 operation=SFTPOperation.PUT,
                 confirm=True,
                 *args,
                 **kwargs):
        super(SFTPOperator, self).__init__(*args, **kwargs)
        self.ssh_hook = ssh_hook
        self.ssh_conn_id = ssh_conn_id
        self.remote_host = remote_host
        self.local_filepath = local_filepath
        self.remote_filepath = remote_filepath
        self.operation = operation
        self.confirm = confirm
        # Fail fast at construction time on an unknown operation value.
        if not (self.operation.lower() == SFTPOperation.GET or
                self.operation.lower() == SFTPOperation.PUT):
            raise TypeError("unsupported operation value {0}, expected {1} or {2}"
                            .format(self.operation, SFTPOperation.GET, SFTPOperation.PUT))

    def execute(self, context):
        """Open an SFTP session via the hook and perform the configured
        get/put transfer; wraps any failure in AirflowException."""
        file_msg = None
        try:
            # Build a hook from the connection id when no hook was supplied.
            if self.ssh_conn_id and not self.ssh_hook:
                self.ssh_hook = SSHHook(ssh_conn_id=self.ssh_conn_id)
            if not self.ssh_hook:
                raise AirflowException("can not operate without ssh_hook or ssh_conn_id")
            # An explicit remote_host overrides the one from the connection.
            if self.remote_host is not None:
                self.ssh_hook.remote_host = self.remote_host
            with self.ssh_hook.get_conn() as ssh_client:
                sftp_client = ssh_client.open_sftp()
                if self.operation.lower() == SFTPOperation.GET:
                    file_msg = "from {0} to {1}".format(self.remote_filepath,
                                                        self.local_filepath)
                    self.log.debug("Starting to transfer %s", file_msg)
                    sftp_client.get(self.remote_filepath, self.local_filepath)
                else:
                    file_msg = "from {0} to {1}".format(self.local_filepath,
                                                        self.remote_filepath)
                    self.log.debug("Starting to transfer file %s", file_msg)
                    sftp_client.put(self.local_filepath,
                                    self.remote_filepath,
                                    confirm=self.confirm)
        except Exception as e:
            # NOTE(review): this broad except also re-wraps the
            # AirflowException raised above -- confirm that is intended.
            raise AirflowException("Error while transferring {0}, error: {1}"
                                   .format(file_msg, str(e)))
        return None
| apache-2.0 |
Kodiack/WhileyWeb | lib/cherrypy/_cpmodpy.py | 82 | 11201 | """Native adapter for serving CherryPy via mod_python
Basic usage:
##########################################
# Application in a module called myapp.py
##########################################
import cherrypy
class Root:
@cherrypy.expose
def index(self):
return 'Hi there, Ho there, Hey there'
# We will use this method from the mod_python configuration
# as the entry point to our application
def setup_server():
cherrypy.tree.mount(Root())
cherrypy.config.update({'environment': 'production',
'log.screen': False,
'show_tracebacks': False})
##########################################
# mod_python settings for apache2
# This should reside in your httpd.conf
# or a file that will be loaded at
# apache startup
##########################################
# Start
DocumentRoot "/"
Listen 8080
LoadModule python_module /usr/lib/apache2/modules/mod_python.so
<Location "/">
PythonPath "sys.path+['/path/to/my/application']"
SetHandler python-program
PythonHandler cherrypy._cpmodpy::handler
PythonOption cherrypy.setup myapp::setup_server
PythonDebug On
</Location>
# End
The actual path to your mod_python.so is dependent on your
environment. In this case we suppose a global mod_python
installation on a Linux distribution such as Ubuntu.
We do set the PythonPath configuration setting so that
your application can be found by the user running
the apache2 instance. Of course if your application
resides in the global site-packages this won't be needed.
Then restart apache2 and access http://127.0.0.1:8080
"""
import logging
import sys
import cherrypy
from cherrypy._cpcompat import BytesIO, copyitems, ntob
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
# ------------------------------ Request-handling
def setup(req):
    """One-time per-process initialization: run user setup hooks, silence the
    CherryPy server/plugins that mod_python replaces, route CherryPy logging
    into Apache's error log, and start the engine."""
    from mod_python import apache

    # Run any setup functions defined by a "PythonOption cherrypy.setup" directive.
    options = req.get_options()
    if 'cherrypy.setup' in options:
        for function in options['cherrypy.setup'].split():
            atoms = function.split('::', 1)
            if len(atoms) == 1:
                mod = __import__(atoms[0], globals(), locals())
            else:
                modname, fname = atoms
                mod = __import__(modname, globals(), locals(), [fname])
                func = getattr(mod, fname)
                func()

    cherrypy.config.update({'log.screen': False,
                            "tools.ignore_headers.on": True,
                            "tools.ignore_headers.headers": ['Range'],
                            })

    engine = cherrypy.engine
    # Apache owns the process: detach CherryPy's own signal/console handlers,
    # autoreloader, and HTTP server so they do not fight mod_python.
    if hasattr(engine, "signal_handler"):
        engine.signal_handler.unsubscribe()
    if hasattr(engine, "console_control_handler"):
        engine.console_control_handler.unsubscribe()
    engine.autoreload.unsubscribe()
    cherrypy.server.unsubscribe()

    def _log(msg, level):
        # Map stdlib logging levels onto Apache's APLOG_* levels.
        newlevel = apache.APLOG_ERR
        if logging.DEBUG >= level:
            newlevel = apache.APLOG_DEBUG
        elif logging.INFO >= level:
            newlevel = apache.APLOG_INFO
        elif logging.WARNING >= level:
            newlevel = apache.APLOG_WARNING
        # On Windows, req.server is required or the msg will vanish. See
        # http://www.modpython.org/pipermail/mod_python/2003-October/014291.html.
        # Also, "When server is not specified...LogLevel does not apply..."
        apache.log_error(msg, newlevel, req.server)
    engine.subscribe('log', _log)

    engine.start()

    def cherrypy_cleanup(data):
        engine.exit()
    try:
        # apache.register_cleanup wasn't available until 3.1.4.
        apache.register_cleanup(cherrypy_cleanup)
    except AttributeError:
        req.server.register_cleanup(req, cherrypy_cleanup)
class _ReadOnlyRequest:
expose = ('read', 'readline', 'readlines')
def __init__(self, req):
for method in self.expose:
self.__dict__[method] = getattr(req, method)
# Module-level state used by handler():
# recursive: when True, an InternalRedirect may revisit a URL already seen in
# the current redirect chain instead of raising RuntimeError.
recursive = False
# _isSetUp: guards the one-time setup() call in this process.
_isSetUp = False
def handler(req):
    """mod_python entry point: translate the Apache request into a CherryPy
    request/response cycle (following internal redirects) and stream the
    response back through *req*. Always returns apache.OK; errors are
    reported as a CherryPy bare-error response."""
    from mod_python import apache
    try:
        global _isSetUp
        if not _isSetUp:
            setup(req)
            _isSetUp = True

        # Obtain a Request object from CherryPy
        local = req.connection.local_addr
        local = httputil.Host(local[0], local[1], req.connection.local_host or "")
        remote = req.connection.remote_addr
        remote = httputil.Host(remote[0], remote[1], req.connection.remote_host or "")

        scheme = req.parsed_uri[0] or 'http'
        req.get_basic_auth_pw()

        try:
            # apache.mpm_query only became available in mod_python 3.1
            q = apache.mpm_query
            threaded = q(apache.AP_MPMQ_IS_THREADED)
            forked = q(apache.AP_MPMQ_IS_FORKED)
        except AttributeError:
            bad_value = ("You must provide a PythonOption '%s', "
                         "either 'on' or 'off', when running a version "
                         "of mod_python < 3.1")
            # NOTE(review): 'options' is not defined in this scope (setup()
            # has its own local); this fallback would raise NameError on
            # mod_python < 3.1 -- confirm.
            threaded = options.get('multithread', '').lower()
            if threaded == 'on':
                threaded = True
            elif threaded == 'off':
                threaded = False
            else:
                raise ValueError(bad_value % "multithread")

            forked = options.get('multiprocess', '').lower()
            if forked == 'on':
                forked = True
            elif forked == 'off':
                forked = False
            else:
                raise ValueError(bad_value % "multiprocess")

        sn = cherrypy.tree.script_name(req.uri or "/")
        if sn is None:
            send_response(req, '404 Not Found', [], '')
        else:
            app = cherrypy.tree.apps[sn]
            method = req.method
            path = req.uri
            qs = req.args or ""
            reqproto = req.protocol
            headers = copyitems(req.headers_in)
            rfile = _ReadOnlyRequest(req)
            prev = None

            try:
                redirections = []
                while True:
                    request, response = app.get_serving(local, remote, scheme,
                                                        "HTTP/1.1")
                    request.login = req.user
                    request.multithread = bool(threaded)
                    request.multiprocess = bool(forked)
                    request.app = app
                    request.prev = prev

                    # Run the CherryPy Request object and obtain the response
                    try:
                        request.run(method, path, qs, reqproto, headers, rfile)
                        break
                    except cherrypy.InternalRedirect:
                        ir = sys.exc_info()[1]
                        app.release_serving()
                        prev = request

                        # Guard against redirect loops unless explicitly allowed.
                        if not recursive:
                            if ir.path in redirections:
                                raise RuntimeError("InternalRedirector visited the "
                                                   "same URL twice: %r" % ir.path)
                            else:
                                # Add the *previous* path_info + qs to redirections.
                                if qs:
                                    qs = "?" + qs
                                redirections.append(sn + path + qs)

                        # Munge environment and try again.
                        method = "GET"
                        path = ir.path
                        qs = ir.query_string
                        rfile = BytesIO()

                send_response(req, response.output_status, response.header_list,
                              response.body, response.stream)
            finally:
                app.release_serving()
    except:
        tb = format_exc()
        cherrypy.log(tb, 'MOD_PYTHON', severity=logging.ERROR)
        s, h, b = bare_error()
        send_response(req, s, h, b)
    return apache.OK
def send_response(req, status, headers, body, stream=False):
    """Copy a CherryPy (status, headers, body) triple onto the mod_python
    request *req*. When *stream* is true, status and headers are flushed
    before the body is written so they go out immediately."""
    # Set response status
    req.status = int(status[:3])

    # Set response headers
    req.content_type = "text/plain"
    for header, value in headers:
        # Content-Type must go through the dedicated attribute, not headers_out.
        if header.lower() == 'content-type':
            req.content_type = value
            continue
        req.headers_out.add(header, value)

    if stream:
        # Flush now so the status and headers are sent immediately.
        req.flush()

    # Set response body
    # NOTE(review): 'basestring' is Python-2-only; presumably a compat shim is
    # expected here for Python 3 -- confirm.
    if isinstance(body, basestring):
        req.write(body)
    else:
        # Iterable body: write each segment as produced.
        for seg in body:
            req.write(seg)
# --------------- Startup tools for CherryPy + mod_python --------------- #
import os
import re
try:
    import subprocess
    def popen(fullcmd):
        """Run *fullcmd* through the shell, returning a pipe over its
        combined stdout/stderr output."""
        p = subprocess.Popen(fullcmd, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             close_fds=True)
        return p.stdout
except ImportError:
    def popen(fullcmd):
        # Fallback for ancient Pythons that lack the subprocess module;
        # os.popen4 also merges stdout and stderr.
        pipein, pipeout = os.popen4(fullcmd)
        return pipeout
def read_process(cmd, args=""):
    """Run "*cmd* *args*" and return its full output.

    Raises IOError when the first output line indicates the executable was
    not found (the command itself never raises on a missing binary because
    it goes through the shell).
    """
    fullcmd = "%s %s" % (cmd, args)
    pipeout = popen(fullcmd)
    try:
        firstline = pipeout.readline()
        if (re.search(ntob("(not recognized|No such file|not found)"), firstline,
                      re.IGNORECASE)):
            raise IOError('%s must be on your system path.' % cmd)
        output = firstline + pipeout.read()
    finally:
        pipeout.close()
    return output
class ModPythonServer(object):
    """Helper that renders a minimal Apache/mod_python configuration file and
    starts/stops an Apache instance serving CherryPy through it.

    Parameters mirror the generated config: *loc* is the <Location> path,
    *port* the Listen port, *opts* an iterable of (name, value) PythonOption
    pairs, *apache_path* the Apache binary to invoke, and *handler* the
    mod_python handler entry point.
    """
    template = """
# Apache2 server configuration file for running CherryPy with mod_python.

DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so

<Location %(loc)s>
    SetHandler python-program
    PythonHandler %(handler)s
    PythonDebug On
%(opts)s
</Location>
"""

    def __init__(self, loc="/", port=80, opts=None, apache_path="apache",
                 handler="cherrypy._cpmodpy::handler"):
        self.loc = loc
        self.port = port
        self.opts = opts
        self.apache_path = apache_path
        self.handler = handler

    def start(self):
        """Write the rendered template next to this module as cpmodpy.conf,
        then start Apache with it. Returns Apache's console output and sets
        self.ready."""
        opts = "".join(["    PythonOption %s %s\n" % (k, v)
                        for k, v in self.opts])
        conf_data = self.template % {"port": self.port,
                                     "loc": self.loc,
                                     "opts": opts,
                                     "handler": self.handler,
                                     }

        mpconf = os.path.join(os.path.dirname(__file__), "cpmodpy.conf")
        f = open(mpconf, 'wb')
        try:
            f.write(conf_data)
        finally:
            f.close()

        response = read_process(self.apache_path, "-k start -f %s" % mpconf)
        self.ready = True
        return response

    def stop(self):
        """Stop the Apache instance started by start()."""
        # Bug fix: honor the configured apache_path (as start() does) instead
        # of the previously hard-coded "apache" binary name.
        os.popen("%s -k stop" % self.apache_path)
        self.ready = False
| bsd-3-clause |
SNoiraud/gramps | gramps/gen/plug/report/_reportbase.py | 9 | 3440 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001 David R. Hampton
# Copyright (C) 2001-2006 Donald N. Allingham
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
from ...utils.grampslocale import GrampsLocale
from ...display.name import NameDisplay
from ...config import config
#-------------------------------------------------------------------------
#
# Report
#
#-------------------------------------------------------------------------
class Report:
    """
    The Report base class. This is a base class for generating
    customized reports. It cannot be used as is, but it can be easily
    sub-classed to create a functional report generator.
    """

    def __init__(self, database, options_class, user):
        self.database = database
        self.options_class = options_class
        self._user = user

        # Output document object supplied by the report options.
        self.doc = options_class.get_document()

        creator = database.get_researcher().get_name()
        self.doc.set_creator(creator)

        # With an output target the report owns (opens/closes) the document;
        # otherwise the caller manages the document lifecycle.
        output = options_class.get_output()
        if output:
            self.standalone = True
            self.doc.open(options_class.get_output())
        else:
            self.standalone = False

    def begin_report(self):
        """Hook run before write_report(); subclasses may override."""
        pass

    def set_locale(self, language):
        """
        Set the translator to one selected with
        stdoptions.add_localization_option().
        """
        from ...datehandler import LANG_TO_DISPLAY, main_locale
        if language == GrampsLocale.DEFAULT_TRANSLATION_STR: # the UI language
            locale = glocale
        elif language in LANG_TO_DISPLAY: # a displayer exists
            locale = LANG_TO_DISPLAY[main_locale[language]]._locale
        else: # no displayer
            locale = GrampsLocale(lang=language)
        # Cache the locale's helpers on the instance for subclass use.
        self._ = locale.translation.sgettext
        self._get_date = locale.get_date
        self._get_type = locale.get_type
        self._ldd = locale.date_displayer
        self.doc.set_rtl_doc(locale.rtl_locale)
        self._name_display = NameDisplay(locale) # a legacy/historical name
        self._name_display.set_name_format(self.database.name_formats)
        fmt_default = config.get('preferences.name-format')
        self._name_display.set_default_format(fmt_default)
        self._locale = locale # define it here rather than in every report
        return locale

    def write_report(self):
        """Produce the report body; subclasses must override."""
        pass

    def end_report(self):
        # Only close the document if this report opened it (standalone mode).
        if self.standalone:
            self.doc.close()
| gpl-2.0 |
codelv/enaml-native | src/enamlnative/widgets/flexbox.py | 1 | 8205 | """
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
from atom.api import (
Typed, ForwardTyped, Bool, Tuple, Float, Int, Enum, observe, set_default
)
from enaml.core.declarative import d_
from .view_group import ViewGroup, ProxyViewGroup
class ProxyFlexbox(ProxyViewGroup):
    """ The abstract definition of a proxy Flexbox object.

    Concrete toolkit backends subclass this and implement each setter;
    every method here deliberately raises NotImplementedError.
    """
    #: A reference to the Flexbox declaration.
    declaration = ForwardTyped(lambda: Flexbox)
    # --- alignment of children within the container ---------------------
    def set_align_content(self, alignment):
        raise NotImplementedError
    def set_align_items(self, alignment):
        raise NotImplementedError
    def set_align_self(self, alignment):
        raise NotImplementedError
    # --- flex item behaviour --------------------------------------------
    def set_flex_direction(self, direction):
        raise NotImplementedError
    def set_flex_grow(self, grow):
        raise NotImplementedError
    def set_flex_shrink(self, shrink):
        raise NotImplementedError
    def set_flex_basis(self, basis):
        raise NotImplementedError
    def set_flex_wrap(self, wrap):
        raise NotImplementedError
    # --- positioning ----------------------------------------------------
    def set_left(self, left):
        raise NotImplementedError
    def set_top(self, top):
        raise NotImplementedError
    def set_bottom(self, bottom):
        raise NotImplementedError
    def set_right(self, right):
        raise NotImplementedError
    def set_start(self, start):
        raise NotImplementedError
    def set_end(self, end):
        raise NotImplementedError
    def set_justify_content(self, justify):
        raise NotImplementedError
    # --- size constraints -----------------------------------------------
    def set_min_height(self, height):
        raise NotImplementedError
    def set_max_height(self, height):
        raise NotImplementedError
    def set_min_width(self, width):
        raise NotImplementedError
    def set_max_width(self, width):
        raise NotImplementedError
    # --- margins --------------------------------------------------------
    def set_margin_left(self, left):
        raise NotImplementedError
    def set_margin_top(self, top):
        raise NotImplementedError
    def set_margin_bottom(self, bottom):
        raise NotImplementedError
    def set_margin_right(self, right):
        raise NotImplementedError
    def set_margin_start(self, start):
        raise NotImplementedError
    def set_margin_end(self, end):
        raise NotImplementedError
    def set_margin(self, margin):
        raise NotImplementedError
    # --- padding --------------------------------------------------------
    def set_padding_left(self, left):
        raise NotImplementedError
    def set_padding_top(self, top):
        raise NotImplementedError
    def set_padding_bottom(self, bottom):
        raise NotImplementedError
    def set_padding_right(self, right):
        raise NotImplementedError
    def set_padding_start(self, start):
        raise NotImplementedError
    def set_padding_end(self, end):
        raise NotImplementedError
    def set_padding(self, padding):
        raise NotImplementedError
    # --- borders --------------------------------------------------------
    def set_border_left(self, left):
        raise NotImplementedError
    def set_border_top(self, top):
        raise NotImplementedError
    def set_border_bottom(self, bottom):
        raise NotImplementedError
    def set_border_right(self, right):
        raise NotImplementedError
    def set_border_start(self, start):
        raise NotImplementedError
    def set_border_end(self, end):
        raise NotImplementedError
    def set_border(self, border):
        raise NotImplementedError
class Flexbox(ViewGroup):
    """ A layout widget implementing flexbox's layout.

    This uses Facebook's yoga.
    """
    # #: Default is to stretch so fill the parent
    # layout_width = set_default('match_parent')
    #
    # #: Default is to stretch so fill the parent
    # layout_height = set_default('match_parent')
    #: How to align children along the cross axis of their container
    align_items = d_(Enum('stretch', 'flex_start', 'flex_end', 'center'))
    #: How to align children along the cross axis of their container
    #align_self = d_(Enum('stretch', 'flex_start', 'flex_end', 'center'))
    #: Control how multiple lines of content are aligned within a
    #: container which uses FlexWrap
    align_content = d_(Enum('flex_start', 'flex_end', 'center',
                            'space_between', 'space_around'))
    #: Should the layout be a column or a row.
    flex_direction = d_(Enum('row', 'column', 'row_reversed',
                             'column_reversed'))
    #: The FlexBasis property is an axis-independent way of providing the default size of an item
    #: on the main axis. Setting the FlexBasis of a child is similar to setting the Width of that
    #: child if its parent is a container with FlexDirection = row or setting the Height of a child
    #: if its parent is a container with FlexDirection = column. The FlexBasis of an item is the d
    #: efault size of that item, the size of the item before any FlexGrow and FlexShrink
    #: calculations are performed.
    # flex_basis = d_(Int())
    #
    # #: The FlexGrow property describes how any space within a container should be distributed
    # #: among its children along the main axis. After laying out its children, a container will
    # #: distribute any remaining space according to the FlexGrow values specified by its children.
    # flex_grow = d_(Float(strict=False))
    #
    # #: The FlexShrink property describes how to shrink children along the main axis
    # #: in the case that the total size of the children overflow the size of the container
    # #: on the main axis.
    # flex_shrink = d_(Float(strict=False))
    #
    #: Wrap or nowrap
    flex_wrap = d_(Enum('nowrap', 'wrap', 'wrap_reverse'))
    #: How to align children within the main axis of a container
    justify_content = d_(Enum('flex_start', 'flex_end', 'center',
                              'space_between', 'space_around'))
    # #: The Position property tells Flexbox how you want your item to be positioned within its
    # #: parent.
    # position = d_(Enum('relative', 'absolute'))
    #
    # left = d_(Int())
    # top = d_(Int())
    # right = d_(Int())
    # bottom = d_(Int())
    # start = d_(Int())
    # end = d_(Int())
    #
    # min_height = d_(Int())
    # max_height = d_(Int())
    #
    # min_width = d_(Int())
    # max_width = d_(Int())
    #
    # margin_left = d_(Int())
    # margin_top = d_(Int())
    # margin_right = d_(Int())
    # margin_bottom = d_(Int())
    # margin_start = d_(Int())
    # margin_end = d_(Int())
    # margin = d_(Int())
    #
    # padding_left = d_(Int())
    # padding_top = d_(Int())
    # padding_right = d_(Int())
    # padding_bottom = d_(Int())
    # padding_start = d_(Int())
    # padding_end = d_(Int())
    # padding = d_(Int())
    #
    # border_left = d_(Int())
    # border_top = d_(Int())
    # border_right = d_(Int())
    # border_bottom = d_(Int())
    # border_start = d_(Int())
    # border_end = d_(Int())
    # border = d_(Int())
    #: A reference to the ProxyFlexbox object.
    proxy = Typed(ProxyFlexbox)
    # -------------------------------------------------------------------------
    # Observers
    # -------------------------------------------------------------------------
    # NOTE(review): this observer list includes names ('align_self',
    # 'flex_grow', 'flex_shrink', 'flex_basis', 'position', 'left', 'margin',
    # etc.) whose member declarations are commented out above -- confirm that
    # atom's @observe tolerates names with no matching declared member.
    @observe('align_items', 'align_content', 'align_self',
             'flex_direction', 'flex_wrap', 'flex_grow', 'flex_shrink',
             'flex_basis',
             'left', 'top', 'right', 'bottom', 'start', 'end',
             'margin_left', 'margin_top', 'margin_right', 'margin_bottom',
             'margin_start', 'margin_end', 'margin',
             'border_left', 'border_top', 'border_right', 'border_bottom',
             'border_start', 'border_end', 'border',
             'padding_left', 'padding_top', 'padding_right', 'padding_bottom',
             'padding_start', 'padding_end', 'padding',
             'min_width', 'min_height', 'max_width', 'max_height',
             'justify_content', 'position')
    def _update_proxy(self, change):
        """ An observer which sends the state change to the proxy.
        """
        # The superclass implementation is sufficient.
        super(Flexbox, self)._update_proxy(change)
| mit |
hoangt/tpzsimul.gem5 | tests/long/se/70.twolf/test.py | 83 | 2122 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
m5.util.addToPath('../configs/common')
from cpu2000 import twolf
import os
workload = twolf(isa, opsys, 'smred')
root.system.cpu[0].workload = workload.makeLiveProcess()
cwd = root.system.cpu[0].workload[0].cwd
#Remove two files who's presence or absence affects execution
sav_file = os.path.join(cwd, workload.input_set + '.sav')
sv2_file = os.path.join(cwd, workload.input_set + '.sv2')
try:
os.unlink(sav_file)
except:
print "Couldn't unlink ", sav_file
try:
os.unlink(sv2_file)
except:
print "Couldn't unlink ", sv2_file
| bsd-3-clause |
shreyasva/tensorflow | tensorflow/python/kernel_tests/learn_test.py | 2 | 14529 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow.python.platform # pylint: disable=unused-import,g-bad-import-order
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.framework import tensor_util
def assert_summary_scope(regexp):
  """Assert that all generated summaries match regexp."""
  summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
  for node in summaries:
    raw_tag = tf.unsupported.constant_value(node.op.inputs[0])
    assert raw_tag is not None, 'All summaries must have constant tags'
    text = str(raw_tag)
    assert isinstance(text[0], six.string_types), text[0]
    assert re.match(regexp, text), "tag doesn't match %s: %s" % (regexp, text)
class FullyConnectedTest(tf.test.TestCase):
  """Unit tests for tf.learn.fully_connected.

  setUp fixes the graph-level random seed and builds a constant 2x3
  float input; each test therefore starts from a reproducible graph with
  an empty summaries collection.
  """
  def setUp(self):
    tf.test.TestCase.setUp(self)
    tf.set_random_seed(1234)
    self.input = tf.constant([[1., 2., 3.], [-4., 5., -6.]])
    # Tests below assume the summaries collection starts empty.
    assert not tf.get_collection(tf.GraphKeys.SUMMARIES)
  def test_basic_use(self):
    output = tf.learn.fully_connected(self.input, 8, activation_fn=tf.nn.relu)
    with tf.Session() as sess:
      # Running before variable initialization must fail.
      with self.assertRaises(tf.errors.FailedPreconditionError):
        sess.run(output)
      tf.initialize_all_variables().run()
      out_value = sess.run(output)
    self.assertEqual(output.get_shape().as_list(), [2, 8])
    self.assertTrue(np.all(out_value >= 0),
                    'Relu should have capped all values.')
    self.assertGreater(tf.get_collection(tf.GraphKeys.SUMMARIES), 0,
                       'Some summaries should have been added.')
    # One weight variable and one bias variable are created.
    self.assertEqual(2,
                     len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
    self.assertEqual(0,
                     len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))
    assert_summary_scope('fully_connected')
  def test_variable_reuse_with_scope(self):
    with tf.variable_scope('test') as vs:
      output1 = tf.learn.fully_connected(self.input,
                                         8,
                                         activation_fn=tf.nn.relu)
      output2 = tf.learn.fully_connected(self.input,
                                         8,
                                         activation_fn=tf.nn.relu)
    # Re-entering the scope with reuse=True shares output1's variables.
    with tf.variable_scope(vs, reuse=True):
      output3 = tf.learn.fully_connected(self.input,
                                         8,
                                         activation_fn=tf.nn.relu)
    with tf.Session() as sess:
      tf.initialize_all_variables().run()
      out_value1, out_value2, out_value3 = sess.run([output1, output2, output3])
    self.assertFalse(np.allclose(out_value1, out_value2))
    self.assertAllClose(out_value1, out_value3)
  def test_variable_reuse_with_template(self):
    tmpl1 = tf.make_template('test',
                             tf.learn.fully_connected,
                             num_output_nodes=8)
    output1 = tmpl1(self.input)
    output2 = tmpl1(self.input)
    with tf.Session() as sess:
      tf.initialize_all_variables().run()
      out_value1, out_value2 = sess.run([output1, output2])
    # Templates share variables across calls, so outputs are identical.
    self.assertAllClose(out_value1, out_value2)
    assert_summary_scope(r'test(_\d)?/fully_connected')
  def test_custom_initializers(self):
    output = tf.learn.fully_connected(self.input,
                                      2,
                                      activation_fn=tf.nn.relu,
                                      weight_init=tf.constant_initializer(2.0),
                                      bias_init=tf.constant_initializer(1.0))
    with tf.Session() as sess:
      tf.initialize_all_variables().run()
      out_value = sess.run(output)
    # With all-2 weights and bias 1: (1+2+3)*2+1 = 13 and
    # (-4+5-6)*2+1 = -9, which relu clamps to 0.
    self.assertAllClose(np.array([[13.0, 13.0], [0.0, 0.0]]), out_value)
  def test_custom_collections(self):
    tf.learn.fully_connected(self.input,
                             2,
                             activation_fn=tf.nn.relu,
                             weight_collections=['unbiased'],
                             bias_collections=['biased'])
    self.assertEquals(1, len(tf.get_collection('unbiased')))
    self.assertEquals(1, len(tf.get_collection('biased')))
  def test_all_custom_collections(self):
    tf.learn.fully_connected(self.input,
                             2,
                             activation_fn=tf.nn.relu,
                             weight_collections=['unbiased', 'all'],
                             bias_collections=['biased', 'all'])
    self.assertEquals(1, len(tf.get_collection('unbiased')))
    self.assertEquals(1, len(tf.get_collection('biased')))
    self.assertEquals(2, len(tf.get_collection('all')))
    self.assertEquals(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES),
                      tf.get_collection('all'))
  def test_no_summaries(self):
    tf.learn.fully_connected(self.input,
                             2,
                             activation_fn=tf.nn.relu,
                             create_summaries=False)
    self.assertEquals([], tf.get_collection(tf.GraphKeys.SUMMARIES))
  # Verify fix of a bug where no_summaries + activation_fn=None led to a
  # NoneType exception.
  def test_no_summaries_no_activation(self):
    tf.learn.fully_connected(self.input,
                             2,
                             activation_fn=None,
                             create_summaries=False)
    self.assertEquals([], tf.get_collection(tf.GraphKeys.SUMMARIES))
  def test_regularizer(self):
    cnt = [0]
    tensor = tf.constant(5.0)
    def test_fn(_):
      cnt[0] += 1
      return tensor
    tf.learn.fully_connected(self.input, 2, weight_regularizer=test_fn)
    # The regularizer's return value lands in REGULARIZATION_LOSSES and
    # the callable is invoked exactly once.
    self.assertEqual([tensor],
                     tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    self.assertEqual(1, cnt[0])
  def test_shape_enforcement(self):
    place = tf.placeholder(tf.float32)
    with self.assertRaises(ValueError):
      tf.learn.fully_connected(place, 8)
    tf.learn.fully_connected(place, 8, num_input_nodes=5) # No error
    place.set_shape([None, None])
    with self.assertRaises(ValueError):
      tf.learn.fully_connected(place, 8)
    tf.learn.fully_connected(place, 8, num_input_nodes=5) # No error
    place.set_shape([None, 6])
    tf.learn.fully_connected(place, 8) # No error
    # Conflicting explicit and inferred input sizes must be rejected.
    with self.assertRaises(ValueError):
      tf.learn.fully_connected(place, 8, num_input_nodes=5)
    place = tf.placeholder(tf.float32)
    place.set_shape([2, 6, 5])
    with self.assertRaises(ValueError):
      tf.learn.fully_connected(place, 8)
  def test_no_bias(self):
    tf.learn.fully_connected(self.input, 2, bias_init=None)
    # bias_init=None suppresses the bias variable entirely.
    self.assertEqual(1,
                     len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
class Convolution2dTest(tf.test.TestCase):
  """Unit tests for tf.learn.convolution2d.

  setUp fixes the graph-level random seed and builds a constant
  2x3x3x4 (NHWC) input of the values 0..71, so each test starts from a
  reproducible graph with an empty summaries collection.
  """
  def setUp(self):
    tf.test.TestCase.setUp(self)
    tf.set_random_seed(1234)
    self.input = tf.constant(np.arange(2 * 3 * 3 * 4).reshape(
        [2, 3, 3, 4]).astype(np.float32))
    # Tests below assume the summaries collection starts empty.
    assert not tf.get_collection(tf.GraphKeys.SUMMARIES)
  def test_basic_use(self):
    output = tf.learn.convolution2d(self.input, 8, (3, 3),
                                    activation_fn=tf.nn.relu)
    with tf.Session() as sess:
      # Running before variable initialization must fail.
      with self.assertRaises(tf.errors.FailedPreconditionError):
        sess.run(output)
      tf.initialize_all_variables().run()
      out_value = sess.run(output)
    self.assertEqual(output.get_shape().as_list(), [2, 3, 3, 8])
    self.assertTrue(np.all(out_value >= 0),
                    'Relu should have capped all values.')
    self.assertGreater(tf.get_collection(tf.GraphKeys.SUMMARIES), 0,
                       'Some summaries should have been added.')
    # One kernel variable and one bias variable are created.
    self.assertEqual(2,
                     len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
    self.assertEqual(0,
                     len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))
    assert_summary_scope('convolution2d')
  def test_variable_reuse_with_scope(self):
    with tf.variable_scope('test') as vs:
      output1 = tf.learn.convolution2d(self.input,
                                       8, (3, 3),
                                       activation_fn=tf.nn.relu)
      output2 = tf.learn.convolution2d(self.input,
                                       8, (3, 3),
                                       activation_fn=tf.nn.relu)
    # Re-entering the scope with reuse=True shares output1's variables.
    with tf.variable_scope(vs, reuse=True):
      output3 = tf.learn.convolution2d(self.input,
                                       8, (3, 3),
                                       activation_fn=tf.nn.relu)
    with tf.Session() as sess:
      tf.initialize_all_variables().run()
      out_value1, out_value2, out_value3 = sess.run([output1, output2, output3])
    self.assertFalse(np.allclose(out_value1, out_value2))
    self.assertAllClose(out_value1, out_value3)
  def test_variable_reuse_with_template(self):
    tmpl1 = tf.make_template('test',
                             tf.learn.convolution2d,
                             kernel_size=(3, 3),
                             num_output_channels=8)
    output1 = tmpl1(self.input)
    output2 = tmpl1(self.input)
    with tf.Session() as sess:
      tf.initialize_all_variables().run()
      out_value1, out_value2 = sess.run([output1, output2])
    # Templates share variables across calls, so outputs are identical.
    self.assertAllClose(out_value1, out_value2)
    assert_summary_scope(r'test(_\d)?/convolution2d')
  def test_custom_initializers(self):
    output = tf.learn.convolution2d(self.input,
                                    2,
                                    (3, 3),
                                    activation_fn=tf.nn.relu,
                                    weight_init=tf.constant_initializer(2.0),
                                    bias_init=tf.constant_initializer(1.0),
                                    padding='VALID')
    with tf.Session() as sess:
      tf.initialize_all_variables().run()
      out_value = sess.run(output)
    # VALID padding on a 3x3 input yields a single spatial position per
    # image; with all-2 weights and bias 1 the expected sums are below.
    self.assertAllClose(
        np.array([[[[1261., 1261.]]], [[[3853., 3853.]]]]), out_value)
  def test_custom_collections(self):
    tf.learn.convolution2d(self.input,
                           2, (3, 3),
                           activation_fn=tf.nn.relu,
                           weight_collections=['unbiased'],
                           bias_collections=['biased'])
    self.assertEquals(1, len(tf.get_collection('unbiased')))
    self.assertEquals(1, len(tf.get_collection('biased')))
  def test_all_custom_collections(self):
    tf.learn.convolution2d(self.input,
                           2, (3, 3),
                           activation_fn=tf.nn.relu,
                           weight_collections=['unbiased', 'all'],
                           bias_collections=['biased', 'all'])
    self.assertEquals(1, len(tf.get_collection('unbiased')))
    self.assertEquals(1, len(tf.get_collection('biased')))
    self.assertEquals(2, len(tf.get_collection('all')))
    self.assertEquals(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES),
                      tf.get_collection('all'))
  def test_no_summaries(self):
    tf.learn.convolution2d(self.input,
                           2, (3, 3),
                           activation_fn=tf.nn.relu,
                           create_summaries=False)
    self.assertEquals([], tf.get_collection(tf.GraphKeys.SUMMARIES))
  def test_regularizer(self):
    cnt = [0]
    tensor = tf.constant(5.0)
    def test_fn(_):
      cnt[0] += 1
      return tensor
    tf.learn.convolution2d(self.input, 2, (3, 3), weight_regularizer=test_fn)
    # The regularizer's return value lands in REGULARIZATION_LOSSES and
    # the callable is invoked exactly once.
    self.assertEqual([tensor],
                     tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    self.assertEqual(1, cnt[0])
  def test_shape_enforcement(self):
    place = tf.placeholder(tf.float32)
    with self.assertRaises(ValueError):
      tf.learn.convolution2d(place, 8, (3, 3))
    tf.learn.convolution2d(place, 8, (3, 3), num_input_channels=5) # No error
    place.set_shape([None, None, None, None])
    with self.assertRaises(ValueError):
      tf.learn.convolution2d(place, 8, (3, 3))
    tf.learn.convolution2d(place, 8, (3, 3), num_input_channels=5) # No error
    place.set_shape([None, None, None, 6])
    tf.learn.convolution2d(place, 8, (3, 3)) # No error
    # Conflicting explicit and inferred channel counts must be rejected.
    with self.assertRaises(ValueError):
      tf.learn.convolution2d(place, 8, (3, 3), num_input_channels=5)
    place = tf.placeholder(tf.float32)
    place.set_shape([2, 6, 5])
    with self.assertRaises(ValueError):
      tf.learn.convolution2d(place, 8, (3, 3))
  def test_no_bias(self):
    tf.learn.convolution2d(self.input, 2, (3, 3), bias_init=None)
    # bias_init=None suppresses the bias variable entirely.
    self.assertEqual(1,
                     len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
class RegularizerTest(tf.test.TestCase):
  """Tests for the tf.learn.l1_regularizer / l2_regularizer factories."""
  def test_l1(self):
    # Each of these scale arguments must be rejected with ValueError.
    for bad_scale in (2., -1., 0):
      with self.assertRaises(ValueError):
        tf.learn.l1_regularizer(bad_scale)
    # A 0.0 scale yields a regularizer returning None (no penalty).
    self.assertIsNone(tf.learn.l1_regularizer(0.)(None))
    sample = np.array([1., -1., 4., 2.])
    weights = tf.constant(sample)
    with tf.Session() as sess:
      penalty = sess.run(tf.learn.l1_regularizer(.5)(weights))
    self.assertAllClose(np.abs(sample).sum() * .5, penalty)
  def test_l2(self):
    # Each of these scale arguments must be rejected with ValueError.
    for bad_scale in (2., -1., 0):
      with self.assertRaises(ValueError):
        tf.learn.l2_regularizer(bad_scale)
    # A 0.0 scale yields a regularizer returning None (no penalty).
    self.assertIsNone(tf.learn.l2_regularizer(0.)(None))
    sample = np.array([1., -1., 4., 2.])
    weights = tf.constant(sample)
    with tf.Session() as sess:
      penalty = sess.run(tf.learn.l2_regularizer(.42)(weights))
    self.assertAllClose(np.power(sample, 2).sum() / 2.0 * .42, penalty)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
srivassumit/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_collection.py | 187 | 23182 | import pytest, py
from _pytest.main import Session, EXIT_NOTESTSCOLLECTED
class TestCollector:
    """Tests for the basic Collector/Item node protocol."""
    def test_collect_versus_item(self):
        from pytest import Collector, Item
        # Collector and Item are disjoint branches of the node hierarchy.
        assert not issubclass(Collector, Item)
        assert not issubclass(Item, Collector)
    def test_compat_attributes(self, testdir, recwarn):
        # Legacy per-collector class attributes still alias the pytest
        # module-level classes.
        modcol = testdir.getmodulecol("""
            def test_pass(): pass
            def test_fail(): assert 0
        """)
        recwarn.clear()
        assert modcol.Module == pytest.Module
        assert modcol.Class == pytest.Class
        assert modcol.Item == pytest.Item
        assert modcol.File == pytest.File
        assert modcol.Function == pytest.Function
    def test_check_equality(self, testdir):
        # Nodes compare equal iff they denote the same collected test, and
        # never compare equal to arbitrary other objects.
        modcol = testdir.getmodulecol("""
            def test_pass(): pass
            def test_fail(): assert 0
        """)
        fn1 = testdir.collect_by_name(modcol, "test_pass")
        assert isinstance(fn1, pytest.Function)
        fn2 = testdir.collect_by_name(modcol, "test_pass")
        assert isinstance(fn2, pytest.Function)
        assert fn1 == fn2
        assert fn1 != modcol
        if py.std.sys.version_info < (3, 0):
            assert cmp(fn1, fn2) == 0
        assert hash(fn1) == hash(fn2)
        fn3 = testdir.collect_by_name(modcol, "test_fail")
        assert isinstance(fn3, pytest.Function)
        assert not (fn1 == fn3)
        assert fn1 != fn3
        for fn in fn1,fn2,fn3:
            assert fn != 3
            assert fn != modcol
            assert fn != [1,2,3]
            assert [1,2,3] != fn
            assert modcol != fn
    def test_getparent(self, testdir):
        # getparent(cls) walks up to the nearest ancestor of that type.
        modcol = testdir.getmodulecol("""
            class TestClass:
                def test_foo():
                    pass
        """)
        cls = testdir.collect_by_name(modcol, "TestClass")
        fn = testdir.collect_by_name(
            testdir.collect_by_name(cls, "()"), "test_foo")
        parent = fn.getparent(pytest.Module)
        assert parent is modcol
        parent = fn.getparent(pytest.Function)
        assert parent is fn
        parent = fn.getparent(pytest.Class)
        assert parent is cls
    def test_getcustomfile_roundtrip(self, testdir):
        # A custom File collector registered via pytest_collect_file
        # survives a nodeid -> node round trip through perform_collect.
        hello = testdir.makefile(".xxx", hello="world")
        testdir.makepyfile(conftest="""
            import pytest
            class CustomFile(pytest.File):
                pass
            def pytest_collect_file(path, parent):
                if path.ext == ".xxx":
                    return CustomFile(path, parent=parent)
        """)
        node = testdir.getpathnode(hello)
        assert isinstance(node, pytest.File)
        assert node.name == "hello.xxx"
        nodes = node.session.perform_collect([node.nodeid], genitems=False)
        assert len(nodes) == 1
        assert isinstance(nodes[0], pytest.File)
class TestCollectFS:
    """Tests for filesystem traversal during collection."""
    def test_ignored_certain_directories(self, testdir):
        # VCS/metadata and hidden directories are skipped by default.
        tmpdir = testdir.tmpdir
        tmpdir.ensure("_darcs", 'test_notfound.py')
        tmpdir.ensure("CVS", 'test_notfound.py')
        tmpdir.ensure("{arch}", 'test_notfound.py')
        tmpdir.ensure(".whatever", 'test_notfound.py')
        tmpdir.ensure(".bzr", 'test_notfound.py')
        tmpdir.ensure("normal", 'test_found.py')
        for x in tmpdir.visit("test_*.py"):
            x.write("def test_hello(): pass")
        result = testdir.runpytest("--collect-only")
        s = result.stdout.str()
        assert "test_notfound" not in s
        assert "test_found" in s
    def test_custom_norecursedirs(self, testdir):
        # norecursedirs from the ini file prunes recursion, but passing a
        # path inside a pruned directory explicitly still collects it.
        testdir.makeini("""
            [pytest]
            norecursedirs = mydir xyz*
        """)
        tmpdir = testdir.tmpdir
        tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
        tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
        tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
        rec = testdir.inline_run()
        rec.assertoutcome(passed=1)
        rec = testdir.inline_run("xyz123/test_2.py")
        rec.assertoutcome(failed=1)
    def test_testpaths_ini(self, testdir, monkeypatch):
        testdir.makeini("""
            [pytest]
            testpaths = gui uts
        """)
        tmpdir = testdir.tmpdir
        tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
        tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
        tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
        # executing from rootdir only tests from `testpaths` directories
        # are collected
        items, reprec = testdir.inline_genitems('-v')
        assert [x.name for x in items] == ['test_gui', 'test_uts']
        # check that explicitly passing directories in the command-line
        # collects the tests
        for dirname in ('env', 'gui', 'uts'):
            items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
            assert [x.name for x in items] == ['test_%s' % dirname]
        # changing cwd to each subdirectory and running pytest without
        # arguments collects the tests in that directory normally
        for dirname in ('env', 'gui', 'uts'):
            monkeypatch.chdir(testdir.tmpdir.join(dirname))
            items, reprec = testdir.inline_genitems()
            assert [x.name for x in items] == ['test_%s' % dirname]
class TestCollectPluginHookRelay:
    """Collection hooks declared on ad-hoc plugin objects are invoked."""
    def test_pytest_collect_file(self, testdir):
        seen = []
        class Recorder:
            def pytest_collect_file(self, path, parent):
                seen.append(path)
        testdir.makefile(".abc", "xyz")
        pytest.main([testdir.tmpdir], plugins=[Recorder()])
        assert len(seen) == 1
        assert seen[0].ext == '.abc'
    def test_pytest_collect_directory(self, testdir):
        seen = []
        class Recorder:
            def pytest_collect_directory(self, path, parent):
                seen.append(path.basename)
        testdir.mkdir("hello")
        testdir.mkdir("world")
        pytest.main(testdir.tmpdir, plugins=[Recorder()])
        assert "hello" in seen
        assert "world" in seen
class TestPrunetraceback:
    """Tests for traceback pruning and custom failure reporting during
    collection errors."""
    def test_collection_error(self, testdir):
        # An import error during collection yields a short, pruned
        # traceback (no internal __import__ frames).
        p = testdir.makepyfile("""
            import not_exists
        """)
        result = testdir.runpytest(p)
        assert "__import__" not in result.stdout.str(), "too long traceback"
        result.stdout.fnmatch_lines([
            "*ERROR collecting*",
            "*mport*not_exists*"
        ])
    def test_custom_repr_failure(self, testdir):
        # A File subclass can override repr_failure to fully control how
        # its collection error is rendered.
        p = testdir.makepyfile("""
            import not_exists
        """)
        testdir.makeconftest("""
            import pytest
            def pytest_collect_file(path, parent):
                return MyFile(path, parent)
            class MyError(Exception):
                pass
            class MyFile(pytest.File):
                def collect(self):
                    raise MyError()
                def repr_failure(self, excinfo):
                    if excinfo.errisinstance(MyError):
                        return "hello world"
                    return pytest.File.repr_failure(self, excinfo)
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*ERROR collecting*",
            "*hello world*",
        ])
    @pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
    def test_collect_report_postprocessing(self, testdir):
        p = testdir.makepyfile("""
            import not_exists
        """)
        testdir.makeconftest("""
            import pytest
            def pytest_make_collect_report(__multicall__):
                rep = __multicall__.execute()
                rep.headerlines += ["header1"]
                return rep
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*ERROR collecting*",
            "*header1*",
        ])
class TestCustomConftests:
    """Tests for conftest-provided collection customization hooks."""
    def test_ignore_collect_path(self, testdir):
        # pytest_ignore_collect can veto paths (here: names starting with
        # "x" and test_one.py), so their syntax errors never surface.
        testdir.makeconftest("""
            def pytest_ignore_collect(path, config):
                return path.basename.startswith("x") or \
                       path.basename == "test_one.py"
        """)
        sub = testdir.mkdir("xy123")
        sub.ensure("test_hello.py").write("syntax error")
        sub.join("conftest.py").write("syntax error")
        testdir.makepyfile("def test_hello(): pass")
        testdir.makepyfile(test_one="syntax error")
        result = testdir.runpytest("--fulltrace")
        assert result.ret == 0
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_ignore_collect_not_called_on_argument(self, testdir):
        # Paths given explicitly on the command line bypass
        # pytest_ignore_collect.
        testdir.makeconftest("""
            def pytest_ignore_collect(path, config):
                return True
        """)
        p = testdir.makepyfile("def test_hello(): pass")
        result = testdir.runpytest(p)
        assert result.ret == 0
        result.stdout.fnmatch_lines("*1 passed*")
        result = testdir.runpytest()
        assert result.ret == EXIT_NOTESTSCOLLECTED
        result.stdout.fnmatch_lines("*collected 0 items*")
    def test_collectignore_exclude_on_option(self, testdir):
        # collect_ignore is consulted at collection time, so a
        # pytest_configure hook can clear it based on an option.
        testdir.makeconftest("""
            collect_ignore = ['hello', 'test_world.py']
            def pytest_addoption(parser):
                parser.addoption("--XX", action="store_true", default=False)
            def pytest_configure(config):
                if config.getvalue("XX"):
                    collect_ignore[:] = []
        """)
        testdir.mkdir("hello")
        testdir.makepyfile(test_world="def test_hello(): pass")
        result = testdir.runpytest()
        assert result.ret == EXIT_NOTESTSCOLLECTED
        assert "passed" not in result.stdout.str()
        result = testdir.runpytest("--XX")
        assert result.ret == 0
        assert "passed" in result.stdout.str()
    def test_pytest_fs_collect_hooks_are_seen(self, testdir):
        # A conftest-registered pytest_collect_file may substitute a
        # custom Module class for .py files.
        testdir.makeconftest("""
            import pytest
            class MyModule(pytest.Module):
                pass
            def pytest_collect_file(path, parent):
                if path.ext == ".py":
                    return MyModule(path, parent)
        """)
        testdir.mkdir("sub")
        testdir.makepyfile("def test_x(): pass")
        result = testdir.runpytest("--collect-only")
        result.stdout.fnmatch_lines([
            "*MyModule*",
            "*test_x*"
        ])
    def test_pytest_collect_file_from_sister_dir(self, testdir):
        # Each subdirectory's conftest applies only to files below it.
        sub1 = testdir.mkpydir("sub1")
        sub2 = testdir.mkpydir("sub2")
        conf1 = testdir.makeconftest("""
            import pytest
            class MyModule1(pytest.Module):
                pass
            def pytest_collect_file(path, parent):
                if path.ext == ".py":
                    return MyModule1(path, parent)
        """)
        conf1.move(sub1.join(conf1.basename))
        conf2 = testdir.makeconftest("""
            import pytest
            class MyModule2(pytest.Module):
                pass
            def pytest_collect_file(path, parent):
                if path.ext == ".py":
                    return MyModule2(path, parent)
        """)
        conf2.move(sub2.join(conf2.basename))
        p = testdir.makepyfile("def test_x(): pass")
        p.copy(sub1.join(p.basename))
        p.copy(sub2.join(p.basename))
        result = testdir.runpytest("--collect-only")
        result.stdout.fnmatch_lines([
            "*MyModule1*",
            "*MyModule2*",
            "*test_x*"
        ])
class TestSession:
    """Collection-protocol tests exercised directly through Session."""

    def test_parsearg(self, testdir):
        """Session._parsearg resolves a relative path (and an optional
        ``::``-separated remainder) against the invocation directory."""
        p = testdir.makepyfile("def test_func(): pass")
        subdir = testdir.mkdir("sub")
        subdir.ensure("__init__.py")
        target = subdir.join(p.basename)
        p.move(target)
        subdir.chdir()
        config = testdir.parseconfig(p.basename)
        rcol = Session(config=config)
        assert rcol.fspath == subdir
        parts = rcol._parsearg(p.basename)
        assert parts[0] == target
        assert len(parts) == 1
        parts = rcol._parsearg(p.basename + "::test_func")
        assert parts[0] == target
        assert parts[1] == "test_func"
        assert len(parts) == 2

    def test_collect_topdir(self, testdir):
        """Collecting the session's own node id yields the single module."""
        p = testdir.makepyfile("def test_func(): pass")
        id = "::".join([p.basename, "test_func"])
        # XXX migrate to collectonly? (see below)
        config = testdir.parseconfig(id)
        topdir = testdir.tmpdir
        rcol = Session(config)
        assert topdir == rcol.fspath
        #rootid = rcol.nodeid
        #root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
        #assert root2 == rcol, rootid
        colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
        assert len(colitems) == 1
        assert colitems[0].fspath == p

    def test_collect_protocol_single_function(self, testdir):
        """The full hook call sequence fires, in order, for one function."""
        p = testdir.makepyfile("def test_func(): pass")
        id = "::".join([p.basename, "test_func"])
        items, hookrec = testdir.inline_genitems(id)
        item, = items
        assert item.name == "test_func"
        newid = item.nodeid
        assert newid == id
        py.std.pprint.pprint(hookrec.calls)
        # bound so the assert_contains expressions below can reference it
        topdir = testdir.tmpdir  # noqa
        hookrec.assert_contains([
            ("pytest_collectstart", "collector.fspath == topdir"),
            ("pytest_make_collect_report", "collector.fspath == topdir"),
            ("pytest_collectstart", "collector.fspath == p"),
            ("pytest_make_collect_report", "collector.fspath == p"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
            ("pytest_collectreport", "report.nodeid == ''")
        ])

    def test_collect_protocol_method(self, testdir):
        """Every id prefix of a method resolves to the same normalized id."""
        p = testdir.makepyfile("""
            class TestClass:
                def test_method(self):
                    pass
        """)
        normid = p.basename + "::TestClass::()::test_method"
        for id in [p.basename,
                   p.basename + "::TestClass",
                   p.basename + "::TestClass::()",
                   normid,
                   ]:
            items, hookrec = testdir.inline_genitems(id)
            assert len(items) == 1
            assert items[0].name == "test_method"
            newid = items[0].nodeid
            assert newid == normid

    def test_collect_custom_nodes_multi_id(self, testdir):
        """A conftest-provided File collector runs alongside the default
        Module collector for the same path, producing two items."""
        p = testdir.makepyfile("def test_func(): pass")
        testdir.makeconftest("""
            import pytest
            class SpecialItem(pytest.Item):
                def runtest(self):
                    return # ok
            class SpecialFile(pytest.File):
                def collect(self):
                    return [SpecialItem(name="check", parent=self)]
            def pytest_collect_file(path, parent):
                if path.basename == %r:
                    return SpecialFile(fspath=path, parent=parent)
        """ % p.basename)
        id = p.basename
        items, hookrec = testdir.inline_genitems(id)
        py.std.pprint.pprint(hookrec.calls)
        assert len(items) == 2
        hookrec.assert_contains([
            ("pytest_collectstart",
                "collector.fspath == collector.session.fspath"),
            ("pytest_collectstart",
                "collector.__class__.__name__ == 'SpecialFile'"),
            ("pytest_collectstart",
                "collector.__class__.__name__ == 'Module'"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
            #("pytest_collectreport",
            #    "report.fspath == %r" % str(rcol.fspath)),
        ])

    def test_collect_subdir_event_ordering(self, testdir):
        """Collect events for a file inside a package arrive in order."""
        p = testdir.makepyfile("def test_func(): pass")
        aaa = testdir.mkpydir("aaa")
        test_aaa = aaa.join("test_aaa.py")
        p.move(test_aaa)
        items, hookrec = testdir.inline_genitems()
        assert len(items) == 1
        py.std.pprint.pprint(hookrec.calls)
        hookrec.assert_contains([
            ("pytest_collectstart", "collector.fspath == test_aaa"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport",
                "report.nodeid.startswith('aaa/test_aaa.py')"),
        ])

    def test_collect_two_commandline_args(self, testdir):
        """Two package arguments are each collected, in command-line order."""
        p = testdir.makepyfile("def test_func(): pass")
        aaa = testdir.mkpydir("aaa")
        bbb = testdir.mkpydir("bbb")
        test_aaa = aaa.join("test_aaa.py")
        p.copy(test_aaa)
        test_bbb = bbb.join("test_bbb.py")
        p.move(test_bbb)
        id = "."
        items, hookrec = testdir.inline_genitems(id)
        assert len(items) == 2
        py.std.pprint.pprint(hookrec.calls)
        hookrec.assert_contains([
            ("pytest_collectstart", "collector.fspath == test_aaa"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
            ("pytest_collectstart", "collector.fspath == test_bbb"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
        ])

    def test_serialization_byid(self, testdir):
        """A node id round-trips: re-collecting by id finds the same item."""
        testdir.makepyfile("def test_func(): pass")
        items, hookrec = testdir.inline_genitems()
        assert len(items) == 1
        item, = items
        items2, hookrec = testdir.inline_genitems(item.nodeid)
        item2, = items2
        assert item2.name == item.name
        assert item2.fspath == item.fspath

    def test_find_byid_without_instance_parents(self, testdir):
        """An id that omits the '()' instance part still finds the method."""
        p = testdir.makepyfile("""
            class TestClass:
                def test_method(self):
                    pass
        """)
        arg = p.basename + ("::TestClass::test_method")
        items, hookrec = testdir.inline_genitems(arg)
        assert len(items) == 1
        item, = items
        assert item.nodeid.endswith("TestClass::()::test_method")
class Test_getinitialnodes:
    """Tests for turning initial command-line paths into collector nodes."""

    def test_global_file(self, testdir, tmpdir):
        """A bare .py file maps to a Module whose parent is the rootdir
        collector, and every node in the chain shares the config."""
        x = tmpdir.ensure("x.py")
        config = testdir.parseconfigure(x)
        col = testdir.getnode(config, x)
        assert isinstance(col, pytest.Module)
        assert col.name == 'x.py'
        assert col.parent.name == testdir.tmpdir.basename
        assert col.parent.parent is None
        for col in col.listchain():
            assert col.config is config

    def test_pkgfile(self, testdir):
        """A file inside a package (has __init__.py) still collects as a
        Module rooted directly under the session."""
        tmpdir = testdir.tmpdir
        subdir = tmpdir.join("subdir")
        x = subdir.ensure("x.py")
        subdir.ensure("__init__.py")
        config = testdir.parseconfigure(x)
        col = testdir.getnode(config, x)
        assert isinstance(col, pytest.Module)
        assert col.name == 'x.py'
        assert col.parent.parent is None
        for col in col.listchain():
            assert col.config is config
class Test_genitems:
    """Tests for item generation from collected modules."""

    def test_check_collect_hashes(self, testdir):
        """Items collected from two copies of a module must be pairwise
        distinct and hash differently."""
        p = testdir.makepyfile("""
            def test_1():
                pass
            def test_2():
                pass
        """)
        p.copy(p.dirpath(p.purebasename + "2" + ".py"))
        items, reprec = testdir.inline_genitems(p.dirpath())
        assert len(items) == 4
        for numi, i in enumerate(items):
            for numj, j in enumerate(items):
                if numj != numi:
                    assert hash(i) != hash(j)
                    assert i != j

    def test_example_items1(self, testdir):
        """Functions, methods and inherited methods are all collected;
        getmodpath() reports their dotted location."""
        p = testdir.makepyfile('''
            def testone():
                pass
            class TestX:
                def testmethod_one(self):
                    pass
            class TestY(TestX):
                pass
        ''')
        items, reprec = testdir.inline_genitems(p)
        assert len(items) == 3
        assert items[0].name == 'testone'
        assert items[1].name == 'testmethod_one'
        assert items[2].name == 'testmethod_one'
        # let's also test getmodpath here
        assert items[0].getmodpath() == "testone"
        assert items[1].getmodpath() == "TestX.testmethod_one"
        assert items[2].getmodpath() == "TestY.testmethod_one"
        s = items[0].getmodpath(stopatmodule=False)
        assert s.endswith("test_example_items1.testone")
        print(s)

    def test_class_and_functions_discovery_using_glob(self, testdir):
        """
        tests that python_classes and python_functions config options work
        as prefixes and glob-like patterns (issue #600).
        """
        testdir.makeini("""
            [pytest]
            python_classes = *Suite Test
            python_functions = *_test test
        """)
        p = testdir.makepyfile('''
            class MyTestSuite:
                def x_test(self):
                    pass
            class TestCase:
                def test_y(self):
                    pass
        ''')
        items, reprec = testdir.inline_genitems(p)
        ids = [x.getmodpath() for x in items]
        assert ids == ['MyTestSuite.x_test', 'TestCase.test_y']
def test_matchnodes_two_collections_same_file(testdir):
    """Two collectors (a plugin hook and the module-level conftest hook)
    may both claim the same .abc file; both resulting items run, and a
    custom item is still addressable by its '::hello' id."""
    testdir.makeconftest("""
        import pytest
        def pytest_configure(config):
            config.pluginmanager.register(Plugin2())

        class Plugin2:
            def pytest_collect_file(self, path, parent):
                if path.ext == ".abc":
                    return MyFile2(path, parent)

        def pytest_collect_file(path, parent):
            if path.ext == ".abc":
                return MyFile1(path, parent)

        class MyFile1(pytest.Item, pytest.File):
            def runtest(self):
                pass

        class MyFile2(pytest.File):
            def collect(self):
                return [Item2("hello", parent=self)]

        class Item2(pytest.Item):
            def runtest(self):
                pass
    """)
    p = testdir.makefile(".abc", "")
    result = testdir.runpytest()
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*2 passed*",
    ])
    res = testdir.runpytest("%s::hello" % p.basename)
    res.stdout.fnmatch_lines([
        "*1 passed*",
    ])
class TestNodekeywords:
    """Tests around the keywords exposed on collected nodes."""

    def test_no_under(self, testdir):
        """Node keywords include the module name and no private names."""
        modcol = testdir.getmodulecol("""
            def test_pass(): pass
            def test_fail(): assert 0
        """)
        l = list(modcol.keywords)
        assert modcol.name in l
        for x in l:
            assert not x.startswith("_")
        assert modcol.name in repr(modcol.keywords)

    def test_issue345(self, testdir):
        """-k matching must work for names containing dunder substrings."""
        testdir.makepyfile("""
            def test_should_not_be_selected():
                assert False, 'I should not have been selected to run'
            def test___repr__():
                pass
        """)
        reprec = testdir.inline_run("-k repr")
        reprec.assertoutcome(passed=1, failed=0)
| mpl-2.0 |
kerwinxu/barcodeManager | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/sgiar.py | 1 | 2644 | """SCons.Tool.sgiar
Tool-specific initialization for SGI ar (library archive). If CC
exists, static libraries should be built with it, so the prelinker has
a chance to resolve C++ template instantiations.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgiar.py 5023 2010/06/14 22:05:46 scons"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
    """Add Builders and construction variables for ar to an Environment."""
    SCons.Tool.createStaticLibBuilder(env)

    if env.Detect('CC'):
        # Archive through the SGI 'CC' compiler driver so its prelinker can
        # resolve C++ template instantiations before members are archived.
        env['AR'] = 'CC'
        env['ARFLAGS'] = SCons.Util.CLVar('-ar')
        env['ARCOM'] = '$AR $ARFLAGS -o $TARGET $SOURCES'
    else:
        # Plain binutils-style ar fallback; note it takes no -o option.
        env['AR'] = 'ar'
        env['ARFLAGS'] = SCons.Util.CLVar('r')
        env['ARCOM'] = '$AR $ARFLAGS $TARGET $SOURCES'

    # Shared libraries link through the normal linker with -shared added.
    env['SHLINK'] = '$LINK'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
    env['SHLINKCOM'] = '$SHLINK $SHLINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LIBPREFIX'] = 'lib'
    env['LIBSUFFIX'] = '.a'
def exists(env):
    """Report whether an archiver is available, preferring the 'CC' driver."""
    detected = env.Detect('CC')
    if not detected:
        detected = env.Detect('ar')
    return detected
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-2-clause |
ionutbalutoiu/ironic | ironic/tests/unit/drivers/modules/test_virtualbox.py | 3 | 18788 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for VirtualBox Driver Modules."""
import mock
from oslo_config import cfg
from pyremotevbox import exception as pyremotevbox_exc
from pyremotevbox import vbox as pyremotevbox_vbox
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules import virtualbox
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = {
'virtualbox_vmname': 'baremetal1',
'virtualbox_host': '10.0.2.2',
'virtualbox_username': 'username',
'virtualbox_password': 'password',
'virtualbox_port': 12345,
}
CONF = cfg.CONF
class VirtualBoxMethodsTestCase(db_base.DbTestCase):
    """Tests for the module-level helpers of the VirtualBox driver:
    _parse_driver_info() and _run_virtualbox_method()."""

    def setUp(self):
        super(VirtualBoxMethodsTestCase, self).setUp()
        driver_info = INFO_DICT.copy()
        mgr_utils.mock_the_extension_manager(driver="fake_vbox")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_vbox',
                                               driver_info=driver_info)

    def test__parse_driver_info(self):
        info = virtualbox._parse_driver_info(self.node)
        self.assertEqual('baremetal1', info['vmname'])
        self.assertEqual('10.0.2.2', info['host'])
        self.assertEqual('username', info['username'])
        self.assertEqual('password', info['password'])
        self.assertEqual(12345, info['port'])

    def test__parse_driver_info_missing_vmname(self):
        del self.node.driver_info['virtualbox_vmname']
        self.assertRaises(exception.MissingParameterValue,
                          virtualbox._parse_driver_info, self.node)

    def test__parse_driver_info_missing_host(self):
        del self.node.driver_info['virtualbox_host']
        self.assertRaises(exception.MissingParameterValue,
                          virtualbox._parse_driver_info, self.node)

    def test__parse_driver_info_invalid_port(self):
        self.node.driver_info['virtualbox_port'] = 'invalid-port'
        self.assertRaises(exception.InvalidParameterValue,
                          virtualbox._parse_driver_info, self.node)

    def test__parse_driver_info_missing_port(self):
        # The port is optional and falls back to the default 18083.
        del self.node.driver_info['virtualbox_port']
        info = virtualbox._parse_driver_info(self.node)
        self.assertEqual(18083, info['port'])

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost', autospec=True)
    def test__run_virtualbox_method(self, host_mock):
        # Happy path: host is created, VM found and the named VM method
        # invoked with the forwarded args/kwargs.
        host_object_mock = mock.MagicMock(spec_set=['find_vm'])
        func_mock = mock.MagicMock(spec_set=[])
        vm_object_mock = mock.MagicMock(spec_set=['foo'], foo=func_mock)
        host_mock.return_value = host_object_mock
        host_object_mock.find_vm.return_value = vm_object_mock
        func_mock.return_value = 'return-value'
        return_value = virtualbox._run_virtualbox_method(
            self.node, 'some-ironic-method', 'foo', 'args', kwarg='kwarg')
        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')
        func_mock.assert_called_once_with('args', kwarg='kwarg')
        self.assertEqual('return-value', return_value)

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost', autospec=True)
    def test__run_virtualbox_method_get_host_fails(self, host_mock):
        # Library errors while connecting are wrapped in the ironic exception.
        host_mock.side_effect = pyremotevbox_exc.PyRemoteVBoxException
        self.assertRaises(exception.VirtualBoxOperationFailed,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo',
                          'args', kwarg='kwarg')

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost', autospec=True)
    def test__run_virtualbox_method_find_vm_fails(self, host_mock):
        host_object_mock = mock.MagicMock(spec_set=['find_vm'])
        host_mock.return_value = host_object_mock
        exc = pyremotevbox_exc.PyRemoteVBoxException
        host_object_mock.find_vm.side_effect = exc
        self.assertRaises(exception.VirtualBoxOperationFailed,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo', 'args',
                          kwarg='kwarg')
        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost', autospec=True)
    def test__run_virtualbox_method_func_fails(self, host_mock):
        host_object_mock = mock.MagicMock(spec_set=['find_vm'])
        host_mock.return_value = host_object_mock
        func_mock = mock.MagicMock()
        vm_object_mock = mock.MagicMock(spec_set=['foo'], foo=func_mock)
        host_object_mock.find_vm.return_value = vm_object_mock
        func_mock.side_effect = pyremotevbox_exc.PyRemoteVBoxException
        self.assertRaises(exception.VirtualBoxOperationFailed,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo',
                          'args', kwarg='kwarg')
        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')
        func_mock.assert_called_once_with('args', kwarg='kwarg')

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost', autospec=True)
    def test__run_virtualbox_method_invalid_method(self, host_mock):
        # A VM object without the requested method raises
        # InvalidParameterValue rather than AttributeError.
        host_object_mock = mock.MagicMock(spec_set=['find_vm'])
        host_mock.return_value = host_object_mock
        vm_object_mock = mock.MagicMock(spec_set=[])
        host_object_mock.find_vm.return_value = vm_object_mock
        del vm_object_mock.foo
        self.assertRaises(exception.InvalidParameterValue,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo',
                          'args', kwarg='kwarg')
        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')

    @mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost', autospec=True)
    def test__run_virtualbox_method_vm_wrong_power_state(self, host_mock):
        host_object_mock = mock.MagicMock(spec_set=['find_vm'])
        host_mock.return_value = host_object_mock
        func_mock = mock.MagicMock(spec_set=[])
        vm_object_mock = mock.MagicMock(spec_set=['foo'], foo=func_mock)
        host_object_mock.find_vm.return_value = vm_object_mock
        func_mock.side_effect = pyremotevbox_exc.VmInWrongPowerState
        # _run_virtualbox_method() doesn't catch VmInWrongPowerState and
        # lets caller handle it.
        self.assertRaises(pyremotevbox_exc.VmInWrongPowerState,
                          virtualbox._run_virtualbox_method,
                          self.node, 'some-ironic-method', 'foo',
                          'args', kwarg='kwarg')
        host_mock.assert_called_once_with(vmname='baremetal1',
                                          host='10.0.2.2',
                                          username='username',
                                          password='password',
                                          port=12345)
        host_object_mock.find_vm.assert_called_once_with('baremetal1')
        func_mock.assert_called_once_with('args', kwarg='kwarg')
class VirtualBoxPowerTestCase(db_base.DbTestCase):
    """Tests for the VirtualBox power interface."""

    def setUp(self):
        super(VirtualBoxPowerTestCase, self).setUp()
        driver_info = INFO_DICT.copy()
        mgr_utils.mock_the_extension_manager(driver="fake_vbox")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_vbox',
                                               driver_info=driver_info)

    def test_get_properties(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            properties = task.driver.power.get_properties()
            self.assertIn('virtualbox_vmname', properties)
            self.assertIn('virtualbox_host', properties)

    @mock.patch.object(virtualbox, '_parse_driver_info', autospec=True)
    def test_validate(self, parse_info_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.validate(task)
            parse_info_mock.assert_called_once_with(task.node)

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_get_power_state(self, run_method_mock):
        # pyremotevbox power names map onto ironic states.
        run_method_mock.return_value = 'PoweredOff'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            power_state = task.driver.power.get_power_state(task)
            run_method_mock.assert_called_once_with(task.node,
                                                    'get_power_state',
                                                    'get_power_status')
            self.assertEqual(states.POWER_OFF, power_state)

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_get_power_state_invalid_state(self, run_method_mock):
        # Unknown state strings degrade to states.ERROR.
        run_method_mock.return_value = 'invalid-state'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            power_state = task.driver.power.get_power_state(task)
            run_method_mock.assert_called_once_with(task.node,
                                                    'get_power_state',
                                                    'get_power_status')
            self.assertEqual(states.ERROR, power_state)

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_set_power_state_off(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.POWER_OFF)
            run_method_mock.assert_called_once_with(task.node,
                                                    'set_power_state',
                                                    'stop')

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_set_power_state_on(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.POWER_ON)
            run_method_mock.assert_called_once_with(task.node,
                                                    'set_power_state',
                                                    'start')

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_set_power_state_reboot(self, run_method_mock):
        # Reboot is implemented as stop followed by start.
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.REBOOT)
            run_method_mock.assert_any_call(task.node,
                                            'reboot',
                                            'stop')
            run_method_mock.assert_any_call(task.node,
                                            'reboot',
                                            'start')

    def test_set_power_state_invalid_state(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.power.set_power_state,
                              task, 'invalid-state')

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_reboot(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.reboot(task)
            run_method_mock.assert_any_call(task.node,
                                            'reboot',
                                            'stop')
            run_method_mock.assert_any_call(task.node,
                                            'reboot',
                                            'start')
class VirtualBoxManagementTestCase(db_base.DbTestCase):
    """Tests for the VirtualBox management (boot device) interface."""

    def setUp(self):
        super(VirtualBoxManagementTestCase, self).setUp()
        driver_info = INFO_DICT.copy()
        mgr_utils.mock_the_extension_manager(driver="fake_vbox")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_vbox',
                                               driver_info=driver_info)

    def test_get_properties(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            properties = task.driver.management.get_properties()
            self.assertIn('virtualbox_vmname', properties)
            self.assertIn('virtualbox_host', properties)

    @mock.patch.object(virtualbox, '_parse_driver_info', autospec=True)
    def test_validate(self, parse_info_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.validate(task)
            parse_info_mock.assert_called_once_with(task.node)

    def test_get_supported_boot_devices(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            devices = task.driver.management.get_supported_boot_devices(task)
            self.assertIn(boot_devices.PXE, devices)
            self.assertIn(boot_devices.DISK, devices)
            self.assertIn(boot_devices.CDROM, devices)

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_get_boot_device_ok(self, run_method_mock):
        # VirtualBox 'Network' maps to ironic PXE; always persistent.
        run_method_mock.return_value = 'Network'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            ret_val = task.driver.management.get_boot_device(task)
            run_method_mock.assert_called_once_with(task.node,
                                                    'get_boot_device',
                                                    'get_boot_device')
            self.assertEqual(boot_devices.PXE, ret_val['boot_device'])
            self.assertTrue(ret_val['persistent'])

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_get_boot_device_invalid(self, run_method_mock):
        # Unknown device names yield None for both dict entries.
        run_method_mock.return_value = 'invalid-boot-device'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            ret_val = task.driver.management.get_boot_device(task)
            self.assertIsNone(ret_val['boot_device'])
            self.assertIsNone(ret_val['persistent'])

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_set_boot_device_ok(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.set_boot_device(task, boot_devices.PXE)
            run_method_mock.assert_called_once_with(task.node,
                                                    'set_boot_device',
                                                    'set_boot_device',
                                                    'Network')

    @mock.patch.object(virtualbox, 'LOG', autospec=True)
    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_set_boot_device_wrong_power_state(self, run_method_mock,
                                               log_mock):
        # Setting the boot device on a running VM is logged, not raised.
        run_method_mock.side_effect = pyremotevbox_exc.VmInWrongPowerState
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.set_boot_device(task, boot_devices.PXE)
            log_mock.error.assert_called_once_with(mock.ANY, mock.ANY)

    @mock.patch.object(virtualbox, '_run_virtualbox_method', autospec=True)
    def test_set_boot_device_invalid(self, run_method_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.management.set_boot_device,
                              task, 'invalid-boot-device')

    def test_get_sensors_data(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(NotImplementedError,
                              task.driver.management.get_sensors_data,
                              task)
| apache-2.0 |
AnirudhBhat/HackerNewsAPI | hn/hn.py | 1 | 2693 | import json
import requests
from post import Post
from user import User
TOP_POSTS_URL = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
TOP_POSTS_ITEM = "https://hacker-news.firebaseio.com/v0/item/%s.json?print=pretty"
NEW_POSTS_URL = "https://hacker-news.firebaseio.com/v0/newstories.json?print=pretty"
ASK_POSTS_URL = "https://hacker-news.firebaseio.com/v0/askstories.json?print=pretty"
SHOW_POSTS_URL = "https://hacker-news.firebaseio.com/v0/showstories.json?print=pretty"
JOB_POSTS_URL = "https://hacker-news.firebaseio.com/v0/jobstories.json?print=pretty"
post_types = ['top_posts', 'new_posts', 'askhn_posts', 'showhn_posts', 'job_posts']
def parse_json(limit, json_data):
    """Fetch up to `limit` Hacker News items and return them as Post objects.

    :param limit: maximum number of items to fetch.
    :param json_data: list of item ids as returned by the HN list endpoints.
    :return: list of Post objects (at most ``min(limit, len(json_data))``).
    """
    posts = []
    # Slice instead of range(limit): never index past the end of the id
    # list the API actually returned.
    for item_id in json_data[:limit]:
        p = Post()
        url = TOP_POSTS_ITEM % (item_id,)
        item_json_data = json.loads(make_request(url))
        try:
            p.submitter = item_json_data['by']
            p.points = item_json_data['score']
            p.title = item_json_data['title']
            p.url = item_json_data['url']
            p.story_type = item_json_data['type']
            p.num_comments = item_json_data['descendants']
        except KeyError:
            # Some item types (e.g. jobs, Ask HN) omit fields such as
            # 'url' or 'descendants'; keep whatever was set so far.
            pass
        posts.append(p)
    return posts
def get_json(url):
    """Fetch *url* and return its body decoded from JSON."""
    return json.loads(make_request(url))
def parse_json_for_id(json_data):
    """Build a Post from a single already-fetched item payload.

    :param json_data: decoded JSON dict for one HN item.
    :return: a populated Post object.
    """
    p = Post()
    try:
        p.submitter = json_data['by']
        p.points = json_data['score']
        p.title = json_data['title']
        p.url = json_data['url']
        p.story_type = json_data['type']
        p.num_comments = json_data['descendants']
    except KeyError:
        # Mirror parse_json(): job/Ask items may omit 'url'/'descendants';
        # previously this raised and broke id() for those items.
        pass
    return p
def user(name):
    """Fetch the Hacker News user *name* and return a populated User object.

    Fields absent from the API response (e.g. 'about' is optional) are
    simply left at their User defaults.
    """
    u = User()
    USER_URL = "https://hacker-news.firebaseio.com/v0/user/%s.json?print=pretty" % name
    user_json_data = json.loads(make_request(USER_URL))
    try:
        u.name = user_json_data['id']
        u.karma = user_json_data['karma']
        u.about = user_json_data['about']
    except KeyError:
        # Only missing keys are expected here; a bare except would also
        # have hidden real bugs such as a failed JSON decode upstream.
        pass
    return u
def id(id):
    """Fetch a single HN item by numeric id and return it as a Post."""
    # NOTE: both the function and its parameter shadow the builtin id();
    # the name is part of the public API so it is kept for compatibility.
    url = TOP_POSTS_ITEM % id
    json_data = get_json(url)
    return parse_json_for_id(json_data)
def top_posts(limit):
    """Return up to `limit` Post objects for the current top stories."""
    json_data = get_json(TOP_POSTS_URL)
    return parse_json(limit, json_data)


def new_posts(limit):
    """Return up to `limit` Post objects for the newest stories."""
    json_data = get_json(NEW_POSTS_URL)
    return parse_json(limit, json_data)


def askhn_posts(limit):
    """Return up to `limit` Post objects for current Ask HN stories."""
    json_data = get_json(ASK_POSTS_URL)
    return parse_json(limit, json_data)


def showhn_posts(limit):
    """Return up to `limit` Post objects for current Show HN stories."""
    json_data = get_json(SHOW_POSTS_URL)
    return parse_json(limit, json_data)


def job_posts(limit):
    """Return up to `limit` Post objects for current job stories."""
    json_data = get_json(JOB_POSTS_URL)
    return parse_json(limit, json_data)
def make_request(url):
    """Perform an HTTP GET on *url* and return the response body as text."""
    response = requests.get(url)
    return response.text
class InvalidPostTypeException(Exception):
    """Raised by get_post() when given an unrecognised post type name."""
    pass
def get_post(post_type='top_posts', limit=10):
    """Return up to `limit` posts of the requested kind.

    :param post_type: one of the names listed in ``post_types``.
    :param limit: maximum number of posts to fetch.
    :raises InvalidPostTypeException: if `post_type` is not recognised.
    """
    if post_type not in post_types:
        raise InvalidPostTypeException('invalid post type!')
    # Dispatch through an explicit table instead of eval()-ing a string
    # built from caller input (arbitrary code execution risk).
    dispatch = {
        'top_posts': top_posts,
        'new_posts': new_posts,
        'askhn_posts': askhn_posts,
        'showhn_posts': showhn_posts,
        'job_posts': job_posts,
    }
    return dispatch[post_type](limit)
| mit |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.3/django/conf/__init__.py | 146 | 6707 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import os
import re
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.utils.functional import LazyObject
from django.utils import importlib
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
    """
    A lazy proxy for either global Django settings or a custom settings object.
    The user can manually configure settings prior to using them. Otherwise,
    Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
    """
    def _setup(self):
        """
        Load the settings module pointed to by the environment variable. This
        is used the first time we need any settings at all, if the user has not
        previously configured the settings manually.
        """
        try:
            settings_module = os.environ[ENVIRONMENT_VARIABLE]
            if not settings_module: # If it's set but is an empty string.
                raise KeyError
        except KeyError:
            # NOTE: This is arguably an EnvironmentError, but that causes
            # problems with Python's interactive help.
            raise ImportError("Settings cannot be imported, because environment variable %s is undefined." % ENVIRONMENT_VARIABLE)

        self._wrapped = Settings(settings_module)

    def configure(self, default_settings=global_settings, **options):
        """
        Called to manually configure the settings. The 'default_settings'
        parameter sets where to retrieve any unspecified values from (its
        argument must support attribute access (__getattr__)).
        """
        # Identity comparison instead of '!= None': None is a singleton and
        # the wrapped settings object may define comparison behaviour of
        # its own (PEP 8 recommends 'is not' for singletons).
        if self._wrapped is not None:
            raise RuntimeError('Settings already configured.')
        holder = UserSettingsHolder(default_settings)
        for name, value in options.items():
            setattr(holder, name, value)
        self._wrapped = holder

    def configured(self):
        """
        Returns True if the settings have already been configured.
        """
        return bool(self._wrapped)
    configured = property(configured)
class BaseSettings(object):
    """
    Common logic for settings whether set by a module or by the user.
    """
    def __setattr__(self, name, value):
        # URL-style settings are expected to carry a trailing slash; warn
        # (but still store the value) when a non-empty one lacks it.
        needs_trailing_slash = name in ("MEDIA_URL", "STATIC_URL")
        if needs_trailing_slash and value and not value.endswith('/'):
            warnings.warn('If set, %s must end with a slash' % name,
                          PendingDeprecationWarning)
        object.__setattr__(self, name, value)
class Settings(BaseSettings):
    """Settings backed by an importable module named by DJANGO_SETTINGS_MODULE."""

    def __init__(self, settings_module):
        # update this dict from global settings (but only for ALL_CAPS settings)
        for setting in dir(global_settings):
            if setting == setting.upper():
                setattr(self, setting, getattr(global_settings, setting))

        # store the settings module in case someone later cares
        self.SETTINGS_MODULE = settings_module

        try:
            mod = importlib.import_module(self.SETTINGS_MODULE)
        except ImportError, e:
            raise ImportError("Could not import settings '%s' (Is it on sys.path?): %s" % (self.SETTINGS_MODULE, e))

        # Settings that should be converted into tuples if they're mistakenly entered
        # as strings.
        tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")

        # User-module values override the global defaults copied above.
        for setting in dir(mod):
            if setting == setting.upper():
                setting_value = getattr(mod, setting)
                if setting in tuple_settings and type(setting_value) == str:
                    setting_value = (setting_value,) # In case the user forgot the comma.
                setattr(self, setting, setting_value)

        # Expand entries in INSTALLED_APPS like "django.contrib.*" to a list
        # of all those apps.
        new_installed_apps = []
        for app in self.INSTALLED_APPS:
            if app.endswith('.*'):
                app_mod = importlib.import_module(app[:-2])
                appdir = os.path.dirname(app_mod.__file__)
                app_subdirs = os.listdir(appdir)
                app_subdirs.sort()
                # Only subdirectories that are valid Python identifiers
                # count as apps.
                name_pattern = re.compile(r'[a-zA-Z]\w*')
                for d in app_subdirs:
                    if name_pattern.match(d) and os.path.isdir(os.path.join(appdir, d)):
                        new_installed_apps.append('%s.%s' % (app[:-2], d))
            else:
                new_installed_apps.append(app)
        self.INSTALLED_APPS = new_installed_apps

        if hasattr(time, 'tzset') and self.TIME_ZONE:
            # When we can, attempt to validate the timezone. If we can't find
            # this file, no check happens and it's harmless.
            zoneinfo_root = '/usr/share/zoneinfo'
            if (os.path.exists(zoneinfo_root) and not
                    os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
                raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
            # Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
            os.environ['TZ'] = self.TIME_ZONE
            time.tzset()

        # Settings are configured, so we can set up the logger if required
        if self.LOGGING_CONFIG:
            # First find the logging configuration function ...
            logging_config_path, logging_config_func_name = self.LOGGING_CONFIG.rsplit('.', 1)
            logging_config_module = importlib.import_module(logging_config_path)
            logging_config_func = getattr(logging_config_module, logging_config_func_name)

            # ... then invoke it with the logging settings
            logging_config_func(self.LOGGING)
class UserSettingsHolder(BaseSettings):
    """
    Holder for user configured settings.
    """
    # SETTINGS_MODULE doesn't make much sense in the manually configured
    # (standalone) case.
    SETTINGS_MODULE = None

    def __init__(self, default_settings):
        """
        Requests for configuration variables not in this class are satisfied
        from the module specified in default_settings (if possible).
        """
        self.default_settings = default_settings

    def __getattr__(self, name):
        # Only invoked for names missing on the instance, so explicitly
        # configured options always shadow the defaults.
        return getattr(self.default_settings, name)

    def __dir__(self):
        return self.__dict__.keys() + dir(self.default_settings)

    # For Python < 2.6:
    __members__ = property(lambda self: self.__dir__())
# Module-level singleton through which settings are read elsewhere.
# LazySettings is defined earlier in this file (outside this view);
# presumably it defers loading until first attribute access -- confirm.
settings = LazySettings()
| mit |
nicolargo/intellij-community | python/helpers/pydev/pydev_ipython/inputhookgtk.py | 55 | 1086 | # encoding: utf-8
"""
Enable pygtk to be used interacive by setting PyOS_InputHook.
Authors: Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import gtk, gobject
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def _main_quit(*args, **kwargs):
    """Watch callback: stop the gtk main loop.

    Accepts and ignores whatever arguments gobject.io_add_watch passes.
    Returns False so GLib removes the event source after it fires once.
    """
    gtk.main_quit()
    return False
def create_inputhook_gtk(stdin_file):
    """Return a PyOS_InputHook-style callable bound to *stdin_file*.

    The returned hook runs the gtk main loop until *stdin_file* becomes
    readable (the io watch then calls _main_quit), handing control back to
    the interactive interpreter when the user types input.
    """
    def inputhook_gtk():
        # Arm a one-shot watch that quits the loop on stdin activity.
        gobject.io_add_watch(stdin_file, gobject.IO_IN, _main_quit)
        gtk.main()
        return 0
    return inputhook_gtk
| apache-2.0 |
minlexx/pyevemon | esi_client/models/get_characters_character_id_mail_lists_200_ok.py | 1 | 4109 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetCharactersCharacterIdMailLists200Ok(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, mailing_list_id=None, name=None):
        """
        GetCharactersCharacterIdMailLists200Ok - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Attribute name -> swagger type; to_dict() iterates over this.
        self.swagger_types = {
            'mailing_list_id': 'int',
            'name': 'str'
        }
        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'mailing_list_id': 'mailing_list_id',
            'name': 'name'
        }
        # NOTE(review): these direct assignments bypass the property setters
        # below, so a freshly constructed model may hold None values that the
        # setters would reject.
        self._mailing_list_id = mailing_list_id
        self._name = name
    @property
    def mailing_list_id(self):
        """
        Gets the mailing_list_id of this GetCharactersCharacterIdMailLists200Ok.
        Mailing list ID

        :return: The mailing_list_id of this GetCharactersCharacterIdMailLists200Ok.
        :rtype: int
        """
        return self._mailing_list_id
    @mailing_list_id.setter
    def mailing_list_id(self, mailing_list_id):
        """
        Sets the mailing_list_id of this GetCharactersCharacterIdMailLists200Ok.
        Mailing list ID

        :param mailing_list_id: The mailing_list_id of this GetCharactersCharacterIdMailLists200Ok.
        :type: int
        """
        # The field is required by the API schema, hence the None check.
        if mailing_list_id is None:
            raise ValueError("Invalid value for `mailing_list_id`, must not be `None`")
        self._mailing_list_id = mailing_list_id
    @property
    def name(self):
        """
        Gets the name of this GetCharactersCharacterIdMailLists200Ok.
        name string

        :return: The name of this GetCharactersCharacterIdMailLists200Ok.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this GetCharactersCharacterIdMailLists200Ok.
        name string

        :param name: The name of this GetCharactersCharacterIdMailLists200Ok.
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively serialize nested models (anything exposing to_dict),
        # including models inside lists and dict values.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Identical type plus identical instance state.
        if not isinstance(other, GetCharactersCharacterIdMailLists200Ok):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| gpl-3.0 |
steveblamey/nautilus-archive | extension/nautilus-archive.py | 1 | 9193 | # coding: utf-8
#
# Copyright 2013 Steve Blamey
#
# This file is part of Nautilus-archive.
#
# Nautilus-archive is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nautilus-archive is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nautilus-archive. If not, see <http://www.gnu.org/licenses/>.
#
import gi
gi.require_version('Nautilus', '3.0')
from gi.repository import Nautilus, GObject, Gtk, Gdk, Gio
from trackertag import TrackerTag
import os
import datetime
class ColumnExtension(GObject.GObject, Nautilus.MenuProvider, Nautilus.InfoProvider, Nautilus.LocationWidgetProvider):
    """Nautilus extension for Tracker-backed archive tagging.

    Files are tagged from the context menu (MenuProvider); tagged files get
    an emblem (InfoProvider); the ~/Archive folder shows a bar with a button
    (LocationWidgetProvider) that moves every tagged file into the folder
    configured for its tag.
    """

    def __init__(self):
        print("Initializing nautilus-archive")
        self.tracker = TrackerTag()
        self.archive_folder = Gio.file_new_for_path(os.path.join(os.getenv("HOME"), "Archive"))
        # Tag name -> (emblem icon name, destination folder URI).
        self.tag_settings = {
            'archive': ('emblem-package', self.archive_folder.get_uri()),
            'test': ('emblem-generic', self.archive_uri_join('Work')),
            #'project': ('emblem-generic', self.archive_uri_join('Project')),
        }
        # Create the Archive folder on first run.
        if not self.archive_folder.query_exists(None):
            self.archive_folder.make_directory(None)
        # Set the folder icon.
        self.archive_folder.set_attribute_string("metadata::custom-icon-name", 'folder-documents', Gio.FileQueryInfoFlags.NONE, None)
        # Create per-tag sub-folders and the Tracker tags themselves.
        for tag, settings in self.tag_settings.items():
            folder = Gio.file_new_for_uri(settings[1])
            if not folder.query_exists(None) and tag != 'archive':
                folder.make_directory(None)
            if not self.tracker.tag_exists(tag):
                self.tracker.new_tag(tag)

    def archive_uri_join(self, *args):
        """Join path segments *args* onto the archive folder's URI."""
        segments = (self.archive_folder.get_uri(),) + args
        return "/".join(map(str, segments))

    def get_widget(self, uri, window):
        """LocationWidgetProvider hook: build the archive bar.

        Returns the bar widget when *uri* is the Archive folder itself,
        otherwise None (no widget).
        """
        # Only show the archive bar in the Archive directory.
        if uri != self.archive_folder.get_uri():
            return
        tags = tuple(tag for tag in self.tag_settings)
        tagged_uris = self.tracker.tagged_files(tags)
        if tagged_uris:
            button_msg = "Archive %s Files" % len(tagged_uris)
        else:
            button_msg = "Nothing to Archive"
        archive_bar = Gtk.Box(spacing=6)
        archive_bar.set_name('archive-bar')
        main_glabel = Gtk.Label()
        main_glabel.set_markup('<b>Archive</b>')
        main_glabel.set_name('archive-label')
        archive_gbutton = Gtk.Button(button_msg)
        archive_gbutton.set_name('archive-button')
        archive_gbutton.set_tooltip_text("Move all tagged files to the Archive folder")
        # Deactivate the archive button if no files are tagged.
        if not tagged_uris:
            archive_gbutton.set_sensitive(False)
        archive_gbutton.connect("clicked", self.on_archive_gbutton_clicked, window)
        archive_gbutton.set_relief(Gtk.ReliefStyle.HALF)
        archive_button = Gtk.ButtonBox()
        archive_button.set_border_width(6)
        archive_button.add(archive_gbutton)
        archive_bar.pack_end(archive_button, False, False, 0)
        background_box = Gtk.InfoBar()
        background_box.add(archive_bar)
        background_box_content = background_box.get_content_area()
        background_box_content.add(main_glabel)
        background_box.show_all()
        return background_box

    def on_archive_gbutton_clicked(self, button, window):
        """Move every tagged file into its tag's destination folder.

        Failures restore the tag and are counted instead of aborting the
        whole run; one summary error dialog is shown at the end.
        """
        # BUG FIX: `errors` is now accumulated across all tags and the
        # dialog shown once at the end; previously the counter was reset
        # per tag and the dialog code was unreachable (see below).
        errors = 0
        for tag in self.tag_settings:
            file_uris = self.tracker.tagged_files(tag)
            if not file_uris:
                continue
            for f in file_uris:
                try:
                    source_file = Gio.file_new_for_uri(f)
                    target_file = Gio.file_new_for_uri("/".join((self.tag_settings[tag][1], source_file.get_basename())))
                    # Remove the tag first; only move once that succeeded.
                    tag_removed = self.tracker.remove_tag(f, tag)
                    if tag_removed:
                        source_file.move(target_file, Gio.FileCopyFlags.NONE, None, None, None)
                        button.set_sensitive(False)
                        button.set_label("Nothing to Archive")
                    else:
                        raise Exception("Unable to remove tag")
                # BUG FIX: the original used a bare `except:` and re-raised,
                # which aborted the remaining moves and made the error
                # dialog below unreachable.
                except Exception:
                    # Restore the tag so the file can be retried later.
                    self.tracker.add_tag(f, tag)
                    errors += 1
        if errors:
            dialog = Gtk.MessageDialog(window, 0, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK, "No Files Archived")
            # BUG FIX: the format placeholder was "$s", which is not a valid
            # %-conversion and raised TypeError at runtime.
            dialog.format_secondary_text("There was an error when trying to move %s files to the archive" % errors)
            dialog.run()
            dialog.destroy()

    def update_file_info_full(self, provider, handle, closure, file):
        """InfoProvider hook: schedule the emblem update for *file*.

        The Tracker lookup runs from the GObject idle loop so Nautilus is
        not blocked. Non-local and Dropbox URIs are skipped entirely:
        querying some Dropbox folders made Tracker spin at 100% CPU with
        SPARQL syntax errors.
        """
        if file.get_uri_scheme() != 'file' or "Dropbox" in file.get_uri():
            return Nautilus.OperationResult.COMPLETE
        GObject.idle_add(self.update_emblem, provider, handle, closure, file)
        return Nautilus.OperationResult.IN_PROGRESS

    def update_emblem(self, provider, handle, closure, file):
        """Idle callback: add the emblem for any archive tag *file* carries."""
        for tag in self.tag_settings:
            if self.tracker.has_tag(file.get_uri(), tag):
                file.add_emblem(self.tag_settings[tag][0])
                file.invalidate_extension_info()
        Nautilus.info_provider_update_complete_invoke(closure, provider, handle, Nautilus.OperationResult.COMPLETE)
        # Returning False removes this idle handler after a single run.
        return False

    def tag_file_cb(self, menu, file, tag):
        """Menu callback: apply *tag* to each selected file.

        A file carries at most one archive tag, so any existing tag is
        removed first.
        """
        for f in file:
            for f_tag in self.tag_settings:
                if self.tracker.has_tag(f.get_uri(), f_tag):
                    self.tracker.remove_tag(f.get_uri(), f_tag)
            tag_added = self.tracker.add_tag(f.get_uri(), tag)
            if tag_added:
                f.add_emblem(self.tag_settings[tag][0])

    def tag_file_remove_cb(self, menu, file):
        """Menu callback: strip every archive tag from the selected files."""
        for f in file:
            for tag in self.tag_settings:
                if self.tracker.has_tag(f.get_uri(), tag):
                    self.tracker.remove_tag(f.get_uri(), tag)
            f.invalidate_extension_info()

    def get_file_items(self, window, files):
        """MenuProvider hook: build the "Archive Tags" context submenu.

        Returns None (no menu) when nothing is selected, when a selected
        file is not local, or when it already lives inside the Archive.
        """
        # Show the menu if there is at least one file selected.
        if len(files) == 0:
            return
        for fd in files:
            # Only for local files.
            if fd.get_uri_scheme() != 'file':
                return
            # Not in the Archive.
            if self.archive_folder.get_uri() in fd.get_uri():
                return
        # (Removed an unused `items = []` local from the original.)
        tags = sorted(self.tag_settings)
        top_menuitem = Nautilus.MenuItem(
            name="ArchiveTagExtension::Menu",
            label="Archive Tags"
        )
        submenu = Nautilus.Menu()
        top_menuitem.set_submenu(submenu)
        for tag in tags:
            set_tag = Nautilus.MenuItem(
                name="ArchiveTagExtension::Add_%s_Tag" % tag,
                label="Tag for %s" % tag.capitalize(),
            )
            set_tag.connect('activate', self.tag_file_cb, files, tag)
            submenu.append_item(set_tag)
        remove_tag = Nautilus.MenuItem(
            name="ArchiveTagExtension::Remove_Archive_Tag",
            label="Remove Archive Tags",
        )
        remove_tag.connect('activate', self.tag_file_remove_cb, files)
        submenu.append_item(remove_tag)
        return top_menuitem,
| gpl-3.0 |
pilou-/ansible | lib/ansible/parsing/quoting.py | 241 | 1141 | # (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def is_quoted(data):
    '''
    Report whether *data* is wrapped in a matching pair of quotes.

    True only when the string is at least two characters long, starts and
    ends with the same quote character (single or double), and the closing
    quote is not escaped with a backslash.
    '''
    if len(data) < 2:
        return False
    opening = data[0]
    closing = data[-1]
    if opening != closing:
        return False
    return opening in ('"', "'") and data[-2] != '\\'


def unquote(data):
    '''Strip the surrounding quotes from *data* if is_quoted() says they
    form a matching, unescaped pair; otherwise return *data* unchanged.'''
    return data[1:-1] if is_quoted(data) else data
| gpl-3.0 |
abdellatifkarroum/odoo | addons/sale_crm/__openerp__.py | 260 | 2036 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Opportunity to Quotation',
'version': '1.0',
'category': 'Hidden',
'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If different cases are open (a list), it generates one sale order by case.
The case is then closed and linked to the generated sales order.
We suggest you to install this module, if you installed both the sale and the crm
modules.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['sale', 'crm', 'web_kanban_gauge'],
'data': [
'wizard/crm_make_sale_view.xml',
'sale_crm_view.xml',
'security/sale_crm_security.xml',
'security/ir.model.access.csv',
],
'demo': [],
'test': ['test/sale_crm.yml'],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yelizariev/hr | __unported__/hr_report_turnover/__openerp__.py | 19 | 1459 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Employee Turn-over Report',
'version': '1.0',
'category': 'Generic Modules/Human Resources',
'description': """
Employee Turn-over by Department
================================
""",
'author': "Michael Telahun Makonnen <mmakonnen@gmail.com>,Odoo Community Association (OCA)",
'website': 'http://miketelahun.wordpress.com',
'license': 'AGPL-3',
'depends': [
'hr',
'hr_attendance',
'hr_employee_state',
'report_aeroo',
],
'data': [
'reports.xml',
'wizard/employee_turnover_view.xml',
],
'test': [
],
'installable': False,
}
| agpl-3.0 |
protatremy/buildbot | master/buildbot/test/fake/change.py | 10 | 1229 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from buildbot.process.properties import Properties
from buildbot.test.fake.state import State
class Change(State):
    """A fake, data-only stand-in for a real Change object, for tests.

    Class attributes provide empty-string defaults for the fields tests
    commonly read; keyword arguments override them via State.__init__.
    """
    project = ''
    repository = ''
    branch = ''
    category = ''
    codebase = ''
    # Class-level default; replaced per-instance with a Properties object in
    # __init__, so this shared dict is only ever read, never mutated.
    properties = {}
    def __init__(self, **kw):
        State.__init__(self, **kw)
        # change.properties is a IProperties
        props = Properties()
        # Seed from whatever dict was supplied (or the class default),
        # attributing every value to the source name "test".
        props.update(self.properties, "test")
        self.properties = props
| gpl-2.0 |
daltonmaag/robofab | Scripts/RoboFabIntro/intro_Groups.py | 8 | 2671 | #FLM: RoboFab Intro, Font Groups
#
#
# demo of RoboFab font groups
#
#
# RoboFab font objects have several objects associated with them
# kerning, info, groups, etc. Let's talk about groups. Groups are,
# well groups of glyph names. These can be useful for lots of things
# like kerning and OpenType features. The number of uses for
# groups is only limited by your imagination. Enough of this
# silly pep talk. Let's check it out.
from robofab.world import CurrentFont
# gString has lots of glyph lists, these two will be useful for this demo
from robofab.gString import uppercase_plain, lowercase_plain
# (you will need t have a font open in FontLab for this demo)
font = CurrentFont()
# First off, let's gather up some glyph names.
# gString's uppercase_plain and lowercase_plain
# lists will do for now. Let's go through these lists
# and see if they contain any glyphs that are in this font.
uppercase = []
lowercase = []
for glyphName in uppercase_plain:
if font.has_key(glyphName):
uppercase.append(glyphName)
for glyphName in lowercase_plain:
if font.has_key(glyphName):
lowercase.append(glyphName)
uppercase.sort()
lowercase.sort()
# And, we'll combine the uppercase glyph names and
# lowercase glyph names that we found into one list
both = uppercase + lowercase
both.sort()
# Just for kicks, let's get the list of glyphs
# that you have selected in the font window as well
selected = font.selection
# Ok, now that we have these lists, what do we do with them?
# Well, we'll store them in font.groups. That object is
# essentially a dictionary that has a few special tricks.
# The dictionary is keyed by the name of the group, and
# the value is the list of the glyphs. Pretty simple.
# Now, let's store these lists away.
groups = font.groups
groups['uppercase'] = uppercase
groups['lowercase'] = lowercase
groups['uppercaseAndLowercase'] = both
groups['selected'] = both
font.update()
# In FontLab the group info is visible in the classes panel.
# if you look there now, you'll (hopefully) see these new
# groups. Wow! Exciting! Amazing! But, what if you want to
# get these lists back? Easy:
groups = font.groups
print 'uppercase:'
print groups['uppercase']
print
print 'lowercase:'
print groups['lowercase']
print
print 'upper and lowercase:'
print groups['uppercaseAndLowercase']
print
print 'selected:'
print groups['selected']
print
# You can even search the groups for the names of groups
# that contain a certain glyph name. It works like this:
groups = font.groups
found = groups.findGlyph('a')
print '"a" is in these groups: %s'%str(found)
# Oh yeah, don't forget to update the font.
font.update()
# Super easy, huh? Now get to it!
| bsd-3-clause |
dsweet04/rekall | rekall-core/rekall/plugins/tools/dynamic_profiles.py | 3 | 12443 | # Rekall Memory Forensics
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Authors:
# Michael Cohen <scudette@google.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This module implements dynamic profiles.
A Dynamic profile is a way of discovering certain parameters via running a
matching signature.
"""
from rekall import obj
from rekall.plugins.tools import disassembler
class DisassembleMatcher(object):
    """A matching engine for disassembler rules.

    This matcher searches for a sequence of rules in a disassembly and tries
    to match a certain rule pattern to the assembly. Ultimately if the rules
    match, the rules may extract certain parameters from the pattern.
    """
    def __init__(self, name="", mode="AMD64", rules=None, session=None,
                 max_separation=10):
        # mode: disassembler mode string handed to Capstone (e.g. "AMD64").
        self.mode = mode
        self.name = name
        self.rules = rules
        self.session = session
        # Maximum number of instructions allowed between two consecutive
        # rule hits for the hit vector to be considered valid.
        self.max_separation = max_separation
        self.dis = disassembler.Capstone(self.mode, session=self.session)
    def _CheckCaptureVariables(self, vector, contexts):
        """Checks that capture variables are consistent in the vector.

        The vector is a list of disassembly lines which match the rules, e.g.

        [16, 60, 61]

        The context is the capture variables from these rules. In order
        to be valid, the capture variables must all be consistent. For
        example the following is not consistent (since var1 is RAX in
        the first rule and RCX in the second rule):

        contexts[16]
        {'var1': u'RAX'}

        contexts[60]
        {'var1': u'RCX', 'out': u'0x88'}

        contexts[61]
        {}

        Returns the merged capture-variable dict, or None (implicit) on
        inconsistency.
        """
        result = {}
        for rule_number, item in enumerate(vector):
            rule_context = contexts[rule_number]
            # The capture variables in this rule only.
            rule_capture_vars_values = {}
            for k, v in rule_context[item].iteritems():
                # Strip any trailing "_<suffix>" back to the base name.
                var_name = k.rsplit("_", 1)[0]
                # Only consider variables (start with $).
                if not var_name.startswith("$"):
                    continue
                # If this var is previously known, this match must be the same
                # as previously found.
                if var_name in result and v != result[var_name]:
                    return
                # If this capture variable's value is the same as another
                # capture variable's value in the same rule, exclude the
                # match. This means that an expression like:
                #
                # MOV $var2, [$var1+$out]
                #
                # Necessarily implies that $var1 and $var2 must be different
                # registers.
                if (v in rule_capture_vars_values and
                        rule_capture_vars_values[v] != var_name):
                    return
                result[var_name] = v
                rule_capture_vars_values[v] = var_name
        return result
    def _FindRuleIndex(self, instruction):
        """Generate all rules that match the current instruction."""
        for i, rule in enumerate(self.rules):
            # Record where and what matched; match_rule adds any capture
            # variables the rule extracted to this context.
            context = dict(
                instruction=instruction.text, offset=instruction.address)
            if instruction.match_rule(rule, context):
                yield i, context
    def GenerateVector(self, hits, vector, level):
        """Generate possible hit vectors which match the rules."""
        for item in hits.get(level, []):
            if vector:
                # Hits must occur in increasing instruction order...
                if item < vector[-1]:
                    continue
                # ...and within max_separation of the previous hit.
                if item > self.max_separation + vector[-1]:
                    break
            new_vector = vector + [item]
            if level + 1 == len(hits):
                yield new_vector
            elif level + 1 < len(hits):
                # Recurse to extend the partial vector with the next rule.
                for result in self.GenerateVector(hits, new_vector, level+1):
                    yield result
    def _GetMatch(self, hits, contexts):
        """Find the first vector that matches all the criteria."""
        for vector in self.GenerateVector(hits, [], 0):
            context = self._CheckCaptureVariables(vector, contexts)
            if not context:
                continue
            return (vector, context)
        return [], {}
    def MatchFunction(self, func, length=1000):
        """Match the rules against up to *length* bytes of *func*'s body."""
        return self.Match(
            func.obj_offset, func.obj_vm.read(func.obj_offset, length))
    def Match(self, offset=0, data=""):
        """Disassemble *data* at *offset* and run the rule matcher.

        Returns the merged capture-variable dict on success, otherwise a
        NoneObject.
        """
        # hits: rule index -> instruction positions that matched the rule.
        # contexts: rule index -> {position: capture context}.
        hits = {}
        contexts = {}
        for hit, instruction in enumerate(self.dis.disassemble(data, offset)):
            for rule_idx, context in self._FindRuleIndex(instruction):
                hits.setdefault(rule_idx, []).append(hit)
                contexts.setdefault(rule_idx, {})[hit] = context
        # All the hits must match
        if len(hits) < len(self.rules):
            self.session.logging.error("Failed to find match for %s", self.name)
            # Add some debugging messages here to make diagnosing errors easier.
            for i, rule in enumerate(self.rules):
                if i not in hits:
                    self.session.logging.debug("Unable to match rule: %s", rule)
            return obj.NoneObject()
        vector, context = self._GetMatch(hits, contexts)
        if len(vector) < len(self.rules):
            self.session.logging.error(
                "Failed to find match for %s - Only matched %s/%s rules.",
                self.name, len(vector), len(self.rules))
            return obj.NoneObject()
        self.session.logging.debug("Found match for %s", self.name)
        result = {}
        for i, hit in enumerate(vector):
            hit_data = contexts[i][hit]
            result.update(hit_data)
            self.session.logging.debug(
                "%#x %s", hit_data["offset"], hit_data["instruction"])
        return result
class DisassembleConstantMatcher(object):
    """Search for the value of global constants using disassembly."""
    def __init__(self, session, profile, name, args):
        self.session = session
        self.profile = profile
        self.args = args
        self.name = name
        # Start address to disassemble - can be an exported function name.
        self.start_address = args["start"]
        # Disassemble capture rules.
        self.rules = args["rules"]
    def __call__(self):
        """Run the matcher and return the captured constant value.

        Disassembles from start_address and, on a successful match, returns
        the value bound to the "$out" capture variable; implicitly returns
        None when nothing matched or no "$out" was captured.
        """
        resolver = self.session.address_resolver
        func = self.session.profile.Function(resolver.get_address_by_name(
            self.start_address))
        matcher = DisassembleMatcher(
            mode=func.mode, rules=self.rules, name=self.name,
            session=self.session)
        result = matcher.MatchFunction(func)
        if result and "$out" in result:
            return result["$out"]
class FirstOf(object):
    """Chain of fallback callables: calling the chain returns the first
    non-None result.

    Keyword arguments supplied at construction time are forwarded to every
    callable on each invocation.
    """

    def __init__(self, list_of_callables, **kwargs):
        self.list_of_callables = list_of_callables
        self.kwargs = kwargs

    def __call__(self, *args):
        for candidate in self.list_of_callables:
            outcome = candidate(*args, **self.kwargs)
            # NOTE: deliberately `!=` rather than `is not` -- presumably so
            # values that compare equal to None (e.g. rekall's NoneObject)
            # also count as misses; confirm before tightening.
            if outcome != None:
                return outcome
class DynamicConstantProfileLoader(obj.ProfileSectionLoader):
    """Loader for the "$DYNAMIC_CONSTANTS" profile section.

    For every constant listed in the section it builds the configured
    detector callables and installs them on the profile as a FirstOf chain,
    so the constant's value is discovered by disassembly on demand.
    """
    name = "$DYNAMIC_CONSTANTS"

    def LoadIntoProfile(self, session, profile, constants):
        """Turn the section's detector descriptions into callables."""
        for constant_name, rules in constants.items():
            # A constant may list several alternative detectors; collect the
            # ones we know how to build.
            detectors = []
            for rule in rules:
                # DisassembleConstantMatcher is the only detector type
                # implemented so far.
                if rule["type"] != "DisassembleConstantMatcher":
                    session.logging.error(
                        "Unimplemented detector %s", rule["type"])
                    continue
                detectors.append(DisassembleConstantMatcher(
                    session, profile, constant_name, rule["args"]))
            # The first detector that yields a value wins.
            profile.add_constants({constant_name: FirstOf(detectors)},
                                  constants_are_absolute=True)
        return profile
class DisassembleStructMatcher(DisassembleConstantMatcher):
    """Match a struct based on rules."""
    def __call__(self, struct, member=None):
        """Discover *struct*'s field offsets by disassembly.

        On a successful match, registers a new overlay for the struct type
        on the profile (so later instantiations are initialized directly)
        and returns the requested *member* of the current struct; implicitly
        returns None when matching fails.
        """
        # session/profile come from the struct itself; this detector was
        # constructed with None for both (see DynamicStructProfileLoader).
        resolver = struct.obj_session.address_resolver
        func = struct.obj_profile.Function(resolver.get_address_by_name(
            self.start_address))
        matcher = DisassembleMatcher(
            mode=func.mode, rules=self.rules, name=self.name,
            max_separation=self.args.get("max_separation", 10),
            session=struct.obj_session)
        struct.obj_session.logging.info(
            "DisassembleStructMatcher: %s %s", self.name,
            self.args.get("comment", ""))
        result = matcher.MatchFunction(func)
        if result:
            # Match succeeded - create a new overlay for the Struct.
            overlay = {self.name: [None, {}]}
            fields = overlay[self.name][1]
            # Each configured field's offset comes from the "$<field>"
            # capture variable of the matched rules.
            for field, field_args in self.args["fields"].iteritems():
                fields[field] = [result["$" + field], field_args]
            # This should never happen?
            if member not in fields:
                return
            # We calculated the types, now we add them to the profile so the
            # next time a struct is instantiated it will be properly
            # initialized.
            struct.obj_profile.add_types(overlay)
            # Now take care of the current struct which has already been
            # initialized.
            struct.members.update(struct.obj_profile.Object(self.name).members)
            # Return the member from the current struct.
            return struct.m(member)
class DynamicStructProfileLoader(obj.ProfileSectionLoader):
    """Loader for the "$DYNAMIC_STRUCTS" profile section."""
    name = "$DYNAMIC_STRUCTS"
    def LoadIntoProfile(self, session, profile, data):
        """Parse the struct detectors and install them as field overlays."""
        overlay = {}
        for struct_name, signatures in data.items():
            detectors = {}
            # Each field can have several different detectors.
            for rule in signatures:
                detector_name = rule["type"]
                detector_arg = rule["args"]
                # We only support one type of detector right now.
                if detector_name != "DisassembleStructMatcher":
                    session.logging.error(
                        "Unimplemented detector %s", detector_name)
                    continue
                # session/profile are deliberately None: the matcher pulls
                # both from the struct instance it is called with.
                detector = DisassembleStructMatcher(
                    None, None, struct_name, detector_arg)
                # Add the detector to each field. The initial detector is a
                # pass-through which returns the normal member if one is defined
                # in the conventional way. If None is defined, we launch our
                # dynamic detector - which will store the conventional member
                # definitions as a cache.
                def PassThrough(struct, member=None):
                    # NOTE(review): assumes struct.m() yields a None-like
                    # value for undefined members so FirstOf falls through
                    # to the dynamic detector -- confirm.
                    return struct.m(member)
                for field in detector_arg["fields"]:
                    detectors.setdefault(field, [PassThrough]).append(detector)
            # Install an overlay with the chain of detectors.
            overlay[struct_name] = [None, {}]
            for field in detectors:
                overlay[struct_name][1][field] = FirstOf(
                    detectors[field], member=field)
        profile.add_overlay(overlay)
        return profile
| gpl-2.0 |
nephthys/shr_im | urls.py | 1 | 2581 | # -*- coding: utf-8 -*-
#
# shr.im * just an URL shortener
# Copyright (C) 2009 Camille Bouiller <aftercem@gmail.com>
# Copyright (C) 2009 Rémy Hubscher <natim@users.sf.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    (r'^$', 'shr.url.views.homepage'),
    # OAuth login/logout flow.
    (r'^login/?$', 'shr.oauthlogin.views.connection'),
    (r'^login/callback/?$', 'shr.oauthlogin.views.callback'),
    (r'^logout/?$', 'shr.oauthlogin.views.disconnect'),
    (r'^api/?$', 'shr.url.views.view_doc_api'),
    (r'^tools/?$', 'shr.url.views.view_tools'),
    (r'^contact/?$', 'shr.url.views.contact'),
    # API endpoints; the second form captures an output-format extension.
    (r'^api/1.0/(?P<function>[-\w]+)$', 'shr.url.views.justAPIv1'),
    (r'^api/1.0/(?P<function>[-\w]+).(?P<format_output>[-\w]+)$', 'shr.url.views.justAPIv1'),
    (r'^ajax/(?P<function>[-\w]+)$', 'shr.url.views.ajax_pages'),
    (r'^timeline/?$', 'shr.url.views.urls_timeline'),
    (r'^popular/?$', 'shr.url.views.urls_popular'),
    # Single-letter prefixes route to the per-alias, per-user and per-domain
    # views; del/ removes an alias.
    (r'^s/(?P<alias>.*)/$', 'shr.url.views.view_fiche'),
    (r'^c/(?P<alias>.*)/$', 'shr.url.views.view_clics'),
    (r'^u/(?P<url>.*)/$', 'shr.url.views.view_user'),
    (r'^d/(?P<url>.*)/$', 'shr.url.views.view_domain'),
    (r'^del/(?P<alias>.*)/$', 'shr.url.views.delete'),
    (r'^admin/(.*)', admin.site.root),
    # Shorten-by-prefix: an entire http:// URL pasted after the site root.
    (r'^(http://.*)$', 'shr.url.views.prefixed_access'),
    # Catch-all: anything else is treated as a short alias to redirect.
    (r'^(?P<alias>.*)/$', 'shr.url.views.redir'),
)
if settings.DEBUG:
    # NOTE(review): the first pattern below starts with "/", which Django
    # never matches (the leading slash is stripped before URL resolution),
    # and it duplicates the third pattern -- presumably dead; confirm.
    urlpatterns += patterns('',(r'^/site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
    (r'^upload/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
    (r'^site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),)
| gpl-3.0 |
brain-tec/partner-contact | partner_second_lastname/tests/test_config.py | 2 | 1258 | # -*- coding: utf-8 -*-
# Copyright 2017 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
class TestConfig(common.SavepointCase):
    """Check that the partner-names-order setting recomputes partner names."""

    @classmethod
    def setUpClass(cls):
        super(TestConfig, cls).setUpClass()
        cls.wizard = cls.env['res.config.settings'].create({})
        cls.partner = cls.env['res.partner'].create({
            'firstname': "First",
            'lastname': "Last",
            'lastname2': "Second",
        })

    def _check_order(self, order, expected_name):
        """Apply ``order`` through the settings wizard, trigger the name
        recomputation and assert the partner's resulting display name.

        Extracted because every test repeated the same 4-step sequence.
        """
        self.wizard.partner_names_order = order
        self.wizard.set_values()
        self.wizard.action_recalculate_partners_name()
        self.assertEqual(self.partner.name, expected_name)

    def test_last_first(self):
        self._check_order('last_first', "Last Second First")

    def test_last_first_comma(self):
        self._check_order('last_first_comma', "Last Second, First")

    def test_first_last(self):
        self._check_order('first_last', "First Last Second")
| agpl-3.0 |
MSOpenTech/edx-platform | common/test/acceptance/pages/lms/create_mode.py | 148 | 2538 | """Mode creation page (used to add modes to courses during testing)."""
import re
import urllib
from bok_choy.page_object import PageObject
from . import BASE_URL
class ModeCreationPage(PageObject):
    """The mode creation page.

    When allowed by the Django settings file, visiting this page allows modes
    to be created for an existing course.
    """

    def __init__(self, browser, course_id, mode_slug=None, mode_display_name=None, min_price=None, suggested_prices=None, currency=None):
        """The mode creation page is an endpoint for HTTP GET requests.

        By default, it will create an 'honor' mode for the given course with
        display name 'Honor Code', a minimum price of 0, no suggested prices,
        and using USD as the currency.

        Arguments:
            browser (Browser): The browser instance.
            course_id (unicode): The ID of the course for which modes are to be created.

        Keyword Arguments:
            mode_slug (str): The mode to add, either 'honor', 'verified', or 'professional'
            mode_display_name (str): Describes the new course mode
            min_price (int): The minimum price a user must pay to enroll in the new course mode
            suggested_prices (str): Comma-separated prices to suggest to the user.
            currency (str): The currency in which to list prices.
        """
        super(ModeCreationPage, self).__init__(browser)
        self._course_id = course_id
        self._parameters = {}
        # Only keyword arguments that were actually supplied become query
        # parameters; the endpoint applies its own defaults for the rest.
        optional_params = (
            ('mode_slug', mode_slug),
            ('mode_display_name', mode_display_name),
            ('min_price', min_price),
            ('suggested_prices', suggested_prices),
            ('currency', currency),
        )
        for param_name, param_value in optional_params:
            if param_value is not None:
                self._parameters[param_name] = param_value

    @property
    def url(self):
        """Construct the mode creation URL."""
        base_url = '{base}/course_modes/create_mode/{course_id}/'.format(
            base=BASE_URL,
            course_id=self._course_id
        )
        query_string = urllib.urlencode(self._parameters)
        return base_url + '?' + query_string if query_string else base_url

    def is_browser_on_page(self):
        body_text = self.q(css='BODY').text[0]
        return re.search(r'Mode ([^$]+) created for ([^$]+).$', body_text) is not None
| agpl-3.0 |
arabenjamin/pyNES | pynes/tests/sprite_test.py | 28 | 4065 | # -*- coding: utf-8 -*-
import unittest
from pynes import sprite
class SpriteTest(unittest.TestCase):
    """Tests for the pynes.sprite CHR encode/decode helpers.

    The fixture is a NES CHR sprite bank; ``mario1`` and ``mario2`` are the
    hand-decoded 8x8 pixel matrices (palette indexes 0-3) of its first two
    16-byte tiles.
    """

    def __init__(self, testcase_name):
        unittest.TestCase.__init__(self, testcase_name)
        # Load the raw CHR bank as a list of byte values. Use a context
        # manager so the file handle is closed (it previously leaked).
        with open('fixtures/nerdynights/scrolling/mario.chr', 'rb') as f:
            content = f.read()
        self.bin = [ord(c) for c in content]
        self.mario1 = [
            [0, 0, 0, 0, 0, 0, 1, 1],
            [0, 0, 0, 0, 1, 1, 1, 1],
            [0, 0, 0, 1, 1, 1, 1, 1],
            [0, 0, 0, 1, 1, 1, 1, 1],
            [0, 0, 0, 3, 3, 3, 2, 2],
            [0, 0, 3, 2, 2, 3, 2, 2],
            [0, 0, 3, 2, 2, 3, 3, 2],
            [0, 3, 3, 2, 2, 3, 3, 2]
        ]
        self.mario2 = [
            [1, 1, 1, 0, 0, 0, 0, 0],
            [1, 1, 2, 0, 0, 0, 0, 0],
            [1, 2, 2, 0, 0, 0, 0, 0],
            [1, 1, 1, 1, 1, 1, 0, 0],
            [3, 2, 2, 2, 0, 0, 0, 0],
            [3, 3, 2, 2, 2, 2, 0, 0],
            [2, 2, 2, 2, 2, 2, 2, 0],
            [2, 2, 3, 2, 2, 2, 2, 0]
        ]
        # An all-transparent (palette index 0) tile.
        self.blank = [[0] * 8 for _ in range(8)]

    @staticmethod
    def _diagonal_tile():
        """A distinctive non-fixture tile used by the put_sprite tests.

        Pixel value at (row, col) is abs(col - row) % 4, which both put
        tests previously duplicated as an inline literal.
        """
        return [[abs(col - row) % 4 for col in range(8)] for row in range(8)]

    def test_load_sprites(self):
        sprites = sprite.load_sprites(
            'fixtures/nerdynights/scrolling/mario.chr')
        self.assertEqual(self.bin, sprites)

    def test_decode_first_sprite(self):
        # A tile is 16 bytes: two 8-byte bit planes (channels A and B).
        channelA = self.bin[0:8]
        channelB = self.bin[8:16]
        s1 = sprite.decode_sprite(channelA, channelB)
        self.assertEqual(self.mario1, s1)

    def test_decode_second_sprite(self):
        channelA = self.bin[16:24]
        channelB = self.bin[24:32]
        s2 = sprite.decode_sprite(channelA, channelB)
        self.assertEqual(self.mario2, s2)

    def test_get_first_sprite(self):
        s1 = sprite.get_sprite(0, self.bin)
        self.assertEqual(self.mario1, s1)

    def test_get_second_sprite(self):
        s2 = sprite.get_sprite(1, self.bin)
        self.assertEqual(self.mario2, s2)

    def test_sprite_length(self):
        # 8 KB CHR bank / 16 bytes per tile = 512 tiles.
        length = sprite.length(self.bin)
        self.assertEqual(512, length)

    def test_encode_first_sprite(self):
        encoded = sprite.encode_sprite(self.mario1)
        expected = self.bin[0:16]
        self.assertEqual(expected, encoded)

    def test_encode_second_sprite(self):
        encoded = sprite.encode_sprite(self.mario2)
        expected = self.bin[16:32]
        self.assertEqual(expected, encoded)

    def test_put_first_sprite(self):
        expected = self._diagonal_tile()
        sprite.put_sprite(0, self.bin, expected)
        s1 = sprite.get_sprite(0, self.bin)
        self.assertEqual(expected, s1)

    def test_put_second_sprite(self):
        expected = self._diagonal_tile()
        sprite.put_sprite(1, self.bin, expected)
        s1 = sprite.get_sprite(1, self.bin)
        self.assertEqual(expected, s1)

    def test_find_sprite_1(self):
        index = sprite.find_sprite(self.bin, self.mario1)
        self.assertEqual(0, index)

    def test_find_sprite_2(self):
        index = sprite.find_sprite(self.bin, self.mario2)
        self.assertEqual(1, index)

    def test_find_sprite_3(self):
        # First blank tile at absolute index 292, searching from 256 on;
        # find_sprite returns the offset relative to the start index.
        index = sprite.find_sprite(self.bin, self.blank, 256)
        self.assertEqual(292 - 256, index)
| bsd-3-clause |
msmolens/VTK | ThirdParty/Twisted/twisted/conch/ssh/agent.py | 59 | 9556 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implements the SSH v2 key agent protocol. This protocol is documented in the
SSH source code, in the file
U{PROTOCOL.agent<http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent>}.
Maintainer: Paul Swartz
"""
import struct
from twisted.conch.ssh.common import NS, getNS, getMP
from twisted.conch.error import ConchError, MissingKeyStoreError
from twisted.conch.ssh import keys
from twisted.internet import defer, protocol
class SSHAgentClient(protocol.Protocol):
    """
    The client side of the SSH agent protocol. This is equivalent to
    ssh-add(1) and can be used with either ssh-agent(1) or the SSHAgentServer
    protocol, also in this package.
    """

    def __init__(self):
        # Incoming wire bytes not yet parsed into a complete packet.
        self.buf = ''
        # Pending Deferreds, one per request sent, oldest first. The agent
        # answers requests in order, so responses are matched positionally.
        self.deferreds = []

    def dataReceived(self, data):
        """
        Buffer incoming bytes and, for each complete packet, fire the oldest
        pending Deferred. Packets are framed as a 4-byte big-endian length
        followed by a 1-byte response type and the payload.
        """
        self.buf += data
        while 1:
            # Need the full 4-byte length prefix plus at least one body byte.
            if len(self.buf) <= 4:
                return
            packLen = struct.unpack('!L', self.buf[:4])[0]
            if len(self.buf) < 4 + packLen:
                return
            packet, self.buf = self.buf[4:4 + packLen], self.buf[4 + packLen:]
            reqType = ord(packet[0])
            d = self.deferreds.pop(0)
            if reqType == AGENT_FAILURE:
                d.errback(ConchError('agent failure'))
            elif reqType == AGENT_SUCCESS:
                d.callback('')
            else:
                # Type-specific responses are decoded by the per-request
                # callback (e.g. _cbRequestIdentities, _cbSignData).
                d.callback(packet)

    def sendRequest(self, reqType, data):
        """
        Frame and send one request packet and return a L{Deferred} that
        fires with the agent's response.

        @param reqType: an AGENTC_* message number.
        @param data: the already-serialized request payload.
        """
        pack = struct.pack('!LB',len(data) + 1, reqType) + data
        self.transport.write(pack)
        d = defer.Deferred()
        self.deferreds.append(d)
        return d

    def requestIdentities(self):
        """
        @return: A L{Deferred} which will fire with a list of all keys found in
            the SSH agent. The list of keys is comprised of (public key blob,
            comment) tuples.
        """
        d = self.sendRequest(AGENTC_REQUEST_IDENTITIES, '')
        d.addCallback(self._cbRequestIdentities)
        return d

    def _cbRequestIdentities(self, data):
        """
        Unpack a collection of identities into a list of tuples comprised of
        public key blobs and comments.
        """
        if ord(data[0]) != AGENT_IDENTITIES_ANSWER:
            raise ConchError('unexpected response: %i' % ord(data[0]))
        numKeys = struct.unpack('!L', data[1:5])[0]
        keys = []
        data = data[5:]
        # Each identity is two SSH strings: the public key blob, then its
        # free-form comment.
        for i in range(numKeys):
            blob, data = getNS(data)
            comment, data = getNS(data)
            keys.append((blob, comment))
        return keys

    def addIdentity(self, blob, comment = ''):
        """
        Add a private key blob to the agent's collection of keys.
        """
        # `blob` is the serialized private key; the comment is appended as
        # an SSH string at the end of the request.
        req = blob
        req += NS(comment)
        return self.sendRequest(AGENTC_ADD_IDENTITY, req)

    def signData(self, blob, data):
        """
        Request that the agent sign the given C{data} with the private key
        which corresponds to the public key given by C{blob}. The private
        key should have been added to the agent already.

        @type blob: C{str}
        @type data: C{str}
        @return: A L{Deferred} which fires with a signature for given data
            created with the given key.
        """
        req = NS(blob)
        req += NS(data)
        req += '\000\000\000\000' # flags (none defined; must be zero)
        return self.sendRequest(AGENTC_SIGN_REQUEST, req).addCallback(self._cbSignData)

    def _cbSignData(self, data):
        """
        Check the response type byte and extract the signature string from a
        SIGN_RESPONSE packet.
        """
        if ord(data[0]) != AGENT_SIGN_RESPONSE:
            raise ConchError('unexpected data: %i' % ord(data[0]))
        signature = getNS(data[1:])[0]
        return signature

    def removeIdentity(self, blob):
        """
        Remove the private key corresponding to the public key in blob from the
        running agent.
        """
        req = NS(blob)
        return self.sendRequest(AGENTC_REMOVE_IDENTITY, req)

    def removeAllIdentities(self):
        """
        Remove all keys from the running agent.
        """
        return self.sendRequest(AGENTC_REMOVE_ALL_IDENTITIES, '')
class SSHAgentServer(protocol.Protocol):
    """
    The server side of the SSH agent protocol. This is equivalent to
    ssh-agent(1) and can be used with either ssh-add(1) or the SSHAgentClient
    protocol, also in this package.
    """

    def __init__(self):
        # Incoming wire bytes not yet parsed into a complete packet.
        self.buf = ''

    def dataReceived(self, data):
        """
        Buffer incoming bytes and dispatch each complete packet to the
        C{agentc_*} handler matching its message type. Packets are framed
        as a 4-byte big-endian length, a 1-byte message type, and the
        payload.
        """
        self.buf += data
        while 1:
            # Need the full 4-byte length prefix plus at least one body byte.
            if len(self.buf) <= 4:
                return
            packLen = struct.unpack('!L', self.buf[:4])[0]
            if len(self.buf) < 4 + packLen:
                return
            packet, self.buf = self.buf[4:4 + packLen], self.buf[4 + packLen:]
            reqType = ord(packet[0])
            # `messages` maps AGENTC_* numbers to handler-name suffixes.
            reqName = messages.get(reqType, None)
            if not reqName:
                # Unknown message type: report failure rather than dropping
                # the request silently.
                self.sendResponse(AGENT_FAILURE, '')
            else:
                f = getattr(self, 'agentc_%s' % reqName)
                if getattr(self.factory, 'keys', None) is None:
                    # The factory must provide a key store; fail the request
                    # on the wire and raise loudly on the server side.
                    self.sendResponse(AGENT_FAILURE, '')
                    raise MissingKeyStoreError()
                f(packet[1:])

    def sendResponse(self, reqType, data):
        """
        Frame and write one response packet of type C{reqType} carrying
        C{data}.
        """
        pack = struct.pack('!LB', len(data) + 1, reqType) + data
        self.transport.write(pack)

    def agentc_REQUEST_IDENTITIES(self, data):
        """
        Return all of the identities that have been added to the server
        """
        assert data == ''
        numKeys = len(self.factory.keys)
        resp = []
        resp.append(struct.pack('!L', numKeys))
        for key, comment in self.factory.keys.itervalues():
            resp.append(NS(key.blob())) # yes, wrapped in an NS
            resp.append(NS(comment))
        self.sendResponse(AGENT_IDENTITIES_ANSWER, ''.join(resp))

    def agentc_SIGN_REQUEST(self, data):
        """
        Data is a structure with a reference to an already added key object and
        some data that the clients wants signed with that key. If the key
        object wasn't loaded, return AGENT_FAILURE, else return the signature.
        """
        blob, data = getNS(data)
        if blob not in self.factory.keys:
            return self.sendResponse(AGENT_FAILURE, '')
        signData, data = getNS(data)
        # Trailing flags field; no flags are defined, so it must be zero.
        assert data == '\000\000\000\000'
        self.sendResponse(AGENT_SIGN_RESPONSE, NS(self.factory.keys[blob][0].sign(signData)))

    def agentc_ADD_IDENTITY(self, data):
        """
        Adds a private key to the agent's collection of identities. On
        subsequent interactions, the private key can be accessed using only the
        corresponding public key.
        """
        # need to pre-read the key data so we can get past it to the comment string
        keyType, rest = getNS(data)
        # Number of multi-precision integers that make up each key type's
        # private blob.
        if keyType == 'ssh-rsa':
            nmp = 6
        elif keyType == 'ssh-dss':
            nmp = 5
        else:
            raise keys.BadKeyError('unknown blob type: %s' % keyType)
        rest = getMP(rest, nmp)[-1] # ignore the key data for now, we just want the comment
        comment, rest = getNS(rest) # the comment, tacked onto the end of the key blob
        k = keys.Key.fromString(data, type='private_blob') # not wrapped in NS here
        # Index by public-key blob so SIGN/REMOVE requests can look it up.
        self.factory.keys[k.blob()] = (k, comment)
        self.sendResponse(AGENT_SUCCESS, '')

    def agentc_REMOVE_IDENTITY(self, data):
        """
        Remove a specific key from the agent's collection of identities.
        """
        blob, _ = getNS(data)
        k = keys.Key.fromString(blob, type='blob')
        del self.factory.keys[k.blob()]
        self.sendResponse(AGENT_SUCCESS, '')

    def agentc_REMOVE_ALL_IDENTITIES(self, data):
        """
        Remove all keys from the agent's collection of identities.
        """
        assert data == ''
        self.factory.keys = {}
        self.sendResponse(AGENT_SUCCESS, '')

    # v1 messages that we ignore because we don't keep v1 keys
    # open-ssh sends both v1 and v2 commands, so we have to
    # do no-ops for v1 commands or we'll get "bad request" errors
    def agentc_REQUEST_RSA_IDENTITIES(self, data):
        """
        v1 message for listing RSA1 keys; superseded by
        agentc_REQUEST_IDENTITIES, which handles different key types.
        """
        # Always report zero v1 identities.
        self.sendResponse(AGENT_RSA_IDENTITIES_ANSWER, struct.pack('!L', 0))

    def agentc_REMOVE_RSA_IDENTITY(self, data):
        """
        v1 message for removing RSA1 keys; superseded by
        agentc_REMOVE_IDENTITY, which handles different key types.
        """
        self.sendResponse(AGENT_SUCCESS, '')

    def agentc_REMOVE_ALL_RSA_IDENTITIES(self, data):
        """
        v1 message for removing all RSA1 keys; superseded by
        agentc_REMOVE_ALL_IDENTITIES, which handles different key types.
        """
        self.sendResponse(AGENT_SUCCESS, '')
# Message numbers from OpenSSH's PROTOCOL.agent. AGENTC_* are requests
# sent by the client; AGENT_* are responses sent by the agent.
AGENTC_REQUEST_RSA_IDENTITIES = 1
AGENT_RSA_IDENTITIES_ANSWER = 2
AGENT_FAILURE = 5
AGENT_SUCCESS = 6
AGENTC_REMOVE_RSA_IDENTITY = 8
AGENTC_REMOVE_ALL_RSA_IDENTITIES = 9
AGENTC_REQUEST_IDENTITIES = 11
AGENT_IDENTITIES_ANSWER = 12
AGENTC_SIGN_REQUEST = 13
AGENT_SIGN_RESPONSE = 14
AGENTC_ADD_IDENTITY = 17
AGENTC_REMOVE_IDENTITY = 18
AGENTC_REMOVE_ALL_IDENTITIES = 19

# Reverse map from AGENTC_* message numbers to their short names (the part
# after the prefix), used by SSHAgentServer.dataReceived to find the
# matching agentc_* handler method.
messages = {}
for name, value in locals().copy().items():
    if name[:7] == 'AGENTC_':
        messages[value] = name[7:] # doesn't handle doubles
| bsd-3-clause |
henaras/sahara | sahara/plugins/hdp/versions/abstractversionhandler.py | 10 | 1809 | # Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AbstractVersionHandler(object):
    """Abstract interface implemented once per supported HDP version.

    A version handler bundles everything that varies between HDP releases:
    configuration metadata, cluster-spec construction, the Ambari client,
    node processes, Swift integration and EDP (job execution) support.
    """

    @abc.abstractmethod
    def get_config_items(self):
        # Collection of configuration objects exposed by this version.
        return

    @abc.abstractmethod
    def get_applicable_target(self, name):
        # Target (service/scope) the named configuration belongs to.
        # NOTE(review): exact semantics not visible here -- confirm against
        # a concrete version handler.
        return

    @abc.abstractmethod
    def get_cluster_spec(self, cluster, user_inputs, scaled_groups=None,
                         cluster_template=None):
        # Build the cluster specification for `cluster`, optionally taking
        # scaling information or an explicit template into account.
        return

    @abc.abstractmethod
    def get_ambari_client(self):
        # Client used to talk to this version's Ambari management server.
        return

    @abc.abstractmethod
    def get_default_cluster_configuration(self):
        # Baseline configuration applied before user inputs.
        return

    @abc.abstractmethod
    def get_node_processes(self):
        # Processes (e.g. daemons) that may run on cluster nodes.
        return

    @abc.abstractmethod
    def install_swift_integration(self, servers):
        # Install Hadoop/Swift integration bits on the given servers.
        return

    @abc.abstractmethod
    def get_version(self):
        # Version string this handler implements.
        return

    @abc.abstractmethod
    def get_services_processor(self):
        # Module/object that knows how to process this version's services.
        return

    @abc.abstractmethod
    def get_edp_engine(self, cluster, job_type):
        # EDP engine able to run `job_type` jobs on `cluster`.
        return

    @abc.abstractmethod
    def get_edp_job_types(self):
        # Job types supported by this version's EDP integration.
        return []

    @abc.abstractmethod
    def get_edp_config_hints(self, job_type):
        # Default/suggested EDP configuration values for `job_type`.
        return {}

    @abc.abstractmethod
    def get_open_ports(self, node_group):
        # Ports that must be opened for the processes in `node_group`.
        return []
| apache-2.0 |
alivecor/tensorflow | tensorflow/contrib/training/python/training/training.py | 23 | 20136 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains various routines and helper functions for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
tf.contrib.losses.log_loss(predictions, labels)
total_loss = tf.contrib.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Run training.
tf.contrib.training.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to use the `train` function, one needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. tf.contrib.training.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
transform_grads_fn=clip_gradient_norms_fn(3))
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
def transform_grads_fn(grads):
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
return tf.contrib.training.multiply_gradients(
grads, gradient_multipliers)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
transform_grads_fn=transform_grads_fn)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. tf.contrib.training.create_train_op
allows a user to pass in a list of update_ops to call along with the gradient
updates.
train_op = tf.contrib.training.create_train_op(
total_loss, optimizer, update_ops)
By default, tf.contrib.training.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, the
tf.contrib.layers.batch_norm function adds the moving mean and moving variance
updates to this collection. Consequently, users who want to use
tf.contrib.layers.batch_norm will not need to take any additional steps in order
to have the moving mean and moving variance updates be computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force `create_train_op` to NOT use ANY update_ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use a set of update ops in addition to the default updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
One can use a tf.Scaffold and an initializing function to do so.
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = tf.contrib.framework.get_model_variables()
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
# Run training.
scaffold = tf.Scaffold(init_fn=init_fn)
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint':
tf.contrib.framework.get_unique_variable('var0'),
'name_var_1_in_checkpoint':
tf.contrib.framework.get_unique_variable('var1')
}
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter those variables to initialize as follows:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9])
# or
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
exclude=["conv"])
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values coming from an
arbitrary source (a text document, matlab file, etc). While this is technically
feasible using assign operations, this strategy results in the values of your
weights being stored in the graph. For large models, this becomes prohibitively
large. However, it's possible to perform this initial assignment without having
to store the values of the initial model in the graph itself by using
placeholders and a feed dictionary:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import monitored_session
from tensorflow.python.training import optimizer as tf_optimizer
# TODO(nsilberman): move add_gradients_summaries, clip_gradient_norms and
# multiply_gradients into contrib/summaries and contrib/optimizers.py
# Public API of this module; everything else is an implementation detail.
__all__ = [
    'add_gradients_summaries',
    'clip_gradient_norms',
    'clip_gradient_norms_fn',
    'create_train_op',
    'multiply_gradients',
    'train',
]
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    # Variables without a gradient get a log line instead of summaries.
    if grad is None:
      logging.info('Var %s has no gradient', var.op.name)
      continue
    grad_values = grad.values if isinstance(grad, ops.IndexedSlices) else grad
    summaries.append(
        summary.histogram(var.op.name + '_gradient', grad_values))
    summaries.append(
        summary.scalar(var.op.name + '_gradient_norm',
                       clip_ops.global_norm([grad_values])))
  return summaries
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_pairs = []
  for grad, var in gradients_to_variables:
    clipped = grad
    # `None` gradients pass through untouched.
    if clipped is not None:
      if isinstance(clipped, ops.IndexedSlices):
        # Clip only the values; indices and dense_shape are preserved.
        clipped_values = clip_ops.clip_by_norm(clipped.values, max_norm)
        clipped = ops.IndexedSlices(clipped_values, clipped.indices,
                                    clipped.dense_shape)
      else:
        clipped = clip_ops.clip_by_norm(clipped, max_norm)
    clipped_pairs.append((clipped, var))
  return clipped_pairs
def clip_gradient_norms_fn(max_norm):
  """Returns a `transform_grads_fn` function for gradient clipping.

  The returned callable applies `clip_gradient_norms` with the captured
  `max_norm` to a list of (gradient, variable) pairs.
  """
  def _transform(grads_and_vars):
    return clip_gradient_norms(grads_and_vars, max_norm)
  return _transform
def multiply_gradients(grads_and_vars, gradient_multipliers):
  """Multiply specified gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
    gradient_multipliers: A map from either `Variables` or `Variable` op names
      to the coefficient by which the associated gradient should be scaled.

  Returns:
    The updated list of gradient to variable pairs.

  Raises:
    ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
      is empty or None or if `gradient_multipliers` is not a dictionary.
  """
  if not isinstance(grads_and_vars, list):
    raise ValueError('`grads_and_vars` must be a list.')
  if not gradient_multipliers:
    raise ValueError('`gradient_multipliers` is empty.')
  if not isinstance(gradient_multipliers, dict):
    raise ValueError('`gradient_multipliers` must be a dict.')
  scaled_pairs = []
  for grad, var in grads_and_vars:
    # A multiplier may be keyed by the variable itself or by its op name.
    if var in gradient_multipliers:
      key = var
    elif var.op.name in gradient_multipliers:
      key = var.op.name
    else:
      # No multiplier registered for this variable: pass through unchanged.
      scaled_pairs.append((grad, var))
      continue
    if grad is None:
      raise ValueError('Requested multiple of `None` gradient.')
    multiplier = constant_op.constant(
        gradient_multipliers[key], dtype=grad.dtype)
    if isinstance(grad, ops.IndexedSlices):
      # Scale only the values; indices and dense_shape are preserved.
      grad = ops.IndexedSlices(grad.values * multiplier, grad.indices,
                               grad.dense_shape)
    else:
      grad = grad * multiplier
    scaled_pairs.append((grad, var))
  return scaled_pairs
# Sentinel default for create_train_op's `global_step`: distinguishes "use
# the framework-managed global step" (this value) from an explicit `None`
# (no global step update at all).
_USE_GLOBAL_STEP = 0
def create_train_op(total_loss,
                    optimizer,
                    global_step=_USE_GLOBAL_STEP,
                    update_ops=None,
                    variables_to_train=None,
                    transform_grads_fn=None,
                    summarize_gradients=False,
                    gate_gradients=tf_optimizer.Optimizer.GATE_OP,
                    aggregation_method=None,
                    colocate_gradients_with_ops=False,
                    check_numerics=True):
  """Creates an `Operation` that evaluates the gradients and returns the loss.

  Args:
    total_loss: A `Tensor` representing the total loss.
    optimizer: A tf.Optimizer to use for computing the gradients.
    global_step: A `Tensor` representing the global step variable. If left as
      `_USE_GLOBAL_STEP`, then tf.contrib.framework.global_step() is used.
    update_ops: An optional list of updates to execute. If `update_ops` is
      `None`, then the update ops are set to the contents of the
      `tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
      it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,
      a warning will be displayed.
    variables_to_train: an optional list of variables to train. If None, it will
      default to all tf.trainable_variables().
    transform_grads_fn: A function which takes a single argument, a list of
      gradient to variable pairs (tuples), performs any requested gradient
      updates, such as gradient clipping or multipliers, and returns the updated
      list.
    summarize_gradients: Whether or not add summaries for each gradient.
    gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
    aggregation_method: Specifies the method used to combine gradient terms.
      Valid values are defined in the class `AggregationMethod`.
    colocate_gradients_with_ops: Whether or not to try colocating the gradients
      with the ops that generated them.
    check_numerics: Whether or not we apply check_numerics.

  Returns:
    A `Tensor` that when evaluated, computes the gradients and returns the total
    loss value.
  """
  if global_step is _USE_GLOBAL_STEP:
    global_step = variables.get_or_create_global_step()
  # Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.
  global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
  if update_ops is None:
    update_ops = global_update_ops
  else:
    update_ops = set(update_ops)
  # Warn (but proceed) when the caller supplied a subset that misses some
  # collected update ops -- e.g. batch-norm statistics would go stale.
  if not global_update_ops.issubset(update_ops):
    logging.warning('update_ops in create_train_op does not contain all the '
                    ' update_ops in GraphKeys.UPDATE_OPS')
  # Make sure update_ops are computed before total_loss.
  if update_ops:
    with ops.control_dependencies(update_ops):
      barrier = control_flow_ops.no_op(name='update_barrier')
    total_loss = control_flow_ops.with_dependencies([barrier], total_loss)
  if variables_to_train is None:
    # Default to tf.trainable_variables()
    variables_to_train = tf_variables.trainable_variables()
  else:
    # Make sure that variables_to_train are in tf.trainable_variables()
    # NOTE(review): these asserts are stripped under `python -O`, so this
    # validation is best-effort only.
    for v in variables_to_train:
      assert v in tf_variables.trainable_variables()
  # An empty variable list would silently train nothing; fail fast.
  assert variables_to_train
  # Create the gradients. Note that apply_gradients adds the gradient
  # computation to the current graph.
  grads = optimizer.compute_gradients(
      total_loss,
      variables_to_train,
      gate_gradients=gate_gradients,
      aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops)
  # Optional caller hook for clipping/scaling before they are applied.
  if transform_grads_fn:
    grads = transform_grads_fn(grads)
  # Summarize gradients.
  if summarize_gradients:
    with ops.name_scope('summarize_grads'):
      add_gradients_summaries(grads)
  # Create gradient updates.
  grad_updates = optimizer.apply_gradients(grads, global_step=global_step)
  with ops.name_scope('train_op'):
    # Make sure total_loss is valid.
    if check_numerics:
      total_loss = array_ops.check_numerics(total_loss,
                                            'LossTensor is inf or nan')
    # Ensure the train_tensor computes grad_updates.
    train_op = control_flow_ops.with_dependencies([grad_updates], total_loss)
  # Add the operation used for training to the 'train_op' collection
  train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
  if train_op not in train_ops:
    train_ops.append(train_op)
  return train_op
def train(train_op,
          logdir,
          master='',
          is_chief=True,
          scaffold=None,
          hooks=None,
          chief_only_hooks=None,
          save_checkpoint_secs=600,
          save_summaries_steps=100,
          config=None):
  """Runs the training loop.

  Args:
    train_op: A `Tensor` that, when executed, will apply the gradients and
      return the loss value.
    logdir: The directory where the graph and checkpoints are saved.
    master: The URL of the master.
    is_chief: Specifies whether or not the training is being run by the primary
      replica during replica training.
    scaffold: A tf.train.Scaffold instance.
    hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
      training loop.
    chief_only_hooks: List of `tf.train.SessionRunHook` instances which are run
      inside the training loop for the chief trainer only.
    save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
      using a default checkpoint saver. If `save_checkpoint_secs` is set to
      `None`, then the default checkpoint saver isn't used.
    save_summaries_steps: The frequency, in number of global steps, that the
      summaries are written to disk using a default summary saver. If
      `save_summaries_steps` is set to `None`, then the default summary saver
      isn't used.
    config: An instance of `tf.ConfigProto`.

  Returns:
    the value of the loss function after training.

  Raises:
    ValueError: if `logdir` is `None` but either `save_checkpoint_secs` or
      `save_summaries_steps` is not `None`, since the default savers would
      have nowhere to write.
  """
  # Only the chief writes checkpoints/summaries, so only the chief needs a
  # logdir; fail fast when a default saver is enabled without one.
  if logdir is None and is_chief:
    if save_summaries_steps:
      raise ValueError(
          'logdir cannot be None when save_summaries_steps is not None')
    if save_checkpoint_secs:
      raise ValueError(
          'logdir cannot be None when save_checkpoint_secs is not None')

  with monitored_session.MonitoredTrainingSession(
      master=master,
      is_chief=is_chief,
      checkpoint_dir=logdir,
      scaffold=scaffold,
      hooks=hooks,
      chief_only_hooks=chief_only_hooks,
      save_checkpoint_secs=save_checkpoint_secs,
      save_summaries_steps=save_summaries_steps,
      config=config) as session:
    loss = None
    # Run until a hook (e.g. a stop hook) requests termination.
    while not session.should_stop():
      loss = session.run(train_op)
  return loss
| apache-2.0 |
jacky-young/crosswalk-test-suite | webapi/webapi-appuri-w3c-tests/inst.wgt.py | 294 | 6758 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run a shell command, streaming its output, and return (exit_code, lines).

    Python 2 only (print-statement syntax). Output lines are echoed to
    stdout as they arrive and also collected into the returned list.
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        # Read one line at a time so long-running commands stream live.
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Empty line plus a non-None poll() result means the process has
        # exited and its output is fully drained.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations so they run as the target user's session.

    Commands that do not mention pkgcmd are returned unchanged.
    """
    if "pkgcmd" not in cmd:
        return cmd
    # pkgcmd needs the target user's DBus session environment (XW_ENV).
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Return (exit_code, output) of an `id -u` lookup for PARAMETERS.user."""
    if PARAMETERS.mode == "SDB":
        lookup = "sdb -s %s shell id -u %s" % (PARAMETERS.device, PARAMETERS.user)
    else:
        lookup = 'ssh %s "id -u %s"' % (PARAMETERS.device, PARAMETERS.user)
    return doCMD(lookup)
def getPKGID(pkg_name=None):
    """Look up the installed package id for a widget name; None if absent."""
    listing = updateCMD('pkgcmd -l')
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (PARAMETERS.device, listing)
    else:
        cmd = "ssh %s \"%s\"" % (PARAMETERS.device, listing)
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    # pkgcmd -l lines look like: ... pkgid [<id>] ... [<name>] ...
    marker = "[" + pkg_name + "]"
    for line in output:
        if marker in line:
            fields = line.split()
            return fields[fields.index("pkgid") + 1].strip("[]")
    return None
def doRemoteCMD(cmd=None):
    """Run cmd on the device via sdb or ssh; return (exit_code, output)."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    """Copy src to dest on the device; return True on success, False otherwise.

    Bug fix: the previous version returned True when the copy command exited
    non-zero, inverting the success flag that callers test with
    `if not doRemoteCopy(...)`.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush pending device writes before reporting the result.
    doRemoteCMD("sync")
    # Zero exit status means the copy succeeded.
    return return_code == 0
def uninstPKGs():
    """Uninstall every .wgt widget found under SCRIPT_DIR from the device.

    Returns True only if every uninstall and the final cleanup succeeded.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # mediasrc directories hold payload data, not installable packages.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                # Resolve the installed package id from the widget base name.
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t wgt -q -n %s" % pkg_id)
                # pkgcmd reports errors in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Remove the deployed test sources regardless of earlier failures.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Copy every .wgt under SCRIPT_DIR to the device and install it.

    Returns True only if every copy and install step succeeded.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # mediasrc directories hold payload data, not installable packages.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
                # The pushed widget file is no longer needed once installed.
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # pkgcmd reports errors in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Do some special copy/delete... steps
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''
    return action_status
def main():
    """Parse command line options, set up globals, and run (un)install.

    Python 2 only (print statement and old-style `except Exception, e`
    syntax). Mutates the module globals PARAMETERS, SRC_DIR, PKG_SRC_DIR
    and XW_ENV in that order before dispatching.
    """
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    # Default to the 'app' user when none was given.
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        # No explicit device: pick the first one `sdb devices` reports.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # Resolve the target user's uid so XW_ENV can point at their DBus socket.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    # Default action (no -u) is install.
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
# Script entry point: run main(); exit 0 unless main() already exited non-zero.
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause |
JimCircadian/ansible | lib/ansible/modules/net_tools/ldap/ldap_attr.py | 9 | 8452 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: ldap_attr
short_description: Add or remove LDAP attribute values.
description:
- Add or remove LDAP attribute values.
notes:
- This only deals with attributes on existing entries. To add or remove
whole entries, see M(ldap_entry).
- The default authentication settings will attempt to use a SASL EXTERNAL
bind over a UNIX domain socket. This works well with the default Ubuntu
install for example, which includes a cn=peercred,cn=external,cn=auth ACL
rule allowing root to modify the server configuration. If you need to use
a simple bind to access your server, pass the credentials in I(bind_dn)
and I(bind_pw).
- For I(state=present) and I(state=absent), all value comparisons are
performed on the server for maximum accuracy. For I(state=exact), values
have to be compared in Python, which obviously ignores LDAP matching
rules. This should work out in most cases, but it is theoretically
possible to see spurious changes when target and actual values are
semantically identical but lexically distinct.
version_added: '2.3'
author:
- Jiri Tyr (@jtyr)
requirements:
- python-ldap
options:
name:
description:
- The name of the attribute to modify.
required: true
state:
description:
- The state of the attribute values. If C(present), all given
values will be added if they're missing. If C(absent), all given
values will be removed if present. If C(exact), the set of values
will be forced to exactly those provided and no others. If
I(state=exact) and I(value) is empty, all values for this
attribute will be removed.
choices: [present, absent, exact]
default: present
values:
description:
- The value(s) to add or remove. This can be a string or a list of
strings. The complex argument format is required in order to pass
a list of strings (see examples).
required: true
extends_documentation_fragment: ldap.documentation
"""
EXAMPLES = """
- name: Configure directory number 1 for example.com
ldap_attr:
dn: olcDatabase={1}hdb,cn=config
name: olcSuffix
values: dc=example,dc=com
state: exact
# The complex argument format is required here to pass a list of ACL strings.
- name: Set up the ACL
ldap_attr:
dn: olcDatabase={1}hdb,cn=config
name: olcAccess
values:
- >-
{0}to attrs=userPassword,shadowLastChange
by self write
by anonymous auth
by dn="cn=admin,dc=example,dc=com" write
by * none'
- >-
{1}to dn.base="dc=example,dc=com"
by dn="cn=admin,dc=example,dc=com" write
by * read
state: exact
- name: Declare some indexes
ldap_attr:
dn: olcDatabase={1}hdb,cn=config
name: olcDbIndex
values: "{{ item }}"
with_items:
- objectClass eq
- uid eq
- name: Set up a root user, which we can use later to bootstrap the directory
ldap_attr:
dn: olcDatabase={1}hdb,cn=config
name: "{{ item.key }}"
values: "{{ item.value }}"
state: exact
with_dict:
olcRootDN: cn=root,dc=example,dc=com
olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
- name: Get rid of an unneeded attribute
ldap_attr:
dn: uid=jdoe,ou=people,dc=example,dc=com
name: shadowExpire
values: ""
state: exact
server_uri: ldap://localhost/
bind_dn: cn=admin,dc=example,dc=com
bind_pw: password
#
# The same as in the previous example but with the authentication details
# stored in the ldap_auth variable:
#
# ldap_auth:
# server_uri: ldap://localhost/
# bind_dn: cn=admin,dc=example,dc=com
# bind_pw: password
- name: Get rid of an unneeded attribute
ldap_attr:
dn: uid=jdoe,ou=people,dc=example,dc=com
name: shadowExpire
values: ""
state: exact
params: "{{ ldap_auth }}"
"""
RETURN = """
modlist:
description: list of modified parameters
returned: success
type: list
sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.ldap import LdapGeneric, gen_specs
try:
import ldap
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
class LdapAttr(LdapGeneric):
    """Builds python-ldap modlists for one attribute of an existing entry.

    The requested values are normalized to a list of bytes at construction
    time; add/delete/exact each return a modlist suitable for modify_s
    (empty when no change is needed).
    """

    def __init__(self, module):
        LdapGeneric.__init__(self, module)

        # Shortcuts
        self.name = self.module.params['name']
        self.state = self.module.params['state']

        # Normalize values. Materialize as a real list: under Python 3,
        # map() returns a one-shot iterator, which would be exhausted by
        # the first frozenset()/len() call and break later uses of
        # self.values.
        if isinstance(self.module.params['values'], list):
            self.values = [to_bytes(value) for value in self.module.params['values']]
        else:
            self.values = [to_bytes(self.module.params['values'])]

    def add(self):
        """Return a modlist adding the requested values that are missing."""
        # List comprehension rather than bare filter(): filter objects have
        # no len() under Python 3.
        values_to_add = [value for value in self.values
                         if self._is_value_absent(value)]

        if len(values_to_add) > 0:
            modlist = [(ldap.MOD_ADD, self.name, values_to_add)]
        else:
            modlist = []

        return modlist

    def delete(self):
        """Return a modlist deleting the requested values that are present."""
        values_to_delete = [value for value in self.values
                            if self._is_value_present(value)]

        if len(values_to_delete) > 0:
            modlist = [(ldap.MOD_DELETE, self.name, values_to_delete)]
        else:
            modlist = []

        return modlist

    def exact(self):
        """Return a modlist forcing the attribute to exactly self.values."""
        try:
            results = self.connection.search_s(
                self.dn, ldap.SCOPE_BASE, attrlist=[self.name])
        except ldap.LDAPError as e:
            self.fail("Cannot search for attribute %s" % self.name, e)

        current = results[0][1].get(self.name, [])
        modlist = []

        # Compare as sets: LDAP attribute values are unordered.
        if frozenset(self.values) != frozenset(current):
            if len(current) == 0:
                modlist = [(ldap.MOD_ADD, self.name, self.values)]
            elif len(self.values) == 0:
                # MOD_DELETE with None removes every value of the attribute.
                modlist = [(ldap.MOD_DELETE, self.name, None)]
            else:
                modlist = [(ldap.MOD_REPLACE, self.name, self.values)]

        return modlist

    def _is_value_present(self, value):
        """ True if the target attribute has the given value. """
        try:
            is_present = bool(
                self.connection.compare_s(self.dn, self.name, value))
        except ldap.NO_SUCH_ATTRIBUTE:
            is_present = False

        return is_present

    def _is_value_absent(self, value):
        """ True if the target attribute doesn't have the given value. """
        return not self._is_value_present(value)
def main():
    """Ansible module entry point: build and apply the attribute modlist."""
    module = AnsibleModule(
        argument_spec=gen_specs(
            name=dict(required=True),
            params=dict(type='dict'),
            state=dict(
                default='present',
                choices=['present', 'absent', 'exact']),
            values=dict(required=True, type='raw'),
        ),
        supports_check_mode=True,
    )

    if not HAS_LDAP:
        module.fail_json(
            msg="Missing required 'ldap' module (pip install python-ldap)")

    # Merge any auth settings supplied via the 'params' dict, then drop it.
    if 'params' in module.params and isinstance(module.params['params'], dict):
        module.params.update(module.params['params'])
    module.params.pop('params', None)

    # Named ldap_attr (not 'ldap') so the python-ldap module import used by
    # LdapAttr's methods is not shadowed inside this function.
    ldap_attr = LdapAttr(module)

    state = module.params['state']

    # Dispatch on the requested state; 'state' is constrained to these
    # three choices by the argument spec above.
    actions = {
        'present': ldap_attr.add,
        'absent': ldap_attr.delete,
        'exact': ldap_attr.exact,
    }
    modlist = actions[state]()

    changed = bool(modlist)
    if changed and not module.check_mode:
        try:
            ldap_attr.connection.modify_s(ldap_attr.dn, modlist)
        except Exception as e:
            module.fail_json(msg="Attribute action failed.", details=to_native(e),
                             exception=traceback.format_exc())

    module.exit_json(changed=changed, modlist=modlist)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
CentreForCorpusResearch/clic | docs/conf.py | 2 | 9403 | # -*- coding: utf-8 -*-
#
# CLiC Dickens documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 15 10:51:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../clic'))
sys.path.append(os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CLiC Dickens'
copyright = u'2016, J. de Joode'
author = u'J. de Joode'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.4'
# The full version, including alpha/beta/rc tags.
release = u'1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# The first assignment ('alabaster', the Sphinx default) was dead code that
# was immediately overwritten; keep only the Read the Docs theme.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CLiCDickensdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CLiCDickens.tex', u'CLiC Dickens Documentation',
u'J. de Joode', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'clicdickens', u'CLiC Dickens Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CLiCDickens', u'CLiC Dickens Documentation',
author, 'CLiCDickens', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
AladdinSonni/youtube-dl | youtube_dl/extractor/expotv.py | 129 | 2591 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
class ExpoTVIE(InfoExtractor):
    """Extractor for review videos hosted on expotv.com."""
    _VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])'
    _TEST = {
        'url': 'http://www.expotv.com/videos/reviews/1/24/LinneCardscom/17561',
        'md5': '2985e6d7a392b2f7a05e0ca350fe41d0',
        'info_dict': {
            'id': '17561',
            'ext': 'mp4',
            'upload_date': '20060212',
            'title': 'My Favorite Online Scrapbook Store',
            'view_count': int,
            'description': 'You\'ll find most everything you need at this virtual store front.',
            'uploader': 'Anna T.',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        # The numeric video id is captured by _VALID_URL's <id> group.
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        # The page embeds a player key needed to fetch the JSON config.
        player_key = self._search_regex(
            r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
        config_url = 'http://client.expotv.com/video/config/%s/%s' % (
            video_id, player_key)
        config = self._download_json(
            config_url, video_id,
            note='Downloading video configuration')
        # One format entry per source in the config; the extension is
        # recovered from the download URL's filename parameter when present.
        formats = [{
            'url': fcfg['file'],
            'height': int_or_none(fcfg.get('height')),
            'format_note': fcfg.get('label'),
            'ext': self._search_regex(
                r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'],
                'file extension', default=None),
        } for fcfg in config['sources']]
        self._sort_formats(formats)
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = config.get('image')
        # Remaining metadata is scraped from the HTML page itself.
        view_count = int_or_none(self._search_regex(
            r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts'))
        uploader = self._search_regex(
            r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader',
            fatal=False)
        upload_date = unified_strdate(self._search_regex(
            r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date',
            fatal=False))
        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'view_count': view_count,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
        }
| unlicense |
proxysh/Safejumper-for-Desktop | buildlinux/env32/lib/python2.7/site-packages/twisted/internet/_win32serialport.py | 19 | 4617 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial port support for Windows.
Requires PySerial and pywin32.
"""
from __future__ import division, absolute_import
# system imports
from serial import PARITY_NONE
from serial import STOPBITS_ONE
from serial import EIGHTBITS
import win32file, win32event
# twisted imports
from twisted.internet import abstract
# sibling imports
from twisted.internet.serialport import BaseSerialPort
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
    """A serial device, acting as a transport, that uses a win32 event."""

    connected = 1

    def __init__(self, protocol, deviceNameOrPortNumber, reactor,
                 baudrate=9600, bytesize=EIGHTBITS, parity=PARITY_NONE,
                 stopbits=STOPBITS_ONE, xonxoff=0, rtscts=0):
        self._serial = self._serialFactory(
            deviceNameOrPortNumber, baudrate=baudrate, bytesize=bytesize,
            parity=parity, stopbits=stopbits, timeout=None,
            xonxoff=xonxoff, rtscts=rtscts)
        self.flushInput()
        self.flushOutput()
        self.reactor = reactor
        # Fix: self.protocol was previously assigned twice; once suffices.
        self.protocol = protocol
        self.outQueue = []
        self.closed = 0
        self.closedNotifies = 0
        self.writeInProgress = 0

        # Overlapped-I/O bookkeeping: a manual-reset event for reads
        # (second CreateEvent argument 1) and an auto-reset event for
        # writes (0), both registered with the reactor so it dispatches
        # serialReadEvent / serialWriteEvent when they are signalled.
        self._overlappedRead = win32file.OVERLAPPED()
        self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
        self._overlappedWrite = win32file.OVERLAPPED()
        self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)

        self.reactor.addEvent(self._overlappedRead.hEvent, self, 'serialReadEvent')
        self.reactor.addEvent(self._overlappedWrite.hEvent, self, 'serialWriteEvent')

        self.protocol.makeConnection(self)
        self._finishPortSetup()

    def _finishPortSetup(self):
        """
        Finish setting up the serial port.

        This is a separate method to facilitate testing.
        """
        # Clear any pending comm error state, then queue an initial
        # overlapped 1-byte read; its completion signals the read event.
        flags, comstat = win32file.ClearCommError(self._serial.hComPort)
        rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
                                               win32file.AllocateReadBuffer(1),
                                               self._overlappedRead)

    def serialReadEvent(self):
        # Get the byte produced by the pending overlapped read (non-blocking).
        n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 0)
        if n:
            first = str(self.read_buf[:n])
            # Drain whatever else has accumulated in the driver's input queue.
            flags, comstat = win32file.ClearCommError(self._serial.hComPort)
            if comstat.cbInQue:
                win32event.ResetEvent(self._overlappedRead.hEvent)
                rc, buf = win32file.ReadFile(self._serial.hComPort,
                                             win32file.AllocateReadBuffer(comstat.cbInQue),
                                             self._overlappedRead)
                # Wait (last argument 1) for the queued read to complete.
                n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 1)
                # Hand all the received data to the protocol at once.
                self.protocol.dataReceived(first + str(buf[:n]))
            else:
                self.protocol.dataReceived(first)

        # Queue the next 1-byte overlapped read.
        win32event.ResetEvent(self._overlappedRead.hEvent)
        rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
                                               win32file.AllocateReadBuffer(1),
                                               self._overlappedRead)

    def write(self, data):
        """Write data, queueing it if an overlapped write is already in flight."""
        if data:
            if self.writeInProgress:
                self.outQueue.append(data)
            else:
                self.writeInProgress = 1
                win32file.WriteFile(self._serial.hComPort, data, self._overlappedWrite)

    def serialWriteEvent(self):
        # A write completed: start the next queued write, if any.
        try:
            dataToWrite = self.outQueue.pop(0)
        except IndexError:
            self.writeInProgress = 0
            return
        else:
            win32file.WriteFile(self._serial.hComPort, dataToWrite, self._overlappedWrite)

    def connectionLost(self, reason):
        """
        Called when the serial port disconnects.

        Will call C{connectionLost} on the protocol that is handling the
        serial data.
        """
        self.reactor.removeEvent(self._overlappedRead.hEvent)
        self.reactor.removeEvent(self._overlappedWrite.hEvent)
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
        self.protocol.connectionLost(reason)
| gpl-2.0 |
sanketloke/scikit-learn | sklearn/svm/classes.py | 3 | 40654 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
                _LearntSelectorMixin, SparseCoefMixin):
    """Linear Support Vector Classification.

    Similar to SVC with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better to large numbers of
    samples.

    This class supports both dense and sparse input and the multiclass support
    is handled according to a one-vs-the-rest scheme.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term. Must be strictly positive.

    loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
        Specifies the loss function. 'hinge' is the standard SVM loss
        (used e.g. by the SVC class) while 'squared_hinge' is the
        square of the hinge loss.

    penalty : string, 'l1' or 'l2' (default='l2')
        Specifies the norm used in the penalization. The 'l2'
        penalty is the standard used in SVC. The 'l1' leads to ``coef_``
        vectors that are sparse.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.

    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
        Determines the multi-class strategy if `y` contains more than
        two classes.
        ``"ovr"`` trains n_classes one-vs-rest classifiers, while
        ``"crammer_singer"`` optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice as it rarely leads
        to better accuracy and is more expensive to compute.
        If ``"crammer_singer"`` is chosen, the options loss, penalty and dual
        will be ignored.

    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be already centered).

    intercept_scaling : float, optional (default=1)
        When self.fit_intercept is True, instance vector x becomes
        ``[x, self.intercept_scaling]``,
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to ``class_weight[i]*C`` for
        SVC. If not given, all classes are supposed to have
        weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    verbose : int, (default=0)
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    random_state : int seed, RandomState instance, or None (default=None)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    max_iter : int, (default=1000)
        The maximum number of iterations to be run.

    Attributes
    ----------
    coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        ``coef_`` is a readonly property derived from ``raw_coef_`` that
        follows the internal memory layout of liblinear.

    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
    that happens, try with a smaller ``tol`` parameter.

    The underlying implementation, liblinear, uses a sparse internal
    representation for the data that will incur a memory copy.

    Predict output may not match that of standalone liblinear in certain
    cases. See :ref:`differences from liblinear <liblinear_differences>`
    in the narrative documentation.

    References
    ----------
    `LIBLINEAR: A Library for Large Linear Classification
    <http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__

    See also
    --------
    SVC
        Implementation of Support Vector Machine classifier using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVC does.
        Furthermore SVC multi-class mode is implemented using one
        vs one scheme while LinearSVC uses one vs the rest. It is
        possible to implement one vs the rest with SVC by using the
        :class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
        Finally SVC can fit dense data without memory copy if the input
        is C-contiguous. Sparse data will still incur memory copy though.

    sklearn.linear_model.SGDClassifier
        SGDClassifier can optimize the same cost function as LinearSVC
        by adjusting the penalty and loss parameters. In addition it requires
        less memory, allows incremental (online) learning, and implements
        various loss functions and regularization regimes.
    """

    def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
                 C=1.0, multi_class='ovr', fit_intercept=True,
                 intercept_scaling=1, class_weight=None, verbose=0,
                 random_state=None, max_iter=1000):
        self.dual = dual
        self.tol = tol
        self.C = C
        self.multi_class = multi_class
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.verbose = verbose
        self.random_state = random_state
        self.max_iter = max_iter
        self.penalty = penalty
        self.loss = loss

    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target vector relative to X

        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        loss_l = self.loss.lower()

        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")

        # FIXME change loss_l --> self.loss after 0.18
        if loss_l in ('l1', 'l2'):
            # Map the deprecated aliases onto their canonical names and warn.
            old_loss = self.loss
            self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------

        # C must be strictly positive: C == 0 would remove the data-fit term
        # entirely and is rejected by liblinear.  The previous check (C < 0)
        # wrongly accepted C == 0 despite the error message.
        if self.C <= 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)

        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        check_classification_targets(y)
        self.classes_ = np.unique(y)

        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            self.class_weight, self.penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, self.multi_class,
            self.loss)

        if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
            # crammer_singer fits one weight vector per class even for binary
            # problems; collapse the pair into the conventional single vector.
            self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
            if self.fit_intercept:
                intercept = self.intercept_[1] - self.intercept_[0]
                self.intercept_ = np.array([intercept])

        return self
class LinearSVR(LinearModel, RegressorMixin):
    """Linear Support Vector Regression.

    Similar to SVR with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better to large numbers of
    samples.

    This class supports both dense and sparse input.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term. The penalty is a squared
        l2 penalty. The bigger this parameter, the less regularization is used.
        Must be strictly positive.

    loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the
        epsilon-insensitive loss (standard SVR) while
        'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.

    epsilon : float, optional (default=0.1)
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set ``epsilon=0``.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.

    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be already centered).

    intercept_scaling : float, optional (default=1)
        When self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    verbose : int, (default=0)
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    random_state : int seed, RandomState instance, or None (default=None)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    max_iter : int, (default=1000)
        The maximum number of iterations to be run.

    Attributes
    ----------
    coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.

    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    See also
    --------
    LinearSVC
        Implementation of Support Vector Machine classifier using the
        same library as this class (liblinear).

    SVR
        Implementation of Support Vector Machine regression using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVR does.

    sklearn.linear_model.SGDRegressor
        SGDRegressor can optimize the same cost function as LinearSVR
        by adjusting the penalty and loss parameters. In addition it requires
        less memory, allows incremental (online) learning, and implements
        various loss functions and regularization regimes.
    """

    def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
                 loss='epsilon_insensitive', fit_intercept=True,
                 intercept_scaling=1., dual=True, verbose=0,
                 random_state=None, max_iter=1000):
        self.tol = tol
        self.C = C
        self.epsilon = epsilon
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.verbose = verbose
        self.random_state = random_state
        self.max_iter = max_iter
        self.dual = dual
        self.loss = loss

    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target vector relative to X

        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        loss_l = self.loss.lower()

        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")

        # FIXME change loss_l --> self.loss after 0.18
        if loss_l in ('l1', 'l2'):
            # Map the deprecated aliases onto their canonical names and warn.
            old_loss = self.loss
            self.loss = {'l1': 'epsilon_insensitive',
                         'l2': 'squared_epsilon_insensitive'
                         }.get(loss_l)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------

        # C must be strictly positive: C == 0 would remove the data-fit term
        # entirely and is rejected by liblinear.  The previous check (C < 0)
        # wrongly accepted C == 0 despite the error message.
        if self.C <= 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)

        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        penalty = 'l2'  # SVR only accepts l2 penalty
        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            None, penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, loss=self.loss,
            epsilon=self.epsilon)
        # liblinear returns a 2-D coefficient array; flatten it for the
        # single-output regression API.
        self.coef_ = self.coef_.ravel()

        return self
class SVC(BaseSVC):
    """C-Support Vector Classification.

    The implementation is based on libsvm. The fit time complexity
    is more than quadratic with the number of samples which makes it hard
    to scale to dataset with more than a couple of 10000 samples.

    The multiclass support is handled according to a one-vs-one scheme.

    For details on the precise mathematical formulation of the provided
    kernel functions and how `gamma`, `coef0` and `degree` affect each
    other, see the corresponding section in the narrative documentation:
    :ref:`svm_kernels`.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to pre-compute the kernel matrix from data matrices; that matrix
        should be an array of shape ``(n_samples, n_samples)``.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.

        .. versionadded:: 0.17
           *decision_function_shape='ovr'* is recommended.

        .. versionchanged:: 0.17
           Deprecated *decision_function_shape='ovo' and None*.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.

    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in the
        SVM section of the User Guide for details.

    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import SVC
    >>> clf = SVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
        decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
        max_iter=-1, probability=False, random_state=None, shrinking=True,
        tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVR
        Support Vector Machine for Regression implemented using libsvm.

    LinearSVC
        Scalable Linear Support Vector Machine for classification
        implemented using liblinear. Check the See also section of
        LinearSVC for more comparison element.
    """

    def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None,
                 verbose=False, max_iter=-1, decision_function_shape=None,
                 random_state=None):
        # nu is not used by the C-SVC formulation; pass a fixed 0. to the
        # shared libsvm base class.
        super(SVC, self).__init__(
            impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
class NuSVC(BaseSVC):
    """Nu-Support Vector Classification.

    Similar to SVC but uses a parameter to control the number of support
    vectors.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    nu : float, optional (default=0.5)
        An upper bound on the fraction of training errors and a lower
        bound of the fraction of support vectors. Should be in the
        interval (0, 1].

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one. The "balanced" mode uses the values of y to automatically
        adjust weights inversely proportional to class frequencies as
        ``n_samples / (n_classes * np.bincount(y))``

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.

        .. versionadded:: 0.17
           *decision_function_shape='ovr'* is recommended.

        .. versionchanged:: 0.17
           Deprecated *decision_function_shape='ovo' and None*.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.

    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in
        the SVM section of the User Guide for details.

    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import NuSVC
    >>> clf = NuSVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVC(cache_size=200, class_weight=None, coef0=0.0,
          decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
          max_iter=-1, nu=0.5, probability=False, random_state=None,
          shrinking=True, tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVC
        Support Vector Machine for classification using libsvm.

    LinearSVC
        Scalable linear Support Vector Machine for classification using
        liblinear.
    """

    def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None, verbose=False,
                 max_iter=-1, decision_function_shape=None, random_state=None):
        # C is not used by the nu-SVC formulation (nu replaces it); pass a
        # fixed 0. to the shared libsvm base class.
        super(NuSVC, self).__init__(
            impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
    """Epsilon-Support Vector Regression.

    The free parameters in the model are C and epsilon.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    epsilon : float, optional (default=0.1)
        Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
        within which no penalty is associated in the training loss function
        with points predicted within a distance epsilon from the actual
        value.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.

    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [1]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import SVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = SVR(C=1.0, epsilon=0.2)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
        kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)

    See also
    --------
    NuSVR
        Support Vector Machine for regression implemented using libsvm
        using a parameter to control the number of support vectors.

    LinearSVR
        Scalable Linear Support Vector Machine for regression
        implemented using liblinear.
    """

    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
                 cache_size=200, verbose=False, max_iter=-1):
        # nu is unused by epsilon-SVR (pass a fixed 0.), and probability
        # estimates / class weights do not apply to regression.
        super(SVR, self).__init__(
            'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
            shrinking=shrinking, probability=False, cache_size=cache_size,
            class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
    """Nu Support Vector Regression.

    Similar to NuSVC, for regression, uses a parameter nu to control
    the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of epsilon-SVR.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    nu : float, optional
        An upper bound on the fraction of training errors and a lower bound of
        the fraction of support vectors. Should be in the interval (0, 1]. By
        default 0.5 will be taken.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.

    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [1]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import NuSVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = NuSVR(C=1.0, nu=0.1)
    >>> clf.fit(X, y)  #doctest: +NORMALIZE_WHITESPACE
    NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
          kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
          verbose=False)

    See also
    --------
    NuSVC
        Support Vector Machine for classification implemented with libsvm
        with a parameter to control the number of support vectors.

    SVR
        epsilon Support Vector Machine for regression implemented with libsvm.
    """

    def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
                 gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
                 cache_size=200, verbose=False, max_iter=-1):
        # epsilon is unused by nu-SVR (nu replaces it); pass a fixed 0.
        # Probability estimates / class weights do not apply to regression.
        super(NuSVR, self).__init__(
            'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
            tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
            probability=False, cache_size=cache_size, class_weight=None,
            verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
                 verbose=False, max_iter=-1, random_state=None):
        # Delegate everything to the shared libsvm base-class constructor,
        # selecting the 'one_class' implementation.  The extra positional
        # values hard-coded here (0., ..., 0., False, None) fix parameters
        # that are not meaningful for one-class SVM.
        # NOTE(review): the positional mapping follows the base-class
        # signature, which is not visible in this file -- confirm against
        # the parent class before reordering anything.
        super(OneClassSVM, self).__init__(
            'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
            shrinking, False, cache_size, None, verbose, max_iter,
            random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
ric03uec/boto | boto/ec2/elb/healthcheck.py | 185 | 3775 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class HealthCheck(object):
    """
    Represents an EC2 Access Point Health Check. See
    :ref:`elb-configuring-a-health-check` for a walkthrough on configuring
    load balancer health checks.
    """

    # SAX element names carrying integer values, mapped to the attribute
    # that stores each of them.
    _INT_ELEMENTS = {
        'Interval': 'interval',
        'HealthyThreshold': 'healthy_threshold',
        'Timeout': 'timeout',
        'UnhealthyThreshold': 'unhealthy_threshold',
    }

    def __init__(self, access_point=None, interval=30, target=None,
                 healthy_threshold=3, timeout=5, unhealthy_threshold=5):
        """
        :ivar str access_point: The name of the load balancer this
            health check is associated with.
        :ivar int interval: Specifies how many seconds there are between
            health checks.
        :ivar str target: Determines what to check on an instance. See the
            Amazon HealthCheck_ documentation for possible Target values.
        :ivar int healthy_threshold: Passed through to the HealthCheck API;
            see the Amazon HealthCheck_ documentation.
        :ivar int timeout: Passed through to the HealthCheck API; see the
            Amazon HealthCheck_ documentation.
        :ivar int unhealthy_threshold: Passed through to the HealthCheck
            API; see the Amazon HealthCheck_ documentation.

        .. _HealthCheck: http://docs.amazonwebservices.com/ElasticLoadBalancing/latest/APIReference/API_HealthCheck.html
        """
        self.access_point = access_point
        self.target = target
        self.interval = interval
        self.healthy_threshold = healthy_threshold
        self.unhealthy_threshold = unhealthy_threshold
        self.timeout = timeout

    def __repr__(self):
        return 'HealthCheck:{0}'.format(self.target)

    def startElement(self, name, attrs, connection):
        # No nested elements to handle.
        return None

    def endElement(self, name, value, connection):
        # Integer-valued elements are converted; Target stays a string;
        # anything unrecognized is stored verbatim on the instance.
        if name in self._INT_ELEMENTS:
            setattr(self, self._INT_ELEMENTS[name], int(value))
        elif name == 'Target':
            self.target = value
        else:
            setattr(self, name, value)

    def update(self):
        """
        In the case where you have accessed an existing health check on a
        load balancer, this method applies this instance's health check
        values to the load balancer it is attached to.

        .. note:: This method will not do anything if the
            :py:attr:`access_point` attribute isn't set, as is the case with
            a newly instantiated HealthCheck instance.
        """
        if not self.access_point:
            return
        # NOTE(review): ``self.connection`` is not set in __init__; it is
        # presumably attached by boto's response parsing -- confirm before
        # calling update() on a hand-built instance.
        refreshed = self.connection.configure_health_check(self.access_point,
                                                           self)
        for attr in ('interval', 'target', 'healthy_threshold',
                     'unhealthy_threshold', 'timeout'):
            setattr(self, attr, getattr(refreshed, attr))
| mit |
leilihh/cinder | cinder/tests/unit/test_volume_transfer.py | 14 | 7666 | # Copyright (c) 2013 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for volume transfers."""
import datetime
import mock
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit import utils
from cinder.transfer import api as transfer_api
class VolumeTransferTestCase(test.TestCase):
    """Test cases for volume transfer code."""
    def setUp(self):
        # Build a request context for a fixed tenant/user and a fixed
        # timestamp reused by every volume created in these tests.
        super(VolumeTransferTestCase, self).setUp()
        self.ctxt = context.RequestContext(user_id='user_id',
                                           project_id='project_id')
        self.updated_at = datetime.datetime(1, 1, 1, 1, 1, 1)
    @mock.patch('cinder.volume.utils.notify_about_volume_usage')
    def test_transfer_volume_create_delete(self, mock_notify):
        # Creating a transfer flips the volume to 'awaiting-transfer' and
        # emits create start/end notifications; deleting the transfer
        # restores 'available' and emits delete start/end.
        # NOTE: the call-count assertions below are cumulative across the
        # whole test, so statement order matters.
        tx_api = transfer_api.API()
        utils.create_volume(self.ctxt, id='1',
                            updated_at=self.updated_at)
        response = tx_api.create(self.ctxt, '1', 'Description')
        volume = db.volume_get(self.ctxt, '1')
        self.assertEqual('awaiting-transfer', volume['status'],
                         'Unexpected state')
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.create.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(2, mock_notify.call_count)
        tx_api.delete(self.ctxt, response['id'])
        volume = db.volume_get(self.ctxt, '1')
        self.assertEqual('available', volume['status'], 'Unexpected state')
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.delete.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.delete.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(4, mock_notify.call_count)
    def test_transfer_invalid_volume(self):
        # A volume that is not 'available' (here: 'in-use') cannot be
        # offered for transfer, and its status must be left untouched.
        tx_api = transfer_api.API()
        utils.create_volume(self.ctxt, id='1', status='in-use',
                            updated_at=self.updated_at)
        self.assertRaises(exception.InvalidVolume,
                          tx_api.create,
                          self.ctxt, '1', 'Description')
        volume = db.volume_get(self.ctxt, '1')
        self.assertEqual('in-use', volume['status'], 'Unexpected state')
    @mock.patch('cinder.volume.utils.notify_about_volume_usage')
    def test_transfer_accept(self, mock_notify):
        # Exercises the accept path: bad transfer id, bad auth key, volume
        # in the wrong state, and finally a successful accept that moves
        # ownership to a new user/project.
        svc = self.start_service('volume', host='test_host')
        tx_api = transfer_api.API()
        utils.create_volume(self.ctxt, id='1',
                            updated_at=self.updated_at)
        transfer = tx_api.create(self.ctxt, '1', 'Description')
        volume = db.volume_get(self.ctxt, '1')
        self.assertEqual('awaiting-transfer', volume['status'],
                         'Unexpected state')
        # Unknown transfer id and wrong auth key must both be rejected.
        self.assertRaises(exception.TransferNotFound,
                          tx_api.accept,
                          self.ctxt, '2', transfer['auth_key'])
        self.assertRaises(exception.InvalidAuthKey,
                          tx_api.accept,
                          self.ctxt, transfer['id'], 'wrong')
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.create.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(2, mock_notify.call_count)
        # Force the volume into an unexpected state to provoke
        # InvalidVolume, then put it back for the successful accept below.
        db.volume_update(self.ctxt, '1', {'status': 'wrong'})
        self.assertRaises(exception.InvalidVolume,
                          tx_api.accept,
                          self.ctxt, transfer['id'], transfer['auth_key'])
        db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})
        # Because the InvalidVolume exception is raised in tx_api, so there is
        # only transfer.accept.start called and missing transfer.accept.end.
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(3, mock_notify.call_count)
        # Accept as a different user/project and verify ownership moved.
        self.ctxt.user_id = 'new_user_id'
        self.ctxt.project_id = 'new_project_id'
        response = tx_api.accept(self.ctxt,
                                 transfer['id'],
                                 transfer['auth_key'])
        volume = db.volume_get(self.ctxt, '1')
        self.assertEqual('new_project_id', volume['project_id'],
                         'Unexpected project id')
        self.assertEqual('new_user_id', volume['user_id'],
                         'Unexpected user id')
        self.assertEqual(volume['id'], response['volume_id'],
                         'Unexpected volume id in response.')
        self.assertEqual(transfer['id'], response['id'],
                         'Unexpected transfer id in response.')
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.accept.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(5, mock_notify.call_count)
        svc.stop()
    def test_transfer_get(self):
        # Transfers are only visible to the project that created them:
        # get() from another project raises and get_all() returns nothing.
        tx_api = transfer_api.API()
        volume = utils.create_volume(self.ctxt, id='1',
                                     updated_at=self.updated_at)
        transfer = tx_api.create(self.ctxt, volume['id'], 'Description')
        t = tx_api.get(self.ctxt, transfer['id'])
        self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id')
        ts = tx_api.get_all(self.ctxt)
        self.assertEqual(1, len(ts), 'Unexpected number of transfers.')
        nctxt = context.RequestContext(user_id='new_user_id',
                                       project_id='new_project_id')
        utils.create_volume(nctxt, id='2', updated_at=self.updated_at)
        self.assertRaises(exception.TransferNotFound,
                          tx_api.get,
                          nctxt,
                          transfer['id'])
        ts = tx_api.get_all(nctxt)
        self.assertEqual(0, len(ts), 'Unexpected transfers listed.')
    @mock.patch('cinder.volume.utils.notify_about_volume_usage')
    def test_delete_transfer_with_deleted_volume(self, mock_notify):
        # Destroying the underlying volume must cascade to its transfer.
        # create a volume
        volume = utils.create_volume(self.ctxt, id='1',
                                     updated_at=self.updated_at)
        # create a transfer
        tx_api = transfer_api.API()
        transfer = tx_api.create(self.ctxt, volume['id'], 'Description')
        t = tx_api.get(self.ctxt, transfer['id'])
        self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id')
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.create.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(2, mock_notify.call_count)
        # force delete volume
        db.volume_destroy(context.get_admin_context(), volume['id'])
        # Make sure transfer has been deleted.
        self.assertRaises(exception.TransferNotFound,
                          tx_api.get,
                          self.ctxt,
                          transfer['id'])
| apache-2.0 |
HimariO/VideoSum | unit-tests/dnc.py | 2 | 6743 | import tensorflow as tf
import numpy as np
import unittest
import shutil
import os
from dnc.dnc import DNC
from dnc.memory import Memory
from dnc.controller import BaseController
class DummyController(BaseController):
    """Minimal feed-forward controller: one 64-unit linear layer."""

    def network_vars(self):
        # Single dense layer mapping the DNC input vector to 64 features.
        weight_init = tf.truncated_normal([self.nn_input_size, 64])
        self.W = tf.Variable(weight_init, name='layer_W')
        self.b = tf.Variable(tf.zeros([64]), name='layer_b')

    def network_op(self, X):
        # Affine transform: X @ W + b.
        return tf.add(tf.matmul(X, self.W), self.b)
class DummyRecurrentController(BaseController):
    """Minimal recurrent controller backed by a single 64-unit LSTM cell."""

    def network_vars(self):
        self.lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(64)
        # Non-trainable holders carrying the recurrent state across steps.
        zero_state = tf.zeros([self.batch_size, 64])
        self.state = tf.Variable(zero_state, trainable=False)
        self.output = tf.Variable(zero_state, trainable=False)

    def network_op(self, X, state):
        return self.lstm_cell(tf.convert_to_tensor(X), state)

    def update_state(self, new_state):
        # new_state is an (output, cell_state) pair; persist both.
        output_value, state_value = new_state
        return tf.group(
            self.output.assign(output_value),
            self.state.assign(state_value)
        )

    def get_state(self):
        return self.output, self.state
class DNCTest(unittest.TestCase):
    """Integration tests for the DNC machine: construction, forward passes
    with both feed-forward and recurrent controllers, and checkpoint
    save/restore round-trips.
    """

    @classmethod
    def _clear(cls):
        """Delete the on-disk checkpoints directory, ignoring its absence."""
        current_dir = os.path.dirname(__file__)
        ckpts_dir = os.path.join(current_dir, 'checkpoints')
        try:
            shutil.rmtree(ckpts_dir)
        except OSError:
            # The directory may simply not exist yet.  Catching OSError
            # (instead of the previous bare ``except``) no longer hides
            # KeyboardInterrupt/SystemExit.
            pass

    @classmethod
    def setUpClass(cls):
        # Start each run from a clean checkpoints directory.
        cls._clear()

    @classmethod
    def tearDownClass(cls):
        # Remove checkpoints produced by the save/restore tests.
        cls._clear()

    def test_construction(self):
        """A DNC exposes the sizes it was built with and wires up its
        memory and controller objects."""
        graph = tf.Graph()
        with graph.as_default():
            with tf.Session(graph=graph) as session:
                computer = DNC(DummyController, 10, 20, 10, 10, 64, 1)
                rcomputer = DNC(DummyRecurrentController, 10, 20, 10, 10, 64, 1)
                self.assertEqual(computer.input_size, 10)
                self.assertEqual(computer.output_size, 20)
                self.assertEqual(computer.words_num, 10)
                self.assertEqual(computer.word_size, 64)
                self.assertEqual(computer.read_heads, 1)
                self.assertEqual(computer.batch_size, 1)
                self.assertTrue(isinstance(computer.memory, Memory))
                self.assertTrue(isinstance(computer.controller, DummyController))
                self.assertTrue(isinstance(rcomputer.controller, DummyRecurrentController))

    def test_call(self):
        """A forward pass yields outputs and memory views of the expected
        shapes for both controller types."""
        graph = tf.Graph()
        with graph.as_default():
            with tf.Session(graph=graph) as session:
                computer = DNC(DummyController, 10, 20, 10, 10, 64, 2, batch_size=3)
                rcomputer = DNC(DummyRecurrentController, 10, 20, 10, 10, 64, 2, batch_size=3)
                # Batch of 3 sequences, 5 steps each, 10 features per step.
                input_batches = np.random.uniform(0, 1, (3, 5, 10)).astype(np.float32)
                # tf.initialize_all_variables is the pre-TF1.0 API used
                # consistently throughout this file.
                session.run(tf.initialize_all_variables())
                out_view = session.run(computer.get_outputs(), feed_dict={
                    computer.input_data: input_batches,
                    computer.sequence_length: 5
                })
                out, view = out_view
                rout_rview, ro, rs = session.run([
                    rcomputer.get_outputs(),
                    rcomputer.controller.get_state()[0],
                    rcomputer.controller.get_state()[1]
                ], feed_dict={
                    rcomputer.input_data: input_batches,
                    rcomputer.sequence_length: 5
                })
                rout, rview = rout_rview
                self.assertEqual(out.shape, (3, 5, 20))
                self.assertEqual(view['free_gates'].shape, (3, 5, 2))
                self.assertEqual(view['allocation_gates'].shape, (3, 5, 1))
                self.assertEqual(view['write_gates'].shape, (3, 5, 1))
                self.assertEqual(view['read_weightings'].shape, (3, 5, 10, 2))
                self.assertEqual(view['write_weightings'].shape, (3, 5, 10))
                self.assertEqual(rout.shape, (3, 5, 20))
                self.assertEqual(rview['free_gates'].shape, (3, 5, 2))
                self.assertEqual(rview['allocation_gates'].shape, (3, 5, 1))
                self.assertEqual(rview['write_gates'].shape, (3, 5, 1))
                self.assertEqual(rview['read_weightings'].shape, (3, 5, 10, 2))
                self.assertEqual(rview['write_weightings'].shape, (3, 5, 10))

    def test_save(self):
        """Saving a model writes a checkpoint without raising."""
        graph = tf.Graph()
        with graph.as_default():
            with tf.Session(graph=graph) as session:
                computer = DNC(DummyController, 10, 20, 10, 10, 64, 2, batch_size=2)
                session.run(tf.initialize_all_variables())
                current_dir = os.path.dirname(__file__)
                ckpts_dir = os.path.join(current_dir, 'checkpoints')
                computer.save(session, ckpts_dir, 'test-save')
                # ``assert_`` was a deprecated unittest alias (removed in
                # Python 3.12); use assertTrue.
                self.assertTrue(True)

    def test_restore(self):
        """Weights restored from a checkpoint equal the saved ones."""
        current_dir = os.path.dirname(__file__)
        ckpts_dir = os.path.join(current_dir, 'checkpoints')
        # (Removed unused locals model1_output/model1_memview and the
        # unused sample_input/sample_seq_len fixtures.)
        graph1 = tf.Graph()
        with graph1.as_default():
            with tf.Session(graph=graph1) as session1:
                computer = DNC(DummyController, 10, 20, 10, 10, 64, 2, batch_size=2)
                session1.run(tf.initialize_all_variables())
                saved_weights = session1.run([
                    computer.controller.nn_output_weights,
                    computer.controller.interface_weights,
                    computer.controller.mem_output_weights,
                    computer.controller.W,
                    computer.controller.b
                ])
                computer.save(session1, ckpts_dir, 'test-restore')
        graph2 = tf.Graph()
        with graph2.as_default():
            with tf.Session(graph=graph2) as session2:
                computer = DNC(DummyController, 10, 20, 10, 10, 64, 2, batch_size=2)
                session2.run(tf.initialize_all_variables())
                computer.restore(session2, ckpts_dir, 'test-restore')
                restored_weights = session2.run([
                    computer.controller.nn_output_weights,
                    computer.controller.interface_weights,
                    computer.controller.mem_output_weights,
                    computer.controller.W,
                    computer.controller.b
                ])
                # ``np.product`` was a deprecated alias removed in
                # NumPy 2.0; all() states the intent directly.
                self.assertTrue(all(
                    np.array_equal(restored_weights[i], saved_weights[i])
                    for i in range(5)))
if __name__ == '__main__':
    # Allow running this test module directly; verbosity=2 prints each
    # test name as it runs.
    unittest.main(verbosity=2)
| mit |
bjodah/symengine.py | symengine/tests/test_number.py | 2 | 5036 | from symengine.utilities import raises
from symengine import Integer, I, S, Symbol, pi, Rational
from symengine.lib.symengine_wrapper import (perfect_power, is_square, integer_nthroot)
def test_integer():
    """Integer converts to the native numeric types and has real/imag parts."""
    five = Integer(5)
    assert str(five) == "5"
    assert int(five) == 5
    assert float(five) == 5.0
    assert complex(five) == 5.0 + 0j
    assert five.real == five
    assert five.imag == S.Zero


def test_integer_long():
    # Values beyond machine-word range round-trip through Integer.
    big = 123434444444444444444
    wrapped = Integer(big)
    assert str(wrapped) == str(big)
    assert int(wrapped) == big


def test_integer_string():
    assert Integer("133") == 133


def test_rational():
    half = Integer(5) / 10
    assert str(half) == "1/2"
    assert int(half) == 0
    assert float(half) == 0.5
    assert complex(half) == 0.5 + 0j
    assert half.real == half
    assert half.imag == S.Zero


def test_complex():
    # Exact rational + imaginary unit.
    z = Integer(5) / 10 + I
    assert str(z) == "1/2 + I"
    assert complex(z) == 0.5 + 1j
    assert z.real == Integer(1) / 2
    assert z.imag == 1
    # Floating-point real part.
    z = 0.5 + I
    assert str(z) == "0.5 + 1.0*I"
    assert complex(z) == 0.5 + 1j
    assert z.real == 0.5
    assert z.imag == 1.0


def test_smallfloat_valid():
    # Construction from a float drops the fractional part.
    assert str(Integer(7.5)) == "7"


def test_bigfloat_valid():
    assert str(Integer(13333333333333334.5)) == "13333333333333334"
def test_is_conditions():
    """Sign/zero/complex predicates for representative numbers.

    ``bool(...) == expected`` mirrors the original assert/assert-not
    truthiness checks exactly (a None predicate counts as falsy).
    """
    # (value, is_zero, is_positive, is_negative, is_nonzero,
    #  is_nonpositive, is_nonnegative, is_complex)
    expectations = [
        (Integer(-123), False, False, True, True, True, False, False),
        (Integer(123), False, True, False, True, False, True, False),
        (Integer(0), True, False, False, False, True, True, False),
        (Integer(1) + I, False, False, False, False, False, False, True),
    ]
    for (value, zero, positive, negative, nonzero,
         nonpositive, nonnegative, cplx) in expectations:
        assert bool(value.is_zero) == zero
        assert bool(value.is_positive) == positive
        assert bool(value.is_negative) == negative
        assert bool(value.is_nonzero) == nonzero
        assert bool(value.is_nonpositive) == nonpositive
        assert bool(value.is_nonnegative) == nonnegative
        assert bool(value.is_complex) == cplx
    assert pi.is_number


def test_perfect_power():
    cases = [
        (1, True), (7, False), (8, True), (9, True), (10, False),
        (1024, True), (1025, False), (6**7, True),
        (-27, True), (-64, True), (-32, True),
    ]
    for value, expected in cases:
        assert perfect_power(value) == expected


def test_perfect_square():
    for value, expected in [(7, False), (8, False), (9, True), (10, False)]:
        assert is_square(value) == expected
    # A square is also a perfect power; 50 is neither.
    assert perfect_power(49) == True
    assert perfect_power(50) == False


def test_integer_nthroot():
    # (radicand, degree, expected_root, is_exact)
    cases = [
        (1, 2, 1, True), (1, 5, 1, True), (2, 1, 2, True),
        (2, 2, 1, False), (2, 5, 1, False), (4, 2, 2, True),
        (123**25, 25, 123, True), (123**25 + 1, 25, 123, False),
        (123**25 - 1, 25, 122, False), (1, 1, 1, True),
        (0, 1, 0, True), (0, 3, 0, True), (10000, 1, 10000, True),
        (16, 2, 4, True), (26, 2, 5, False),
        (1234567**7, 7, 1234567, True),
        (1234567**7 + 1, 7, 1234567, False),
        (1234567**7 - 1, 7, 1234566, False),
    ]
    for radicand, degree, root, exact in cases:
        assert integer_nthroot(radicand, degree) == (root, exact)
    # Very large radicands around an exact power.
    big = 25**1000
    assert integer_nthroot(big, 1000) == (25, True)
    assert integer_nthroot(big + 1, 1000) == (25, False)
    assert integer_nthroot(big - 1, 1000) == (24, False)
    huge = 10**400
    huge_sq = huge**2
    assert integer_nthroot(huge_sq, 2) == (huge, True)
    assert integer_nthroot(huge_sq + 1, 2) == (huge, False)
    assert integer_nthroot(huge_sq - 1, 2) == (huge - 1, False)
def test_is_zero():
    # A bare symbol carries no sign information.
    assert Symbol('x').is_zero is None


def test_is_positive():
    half, neg = Rational(1, 2), Rational(-2, 3)
    assert half.is_positive
    assert not neg.is_positive
    assert Symbol('x').is_positive is None


def test_is_negative():
    half, neg = Rational(1, 2), Rational(-2, 3)
    assert not half.is_negative
    assert neg.is_negative
    assert Symbol('x').is_negative is None


def test_is_nonpositive():
    half, neg = Rational(1, 2), Rational(-2, 3)
    assert not half.is_nonpositive
    assert neg.is_nonpositive
    assert Symbol('x').is_nonpositive is None


def test_is_nonnegative():
    half, neg = Rational(1, 2), Rational(-2, 3)
    assert half.is_nonnegative
    assert not neg.is_nonnegative
    assert Symbol('x').is_nonnegative is None


def test_is_real():
    assert Rational(1, 2).is_real
    assert Symbol('x').is_real is None
| mit |
pozetroninc/micropython | tests/basics/int_big_and3.py | 61 | 2185 | # test - +
# Bitwise AND of arbitrary-precision integers where exactly one operand is
# negative, exercising the bignum two's-complement AND code paths.
# Each magnitude pair is tested both ways: (-a & b), then (a & -b),
# producing exactly the same output lines as the original expanded form.
pairs = [
    (97989513389222316022151446562729620153292831887555425160965597396,
     23716683549865351578586448630079789776107310103486834795830390982),
    (53817081128841898634258263553430908085326601592682411889506742059,
     37042558948907407488299113387826240429667200950043601129661240876),
    (26167512042587370698808974207700979337713004510730289760097826496,
     98456276326770292376138852628141531773120376436197321310863125849),
    (21085380307304977067262070503651827226504797285572981274069266136,
     15928222825828272388778130358888206480162413547887287646273147570),
    (40827393422334167255488276244226338235131323044408420081160772273,
     63815443187857978125545555033672525708399848575557475462799643340),
    (5181013159871685724135944379095645225188360725917119022722046448,
     59734090450462480092384049604830976376887859531148103803093112493),
    (283894311,
     86526825689187217371383854139783231460931720533100376593106943447),
    (40019818573920230246248826511203818792007462193311949166285967147,
     9487909752),
]

# test - +
for a, b in pairs:
    print(-a & b)

# test + -
for a, b in pairs:
    print(a & -b)
| mit |
kaysoky/mesos | src/python/lib/mesos/http.py | 9 | 14070 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments
"""
Classes and functions for interacting with the Mesos HTTP RESTful API
"""
from urllib.parse import urlparse
from copy import deepcopy
import requests
import tenacity
import ujson
from mesos.exceptions import MesosException
from mesos.exceptions import MesosHTTPException
from mesos.exceptions import MesosAuthenticationException
from mesos.exceptions import MesosAuthorizationException
from mesos.exceptions import MesosBadRequestException
from mesos.exceptions import MesosInternalServerErrorException
from mesos.exceptions import MesosServiceUnavailableException
from mesos.exceptions import MesosUnprocessableException
METHOD_HEAD = 'HEAD'
METHOD_GET = 'GET'
METHOD_POST = 'POST'
METHOD_PUT = 'PUT'
METHOD_PATCH = 'PATCH'
METHOD_DELETE = 'DELETE'
METHODS = {
METHOD_HEAD,
METHOD_GET,
METHOD_POST,
METHOD_PUT,
METHOD_PATCH,
METHOD_DELETE}
REQUEST_JSON_HEADERS = {'Accept': 'application/json'}
REQUEST_GZIP_HEADERS = {'Accept-Encoding': 'gzip'}
BASE_HEADERS = {}
DEFAULT_TIMEOUT = 30
DEFAULT_AUTH = None
DEFAULT_USE_GZIP_ENCODING = True
DEFAULT_MAX_ATTEMPTS = 3
def simple_urljoin(base, other):
    """
    Join two URL fragments with exactly one '/' between them.

    Strips any trailing '/' from *base* and any leading '/' from *other*
    before joining.  This is needed since urlparse.urljoin tries to be too
    smart and strips the subpath from base_url.

    :param base: left-hand URL fragment
    :type base: str
    :param other: right-hand URL fragment
    :type other: str
    :rtype: str
    """
    return '{}/{}'.format(base.rstrip('/'), other.lstrip('/'))
class Resource:
    """
    Encapsulate the context for an HTTP resource.

    Context for an HTTP resource may include properties such as the URL,
    default timeout for connections, default headers to be included in each
    request, and auth.
    """
    # Status codes treated as success; anything else raises an exception.
    SUCCESS_CODES = frozenset(range(200, 300))
    # Maps an HTTP status code to the specific exception raised for it.
    ERROR_CODE_MAP = {c.STATUS_CODE: c for c in (
        MesosBadRequestException,
        MesosAuthenticationException,
        MesosAuthorizationException,
        MesosUnprocessableException,
        MesosInternalServerErrorException,
        MesosServiceUnavailableException)}

    def __init__(self,
                 url,
                 default_headers=None,
                 default_timeout=DEFAULT_TIMEOUT,
                 default_auth=DEFAULT_AUTH,
                 default_use_gzip_encoding=DEFAULT_USE_GZIP_ENCODING,
                 default_max_attempts=DEFAULT_MAX_ATTEMPTS):
        """
        :param url: URL identifying the resource
        :type url: str
        :param default_headers: headers to attach to requests
        :type default_headers: dict[str, str]
        :param default_timeout: timeout in seconds
        :type default_timeout: float
        :param default_auth: auth scheme
        :type default_auth: requests.auth.AuthBase
        :param default_use_gzip_encoding: use gzip encoding by default or not
        :type default_use_gzip_encoding: bool
        :param default_max_attempts: max number of attempts when retrying
        :type default_max_attempts: int
        """
        self.url = urlparse(url)
        self.default_timeout = default_timeout
        self.default_auth = default_auth
        self.default_use_gzip_encoding = default_use_gzip_encoding
        self.default_max_attempts = default_max_attempts
        # Copy so later mutation by the caller cannot leak into us.
        if default_headers is None:
            self._default_headers = {}
        else:
            self._default_headers = deepcopy(default_headers)

    def default_headers(self):
        """
        Return a copy of the default headers.

        :rtype: dict[str, str]
        """
        return deepcopy(self._default_headers)

    def subresource(self, subpath):
        """
        Return a new Resource object at a subpath of the current resource's URL.

        The new resource inherits this resource's defaults (headers,
        timeout, auth, gzip preference and retry budget).

        :param subpath: subpath of the resource
        :type subpath: str
        :return: Resource at subpath
        :rtype: Resource
        """
        return self.__class__(
            url=simple_urljoin(self.url.geturl(), subpath),
            default_headers=self.default_headers(),
            default_timeout=self.default_timeout,
            default_auth=self.default_auth,
            default_use_gzip_encoding=self.default_use_gzip_encoding,
            default_max_attempts=self.default_max_attempts,
        )

    def _request(self,
                 method,
                 additional_headers=None,
                 timeout=None,
                 auth=None,
                 use_gzip_encoding=None,
                 params=None,
                 **kwargs):
        """
        Make an HTTP request with given method and an optional timeout.

        :param method: request method
        :type method: str
        :param additional_headers: additional headers to include in the request
        :type additional_headers: dict[str, str]
        :param timeout: timeout in seconds
        :type timeout: float
        :param auth: auth scheme for request
        :type auth: requests.auth.AuthBase
        :param use_gzip_encoding: boolean indicating whether to
                                  pass gzip encoding in the request
                                  headers or not
        :type use_gzip_encoding: boolean
        :param params: additional params to include in the request
        :type params: str | dict[str, T]
        :param kwargs: additional arguments to pass to requests.request
        :type kwargs: dict[str, T]
        :return: HTTP response
        :rtype: requests.Response
        """
        headers = self.default_headers()
        if additional_headers is not None:
            headers.update(additional_headers)
        if timeout is None:
            timeout = self.default_timeout
        if auth is None:
            auth = self.default_auth
        if use_gzip_encoding is None:
            use_gzip_encoding = self.default_use_gzip_encoding
        # Bug fix: this was previously gated on ``headers`` being
        # non-empty as well (``if headers and use_gzip_encoding``), which
        # silently dropped the gzip Accept-Encoding header whenever no
        # default or additional headers were configured.
        if use_gzip_encoding:
            headers.update(REQUEST_GZIP_HEADERS)

        kwargs.update(dict(
            url=self.url.geturl(),
            method=method,
            headers=headers,
            timeout=timeout,
            auth=auth,
            params=params,
        ))

        # Here we call request without a try..except block since all exceptions
        # raised here will be used to determine whether or not a retry is
        # necessary in self.request.
        response = requests.request(**kwargs)

        if response.status_code in self.SUCCESS_CODES:
            return response

        known_exception = self.ERROR_CODE_MAP.get(response.status_code)
        if known_exception:
            raise known_exception(response)
        raise MesosHTTPException(response)

    def request(self,
                method,
                additional_headers=None,
                retry=True,
                timeout=None,
                auth=None,
                use_gzip_encoding=None,
                params=None,
                max_attempts=None,
                **kwargs):
        """
        Make an HTTP request by calling self._request with backoff retry.

        :param method: request method
        :type method: str
        :param additional_headers: additional headers to include in the request
        :type additional_headers: dict[str, str]
        :param retry: boolean indicating whether to retry if the request fails
        :type retry: boolean
        :param timeout: timeout in seconds, overrides default_timeout
        :type timeout: float
        :param auth: auth scheme for the request
        :type auth: requests.auth.AuthBase
        :param use_gzip_encoding: boolean indicating whether to pass gzip
                                  encoding in the request headers or not
        :type use_gzip_encoding: boolean | None
        :param params: additional params to include in the request
        :type params: str | dict[str, T] | None
        :param max_attempts: maximum number of attempts to try for any request
        :type max_attempts: int
        :param kwargs: additional arguments to pass to requests.request
        :type kwargs: dict[str, T]
        :return: HTTP response
        :rtype: requests.Response
        """
        request = self._request

        if retry:
            if max_attempts is None:
                max_attempts = self.default_max_attempts

            # We retry only when it makes sense: either due to a network
            # partition (e.g. connection errors) or if the request failed
            # due to a server error such as 500s, timeouts, and so on.
            request = tenacity.retry(
                stop=tenacity.stop_after_attempt(max_attempts),
                wait=tenacity.wait_exponential(),
                retry=tenacity.retry_if_exception_type((
                    requests.exceptions.Timeout,
                    requests.exceptions.ConnectionError,
                    MesosServiceUnavailableException,
                    MesosInternalServerErrorException,
                )),
                reraise=True,
            )(request)

        try:
            return request(
                method=method,
                additional_headers=additional_headers,
                timeout=timeout,
                auth=auth,
                use_gzip_encoding=use_gzip_encoding,
                params=params,
                **kwargs
            )
        # If the request itself failed, an exception subclassed from
        # RequestException will be raised. Catch this and reraise as
        # MesosException since we want the caller to be able to catch
        # and handle this.
        except requests.exceptions.RequestException as err:
            raise MesosException('Request failed', err)

    def request_json(self,
                     method,
                     timeout=None,
                     auth=None,
                     payload=None,
                     decoder=None,
                     params=None,
                     **kwargs):
        """
        Make an HTTP request and deserialize the response as JSON. Optionally
        decode the deserialized json dict into a decoded object.

        :param method: request method
        :type method: str
        :param timeout: timeout in seconds
        :type timeout: float
        :param auth: auth scheme for the request
        :type auth: requests.auth.AuthBase
        :param payload: json payload in the request
        :type payload: dict[str, T] | str
        :param decoder: decoder for json response
        :type decoder: (dict) -> T
        :param params: additional params to include in the request
        :type params: str | dict[str, T]
        :param kwargs: additional arguments to pass to requests.request
        :type kwargs: dict[str, T]
        :return: JSON response
        :rtype: dict[str, T]
        """
        resp = self.request(method=method,
                            timeout=timeout,
                            auth=auth,
                            json=payload,
                            additional_headers=REQUEST_JSON_HEADERS,
                            params=params,
                            **kwargs)

        try:
            json_dict = ujson.loads(resp.text)
        except ValueError as exception:
            raise MesosException(
                'could not load JSON from "{data}"'.format(data=resp.text),
                exception)

        if decoder is not None:
            return decoder(json_dict)

        return json_dict

    def get_json(self,
                 timeout=None,
                 auth=None,
                 decoder=None,
                 params=None):
        """
        Send a GET request.

        :param timeout: timeout in seconds
        :type timeout: float
        :param auth: auth scheme for the request
        :type auth: requests.auth.AuthBase
        :param decoder: decoder for json response
        :type decoder: (dict) -> T
        :param params: additional params to include in the request
        :type params: str | dict[str, U]
        :rtype: dict[str, U]
        """
        return self.request_json(METHOD_GET,
                                 timeout=timeout,
                                 auth=auth,
                                 decoder=decoder,
                                 params=params)

    def post_json(self,
                  timeout=None,
                  auth=None,
                  payload=None,
                  decoder=None,
                  params=None):
        """
        Sends a POST request.

        :param timeout: timeout in seconds
        :type timeout: float
        :param auth: auth scheme for the request
        :type auth: requests.auth.AuthBase
        :param payload: post data
        :type payload: dict[str, T] | str
        :param decoder: decoder for json response
        :type decoder: (dict) -> T
        :param params: additional params to include in the request
        :type params: str | dict[str, T]
        :rtype: dict[str, T]
        """
        return self.request_json(METHOD_POST,
                                 timeout=timeout,
                                 auth=auth,
                                 payload=payload,
                                 decoder=decoder,
                                 params=params)
catapult-project/catapult | third_party/gsutil/third_party/apitools/apitools/gen/test_utils.py | 7 | 1450 | #
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various utilities used in tests."""
import contextlib
import os
import shutil
import sys
import tempfile
import six
import unittest2
# Decorator that skips the wrapped test on Windows (os.name == 'nt');
# the decorated tests rely on POSIX-only filesystem behaviour.
SkipOnWindows = unittest2.skipIf(
    os.name == 'nt', 'Does not run on windows')
@contextlib.contextmanager
def TempDir(change_to=False):
    """Yield a fresh temporary directory, removing it on exit.

    When *change_to* is true, the working directory is switched to the
    temporary directory for the duration of the block and restored
    afterwards.
    """
    saved_cwd = os.getcwd() if change_to else None
    tmp_path = tempfile.mkdtemp()
    try:
        if change_to:
            os.chdir(tmp_path)
        yield tmp_path
    finally:
        if saved_cwd is not None:
            os.chdir(saved_cwd)
        shutil.rmtree(tmp_path)
@contextlib.contextmanager
def CaptureOutput():
    """Temporarily replace sys.stdout/sys.stderr with StringIO buffers.

    Yields the (stdout, stderr) buffer pair so callers can inspect
    whatever the wrapped code printed; the real streams are restored
    on exit even if the block raises.
    """
    captured_out, captured_err = six.StringIO(), six.StringIO()
    saved_out, saved_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = captured_out, captured_err
        yield captured_out, captured_err
    finally:
        sys.stdout, sys.stderr = saved_out, saved_err
| bsd-3-clause |
ome/ome-files-py | ome_files/metadata.py | 2 | 4333 | # #%L
# OME-FILES Python library for image IO.
# Copyright (c) 2017 University of Dundee
# %%
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# #L%
import sys
import xml.dom.minidom as minidom
# DOM node-type code identifying text nodes.
TEXT_NODE = minidom.Node.TEXT_NODE


def get_text(node):
    """Return the character data of *node*'s first child text node.

    Raises ValueError if the first child is missing or is not text.
    """
    child = node.firstChild
    if child is not None and child.nodeType == TEXT_NODE:
        return child.data
    raise ValueError("node %r does not contain text" % (node,))
def get_children(node):
    """Return *node*'s non-text children, in document order."""
    children = []
    for child in node.childNodes:
        if child.nodeType != TEXT_NODE:
            children.append(child)
    return children
class NodeWrapper(object):
    """Base wrapper exposing a DOM node's attributes as Python attributes.

    Subclasses whitelist the attribute names they accept via FIELDS;
    assigning any other name raises AttributeError.
    """

    # Attribute names instances may carry; subclasses override this.
    FIELDS = []

    def __setattr__(self, name, value):
        if name in self.FIELDS:
            super(NodeWrapper, self).__setattr__(name, value)
        else:
            raise AttributeError("attribute %s not allowed in %s" %
                                 (name, self.__class__.__name__))

    @classmethod
    def fromnode(cls, node):
        """Build a wrapper from *node*, whose tag must equal the class name."""
        if node.nodeName != cls.__name__:
            raise ValueError("unexpected node: %s" % node.nodeName)
        wrapper = cls()
        for attr_name, attr_value in node.attributes.items():
            setattr(wrapper, attr_name, attr_value)
        # Bypass the FIELDS check for the private child list.
        super(NodeWrapper, wrapper).__setattr__("_children", get_children(node))
        return wrapper
class Annotation(NodeWrapper):
    """Wrapper for OME <Annotation> nodes: description plus annotation refs."""

    FIELDS = ["Annotator", "ID", "Namespace", "Description", "AnnotationRef"]

    @classmethod
    def fromnode(cls, node):
        ann = super(Annotation, cls).fromnode(node)
        ann.AnnotationRef = []
        for child in ann._children:
            tag = child.nodeName
            if tag == "Description":
                ann.Description = get_text(child)
            elif tag == "AnnotationRef":
                ann.AnnotationRef.append(child.attributes.get("ID").value)
        return ann
class MapAnnotation(Annotation):
    """Annotation whose payload is a mapping from keys to lists of values."""

    FIELDS = Annotation.FIELDS + ["Value"]

    @classmethod
    def fromnode(cls, node):
        ann = super(MapAnnotation, cls).fromnode(node)
        ann.Value = {}
        for child in ann._children:
            if child.nodeName != "Value":
                continue
            for entry in get_children(child):
                try:
                    key = entry.attributes.get("K").value
                except AttributeError:
                    pass  # The "K" attribute is optional
                else:
                    ann.Value.setdefault(key, []).append(get_text(entry))
        return ann
class XMLAnnotation(Annotation):
    """Annotation carrying an arbitrary XML subtree as its value."""

    FIELDS = Annotation.FIELDS + ["Value"]

    @classmethod
    def fromnode(cls, node):
        ann = super(XMLAnnotation, cls).fromnode(node)
        # Keep the raw DOM node: the payload is opaque XML.
        for child in ann._children:
            if child.nodeName == "Value":
                ann.Value = child
        return ann
class OMEXMLMetadata(object):
    """Thin DOM-backed accessor for annotations in an OME-XML document."""

    def __init__(self, xml_string):
        if sys.version_info < (3,):
            # minidom on Python 2 wants byte strings, not unicode.
            xml_string = xml_string.encode("utf-8")
        self.doc = minidom.parseString(xml_string)

    def get_map_annotations(self):
        """Return a MapAnnotation wrapper for every <MapAnnotation> node."""
        nodes = self.doc.getElementsByTagName("MapAnnotation")
        return [MapAnnotation.fromnode(node) for node in nodes]

    def get_xml_annotations(self):
        """Return an XMLAnnotation wrapper for every <XMLAnnotation> node."""
        nodes = self.doc.getElementsByTagName("XMLAnnotation")
        return [XMLAnnotation.fromnode(node) for node in nodes]
| bsd-2-clause |
edge/three.js | utils/exporters/blender/addons/io_three/exporter/__init__.py | 178 | 2661 | import os
import sys
import traceback
from .. import constants, logger, exceptions, dialogs
from . import scene, geometry, api, base_classes
def _error_handler(func):
    """Decorator: initialise logging and the api before running *func*.

    Any exception is logged (with the decorator's own frame trimmed from
    the traceback) and re-raised; on success the log location is printed.
    """
    def wrapped(filepath, options, *args, **kwargs):
        log_level = options.get(constants.LOGGING, constants.DEBUG)
        logger.init('io_three.export.log', level=log_level)
        addon_version = options.get('addon_version')
        if addon_version is not None:
            logger.debug("Addon Version %s", addon_version)
        api.init()
        try:
            func(filepath, options, *args, **kwargs)
        except:
            exc_type, exc_value, exc_tb = sys.exc_info()
            trace = ''.join(traceback.format_exception(
                exc_type, exc_value, exc_tb.tb_next))
            logger.error(trace)
            print('Error recorded to %s' % logger.LOG_FILE)
            raise
        else:
            print('Log: %s' % logger.LOG_FILE)
    return wrapped
@_error_handler
def export_scene(filepath, options):
    """Parse the current scene and write it to *filepath*.

    Everything is unselected first (needed when modifiers must be
    applied); the original selection and active object are restored
    afterwards, whether or not the export succeeds.
    """
    previously_selected = []
    for node in api.selected_objects():
        api.object.unselect(node)
        previously_selected.append(node)
    previously_active = api.active_object()
    try:
        scene_ = scene.Scene(filepath, options=options)
        scene_.parse()
        scene_.write()
    finally:
        _restore_selection(previously_selected, previously_active)
@_error_handler
def export_geometry(filepath, options, node=None):
    """Export a single mesh object to *filepath*.

    :param filepath: destination path for the geometry file
    :param options: export options mapping
    :param node: mesh object to export; defaults to the active object

    Error conditions (nothing selected, non-mesh object) raise in batch
    mode and show a dialog otherwise.
    """
    msg = ""
    exception = None
    if node is None:
        node = api.active_object()
    if node is None:
        msg = "Nothing selected"
        logger.error(msg)
        exception = exceptions.SelectionError
    elif node.type != 'MESH':
        # elif guard: the original read node.type unconditionally, which
        # raised AttributeError when node was still None and hid the
        # "Nothing selected" error path below.
        msg = "%s is not a valid mesh object" % node.name
        logger.error(msg)
        exception = exceptions.GeometryError
    if exception is not None:
        if api.batch_mode():
            raise exception(msg)
        else:
            dialogs.error(msg)
        return
    mesh = api.object.mesh(node, options)
    parent = base_classes.BaseScene(filepath, options)
    geo = geometry.Geometry(mesh, parent)
    geo.parse()
    geo.write()
    if not options.get(constants.EMBED_ANIMATION, True):
        geo.write_animation(os.path.dirname(filepath))
def _restore_selection(objects, active):
    """Re-select *objects* and make *active* the active object again."""
    for node in objects:
        api.object.select(node)
    api.set_active_object(active)
| mit |
jtl999/certbot | certbot-nginx/certbot_nginx/tests/parser_test.py | 2 | 16722 | """Tests for certbot_nginx.parser."""
import glob
import os
import re
import shutil
import unittest
from certbot import errors
from certbot_nginx import nginxparser
from certbot_nginx import obj
from certbot_nginx import parser
from certbot_nginx.tests import util
class NginxParserTest(util.NginxTest):
    """Nginx Parser Test."""
    def setUp(self):
        # util.NginxTest.setUp creates temp/config/work dirs and fixture paths.
        super(NginxParserTest, self).setUp()
    def tearDown(self):
        # Remove everything the fixture created on disk.
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.config_dir)
        shutil.rmtree(self.work_dir)
    def test_root_normalized(self):
        """Redundant slashes and '..' segments are normalized in the root."""
        path = os.path.join(self.temp_dir, "etc_nginx/////"
                            "ubuntu_nginx/../../etc_nginx")
        nparser = parser.NginxParser(path, None)
        self.assertEqual(nparser.root, self.config_path)
    def test_root_absolute(self):
        """A relative root is converted to an absolute path."""
        nparser = parser.NginxParser(os.path.relpath(self.config_path), None)
        self.assertEqual(nparser.root, self.config_path)
    def test_root_no_trailing_slash(self):
        """A trailing path separator on the root is stripped."""
        nparser = parser.NginxParser(self.config_path + os.path.sep, None)
        self.assertEqual(nparser.root, self.config_path)
    def test_load(self):
        """Test recursive conf file parsing.
        """
        nparser = parser.NginxParser(self.config_path, self.ssl_options)
        nparser.load()
        self.assertEqual(set([nparser.abs_path(x) for x in
                              ['foo.conf', 'nginx.conf', 'server.conf',
                               'sites-enabled/default',
                               'sites-enabled/example.com',
                               'sites-enabled/migration.com']]),
                         set(nparser.parsed.keys()))
        self.assertEqual([['server_name', 'somename alias another.alias']],
                         nparser.parsed[nparser.abs_path('server.conf')])
        self.assertEqual([[['server'], [['listen', '69.50.225.155:9000'],
                                        ['listen', '127.0.0.1'],
                                        ['server_name', '.example.com'],
                                        ['server_name', 'example.*']]]],
                         nparser.parsed[nparser.abs_path(
                             'sites-enabled/example.com')])
    def test_abs_path(self):
        """abs_path leaves absolute paths alone and roots relative ones."""
        nparser = parser.NginxParser(self.config_path, self.ssl_options)
        self.assertEqual('/etc/nginx/*', nparser.abs_path('/etc/nginx/*'))
        self.assertEqual(os.path.join(self.config_path, 'foo/bar/'),
                         nparser.abs_path('foo/bar/'))
    def test_filedump(self):
        """filedump writes parse trees back out as '.test' config files."""
        nparser = parser.NginxParser(self.config_path, self.ssl_options)
        nparser.filedump('test', lazy=False)
        # pylint: disable=protected-access
        parsed = nparser._parse_files(nparser.abs_path(
            'sites-enabled/example.com.test'))
        self.assertEqual(3, len(glob.glob(nparser.abs_path('*.test'))))
        self.assertEqual(3, len(
            glob.glob(nparser.abs_path('sites-enabled/*.test'))))
        self.assertEqual([[['server'], [['listen', '69.50.225.155:9000'],
                                        ['listen', '127.0.0.1'],
                                        ['server_name', '.example.com'],
                                        ['server_name', 'example.*']]]],
                         parsed[0])
    def test__do_for_subarray(self):
        """_do_for_subarray visits every nested sub-list matching a predicate."""
        # pylint: disable=protected-access
        # Each pair is (input nesting, expected index-paths of matches).
        mylists = [([[2], [3], [2]], [[0], [2]]),
                   ([[2], [3], [4]], [[0]]),
                   ([[4], [3], [2]], [[2]]),
                   ([], []),
                   (2, []),
                   ([[[2], [3], [2]], [[2], [3], [2]]],
                    [[0, 0], [0, 2], [1, 0], [1, 2]]),
                   ([[[0], [3], [2]], [[2], [3], [2]]], [[0, 2], [1, 0], [1, 2]]),
                   ([[[0], [3], [4]], [[2], [3], [2]]], [[1, 0], [1, 2]]),
                   ([[[0], [3], [4]], [[5], [3], [2]]], [[1, 2]]),
                   ([[[0], [3], [4]], [[5], [3], [0]]], [])]
        for mylist, result in mylists:
            paths = []
            parser._do_for_subarray(mylist,
                                    lambda x: isinstance(x, list) and
                                    len(x) >= 1 and
                                    x[0] == 2,
                                    lambda x, y, pts=paths: pts.append(y))
            self.assertEqual(paths, result)
    def test_get_vhosts(self):
        """All virtual hosts across the fixture configs are discovered."""
        nparser = parser.NginxParser(self.config_path, self.ssl_options)
        vhosts = nparser.get_vhosts()
        vhost1 = obj.VirtualHost(nparser.abs_path('nginx.conf'),
                                 [obj.Addr('', '8080', False, False)],
                                 False, True,
                                 set(['localhost',
                                      r'~^(www\.)?(example|bar)\.']),
                                 [], [9, 1, 9])
        vhost2 = obj.VirtualHost(nparser.abs_path('nginx.conf'),
                                 [obj.Addr('somename', '8080', False, False),
                                  obj.Addr('', '8000', False, False)],
                                 False, True,
                                 set(['somename', 'another.alias', 'alias']),
                                 [], [9, 1, 12])
        vhost3 = obj.VirtualHost(nparser.abs_path('sites-enabled/example.com'),
                                 [obj.Addr('69.50.225.155', '9000',
                                           False, False),
                                  obj.Addr('127.0.0.1', '', False, False)],
                                 False, True,
                                 set(['.example.com', 'example.*']), [], [0])
        vhost4 = obj.VirtualHost(nparser.abs_path('sites-enabled/default'),
                                 [obj.Addr('myhost', '', False, True)],
                                 False, True, set(['www.example.org']),
                                 [], [0])
        vhost5 = obj.VirtualHost(nparser.abs_path('foo.conf'),
                                 [obj.Addr('*', '80', True, True)],
                                 True, True, set(['*.www.foo.com',
                                                  '*.www.example.com']),
                                 [], [2, 1, 0])
        self.assertEqual(7, len(vhosts))
        example_com = [x for x in vhosts if 'example.com' in x.filep][0]
        self.assertEqual(vhost3, example_com)
        default = [x for x in vhosts if 'default' in x.filep][0]
        self.assertEqual(vhost4, default)
        fooconf = [x for x in vhosts if 'foo.conf' in x.filep][0]
        self.assertEqual(vhost5, fooconf)
        localhost = [x for x in vhosts if 'localhost' in x.names][0]
        self.assertEqual(vhost1, localhost)
        somename = [x for x in vhosts if 'somename' in x.names][0]
        self.assertEqual(vhost2, somename)
    def test_has_ssl_on_directive(self):
        """'ssl on' is detected only as a directive, not in listen values."""
        nparser = parser.NginxParser(self.config_path, self.ssl_options)
        mock_vhost = obj.VirtualHost(None, None, None, None, None,
                                     [['listen', 'myhost default_server'],
                                      ['server_name', 'www.example.org'],
                                      [['location', '/'], [['root', 'html'], ['index', 'index.html index.htm']]]
                                     ], None)
        self.assertFalse(nparser.has_ssl_on_directive(mock_vhost))
        mock_vhost.raw = [['listen', '*:80 default_server ssl'],
                          ['server_name', '*.www.foo.com *.www.example.com'],
                          ['root', '/home/ubuntu/sites/foo/']]
        self.assertFalse(nparser.has_ssl_on_directive(mock_vhost))
        mock_vhost.raw = [['listen', '80 ssl'],
                          ['server_name', '*.www.foo.com *.www.example.com']]
        self.assertFalse(nparser.has_ssl_on_directive(mock_vhost))
        mock_vhost.raw = [['listen', '80'],
                          ['ssl', 'on'],
                          ['server_name', '*.www.foo.com *.www.example.com']]
        self.assertTrue(nparser.has_ssl_on_directive(mock_vhost))
    def test_add_server_directives(self):
        """Directives are appended to matching server blocks with comments."""
        nparser = parser.NginxParser(self.config_path, self.ssl_options)
        mock_vhost = obj.VirtualHost(nparser.abs_path('nginx.conf'),
                                     None, None, None,
                                     set(['localhost',
                                          r'~^(www\.)?(example|bar)\.']),
                                     None, [9, 1, 9])
        nparser.add_server_directives(mock_vhost,
                                      [['foo', 'bar'], ['\n ', 'ssl_certificate', ' ',
                                        '/etc/ssl/cert.pem']],
                                      replace=False)
        ssl_re = re.compile(r'\n\s+ssl_certificate /etc/ssl/cert.pem')
        dump = nginxparser.dumps(nparser.parsed[nparser.abs_path('nginx.conf')])
        self.assertEqual(1, len(re.findall(ssl_re, dump)))
        example_com = nparser.abs_path('sites-enabled/example.com')
        names = set(['.example.com', 'example.*'])
        mock_vhost.filep = example_com
        mock_vhost.names = names
        mock_vhost.path = [0]
        nparser.add_server_directives(mock_vhost,
                                      [['foo', 'bar'], ['ssl_certificate',
                                                        '/etc/ssl/cert2.pem']],
                                      replace=False)
        nparser.add_server_directives(mock_vhost, [['foo', 'bar']],
                                      replace=False)
        from certbot_nginx.parser import COMMENT
        self.assertEqual(nparser.parsed[example_com],
                         [[['server'], [['listen', '69.50.225.155:9000'],
                                        ['listen', '127.0.0.1'],
                                        ['server_name', '.example.com'],
                                        ['server_name', 'example.*'],
                                        ['foo', 'bar'],
                                        ['#', COMMENT],
                                        ['ssl_certificate', '/etc/ssl/cert2.pem'],
                                        ['#', COMMENT], [], []
                                        ]]])
        server_conf = nparser.abs_path('server.conf')
        names = set(['alias', 'another.alias', 'somename'])
        mock_vhost.filep = server_conf
        mock_vhost.names = names
        mock_vhost.path = []
        # Adding to a file with no enclosing server block must fail.
        self.assertRaises(errors.MisconfigurationError,
                          nparser.add_server_directives,
                          mock_vhost,
                          [['foo', 'bar'],
                           ['ssl_certificate', '/etc/ssl/cert2.pem']],
                          replace=False)
    def test_replace_server_directives(self):
        """replace=True overwrites an existing directive of the same name."""
        nparser = parser.NginxParser(self.config_path, self.ssl_options)
        target = set(['.example.com', 'example.*'])
        filep = nparser.abs_path('sites-enabled/example.com')
        mock_vhost = obj.VirtualHost(filep, None, None, None, target, None, [0])
        nparser.add_server_directives(
            mock_vhost, [['server_name', 'foobar.com']], replace=True)
        from certbot_nginx.parser import COMMENT
        self.assertEqual(
            nparser.parsed[filep],
            [[['server'], [['listen', '69.50.225.155:9000'],
                           ['listen', '127.0.0.1'],
                           ['server_name', 'foobar.com'], ['#', COMMENT],
                           ['server_name', 'example.*'], []
                           ]]])
        mock_vhost.names = set(['foobar.com', 'example.*'])
        # Replacing a directive that does not exist yet must fail.
        self.assertRaises(errors.MisconfigurationError,
                          nparser.add_server_directives,
                          mock_vhost,
                          [['ssl_certificate', 'cert.pem']],
                          replace=True)
    def test_get_best_match(self):
        """Name matching honours nginx precedence: exact > wildcard > regex."""
        target_name = 'www.eff.org'
        names = [set(['www.eff.org', 'irrelevant.long.name.eff.org', '*.org']),
                 set(['eff.org', 'ww2.eff.org', 'test.www.eff.org']),
                 set(['*.eff.org', '.www.eff.org']),
                 set(['.eff.org', '*.org']),
                 set(['www.eff.', 'www.eff.*', '*.www.eff.org']),
                 set(['example.com', r'~^(www\.)?(eff.+)', '*.eff.*']),
                 set(['*', r'~^(www\.)?(eff.+)']),
                 set(['www.*', r'~^(www\.)?(eff.+)', '.test.eff.org']),
                 set(['*.org', r'*.eff.org', 'www.eff.*']),
                 set(['*.www.eff.org', 'www.*']),
                 set(['*.org']),
                 set([]),
                 set(['example.com'])]
        winners = [('exact', 'www.eff.org'),
                   (None, None),
                   ('exact', '.www.eff.org'),
                   ('wildcard_start', '.eff.org'),
                   ('wildcard_end', 'www.eff.*'),
                   ('regex', r'~^(www\.)?(eff.+)'),
                   ('wildcard_start', '*'),
                   ('wildcard_end', 'www.*'),
                   ('wildcard_start', '*.eff.org'),
                   ('wildcard_end', 'www.*'),
                   ('wildcard_start', '*.org'),
                   (None, None),
                   (None, None)]
        for i, winner in enumerate(winners):
            self.assertEqual(winner,
                             parser.get_best_match(target_name, names[i]))
    def test_comment_directive(self):
        """_comment_directive inserts a comment block after a directive."""
        # pylint: disable=protected-access
        block = nginxparser.UnspacedList([
            ["\n", "a", " ", "b", "\n"],
            ["c", " ", "d"],
            ["\n", "e", " ", "f"]])
        from certbot_nginx.parser import _comment_directive, COMMENT_BLOCK
        _comment_directive(block, 1)
        _comment_directive(block, 0)
        self.assertEqual(block.spaced, [
            ["\n", "a", " ", "b", "\n"],
            COMMENT_BLOCK,
            "\n",
            ["c", " ", "d"],
            COMMENT_BLOCK,
            ["\n", "e", " ", "f"]])
    def test_get_all_certs_keys(self):
        """Certificate/key pairs are collected from every parsed file."""
        nparser = parser.NginxParser(self.config_path, self.ssl_options)
        filep = nparser.abs_path('sites-enabled/example.com')
        mock_vhost = obj.VirtualHost(filep,
                                     None, None, None,
                                     set(['.example.com', 'example.*']),
                                     None, [0])
        nparser.add_server_directives(mock_vhost,
                                      [['ssl_certificate', 'foo.pem'],
                                       ['ssl_certificate_key', 'bar.key'],
                                       ['listen', '443 ssl']],
                                      replace=False)
        c_k = nparser.get_all_certs_keys()
        migration_file = nparser.abs_path('sites-enabled/migration.com')
        self.assertEqual(set([('foo.pem', 'bar.key', filep),
                              ('cert.pem', 'cert.key', migration_file)
                              ]), c_k)
    def test_parse_server_ssl(self):
        """A server block's ssl flag follows listen args and 'ssl on/off'."""
        server = parser.parse_server([
            ['listen', '443']
        ])
        self.assertFalse(server['ssl'])
        server = parser.parse_server([
            ['listen', '443 ssl']
        ])
        self.assertTrue(server['ssl'])
        server = parser.parse_server([
            ['listen', '443'], ['ssl', 'off']
        ])
        self.assertFalse(server['ssl'])
        server = parser.parse_server([
            ['listen', '443'], ['ssl', 'on']
        ])
        self.assertTrue(server['ssl'])
    def test_ssl_options_should_be_parsed_ssl_directives(self):
        """The bundled ssl_options file parses into the expected directives."""
        nparser = parser.NginxParser(self.config_path, self.ssl_options)
        self.assertEqual(nginxparser.UnspacedList(nparser.loc["ssl_options"]),
                         [['ssl_session_cache', 'shared:le_nginx_SSL:1m'],
                          ['ssl_session_timeout', '1440m'],
                          ['ssl_protocols', 'TLSv1 TLSv1.1 TLSv1.2'],
                          ['ssl_prefer_server_ciphers', 'on'],
                          ['ssl_ciphers', '"ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-ECDSA-'+
                           'AES256-GCM-SHA384 ECDHE-ECDSA-AES128-SHA ECDHE-ECDSA-AES256'+
                           '-SHA ECDHE-ECDSA-AES128-SHA256 ECDHE-ECDSA-AES256-SHA384'+
                           ' ECDHE-RSA-AES128-GCM-SHA256 ECDHE-RSA-AES256-GCM-SHA384'+
                           ' ECDHE-RSA-AES128-SHA ECDHE-RSA-AES128-SHA256 ECDHE-RSA-'+
                           'AES256-SHA384 DHE-RSA-AES128-GCM-SHA256 DHE-RSA-AES256-GCM'+
                           '-SHA384 DHE-RSA-AES128-SHA DHE-RSA-AES256-SHA DHE-RSA-'+
                           'AES128-SHA256 DHE-RSA-AES256-SHA256 EDH-RSA-DES-CBC3-SHA"']
                          ])
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
zemanel/ansible | v2/test/executor/test_playbook_iterator.py | 10 | 2719 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.playbook_iterator import PlaybookIterator
from ansible.playbook import Playbook
from test.mock.loader import DictDataLoader
class TestPlaybookIterator(unittest.TestCase):
    """Exercise PlaybookIterator task scheduling over a mocked inventory."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_playbook_iterator(self):
        """Each host receives the play's tasks in order, then None when done."""
        # Fake loader serving an in-memory playbook plus one role task file.
        fake_loader = DictDataLoader({
            "test_play.yml": """
            - hosts: all
              roles:
              - test_role
              pre_tasks:
              - debug: msg="this is a pre_task"
              tasks:
              - debug: msg="this is a regular task"
              post_tasks:
              - debug: msg="this is a post_task"
            """,
            '/etc/ansible/roles/test_role/tasks/main.yml': """
            - debug: msg="this is a role task"
            """,
        })
        p = Playbook.load('test_play.yml', loader=fake_loader)
        hosts = []
        for i in range(0, 10):
            host = MagicMock()
            host.get_name.return_value = 'host%02d' % i
            hosts.append(host)
        inventory = MagicMock()
        inventory.get_hosts.return_value = hosts
        itr = PlaybookIterator(inventory, None, p)
        # Four tasks are expected (pre_task, role task, task, post_task),
        # after which the iterator signals completion with None.
        task = itr.get_next_task_for_host(hosts[0])
        print(task)
        self.assertIsNotNone(task)
        task = itr.get_next_task_for_host(hosts[0])
        print(task)
        self.assertIsNotNone(task)
        task = itr.get_next_task_for_host(hosts[0])
        print(task)
        self.assertIsNotNone(task)
        task = itr.get_next_task_for_host(hosts[0])
        print(task)
        self.assertIsNotNone(task)
        task = itr.get_next_task_for_host(hosts[0])
        print(task)
        self.assertIsNone(task)
| gpl-3.0 |
eddyb/servo | tests/wpt/mozilla/tests/webgl/conformance-2.0.0/deqp/build.py | 51 | 11811 | #!/usr/bin/env python
#
# Copyright (c) 2015 The Khronos Group Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and/or associated documentation files (the
# "Materials"), to deal in the Materials without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Materials, and to
# permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
#
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
#
# Author: Mobica LTD
import sys
import re
import os
import subprocess
import threading
from sys import stdout, stderr, argv
# Running this script
# 1. To rebuild all dependencies:
# $ build.py deps
# 2. To build all targets without rebuilding dependencies
# $ build.py build
# 3. To build a single target without rebuilding dependencies
# $ build.py build <target>
# See the table below for available targets
# 4. To rebuild all dependencies and targets
# $ build.py
# 5. To build dependencies for a single target
# $ build.py deps <target>
# 6. To build dependencies for and compile a single target
# $ build.py <target>
# List of targets (short target name, closure namespace)
# Maps each buildable short name to the closure namespace compiled for it;
# used by build_deps()/build_target() and the CLI dispatch in main().
targets = {
	'textureformat': 'functional.gles3.es3fTextureFormatTests',
	'fboCompletenessTests': 'functional.gles3.es3fFboCompletenessTests',
	'fbomultisampletests': 'functional.gles3.es3fFboMultisampleTests',
	'fbostencilbuffertests': 'functional.gles3.es3fFboStencilbufferTests',
	'fragmentoutput': 'functional.gles3.es3fFragmentOutputTests',
	'framebufferblittests': 'functional.gles3.es3fFramebufferBlitTests',
	'instancedrenderingtests': 'functional.gles3.es3fInstancedRenderingTests',
	'pixelBufferObjectTest': 'functional.gles3.es3fPixelBufferObjectTest',
	'primitiverestarttests': 'functional.gles3.es3fPrimitiveRestartTests',
	'samplerobjecttests': 'functional.gles3.es3fSamplerObjectTests',
	'transformFeedbackTests': 'functional.gles3.es3fTransformFeedbackTests',
	'uniformapi': 'functional.gles3.es3fUniformApiTests',
	'uniformbuffers': 'functional.gles3.es3fUniformBlockTests',
	'vertexarrays': 'functional.gles3.es3fVertexArrayTests',
	'shaderlibrary': 'modules.shared.glsShaderLibrary',
	'negativebuffer': 'functional.gles3.es3fNegativeBufferApiTests',
	'sglrReferenceContextTest': 'framework.opengl.simplereference.sglrReferenceContextTest',
	'lifetime': 'functional.gles3.es3fLifetimeTests',
	'draw': 'functional.gles3.es3fDrawTests',
	'attriblocation': 'functional.gles3.es3fAttribLocationTests',
	'textureShadowTests': 'functional.gles3.es3fTextureShadowTests',
	'texturewrap': 'functional.gles3.es3fTextureWrapTests',
	'negativetextureapi': 'functional.gles3.es3fNegativeTextureApiTests',
	'multisample': 'functional.gles3.es3fMultisampleTests',
	'negativefragmentapi': 'functional.gles3.es3fNegativeFragmentApiTests',
	'negativevertexarrayapi': 'functional.gles3.es3fNegativeVertexArrayApiTests',
	'negativestateapi' : 'functional.gles3.es3fNegativeStateApiTests',
	'negativeshaderapi' : 'functional.gles3.es3fNegativeShaderApiTests',
	'rasterizerdiscard' : 'functional.gles3.es3fRasterizerDiscardTests',
	'buffercopy' : 'functional.gles3.es3fBufferCopyTests',
	'shaderindexing' : 'functional.gles3.es3fShaderIndexingTests',
	'shaderloop' : 'functional.gles3.es3fShaderLoopTests',
	'shaderstruct' : 'functional.gles3.es3fShaderStructTests',
	'shaderswitch' : 'functional.gles3.es3fShaderSwitchTests',
	'fborender' : 'functional.gles3.es3fFboRenderTest',
	'shaderderivate' : 'functional.gles3.es3fShaderDerivateTests',
	'builtinprecision' : 'functional.gles3.es3fBuiltinPrecisionTests',
	'shaderbuiltinvar' : 'functional.gles3.es3fShaderBuiltinVarTests',
	'texturefiltering' : 'functional.gles3.es3fTextureFilteringTests',
	'fbocolor' : 'functional.gles3.es3fFboColorbufferTests',
	'fragdepth' : 'functional.gles3.es3fFragDepthTests',
	'shaderop' : 'functional.gles3.es3fShaderOperatorTests',
	'vao' : 'functional.gles3.es3fVertexArrayObjectTests',
	'clip' : 'functional.gles3.es3fClippingTests',
	'inv' : 'functional.gles3.es3fFboInvalidateTests',
	'defvertattr' : 'functional.gles3.es3fDefaultVertexAttributeTests',
	'occlusion' : 'functional.gles3.es3fOcclusionQueryTests',
	'shaderapi' : 'functional.gles3.es3fShaderApiTests',
	'shaderpackingfunction' : 'functional.gles3.es3fShaderPackingFunctionTests',
	'shadercommonfunction' : 'functional.gles3.es3fShaderCommonFunctionTests',
	'shadermatrix' : 'functional.gles3.es3fShaderMatrixTest',
	'shaderprecision' : 'functional.gles3.es3fShaderPrecisionTests',
	'bstate': 'functional.gles3.es3fBooleanStateQuery',
	'shaderstate': 'functional.gles3.es3fShaderStateQueryTests',
	'fbostate' : 'functional.gles3.es3fFboStateQueryTests',
	'rbostate' : 'functional.gles3.es3fRboStateQueryTests',
	'bufferstate' : 'functional.gles3.es3fBufferObjectQueryTests',
	'samplerstate' : 'functional.gles3.es3fSamplerStateQueryTests',
	'texstate' : 'functional.gles3.es3fTextureStateQuery',
	'internalformatstate' : 'functional.gles3.es3fInternalFormatQueryTests',
	'texturespecification' : 'functional.gles3.es3fTextureSpecificationTests',
	'shadertexturefunction' : 'functional.gles3.es3fShaderTextureFunctionTests',
	'sync' : 'functional.gles3.es3fSyncTests',
	'readpixel' : 'functional.gles3.es3fReadPixelTests',
	'stringquery' : 'functional.gles3.es3fStringQueryTests',
	'indexedstate' : 'functional.gles3.es3fIndexedStateQueryTests',
	'integerstate' : 'functional.gles3.es3fIntegerStateQueryTests',
	'floatstate' : 'functional.gles3.es3fFloatStateQueryTests'
}
# Running totals accumulated across all compiled targets by build_target().
total_errors = 0
total_warnings = 0
# Per-target [errors, warnings] counts, keyed by short target name.
results = dict()
def dep_filename(target):
    """Name of the dependency-list file generated for *target*."""
    return '{0}.dep'.format(target)
def compiled_filename(target):
    """Name of the compiler-output file generated for *target*."""
    return '{0}.compiled'.format(target)
def write_to_file(outfile, cmdLine, redirect_stderr):
    """Run *cmdLine* in a shell and stream its stdout into *outfile*.

    When *redirect_stderr* is true the child's stderr is folded into its
    stdout and captured as well.
    """
    # Renamed from 'stderr': the old local shadowed sys.stderr imported
    # at the top of this file.
    err_stream = subprocess.STDOUT if redirect_stderr else None
    with open(outfile, "w") as out_file:
        proc = subprocess.Popen(cmdLine, shell=True,
                                stdout=subprocess.PIPE, stderr=err_stream)
        # Read until EOF instead of polling: the original
        # 'while proc.poll() is None' loop could drop output that was
        # still buffered when the process exited.
        for line in iter(proc.stdout.readline, ''):
            out_file.write(line)
            out_file.flush()
        proc.wait()
def read_file(file_path):
    """Return the full contents of *file_path*; exit(2) if it is missing."""
    if not file_exists(file_path):
        sys.exit(2)
    with open(file_path) as handle:
        return handle.read()
def file_exists(file_path):
    """Return True if *file_path* exists; print a diagnostic and return False otherwise."""
    # Fixes two bugs: the original tested 'not os.path.exists' (the
    # function object itself, which is always truthy), so the check never
    # fired and missing files were reported as present; and the message
    # referenced an undefined name 'file_name' (NameError if reached).
    if not os.path.exists(file_path):
        print("The file " + file_path + " doesn't exists")
        return False
    return True
def build_deps(target, namespace):
    """Compute the closure dependency list for one target and save it."""
    cmd = ('python ../closure-library/closure/bin/build/closurebuilder.py'
           ' --root=../closure-library --root=. --namespace=' + namespace)
    print(cmd)
    write_to_file(dep_filename(target), cmd, False)
def build_all_deps():
    """Rebuild the dependency list for every known target."""
    for target, namespace in targets.items():
        build_deps(target, namespace)
def buildDepsFile():
    """Regenerate deqp-deps.js via closure's depswriter."""
    # "--root_with_prefix" is the relative path from goog/base.js to the
    # root of the .js files being processed.
    cmd = ('python ../closure-library/closure/bin/build/depswriter.py '
          '--root_with_prefix=". ../../../deqp" > deqp-deps.js')
    # Run the generator and wait for it to finish; its output goes
    # straight to deqp-deps.js through the shell redirection.
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=None)
    proc.wait()
def build_target(target, namespace):
    """Compile one target with the closure compiler and record its
    error/warning counts in the module-level totals and `results`."""
    global total_errors
    global total_warnings
    # The .dep file lists every .js source this target needs.
    deps = read_file(dep_filename(target))
    cmdLine = 'java -jar compiler.jar --compilation_level ADVANCED_OPTIMIZATIONS --warning_level VERBOSE --jscomp_warning undefinedVars --externs compiler_additional_extern.js'
    for dep in deps.split('\n'):
        dep = dep.strip()
        if len(dep) > 0:
            cmdLine += ' --js ' + dep
    cmdLine += ' --closure_entry_point=' + namespace
    print cmdLine
    filename = compiled_filename(target)
    # Capture stderr too: the compiler writes its summary there.
    write_to_file(filename, cmdLine, True)
    compiled = read_file(filename)
    # Pull "<N> error(s), <M> warning(s)" out of the compiler output.
    result = re.search(r'(\d*)\s*error\(s\),\s*(\d*)\s*warning\(s\)', compiled)
    errors = 0
    warnings = 0
    if result:
        print target + ': ' + result.group(0)
        errors = int(result.group(1))
        warnings = int(result.group(2))
    total_errors += errors
    total_warnings += warnings
    results[target] = [errors, warnings]
def build_all_targets():
    """Compile every known target (dependency lists must already exist)."""
    for target, namespace in targets.items():
        build_target(target, namespace)
def format_target(target):
    """Run fixjsstyle and the reformatting tool over a target's sources."""
    fixjsstyle = 'fixjsstyle.py'
    reformat = 'reformatting_tool.py'
    base_js = re.compile(r'closure-library.*base\.js')
    for dep in read_file(dep_filename(target)).split('\n'):
        dep = dep.strip()
        # Skip blank entries and closure-library's own base.js.
        if not dep or base_js.search(dep):
            continue
        print(fixjsstyle + ' ' + dep)
        subprocess.call(['python', fixjsstyle, dep])
        print(reformat + ' -f ' + dep)
        subprocess.call(['python', reformat, '-f', dep])
def format_all_targets():
    """Apply format_target to every known target."""
    for target in targets:
        format_target(target)
def pass_or_fail():
    """Print a pass/fail summary of the most recent build run."""
    if total_errors + total_warnings == 0:
        print("Passed")
    elif len(results) > 1:
        # Display the summary only when building more than one target.
        clean = [t for t, (e, w) in results.iteritems() if e + w == 0]
        broken = [(t, e, w) for t, (e, w) in results.iteritems() if e + w != 0]
        print("\nBuild Summary:")
        # Targets that passed come first, then the failures (fixed-width
        # columns to improve readability).
        for target in clean:
            print("{0:>30}\tPassed".format(target + ":"))
        for target, errors, warnings in broken:
            print("{0:>30}\tErrors: {1:4}\tWarnings: {2:4}".format(
                target + ":", errors, warnings))
        print("Compilation failed: {} error(s), {} warning(s).".format(
            total_errors, total_warnings))
def main(argv):
    """Command-line entry point.

    With no arguments: rebuild all dependency lists, build every target and
    print the pass/fail summary.  Otherwise the first argument selects a
    sub-command ('deps', 'format', 'build', 'depfile', 'list'); any other
    first argument is treated as a single target name to build.
    """
    if len(argv) == 0:
        build_all_deps()
        build_all_targets()
        buildDepsFile()
        pass_or_fail()
    elif argv[0] == 'deps':
        if len(argv) == 2:
            target = argv[1]
            build_deps(target, targets[target])
        else:
            build_all_deps()
    elif argv[0] == 'format':
        if len(argv) == 2:
            target = argv[1]
            format_target(target)
        else:
            format_all_targets()
    elif argv[0] == 'build':
        if len(argv) == 2:
            target = argv[1]
            build_target(target, targets[target])
        else:
            build_all_targets()
        pass_or_fail()
    elif argv[0] == 'depfile':
        buildDepsFile()
    elif argv[0] == 'list':
        # print() calls are Python 2/3 compatible, unlike the original
        # `print x` statements.
        print("List of available targets:")
        for target in targets.keys():
            print("\t{}".format(target))
    else:
        # Treat the argument as a target name: regenerate its dependency
        # list, build it and report.
        target = argv[0]
        build_deps(target, targets[target])
        build_target(target, targets[target])
        pass_or_fail()
# Run the CLI when executed as a script; argv[0] (the program name) is
# stripped before dispatching.
if __name__ == '__main__':
    main(sys.argv[1:])
| mpl-2.0 |
SamYaple/neutron | neutron/db/api.py | 8 | 3664 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session
from oslo_utils import uuidutils
from sqlalchemy import exc
from neutron.common import exceptions as n_exc
from neutron.db import common_db_mixin
# Lazily-created global EngineFacade (see _create_facade_lazily()).
_FACADE = None

# Maximum number of retries for deadlocked database operations.
MAX_RETRIES = 10


def is_deadlock(exc_):
    """Return True if *exc_* is a database deadlock error.

    Defined with ``def`` rather than a lambda assignment (PEP 8 E731).
    """
    return isinstance(exc_, db_exc.DBDeadlock)


# Decorator that retries a DB operation on deadlock, up to MAX_RETRIES.
retry_db_errors = oslo_db_api.wrap_db_retry(
    max_retries=MAX_RETRIES,
    exception_checker=is_deadlock
)
def _create_facade_lazily():
    """Create the global EngineFacade on first use and cache it.

    Subsequent calls return the cached facade.  NOTE(review): not guarded
    by a lock, so concurrent first calls may each build a facade with the
    last assignment winning — confirm this is acceptable for callers.
    """
    global _FACADE
    if _FACADE is None:
        _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)
    return _FACADE
def get_engine():
    """Helper method to grab engine."""
    return _create_facade_lazily().get_engine()
def dispose():
    """Dispose of the engine's connection pool, if one was ever created."""
    # Nothing to do when no enginefacade has been built yet.
    if _FACADE is None:
        return
    get_engine().pool.dispose()
def get_session(autocommit=True, expire_on_commit=False, use_slave=False):
    """Helper method to grab a database session from the global facade."""
    return _create_facade_lazily().get_session(
        autocommit=autocommit,
        expire_on_commit=expire_on_commit,
        use_slave=use_slave)
@contextlib.contextmanager
def autonested_transaction(sess):
    """This is a convenience method to not bother with 'nested' parameter."""
    try:
        # Prefer a true nested transaction (SAVEPOINT); SQLAlchemy raises
        # InvalidRequestError when that is not possible here.
        session_context = sess.begin_nested()
    except exc.InvalidRequestError:
        # Fall back to an ordinary subtransaction.
        session_context = sess.begin(subtransactions=True)
    finally:
        # Enter whichever context was created and hand its transaction to
        # the caller.  NOTE(review): if sess.begin() itself raised,
        # session_context would be unbound here — confirm that cannot occur.
        with session_context as tx:
            yield tx
# Common database operation implementations
def get_object(context, model, **kwargs):
    """Return the first *model* row matching *kwargs*, or None."""
    with context.session.begin(subtransactions=True):
        query = common_db_mixin.model_query(context, model)
        return query.filter_by(**kwargs).first()
def get_objects(context, model, **kwargs):
    """Return all *model* rows matching *kwargs* as a list."""
    with context.session.begin(subtransactions=True):
        query = common_db_mixin.model_query(context, model)
        return query.filter_by(**kwargs).all()
def create_object(context, model, values):
    """Insert a new *model* row built from *values*.

    Generates a random UUID for 'id' when the caller did not supply one.
    Returns the new row's attribute dictionary.
    """
    with context.session.begin(subtransactions=True):
        if 'id' not in values:
            values['id'] = uuidutils.generate_uuid()
        new_row = model(**values)
        context.session.add(new_row)
        return new_row.__dict__
def _safe_get_object(context, model, id):
    """Like get_object(), but raise ObjectNotFound instead of returning None."""
    row = get_object(context, model, id=id)
    if row is not None:
        return row
    raise n_exc.ObjectNotFound(id=id)
def update_object(context, model, id, values):
    """Update the *model* row identified by *id* with *values*.

    Raises ObjectNotFound when the row does not exist; returns the updated
    row's attribute dictionary.
    """
    with context.session.begin(subtransactions=True):
        row = _safe_get_object(context, model, id)
        row.update(values)
        row.save(session=context.session)
        return row.__dict__
def delete_object(context, model, id):
    """Delete the *model* row identified by *id* (raises ObjectNotFound)."""
    with context.session.begin(subtransactions=True):
        context.session.delete(_safe_get_object(context, model, id))
| apache-2.0 |
bkochendorfer/reviewboard | reviewboard/diffviewer/views.py | 4 | 16931 | from __future__ import unicode_literals
import logging
import traceback
from django.conf import settings
from django.core.paginator import InvalidPage, Paginator
from django.http import (HttpResponse, HttpResponseNotModified,
HttpResponseServerError, Http404)
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.http import encode_etag, etag_if_none_match, set_etag
from reviewboard.diffviewer.diffutils import (get_diff_files,
get_enable_highlighting)
from reviewboard.diffviewer.errors import UserVisibleError
from reviewboard.diffviewer.models import DiffSet, FileDiff
from reviewboard.diffviewer.renderers import (get_diff_renderer,
get_diff_renderer_class)
def get_collapse_diff(request):
    """Decide whether diffs should be rendered collapsed for this request.

    Explicit ?expand= / ?collapse= query arguments win; otherwise the
    'collapsediffs' cookie is consulted, and the default is collapsed.
    """
    query = request.GET
    if query.get('expand', False):
        return False
    if query.get('collapse', False):
        return True
    cookie = request.COOKIES.get('collapsediffs')
    if cookie is not None:
        return cookie == "True"
    return True
class DiffViewerView(TemplateView):
    """Renders the main diff viewer.

    This renders the diff viewer for a given DiffSet (or an interdiff
    between two DiffSets). It handles loading information on the diffs,
    generating the side-by-side view, and pagination.

    The view expects the following parameters to be provided:

    * diffset
      - The DiffSet to render.

    The following may also be provided:

    * interdiffset
      - A DiffSet object representing the other end of an interdiff range.

    The following query parameters can be passed in on the URL:

    * ?expand=1
      - Expands all files within the diff viewer.

    * ?collapse=1
      - Collapses all files within the diff viewer, showing only
        modifications and a few lines of context.

    * ?file=<id>
      - Renders only the FileDiff represented by the provided ID.

    * ?page=<pagenum>
      - Renders diffs found on the given page number, if the diff viewer
        is paginated.
    """
    template_name = 'diffviewer/view_diff.html'
    fragment_error_template_name = 'diffviewer/diff_fragment_error.html'

    def get(self, request, diffset, interdiffset=None, *args, **kwargs):
        """Handles GET requests for this view.

        This will render the full diff viewer based on the provided
        parameters.

        The full rendering time will be logged.

        If there's any exception thrown during rendering, an error page
        with a traceback will be returned instead.
        """
        # Remember the collapse preference so render_to_response() can
        # persist it as a cookie.
        self.collapse_diffs = get_collapse_diff(request)
        if interdiffset:
            logging.debug('Generating diff viewer page for interdiffset '
                          'ids %s-%s',
                          diffset.id, interdiffset.id, request=request)
        else:
            # NOTE(review): the message says "filediff id" but the value
            # logged is diffset.id — confirm the intended wording.
            logging.debug('Generating diff viewer page for filediff id %s',
                          diffset.id, request=request)
        try:
            response = super(DiffViewerView, self).get(
                request, diffset=diffset, interdiffset=interdiffset,
                *args, **kwargs)
            if interdiffset:
                logging.debug('Done generating diff viewer page for '
                              'interdiffset ids %s-%s',
                              diffset.id, interdiffset.id, request=request)
            else:
                logging.debug('Done generating diff viewer page for filediff '
                              'id %s',
                              diffset.id, request=request)
            return response
        except Exception as e:
            # Any rendering failure is converted into a 500 page with a
            # traceback rather than propagating.
            return exception_traceback(request, e, self.template_name)

    def render_to_response(self, *args, **kwargs):
        """Renders the page to an HttpResponse.

        This renders the diff viewer page, based on the context data
        generated, and sets cookies before returning an HttpResponse to
        the client.
        """
        response = super(DiffViewerView, self).render_to_response(*args,
                                                                  **kwargs)
        # Persist the collapse preference computed in get().
        response.set_cookie('collapsediffs', self.collapse_diffs)
        return response

    def get_context_data(self, diffset, interdiffset, extra_context={},
                         **kwargs):
        """Calculates and returns data used for rendering the diff viewer.

        This handles all the hard work of generating the data backing the
        side-by-side diff, handling pagination, and more. The data is
        collected into a context dictionary and returned for rendering.

        NOTE(review): extra_context uses a mutable default argument; it is
        only read here, but callers should not rely on mutating it.
        """
        files = get_diff_files(diffset, None, interdiffset,
                               request=self.request)
        # Break the list of files into pages
        siteconfig = SiteConfiguration.objects.get_current()
        paginator = Paginator(files,
                              siteconfig.get('diffviewer_paginate_by'),
                              siteconfig.get('diffviewer_paginate_orphans'))
        page_num = int(self.request.GET.get('page', 1))
        if self.request.GET.get('file', False):
            # ?file=<id> overrides ?page=: jump to the page containing
            # that FileDiff.
            file_id = int(self.request.GET['file'])
            for i, f in enumerate(files):
                if f['filediff'].pk == file_id:
                    page_num = i // paginator.per_page + 1
                    if page_num > paginator.num_pages:
                        page_num = paginator.num_pages
                    break
        try:
            page = paginator.page(page_num)
        except InvalidPage:
            # Out-of-range page numbers fall back to the last page.
            page = paginator.page(paginator.num_pages)
        diff_context = {
            'revision': {
                'revision': diffset.revision,
                'is_interdiff': interdiffset is not None,
                'interdiff_revision': (interdiffset.revision
                                       if interdiffset else None),
            },
            'pagination': {
                'is_paginated': page.has_other_pages(),
                'current_page': page.number,
                'pages': paginator.num_pages,
                'page_numbers': paginator.page_range,
                'has_next': page.has_next(),
                'has_previous': page.has_previous(),
            },
        }
        if page.has_next():
            diff_context['pagination']['next_page'] = page.next_page_number()
        if page.has_previous():
            diff_context['pagination']['previous_page'] = \
                page.previous_page_number()
        context = dict({
            'diff_context': diff_context,
            'diffset': diffset,
            'interdiffset': interdiffset,
            'diffset_pair': (diffset, interdiffset),
            'files': page.object_list,
            'collapseall': self.collapse_diffs,
        }, **extra_context)
        return context
class DiffFragmentView(View):
    """Renders a fragment from a file in the diff viewer.

    Based on the diffset data and other arguments provided, this will render
    a fragment from a file in a diff. This may be the entire file, or some
    chunk within.

    The view expects the following parameters to be provided:

    * diffset_or_id
      - A DiffSet object or the ID for one.

    * filediff_id
      - The ID of a FileDiff within the DiffSet.

    The following may also be provided:

    * interdiffset_or_id
      - A DiffSet object or the ID for one representing the other end of
        an interdiff range.

    * chunkindex
      - The index (0-based) of the chunk to render. If left out, the
        entire file will be rendered.

    The caller may also pass ``?lines-of-context=`` as a query parameter to
    the URL to indicate how many lines of context should be provided around
    the chunk.
    """
    template_name = 'diffviewer/diff_file_fragment.html'
    error_template_name = 'diffviewer/diff_fragment_error.html'

    def get(self, request, *args, **kwargs):
        """Handles GET requests for this view.

        This will create the renderer for the diff fragment, render it, and
        return it.

        If there's an error when rendering the diff fragment, an error page
        will be rendered and returned instead.
        """
        try:
            renderer_settings = self._get_renderer_settings(**kwargs)
            etag = self.make_etag(renderer_settings, **kwargs)
            # Short-circuit with a 304 if the client already has this
            # exact render.
            if etag_if_none_match(request, etag):
                return HttpResponseNotModified()
            diff_info_or_response = self.process_diffset_info(**kwargs)
            # Subclasses may return an HttpResponse to signal an error.
            if isinstance(diff_info_or_response, HttpResponse):
                return diff_info_or_response
        except Http404:
            raise
        except Exception as e:
            return exception_traceback(self.request, e,
                                       self.error_template_name)
        kwargs.update(diff_info_or_response)
        try:
            context = self.get_context_data(**kwargs)
            renderer = self.create_renderer(
                context=context,
                renderer_settings=renderer_settings,
                *args, **kwargs)
            response = renderer.render_to_response(request)
        except Exception as e:
            return exception_traceback(
                self.request, e, self.error_template_name,
                extra_context={
                    'file': diff_info_or_response['diff_file'],
                })
        # Only successful renders are tagged for client-side caching.
        if response.status_code == 200:
            set_etag(response, etag)
        return response

    def make_etag(self, renderer_settings, filediff_id,
                  interdiffset_or_id=None, **kwargs):
        """Return an ETag identifying this render."""
        if interdiffset_or_id and isinstance(interdiffset_or_id, DiffSet):
            interdiffset_or_id = interdiffset_or_id.pk
        # Any input that changes the rendered output participates in the
        # ETag, including the template serial for cache busting on deploy.
        etag = '%s:%s:%s:%s:%s:%s' % (
            get_diff_renderer_class(),
            renderer_settings['collapse_all'],
            renderer_settings['highlighting'],
            filediff_id,
            interdiffset_or_id,
            settings.TEMPLATE_SERIAL)
        return encode_etag(etag)

    def process_diffset_info(self, diffset_or_id, filediff_id,
                             interdiffset_or_id=None, **kwargs):
        """Process and return information on the desired diff.

        The diff IDs and other data passed to the view can be processed and
        converted into DiffSets. A dictionary with the DiffSet and FileDiff
        information will be returned.

        A subclass may instead return a HttpResponse to indicate an error
        with the DiffSets.
        """
        # Depending on whether we're invoked from a URL or from a wrapper
        # with precomputed diffsets, we may be working with either IDs or
        # actual objects. If they're objects, just use them as-is. Otherwise,
        # if they're IDs, we want to grab them both (if both are provided)
        # in one go, to save on an SQL query.
        diffset = None
        interdiffset = None
        diffset_ids = []
        if isinstance(diffset_or_id, DiffSet):
            diffset = diffset_or_id
        else:
            diffset_ids.append(diffset_or_id)
        if interdiffset_or_id:
            if isinstance(interdiffset_or_id, DiffSet):
                interdiffset = interdiffset_or_id
            else:
                diffset_ids.append(interdiffset_or_id)
        if diffset_ids:
            diffsets = DiffSet.objects.filter(pk__in=diffset_ids)
            if len(diffsets) != len(diffset_ids):
                raise Http404
            # Match each fetched row back to the ID it was requested by.
            for temp_diffset in diffsets:
                if temp_diffset.pk == diffset_or_id:
                    diffset = temp_diffset
                elif temp_diffset.pk == interdiffset_or_id:
                    interdiffset = temp_diffset
                else:
                    assert False
        filediff = get_object_or_404(FileDiff, pk=filediff_id, diffset=diffset)
        # Store this so we don't end up causing an SQL query later when looking
        # this up.
        filediff.diffset = diffset
        diff_file = self._get_requested_diff_file(diffset, filediff,
                                                  interdiffset)
        if not diff_file:
            raise UserVisibleError(
                _('Internal error. Unable to locate file record for '
                  'filediff %s')
                % filediff.pk)
        return {
            'diffset': diffset,
            'interdiffset': interdiffset,
            'filediff': filediff,
            'diff_file': diff_file,
        }

    def create_renderer(self, context, renderer_settings, diff_file,
                        *args, **kwargs):
        """Creates the renderer for the diff.

        This calculates all the state and data needed for rendering, and
        constructs a DiffRenderer with that data. That renderer is then
        returned, ready for rendering.

        If there's an error in looking up the necessary information, this
        may raise a UserVisibleError (best case), or some other form of
        Exception.
        """
        return get_diff_renderer(
            diff_file,
            extra_context=context,
            template_name=self.template_name,
            **renderer_settings)

    def get_context_data(self, *args, **kwargs):
        """Returns context data used for rendering the view.

        This can be overridden by subclasses to provide additional data for the
        view.
        """
        return {}

    def _get_renderer_settings(self, chunk_index=None, **kwargs):
        """Calculate the render settings for the display of a diff.

        This will calculate settings based on user preferences and URL
        parameters. It does not calculate the state of any DiffSets or
        FileDiffs.
        """
        highlighting = get_enable_highlighting(self.request.user)
        try:
            # ?lines-of-context= is "<before>,<after>"; any parse failure
            # disables the feature entirely.
            lines_of_context = self.request.GET.get('lines-of-context', '')
            lines_of_context = [int(i) for i in lines_of_context.split(',', 1)]
        except (TypeError, ValueError):
            lines_of_context = None
        if chunk_index is not None:
            try:
                chunk_index = int(chunk_index)
            except (TypeError, ValueError):
                chunk_index = None
        if lines_of_context:
            collapse_all = True
        elif chunk_index is not None:
            # If we're currently expanding part of a chunk, we want to render
            # the entire chunk without any lines collapsed. In the case of
            # showing a range of lines, we're going to get all chunks and then
            # only show the range. This is so that we won't have separate
            # cached entries for each range.
            collapse_all = False
        else:
            collapse_all = get_collapse_diff(self.request)
        return {
            'chunk_index': chunk_index,
            'collapse_all': collapse_all,
            'highlighting': highlighting,
            'lines_of_context': lines_of_context,
        }

    def _get_requested_diff_file(self, diffset, filediff, interdiffset):
        """Fetches information on the requested diff.

        This will look up information on the diff that's to be rendered
        and return it, if found. It may also augment it with additional
        data.

        The file will not contain chunk information. That must be specifically
        populated later.
        """
        files = get_diff_files(diffset, filediff, interdiffset,
                               request=self.request)
        if files:
            # Passing a specific filediff means at most one entry comes back.
            assert len(files) == 1
            file = files[0]
            if 'index' in self.request.GET:
                try:
                    file['index'] = int(self.request.GET.get('index'))
                except ValueError:
                    # A malformed ?index= is simply ignored.
                    pass
            return file
        return None
def exception_traceback_string(request, e, template_name, extra_context=None):
    """Render *template_name* for exception *e* and return the string.

    A formatted traceback is included in the context unless the exception
    is exactly UserVisibleError, which carries a user-presentable message
    of its own.  *extra_context* entries are merged into the template
    context.  The default was changed from ``{}`` to ``None`` to avoid a
    shared mutable default argument; behavior for callers is unchanged.
    """
    context = {'error': e}
    if extra_context:
        context.update(extra_context)
    if e.__class__ is not UserVisibleError:
        context['trace'] = traceback.format_exc()
    # Outside of a request cycle (e.g. tests), render with a plain dict.
    if request:
        request_context = RequestContext(request, context)
    else:
        request_context = context
    return render_to_string(template_name, request_context)
def exception_traceback(request, e, template_name, extra_context=None):
    """Return an HTTP 500 response rendering *template_name* for *e*.

    The default was changed from ``{}`` to ``None`` to avoid a shared
    mutable default argument; a fresh empty dict is passed through instead.
    """
    return HttpResponseServerError(
        exception_traceback_string(request, e, template_name,
                                   extra_context or {}))
| mit |
konradcybulski/GameTheory1041 | Python_Resources/SimulationInstance.py | 1 | 5811 | """
@author Konrad Cybulski
@since 14/08/2016
@modified 16/08/2016
Code written utilising pseudocode provided
in 'Social norms of cooperation in small-scale
societies' by Santos, Santos, Pacheco
"""
import numpy as np
from InstanceVariables import InstanceVariables
"""
Static Variables
"""
# B G
strategies = np.array([[0, 0], # AllD
[1, 0], # pDisc
[0, 1], # Disc
[1, 1]]) # AllC
def payoff_function(x, y, variables):
    """
    Play one donation-game interaction between agents x and y, updating
    both reputations and the cooperation index as a side effect.

    :param x: the index of agent-x in population
    :param y: the index of agent-y in population
    :param variables: the class containing simulation instance variables.
    :return: the payoff of x from this interaction
             (benefit received minus cost paid).
    """
    # Action of X: with probability assessment_error, X misreads Y's
    # reputation (indexes the strategy with the flipped reputation).  The
    # execution error only flips the action when the intended action is
    # cooperate (the `and xstrategy[...]` guard).
    xstrategy = strategies[variables.population[x]]
    if np.random.random() < variables.assessment_error:
        if np.random.random() < variables.execution_error and xstrategy[1 - variables.reputation[y]]:
            cx = 1 - xstrategy[1 - variables.reputation[y]]
        else:
            cx = xstrategy[1 - variables.reputation[y]]
    else:
        if np.random.random() < variables.execution_error and xstrategy[variables.reputation[y]]:
            cx = 1 - xstrategy[variables.reputation[y]]
        else:
            cx = xstrategy[variables.reputation[y]]
    # Action of Y: symmetric to X's decision above.
    ystrategy = strategies[variables.population[y]]
    if np.random.random() < variables.assessment_error:
        if np.random.random() < variables.execution_error and ystrategy[1 - variables.reputation[x]]:
            cy = 1 - ystrategy[1 - variables.reputation[x]]
        else:
            cy = ystrategy[1 - variables.reputation[x]]
    else:
        if np.random.random() < variables.execution_error and ystrategy[variables.reputation[x]]:
            cy = 1 - ystrategy[variables.reputation[x]]
        else:
            cy = ystrategy[variables.reputation[x]]
    rx = variables.reputation[x]
    ry = variables.reputation[y]
    # Update Reputation of X: with probability reputation_update_rate the
    # social norm assigns X a new reputation from (X's action, Y's
    # reputation); reputation_assignment_error inverts the assignment.
    if np.random.random() < variables.reputation_update_rate:
        if np.random.random() < variables.reputation_assignment_error:
            rx = 1 - variables.socialnorm[1 - cx][1 - variables.reputation[y]]
        else:
            rx = variables.socialnorm[1 - cx][1 - variables.reputation[y]]
    # Update Reputation of Y: symmetric to X's update above.
    if np.random.random() < variables.reputation_update_rate:
        if np.random.random() < variables.reputation_assignment_error:
            ry = 1 - variables.socialnorm[1 - cy][1 - variables.reputation[x]]
        else:
            ry = variables.socialnorm[1 - cy][1 - variables.reputation[x]]
    variables.reputation[x] = rx
    variables.reputation[y] = ry
    # Track cooperation: fraction of the two agents that cooperated.
    cur_cooperation_index = float(float(cy + cx)/float(2))
    variables.increment_coop_index(cur_cooperation_index)
    return (variables.benefit * cy) - (variables.cost * cx)
def fitness_function(agent_x, agent_y, variables):
    """Estimate the fitness of two agents by Monte Carlo sampling.

    Each agent plays 2*Z interactions against opponents sampled uniformly
    from the population excluding itself; the average payoff is its
    fitness.  Reputations are updated as a side effect of each interaction.

    :return: [fitness_x, fitness_y]
    """
    Z = variables.population_size
    fitness_x = 0
    fitness_y = 0
    # Creating tournament arrays: uniform over everyone except the focal
    # agent itself (its own probability is zeroed out).
    probabilities_x = np.ones(Z, dtype=np.float) / float(Z - 1)
    probabilities_x[agent_x] = 0
    probabilities_y = np.ones(Z, dtype=np.float) / float(Z - 1)
    probabilities_y[agent_y] = 0
    tournament_sample_x = np.random.choice(Z, size=2 * Z, p=probabilities_x,
                                           replace=True)
    tournament_sample_y = np.random.choice(Z, size=2 * Z, p=probabilities_y,
                                           replace=True)
    for c in range(0, 2 * Z):
        agent_three = tournament_sample_x[c]
        # Update Fitness of A and Reputation of A & C
        fitness_x += payoff_function(agent_x, agent_three, variables)
        agent_three = tournament_sample_y[c]
        # Update Fitness of B and Reputation of B & C
        fitness_y += payoff_function(agent_y, agent_three, variables)
    # Average payoff over the 2*Z sampled interactions.
    fitness_x /= (2 * Z)
    fitness_y /= (2 * Z)
    return [fitness_x, fitness_y]
def simulate(runs, generations, population_size, mutation_rate,
             execution_error, reputation_assignment_error,
             private_assessment_error, reputation_update_prob,
             socialnorm, cost, benefit):
    """Run the full evolutionary simulation and return the cooperation index.

    For each run, a random population of strategies and reputations evolves
    over the given number of generations via pairwise imitation: in each
    generation every agent may mutate, and adopts a random partner's
    strategy with a probability given by the Fermi rule on their fitness
    difference.
    """
    variables = InstanceVariables(runs, generations, population_size, mutation_rate,
                                  execution_error, reputation_assignment_error,
                                  private_assessment_error, reputation_update_prob,
                                  socialnorm, cost, benefit)
    # Simulation begins
    Z = variables.population_size
    for r in range(0, variables.runs):
        # Initialise random population
        variables.population = np.random.randint(4, size=Z)  # equivalent to U(0, 3)
        variables.reputation = np.random.randint(2, size=Z)  # equivalent to U(0, 1)
        for t in range(0, variables.generations):
            mutation_probs = np.random.rand(Z) < variables.mutation_rate
            # One pair of distinct agents per agent slot this generation.
            agent_pairs = [np.random.choice(Z, size=2, replace=False) for _ in range(Z)]
            for i in range(Z):
                agent_one = agent_pairs[i][0]
                agent_two = agent_pairs[i][1]
                # Random mutation probability
                if mutation_probs[i]:
                    variables.population[agent_one] = np.random.randint(4)
                # Calculate fitness of agents
                fitness = fitness_function(agent_one, agent_two, variables)
                fitness_a = fitness[0]
                fitness_b = fitness[1]
                # Fermi imitation rule: agent_one copies agent_two with
                # probability 1 / (1 + exp(fitness_a - fitness_b)).
                if np.random.random() < np.power(1 + np.exp(fitness_a - fitness_b), -1):
                    variables.population[agent_one] = variables.population[agent_two]
    # Simulation ends
    # Return cooperation index.
    coop_index = variables.get_average_coop_index()
    return coop_index
| gpl-3.0 |
gengliangwang/spark | python/run-tests.py | 15 | 13614 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
    """Echo *text* to stdout wrapped in ANSI red escape codes."""
    red, reset = '\033[31m', '\033[0m'
    print(red + text + reset)
# Mapping of (python_exec, test_name) -> skipped-test lines; replaced with a
# multiprocessing Manager dict in the __main__ block before main() runs.
SKIPPED_TESTS = None
# Aggregated per-run unit test log, appended to on failure.
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
# Serializes failure reporting across worker threads.
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
    build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
    if os.path.isdir(build_dir):
        SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
        break
else:
    # for/else: reached only when no scala version's build dir was found.
    raise RuntimeError("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
    """Run a single PySpark test goal under *pyspark_python*.

    Spawns ``bin/pyspark`` with an isolated temp/metastore directory under
    *target_dir*, logs output, and records skipped tests.  On test failure
    the whole process tree is terminated via os._exit() — this function is
    run from worker threads, where sys.exit() would only kill the thread.
    """
    env = dict(os.environ)
    env.update({
        'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
        'SPARK_TESTING': '1',
        'SPARK_PREPEND_CLASSES': '1',
        'PYSPARK_PYTHON': which(pyspark_python),
        'PYSPARK_DRIVER_PYTHON': which(pyspark_python),
        # Preserve legacy nested timezone behavior for pyarrow>=2, remove after SPARK-32285
        'PYARROW_IGNORE_TIMEZONE': '1',
    })
    # Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
    # recognized by the tempfile module to override the default system temp directory.
    tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    while os.path.isdir(tmp_dir):
        tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    os.mkdir(tmp_dir)
    env["TMPDIR"] = tmp_dir
    metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
    while os.path.isdir(metastore_dir):
        metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
    os.mkdir(metastore_dir)
    # Also override the JVM's temp directory by setting driver and executor options.
    java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(tmp_dir)
    spark_args = [
        "--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
        "--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
        "--conf", "spark.sql.warehouse.dir='{0}'".format(metastore_dir),
        "pyspark-shell"
    ]
    env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
    LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
    start_time = time.time()
    try:
        per_test_output = tempfile.TemporaryFile()
        retcode = subprocess.Popen(
            [os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
            stderr=per_test_output, stdout=per_test_output, env=env).wait()
        shutil.rmtree(tmp_dir, ignore_errors=True)
    except:
        LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
        # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
        # this code is invoked from a thread other than the main thread.
        os._exit(1)
    duration = time.time() - start_time
    # Exit on the first failure.
    if retcode != 0:
        try:
            with FAILURE_REPORTING_LOCK:
                # Append the full output to the shared log, then echo the
                # non-numeric lines to the console.
                with open(LOG_FILE, 'ab') as log_file:
                    per_test_output.seek(0)
                    log_file.writelines(per_test_output)
                per_test_output.seek(0)
                for line in per_test_output:
                    decoded_line = line.decode("utf-8", "replace")
                    if not re.match('[0-9]+', decoded_line):
                        print(decoded_line, end='')
                per_test_output.close()
        except:
            LOGGER.exception("Got an exception while trying to print failed test output")
        finally:
            print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
    else:
        skipped_counts = 0
        try:
            per_test_output.seek(0)
            # Here expects skipped test output from unittest when verbosity level is
            # 2 (or --verbose option is enabled).
            decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
            skipped_tests = list(filter(
                lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
                decoded_lines))
            skipped_counts = len(skipped_tests)
            if skipped_counts > 0:
                key = (pyspark_python, test_name)
                assert SKIPPED_TESTS is not None
                SKIPPED_TESTS[key] = skipped_tests
            per_test_output.close()
        except:
            import traceback
            print_red("\nGot an exception while trying to store "
                      "skipped test output:\n%s" % traceback.format_exc())
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
        if skipped_counts != 0:
            LOGGER.info(
                "Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
                duration, skipped_counts)
        else:
            LOGGER.info(
                "Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
    """Return the Python executables to test against.

    Prefers python3.6 and pypy3 when available on PATH; when python3.6 is
    missing, the resolved 'python3' is prepended instead.  Aborts the
    process when no python3 can be found at all.
    """
    python_execs = [exe for exe in ("python3.6", "pypy3") if which(exe)]
    if "python3.6" in python_execs:
        return python_execs
    fallback = which("python3")
    if not fallback:
        LOGGER.error("No python3 executable found. Exiting!")
        os._exit(1)
    python_execs.insert(0, fallback)
    return python_execs
def parse_opts():
    """Parse and validate command-line options for the test runner.

    Returns the parsed argparse namespace; rejects unknown arguments and a
    parallelism below 1 via parser.error() (which exits).
    """
    parser = ArgumentParser(
        prog="run-tests"
    )
    parser.add_argument(
        "--python-executables", type=str, default=','.join(get_default_python_executables()),
        help="A comma-separated list of Python executables to test against (default: %(default)s)"
    )
    parser.add_argument(
        "--modules", type=str,
        default=",".join(sorted(python_modules.keys())),
        help="A comma-separated list of Python modules to test (default: %(default)s)"
    )
    parser.add_argument(
        "-p", "--parallelism", type=int, default=4,
        help="The number of suites to test in parallel (default %(default)d)"
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="Enable additional debug logging"
    )
    group = parser.add_argument_group("Developer Options")
    group.add_argument(
        "--testnames", type=str,
        default=None,
        help=(
            "A comma-separated list of specific modules, classes and functions of doctest "
            "or unittest to test. "
            "For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
            "'pyspark.sql.tests FooTests' to run the specific class of unittests, "
            "'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
            "'--modules' option is ignored if they are given.")
    )
    # parse_known_args() so leftover arguments can be reported explicitly.
    args, unknown = parser.parse_known_args()
    if unknown:
        parser.error("Unsupported arguments: %s" % ' '.join(unknown))
    if args.parallelism < 1:
        parser.error("Parallelism cannot be less than 1")
    return args
def _check_coverage(python_exec):
    """Exit unless the 'coverage' package is importable by *python_exec*.

    Called when the COVERAGE_PROCESS_START environment variable is set,
    since coverage collection would silently do nothing without the
    package installed in the interpreter under test.
    """
    # Make sure if coverage is installed.
    try:
        # subprocess.DEVNULL avoids the file-descriptor leak of the former
        # open(os.devnull, 'w'), which was never closed.
        subprocess_check_output(
            [python_exec, "-c", "import coverage"],
            stderr=subprocess.DEVNULL)
    except Exception:
        # `except Exception` rather than a bare `except:`, so that
        # KeyboardInterrupt/SystemExit still propagate.
        print_red("Coverage is not installed in Python executable '%s' "
                  "but 'COVERAGE_PROCESS_START' environment variable is set, "
                  "exiting." % python_exec)
        sys.exit(-1)
def main():
    """Drive the full PySpark test run.

    Resolves the interpreters and modules/test names to run, queues every
    (interpreter, test goal) pair into a priority queue (heavy suites
    first), fans them out over worker threads, and reports durations and
    skipped tests at the end.
    """
    opts = parse_opts()
    if opts.verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    # --testnames bypasses module discovery entirely.
    should_test_modules = opts.testnames is None
    logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
    LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
    if os.path.exists(LOG_FILE):
        os.remove(LOG_FILE)
    python_execs = opts.python_executables.split(',')
    LOGGER.info("Will test against the following Python executables: %s", python_execs)
    if should_test_modules:
        modules_to_test = []
        for module_name in opts.modules.split(','):
            if module_name in python_modules:
                modules_to_test.append(python_modules[module_name])
            else:
                print("Error: unrecognized module '%s'. Supported modules: %s" %
                      (module_name, ", ".join(python_modules)))
                sys.exit(-1)
        LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
    else:
        testnames_to_test = opts.testnames.split(',')
        LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
    task_queue = Queue.PriorityQueue()
    for python_exec in python_execs:
        # Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
        # environmental variable is set.
        if "COVERAGE_PROCESS_START" in os.environ:
            _check_coverage(python_exec)
        python_implementation = subprocess_check_output(
            [python_exec, "-c", "import platform; print(platform.python_implementation())"],
            universal_newlines=True).strip()
        LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
        LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
            [python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
        if should_test_modules:
            for module in modules_to_test:
                if python_implementation not in module.excluded_python_implementations:
                    for test_goal in module.python_test_goals:
                        # Lower priority value = scheduled first: start the
                        # slow suites early so they overlap the rest.
                        heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
                                       'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests',
                                       'pyspark.pandas.tests']
                        if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
                            priority = 0
                        else:
                            priority = 100
                        task_queue.put((priority, (python_exec, test_goal)))
        else:
            for test_goal in testnames_to_test:
                task_queue.put((0, (python_exec, test_goal)))
    # Create the target directory before starting tasks to avoid races.
    target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
    if not os.path.isdir(target_dir):
        os.mkdir(target_dir)

    def process_queue(task_queue):
        # Worker loop: drain the queue until empty; each failure path in
        # run_individual_python_test() kills the whole process.
        while True:
            try:
                (priority, (python_exec, test_goal)) = task_queue.get_nowait()
            except Queue.Empty:
                break
            try:
                run_individual_python_test(target_dir, test_goal, python_exec)
            finally:
                task_queue.task_done()

    start_time = time.time()
    for _ in range(opts.parallelism):
        worker = Thread(target=process_queue, args=(task_queue,))
        worker.daemon = True
        worker.start()
    try:
        task_queue.join()
    except (KeyboardInterrupt, SystemExit):
        print_red("Exiting due to interrupt")
        sys.exit(-1)
    total_duration = time.time() - start_time
    LOGGER.info("Tests passed in %i seconds", total_duration)
    for key, lines in sorted(SKIPPED_TESTS.items()):
        pyspark_python, test_name = key
        LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
        for line in lines:
            LOGGER.info("    %s" % line.rstrip())
if __name__ == "__main__":
    # Collector for skipped-test report lines, read back in main()'s summary
    # loop. It is a Manager-provided proxy dict rather than a plain dict --
    # NOTE(review): Manager is imported outside this chunk, presumably
    # multiprocessing.Manager, so mutations from test subprocesses are
    # visible here; confirm against the file's imports.
    SKIPPED_TESTS = Manager().dict()
    main()
| apache-2.0 |
nguyentu1602/pyexp | pyexp/datetime_practice.py | 1 | 2824 | import datetime as dt
import time as tm
import pytz as tz
# --- datetime basics ---------------------------------------------------------
# Current local time: datetime.now() returns a *naive* datetime, so the
# tzinfo printed below is None.
now = dt.datetime.now()
print(now.year, now.month, now.day, now.hour, now.minute, now.second, now.microsecond, now.tzinfo)
# Build datetime objects by hand and do date arithmetic with them.
last = dt.datetime(2020, 10, 31, 4, 20, 10) # omitted time components default to 0 (midnight)
first = dt.datetime(2019, 1, 1)
diff = last - first # datetime - datetime yields a timedelta
[i for i in dir(diff) if '__' not in i] # peek at the timedelta's non-dunder attributes
print("days and seconds are main units of the timedelta object: ", diff.days, diff.seconds)
diff.total_seconds() # the whole delta expressed in seconds (days folded in)
delta = dt.timedelta(days=10, hours=20, minutes=35, seconds=10) # hand-built timedelta
future_date = last + delta; print( future_date ) # datetime + timedelta -> datetime
# Format a datetime as a string (the "f" in strftime stands for format).
time_string = dt.datetime.strftime(future_date, '%Y-%m-%d %H:%M:%S') # NB: %m is month, %M is minute
# Parse a string back into a datetime (the "p" in strptime stands for parse).
date_obj = dt.datetime.strptime('2019-16-02', '%Y-%d-%m') # day-first format: 16 Feb 2019
print(date_obj)
# The time module covers Unix-epoch operations (epoch seconds, sleep) --
# that is pretty much all it is good for.
tm.time() # seconds since the epoch -- handy for timing how long a function takes
# Exercise the pytz package.
utc = tz.utc
dir(utc)
utc.dst(dt.datetime.now()) # always timedelta(0): UTC never observes DST (regardless of where you are)
now_with_tz = utc.localize(dt.datetime.now()) # attach the UTC zone to a naive datetime
print(now_with_tz)
# List every zone pytz knows about, then grab a handful of them.
for tzz in tz.all_timezones:
    print(tzz)
eastern = tz.timezone('US/Eastern')
deutsche = tz.timezone('Europe/Berlin')
ams = tz.timezone('Europe/Amsterdam')
chicago = tz.timezone('US/Central')
cet = tz.timezone('CET')
ldn = tz.timezone('Europe/London')
hkg = tz.timezone('Asia/Hong_Kong')
# Midnight in London is AFTER midnight in Hong Kong by roughly 8 hours...
hkg_ldn_diff = dt.datetime(2019, 11, 11, tzinfo=ldn) - dt.datetime(2019, 11, 11, tzinfo=hkg)
# ...but not exactly 8: passing a pytz zone via tzinfo= picks the zone's first
# historical (local-mean-time) offset rather than the modern one -- see the
# pytz docs linked at the bottom of this file.
hkg_ldn_diff.total_seconds() / 60 / 60 # ~7.63, not 8, because of the LMT pitfall above
fmt = '%Y-%m-%d %H:%M:%S %Z%z' # %Z = zone name, %z = numeric UTC offset
# Correct way to build a localized time: localize() a naive datetime.
loc_dt = cet.localize(dt.datetime(2019, 11, 11, 6, 0, 0))
# tzinfo= in the constructor misbehaves for most pytz zones (same LMT pitfall):
date_obj_ldn = dt.datetime(2019, 11, 11, 0, 0, 0, tzinfo=ldn)
date_obj_ldn.astimezone(hkg)
# The preferred way of dealing with times is to always work in UTC,
# converting to localtime only when generating output to be read by humans.
utc_dt = dt.datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
loc_dt_to_print = utc_dt.astimezone(eastern) # for display only -- DO NOT do arithmetic with this object
# read more here - it's a mess really - but no other way of dealing with it
# http://pytz.sourceforge.net/
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.