repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
nens/threedi-qgis-plugin | tool_commands/import_sufhyd/command.py | 1 | 1869 | # (c) Nelen & Schuurmans, see LICENSE.rst.
from ThreeDiToolbox.tool_commands.custom_command_base import CustomCommandBase
from ThreeDiToolbox.tool_commands.import_sufhyd.import_sufhyd_dialog import (
ImportSufhydDialogWidget,
)
from ThreeDiToolbox.tool_commands.import_sufhyd.import_sufhyd_main import Importer
from ThreeDiToolbox.utils.threedi_database import ThreediDatabase
import inspect
import logging
logger = logging.getLogger(__name__)
class CustomCommand(CustomCommandBase):
    """Tool command that imports a sufhyd file into a 3Di database.

    NOTE(review): the original docstring talked about NcStats/NcStatsAgg
    selection behaviour, which looks copy-pasted from another command;
    confirm whether that note still applies here.
    """

    class Fields(object):
        # Command metadata; collected as (name, value) pairs in __init__.
        name = "Import sufhyd"
        value = 1

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        # Collect the public, non-callable attributes of Fields as sorted
        # (name, value) pairs.
        self._fields = sorted(
            [
                (name, cl)
                for name, cl in inspect.getmembers(
                    self.Fields, lambda a: not (inspect.isroutine(a))
                )
                if not name.startswith("__") and not name.startswith("_")
            ]
        )
        self.iface = kwargs.get("iface")  # QGIS interface; may be None
        self.ts_datasources = kwargs.get("ts_datasources")
        self.tool_dialog_widget = None

    def run(self):
        self.show_gui()

    def show_gui(self):
        # Modal dialog; the widget calls run_it() back with the user input.
        self.tool_dialog_widget = ImportSufhydDialogWidget(
            iface=self.iface, ts_datasources=self.ts_datasources, command=self
        )
        self.tool_dialog_widget.exec_()  # block execution

    def run_it(self, sufhyd_file, db_set, db_type):
        """Import ``sufhyd_file`` into the database given by db_set/db_type."""
        # todo: check if database is empty, otherwise popup
        db = ThreediDatabase(db_set, db_type)
        importer = Importer(sufhyd_file, db)
        importer.run_import()
        # todo: show logging
| gpl-3.0 |
gilamsalem/pynfs | nfs4.0/servertests/st_fslocations.py | 3 | 6570 | from nfs4_const import *
from nfs4lib import list2bitmap
from environment import check
def testReference(t, env):
    """FSLOCATION test of referral node

    This assumes option --usespecial was set to point to correct path

    FLAGS: fslocations
    CODE: FSLOC1
    """
    # NOTE: the FLAGS/CODE lines in the docstring are parsed by the runner.
    c = env.c1
    path = env.opts.usespecial
    # GETFH after every LOOKUP so crossing the referral itself is probed.
    ops = [c.putrootfh_op(), c.getfh_op()]
    for comp in path:
        ops += [c.lookup_op(comp), c.getfh_op()]
    res = c.compound(ops)
    check(res, NFS4ERR_MOVED, "GETFH of path indicated by --usespecial")
    # After NFS4ERR_MOVED the client asks where the filesystem now lives.
    locs = c.do_getattr(FATTR4_FS_LOCATIONS, path)
    print "After NFS4ERR_MOVED, GETATTR(fs_locations) = %s" % locs
def testReference2(t, env):
    """FSLOCATION test of referral node

    This assumes option --usespecial was set to point to correct path

    FLAGS: fslocations
    CODE: FSLOC2
    """
    c = env.c1
    path = env.opts.usespecial
    # Query fs_locations directly, without first provoking NFS4ERR_MOVED.
    locs = c.do_getattr(FATTR4_FS_LOCATIONS, path)
    print "After NFS4ERR_MOVED, GETATTR(fs_locations) = %s" % locs
def testReference3(t, env):
    """FSLOCATION test of referral node

    This assumes option --usespecial was set to point to correct path

    FLAGS: fslocations
    CODE: FSLOC3
    """
    c = env.c1
    # NOTE(review): `path` is computed but unused; the GETATTR below is done
    # on c.homedir (a non-referral object) -- confirm that is intended.
    path = env.opts.usespecial
    locs = c.do_getattr(FATTR4_FS_LOCATIONS, c.homedir)
    print "After NFS4ERR_MOVED, GETATTR(fs_locations) = %s" % locs
def testAttr1a(t, env):
    """GETATTR with attributes should return _MOVED

    FLAGS: fslocations
    CODE: FSLOC4a
    """
    c = env.c1
    path = env.opts.usespecial
    # Neither FS_LOCATIONS nor RDATTR_ERROR is requested, so the GETATTR on
    # the referral must fail outright with NFS4ERR_MOVED.
    attrlist = [FATTR4_SIZE, FATTR4_FILEHANDLE, FATTR4_FSID]
    ops = c.use_obj(path)
    ops += [c.getattr(attrlist)]
    res = c.compound(ops)
    check(res, NFS4ERR_MOVED, "GETATTR w/o FSLOC or RDATTR_ERROR")
def testAttr1b(t, env):
    """READDIR with attributes should return _MOVED

    FLAGS: fslocations
    CODE: FSLOC4b
    """
    c = env.c1
    c.init_connection()
    # READDIR the parent directory of the referral node.
    path = env.opts.usespecial[:-1]
    attrlist = [FATTR4_SIZE, FATTR4_FILEHANDLE, FATTR4_FSID]
    ops = c.use_obj(path)
    ops += [c.readdir_op(0, '', 4096, 4096, list2bitmap(attrlist))]
    res = c.compound(ops)
    check(res, NFS4ERR_MOVED, "READDIR w/o FSLOC or RDATTR_ERROR")
def testAttr2a(t, env):
    """GETATTR with no FSLOC but with RDATTR_ERROR should return _MOVED

    FLAGS: fslocations
    CODE: FSLOC5a
    """
    c = env.c1
    path = env.opts.usespecial
    # RDATTR_ERROR only helps READDIR; a direct GETATTR still gets _MOVED.
    attrlist = [FATTR4_SIZE, FATTR4_FILEHANDLE, FATTR4_RDATTR_ERROR, FATTR4_FSID]
    ops = c.use_obj(path)
    ops += [c.getattr(attrlist)]
    res = c.compound(ops)
    check(res, NFS4ERR_MOVED, "GETATTR w/o FSLOC but with RDATTR_ERROR")
def testAttr2b(t, env):
    """READDIR with no FSLOC but with RDATTR_ERROR should put _MOVED in
    _RDATTR_ERROR and return what it can

    FLAGS: fslocations
    CODE: FSLOC5b
    """
    c = env.c1
    c.init_connection()
    path = env.opts.usespecial[:-1]
    attrlist = [FATTR4_SIZE, FATTR4_FILEHANDLE, FATTR4_RDATTR_ERROR, FATTR4_FSID]
    entries = c.do_readdir(path, attr_request=attrlist)
    # Referral entries report _MOVED via RDATTR_ERROR ...
    moved = [e for e in entries if e.attrdict[FATTR4_RDATTR_ERROR] == NFS4ERR_MOVED]
    print "RDATTR==MOVED for:", [e.name for e in moved]
    # ... and then only RDATTR_ERROR and FSID (2 attrs) can be returned.
    for e in moved:
        if len(e.attrdict) != 2:
            print e.attrdict
            t.fail("Expected 2 attrs returned for file %s, got %i" % (e.name, len(e.attrdict)))
def testAttr3a(t, env):
    """GETATTR with restricted attrs but no FSLOC should work

    FLAGS: fslocations
    CODE: FSLOC6a
    """
    c = env.c1
    path = env.opts.usespecial
    # Only attributes a referral node can serve are requested, so the
    # compound must succeed (NFS4_OK).
    attrlist = [FATTR4_RDATTR_ERROR, FATTR4_FSID, FATTR4_MOUNTED_ON_FILEID]
    ops = c.use_obj(path)
    ops += [c.getattr(attrlist)]
    res = c.compound(ops)
    check(res, msg = "GETATTR w/o FSLOC but only restricted attrs")
def testAttr3b(t, env):
    """READDIR with restricted attrs but no FSLOC should work

    FLAGS: fslocations
    CODE: FSLOC6b
    """
    c = env.c1
    c.init_connection()
    path = env.opts.usespecial[:-1]
    attrlist = [FATTR4_RDATTR_ERROR, FATTR4_FSID, FATTR4_MOUNTED_ON_FILEID]
    entries = c.do_readdir(path, attr_request=attrlist)
    # Pick the referral entry itself out of the directory listing.
    moved = [e for e in entries if e.name == env.opts.usespecial[-1]][0]
    if len(moved.attrdict) != 3:
        print moved.attrdict
        t.fail("Expected 3 attrs returned for file %s, got %i" % (moved.name, len(moved.attrdict)))
def testAttr4a(t, env):
    """GETATTR with FSLOC and RDATTR_ERROR should return what can

    FLAGS: fslocations
    CODE: FSLOC7a
    """
    c = env.c1
    path = env.opts.usespecial
    attrlist = [FATTR4_SIZE, FATTR4_FILEHANDLE, FATTR4_RDATTR_ERROR, FATTR4_FSID, FATTR4_FS_LOCATIONS]
    d = c.do_getattrdict(path, attrlist)
    print d
    # SIZE and FILEHANDLE are dropped on a referral; RDATTR_ERROR, FSID and
    # FS_LOCATIONS (3 attrs) remain.
    if len(d) != 3:
        t.fail("Expected 3 attrs returned, got %i" % len(d))
def testAttr4b(t, env):
    """READDIR with FSLOC and RDATTR_ERROR should put _MOVED in
    _RDATTR_ERROR and return what it can

    FLAGS: fslocations
    CODE: FSLOC7b
    """
    c = env.c1
    c.init_connection()
    path = env.opts.usespecial[:-1]
    attrlist = [FATTR4_SIZE, FATTR4_FILEHANDLE, FATTR4_RDATTR_ERROR, FATTR4_FSID, FATTR4_FS_LOCATIONS]
    entries = c.do_readdir(path, attr_request=attrlist)
    moved = [e for e in entries if e.attrdict[FATTR4_RDATTR_ERROR] == NFS4ERR_MOVED]
    print "RDATTR==MOVED for:", [e.name for e in moved]
    # Each moved entry should carry RDATTR_ERROR, FSID and FS_LOCATIONS.
    for e in moved:
        if len(e.attrdict) != 3:
            print e.attrdict
            t.fail("Expected 3 attrs returned for file %s, got %i" % (e.name, len(e.attrdict)))
def testAttr5a(t, env):
"""GETATTR with FSLOC but no RDATTR_ERROR should return what can
FLAGS: fslocations
CODE: FSLOC8a
"""
c = env.c1
path = env.opts.usespecial
attrlist = [FATTR4_SIZE, FATTR4_FILEHANDLE, FATTR4_FSID, FATTR4_FS_LOCATIONS]
d = c.do_getattrdict(path, attrlist)
print d
if len(d) != 2:
t.fail("Expected 3 attrs returned, got %i" % len(d))
def testAttr5b(t, env):
    """READDIR with FSLOC but no RDATTR_ERROR should put _MOVED in
    _RDATTR_ERROR and return what it can

    FLAGS: fslocations
    CODE: FSLOC8b
    """
    # NOTE(review): despite the docstring, RDATTR_ERROR is not requested
    # here; the check below only counts the attrs actually returned
    # (FSID + FS_LOCATIONS).
    c = env.c1
    c.init_connection()
    path = env.opts.usespecial[:-1]
    attrlist = [FATTR4_SIZE, FATTR4_FILEHANDLE, FATTR4_FSID, FATTR4_FS_LOCATIONS]
    entries = c.do_readdir(path, attr_request=attrlist)
    # Pick the referral entry itself out of the directory listing.
    moved = [e for e in entries if e.name == env.opts.usespecial[-1]][0]
    if len(moved.attrdict) != 2:
        print moved.attrdict
        t.fail("Expected 2 attrs returned for file %s, got %i" % (moved.name, len(moved.attrdict)))
| gpl-2.0 |
tensorflow/model-analysis | tensorflow_model_analysis/metrics/query_statistics.py | 1 | 5733 | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Query statistics metrics."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
from typing import Dict, Iterable, Text
import apache_beam as beam
from tensorflow_model_analysis.metrics import metric_types
# Default MetricKey names for the four query statistics produced below.
TOTAL_QUERIES_NAME = 'total_queries'
TOTAL_DOCUMENTS_NAME = 'total_documents'
MIN_DOCUMENTS_NAME = 'min_documents'
MAX_DOCUMENTS_NAME = 'max_documents'
class QueryStatistics(metric_types.Metric):
  """Query statistic metrics.

  These metrics are query/ranking based so a query_key must also be provided
  in the associated metrics spec.
  """

  def __init__(self,
               total_queries_name=TOTAL_QUERIES_NAME,
               total_documents_name=TOTAL_DOCUMENTS_NAME,
               min_documents_name=MIN_DOCUMENTS_NAME,
               max_documents_name=MAX_DOCUMENTS_NAME):
    """Initializes query statistics metrics.

    Args:
      total_queries_name: Total queries metric name.
      total_documents_name: Total documents metric name.
      min_documents_name: Min documents name.
      max_documents_name: Max documents name.
    """
    # _query_statistics (below) is the factory that builds the actual
    # computations; the names are forwarded to it as keyword arguments.
    super(QueryStatistics, self).__init__(
        _query_statistics,
        total_queries_name=total_queries_name,
        total_documents_name=total_documents_name,
        min_documents_name=min_documents_name,
        max_documents_name=max_documents_name)
# Make QueryStatistics available to the metric registry (presumably looked
# up by class name from metrics specs -- see metric_types).
metric_types.register_metric(QueryStatistics)
def _query_statistics(total_queries_name=TOTAL_QUERIES_NAME,
                      total_documents_name=TOTAL_DOCUMENTS_NAME,
                      min_documents_name=MIN_DOCUMENTS_NAME,
                      max_documents_name=MAX_DOCUMENTS_NAME,
                      query_key: Text = '') -> metric_types.MetricComputations:
  """Returns metric computations for query statistics.

  Args:
    total_queries_name: Metric name for the query count.
    total_documents_name: Metric name for the total document count.
    min_documents_name: Metric name for the per-query minimum document count.
    max_documents_name: Metric name for the per-query maximum document count.
    query_key: Key used to group examples into queries; must be non-empty.

  Raises:
    ValueError: If no query_key was configured in the metrics spec.
  """
  if not query_key:
    raise ValueError('a query_key is required to use QueryStatistics metrics')
  total_queries_key = metric_types.MetricKey(name=total_queries_name)
  total_documents_key = metric_types.MetricKey(name=total_documents_name)
  min_documents_key = metric_types.MetricKey(name=min_documents_name)
  max_documents_key = metric_types.MetricKey(name=max_documents_name)
  # A single combiner fills in all four metric keys in one pass.
  return [
      metric_types.MetricComputation(
          keys=[
              total_queries_key, total_documents_key, min_documents_key,
              max_documents_key
          ],
          preprocessor=None,  # combiner reads element.prediction directly
          combiner=_QueryStatisticsCombiner(total_queries_key,
                                            total_documents_key,
                                            min_documents_key,
                                            max_documents_key))
  ]
class _QueryStatisticsAccumulator(object):
  """Mutable accumulator for per-query document-count statistics."""

  __slots__ = [
      'total_queries', 'total_documents', 'min_documents', 'max_documents'
  ]

  # Sentinel larger than any realistic per-query document count, so the
  # first real observation always replaces it via min().
  LARGE_INT = 1000000000

  def __init__(self):
    # All counters start at zero; the minimum starts at the sentinel.
    self.total_queries = self.total_documents = self.max_documents = 0
    self.min_documents = self.LARGE_INT
class _QueryStatisticsCombiner(beam.CombineFn):
  """Computes query statistics metrics."""

  def __init__(self, total_queries_key: metric_types.MetricKey,
               total_documents_key: metric_types.MetricKey,
               min_documents_key: metric_types.MetricKey,
               max_documents_key: metric_types.MetricKey):
    self._total_queries_key = total_queries_key
    self._total_documents_key = total_documents_key
    self._min_documents_key = min_documents_key
    self._max_documents_key = max_documents_key

  def create_accumulator(self) -> _QueryStatisticsAccumulator:
    return _QueryStatisticsAccumulator()

  def add_input(
      self, accumulator: _QueryStatisticsAccumulator,
      element: metric_types.StandardMetricInputs
  ) -> _QueryStatisticsAccumulator:
    # Each element is one query; len(element.prediction) is taken as the
    # number of documents returned for that query.
    accumulator.total_queries += 1
    num_documents = len(element.prediction)
    accumulator.total_documents += num_documents
    accumulator.min_documents = min(accumulator.min_documents, num_documents)
    accumulator.max_documents = max(accumulator.max_documents, num_documents)
    return accumulator

  def merge_accumulators(
      self, accumulators: Iterable[_QueryStatisticsAccumulator]
  ) -> _QueryStatisticsAccumulator:
    # Fold everything into the first accumulator; mutating an input
    # accumulator is permitted by the beam CombineFn contract.
    accumulators = iter(accumulators)
    result = next(accumulators)
    for accumulator in accumulators:
      result.total_queries += accumulator.total_queries
      result.total_documents += accumulator.total_documents
      result.min_documents = min(result.min_documents,
                                 accumulator.min_documents)
      result.max_documents = max(result.max_documents,
                                 accumulator.max_documents)
    return result

  def extract_output(
      self, accumulator: _QueryStatisticsAccumulator
  ) -> Dict[metric_types.MetricKey, int]:
    # NOTE(review): with zero inputs min_documents is still the LARGE_INT
    # sentinel; confirm an empty accumulator is never extracted.
    return {
        self._total_queries_key: accumulator.total_queries,
        self._total_documents_key: accumulator.total_documents,
        self._min_documents_key: accumulator.min_documents,
        self._max_documents_key: accumulator.max_documents
    }
| apache-2.0 |
blademainer/intellij-community | python/lib/Lib/mimify.py | 102 | 14885 | #! /usr/bin/env python
"""Mimification and unmimification of mail messages.
Decode quoted-printable parts of a mail message or encode using
quoted-printable.
Usage:
mimify(input, output)
unmimify(input, output, decode_base64 = 0)
to encode and decode respectively. Input and output may be the name
of a file or an open file object. Only a readline() method is used
on the input file, only a write() method is used on the output file.
When using file names, the input and output file names may be the
same.
Interactive usage:
mimify.py -e [infile [outfile]]
mimify.py -d [infile [outfile]]
to encode and decode respectively. Infile defaults to standard
input and outfile to standard output.
"""
# Configure
# Tunables for the encoder/decoder; MAXLEN may be overridden via the -l
# command-line option in the script entry point below.
MAXLEN = 200        # if lines longer than this, encode as quoted-printable
CHARSET = 'ISO-8859-1'  # default charset for non-US-ASCII mail
QUOTE = '> '        # string replies are quoted with
# End configure
import re
__all__ = ["mimify", "unmimify", "mime_encode_header", "mime_decode_header"]

# Pre-compiled header/body patterns (all case-insensitive):
qp = re.compile('^content-transfer-encoding:\\s*quoted-printable', re.I)  # QP part
base64_re = re.compile('^content-transfer-encoding:\\s*base64', re.I)  # base64 part
mp = re.compile('^content-type:.*multipart/.*boundary="?([^;"\n]*)', re.I | re.S)  # multipart boundary
chrset = re.compile('^(content-type:.*charset=")(us-ascii|iso-8859-[0-9]+)(".*)', re.I | re.S)
he = re.compile('^-*\n')  # end of header: empty line (or line of dashes)
mime_code = re.compile('=([0-9a-f][0-9a-f])', re.I)  # =XX quoted-printable escape
mime_head = re.compile('=\\?iso-8859-1\\?q\\?([^? \t\n]+)\\?=', re.I)  # RFC 2047 encoded word
repl = re.compile('^subject:\\s+re: ', re.I)  # reply Subject line
class File:
    """A minimal file-like wrapper that stops reading at a MIME boundary.

    Only readline() is supported.  When a boundary line (or the closing
    "boundary--" line) is read, it is stashed in self.peek and '' is
    returned; every later readline() also returns ''.
    """

    def __init__(self, file, boundary):
        self.file = file
        self.boundary = boundary
        self.peek = None  # holds the boundary line once it has been seen

    def readline(self):
        # Once a boundary has been stashed, this part is exhausted.
        if self.peek is not None:
            return ''
        line = self.file.readline()
        if line and self.boundary and \
                line in (self.boundary + '\n', self.boundary + '--\n'):
            self.peek = line
            return ''
        return line
class HeaderFile:
    """Wrap a file and unfold continuation lines in a message header.

    readline() returns one logical header line; physical lines starting
    with a space or tab are joined onto the previous line.  A one-line
    look-ahead is kept in self.peek.
    """

    def __init__(self, file):
        self.file = file
        self.peek = None

    def readline(self):
        # Use the pushed-back look-ahead line from the previous call, if any.
        if self.peek is not None:
            line = self.peek
            self.peek = None
        else:
            line = self.file.readline()
        if not line:
            return line
        # The end-of-header marker is returned as-is, never folded.
        if he.match(line):
            return line
        # Absorb continuation lines (leading space/tab) into this line.
        while 1:
            self.peek = self.file.readline()
            if len(self.peek) == 0 or \
               (self.peek[0] != ' ' and self.peek[0] != '\t'):
                return line
            line = line + self.peek
            self.peek = None
def mime_decode(line):
    """Decode a single line of quoted-printable text to 8bit."""
    # Replace each =XX escape (matched by the module-level mime_code
    # pattern) with the character it encodes; all other text is kept
    # unchanged.  Matches are non-overlapping, left to right, exactly as
    # the original manual search loop processed them.
    return mime_code.sub(lambda res: chr(int(res.group(1), 16)), line)
def mime_decode_header(line):
    """Decode a header line to 8bit."""
    # Find RFC 2047 encoded words (=?iso-8859-1?q?...?=), turn their
    # underscores into spaces, then quoted-printable-decode the payload.
    newline = ''
    pos = 0
    while 1:
        res = mime_head.search(line, pos)
        if res is None:
            break
        match = res.group(1)
        # convert underscores to spaces (before =XX conversion!)
        match = ' '.join(match.split('_'))
        newline = newline + line[pos:res.start(0)] + mime_decode(match)
        pos = res.end(0)
    return newline + line[pos:]
def unmimify_part(ifile, ofile, decode_base64 = 0):
    """Convert a quoted-printable part of a MIME mail message to 8bit.

    ifile is a File wrapper bounded by this part's multipart boundary (if
    any); nested multipart parts are handled by recursing with a new File
    wrapper.  Quoted reply prefixes (QUOTE) are stripped before decoding
    and re-added on output.
    """
    multipart = None
    quoted_printable = 0
    is_base64 = 0
    is_repl = 0
    # If the enclosing boundary itself is quoted, this part is quoted too.
    if ifile.boundary and ifile.boundary[:2] == QUOTE:
        prefix = QUOTE
    else:
        prefix = ''

    # read header
    hfile = HeaderFile(ifile)
    while 1:
        line = hfile.readline()
        if not line:
            return
        if prefix and line[:len(prefix)] == prefix:
            line = line[len(prefix):]
            pref = prefix
        else:
            pref = ''
        line = mime_decode_header(line)
        # Drop the CTE header for encodings we are about to undo.
        if qp.match(line):
            quoted_printable = 1
            continue        # skip this header
        if decode_base64 and base64_re.match(line):
            is_base64 = 1
            continue
        ofile.write(pref + line)
        if not prefix and repl.match(line):
            # we're dealing with a reply message
            is_repl = 1
        mp_res = mp.match(line)
        if mp_res:
            multipart = '--' + mp_res.group(1)
        if he.match(line):
            break
    if is_repl and (quoted_printable or multipart):
        is_repl = 0

    # read body
    while 1:
        line = ifile.readline()
        if not line:
            return
        # Strip any encoded-word framing left in the body.
        line = re.sub(mime_head, '\\1', line)
        if prefix and line[:len(prefix)] == prefix:
            line = line[len(prefix):]
            pref = prefix
        else:
            pref = ''
##      if is_repl and len(line) >= 4 and line[:4] == QUOTE+'--' and line[-3:] != '--\n':
##          multipart = line[:-1]
        while multipart:
            if line == multipart + '--\n':
                # closing boundary: this multipart is done.
                ofile.write(pref + line)
                multipart = None
                line = None
                break
            if line == multipart + '\n':
                # inner boundary: recurse into the next sub-part.
                ofile.write(pref + line)
                nifile = File(ifile, multipart)
                unmimify_part(nifile, ofile, decode_base64)
                line = nifile.peek
                if not line:
                    # premature end of file
                    break
                continue
            # not a boundary between parts
            break
        if line and quoted_printable:
            # Re-join soft line breaks ("=\n") before decoding.
            while line[-2:] == '=\n':
                line = line[:-2]
                newline = ifile.readline()
                if newline[:len(QUOTE)] == QUOTE:
                    newline = newline[len(QUOTE):]
                line = line + newline
            line = mime_decode(line)
        if line and is_base64 and not pref:
            import base64
            # NOTE(review): base64.decodestring is the historical (py2)
            # name; on Python 3 this would be base64.decodebytes.
            line = base64.decodestring(line)
        if line:
            ofile.write(pref + line)
def unmimify(infile, outfile, decode_base64 = 0):
    """Convert quoted-printable parts of a MIME mail message to 8bit.

    infile/outfile may be file names or open file objects.  When both are
    the same file name, the input is first renamed to ',<name>' as a
    backup before the output file is created.
    """
    if type(infile) == type(''):
        ifile = open(infile)
        if type(outfile) == type('') and infile == outfile:
            import os
            d, f = os.path.split(infile)
            os.rename(infile, os.path.join(d, ',' + f))
    else:
        ifile = infile
    if type(outfile) == type(''):
        ofile = open(outfile, 'w')
    else:
        ofile = outfile
    # Wrap without a boundary: the whole input counts as one top part.
    nifile = File(ifile, None)
    unmimify_part(nifile, ofile, decode_base64)
    ofile.flush()
# Characters that must be escaped as =XX in quoted-printable output.
mime_char = re.compile('[=\177-\377]')              # quote these chars in body
mime_header_char = re.compile('[=?\177-\377]')      # quote these in header
def mime_encode(line, header):
    """Code a single line as quoted-printable.

    If header is set, quote some extra characters."""
    if header:
        reg = mime_header_char
    else:
        reg = mime_char
    newline = ''
    pos = 0
    if len(line) >= 5 and line[:5] == 'From ':
        # quote 'From ' at the start of a line for stupid mailers
        newline = ('=%02x' % ord('F')).upper()
        pos = 1
    # First pass: escape every character matched by reg as =XX.
    while 1:
        res = reg.search(line, pos)
        if res is None:
            break
        newline = newline + line[pos:res.start(0)] + \
                  ('=%02x' % ord(res.group(0))).upper()
        pos = res.end(0)
    line = newline + line[pos:]

    # Second pass: fold lines longer than 74 chars with soft breaks
    # ("=\n"), taking care not to split an =XX escape.
    newline = ''
    while len(line) >= 75:
        i = 73
        while line[i] == '=' or line[i-1] == '=':
            i = i - 1
        i = i + 1
        newline = newline + line[:i] + '=\n'
        line = line[i:]
    return newline + line
# A word inside a header that contains 8-bit characters: candidate for
# RFC 2047 encoded-word encoding in mime_encode_header().
mime_header = re.compile('([ \t(]|^)([-a-zA-Z0-9_+]*[\177-\377][-a-zA-Z0-9_+\177-\377]*)(?=[ \t)]|\n)')
def mime_encode_header(line):
    """Code a single header line as quoted-printable."""
    # Wrap each 8-bit word in an RFC 2047 encoded word: =?CHARSET?Q?...?=
    newline = ''
    pos = 0
    while 1:
        res = mime_header.search(line, pos)
        if res is None:
            break
        newline = '%s%s%s=?%s?Q?%s?=' % \
                  (newline, line[pos:res.start(0)], res.group(1),
                   CHARSET, mime_encode(res.group(2), 1))
        pos = res.end(0)
    return newline + line[pos:]
mv = re.compile('^mime-version:', re.I)    # MIME-Version header present
cte = re.compile('^content-transfer-encoding:', re.I)  # CTE header
iso_char = re.compile('[\177-\377]')       # any 8-bit (non-ASCII) byte
def mimify_part(ifile, ofile, is_mime):
    """Convert an 8bit part of a MIME mail message to quoted-printable.

    Reads the whole part (header, then body) into memory, decides whether
    quoting is needed, then writes the possibly re-encoded part to ofile.
    Nested multipart parts are handled by recursion at the end.
    """
    # Flags gathered while scanning the part.
    has_cte = is_qp = is_base64 = 0
    multipart = None
    must_quote_body = must_quote_header = has_iso_chars = 0

    header = []
    header_end = ''
    message = []
    message_end = ''
    # read header
    hfile = HeaderFile(ifile)
    while 1:
        line = hfile.readline()
        if not line:
            break
        if not must_quote_header and iso_char.search(line):
            must_quote_header = 1
        if mv.match(line):
            is_mime = 1
        if cte.match(line):
            has_cte = 1
            if qp.match(line):
                is_qp = 1
            elif base64_re.match(line):
                is_base64 = 1
        mp_res = mp.match(line)
        if mp_res:
            multipart = '--' + mp_res.group(1)
        if he.match(line):
            header_end = line
            break
        header.append(line)

    # read body (decode any existing QP so it can be re-encoded uniformly)
    while 1:
        line = ifile.readline()
        if not line:
            break
        if multipart:
            if line == multipart + '--\n':
                message_end = line
                break
            if line == multipart + '\n':
                message_end = line
                break
        if is_base64:
            # base64 bodies are copied through untouched.
            message.append(line)
            continue
        if is_qp:
            # Re-join soft line breaks ("=\n") before decoding.
            while line[-2:] == '=\n':
                line = line[:-2]
                newline = ifile.readline()
                if newline[:len(QUOTE)] == QUOTE:
                    newline = newline[len(QUOTE):]
                line = line + newline
            line = mime_decode(line)
        message.append(line)
        if not has_iso_chars:
            if iso_char.search(line):
                has_iso_chars = must_quote_body = 1
        if not must_quote_body:
            if len(line) > MAXLEN:
                must_quote_body = 1

    # convert and output header and body
    for line in header:
        if must_quote_header:
            line = mime_encode_header(line)
        chrset_res = chrset.match(line)
        if chrset_res:
            if has_iso_chars:
                # change us-ascii into iso-8859-1
                if chrset_res.group(2).lower() == 'us-ascii':
                    line = '%s%s%s' % (chrset_res.group(1),
                                       CHARSET,
                                       chrset_res.group(3))
            else:
                # change iso-8859-* into us-ascii
                line = '%sus-ascii%s' % chrset_res.group(1, 3)
        if has_cte and cte.match(line):
            # Rewrite the CTE header to reflect the output encoding.
            line = 'Content-Transfer-Encoding: '
            if is_base64:
                line = line + 'base64\n'
            elif must_quote_body:
                line = line + 'quoted-printable\n'
            else:
                line = line + '7bit\n'
        ofile.write(line)
    if (must_quote_header or must_quote_body) and not is_mime:
        # Add minimal MIME headers if the original message had none.
        ofile.write('Mime-Version: 1.0\n')
        ofile.write('Content-Type: text/plain; ')
        if has_iso_chars:
            ofile.write('charset="%s"\n' % CHARSET)
        else:
            ofile.write('charset="us-ascii"\n')
    if must_quote_body and not has_cte:
        ofile.write('Content-Transfer-Encoding: quoted-printable\n')
    ofile.write(header_end)

    for line in message:
        if must_quote_body:
            line = mime_encode(line, 0)
        ofile.write(line)
    ofile.write(message_end)

    # Handle the remaining sub-parts of a multipart message.
    line = message_end
    while multipart:
        if line == multipart + '--\n':
            # read bit after the end of the last part
            while 1:
                line = ifile.readline()
                if not line:
                    return
                if must_quote_body:
                    line = mime_encode(line, 0)
                ofile.write(line)
        if line == multipart + '\n':
            # Recurse into the next sub-part.
            nifile = File(ifile, multipart)
            mimify_part(nifile, ofile, 1)
            line = nifile.peek
            if not line:
                # premature end of file
                break
            ofile.write(line)
            continue
        # unexpectedly no multipart separator--copy rest of file
        while 1:
            line = ifile.readline()
            if not line:
                return
            if must_quote_body:
                line = mime_encode(line, 0)
            ofile.write(line)
def mimify(infile, outfile):
    """Convert 8bit parts of a MIME mail message to quoted-printable.

    infile/outfile may be file names or open file objects.  When both are
    the same file name, the input is first renamed to ',<name>' as a
    backup before the output file is created.
    """
    if type(infile) == type(''):
        ifile = open(infile)
        if type(outfile) == type('') and infile == outfile:
            import os
            d, f = os.path.split(infile)
            os.rename(infile, os.path.join(d, ',' + f))
    else:
        ifile = infile
    if type(outfile) == type(''):
        ofile = open(outfile, 'w')
    else:
        ofile = outfile
    # Wrap without a boundary: the whole input counts as one top part.
    nifile = File(ifile, None)
    mimify_part(nifile, ofile, 0)
    ofile.flush()
import sys
# Also run as a script when invoked via an embedded interpreter whose
# argv[0] is literally 'mimify' (historical idiom).
if __name__ == '__main__' or (len(sys.argv) > 0 and sys.argv[0] == 'mimify'):
    import getopt
    usage = 'Usage: mimify [-l len] -[ed] [infile [outfile]]'

    decode_base64 = 0
    opts, args = getopt.getopt(sys.argv[1:], 'l:edb')
    if len(args) not in (0, 1, 2):
        print usage
        sys.exit(1)
    # Exactly one of -e/-d is required, and -b only makes sense with -d.
    if (('-e', '') in opts) == (('-d', '') in opts) or \
       ((('-b', '') in opts) and (('-d', '') not in opts)):
        print usage
        sys.exit(1)
    for o, a in opts:
        if o == '-e':
            encode = mimify
        elif o == '-d':
            encode = unmimify
        elif o == '-l':
            # Override the module-level MAXLEN threshold.
            try:
                MAXLEN = int(a)
            except (ValueError, OverflowError):
                print usage
                sys.exit(1)
        elif o == '-b':
            decode_base64 = 1
    # Default to stdin/stdout when file arguments are omitted.
    if len(args) == 0:
        encode_args = (sys.stdin, sys.stdout)
    elif len(args) == 1:
        encode_args = (args[0], sys.stdout)
    else:
        encode_args = (args[0], args[1])
    if decode_base64:
        encode_args = encode_args + (decode_base64,)
    encode(*encode_args)
| apache-2.0 |
chemelnucfin/tensorflow | tensorflow/contrib/graph_editor/tests/transform_test.py | 23 | 11263 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import numpy as np
from tensorflow.contrib import graph_editor as ge
from tensorflow.contrib.graph_editor.tests import match
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Precision tolerance for floating-point value tests.
ERROR_TOLERANCE = 1e-3
class TransformTest(test.TestCase):
  def setUp(self):
    # Build a small graph: o = c2 + (c1 + (c0 + i)).  c0 carries a custom
    # "_foo" node attr, used by test_transform_nodedef_fn below.
    self.graph = ops.Graph()
    with self.graph.as_default():
      c0 = constant_op.constant(1.0, shape=[10], name="Const")
      c0.op._set_attr("_foo", attr_value_pb2.AttrValue(s=b"foo"))
      c1 = constant_op.constant(1.0, shape=[10], name="Const")
      c2 = constant_op.constant(1.0, shape=[10], name="Const")
      i = constant_op.constant(1.0, shape=[10], name="Input")
      self.o = math_ops.add(c2, math_ops.add(c1, math_ops.add(c0, i)))
  def test_copy(self):
    # Copy the whole graph and check that the returned TransformInfo maps
    # every op and tensor both forwards (transformed) and back (original).
    graph = ops.Graph()
    _, info = ge.copy(self.graph, graph)
    self.assertEqual(
        set(op.name for op in self.graph.get_operations()),
        set(op.name for op in graph.get_operations()))
    src_ops = self.graph.get_operations()
    dst_ops = graph.get_operations()
    for op in src_ops:
      op_ = info.transformed(op)
      self.assertTrue(op_ in dst_ops)
      self.assertEqual(op.name, op_.name)
      self.assertEqual(info.original(op_), op)
    src_ts = ge.util.get_tensors(self.graph)
    dst_ts = ge.util.get_tensors(graph)
    for t in src_ts:
      t_ = info.transformed(t)
      self.assertTrue(t_ in dst_ts)
      self.assertEqual(t.name, t_.name)
      self.assertEqual(info.original(t_), t)
  def test_copy_assert(self):
    # An Assert op reached only via a control dependency must still be
    # copied and retrievable through the returned info mapping.
    ops.reset_default_graph()
    a = constant_op.constant(1)
    b = constant_op.constant(1)
    eq = math_ops.equal(a, b)
    assert_op = control_flow_ops.Assert(eq, [a, b])
    with ops.control_dependencies([assert_op]):
      _ = math_ops.add(a, b)
    sgv = ge.make_view([assert_op, eq.op, a.op, b.op])
    copier = ge.Transformer()
    _, info = copier(sgv, sgv.graph, "", "")
    new_assert_op = info.transformed(assert_op)
    self.assertIsNotNone(new_assert_op)
  def test_transform(self):
    # Custom op handler that appends a "Noise" add after every op whose
    # name starts with "Add"; the matchers then verify the rewritten
    # topology.  Note the expected op names ("Add_1", "Noise_2", ...)
    # depend on the exact op-creation order from setUp.
    transformer = ge.Transformer()

    def my_transform_op_handler(info, op, new_inputs):
      add_noise = op.name.startswith("Add")
      op_, op_outputs_ = ge.transform.copy_op_handler(info, op, new_inputs)
      if not add_noise:
        return op_, op_outputs_

      # add some noise to op
      with info.graph_.as_default():
        t_ = math_ops.add(
            constant_op.constant(1.0, shape=[10], name="Noise"),
            op_.outputs[0],
            name="AddNoise")
      # return the "noisy" op
      return op_, [t_]

    transformer.transform_op_handler = my_transform_op_handler
    graph = ops.Graph()
    transformer(self.graph, graph, "", "")
    matcher0 = match.OpMatcher("AddNoise").input_ops(
        "Noise", match.OpMatcher("Add").input_ops("Const", "Input"))
    matcher1 = match.OpMatcher("AddNoise_1").input_ops(
        "Noise_1", match.OpMatcher("Add_1").input_ops("Const_1", matcher0))
    matcher2 = match.OpMatcher("AddNoise_2").input_ops(
        "Noise_2", match.OpMatcher("Add_2").input_ops("Const_2", matcher1))
    top = ge.select_ops("^AddNoise_2$", graph=graph)[0]
    self.assertTrue(matcher2(top))
  def test_transform_nodedef_fn(self):
    # A nodedef_fn lets the copy handler rewrite NodeDef attrs: here the
    # "_foo" attr (set on c0 in setUp) is dropped and "_bar" added to all.
    transformer = ge.Transformer()

    def nodedef_fn(node_def):
      if "_foo" in node_def.attr:
        del node_def.attr["_foo"]
      node_def.attr["_bar"].s = b"bar"
      return node_def

    my_copy_op_handler = functools.partial(
        ge.transform.copy_op_handler, nodedef_fn=nodedef_fn)
    transformer.transform_op_handler = my_copy_op_handler
    graph = ops.Graph()
    transformer(self.graph, graph, "", "")
    c0_before = self.graph.get_operation_by_name("Const")
    c0_after = graph.get_operation_by_name("Const")
    self.assertEquals(c0_before.get_attr("_foo"), b"foo")
    # "_foo" must be gone from the copy ...
    with self.assertRaises(ValueError):
      c0_after.get_attr("_foo")
    # ... and "_bar" present on every copied op.
    all_ops = graph.get_operations()
    for op in all_ops:
      self.assertEquals(op.get_attr("_bar"), b"bar")
  def test_copy_with_input_replacements(self):
    # Replace one input of self.o with a tensor of tens; each output
    # element becomes 1 + 10 = 11 (hence the norm against [11]).
    with self.graph.as_default():
      ten = constant_op.constant(10.0, shape=[10], name="Input")
      sgv, _ = ge.copy_with_input_replacements(self.o.op,
                                               {self.o.op.inputs[1]: ten})
      with session.Session() as sess:
        val = sess.run(sgv.outputs[0])
      self.assertNear(
          np.linalg.norm(val - np.array([11])), 0.0, ERROR_TOLERANCE)
  def test_graph_replace(self):
    # Swapping a (1.0) for a_new (2.0) should raise c by exactly 1.0.
    ops.reset_default_graph()
    a = constant_op.constant(1.0, name="a")
    b = variables.Variable(1.0, name="b")
    eps = constant_op.constant(0.001, name="eps")
    c = array_ops.identity(a + b + eps, name="c")
    a_new = constant_op.constant(2.0, name="a_new")
    c_new = ge.graph_replace(c, {a: a_new})
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      c_val, c_new_val = sess.run([c, c_new])
    self.assertNear(c_val, 2.001, ERROR_TOLERANCE)
    self.assertNear(c_new_val, 3.001, ERROR_TOLERANCE)
  def test_graph_replace_dict(self):
    # graph_replace should preserve a dict container: same keys, with the
    # values replaced by the transformed tensors.
    ops.reset_default_graph()
    a = constant_op.constant(1.0, name="a")
    b = variables.Variable(1.0, name="b")
    eps = constant_op.constant(0.001, name="eps")
    c = array_ops.identity(a + b + eps, name="c")
    a_new = constant_op.constant(2.0, name="a_new")
    c_new = ge.graph_replace({"c": c}, {a: a_new})
    self.assertTrue(isinstance(c_new, dict))
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      c_val, c_new_val = sess.run([c, c_new])
    self.assertTrue(isinstance(c_new_val, dict))
    self.assertNear(c_val, 2.001, ERROR_TOLERANCE)
    self.assertNear(c_new_val["c"], 3.001, ERROR_TOLERANCE)
  def test_graph_replace_ordered_dict(self):
    # The container type must be preserved exactly (OrderedDict in,
    # OrderedDict out), not downgraded to a plain dict.
    ops.reset_default_graph()
    a = constant_op.constant(1.0, name="a")
    b = variables.Variable(1.0, name="b")
    eps = constant_op.constant(0.001, name="eps")
    c = array_ops.identity(a + b + eps, name="c")
    a_new = constant_op.constant(2.0, name="a_new")
    c_new = ge.graph_replace(collections.OrderedDict({"c": c}), {a: a_new})
    self.assertTrue(isinstance(c_new, collections.OrderedDict))
  def test_graph_replace_named_tuple(self):
    # Namedtuple containers must also round-trip through graph_replace.
    ops.reset_default_graph()
    a = constant_op.constant(1.0, name="a")
    b = variables.Variable(1.0, name="b")
    eps = constant_op.constant(0.001, name="eps")
    c = array_ops.identity(a + b + eps, name="c")
    a_new = constant_op.constant(2.0, name="a_new")
    one_tensor = collections.namedtuple("OneTensor", ["t"])
    c_new = ge.graph_replace(one_tensor(c), {a: a_new})
    self.assertTrue(isinstance(c_new, one_tensor))
  def test_graph_replace_missing(self):
    # A target that does not depend on the replaced tensor (b) is returned
    # unchanged; only c is rebuilt (hence the "add_1" name).
    ops.reset_default_graph()
    a = constant_op.constant(1.0, name="a")
    b = constant_op.constant(2.0, name="b")
    c = a + 2 * b
    d = constant_op.constant(2.0, name="d")
    res = ge.graph_replace([b, c], {a: d})
    self.assertEqual(res[0].name, "b:0")
    self.assertEqual(res[1].name, "add_1:0")
  def test_graph_replace_gradients(self):
    """graph_replace copies gradient ops and preserves their _original_op links."""
    ops.reset_default_graph()
    w = variables.VariableV1(0.0, name="w")
    y = math_ops.multiply(math_ops.multiply(w, w, name="mul1"), w, name="mul2")
    g = gradients_impl.gradients(y, w, name="grad")[0]
    # Replace every read of `w` inside the gradient graph by the gradient itself.
    replacement_ts = {w.value(): g}
    original_mul1_grad = (ops.get_default_graph().
                          get_operation_by_name("grad/mul1_grad/Mul_1"))
    # Should not raise exception.
    res = ge.graph_replace(g, replacement_ts, dst_scope="res")
    # Extract the operations after graph_replace.
    result_mul1_grad = (ops.get_default_graph().
                        get_operation_by_name("res/grad/mul1_grad/Mul_1"))
    # Make sure _original_ops are as expected: the copied gradient op must
    # point at the *copied* forward op, not the original one.
    self.assertEqual(original_mul1_grad._original_op.name, u"mul1")
    self.assertEqual(result_mul1_grad._original_op.name, u"res/mul1")
    self.assertNotEqual(res.name, g.name)
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      g_val, res_val = sess.run([g, res])
      # d/dw (w^3) = 3 w^2 = 0 at w = 0, for both the original and the copy.
      self.assertNear(g_val, 0.0, ERROR_TOLERANCE)
      self.assertNear(res_val, 0.0, ERROR_TOLERANCE)
  def test_graph_while_loop(self):
    """ge.copy transplants a tf.while_loop into a fresh graph intact."""
    graph = ops.Graph()
    with graph.as_default():
      max_index = array_ops.placeholder(dtype=dtypes.int32, shape=tuple())
      index_start = constant_op.constant(1)
      sum_start = constant_op.constant(0)
      # Computes sum(1..max_index) with a data-dependent loop bound.
      _, result = control_flow_ops.while_loop(
          cond=lambda i, unused_s: i <= max_index,
          body=lambda i, s: (i + 1, s + i),
          loop_vars=[index_start, sum_start])
    copied_graph = ops.Graph()
    _, copy_info = ge.copy(
        graph, dst_graph=copied_graph, dst_scope="imported")
    # Map the original tensors to their counterparts in the copied graph.
    copied_result = copy_info.transformed(result)
    copied_max_index = copy_info.transformed(max_index)
    with copied_graph.as_default():
      with session.Session() as sess:
        n = 10
        sum_val = sess.run(copied_result, feed_dict={copied_max_index: n})
        self.assertEqual(sum_val, 55)
  def test_graph_cond(self):
    """ge.copy transplants a tf.cond into a fresh graph intact."""
    graph = ops.Graph()
    with graph.as_default():
      choice = array_ops.placeholder(shape=(), dtype=dtypes.bool)
      result = control_flow_ops.cond(
          choice,
          lambda: constant_op.constant(1),
          lambda: constant_op.constant(2))
    copied_graph = ops.Graph()
    _, copy_info = ge.copy(
        graph, dst_graph=copied_graph, dst_scope="imported")
    # Map the original tensors to their counterparts in the copied graph.
    copied_result = copy_info.transformed(result)
    copied_choice = copy_info.transformed(choice)
    with copied_graph.as_default():
      with session.Session() as sess:
        # Both branches of the copied cond must still be selectable.
        res = sess.run(copied_result, feed_dict={copied_choice: True})
        self.assertEqual(res, 1)
        res = sess.run(copied_result, feed_dict={copied_choice: False})
        self.assertEqual(res, 2)
if __name__ == "__main__":
test.main()
| apache-2.0 |
alipsgh/tornado | classifier/knn.py | 1 | 3526 | """
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import math
import operator
from classifier.classifier import SuperClassifier
from dictionary.tornado_dictionary import *
def calculate_euclidean_distance(instance_1, instance_2):
    """Return the Euclidean distance between two equal-length feature vectors.

    Each component is coerced to float, so string-encoded numeric features
    (as produced by some stream readers) are accepted.

    :param instance_1: first feature vector (sequence of numbers or numeric strings)
    :param instance_2: second feature vector, same length as ``instance_1``
    :return: the Euclidean (L2) distance as a float
    """
    return math.sqrt(sum((float(a) - float(b)) ** 2
                         for a, b in zip(instance_1, instance_2)))
def calculate_hamming_distance(instance_1, instance_2):
    """Return the normalized Hamming distance between two equal-length sequences.

    The result is the fraction of positions at which the two sequences
    differ, i.e. a value in [0.0, 1.0] (0.0 for identical sequences).

    :param instance_1: first sequence
    :param instance_2: second sequence, same length as ``instance_1``
    :return: mismatch count divided by ``len(instance_1)``
    """
    n = len(instance_1)
    mismatches = sum(1 for a, b in zip(instance_1, instance_2) if a != b)
    return mismatches / n
class KNN(SuperClassifier):
    """This is the implementation of the K Nearest Neighbor algorithm. This classifier keeps the recent
    instances of a data stream within a window. For a new instance, its nearest neighbors are located in the window.
    Then, using the majority voting approach, the class of the new instance is decided."""

    LEARNER_NAME = TornadoDic.K_NN
    LEARNER_TYPE = TornadoDic.LOADABLE
    LEARNER_CATEGORY = TornadoDic.NUM_CLASSIFIER

    def __init__(self, labels, attributes, k=5, window_size=100):
        """Create a kNN learner.

        :param labels: class labels forwarded to SuperClassifier
        :param attributes: attribute descriptors forwarded to SuperClassifier
        :param k: number of neighbors consulted per prediction
        :param window_size: maximum number of recent instances kept
        """
        super().__init__(labels, attributes)
        self.INSTANCES = []
        self.K = k
        self.LEARNER_NAME = str(self.K) + " NEAREST NEIGHBORS"
        self.__WINDOW_SIZE = window_size

    def load(self, instance):
        """Append an instance to the sliding window, evicting the oldest one.

        NOTE(review): the check uses ``>``, so the window can briefly hold
        WINDOW_SIZE + 1 instances after the append — confirm whether the
        off-by-one is intentional.
        """
        if len(self.INSTANCES) > self.__WINDOW_SIZE:
            self.INSTANCES.pop(0)
        self.INSTANCES.append(instance)

    def test(self, ts_instance):
        """Classify ``ts_instance`` (features + true label in the last slot).

        Requires at least one prior call to load(); otherwise the process
        exits. The true label is only used to update the confusion matrix.
        """
        if self._IS_READY:
            distances = []
            # Split the incoming instance into features and its true label.
            x_test = ts_instance[0:len(ts_instance) - 1]
            y = ts_instance[len(ts_instance) - 1]
            for instance in self.INSTANCES:
                x = instance[0:len(instance) - 1]
                # NOTE(review): ts_instance is stored in each entry but never
                # read again — only slots [0] (instance) and [2] (distance) are used.
                distances.append([instance, ts_instance, calculate_euclidean_distance(x, x_test)])
            knn = self.__find_k_nearest_neighbours(distances)
            predicted_class = self.__predict(knn)
            self.update_confusion_matrix(y, predicted_class)
            return predicted_class
        else:
            print("Please load KNN classifier with some instances first!")
            exit()

    def __find_k_nearest_neighbours(self, distances):
        """Return the (up to) K windowed instances closest to the query.

        :param distances: list of [instance, ts_instance, distance] triples
        :return: dict mapping rank index -> neighbor instance
        """
        unsorted_distance_dic = {}
        for i in range(0, len(distances)):
            unsorted_distance_dic[i] = distances[i][2]
        # Sort window indices by ascending distance to the query point.
        sorted_distance_list = sorted(unsorted_distance_dic.items(), key=operator.itemgetter(1))
        knn = {}
        num_total_nodes = len(distances)
        # Clamp K when the window holds fewer instances than K.
        if num_total_nodes < self.K:
            k = num_total_nodes
        else:
            k = self.K
        for i in range(0, k):
            knn[i] = distances[sorted_distance_list[i][0]][0]
        return knn

    @staticmethod
    def __predict(knn):
        """Majority-vote over the neighbors' labels (last element of each)."""
        knn_class_dist = {}
        for k, v in knn.items():
            if knn_class_dist.__contains__(v[len(v) - 1]) is True:
                knn_class_dist[v[len(v) - 1]] += 1
            else:
                knn_class_dist[v[len(v) - 1]] = 1
        # Ties are broken by dict iteration order (first label reaching the max).
        prediction = max(knn_class_dist.items(), key=operator.itemgetter(1))[0]
        return prediction

    def reset(self):
        """Clear accumulated statistics and drop the instance window."""
        super()._reset_stats()
        self.INSTANCES = []
| mit |
krafczyk/spack | var/spack/repos/builtin/packages/py-doxypypy/package.py | 5 | 1648 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyDoxypypy(PythonPackage):
    """A Doxygen filter for Python.

    A more Pythonic version of doxypy, a Doxygen filter for Python.
    """

    homepage = "https://github.com/Feneric/doxypypy"
    url = "https://pypi.io/packages/source/d/doxypypy/doxypypy-0.8.8.6.tar.gz"

    # Checksum is the MD5 of the sdist tarball above.
    version('0.8.8.6', '6b3fe4eff5d459400071b626333fe15f')

    # setuptools is only needed to build/install the package, not at runtime.
    depends_on('py-setuptools', type='build')
| lgpl-2.1 |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/scipy/ndimage/tests/test_regression.py | 123 | 1429 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite
import scipy.ndimage as ndimage
def test_byte_order_median():
    """Regression test for #413: median_filter must handle non-native byte order.

    The deprecated ``scipy.ndimage.filters`` namespace (removed in SciPy 1.15)
    is replaced by the top-level ``scipy.ndimage`` functions.
    """
    # Same data in little-endian and big-endian float32.
    a = np.arange(9, dtype='<f4').reshape(3, 3)
    ref = ndimage.median_filter(a, (3, 3))
    b = np.arange(9, dtype='>f4').reshape(3, 3)
    t = ndimage.median_filter(b, (3, 3))
    # The filter result must not depend on the input's byte order.
    assert_array_almost_equal(ref, t)
def test_zoom_output_shape():
    """Ticket #643: zoom must accept a preallocated output array."""
    grid = np.arange(12).reshape((3, 4))
    preallocated = np.zeros((6, 8))
    # Zooming by 2 must fill the caller-supplied (6, 8) buffer without error.
    ndimage.zoom(grid, 2, output=preallocated)
def test_ticket_742():
    """Regression test for ticket #742: label/find_objects on a large 3-D mask.

    ``np.product`` (removed in NumPy 2.0) is replaced by direct shape
    unpacking into ``np.random.rand``.
    """
    def SE(img, thresh=.7, size=4):
        # Label fully-connected regions above the threshold and locate them;
        # this combination used to crash on 64-bit platforms.
        mask = img > thresh
        rank = len(mask.shape)
        labeled, _ = ndimage.label(mask,
                                   ndimage.generate_binary_structure(rank, rank))
        ndimage.find_objects(labeled)

    # Only meaningful where the platform's intp differs from a C int (64-bit).
    if np.dtype(np.intp) != np.dtype('i'):
        shape = (3, 1240, 1240)
        a = np.random.rand(*shape)
        # shouldn't crash
        SE(a)
def test_gh_issue_3025():
    """Github issue #3025 - improper merging of labels"""
    # Two large regions separated by a thin gap, bridged by three pixels:
    # with full (3, 3) connectivity they must merge into a single label.
    img = np.zeros((60, 320))
    img[:, :257] = 1
    img[:, 260:] = 1
    img[36, 257] = 1
    img[35, 258] = 1
    img[35, 259] = 1
    n_labels = ndimage.label(img, np.ones((3, 3)))[1]
    assert n_labels == 1
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
peterbraden/tensorflow | tensorflow/python/framework/contrib_test.py | 15 | 1436 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that the contrib module shows up properly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensorflow.python.platform import googletest
class ContribTest(googletest.TestCase):
  """Smoke tests that tf.contrib and selected submodules import as modules."""

  def testContrib(self):
    """tf.contrib itself must be importable as a module."""
    # pylint: disable=g-import-not-at-top
    import tensorflow as tf
    assert inspect.ismodule(tf.contrib)

  def testLayers(self):
    """tf.contrib.layers must be importable as a module."""
    # pylint: disable=g-import-not-at-top
    import tensorflow as tf
    assert inspect.ismodule(tf.contrib.layers)

  def testLinearOptimizer(self):
    """tf.contrib.linear_optimizer must be importable as a module."""
    # pylint: disable=g-import-not-at-top
    import tensorflow as tf
    assert inspect.ismodule(tf.contrib.linear_optimizer)
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
cassm/will-o-the-wisp | control/coordGen.py | 1 | 3517 | #!/usr/bin/env python
import math
# LED directions on a lantern as (first, second) angle pairs in degrees.
# The code below treats element [0] as theta (azimuth, offset by the lantern
# orientation) and element [1] as phi — presumably inclination, but values
# exceed 180, hence the modulo hack downstream. TODO confirm the convention.
spherical = (( 60, 6), ( 20, 30), ( 45, 48), ( 75, 18), (100, 348),
             (300, 6), (260, 348), (285, 18), (315, 48), (340, 30),
             (180, 328), (140, 333), (165, 300), (195, 300), (220, 333),
             (120, 318), ( 95, 338), (109, 238), (131, 258), (145, 288),
             ( 71, 58), ( 49, 78), ( 35, 108), ( 60, 138), ( 85, 158),
             ( 25, 63), ( 00, 68), (335, 63), (249, 96), ( 11, 96),
             (311, 78), (289, 58), (275, 158), (300, 138), (325, 108),
             (265, 338), (240, 318), (215, 288), (229, 258), (251, 238),
             (191, 276), (169, 243), (155, 248), (180, 243), (205, 276),
             (135, 228), (105, 198), ( 80, 168), (120, 186), (160, 210),
             ( 40, 153), ( 15, 120), (345, 120), (320, 153), ( 5, 148),
             (280, 168), (255, 198), (225, 228), (200, 210), (240, 186))
# lantern radii
# Small = 0.15
# Medium = 0.2
# Large = 0.25? check
# Lantern centers as (x, y, z); first entry is the large lantern at the origin.
lantern_locations = ((0.0, 0.0, 0.0),
                     (2.0, 0.0, 0.0), (0.0, 2.0, 0.0),
                     (-2.0, 0.0, 0.0), (0.0, -2.0, 0.0),
                     (0.88, 0.88, 0.0), (-0.88, 0.88, 0.0),
                     (-0.88, -0.88, 0.0), (0.88, -0.88, 0.0))
# One radius per lantern, parallel to lantern_locations.
lantern_radii = (0.25, 0.2, 0.2, 0.2, 0.2, 0.15, 0.15, 0.15, 0.15)
# Per-lantern azimuth offsets in degrees; currently all zero.
# lantern_orientations = (270, 345, 90, 50, 110, 270, 270, 270, 290)
lantern_orientations = (0, 0, 0, 0, 0, 0, 0, 0, 0)
# Accumulators for the derived coordinate tables written out below.
global_cartesian = []
origin_delta = []
local_cartesian = []
spherical_radians = []
normalised_cartesian = []
# Convert each lantern's (theta, phi) table to radians, applying the
# lantern's azimuth offset to theta.
for lantern in range(len(lantern_orientations)):
    lantern_coords = []
    for coordinate in spherical:
        phi = math.radians(coordinate[1])
        theta = math.radians(coordinate[0]-lantern_orientations[lantern])
        lantern_coords.append((phi, theta))
        # sphericalRadians.append((round(phi, 4), round(theta, 4)))
    spherical_radians.append(lantern_coords)
# Unit-sphere positions for lantern 0 only (all orientations are 0, so the
# tables are identical anyway). NOTE(review): `phi%3.1416` appears to be a
# crude wrap of phi into [0, pi) using an approximation of pi — confirm.
for phi, theta in spherical_radians[0]:
    x = round(math.sin(phi%3.1416) * math.cos(theta), 4)
    y = round(math.sin(phi%3.1416) * math.sin(theta), 4)
    z = round(math.cos(phi), 4)
    normalised_cartesian.append((x, y, z))
# Scale by each lantern's radius (local frame) and translate by its center
# (global frame); also record each LED's distance from the world origin.
for lanternIndex, location in enumerate(lantern_locations):
    lantern_cartesian = []
    for phi, theta in spherical_radians[lanternIndex]:
        x = round(lantern_radii[lanternIndex] * math.sin(phi%3.1416) * math.cos(theta), 4)
        y = round(lantern_radii[lanternIndex] * math.sin(phi%3.1416) * math.sin(theta), 4)
        z = round(lantern_radii[lanternIndex] * math.cos(phi), 4)
        lantern_cartesian.append((x, y, z))
        global_cartesian.append((location[0] + x, location[1] + y, location[2] + z))
        # LEDs of the origin lantern get distance 0 rather than their true
        # radius-sized offset — NOTE(review): confirm this special case.
        if location == (0, 0, 0):
            origin_delta.append(0)
        else:
            origin_delta.append(math.sqrt((location[0] + x)**2 + (location[1] + y)**2 + (location[2] + z)**2))
    local_cartesian.append(lantern_cartesian)
# Emit all tables as an importable Python module.
f = open("coords.py", "w")
f.write("spherical = " + str(spherical_radians) + "\n\nglobal_cartesian = " + str(global_cartesian) + "\n\norigin_delta = " + str(origin_delta) + "\n\nlocal_cartesian = " + str(local_cartesian) + "\n\nnormalised_cartesian = " + str(normalised_cartesian) + "\n\nlantern_locations = " + str(lantern_locations) + "\n")
f.close()
# Emit the global LED positions as a JSON array of {"point": [x, y, z]}.
f = open("lanterns.json", "w")
lines = []
for coordinate in global_cartesian:
    lines.append(" {\"point\": [%0.4f, %0.4f, %0.4f]}" % (coordinate[0], coordinate[1], coordinate[2]))
f.write('[\n' + ',\n'.join(lines) + '\n]')
f.close()
| gpl-3.0 |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_size05.py | 9 | 1575 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None
        # NOTE(review): the expected file is chart_size04.xlsx although this
        # module is test 05 — presumably these offsets produce an identical
        # workbook; confirm against the XlsxWriter test fixtures.
        filename = 'chart_size04.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_5_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test inserting a column chart with x/y pixel offsets."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})
        # Pin the axis ids so the output matches the Excel-generated file.
        chart.axis_ids = [73773440, 73774976]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])
        chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
        chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
        # The offsets under test: shift the chart 8px right and 9px down.
        worksheet.insert_chart('E9', chart, {'x_offset': 8, 'y_offset': 9})
        workbook.close()
        self.assertExcelEqual()
| bsd-2-clause |
arthur-wsw/pinax-messages | pinax/messages/models.py | 1 | 3535 | from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from .signals import message_sent
from .utils import cached_attribute
@python_2_unicode_compatible
class Thread(models.Model):
    """A conversation thread shared by several users via UserThread state."""

    subject = models.CharField(max_length=150)
    # Per-user membership and read/deleted flags live on UserThread.
    users = models.ManyToManyField(settings.AUTH_USER_MODEL, through="UserThread")

    @classmethod
    def inbox(cls, user):
        """Return the threads visible in *user*'s inbox (not soft-deleted)."""
        return cls.objects.filter(userthread__user=user, userthread__deleted=False)

    @classmethod
    def unread(cls, user):
        """Return the inbox threads *user* has not read yet."""
        return cls.objects.filter(
            userthread__user=user,
            userthread__deleted=False,
            userthread__unread=True
        )

    def __str__(self):
        return "{}: {}".format(
            self.subject,
            ", ".join([str(user) for user in self.users.all()])
        )

    def get_absolute_url(self):
        return reverse("pinax_messages:thread_detail", args=[self.pk])

    @property
    @cached_attribute
    def first_message(self):
        # Relies on Message.Meta.ordering = ("sent_at",): index 0 is oldest.
        return self.messages.all()[0]

    @property
    @cached_attribute
    def latest_message(self):
        return self.messages.order_by("-sent_at")[0]

    @classmethod
    def ordered(cls, objs):
        """
        Returns the iterable ordered the correct way (most recently active
        thread first); this is a class method because we don't know what
        the type of the iterable will be.
        """
        objs = list(objs)
        objs.sort(key=lambda o: o.latest_message.sent_at, reverse=True)
        return objs
class UserThread(models.Model):
    """Per-user thread state: the read/unread flag and the soft-delete flag."""

    thread = models.ForeignKey(Thread)
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    # No defaults declared: every creation site must set both flags explicitly
    # (Message.new_message / Message.new_reply always do).
    unread = models.BooleanField()
    deleted = models.BooleanField()
class Message(models.Model):
    """A single message inside a Thread, sent by one user."""

    thread = models.ForeignKey(Thread, related_name="messages")
    sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="sent_messages")
    sent_at = models.DateTimeField(default=timezone.now)
    content = models.TextField()

    @classmethod
    def new_reply(cls, thread, user, content):
        """
        Create a new reply for an existing Thread.

        Mark thread as unread for all other participants, and
        mark thread as read by replier.
        """
        msg = cls.objects.create(thread=thread, sender=user, content=content)
        # Resurface the thread (deleted=False) for everyone it was hidden from.
        thread.userthread_set.exclude(user=user).update(deleted=False, unread=True)
        thread.userthread_set.filter(user=user).update(deleted=False, unread=False)
        message_sent.send(sender=cls, message=msg, thread=thread, reply=True)
        return msg

    @classmethod
    def new_message(cls, from_user, to_users, subject, content):
        """
        Create a new Message and Thread.

        Mark thread as unread for all recipients, and
        mark thread as read and deleted from inbox by creator.
        """
        thread = Thread.objects.create(subject=subject)
        for user in to_users:
            thread.userthread_set.create(user=user, deleted=False, unread=True)
        # The sender's copy starts deleted=True so it only appears in their
        # inbox once somebody replies (see new_reply above).
        thread.userthread_set.create(user=from_user, deleted=True, unread=False)
        msg = cls.objects.create(thread=thread, sender=from_user, content=content)
        message_sent.send(sender=cls, message=msg, thread=thread, reply=False)
        return msg

    class Meta:
        # Oldest first, so Thread.first_message can index position 0.
        ordering = ("sent_at",)

    def get_absolute_url(self):
        return self.thread.get_absolute_url()
| mit |
OESF/Linaro-Android_LinaroSprint2011Q1 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin, 4 bytes at a time, and
# print them as a single space-separated "index=value" line (index in hex),
# the format expected by the cxacru sysfs adsl_config attribute.
# NOTE(review): written for Python 2 — under Python 3, sys.stdin.read()
# returns str and struct.unpack requires bytes (sys.stdin.buffer would be
# needed); confirm before running with a modern interpreter.
i = 0
while True:
    buf = sys.stdin.read(4)

    if len(buf) == 0:
        break
    elif len(buf) != 4:
        # Input not a multiple of 4 bytes: terminate the output line, report.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
cevaris/pants | tests/python/pants_test/targets/test_java_agent.py | 10 | 3426 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.java_agent import JavaAgent
from pants.base.exceptions import TargetDefinitionException
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants_test.base_test import BaseTest
class JavaAgentTest(BaseTest):
  """Tests the java_agent BUILD target: required fields, defaults, and the
  three Can-* capability flags, each of which must default to False and be
  settable independently of the others."""

  @property
  def alias_groups(self):
    # Expose only the java_agent alias to the synthetic BUILD files below.
    return BuildFileAliases(
      targets={
        'java_agent': JavaAgent,
      },
    )

  def create_agent(self, name, **kwargs):
    """Write a one-target BUILD file '<name>/BUILD' and return the parsed target.

    kwargs are rendered verbatim as java_agent(...) keyword arguments.
    """
    args = {'name': name, 'sources': []}
    args.update(**kwargs)
    formatted_args = ', '.join('{name}={value!r}'.format(name=k, value=v) for k, v in args.items())
    target = 'java_agent({args})'.format(args=formatted_args)
    self.add_to_build_file('{path}'.format(path=name), target)
    return self.target('{path}:{name}'.format(path=name, name=name))

  def test_required(self):
    """At least one of premain/agent_class must be given."""
    with self.assertRaises(TargetDefinitionException):
      self.create_agent('name', premain=None, agent_class=None)

  def test_minimal(self):
    """Either premain or agent_class alone is sufficient."""
    self.assertEqual('jack', self.create_agent('one', premain='jack').premain)
    self.assertEqual('jill', self.create_agent('two', agent_class='jill').agent_class)

  def test_defaults(self):
    """All three capability flags default to False; the other entry point is None."""
    def assert_bool_defaults(tgt):
      # Shared check: no capability flag may default to True.
      self.assertFalse(tgt.can_redefine)
      self.assertFalse(tgt.can_retransform)
      self.assertFalse(tgt.can_set_native_method_prefix)

    agent = self.create_agent('one', premain='jack')
    self.assertEqual('jack', agent.premain)
    self.assertIsNone(agent.agent_class)
    assert_bool_defaults(agent)

    agent = self.create_agent('two', agent_class='jill')
    self.assertEqual('jill', agent.agent_class)
    self.assertIsNone(agent.premain)
    assert_bool_defaults(agent)

  def test_can_redefine(self):
    """can_redefine toggles without affecting the other two flags."""
    agent = self.create_agent('one', premain='jack', can_redefine=True)
    self.assertTrue(agent.can_redefine)
    self.assertFalse(agent.can_retransform)
    self.assertFalse(agent.can_set_native_method_prefix)

    agent = self.create_agent('two', premain='jack', can_redefine=False)
    self.assertFalse(agent.can_redefine)
    self.assertFalse(agent.can_retransform)
    self.assertFalse(agent.can_set_native_method_prefix)

  def test_can_retransform(self):
    """can_retransform toggles without affecting the other two flags."""
    agent = self.create_agent('one', premain='jack', can_retransform=True)
    self.assertTrue(agent.can_retransform)
    self.assertFalse(agent.can_redefine)
    self.assertFalse(agent.can_set_native_method_prefix)

    agent = self.create_agent('two', premain='jack', can_retransform=False)
    self.assertFalse(agent.can_retransform)
    self.assertFalse(agent.can_redefine)
    self.assertFalse(agent.can_set_native_method_prefix)

  def test_can_set_native_method_prefix(self):
    """can_set_native_method_prefix toggles without affecting the other two flags."""
    agent = self.create_agent('one', premain='jack', can_set_native_method_prefix=True)
    self.assertTrue(agent.can_set_native_method_prefix)
    self.assertFalse(agent.can_redefine)
    self.assertFalse(agent.can_retransform)

    agent = self.create_agent('two', premain='jack', can_set_native_method_prefix=False)
    self.assertFalse(agent.can_set_native_method_prefix)
    self.assertFalse(agent.can_redefine)
    self.assertFalse(agent.can_retransform)
| apache-2.0 |
raven47git/readthedocs.org | readthedocs/restapi/permissions.py | 12 | 1615 | from rest_framework import permissions
from privacy.backend import AdminPermission
class IsOwner(permissions.BasePermission):
    """
    Object-level permission: only users listed in ``obj.users`` may act on it.
    """

    def has_object_permission(self, request, view, obj):
        # Any access (read or write) requires membership in the object's users.
        return request.user in obj.users.all()
class CommentModeratorOrReadOnly(permissions.BasePermission):
    """Anyone may read a comment; only project admins may modify it."""

    def has_object_permission(self, request, view, comment):
        if request.method in permissions.SAFE_METHODS:
            return True  # TODO: Similar logic to #1084
        else:
            # Writes require admin rights on the comment's project.
            return AdminPermission.is_admin(request.user, comment.node.project)
class RelatedProjectIsOwner(permissions.BasePermission):
    """
    Object-level permission for objects owned *indirectly* via ``obj.project``:
    only users of the related project may act on the object.
    """

    def has_object_permission(self, request, view, obj):
        # Access requires membership in the related project's users.
        return request.user in obj.project.users.all()
class APIPermission(permissions.IsAuthenticatedOrReadOnly):

    '''
    This permission should allow authenticated users readonly access to the API,
    and allow admin users write access. This should be used on API resources
    that need to implement write operations to resources that were based on the
    ReadOnlyViewSet
    '''

    def has_object_permission(self, request, view, obj):
        # Defer to IsAuthenticatedOrReadOnly first, then grant staff everything.
        has_perm = super(APIPermission, self).has_object_permission(
            request, view, obj)
        return has_perm or (request.user and request.user.is_staff)
| mit |
pixelgremlins/ztruck | dj/lib/python2.7/site-packages/django/conf/locale/__init__.py | 82 | 12130 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
"""
LANG_INFO is a dictionary structure to provide meta information about languages.
About name_local: capitalize it as if your language name was appearing
inside a sentence in your language.
The 'fallback' key can be used to specify a special fallback logic which doesn't
follow the traditional 'fr-ca' -> 'fr' fallback logic.
"""
LANG_INFO = {
'af': {
'bidi': False,
'code': 'af',
'name': 'Afrikaans',
'name_local': 'Afrikaans',
},
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': 'العربيّة',
},
'ast': {
'bidi': False,
'code': 'ast',
'name': 'Asturian',
'name_local': 'asturianu',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': 'Azərbaycanca',
},
'be': {
'bidi': False,
'code': 'be',
'name': 'Belarusian',
'name_local': 'беларуская',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': 'български',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': 'বাংলা',
},
'br': {
'bidi': False,
'code': 'br',
'name': 'Breton',
'name_local': 'brezhoneg',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': 'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': 'català',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': 'česky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': 'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': 'dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': 'Deutsch',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': 'Ελληνικά',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': 'English',
},
'en-au': {
'bidi': False,
'code': 'en-au',
'name': 'Australian English',
'name_local': 'Australian English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': 'British English',
},
'eo': {
'bidi': False,
'code': 'eo',
'name': 'Esperanto',
'name_local': 'Esperanto',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': 'español',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': 'español de Argentina',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': 'español de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': 'español de Nicaragua',
},
'es-ve': {
'bidi': False,
'code': 'es-ve',
'name': 'Venezuelan Spanish',
'name_local': 'español de Venezuela',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': 'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': 'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': 'فارسی',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': 'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': 'français',
},
'fy': {
'bidi': False,
'code': 'fy',
'name': 'Frisian',
'name_local': 'frysk',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': 'Gaeilge',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': 'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': 'עברית',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': 'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': 'Hrvatski',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': 'Magyar',
},
'ia': {
'bidi': False,
'code': 'ia',
'name': 'Interlingua',
'name_local': 'Interlingua',
},
'io': {
'bidi': False,
'code': 'io',
'name': 'Ido',
'name_local': 'ido',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': 'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': 'Íslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': 'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': '日本語',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': 'ქართული',
},
'kk': {
'bidi': False,
'code': 'kk',
'name': 'Kazakh',
'name_local': 'Қазақ',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': 'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': 'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': '한국어',
},
'lb': {
'bidi': False,
'code': 'lb',
'name': 'Luxembourgish',
'name_local': 'Lëtzebuergesch',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': 'Lietuviškai',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': 'latviešu',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': 'Македонски',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': 'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': 'Mongolian',
},
'mr': {
'bidi': False,
'code': 'mr',
'name': 'Marathi',
'name_local': 'मराठी',
},
'my': {
'bidi': False,
'code': 'my',
'name': 'Burmese',
'name_local': 'မြန်မာဘာသာ',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': 'norsk (bokmål)',
},
'ne': {
'bidi': False,
'code': 'ne',
'name': 'Nepali',
'name_local': 'नेपाली',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': 'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': 'norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': 'norsk',
},
'os': {
'bidi': False,
'code': 'os',
'name': 'Ossetic',
'name_local': 'Ирон',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': 'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': 'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': 'Português',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': 'Português Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': 'Română',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': 'Русский',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': 'slovenský',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': 'Slovenščina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': 'shqip',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': 'српски',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': 'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': 'svenska',
},
'sw': {
'bidi': False,
'code': 'sw',
'name': 'Swahili',
'name_local': 'Kiswahili',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': 'தமிழ்',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': 'తెలుగు',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': 'ภาษาไทย',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': 'Türkçe',
},
'tt': {
'bidi': False,
'code': 'tt',
'name': 'Tatar',
'name_local': 'Татарча',
},
'udm': {
'bidi': False,
'code': 'udm',
'name': 'Udmurt',
'name_local': 'Удмурт',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': 'Українська',
},
'ur': {
'bidi': True,
'code': 'ur',
'name': 'Urdu',
'name_local': 'اردو',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': 'Tiếng Việt',
},
'zh-cn': {
'fallback': ['zh-hans'],
'bidi': False,
'code': 'zh-cn',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hans': {
'bidi': False,
'code': 'zh-hans',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hant': {
'bidi': False,
'code': 'zh-hant',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
'zh-hk': {
'fallback': ['zh-hant'],
},
'zh-mo': {
'fallback': ['zh-hant'],
},
'zh-my': {
'fallback': ['zh-hans'],
},
'zh-sg': {
'fallback': ['zh-hans'],
},
'zh-tw': {
'fallback': ['zh-hant'],
'bidi': False,
'code': 'zh-tw',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
}
| apache-2.0 |
badloop/SickRage | sickbeard/databases/cache_db.py | 6 | 3682 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from sickbeard import db
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema(db.SchemaUpgrade):
    """Create the initial cache-database schema.

    Applies all base tables plus the db_version bookkeeping table in one
    pass; `test` reports whether the schema already exists.
    """
    def test(self):
        # The schema has been created once the db_version table exists.
        return self.hasTable("db_version")
    def execute(self):
        # Each entry is a tuple: (sql,) or (sql, arg1, arg2, ...).
        statements = [
            ("CREATE TABLE lastUpdate (provider TEXT, time NUMERIC);",),
            ("CREATE TABLE lastSearch (provider TEXT, time NUMERIC);",),
            ("CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY, indexer_id INTEGER KEY, show_name TEXT, season NUMERIC DEFAULT -1, custom NUMERIC DEFAULT 0);",),
            ("CREATE TABLE scene_names (indexer_id INTEGER, name TEXT);",),
            ("CREATE TABLE network_timezones (network_name TEXT PRIMARY KEY, timezone TEXT);",),
            ("CREATE TABLE scene_exceptions_refresh (list TEXT PRIMARY KEY, last_refreshed INTEGER);",),
            ("CREATE TABLE db_version (db_version INTEGER);",),
            ("INSERT INTO db_version(db_version) VALUES (1);",),
        ]
        for stmt in statements:
            sql, args = stmt[0], stmt[1:]
            if args:
                self.connection.action(sql, args)
            else:
                self.connection.action(sql)
class AddSceneExceptions(InitialSchema):
    """Migration: add the scene_exceptions table."""
    def test(self):
        # Already applied once the table exists.
        return self.hasTable("scene_exceptions")
    def execute(self):
        self.connection.action(
            "CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY, indexer_id INTEGER KEY, show_name TEXT);")
class AddSceneNameCache(AddSceneExceptions):
    """Migration: add the scene_names lookup cache table."""
    def test(self):
        # Already applied once the table exists.
        return self.hasTable("scene_names")
    def execute(self):
        self.connection.action("CREATE TABLE scene_names (indexer_id INTEGER, name TEXT);")
class AddNetworkTimezones(AddSceneNameCache):
    """Migration: add the network_timezones table."""
    def test(self):
        # Already applied once the table exists.
        return self.hasTable("network_timezones")
    def execute(self):
        self.connection.action("CREATE TABLE network_timezones (network_name TEXT PRIMARY KEY, timezone TEXT);")
class AddLastSearch(AddNetworkTimezones):
    """Migration: add the lastSearch provider-timestamp table."""
    def test(self):
        # Already applied once the table exists.
        return self.hasTable("lastSearch")
    def execute(self):
        self.connection.action("CREATE TABLE lastSearch (provider TEXT, time NUMERIC);")
class AddSceneExceptionsSeasons(AddLastSearch):
    """Migration: add a season column (default -1) to scene_exceptions."""
    def test(self):
        # Already applied once the column exists.
        return self.hasColumn("scene_exceptions", "season")
    def execute(self):
        self.addColumn("scene_exceptions", "season", "NUMERIC", -1)
class AddSceneExceptionsCustom(AddSceneExceptionsSeasons):
    """Migration: add a custom flag column (default 0) to scene_exceptions."""
    def test(self):
        # Already applied once the column exists.
        return self.hasColumn("scene_exceptions", "custom")
    def execute(self):
        self.addColumn("scene_exceptions", "custom", "NUMERIC", 0)
class AddSceneExceptionsRefresh(AddSceneExceptionsCustom):
    """Migration: add the scene_exceptions_refresh bookkeeping table."""
    def test(self):
        # Already applied once the table exists.
        return self.hasTable("scene_exceptions_refresh")
    def execute(self):
        self.connection.action(
            "CREATE TABLE scene_exceptions_refresh (list TEXT PRIMARY KEY, last_refreshed INTEGER);")
MCMic/Sick-Beard | lib/enzyme/asf.py | 180 | 15818 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
from exceptions import ParseError
import core
import logging
import string
import struct
__all__ = ['Parser']
# get logging object
log = logging.getLogger(__name__)
def _guid(input):
# Remove any '-'
s = string.join(string.split(input, '-'), '')
r = ''
if len(s) != 32:
return ''
for i in range(0, 16):
r += chr(int(s[2 * i:2 * i + 2], 16))
guid = struct.unpack('>IHHBB6s', r)
return guid
# Mapping of well-known ASF object/stream GUID names to their unpacked
# binary form (the tuple produced by _guid() above); header objects are
# identified by comparing their leading GUID against these entries.
GUIDS = {
    'ASF_Header_Object' : _guid('75B22630-668E-11CF-A6D9-00AA0062CE6C'),
    'ASF_Data_Object' : _guid('75B22636-668E-11CF-A6D9-00AA0062CE6C'),
    'ASF_Simple_Index_Object' : _guid('33000890-E5B1-11CF-89F4-00A0C90349CB'),
    'ASF_Index_Object' : _guid('D6E229D3-35DA-11D1-9034-00A0C90349BE'),
    'ASF_Media_Object_Index_Object' : _guid('FEB103F8-12AD-4C64-840F-2A1D2F7AD48C'),
    'ASF_Timecode_Index_Object' : _guid('3CB73FD0-0C4A-4803-953D-EDF7B6228F0C'),
    'ASF_File_Properties_Object' : _guid('8CABDCA1-A947-11CF-8EE4-00C00C205365'),
    'ASF_Stream_Properties_Object' : _guid('B7DC0791-A9B7-11CF-8EE6-00C00C205365'),
    'ASF_Header_Extension_Object' : _guid('5FBF03B5-A92E-11CF-8EE3-00C00C205365'),
    'ASF_Codec_List_Object' : _guid('86D15240-311D-11D0-A3A4-00A0C90348F6'),
    'ASF_Script_Command_Object' : _guid('1EFB1A30-0B62-11D0-A39B-00A0C90348F6'),
    'ASF_Marker_Object' : _guid('F487CD01-A951-11CF-8EE6-00C00C205365'),
    'ASF_Bitrate_Mutual_Exclusion_Object' : _guid('D6E229DC-35DA-11D1-9034-00A0C90349BE'),
    'ASF_Error_Correction_Object' : _guid('75B22635-668E-11CF-A6D9-00AA0062CE6C'),
    'ASF_Content_Description_Object' : _guid('75B22633-668E-11CF-A6D9-00AA0062CE6C'),
    'ASF_Extended_Content_Description_Object' : _guid('D2D0A440-E307-11D2-97F0-00A0C95EA850'),
    'ASF_Content_Branding_Object' : _guid('2211B3FA-BD23-11D2-B4B7-00A0C955FC6E'),
    'ASF_Stream_Bitrate_Properties_Object' : _guid('7BF875CE-468D-11D1-8D82-006097C9A2B2'),
    'ASF_Content_Encryption_Object' : _guid('2211B3FB-BD23-11D2-B4B7-00A0C955FC6E'),
    'ASF_Extended_Content_Encryption_Object' : _guid('298AE614-2622-4C17-B935-DAE07EE9289C'),
    'ASF_Alt_Extended_Content_Encryption_Obj' : _guid('FF889EF1-ADEE-40DA-9E71-98704BB928CE'),
    'ASF_Digital_Signature_Object' : _guid('2211B3FC-BD23-11D2-B4B7-00A0C955FC6E'),
    'ASF_Padding_Object' : _guid('1806D474-CADF-4509-A4BA-9AABCB96AAE8'),
    'ASF_Extended_Stream_Properties_Object' : _guid('14E6A5CB-C672-4332-8399-A96952065B5A'),
    'ASF_Advanced_Mutual_Exclusion_Object' : _guid('A08649CF-4775-4670-8A16-6E35357566CD'),
    'ASF_Group_Mutual_Exclusion_Object' : _guid('D1465A40-5A79-4338-B71B-E36B8FD6C249'),
    'ASF_Stream_Prioritization_Object' : _guid('D4FED15B-88D3-454F-81F0-ED5C45999E24'),
    'ASF_Bandwidth_Sharing_Object' : _guid('A69609E6-517B-11D2-B6AF-00C04FD908E9'),
    'ASF_Language_List_Object' : _guid('7C4346A9-EFE0-4BFC-B229-393EDE415C85'),
    'ASF_Metadata_Object' : _guid('C5F8CBEA-5BAF-4877-8467-AA8C44FA4CCA'),
    'ASF_Metadata_Library_Object' : _guid('44231C94-9498-49D1-A141-1D134E457054'),
    'ASF_Index_Parameters_Object' : _guid('D6E229DF-35DA-11D1-9034-00A0C90349BE'),
    'ASF_Media_Object_Index_Parameters_Obj' : _guid('6B203BAD-3F11-4E84-ACA8-D7613DE2CFA7'),
    'ASF_Timecode_Index_Parameters_Object' : _guid('F55E496D-9797-4B5D-8C8B-604DFE9BFB24'),
    'ASF_Audio_Media' : _guid('F8699E40-5B4D-11CF-A8FD-00805F5C442B'),
    'ASF_Video_Media' : _guid('BC19EFC0-5B4D-11CF-A8FD-00805F5C442B'),
    'ASF_Command_Media' : _guid('59DACFC0-59E6-11D0-A3AC-00A0C90348F6'),
    'ASF_JFIF_Media' : _guid('B61BE100-5B4E-11CF-A8FD-00805F5C442B'),
    'ASF_Degradable_JPEG_Media' : _guid('35907DE0-E415-11CF-A917-00805F5C442B'),
    'ASF_File_Transfer_Media' : _guid('91BD222C-F21C-497A-8B6D-5AA86BFC0185'),
    'ASF_Binary_Media' : _guid('3AFB65E2-47EF-40F2-AC2C-70A90D71D343'),
    'ASF_Web_Stream_Media_Subtype' : _guid('776257D4-C627-41CB-8F81-7AC7FF1C40CC'),
    'ASF_Web_Stream_Format' : _guid('DA1E6B13-8359-4050-B398-388E965BF00C'),
    'ASF_No_Error_Correction' : _guid('20FB5700-5B55-11CF-A8FD-00805F5C442B'),
    'ASF_Audio_Spread' : _guid('BFC3CD50-618F-11CF-8BB2-00AA00B4E220')}
class Asf(core.AVContainer):
    """
    ASF video parser. The ASF format is also used for Microsft Windows
    Media files like wmv.
    """
    def __init__(self, file):
        """Parse the ASF header from *file* and populate the container.

        Raises ParseError when the leading bytes are not a valid
        ASF_Header_Object.
        """
        core.AVContainer.__init__(self)
        self.mime = 'video/x-ms-asf'
        self.type = 'asf format'
        # Scratch state used while parsing; deleted at the end of __init__:
        # _languages: index -> language id strings from the language list
        # _extinfo: stream id -> [bitrate, fps, langid, metadata dict]
        self._languages = []
        self._extinfo = {}
        h = file.read(30)
        if len(h) < 30:
            raise ParseError()
        (guidstr, objsize, objnum, reserved1, \
         reserved2) = struct.unpack('<16sQIBB', h)
        guid = self._parseguid(guidstr)
        if (guid != GUIDS['ASF_Header_Object']):
            raise ParseError()
        # Per the ASF spec the two reserved bytes must be 0x01 and 0x02.
        if reserved1 != 0x01 or reserved2 != 0x02:
            raise ParseError()
        log.debug(u'Header size: %d / %d objects' % (objsize, objnum))
        header = file.read(objsize - 30)
        # Walk the top-level header objects one after another.
        for _ in range(0, objnum):
            h = self._getnextheader(header)
            header = header[h[1]:]
        del self._languages
        del self._extinfo
    def _findstream(self, id):
        """Return the video/audio stream with the given id, or None."""
        for stream in self.video + self.audio:
            if stream.id == id:
                return stream
    def _apply_extinfo(self, streamid):
        """Merge cached extended-properties info into stream *streamid*.

        No-op unless both the stream object and its _extinfo entry exist.
        """
        stream = self._findstream(streamid)
        if not stream or streamid not in self._extinfo:
            return
        stream.bitrate, stream.fps, langid, metadata = self._extinfo[streamid]
        if langid is not None and langid >= 0 and langid < len(self._languages):
            stream.language = self._languages[langid]
        if metadata:
            stream._appendtable('ASFMETADATA', metadata)
    def _parseguid(self, string):
        """Unpack the first 16 bytes of *string* as a binary GUID tuple."""
        return struct.unpack('<IHHBB6s', string[:16])
    def _parsekv(self, s):
        """Parse one Extended Content Description record.

        Returns (bytes consumed, descriptor name, decoded value).
        """
        pos = 0
        (descriptorlen,) = struct.unpack('<H', s[pos:pos + 2])
        pos += 2
        descriptorname = s[pos:pos + descriptorlen]
        pos += descriptorlen
        descriptortype, valuelen = struct.unpack('<HH', s[pos:pos + 4])
        pos += 4
        descriptorvalue = s[pos:pos + valuelen]
        pos += valuelen
        value = None
        if descriptortype == 0x0000:
            # Unicode string
            value = descriptorvalue
        elif descriptortype == 0x0001:
            # Byte Array
            value = descriptorvalue
        elif descriptortype == 0x0002:
            # Bool (?)
            value = struct.unpack('<I', descriptorvalue)[0] != 0
        elif descriptortype == 0x0003:
            # DWORD
            value = struct.unpack('<I', descriptorvalue)[0]
        elif descriptortype == 0x0004:
            # QWORD
            value = struct.unpack('<Q', descriptorvalue)[0]
        elif descriptortype == 0x0005:
            # WORD
            value = struct.unpack('<H', descriptorvalue)[0]
        else:
            log.debug(u'Unknown Descriptor Type %d' % descriptortype)
        return (pos, descriptorname, value)
    def _parsekv2(self, s):
        """Parse one Metadata Object record.

        Like _parsekv but the record also carries a stream number;
        returns (bytes consumed, name, decoded value, stream number).
        """
        pos = 0
        strno, descriptorlen, descriptortype, valuelen = struct.unpack('<2xHHHI', s[pos:pos + 12])
        pos += 12
        descriptorname = s[pos:pos + descriptorlen]
        pos += descriptorlen
        descriptorvalue = s[pos:pos + valuelen]
        pos += valuelen
        value = None
        if descriptortype == 0x0000:
            # Unicode string
            value = descriptorvalue
        elif descriptortype == 0x0001:
            # Byte Array
            value = descriptorvalue
        elif descriptortype == 0x0002:
            # Bool (2-byte here, unlike the 4-byte bool in _parsekv)
            value = struct.unpack('<H', descriptorvalue)[0] != 0
            pass
        elif descriptortype == 0x0003:
            # DWORD
            value = struct.unpack('<I', descriptorvalue)[0]
        elif descriptortype == 0x0004:
            # QWORD
            value = struct.unpack('<Q', descriptorvalue)[0]
        elif descriptortype == 0x0005:
            # WORD
            value = struct.unpack('<H', descriptorvalue)[0]
        else:
            log.debug(u'Unknown Descriptor Type %d' % descriptortype)
        return (pos, descriptorname, value, strno)
    def _getnextheader(self, s):
        """Parse one header object at the start of *s*.

        Dispatches on the object's GUID, updating container/stream state
        as a side effect.  Returns the (guidstr, objsize) tuple so the
        caller can advance past this object.
        """
        r = struct.unpack('<16sQ', s[:24])
        (guidstr, objsize) = r
        guid = self._parseguid(guidstr)
        if guid == GUIDS['ASF_File_Properties_Object']:
            log.debug(u'File Properties Object')
            val = struct.unpack('<16s6Q4I', s[24:24 + 80])
            (fileid, size, date, packetcount, duration, \
             senddur, preroll, flags, minpack, maxpack, maxbr) = \
             val
            # FIXME: parse date to timestamp
            # Duration is stored in 100-nanosecond units.
            self.length = duration / 10000000.0
        elif guid == GUIDS['ASF_Stream_Properties_Object']:
            log.debug(u'Stream Properties Object [%d]' % objsize)
            streamtype = self._parseguid(s[24:40])
            errortype = self._parseguid(s[40:56])
            offset, typelen, errorlen, flags = struct.unpack('<QIIH', s[56:74])
            # Low 7 bits carry the stream number, top bit the encrypted flag.
            strno = flags & 0x7f
            encrypted = flags >> 15
            if encrypted:
                self._set('encrypted', True)
            if streamtype == GUIDS['ASF_Video_Media']:
                vi = core.VideoStream()
                vi.width, vi.height, depth, codec, = struct.unpack('<4xII2xH4s', s[89:89 + 20])
                vi.codec = codec
                vi.id = strno
                self.video.append(vi)
            elif streamtype == GUIDS['ASF_Audio_Media']:
                ai = core.AudioStream()
                twocc, ai.channels, ai.samplerate, bitrate, block, \
                    ai.samplebits, = struct.unpack('<HHIIHH', s[78:78 + 16])
                ai.bitrate = 8 * bitrate
                ai.codec = twocc
                ai.id = strno
                self.audio.append(ai)
            # Extended info may already have been parsed for this stream.
            self._apply_extinfo(strno)
        elif guid == GUIDS['ASF_Extended_Stream_Properties_Object']:
            streamid, langid, frametime = struct.unpack('<HHQ', s[72:84])
            (bitrate,) = struct.unpack('<I', s[40:40 + 4])
            if streamid not in self._extinfo:
                self._extinfo[streamid] = [None, None, None, {}]
            if frametime == 0:
                # Problaby VFR, report as 1000fps (which is what MPlayer does)
                frametime = 10000.0
            self._extinfo[streamid][:3] = [bitrate, 10000000.0 / frametime, langid]
            self._apply_extinfo(streamid)
        elif guid == GUIDS['ASF_Header_Extension_Object']:
            log.debug(u'ASF_Header_Extension_Object %d' % objsize)
            size = struct.unpack('<I', s[42:46])[0]
            data = s[46:46 + size]
            # The extension object wraps a sequence of nested header objects.
            while len(data):
                log.debug(u'Sub:')
                h = self._getnextheader(data)
                data = data[h[1]:]
        elif guid == GUIDS['ASF_Codec_List_Object']:
            log.debug(u'List Object')
            pass
        elif guid == GUIDS['ASF_Error_Correction_Object']:
            log.debug(u'Error Correction')
            pass
        elif guid == GUIDS['ASF_Content_Description_Object']:
            log.debug(u'Content Description Object')
            # Five length-prefixed strings: title, author, copyright,
            # description, rating.
            val = struct.unpack('<5H', s[24:24 + 10])
            pos = 34
            strings = []
            for i in val:
                ss = s[pos:pos + i].replace('\0', '').lstrip().rstrip()
                strings.append(ss)
                pos += i
            # Set empty strings to None
            strings = [x or None for x in strings]
            self.title, self.artist, self.copyright, self.caption, rating = strings
        elif guid == GUIDS['ASF_Extended_Content_Description_Object']:
            (count,) = struct.unpack('<H', s[24:26])
            pos = 26
            descriptor = {}
            for i in range(0, count):
                # Read additional content descriptors
                d = self._parsekv(s[pos:])
                pos += d[0]
                descriptor[d[1]] = d[2]
            self._appendtable('ASFDESCRIPTOR', descriptor)
        elif guid == GUIDS['ASF_Metadata_Object']:
            (count,) = struct.unpack('<H', s[24:26])
            pos = 26
            streams = {}
            for i in range(0, count):
                # Read additional content descriptors
                size, key, value, strno = self._parsekv2(s[pos:])
                if strno not in streams:
                    streams[strno] = {}
                streams[strno][key] = value
                pos += size
            for strno, metadata in streams.items():
                if strno not in self._extinfo:
                    self._extinfo[strno] = [None, None, None, {}]
                self._extinfo[strno][3].update(metadata)
                self._apply_extinfo(strno)
        elif guid == GUIDS['ASF_Language_List_Object']:
            count = struct.unpack('<H', s[24:26])[0]
            pos = 26
            for i in range(0, count):
                idlen = struct.unpack('<B', s[pos:pos + 1])[0]
                idstring = s[pos + 1:pos + 1 + idlen]
                idstring = unicode(idstring, 'utf-16').replace('\0', '')
                log.debug(u'Language: %d/%d: %r' % (i + 1, count, idstring))
                self._languages.append(idstring)
                pos += 1 + idlen
        elif guid == GUIDS['ASF_Stream_Bitrate_Properties_Object']:
            # This record contains stream bitrate with payload overhead. For
            # audio streams, we should have the average bitrate from
            # ASF_Stream_Properties_Object. For video streams, we get it from
            # ASF_Extended_Stream_Properties_Object. So this record is not
            # used.
            pass
        elif guid == GUIDS['ASF_Content_Encryption_Object'] or \
                guid == GUIDS['ASF_Extended_Content_Encryption_Object']:
            self._set('encrypted', True)
        else:
            # Just print the type:
            for h in GUIDS.keys():
                if GUIDS[h] == guid:
                    log.debug(u'Unparsed %r [%d]' % (h, objsize))
                    break
            else:
                u = "%.8X-%.4X-%.4X-%.2X%.2X-%s" % guid
                log.debug(u'unknown: len=%d [%d]' % (len(u), objsize))
        return r
class AsfAudio(core.AudioStream):
    """
    ASF audio parser for wma files.
    """
    def __init__(self):
        core.AudioStream.__init__(self)
        # Fixed identification for audio-only ASF (wma) content.
        self.mime = 'audio/x-ms-asf'
        self.type = 'asf format'
def Parser(file):
    """
    Wrapper around audio and av content.

    Parses *file* as ASF and returns the full AV container when it has
    video (or no audio); otherwise copies the shared attributes into an
    AsfAudio instance so pure-audio files present as audio content.
    """
    asf = Asf(file)
    if not len(asf.audio) or len(asf.video):
        # AV container
        return asf
    # No video but audio streams: expose it as plain audio content by
    # copying over every shared, not-yet-set attribute.
    audio = AsfAudio()
    for key in audio._keys:
        if key in asf._keys:
            if not getattr(audio, key, None):
                setattr(audio, key, getattr(asf, key))
    return audio
| gpl-3.0 |
mtnman38/Aggregate | aggForFoundation/dist/aggregate.app/Contents/Resources/lib/python2.7/email/quoprimime.py | 246 | 10848 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: email-sig@python.org
"""Quoted-printable content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
safely encode text that is in a character set similar to the 7-bit US ASCII
character set, but that includes some 8-bit characters that are normally not
allowed in email bodies or headers.
Quoted-printable is very space-inefficient for encoding binary files; use the
email.base64mime module for that instead.
This module provides an interface to encode and decode both headers and bodies
with quoted-printable encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:/From:/Cc: etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character
conversion necessary for proper internationalized headers; it only
does dumb encoding and decoding. To deal with the various line
wrapping issues, use the email.header module.
"""
__all__ = [
'body_decode',
'body_encode',
'body_quopri_check',
'body_quopri_len',
'decode',
'decodestring',
'encode',
'encodestring',
'header_decode',
'header_encode',
'header_quopri_check',
'header_quopri_len',
'quote',
'unquote',
]
import re
from string import hexdigits
from email.utils import fix_eols
CRLF = '\r\n'
NL = '\n'
# See also Charset.py
MISC_LEN = 7
hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
bqre = re.compile(r'[^ !-<>-~\t]')
# Helpers
def header_quopri_check(c):
    """Return True if the character should be escaped with header quopri."""
    # hqre matches anything outside the RFC 2047 'Q'-encoding safe set.
    return bool(hqre.match(c))
def body_quopri_check(c):
    """Return True if the character should be escaped with body quopri."""
    # bqre matches anything outside the quoted-printable body safe set.
    return bool(bqre.match(c))
def header_quopri_len(s):
    """Return the length of str when it is encoded with header quopri."""
    # Characters needing quoting expand to '=XX' (3 chars); the rest stay 1.
    return sum(3 if hqre.match(ch) else 1 for ch in s)
def body_quopri_len(str):
    """Return the length of str when it is encoded with body quopri."""
    # '=XX' escapes take 3 characters; safe characters take 1.
    return sum(3 if bqre.match(ch) else 1 for ch in str)
def _max_append(L, s, maxlen, extra=''):
if not L:
L.append(s.lstrip())
elif len(L[-1]) + len(s) <= maxlen:
L[-1] += extra + s
else:
L.append(s.lstrip())
def unquote(s):
    """Turn a string in the form =AB to the ASCII character with value 0xab"""
    code = int(s[1:3], 16)
    return chr(code)
def quote(c):
    """Return the quoted-printable escape '=XX' for a single character."""
    return '={0:02X}'.format(ord(c))
def header_encode(header, charset="iso-8859-1", keep_eols=False,
                  maxlinelen=76, eol=NL):
    """Encode a single header line with quoted-printable (like) encoding.

    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
    used specifically for email header fields to allow charsets with mostly 7
    bit characters (and some 8 bit) to remain more or less readable in non-RFC
    2045 aware mail clients.

    charset names the character set to use to encode the header.  It defaults
    to iso-8859-1.

    The resulting string will be in the form:

    "=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
      =?charset?q?Silly_=C8nglish_Kn=EEghts?="

    with each line wrapped safely at, at most, maxlinelen characters (defaults
    to 76 characters).  If maxlinelen is None, the entire string is encoded in
    one chunk with no splitting.

    End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
    to the canonical email line separator \\r\\n unless the keep_eols
    parameter is True (the default is False).

    Each line of the header will be terminated in the value of eol, which
    defaults to "\\n".  Set this to "\\r\\n" if you are using the result of
    this function directly in email.
    """
    # Return empty headers unchanged
    if not header:
        return header
    if not keep_eols:
        header = fix_eols(header)
    # Quopri encode each line, in encoded chunks no greater than maxlinelen in
    # length, after the RFC chrome is added in.
    quoted = []
    if maxlinelen is None:
        # An obnoxiously large number that's good enough
        max_encoded = 100000
    else:
        # Budget for the '=?charset?q?...?=' wrapper around every chunk.
        max_encoded = maxlinelen - len(charset) - MISC_LEN - 1
    for c in header:
        # Space may be represented as _ instead of =20 for readability
        if c == ' ':
            _max_append(quoted, '_', max_encoded)
        # These characters can be included verbatim
        elif not hqre.match(c):
            _max_append(quoted, c, max_encoded)
        # Otherwise, replace with hex value like =E2
        else:
            _max_append(quoted, "=%02X" % ord(c), max_encoded)
    # Now add the RFC chrome to each encoded chunk and glue the chunks
    # together.  BAW: should we be able to specify the leading whitespace in
    # the joiner?
    joiner = eol + ' '
    return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])
def encode(body, binary=False, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.

    If binary is False (the default), end-of-line characters will be converted
    to the canonical email end-of-line sequence \\r\\n.  Otherwise they will
    be left verbatim.

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).  Long lines will have the `soft linefeed' quoted-printable
    character "=" appended to them, so the decoded text will be identical to
    the original text.
    """
    if not body:
        return body
    if not binary:
        body = fix_eols(body)
    # BAW: We're accumulating the body text by string concatenation.  That
    # can't be very efficient, but I don't have time now to rewrite it.  It
    # just feels like this algorithm could be more efficient.
    encoded_body = ''
    lineno = -1
    # Preserve line endings here so we can check later to see an eol needs to
    # be added to the output later.
    lines = body.splitlines(1)
    for line in lines:
        # But strip off line-endings for processing this line.
        if line.endswith(CRLF):
            line = line[:-2]
        elif line[-1] in CRLF:
            line = line[:-1]
        lineno += 1
        encoded_line = ''
        prev = None
        linelen = len(line)
        # Now we need to examine every character to see if it needs to be
        # quopri encoded.  BAW: again, string concatenation is inefficient.
        for j in range(linelen):
            c = line[j]
            prev = c
            if bqre.match(c):
                c = quote(c)
            elif j+1 == linelen:
                # Check for whitespace at end of line; special case
                if c not in ' \t':
                    encoded_line += c
                prev = c
                continue
            # Check to see to see if the line has reached its maximum length
            if len(encoded_line) + len(c) >= maxlinelen:
                # Emit a soft line break ('=') and start a new output line.
                encoded_body += encoded_line + '=' + eol
                encoded_line = ''
            encoded_line += c
        # Now at end of line..
        if prev and prev in ' \t':
            # Special case for whitespace at end of file
            if lineno + 1 == len(lines):
                # Trailing whitespace on the very last line must be quoted.
                prev = quote(prev)
                if len(encoded_line) + len(prev) > maxlinelen:
                    encoded_body += encoded_line + '=' + eol + prev
                else:
                    encoded_body += encoded_line + prev
            # Just normal whitespace at end of line
            else:
                encoded_body += encoded_line + prev + '=' + eol
            encoded_line = ''
        # Now look at the line we just finished and it has a line ending, we
        # need to add eol to the end of the line.
        if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
            encoded_body += encoded_line + eol
        else:
            encoded_body += encoded_line
        encoded_line = ''
    return encoded_body
# For convenience and backwards compatibility w/ standard base64 module
body_encode = encode
encodestring = encode
# BAW: I'm not sure if the intent was for the signature of this function to be
# the same as base64MIME.decode() or not...
def decode(encoded, eol=NL):
    """Decode a quoted-printable string.

    Lines are separated with eol, which defaults to \\n.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''
    for line in encoded.splitlines():
        line = line.rstrip()
        if not line:
            decoded += eol
            continue
        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
            # a soft line break.
            elif i+1 == n:
                # Soft line break: swallow the '=' and continue on the
                # same logical output line (no eol emitted).
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                decoded += unquote(line[i:i+3])
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1
            if i == n:
                decoded += eol
    # Special case if original string did not end with eol
    if not encoded.endswith(eol) and decoded.endswith(eol):
        decoded = decoded[:-1]
    return decoded
# For convenience and backwards compatibility w/ standard base64 module
body_decode = decode
decodestring = decode
def _unquote_match(match):
    """Turn a match in the form =AB to the ASCII character with value 0xab"""
    return unquote(match.group(0))
# Header decoding is done a bit differently
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.

    This function does not parse a full MIME header value encoded with
    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
    the high level email.header class for that functionality.
    """
    # In the 'Q' encoding a literal underscore stands for a space; then
    # every '=XX' escape is replaced by its character value.
    return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s.replace('_', ' '))
| gpl-2.0 |
Sumith1896/sympy | sympy/functions/elementary/tests/test_complexes.py | 2 | 22502 | from sympy import (
Abs, adjoint, arg, atan2, conjugate, cos, DiracDelta, E, exp, expand,
Expr, Function, Heaviside, I, im, log, nan, oo, pi, Rational, re, S, C,
sign, sin, sqrt, Symbol, symbols, transpose, zoo, exp_polar, Piecewise
)
from sympy.utilities.pytest import XFAIL, raises
from sympy.utilities.randtest import comp
def N_equals(a, b):
    """Check whether two complex numbers are numerically close"""
    # Numerically evaluate both sides and compare to a 1e-6 tolerance
    # via sympy.utilities.randtest.comp.
    return comp(a.n(), b.n(), 1.e-6)
def test_re():
    """re(): explicit values, symbolic identities, derivatives,
    rewrites, and assumption (algebraic) handling."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    assert re(nan) == nan
    assert re(oo) == oo
    assert re(-oo) == -oo
    assert re(0) == 0
    assert re(1) == 1
    assert re(-1) == -1
    assert re(E) == E
    assert re(-E) == -E
    assert re(x) == re(x)
    assert re(x*I) == -im(x)
    assert re(r*I) == 0
    assert re(r) == r
    assert re(i*I) == I * i
    assert re(i) == 0
    assert re(x + y) == re(x + y)
    assert re(x + r) == re(x) + r
    assert re(re(x)) == re(x)
    assert re(2 + I) == 2
    assert re(x + I) == re(x)
    assert re(x + y*I) == re(x) - im(y)
    assert re(x + r*I) == re(x)
    assert re(log(2*I)) == log(2)
    assert re((2 + I)**2).expand(complex=True) == 3
    assert re(conjugate(x)) == re(x)
    assert conjugate(re(x)) == re(x)
    assert re(x).as_real_imag() == (re(x), 0)
    assert re(i*r*x).diff(r) == re(i*x)
    assert re(i*r*x).diff(i) == I*r*im(x)
    assert re(
        sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)
    assert re(a * (2 + b*I)) == 2*a
    assert re((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2)
    assert re(x).rewrite(im) == x - im(x)
    assert (x + re(y)).rewrite(re, im) == x + y - im(y)
    a = Symbol('a', algebraic=True)
    t = Symbol('t', transcendental=True)
    x = Symbol('x')
    assert re(a).is_algebraic
    assert re(x).is_algebraic is None
    assert re(t).is_algebraic is False
def test_im():
    """im(): explicit values, symbolic identities, derivatives,
    rewrites, and assumption (algebraic) handling."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    assert im(nan) == nan
    assert im(oo*I) == oo
    assert im(-oo*I) == -oo
    assert im(0) == 0
    assert im(1) == 0
    assert im(-1) == 0
    assert im(E*I) == E
    assert im(-E*I) == -E
    assert im(x) == im(x)
    assert im(x*I) == re(x)
    assert im(r*I) == r
    assert im(r) == 0
    assert im(i*I) == 0
    assert im(i) == -I * i
    assert im(x + y) == im(x + y)
    assert im(x + r) == im(x)
    assert im(x + r*I) == im(x) + r
    assert im(im(x)*I) == im(x)
    assert im(2 + I) == 1
    assert im(x + I) == im(x) + 1
    assert im(x + y*I) == im(x) + re(y)
    assert im(x + r*I) == im(x) + r
    assert im(log(2*I)) == pi/2
    assert im((2 + I)**2).expand(complex=True) == 4
    assert im(conjugate(x)) == -im(x)
    assert conjugate(im(x)) == im(x)
    assert im(x).as_real_imag() == (im(x), 0)
    assert im(i*r*x).diff(r) == im(i*x)
    assert im(i*r*x).diff(i) == -I * re(r*x)
    assert im(
        sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)
    assert im(a * (2 + b*I)) == a*b
    assert im((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2
    assert im(x).rewrite(re) == x - re(x)
    assert (x + im(y)).rewrite(im, re) == x + y - re(y)
    a = Symbol('a', algebraic=True)
    t = Symbol('t', transcendental=True)
    x = Symbol('x')
    # These previously asserted on re() by copy-paste from test_re;
    # test_im must exercise im() here.
    assert im(a).is_algebraic
    assert im(x).is_algebraic is None
    assert im(t).is_algebraic is False
def test_sign():
    """sign(): numeric values, assumption propagation (imaginary, real,
    positive, nonzero, zero), derivatives, rewrites, and evaluation of
    expressions that are provably zero or nonzero."""
    assert sign(1.2) == 1
    assert sign(-1.2) == -1
    assert sign(3*I) == I
    assert sign(-3*I) == -I
    assert sign(0) == 0
    assert sign(nan) == nan
    assert sign(2 + 2*I).doit() == sqrt(2)*(2 + 2*I)/4
    assert sign(2 + 3*I).simplify() == sign(2 + 3*I)
    assert sign(2 + 2*I).simplify() == sign(1 + I)
    assert sign(im(sqrt(1 - sqrt(3)))) == 1
    assert sign(sqrt(1 - sqrt(3))) == I
    x = Symbol('x')
    assert sign(x).is_finite is True
    assert sign(x).is_complex is True
    assert sign(x).is_imaginary is None
    assert sign(x).is_integer is None
    assert sign(x).is_real is None
    assert sign(x).is_zero is None
    assert sign(x).doit() == sign(x)
    assert sign(1.2*x) == sign(x)
    assert sign(2*x) == sign(x)
    assert sign(I*x) == I*sign(x)
    assert sign(-2*I*x) == -I*sign(x)
    assert sign(conjugate(x)) == conjugate(sign(x))
    p = Symbol('p', positive=True)
    n = Symbol('n', negative=True)
    m = Symbol('m', negative=True)
    assert sign(2*p*x) == sign(x)
    assert sign(n*x) == -sign(x)
    assert sign(n*m*x) == sign(x)
    x = Symbol('x', imaginary=True)
    assert sign(x).is_imaginary is True
    assert sign(x).is_integer is False
    assert sign(x).is_real is False
    assert sign(x).is_zero is False
    assert sign(x).diff(x) == 2*DiracDelta(-I*x)
    assert sign(x).doit() == x / Abs(x)
    assert conjugate(sign(x)) == -sign(x)
    x = Symbol('x', real=True)
    assert sign(x).is_imaginary is False
    assert sign(x).is_integer is True
    assert sign(x).is_real is True
    assert sign(x).is_zero is None
    assert sign(x).diff(x) == 2*DiracDelta(x)
    assert sign(x).doit() == sign(x)
    assert conjugate(sign(x)) == sign(x)
    x = Symbol('x', nonzero=True)
    assert sign(x).is_imaginary is None
    assert sign(x).is_integer is None
    assert sign(x).is_real is None
    assert sign(x).is_zero is False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    x = Symbol('x', positive=True)
    assert sign(x).is_imaginary is False
    assert sign(x).is_integer is True
    assert sign(x).is_real is True
    assert sign(x).is_zero is False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    x = 0
    assert sign(x).is_imaginary is False
    assert sign(x).is_integer is True
    assert sign(x).is_real is True
    assert sign(x).is_zero is True
    assert sign(x).doit() == 0
    assert sign(Abs(x)) == 0
    assert Abs(sign(x)) == 0
    nz = Symbol('nz', nonzero=True, integer=True)
    assert sign(nz).is_imaginary is False
    assert sign(nz).is_integer is True
    assert sign(nz).is_real is True
    assert sign(nz).is_zero is False
    assert sign(nz)**2 == 1
    assert (sign(nz)**3).args == (sign(nz), 3)
    assert sign(Symbol('x', nonnegative=True)).is_nonnegative
    assert sign(Symbol('x', nonnegative=True)).is_nonpositive is None
    assert sign(Symbol('x', nonpositive=True)).is_nonnegative is None
    assert sign(Symbol('x', nonpositive=True)).is_nonpositive
    assert sign(Symbol('x', real=True)).is_nonnegative is None
    assert sign(Symbol('x', real=True)).is_nonpositive is None
    assert sign(Symbol('x', real=True, zero=False)).is_nonpositive is None
    x, y = Symbol('x', real=True), Symbol('y')
    assert sign(x).rewrite(Piecewise) == \
        Piecewise((1, x > 0), (-1, x < 0), (0, True))
    assert sign(y).rewrite(Piecewise) == sign(y)
    assert sign(x).rewrite(Heaviside) == 2*Heaviside(x)-1
    assert sign(y).rewrite(Heaviside) == sign(y)
    # evaluate what can be evaluated
    assert sign(exp_polar(I*pi)*pi) is S.NegativeOne
    eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
    # if there is a fast way to know when and when you cannot prove an
    # expression like this is zero then the equality to zero is ok
    assert sign(eq).func is sign or sign(eq) == 0
    # but sometimes it's hard to do this so it's better not to load
    # abs down with tests that will be very slow
    q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
    p = expand(q**3)**Rational(1, 3)
    d = p - q
    assert sign(d).func is sign or sign(d) == 0
def test_as_real_imag():
    """as_real_imag(): split expressions into (real, imaginary) parts."""
    n = pi**1000
    # the special code for working out the real
    # and complex parts of a power with Integer exponent
    # should not run if there is no imaginary part, hence
    # this should not hang
    assert n.as_real_imag() == (n, 0)
    # issue 6261
    x = Symbol('x')
    assert sqrt(x).as_real_imag() == \
        ((re(x)**2 + im(x)**2)**(S(1)/4)*cos(atan2(im(x), re(x))/2),
        (re(x)**2 + im(x)**2)**(S(1)/4)*sin(atan2(im(x), re(x))/2))
    # issue 3853
    a, b = symbols('a,b', real=True)
    assert ((1 + sqrt(a + b*I))/2).as_real_imag() == \
        (
            (a**2 + b**2)**Rational(
                1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2),
            (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2)
    assert sqrt(a**2).as_real_imag() == (sqrt(a**2), 0)
    i = symbols('i', imaginary=True)
    assert sqrt(i**2).as_real_imag() == (0, abs(i))
@XFAIL
def test_sign_issue_3068():
    """Known failure: sign/round of pi**1000 minus its integer part."""
    n = pi**1000
    i = int(n)
    assert (n - i).round() == 1  # doesn't hang
    assert sign(n - i) == 1
    # perhaps it's not possible to get the sign right when
    # only 1 digit is being requested for this situation;
    # 2 digits works
    assert (n - x).n(1, subs={x: i}) > 0
    assert (n - x).n(2, subs={x: i}) > 0
def test_Abs():
    """Abs(): numeric evaluation, symbolic simplification under assumptions,
    differentiation, powers, and extended/complex arguments."""
    raises(TypeError, lambda: Abs(C.Interval(2, 3)))  # issue 8717
    x, y = symbols('x,y')
    assert sign(sign(x)) == sign(x)
    assert sign(x*y).func is sign
    assert Abs(0) == 0
    assert Abs(1) == 1
    assert Abs(-1) == 1
    assert Abs(I) == 1
    assert Abs(-I) == 1
    assert Abs(nan) == nan
    assert Abs(I * pi) == pi
    assert Abs(-I * pi) == pi
    assert Abs(I * x) == Abs(x)
    assert Abs(-I * x) == Abs(x)
    assert Abs(-2*x) == 2*Abs(x)
    assert Abs(-2.0*x) == 2.0*Abs(x)
    assert Abs(2*pi*x*y) == 2*pi*Abs(x*y)
    assert Abs(conjugate(x)) == Abs(x)
    assert conjugate(Abs(x)) == Abs(x)
    a = Symbol('a', positive=True)
    assert Abs(2*pi*x*a) == 2*pi*a*Abs(x)
    assert Abs(2*pi*I*x*a) == 2*pi*a*Abs(x)
    x = Symbol('x', real=True)
    n = Symbol('n', integer=True)
    assert Abs((-1)**n) == 1
    assert x**(2*n) == Abs(x)**(2*n)
    assert Abs(x).diff(x) == sign(x)
    assert abs(x) == Abs(x)  # Python built-in
    assert Abs(x)**3 == x**2*Abs(x)
    assert Abs(x)**4 == x**4
    assert (
        Abs(x)**(3*n)).args == (Abs(x), 3*n)  # leave symbolic odd unchanged
    assert (1/Abs(x)).args == (Abs(x), -1)
    assert 1/Abs(x)**3 == 1/(x**2*Abs(x))
    assert Abs(x)**-3 == Abs(x)/(x**4)
    assert Abs(x**3) == x**2*Abs(x)
    x = Symbol('x', imaginary=True)
    assert Abs(x).diff(x) == -sign(x)
    eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
    # if there is a fast way to know when you can and when you cannot prove an
    # expression like this is zero then the equality to zero is ok
    assert abs(eq).func is Abs or abs(eq) == 0
    # but sometimes it's hard to do this so it's better not to load
    # abs down with tests that will be very slow
    q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
    p = expand(q**3)**Rational(1, 3)
    d = p - q
    assert abs(d).func is Abs or abs(d) == 0
    assert Abs(4*exp(pi*I/4)) == 4
    assert Abs(3**(2 + I)) == 9
    assert Abs((-3)**(1 - I)) == 3*exp(pi)
    assert Abs(oo) is oo
    assert Abs(-oo) is oo
    assert Abs(oo + I) is oo
    assert Abs(oo + I*oo) is oo
    a = Symbol('a', algebraic=True)
    t = Symbol('t', transcendental=True)
    x = Symbol('x')
    assert re(a).is_algebraic
    assert re(x).is_algebraic is None
    assert re(t).is_algebraic is False
def test_Abs_rewrite():
    """Rewrite Abs in terms of Heaviside, Piecewise and sign."""
    x = Symbol('x', real=True)
    a = Abs(x).rewrite(Heaviside).expand()
    assert a == x*Heaviside(x) - x*Heaviside(-x)
    for i in [-2, -1, 0, 1, 2]:
        assert a.subs(x, i) == abs(i)
    y = Symbol('y')
    # without a realness assumption the rewrite is a no-op
    assert Abs(y).rewrite(Heaviside) == Abs(y)
    x, y = Symbol('x', real=True), Symbol('y')
    assert Abs(x).rewrite(Piecewise) == Piecewise((x, x >= 0), (-x, True))
    assert Abs(y).rewrite(Piecewise) == Abs(y)
    assert Abs(y).rewrite(sign) == y/sign(y)
def test_Abs_real():
    """Abs/sqrt identities that hold only for real symbols."""
    # test some properties of abs that only apply
    # to real numbers
    x = Symbol('x', complex=True)
    assert sqrt(x**2) != Abs(x)
    assert Abs(x**2) != x**2
    x = Symbol('x', real=True)
    assert sqrt(x**2) == Abs(x)
    assert Abs(x**2) == x**2
    # if the symbol is zero, the following will still apply
    nn = Symbol('nn', nonnegative=True, real=True)
    np = Symbol('np', nonpositive=True, real=True)
    assert Abs(nn) == nn
    assert Abs(np) == -np
def test_Abs_properties():
    """Assumption propagation through Abs (real/rational/positive/parity)."""
    x = Symbol('x')
    assert Abs(x).is_real is True
    assert Abs(x).is_rational is None
    assert Abs(x).is_positive is None
    assert Abs(x).is_nonnegative is True
    z = Symbol('z', complex=True, zero=False)
    assert Abs(z).is_real is True
    assert Abs(z).is_rational is None
    assert Abs(z).is_positive is True
    assert Abs(z).is_zero is False
    p = Symbol('p', positive=True)
    assert Abs(p).is_real is True
    assert Abs(p).is_rational is None
    assert Abs(p).is_positive is True
    assert Abs(p).is_zero is False
    q = Symbol('q', rational=True)
    assert Abs(q).is_rational is True
    assert Abs(q).is_integer is None
    assert Abs(q).is_positive is None
    assert Abs(q).is_nonnegative is True
    i = Symbol('i', integer=True)
    assert Abs(i).is_integer is True
    assert Abs(i).is_positive is None
    assert Abs(i).is_nonnegative is True
    e = Symbol('n', even=True)
    ne = Symbol('ne', real=True, even=False)
    assert Abs(e).is_even
    assert Abs(ne).is_even is False
    assert Abs(i).is_even is None
    o = Symbol('n', odd=True)
    no = Symbol('no', real=True, odd=False)
    assert Abs(o).is_odd
    assert Abs(no).is_odd is False
    assert Abs(i).is_odd is None
def test_abs():
    """Built-in abs() delegates to sympy's Abs."""
    # this tests that abs calls Abs; don't rename to
    # test_Abs since that test is already above
    a = Symbol('a', positive=True)
    assert abs(I*(1 + a)**2) == (1 + a)**2
def test_arg():
    """arg(): complex argument of numbers, symbols under assumptions, and
    sign-preserving extraction of positive factors."""
    assert arg(0) == nan
    assert arg(1) == 0
    assert arg(-1) == pi
    assert arg(I) == pi/2
    assert arg(-I) == -pi/2
    assert arg(1 + I) == pi/4
    assert arg(-1 + I) == 3*pi/4
    assert arg(1 - I) == -pi/4
    f = Function('f')
    assert not arg(f(0) + I*f(1)).atoms(re)
    p = Symbol('p', positive=True)
    assert arg(p) == 0
    n = Symbol('n', negative=True)
    assert arg(n) == pi
    x = Symbol('x')
    assert conjugate(arg(x)) == arg(x)
    e = p + I*p**2
    assert arg(e) == arg(1 + p*I)
    # make sure sign doesn't swap
    e = -2*p + 4*I*p**2
    assert arg(e) == arg(-1 + 2*p*I)
    # make sure sign isn't lost
    x = symbols('x', real=True)  # could be zero
    e = x + I*x
    assert arg(e) == arg(x*(1 + I))
    assert arg(e/p) == arg(x*(1 + I))
    e = p*cos(p) + I*log(p)*exp(p)
    assert arg(e).args[0] == e
    # keep it simple -- let the user do more advanced cancellation
    e = (p + 1) + I*(p**2 - 1)
    assert arg(e).args[0] == e
def test_arg_rewrite():
    """arg() rewrites as atan2 of the imaginary and real parts."""
    assert arg(1 + I) == atan2(1, 1)
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert arg(x + I*y).rewrite(atan2) == atan2(y, x)
def test_adjoint():
    """adjoint(): hermitian/antihermitian symbols and distribution over
    arithmetic for commutative and noncommutative symbols."""
    a = Symbol('a', antihermitian=True)
    b = Symbol('b', hermitian=True)
    assert adjoint(a) == -a
    assert adjoint(I*a) == I*a
    assert adjoint(b) == b
    assert adjoint(I*b) == -I*b
    assert adjoint(a*b) == -b*a
    assert adjoint(I*a*b) == I*b*a
    x, y = symbols('x y')
    assert adjoint(adjoint(x)) == x
    assert adjoint(x + y) == adjoint(x) + adjoint(y)
    assert adjoint(x - y) == adjoint(x) - adjoint(y)
    assert adjoint(x * y) == adjoint(x) * adjoint(y)
    assert adjoint(x / y) == adjoint(x) / adjoint(y)
    assert adjoint(-x) == -adjoint(x)
    # noncommutative case: products reverse their factor order
    x, y = symbols('x y', commutative=False)
    assert adjoint(adjoint(x)) == x
    assert adjoint(x + y) == adjoint(x) + adjoint(y)
    assert adjoint(x - y) == adjoint(x) - adjoint(y)
    assert adjoint(x * y) == adjoint(y) * adjoint(x)
    assert adjoint(x / y) == 1 / adjoint(y) * adjoint(x)
    assert adjoint(-x) == -adjoint(x)
def test_conjugate():
    """conjugate(): real/imaginary assumptions, distribution over
    arithmetic, and algebraicity propagation through re()."""
    a = Symbol('a', real=True)
    b = Symbol('b', imaginary=True)
    assert conjugate(a) == a
    assert conjugate(I*a) == -I*a
    assert conjugate(b) == -b
    assert conjugate(I*b) == I*b
    assert conjugate(a*b) == -a*b
    assert conjugate(I*a*b) == I*a*b
    x, y = symbols('x y')
    assert conjugate(conjugate(x)) == x
    assert conjugate(x + y) == conjugate(x) + conjugate(y)
    assert conjugate(x - y) == conjugate(x) - conjugate(y)
    assert conjugate(x * y) == conjugate(x) * conjugate(y)
    assert conjugate(x / y) == conjugate(x) / conjugate(y)
    assert conjugate(-x) == -conjugate(x)
    a = Symbol('a', algebraic=True)
    t = Symbol('t', transcendental=True)
    assert re(a).is_algebraic
    assert re(x).is_algebraic is None
    assert re(t).is_algebraic is False
def test_conjugate_transpose():
    """Identities relating conjugate, transpose and adjoint, including a
    custom object that only defines a transpose."""
    x = Symbol('x')
    assert conjugate(transpose(x)) == adjoint(x)
    assert transpose(conjugate(x)) == adjoint(x)
    assert adjoint(transpose(x)) == conjugate(x)
    assert transpose(adjoint(x)) == conjugate(x)
    assert adjoint(conjugate(x)) == transpose(x)
    assert conjugate(adjoint(x)) == transpose(x)

    class Symmetric(Expr):
        # Decline adjoint/conjugate evaluation (return None); transpose
        # evaluates to self.
        def _eval_adjoint(self):
            return None

        def _eval_conjugate(self):
            return None

        def _eval_transpose(self):
            return self
    x = Symmetric()
    assert conjugate(x) == adjoint(x)
    assert transpose(x) == x
def test_transpose():
    """transpose(): complex symbols and distribution over arithmetic for
    commutative and noncommutative symbols."""
    a = Symbol('a', complex=True)
    assert transpose(a) == a
    assert transpose(I*a) == I*a
    x, y = symbols('x y')
    assert transpose(transpose(x)) == x
    assert transpose(x + y) == transpose(x) + transpose(y)
    assert transpose(x - y) == transpose(x) - transpose(y)
    assert transpose(x * y) == transpose(x) * transpose(y)
    assert transpose(x / y) == transpose(x) / transpose(y)
    assert transpose(-x) == -transpose(x)
    # noncommutative case: products reverse their factor order
    x, y = symbols('x y', commutative=False)
    assert transpose(transpose(x)) == x
    assert transpose(x + y) == transpose(x) + transpose(y)
    assert transpose(x - y) == transpose(x) - transpose(y)
    assert transpose(x * y) == transpose(y) * transpose(x)
    assert transpose(x / y) == 1 / transpose(y) * transpose(x)
    assert transpose(-x) == -transpose(x)
def test_issue_4035():
    """expand(trig=True) must leave Abs, sign and arg untouched."""
    x = Symbol('x')
    assert Abs(x).expand(trig=True) == Abs(x)
    assert sign(x).expand(trig=True) == sign(x)
    assert arg(x).expand(trig=True) == arg(x)
def test_issue_3206():
    """Abs is idempotent: Abs(Abs(x)) == Abs(x)."""
    x = Symbol('x')
    assert Abs(Abs(x)) == Abs(x)
def test_issue_4754_derivative_conjugate():
    """Differentiation commutes (up to sign) with conjugation for real
    and imaginary arguments."""
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    assert (f(x).conjugate()).diff(x) == (f(x).diff(x)).conjugate()
    assert (f(y).conjugate()).diff(y) == -(f(y).diff(y)).conjugate()
def test_derivatives_issue_4757():
    """Derivatives of re, im, Abs and arg of unevaluated functions."""
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    assert re(f(x)).diff(x) == re(f(x).diff(x))
    assert im(f(x)).diff(x) == im(f(x).diff(x))
    assert re(f(y)).diff(y) == -I*im(f(y).diff(y))
    assert im(f(y)).diff(y) == -I*re(f(y).diff(y))
    assert Abs(f(x)).diff(x).subs(f(x), 1 + I*x).doit() == x/sqrt(1 + x**2)
    assert arg(f(x)).diff(x).subs(f(x), 1 + I*x**2).doit() == 2*x/(1 + x**4)
    assert Abs(f(y)).diff(y).subs(f(y), 1 + y).doit() == -y/sqrt(1 - y**2)
    assert arg(f(y)).diff(y).subs(f(y), I + y**2).doit() == 2*y/(1 + y**4)
def test_periodic_argument():
    """periodic_argument/unbranched_argument and their interplay with
    polar_lift and principal_branch."""
    from sympy import (periodic_argument, unbranched_argument, oo,
                       principal_branch, polar_lift, pi)
    x = Symbol('x')
    p = Symbol('p', positive=True)
    assert unbranched_argument(2 + I) == periodic_argument(2 + I, oo)
    assert unbranched_argument(1 + x) == periodic_argument(1 + x, oo)
    assert N_equals(unbranched_argument((1 + I)**2), pi/2)
    assert N_equals(unbranched_argument((1 - I)**2), -pi/2)
    assert N_equals(periodic_argument((1 + I)**2, 3*pi), pi/2)
    assert N_equals(periodic_argument((1 - I)**2, 3*pi), -pi/2)
    assert unbranched_argument(principal_branch(x, pi)) == \
        periodic_argument(x, pi)
    # polar_lift is transparent for large enough periods ...
    assert unbranched_argument(polar_lift(2 + I)) == unbranched_argument(2 + I)
    assert periodic_argument(polar_lift(2 + I), 2*pi) == \
        periodic_argument(2 + I, 2*pi)
    assert periodic_argument(polar_lift(2 + I), 3*pi) == \
        periodic_argument(2 + I, 3*pi)
    # ... but not for period pi, where the expression stays unevaluated
    assert periodic_argument(polar_lift(2 + I), pi) == \
        periodic_argument(polar_lift(2 + I), pi)
    assert unbranched_argument(polar_lift(1 + I)) == pi/4
    assert periodic_argument(2*p, p) == periodic_argument(p, p)
    assert periodic_argument(pi*p, p) == periodic_argument(p, p)
    assert Abs(polar_lift(1 + I)) == Abs(1 + I)
@XFAIL
def test_principal_branch_fail():
    """Known failure: numeric evaluation of principal_branch((1+I)**2, pi/2)."""
    # TODO XXX why does abs(x)._eval_evalf() not fall back to global evalf?
    assert N_equals(principal_branch((1 + I)**2, pi/2), 0)
def test_principal_branch():
    """principal_branch(): branch reduction, polar_lift interplay, numeric
    evaluation, and rejection of invalid period arguments."""
    from sympy import principal_branch, polar_lift, exp_polar
    p = Symbol('p', positive=True)
    x = Symbol('x')
    neg = Symbol('x', negative=True)
    assert principal_branch(polar_lift(x), p) == principal_branch(x, p)
    assert principal_branch(polar_lift(2 + I), p) == principal_branch(2 + I, p)
    assert principal_branch(2*x, p) == 2*principal_branch(x, p)
    assert principal_branch(1, pi) == exp_polar(0)
    assert principal_branch(-1, 2*pi) == exp_polar(I*pi)
    assert principal_branch(-1, pi) == exp_polar(0)
    assert principal_branch(exp_polar(3*pi*I)*x, 2*pi) == \
        principal_branch(exp_polar(I*pi)*x, 2*pi)
    assert principal_branch(neg*exp_polar(pi*I), 2*pi) == neg*exp_polar(-I*pi)
    assert N_equals(principal_branch((1 + I)**2, 2*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 3*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 1*pi), 2*I)
    # test argument sanitization: non-positive/non-real periods stay
    # unevaluated rather than raising
    assert principal_branch(x, I).func is principal_branch
    assert principal_branch(x, -4).func is principal_branch
    assert principal_branch(x, -oo).func is principal_branch
    assert principal_branch(x, zoo).func is principal_branch
@XFAIL
def test_issue_6167_6151():
    """Known failure: sign/abs of huge exact differences like pi**1000 - int."""
    n = pi**1000
    i = int(n)
    assert sign(n - i) == 1
    assert abs(n - i) == n - i
    eps = pi**-1500
    big = pi**1000
    # NOTE(review): `x` is not defined in this function -- presumably a
    # module-level Symbol; confirm against the full test module.
    one = cos(x)**2 + sin(x)**2
    e = big*one - big + eps
    assert sign(simplify(e)) == 1
    for xi in (111, 11, 1, S(1)/10):
        assert sign(e.subs(x, xi)) == 1
| bsd-3-clause |
vikitripathi/MB-MessApp-API | messApp/env/lib/python2.7/site-packages/setuptools/command/test.py | 113 | 6526 | import unittest
from unittest import TestLoader
from setuptools import Command
from distutils.errors import DistutilsOptionError
import sys
from pkg_resources import (resource_listdir, resource_exists,
normalize_path, working_set, _namespace_packages, add_activation_listener,
require, EntryPoint)
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
    """Test loader that recursively scans packages for test modules."""

    def loadTestsFromModule(self, module):
        """Return a suite of all tests cases contained in the given module

        If the module is a package, load tests from all the modules in it.
        If the module has an ``additional_tests`` function, call it and add
        the return value to the tests.
        """
        tests = []
        if module.__name__ != 'setuptools.tests.doctest':  # ugh
            tests.append(TestLoader.loadTestsFromModule(self, module))

        if hasattr(module, "additional_tests"):
            tests.append(module.additional_tests())

        # Packages expose __path__: recurse into each contained module and
        # subpackage (via pkg_resources, so zipped eggs work too).
        if hasattr(module, '__path__'):
            for file in resource_listdir(module.__name__, ''):
                if file.endswith('.py') and file != '__init__.py':
                    # plain module: strip the ".py" extension
                    submodule = module.__name__ + '.' + file[:-3]
                else:
                    # only recurse into directories that are real packages
                    if resource_exists(module.__name__, file + '/__init__.py'):
                        submodule = module.__name__+'.'+file
                    else:
                        continue
                tests.append(self.loadTestsFromName(submodule))

        if len(tests) != 1:
            return self.suiteClass(tests)
        else:
            return tests[0]  # don't create a nested suite for only one return
class test(Command):
    """Command to run unit tests after in-place build"""

    description = "run unit tests after in-place build"

    user_options = [
        ('test-module=','m', "Run 'test_suite' in specified module"),
        ('test-suite=','s',
            "Test suite to run (e.g. 'some_module.test_suite')"),
        ('test-runner=', 'r', "Test runner to use"),
    ]

    def initialize_options(self):
        # All options start unset; finalize_options() derives defaults.
        self.test_suite = None
        self.test_module = None
        self.test_loader = None
        self.test_runner = None

    def finalize_options(self):
        # Derive the suite from --test-module unless --test-suite (or the
        # distribution's test_suite) was given; the two are exclusive.
        if self.test_suite is None:
            if self.test_module is None:
                self.test_suite = self.distribution.test_suite
            else:
                self.test_suite = self.test_module + ".test_suite"
        elif self.test_module:
            raise DistutilsOptionError(
                "You may specify a module or a suite, but not both"
            )

        self.test_args = [self.test_suite]

        if self.verbose:
            self.test_args.insert(0,'--verbose')
        if self.test_loader is None:
            self.test_loader = getattr(self.distribution, 'test_loader', None)
        if self.test_loader is None:
            self.test_loader = "setuptools.command.test:ScanningLoader"
        if self.test_runner is None:
            self.test_runner = getattr(self.distribution, 'test_runner', None)

    def with_project_on_sys_path(self, func):
        # Build the project (out-of-place when 2to3 is needed), put the
        # built egg on sys.path, call func(), then restore sys.path,
        # sys.modules and the pkg_resources working set.
        with_2to3 = (
            sys.version_info >= (3,)
            and getattr(self.distribution, 'use_2to3', False)
        )

        if with_2to3:
            # If we run 2to3 we can not do this inplace:

            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)

            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')

            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')

            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')

        ei_cmd = self.get_finalized_command("egg_info")

        # Snapshot interpreter state so it can be restored afterwards.
        old_path = sys.path[:]
        old_modules = sys.modules.copy()

        try:
            sys.path.insert(0, normalize_path(ei_cmd.egg_base))
            working_set.__init__()
            add_activation_listener(lambda dist: dist.activate())
            require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
            func()
        finally:
            sys.path[:] = old_path
            sys.modules.clear()
            sys.modules.update(old_modules)
            working_set.__init__()

    def run(self):
        # Fetch any eggs required at install/test time before running.
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(self.distribution.install_requires)
        if self.distribution.tests_require:
            self.distribution.fetch_build_eggs(self.distribution.tests_require)

        if self.test_suite:
            cmd = ' '.join(self.test_args)
            if self.dry_run:
                self.announce('skipping "unittest %s" (dry run)' % cmd)
            else:
                self.announce('running "unittest %s"' % cmd)
                self.with_project_on_sys_path(self.run_tests)

    def run_tests(self):
        # Purge modules under test from sys.modules. The test loader will
        # re-import them from the build location. Required when 2to3 is used
        # with namespace packages.
        if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
            module = self.test_args[-1].split('.')[0]
            if module in _namespace_packages:
                del_modules = []
                if module in sys.modules:
                    del_modules.append(module)
                module += '.'
                for name in sys.modules:
                    if name.startswith(module):
                        del_modules.append(name)
                list(map(sys.modules.__delitem__, del_modules))

        unittest_main(
            None, None, [unittest.__file__]+self.test_args,
            testLoader=self._resolve_as_ep(self.test_loader),
            testRunner=self._resolve_as_ep(self.test_runner),
        )

    @staticmethod
    def _resolve_as_ep(val):
        """
        Load the indicated attribute value, called, as if it were
        specified as an entry point.
        """
        if val is None:
            return
        parsed = EntryPoint.parse("x=" + val)
        return parsed.load(require=False)()
| apache-2.0 |
5y/linux | scripts/gdb/linux/tasks.py | 367 | 2552 | #
# gdb helper commands and functions for Linux kernel debugging
#
# task & thread tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
# Cached gdb lookup of `struct task_struct` (resolved lazily via utils).
task_type = utils.CachedType("struct task_struct")
def task_lists():
    """Yield every task_struct pointer in the target kernel.

    Walks the circular ``tasks`` list starting at init_task and, for each
    thread-group leader, the circular ``thread_group`` list of its threads.
    """
    global task_type
    task_ptr_type = task_type.get_type().pointer()
    init_task = gdb.parse_and_eval("init_task").address
    t = g = init_task
    while True:
        # inner loop: all threads of the current thread group (leader g)
        while True:
            yield t

            t = utils.container_of(t['thread_group']['next'],
                                   task_ptr_type, "thread_group")
            if t == g:
                break

        # outer loop: advance to the next thread-group leader
        t = g = utils.container_of(g['tasks']['next'],
                                   task_ptr_type, "tasks")
        if t == init_task:
            return
def get_task_by_pid(pid):
    """Return the task_struct whose PID equals *pid*, or None when absent."""
    return next(
        (candidate for candidate in task_lists()
         if int(candidate['pid']) == pid),
        None)
class LxTaskByPidFunc(gdb.Function):
    """Find Linux task by PID and return the task_struct variable.

$lx_task_by_pid(PID): Given PID, iterate over all tasks of the target and
return that task_struct variable which PID matches."""

    def __init__(self):
        super(LxTaskByPidFunc, self).__init__("lx_task_by_pid")

    def invoke(self, pid):
        # Returns the dereferenced struct so gdb prints the task itself,
        # not a pointer; unknown PIDs raise a user-visible gdb error.
        task = get_task_by_pid(pid)
        if task:
            return task.dereference()
        else:
            raise gdb.GdbError("No task of PID " + str(pid))
# Instantiating the class registers $lx_task_by_pid with gdb.
LxTaskByPidFunc()
# Cached gdb lookup of `struct thread_info`.
thread_info_type = utils.CachedType("struct thread_info")

# sizeof(struct task_struct), computed on first use for ia64 targets only
# (see get_thread_info below).
ia64_task_size = None
def get_thread_info(task):
    """Return the thread_info struct belonging to the given task_struct.

    On ia64 the thread_info is located immediately after the task_struct
    in memory; on all other architectures the task's ``stack`` pointer is
    cast to a thread_info pointer.
    """
    global thread_info_type
    thread_info_ptr_type = thread_info_type.get_type().pointer()
    if utils.is_target_arch("ia64"):
        global ia64_task_size
        if ia64_task_size is None:
            # evaluate sizeof once and cache it for subsequent calls
            ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)")
        thread_info_addr = task.address + ia64_task_size
        thread_info = thread_info_addr.cast(thread_info_ptr_type)
    else:
        thread_info = task['stack'].cast(thread_info_ptr_type)
    return thread_info.dereference()
class LxThreadInfoFunc (gdb.Function):
    """Calculate Linux thread_info from task variable.

$lx_thread_info(TASK): Given TASK, return the corresponding thread_info
variable."""

    def __init__(self):
        super(LxThreadInfoFunc, self).__init__("lx_thread_info")

    def invoke(self, task):
        # Thin wrapper; all the architecture handling is in get_thread_info.
        return get_thread_info(task)
# Instantiating the class registers $lx_thread_info with gdb.
LxThreadInfoFunc()
| gpl-2.0 |
drpngx/tensorflow | tensorflow/contrib/training/python/training/resample.py | 39 | 5844 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling methods for batches of tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import moving_averages
def _repeat_range(counts, name=None):
  """Repeat integers given by range(len(counts)) each the given number of times.

  Example behavior:
  [0, 1, 2, 3] -> [1, 2, 2, 3, 3, 3]

  Args:
    counts: 1D tensor with dtype=int32.
    name: optional name for operation.

  Returns:
    1D tensor with dtype=int32 and dynamic length giving the repeated integers.
  """
  with ops.name_scope(name, 'repeat_range', [counts]) as scope:
    counts = ops.convert_to_tensor(counts, name='counts')

    def cond(unused_output, i):
      # run the loop body once per entry of `counts`
      return i < size

    def body(output, i):
      # write counts[i] copies of the integer i into TensorArray slot i
      value = array_ops.fill(counts[i:i+1], i)
      return (output.write(i, value), i + 1)

    size = array_ops.shape(counts)[0]
    init_output_array = tensor_array_ops.TensorArray(
        dtype=dtypes.int32, size=size, infer_shape=False)
    output_array, num_writes = control_flow_ops.while_loop(
        cond, body, loop_vars=[init_output_array, 0])

    # Guard against concatenating an empty TensorArray: when nothing was
    # written, return an empty int32 vector instead.
    return control_flow_ops.cond(
        num_writes > 0,
        output_array.concat,
        lambda: array_ops.zeros(shape=[0], dtype=dtypes.int32),
        name=scope)
def resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):
  """Given `inputs` tensors, stochastically resamples each at a given rate.

  For example, if the inputs are `[[a1, a2], [b1, b2]]` and the rates
  tensor contains `[3, 1]`, then the return value may look like `[[a1,
  a2, a1, a1], [b1, b2, b1, b1]]`. However, many other outputs are
  possible, since this is stochastic -- averaged over many repeated
  calls, each set of inputs should appear in the output `rate` times
  the number of invocations.

  Args:
    inputs: A list of tensors, each of which has a shape of `[batch_size, ...]`
    rates: A tensor of shape `[batch_size]` containing the resampling rates
       for each input.
    scope: Scope for the op.
    seed: Random seed to use.
    back_prop: Whether to allow back-propagation through this op.

  Returns:
    Selections from the input tensors.
  """
  with ops.name_scope(scope, default_name='resample_at_rate',
                      values=list(inputs) + [rates]):
    rates = ops.convert_to_tensor(rates, name='rates')
    # Draw each row's repetition count from Poisson(rate), so the expected
    # number of copies of row i equals rates[i].
    sample_counts = math_ops.cast(
        random_ops.random_poisson(rates, (), rates.dtype, seed=seed),
        dtypes.int32)
    # Expand counts into explicit row indices, e.g. [2, 1] -> [0, 0, 1].
    sample_indices = _repeat_range(sample_counts)
    if not back_prop:
      sample_indices = array_ops.stop_gradient(sample_indices)
    return [array_ops.gather(x, sample_indices) for x in inputs]
def weighted_resample(inputs, weights, overall_rate, scope=None,
                      mean_decay=0.999, seed=None):
  """Performs an approximate weighted resampling of `inputs`.

  This method chooses elements from `inputs` where each item's rate of
  selection is proportional to its value in `weights`, and the average
  rate of selection across all inputs (and many invocations!) is
  `overall_rate`.

  Args:
    inputs: A list of tensors whose first dimension is `batch_size`.
    weights: A `[batch_size]`-shaped tensor with each batch member's weight.
    overall_rate: Desired overall rate of resampling.
    scope: Scope to use for the op.
    mean_decay: How quickly to decay the running estimate of the mean weight.
    seed: Random seed.

  Returns:
    A list of tensors exactly like `inputs`, but with an unknown (and
      possibly zero) first dimension.
    A tensor containing the effective resampling rate used for each output.
  """
  # Algorithm: Just compute rates as weights/mean_weight *
  # overall_rate. This way the average weight corresponds to the
  # overall rate, and a weight twice the average has twice the rate,
  # etc.
  with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
    # First: Maintain a running estimated mean weight, with zero debiasing
    # enabled (by default) to avoid throwing the average off.

    with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
      estimated_mean = variable_scope.get_local_variable(
          'estimated_mean',
          initializer=math_ops.cast(0, weights.dtype),
          dtype=weights.dtype)

      batch_mean = math_ops.reduce_mean(weights)
      mean = moving_averages.assign_moving_average(
          estimated_mean, batch_mean, mean_decay)

    # Then, normalize the weights into rates using the mean weight and
    # overall target rate:
    rates = weights * overall_rate / mean

    # Resample the rates tensor alongside the inputs so the caller also
    # learns the effective rate that was used for each retained row.
    results = resample_at_rate([rates] + inputs, rates,
                               scope=opscope, seed=seed, back_prop=False)

    return (results[1:], results[0])
| apache-2.0 |
indictranstech/fbd_erpnext | erpnext/hr/report/monthly_salary_register/monthly_salary_register.py | 52 | 4104 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr
from frappe import msgprint, _
def execute(filters=None):
    """Build the Monthly Salary Register report.

    Returns (columns, data) where each data row lists the employee fields,
    per-type earnings, totals, per-type deductions, and net pay.
    """
    filters = filters if filters else {}

    salary_slips = get_salary_slips(filters)
    columns, earning_types, ded_types = get_columns(salary_slips)
    ss_earning_map = get_ss_earning_map(salary_slips)
    ss_ded_map = get_ss_ded_map(salary_slips)

    data = []
    for slip in salary_slips:
        row = [
            slip.employee, slip.employee_name, slip.branch, slip.department,
            slip.designation, slip.company, slip.month,
            slip.leave_withut_pay, slip.payment_days,
        ]
        # one column per earning type, blank when the slip lacks that type
        row.extend(ss_earning_map.get(slip.name, {}).get(etype)
            for etype in earning_types)
        row.extend([slip.arrear_amount, slip.leave_encashment_amount,
            slip.gross_pay])
        # one column per deduction type, blank when absent
        row.extend(ss_ded_map.get(slip.name, {}).get(dtype)
            for dtype in ded_types)
        row.extend([slip.total_deduction, slip.net_pay])
        data.append(row)

    return columns, data
def get_columns(salary_slips):
    """Return report column definitions plus the distinct earning and
    deduction types that occur with a non-zero amount on the given slips."""
    columns = [
        _("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:120",
        _("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120",
        _("Company") + ":Link/Company:120", _("Month") + "::80", _("Leave Without Pay") + ":Float:130",
        _("Payment Days") + ":Float:120"
    ]

    # The string formatting only inserts one "%s" placeholder per slip; the
    # actual names are passed as parameters, so they are properly escaped.
    earning_types = frappe.db.sql_list("""select distinct e_type from `tabSalary Slip Earning`
        where ifnull(e_modified_amount, 0) != 0 and parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))

    ded_types = frappe.db.sql_list("""select distinct d_type from `tabSalary Slip Deduction`
        where ifnull(d_modified_amount, 0) != 0 and parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))

    # Dynamic earning/deduction columns are spliced between the fixed ones.
    columns = columns + [(e + ":Currency:120") for e in earning_types] + \
        ["Arrear Amount:Currency:120", "Leave Encashment Amount:Currency:150",
        "Gross Pay:Currency:120"] + [(d + ":Currency:120") for d in ded_types] + \
        ["Total Deduction:Currency:120", "Net Pay:Currency:120"]

    return columns, earning_types, ded_types
def get_salary_slips(filters):
    """Fetch submitted (docstatus = 1) salary slips matching *filters*;
    abort with a user message when none are found."""
    conditions, filters = get_conditions(filters)
    salary_slips = frappe.db.sql("""select * from `tabSalary Slip` where docstatus = 1 %s
        order by employee, month""" % conditions, filters, as_dict=1)

    if not salary_slips:
        # raise_exception=1 stops report execution with the message
        msgprint(_("No salary slip found for month: ") + cstr(filters.get("month")) +
            _(" and year: ") + cstr(filters.get("fiscal_year")), raise_exception=1)

    return salary_slips
def get_conditions(filters):
    """Translate the report filters into SQL condition fragments.

    Returns (conditions, filters): `conditions` is a string of
    " and field = %(field)s" clauses, and `filters` is the same dict with
    "month" converted from a month-name abbreviation to its 1-based number
    (so it can be bound as a SQL parameter).
    """
    month_names = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
        "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]

    clauses = []
    if filters.get("month"):
        filters["month"] = month_names.index(filters["month"]) + 1
        clauses.append(" and month = %(month)s")

    for field in ("fiscal_year", "company", "employee"):
        if filters.get(field):
            clauses.append(" and %s = %%(%s)s" % (field, field))

    return "".join(clauses), filters
def get_ss_earning_map(salary_slips):
    """Map salary slip name -> {earning type: modified amount}."""
    ss_earnings = frappe.db.sql("""select parent, e_type, e_modified_amount
        from `tabSalary Slip Earning` where parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)

    ss_earning_map = {}
    for d in ss_earnings:
        # NOTE: the inner setdefault seeds a throwaway list that the next
        # line immediately overwrites with the float amount.
        ss_earning_map.setdefault(d.parent, frappe._dict()).setdefault(d.e_type, [])
        ss_earning_map[d.parent][d.e_type] = flt(d.e_modified_amount)

    return ss_earning_map
def get_ss_ded_map(salary_slips):
    """Map salary slip name -> {deduction type: modified amount}.

    Mirrors get_ss_earning_map for the deduction child table.
    """
    ss_deductions = frappe.db.sql("""select parent, d_type, d_modified_amount
        from `tabSalary Slip Deduction` where parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)

    ss_ded_map = {}
    for d in ss_deductions:
        ss_ded_map.setdefault(d.parent, frappe._dict())
        ss_ded_map[d.parent][d.d_type] = flt(d.d_modified_amount)

    # Fixed: the original final line was corrupted by a fused license
    # artifact ("return ss_ded_map | agpl-3.0 |"), a syntax error.
    return ss_ded_map
icexelloss/spark | examples/src/main/python/mllib/multi_class_metrics_example.py | 55 | 2836 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.util import MLUtils
from pyspark.mllib.evaluation import MulticlassMetrics
# $example off$
from pyspark import SparkContext
if __name__ == "__main__":
    sc = SparkContext(appName="MultiClassMetricsExample")

    # Several of the methods available in scala are currently missing from pyspark
    # $example on$
    # Load training data in LIBSVM format
    data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_multiclass_classification_data.txt")

    # Split data into training (60%) and test (40%)
    training, test = data.randomSplit([0.6, 0.4], seed=11)
    training.cache()

    # Run training algorithm to build the model
    model = LogisticRegressionWithLBFGS.train(training, numClasses=3)

    # Compute raw scores on the test set
    predictionAndLabels = test.map(lambda lp: (float(model.predict(lp.features)), lp.label))

    # Instantiate metrics object
    metrics = MulticlassMetrics(predictionAndLabels)

    # Summary stats -- NOTE: precision/recall/fMeasure below are computed
    # for label 1.0 only (they take a label argument), not averaged over
    # all classes; see the weighted stats further down for overall values.
    precision = metrics.precision(1.0)
    recall = metrics.recall(1.0)
    f1Score = metrics.fMeasure(1.0)
    print("Summary Stats")
    print("Precision = %s" % precision)
    print("Recall = %s" % recall)
    print("F1 Score = %s" % f1Score)

    # Statistics by class
    labels = data.map(lambda lp: lp.label).distinct().collect()
    for label in sorted(labels):
        print("Class %s precision = %s" % (label, metrics.precision(label)))
        print("Class %s recall = %s" % (label, metrics.recall(label)))
        print("Class %s F1 Measure = %s" % (label, metrics.fMeasure(label, beta=1.0)))

    # Weighted stats
    print("Weighted recall = %s" % metrics.weightedRecall)
    print("Weighted precision = %s" % metrics.weightedPrecision)
    print("Weighted F(1) Score = %s" % metrics.weightedFMeasure())
    print("Weighted F(0.5) Score = %s" % metrics.weightedFMeasure(beta=0.5))
    print("Weighted false positive rate = %s" % metrics.weightedFalsePositiveRate)
    # $example off$
dcroc16/skunk_works | google_appengine/lib/django-1.2/django/contrib/localflavor/sk/sk_districts.py | 543 | 2453 | """
Slovak districts according to http://sk.wikipedia.org/wiki/Administrat%C3%ADvne_%C4%8Dlenenie_Slovenska
"""
from django.utils.translation import ugettext_lazy as _
# Slovak districts as (code, localized name) choice pairs for form fields.
# Bratislava and Kosice are split into their numbered city districts.
DISTRICT_CHOICES = (
    ('BB', _('Banska Bystrica')),
    ('BS', _('Banska Stiavnica')),
    ('BJ', _('Bardejov')),
    ('BN', _('Banovce nad Bebravou')),
    ('BR', _('Brezno')),
    ('BA1', _('Bratislava I')),
    ('BA2', _('Bratislava II')),
    ('BA3', _('Bratislava III')),
    ('BA4', _('Bratislava IV')),
    ('BA5', _('Bratislava V')),
    ('BY', _('Bytca')),
    ('CA', _('Cadca')),
    ('DT', _('Detva')),
    ('DK', _('Dolny Kubin')),
    ('DS', _('Dunajska Streda')),
    ('GA', _('Galanta')),
    ('GL', _('Gelnica')),
    ('HC', _('Hlohovec')),
    ('HE', _('Humenne')),
    ('IL', _('Ilava')),
    ('KK', _('Kezmarok')),
    ('KN', _('Komarno')),
    ('KE1', _('Kosice I')),
    ('KE2', _('Kosice II')),
    ('KE3', _('Kosice III')),
    ('KE4', _('Kosice IV')),
    ('KEO', _('Kosice - okolie')),
    ('KA', _('Krupina')),
    ('KM', _('Kysucke Nove Mesto')),
    ('LV', _('Levice')),
    ('LE', _('Levoca')),
    ('LM', _('Liptovsky Mikulas')),
    ('LC', _('Lucenec')),
    ('MA', _('Malacky')),
    ('MT', _('Martin')),
    ('ML', _('Medzilaborce')),
    ('MI', _('Michalovce')),
    ('MY', _('Myjava')),
    ('NO', _('Namestovo')),
    ('NR', _('Nitra')),
    ('NM', _('Nove Mesto nad Vahom')),
    ('NZ', _('Nove Zamky')),
    ('PE', _('Partizanske')),
    ('PK', _('Pezinok')),
    ('PN', _('Piestany')),
    ('PT', _('Poltar')),
    ('PP', _('Poprad')),
    ('PB', _('Povazska Bystrica')),
    ('PO', _('Presov')),
    ('PD', _('Prievidza')),
    ('PU', _('Puchov')),
    ('RA', _('Revuca')),
    ('RS', _('Rimavska Sobota')),
    ('RV', _('Roznava')),
    ('RK', _('Ruzomberok')),
    ('SB', _('Sabinov')),
    ('SC', _('Senec')),
    ('SE', _('Senica')),
    ('SI', _('Skalica')),
    ('SV', _('Snina')),
    ('SO', _('Sobrance')),
    ('SN', _('Spisska Nova Ves')),
    ('SL', _('Stara Lubovna')),
    ('SP', _('Stropkov')),
    ('SK', _('Svidnik')),
    ('SA', _('Sala')),
    ('TO', _('Topolcany')),
    ('TV', _('Trebisov')),
    ('TN', _('Trencin')),
    ('TT', _('Trnava')),
    ('TR', _('Turcianske Teplice')),
    ('TS', _('Tvrdosin')),
    ('VK', _('Velky Krtis')),
    ('VT', _('Vranov nad Toplou')),
    ('ZM', _('Zlate Moravce')),
    ('ZV', _('Zvolen')),
    ('ZC', _('Zarnovica')),
    ('ZH', _('Ziar nad Hronom')),
    ('ZA', _('Zilina')),
)
| mit |
costadorione/purestream | servers/wstream.py | 1 | 2905 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para wstream.video
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# by DrZ3r0
# ------------------------------------------------------------
import re
import time
from core import logger
from core import scrapertools
# Default HTTP request headers sent with every page fetch in this connector
# (desktop Firefox user agent so the site serves the normal HTML page).
headers = [
    ['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
    ['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
    ['Accept-Language', 'en-US,en;q=0.5'],
    ['Accept-Encoding', 'gzip, deflate']
]
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the downloadable video URL(s) behind a wstream.video page.

    Fetches the page, waits out the site's countdown, submits the
    "imhuman" form, unpacks the packed JS player config if present, and
    extracts the final .mp4 URL.  Returns a list of ``[label, url]``
    pairs (possibly with an empty url if extraction fails).
    """
    logger.info("[wstream.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    data = scrapertools.cache_page(page_url, headers=headers)
    # The site refuses the form POST until its client-side countdown expires.
    time.sleep(9)
    post_url = re.findall('Form method="POST" action=\'(.*)\'', data)[0]
    post_selected = re.findall('Form method="POST" action=(.*)</Form>', data, re.DOTALL)[0]
    post_data = 'op=%s&usr_login=%s&id=%s&referer=%s&hash=%s&imhuman=Proceed+to+video' % (
        re.findall('input type="hidden" name="op" value="(.*)"', post_selected)[0],
        re.findall('input type="hidden" name="usr_login" value="(.*)"', post_selected)[0],
        re.findall('input type="hidden" name="id" value="(.*)"', post_selected)[0],
        re.findall('input type="hidden" name="referer" value="(.*)"', post_selected)[0],
        re.findall('input type="hidden" name="hash" value="(.*)"', post_selected)[0])
    # Bug fix: the original called ``headers.append(['Referer', post_url])``
    # on the shared module-level list, so every call accumulated another
    # Referer entry.  Build a per-call copy instead of mutating the global.
    request_headers = headers + [['Referer', post_url]]
    data = scrapertools.cache_page(post_url, post=post_data, headers=request_headers)
    data_pack = scrapertools.find_single_match(data, "(eval.function.p,a,c,k,e,.*?)\s*</script>")
    if data_pack != "":
        from core import jsunpack
        data = jsunpack.unpack(data_pack)
    video_url = scrapertools.find_single_match(data, 'file"?\s*:\s*"([^"]+)",')
    video_urls.append([".mp4 [wstream]", video_url])
    for video_url in video_urls:
        logger.info("[wstream.py] %s - %s" % (video_url[0], video_url[1]))
    return video_urls
# Find this server's video links in the given text
def find_videos(data):
    """Scan *data* for wstream.video links.

    Returns a list of ``[title, url, server]`` entries, one per unique
    video id found in the text.
    """
    encontrados = set()
    devuelve = []
    # Bug fix: escape the dot so only the literal host "wstream.video"
    # matches (a bare "." would match any character).
    patronvideos = r"""wstream\.video/(?:embed-)?([a-z0-9A-Z]+)"""
    logger.info("[wstream.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        titulo = "[wstream]"
        url = 'http://wstream.video/%s' % match
        if url not in encontrados:
            logger.info(" url=" + url)
            devuelve.append([titulo, url, 'wstream'])
            encontrados.add(url)
        else:
            logger.info(" url duplicada=" + url)
    return devuelve
| gpl-3.0 |
ovnicraft/edx-platform | common/djangoapps/third_party_auth/middleware.py | 169 | 1053 | """Middleware classes for third_party_auth."""
from social.apps.django_app.middleware import SocialAuthExceptionMiddleware
from . import pipeline
class ExceptionMiddleware(SocialAuthExceptionMiddleware):
    """Custom middleware that handles conditional redirection."""

    def get_redirect_uri(self, request, exception):
        """Choose the error-redirect URL, preferring the auth-entry dispatch URL."""
        # Default comes from django settings's SOCIAL_AUTH_LOGIN_ERROR_URL.
        fallback_uri = super(ExceptionMiddleware, self).get_redirect_uri(request, exception)

        # Safe because it's already been validated by
        # pipeline.parse_query_params. If that pipeline step ever moves later
        # in the pipeline stack, we'd need to validate this value because it
        # would be an injection point for attacker data.
        auth_entry = request.session.get(pipeline.AUTH_ENTRY_KEY)

        # Prefer the dispatch URL registered for this auth entry, if any.
        if auth_entry and auth_entry in pipeline.AUTH_DISPATCH_URLS:
            return pipeline.AUTH_DISPATCH_URLS[auth_entry]
        return fallback_uri
karrrt/scale | lib/python2.7/site-packages/setuptools/package_index.py | 72 | 38943 | """PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
from functools import wraps
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
require, Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from setuptools.compat import (urllib2, httplib, StringIO, HTTPError,
urlparse, urlunparse, unquote, splituser,
url2pathname, name2codepoint,
unichr, urljoin, urlsplit, urlunsplit,
ConfigParser)
from setuptools.compat import filterfalse
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
# Matches the "egg=<name>[-<version>]" form used in URL fragments.
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
# Extracts the target of an href attribute (quoted or bare).
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
# Matches legacy PyPI "md5" links next to download anchors; used by
# PackageIndex.process_index to rewrite them into #md5= fragments.
PYPI_MD5 = re.compile(
    '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
    'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
# Bound .match used as a predicate: does the string start with a URL scheme?
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
# Recognized source-distribution archive extensions, in match-priority order.
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()

__all__ = [
    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
    'interpret_distro_name',
]

_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
    """Return (base,pyversion) or (None,None) for possible .exe name"""
    lowered = name.lower()
    base = py_ver = plat = None

    if lowered.endswith('.exe'):
        if lowered.endswith('.win32.exe'):
            # e.g. "pkg-1.0.win32.exe"
            base, plat = name[:-10], 'win32'
        elif lowered.startswith('.win32-py', -16):
            # e.g. "pkg.win32-py2.7.exe" -- version is the "X.Y" before ".exe"
            base, py_ver, plat = name[:-16], name[-7:-4], 'win32'
        elif lowered.endswith('.win-amd64.exe'):
            base, plat = name[:-14], 'win-amd64'
        elif lowered.startswith('.win-amd64-py', -20):
            base, py_ver, plat = name[:-20], name[-7:-4], 'win-amd64'

    return base, py_ver, plat
def egg_info_for_url(url):
    """Split *url* into its download basename and fragment."""
    parsed = urlparse(url)
    netloc, path, fragment = parsed[1], parsed[2], parsed[5]
    name = unquote(path.split('/')[-1])
    # XXX Yuck -- sourceforge "download" URLs keep the real name one level up
    if netloc == 'sourceforge.net' and name == 'download':
        name = unquote(path.split('/')[-2])
    if '#' in name:
        name, fragment = name.split('#', 1)
    return name, fragment
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    for dist in distros_for_location(url, base, metadata):
        yield dist
    if not fragment:
        return
    # An "#egg=name-version" fragment marks the URL as a checkout link.
    match = EGG_FRAGMENT.match(fragment)
    if not match:
        return
    for dist in interpret_distro_name(
        url, match.group(1), metadata, precedence=CHECKOUT_DIST
    ):
        yield dist
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename"""
    if basename.endswith('.egg.zip'):
        basename = basename[:-4]  # strip the .zip
    if basename.endswith('.egg') and '-' in basename:
        # only one, unambiguous interpretation
        return [Distribution.from_location(location, basename, metadata)]
    if basename.endswith('.exe'):
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # Try source distro extensions (.zip, .tgz, etc.)
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            stripped = basename[:-len(ext)]
            return interpret_distro_name(location, stripped, metadata)
    return []  # no extension matched
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    location = normalize_path(filename)
    basename = os.path.basename(filename)
    return distros_for_location(location, basename, metadata)
def interpret_distro_name(
    location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
    platform=None
):
    """Generate alternative interpretations of a source distro name

    Note: if `location` is a filesystem filename, you should call
    ``pkg_resources.normalize_path()`` on it before passing it to this
    routine!
    """
    # Some archive names are ambiguous about where the name ends and the
    # version begins (e.g. "adns-python-1.1.0", "egenix-mx-commercial"), so
    # every possible split point is yielded ("adns, python-1.1.0",
    # "adns-python, 1.1.0", "adns-python-1.1.0, no version").  Spurious
    # interpretations compare lower than any real version number, so they are
    # unlikely to match a request.  Safe sdist/bdist names would avoid the
    # ambiguity entirely.
    pieces = basename.split('-')
    if not py_version:
        # A 5-char "py2.X" component after name/version marks a bdist_dumb,
        # not an sdist -- bail out without yielding anything.
        for piece in pieces[2:]:
            if len(piece) == 5 and piece.startswith('py2.'):
                return
    for split_at in range(1, len(pieces) + 1):
        yield Distribution(
            location, metadata,
            '-'.join(pieces[:split_at]), '-'.join(pieces[split_at:]),
            py_version=py_version, precedence=precedence,
            platform=platform,
        )
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
    """List unique elements, preserving order. Remember all elements ever seen.

    >>> ''.join(unique_everseen('AAAABBBCCDAABBB'))
    'ABCD'
    >>> ''.join(unique_everseen('ABBCcAD', str.lower))
    'ABCD'
    """
    seen = set()
    if key is None:
        for element in iterable:
            if element not in seen:
                seen.add(element)
                yield element
    else:
        for element in iterable:
            marker = key(element)
            if marker not in seen:
                seen.add(marker)
                yield element
def unique_values(func):
    """
    Wrap a function returning an iterable such that the resulting iterable
    only ever yields unique items.
    """
    @wraps(func)
    def deduplicated(*args, **kwargs):
        return unique_everseen(func(*args, **kwargs))
    return deduplicated
# Captures whole <tag ... rel="..."> elements and their rel value.
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
    for rel_match in REL.finditer(page):
        tag, rel = rel_match.groups()
        rel_types = set(map(str.strip, rel.lower().split(',')))
        if rel_types & {'homepage', 'download'}:
            for href in HREF.finditer(tag):
                yield urljoin(url, htmldecode(href.group(1)))

    # Legacy PyPI package pages also list these URLs in a table.
    for marker in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(marker)
        if pos == -1:
            continue
        href = HREF.search(page, pos)
        if href:
            yield urljoin(url, htmldecode(href.group(1)))
# User-Agent sent on outgoing requests: Python/urllib version ("X.Y") plus
# the installed setuptools version.
user_agent = "Python-urllib/%s setuptools/%s" % (
    sys.version[:3], require('setuptools')[0].version
)
class ContentChecker(object):
    """
    A null content checker that defines the interface for checking content
    """

    def feed(self, block):
        """
        Feed a block of data to the hash.
        """

    def is_valid(self):
        """
        Check the hash. Return False if validation fails.
        """
        return True

    def report(self, reporter, template):
        """
        Call reporter with information about the checker (hash name)
        substituted into the template.
        """
class HashChecker(ContentChecker):
    """Validate downloaded content against a "<hash_name>=<hexdigest>" spec."""

    pattern = re.compile(
        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
        r'(?P<expected>[a-f0-9]+)'
    )

    def __init__(self, hash_name, expected):
        self.hash_name = hash_name
        self.hash = hashlib.new(hash_name)
        self.expected = expected

    @classmethod
    def from_url(cls, url):
        "Construct a (possibly null) ContentChecker from a URL"
        fragment = urlparse(url)[-1]
        if fragment:
            match = cls.pattern.search(fragment)
            if match:
                return cls(**match.groupdict())
        # No usable hash in the fragment: fall back to the null checker.
        return ContentChecker()

    def feed(self, block):
        self.hash.update(block)

    def is_valid(self):
        return self.hash.hexdigest() == self.expected

    def report(self, reporter, template):
        return reporter(template % self.hash_name)
class PackageIndex(Environment):
    """A distribution index that scans web pages for download URLs"""

    def __init__(
            self, index_url="https://pypi.python.org/simple", hosts=('*',),
            ca_bundle=None, verify_ssl=True, *args, **kw
            ):
        Environment.__init__(self,*args,**kw)
        # The slice trick appends "/" only when it is missing:
        # "/"[:not True] == "" and "/"[:not False] == "/".
        self.index_url = index_url + "/"[:not index_url.endswith('/')]
        self.scanned_urls = {}
        self.fetched_urls = {}
        # package key (lowercased safe name) -> {page url: True}
        self.package_pages = {}
        # Host allow-list compiled from fnmatch-style patterns; used as a
        # predicate via .match in url_ok().
        self.allows = re.compile('|'.join(map(translate,hosts))).match
        self.to_scan = []
        if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
            self.opener = ssl_support.opener_for(ca_bundle)
        else: self.opener = urllib2.urlopen

    def process_url(self, url, retrieve=False):
        """Evaluate a URL as a possible download, and maybe retrieve it"""
        if url in self.scanned_urls and not retrieve:
            return
        self.scanned_urls[url] = True
        if not URL_SCHEME(url):
            # Not a URL at all: treat it as a local filename/directory.
            self.process_filename(url)
            return
        else:
            dists = list(distros_for_url(url))
            if dists:
                if not self.url_ok(url):
                    return
                self.debug("Found link: %s", url)

        if dists or not retrieve or url in self.fetched_urls:
            list(map(self.add, dists))
            return  # don't need the actual page

        if not self.url_ok(url):
            self.fetched_urls[url] = True
            return

        self.info("Reading %s", url)
        self.fetched_urls[url] = True  # prevent multiple fetch attempts
        f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
        if f is None: return
        self.fetched_urls[f.url] = True
        if 'html' not in f.headers.get('content-type', '').lower():
            f.close()  # not html, we can't process it
            return

        base = f.url  # handle redirects
        page = f.read()
        if not isinstance(page, str):  # We are in Python 3 and got bytes. We want str.
            if isinstance(f, HTTPError):
                # Errors have no charset, assume latin1:
                charset = 'latin-1'
            else:
                charset = f.headers.get_param('charset') or 'latin-1'
            page = page.decode(charset, "ignore")
        f.close()
        # Recursively process every link found on the page.
        for match in HREF.finditer(page):
            link = urljoin(base, htmldecode(match.group(1)))
            self.process_url(link)
        if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
            page = self.process_index(url, page)

    def process_filename(self, fn, nested=False):
        # process filenames or directories
        if not os.path.exists(fn):
            self.warn("Not found: %s", fn)
            return

        if os.path.isdir(fn) and not nested:
            # Scan one level of directory contents (no deeper recursion).
            path = os.path.realpath(fn)
            for item in os.listdir(path):
                self.process_filename(os.path.join(path,item), True)

        dists = distros_for_filename(fn)
        if dists:
            self.debug("Found: %s", fn)
            list(map(self.add, dists))

    def url_ok(self, url, fatal=False):
        # Allow file: URLs and any host matching the allow-list; otherwise
        # warn (or raise DistutilsError when fatal) and return None.
        s = URL_SCHEME(url)
        if (s and s.group(1).lower()=='file') or self.allows(urlparse(url)[1]):
            return True
        msg = ("\nNote: Bypassing %s (disallowed host; see "
               "http://bit.ly/1dg9ijs for details).\n")
        if fatal:
            raise DistutilsError(msg % url)
        else:
            self.warn(msg, url)

    def scan_egg_links(self, search_path):
        # Scan each directory on search_path for .egg-link files.
        for item in search_path:
            if os.path.isdir(item):
                for entry in os.listdir(item):
                    if entry.endswith('.egg-link'):
                        self.scan_egg_link(item, entry)

    def scan_egg_link(self, path, entry):
        # An egg-link file holds two non-blank lines: project dir and subpath.
        lines = [_f for _f in map(str.strip,
                                  open(os.path.join(path, entry))) if _f]
        if len(lines)==2:
            for dist in find_distributions(os.path.join(path, lines[0])):
                dist.location = os.path.join(path, *lines)
                dist.precedence = SOURCE_DIST
                self.add(dist)

    def process_index(self,url,page):
        """Process the contents of a PyPI page"""
        def scan(link):
            # Process a URL to see if it's for a package page
            if link.startswith(self.index_url):
                parts = list(map(
                    unquote, link[len(self.index_url):].split('/')
                ))
                if len(parts)==2 and '#' not in parts[1]:
                    # it's a package page, sanitize and index it
                    pkg = safe_name(parts[0])
                    ver = safe_version(parts[1])
                    self.package_pages.setdefault(pkg.lower(),{})[link] = True
                    return to_filename(pkg), to_filename(ver)
            return None, None

        # process an index page into the package-page index
        for match in HREF.finditer(page):
            try:
                scan(urljoin(url, htmldecode(match.group(1))))
            except ValueError:
                pass

        pkg, ver = scan(url)  # ensure this page is in the page index
        if pkg:
            # process individual package page
            for new_url in find_external_links(url, page):
                # Process the found URL
                base, frag = egg_info_for_url(new_url)
                if base.endswith('.py') and not frag:
                    if ver:
                        new_url+='#egg=%s-%s' % (pkg,ver)
                    else:
                        self.need_version_info(url)
                self.scan_url(new_url)

            # Rewrite legacy "(md5)" links into "#md5=<digest>" fragments.
            return PYPI_MD5.sub(
                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
            )
        else:
            return ""  # no sense double-scanning non-package pages

    def need_version_info(self, url):
        self.scan_all(
            "Page at %s links to .py file(s) without version info; an index "
            "scan is required.", url
        )

    def scan_all(self, msg=None, *args):
        # Scan the whole index exactly once, warning the first time.
        if self.index_url not in self.fetched_urls:
            if msg: self.warn(msg,*args)
            self.info(
                "Scanning index of all packages (this may take a while)"
            )
        self.scan_url(self.index_url)

    def find_packages(self, requirement):
        self.scan_url(self.index_url + requirement.unsafe_name+'/')

        if not self.package_pages.get(requirement.key):
            # Fall back to safe version of the name
            self.scan_url(self.index_url + requirement.project_name+'/')

        if not self.package_pages.get(requirement.key):
            # We couldn't find the target package, so search the index page too
            self.not_found_in_index(requirement)

        for url in list(self.package_pages.get(requirement.key,())):
            # scan each page that might be related to the desired package
            self.scan_url(url)

    def obtain(self, requirement, installer=None):
        # Try the index first; fall back to the base Environment behavior.
        self.prescan()
        self.find_packages(requirement)
        for dist in self[requirement.key]:
            if dist in requirement:
                return dist
            self.debug("%s does not match %s", requirement, dist)
        return super(PackageIndex, self).obtain(requirement,installer)

    def check_hash(self, checker, filename, tfp):
        """
        checker is a ContentChecker; on failure the partially-downloaded
        file is closed and removed before raising.
        """
        checker.report(self.debug,
            "Validating %%s checksum for %s" % filename)
        if not checker.is_valid():
            tfp.close()
            os.unlink(filename)
            raise DistutilsError(
                "%s validation failed for %s; "
                "possible download problem?" % (
                    checker.hash.name, os.path.basename(filename))
            )

    def add_find_links(self, urls):
        """Add `urls` to the list that will be prescanned for searches"""
        for url in urls:
            if (
                self.to_scan is None  # if we have already "gone online"
                or not URL_SCHEME(url)  # or it's a local file/directory
                or url.startswith('file:')
                or list(distros_for_url(url))  # or a direct package link
            ):
                # then go ahead and process it now
                self.scan_url(url)
            else:
                # otherwise, defer retrieval till later
                self.to_scan.append(url)

    def prescan(self):
        """Scan urls scheduled for prescanning (e.g. --find-links)"""
        if self.to_scan:
            list(map(self.scan_url, self.to_scan))
        self.to_scan = None  # from now on, go ahead and process immediately

    def not_found_in_index(self, requirement):
        if self[requirement.key]:  # we've seen at least one distro
            meth, msg = self.info, "Couldn't retrieve index page for %r"
        else:  # no distros seen for this name, might be misspelled
            meth, msg = (self.warn,
                "Couldn't find index page for %r (maybe misspelled?)")
        meth(msg, requirement.unsafe_name)
        self.scan_all()

    def download(self, spec, tmpdir):
        """Locate and/or download `spec` to `tmpdir`, returning a local path

        `spec` may be a ``Requirement`` object, or a string containing a URL,
        an existing local filename, or a project/version requirement spec
        (i.e. the string form of a ``Requirement`` object). If it is the URL
        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
        automatically created alongside the downloaded file.

        If `spec` is a ``Requirement`` object or a string containing a
        project/version requirement spec, this method returns the location of
        a matching distribution (possibly after downloading it to `tmpdir`).
        If `spec` is a locally existing file or directory name, it is simply
        returned unchanged. If `spec` is a URL, it is downloaded to a subpath
        of `tmpdir`, and the local filename is returned. Various errors may be
        raised if a problem occurs during downloading.
        """
        if not isinstance(spec,Requirement):
            scheme = URL_SCHEME(spec)
            if scheme:
                # It's a url, download it to tmpdir
                found = self._download_url(scheme.group(1), spec, tmpdir)
                base, fragment = egg_info_for_url(spec)
                if base.endswith('.py'):
                    found = self.gen_setup(found,fragment,tmpdir)
                return found
            elif os.path.exists(spec):
                # Existing file or directory, just return it
                return spec
            else:
                try:
                    spec = Requirement.parse(spec)
                except ValueError:
                    raise DistutilsError(
                        "Not a URL, existing file, or requirement spec: %r" %
                        (spec,)
                    )
        return getattr(self.fetch_distribution(spec, tmpdir),'location',None)

    def fetch_distribution(
            self, requirement, tmpdir, force_scan=False, source=False,
            develop_ok=False, local_index=None
            ):
        """Obtain a distribution suitable for fulfilling `requirement`

        `requirement` must be a ``pkg_resources.Requirement`` instance.
        If necessary, or if the `force_scan` flag is set, the requirement is
        searched for in the (online) package index as well as the locally
        installed packages. If a distribution matching `requirement` is found,
        the returned distribution's ``location`` is the value you would have
        gotten from calling the ``download()`` method with the matching
        distribution's URL or filename. If no matching distribution is found,
        ``None`` is returned.

        If the `source` flag is set, only source distributions and source
        checkout links will be considered. Unless the `develop_ok` flag is
        set, development and system eggs (i.e., those using the ``.egg-info``
        format) will be ignored.
        """
        # process a Requirement
        self.info("Searching for %s", requirement)
        skipped = {}
        dist = None

        def find(req, env=None):
            if env is None:
                env = self
            # Find a matching distribution; may be called more than once
            for dist in env[req.key]:

                if dist.precedence==DEVELOP_DIST and not develop_ok:
                    if dist not in skipped:
                        self.warn("Skipping development or system egg: %s",dist)
                        skipped[dist] = 1
                    continue

                if dist in req and (dist.precedence<=SOURCE_DIST or not source):
                    return dist

        if force_scan:
            self.prescan()
            self.find_packages(requirement)
            dist = find(requirement)

        if local_index is not None:
            dist = dist or find(requirement, local_index)

        if dist is None:
            if self.to_scan is not None:
                self.prescan()
            dist = find(requirement)

        if dist is None and not force_scan:
            self.find_packages(requirement)
            dist = find(requirement)

        if dist is None:
            self.warn(
                "No local packages or download links found for %s%s",
                (source and "a source distribution of " or ""),
                requirement,
            )
        else:
            self.info("Best match: %s", dist)
            return dist.clone(location=self.download(dist.location, tmpdir))

    def fetch(self, requirement, tmpdir, force_scan=False, source=False):
        """Obtain a file suitable for fulfilling `requirement`

        DEPRECATED; use the ``fetch_distribution()`` method now instead.  For
        backward compatibility, this routine is identical but returns the
        ``location`` of the downloaded distribution instead of a distribution
        object.
        """
        dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
        if dist is not None:
            return dist.location
        return None

    def gen_setup(self, filename, fragment, tmpdir):
        # Generate a trivial setup.py next to a bare downloaded .py module,
        # using the (unambiguous) "#egg=name-version" fragment for metadata.
        match = EGG_FRAGMENT.match(fragment)
        dists = match and [
            d for d in
            interpret_distro_name(filename, match.group(1), None) if d.version
        ] or []

        if len(dists)==1:  # unambiguous ``#egg`` fragment
            basename = os.path.basename(filename)

            # Make sure the file has been downloaded to the temp dir.
            if os.path.dirname(filename) != tmpdir:
                dst = os.path.join(tmpdir, basename)
                from setuptools.command.easy_install import samefile
                if not samefile(filename, dst):
                    shutil.copy2(filename, dst)
                    filename=dst

            with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
                file.write(
                    "from setuptools import setup\n"
                    "setup(name=%r, version=%r, py_modules=[%r])\n"
                    % (
                        dists[0].project_name, dists[0].version,
                        os.path.splitext(basename)[0]
                    )
                )
            return filename

        elif match:
            raise DistutilsError(
                "Can't unambiguously interpret project/version identifier %r; "
                "any dashes in the name or version should be escaped using "
                "underscores. %r" % (fragment,dists)
            )
        else:
            raise DistutilsError(
                "Can't process plain .py files without an '#egg=name-version'"
                " suffix to enable automatic setup script generation."
            )

    dl_blocksize = 8192

    def _download_to(self, url, filename):
        self.info("Downloading %s", url)
        # Download the file
        fp, info = None, None
        try:
            checker = HashChecker.from_url(url)
            fp = self.open_url(strip_fragment(url))
            if isinstance(fp, HTTPError):
                raise DistutilsError(
                    "Can't download %s: %s %s" % (url, fp.code,fp.msg)
                )
            headers = fp.info()
            blocknum = 0
            bs = self.dl_blocksize
            size = -1
            if "content-length" in headers:
                # Some servers return multiple Content-Length headers :(
                sizes = get_all_headers(headers, 'Content-Length')
                size = max(map(int, sizes))
                self.reporthook(url, filename, blocknum, bs, size)
            with open(filename,'wb') as tfp:
                while True:
                    block = fp.read(bs)
                    if block:
                        checker.feed(block)
                        tfp.write(block)
                        blocknum += 1
                        self.reporthook(url, filename, blocknum, bs, size)
                    else:
                        break
                self.check_hash(checker, filename, tfp)
            return headers
        finally:
            if fp: fp.close()

    def reporthook(self, url, filename, blocknum, blksize, size):
        pass  # no-op

    def open_url(self, url, warning=None):
        # Open `url`, translating transport failures into either a warning
        # (when `warning` is given) or a DistutilsError.
        if url.startswith('file:'):
            return local_open(url)
        try:
            return open_with_auth(url, self.opener)
        except (ValueError, httplib.InvalidURL):
            v = sys.exc_info()[1]
            msg = ' '.join([str(arg) for arg in v.args])
            if warning:
                self.warn(warning, msg)
            else:
                raise DistutilsError('%s %s' % (url, msg))
        except urllib2.HTTPError:
            v = sys.exc_info()[1]
            return v
        except urllib2.URLError:
            v = sys.exc_info()[1]
            if warning:
                self.warn(warning, v.reason)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v.reason))
        except httplib.BadStatusLine:
            v = sys.exc_info()[1]
            if warning:
                self.warn(warning, v.line)
            else:
                raise DistutilsError(
                    '%s returned a bad status line. The server might be '
                    'down, %s' %
                    (url, v.line)
                )
        except httplib.HTTPException:
            v = sys.exc_info()[1]
            if warning:
                self.warn(warning, v)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v))

    def _download_url(self, scheme, url, tmpdir):
        # Determine download filename
        #
        name, fragment = egg_info_for_url(url)
        if name:
            while '..' in name:
                name = name.replace('..','.').replace('\\','_')
        else:
            name = "__downloaded__"  # default if URL has no path contents

        if name.endswith('.egg.zip'):
            name = name[:-4]  # strip the extra .zip before download

        filename = os.path.join(tmpdir,name)

        # Download the file
        #
        if scheme=='svn' or scheme.startswith('svn+'):
            return self._download_svn(url, filename)
        elif scheme=='git' or scheme.startswith('git+'):
            return self._download_git(url, filename)
        elif scheme.startswith('hg+'):
            return self._download_hg(url, filename)
        elif scheme=='file':
            return url2pathname(urlparse(url)[2])
        else:
            self.url_ok(url, True)  # raises error if not allowed
            return self._attempt_download(url, filename)

    def scan_url(self, url):
        self.process_url(url, True)

    def _attempt_download(self, url, filename):
        headers = self._download_to(url, filename)
        if 'html' in headers.get('content-type','').lower():
            return self._download_html(url, headers, filename)
        else:
            return filename

    def _download_html(self, url, headers, filename):
        file = open(filename)
        for line in file:
            if line.strip():
                # Check for a subversion index page
                if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
                    # it's a subversion index page:
                    file.close()
                    os.unlink(filename)
                    return self._download_svn(url, filename)
                break  # not an index page
        file.close()
        os.unlink(filename)
        raise DistutilsError("Unexpected HTML page found at "+url)

    def _download_svn(self, url, filename):
        url = url.split('#',1)[0]  # remove any fragment for svn's sake
        creds = ''
        if url.lower().startswith('svn:') and '@' in url:
            scheme, netloc, path, p, q, f = urlparse(url)
            if not netloc and path.startswith('//') and '/' in path[2:]:
                netloc, path = path[2:].split('/',1)
                auth, host = splituser(netloc)
                if auth:
                    if ':' in auth:
                        user, pw = auth.split(':',1)
                        creds = " --username=%s --password=%s" % (user, pw)
                    else:
                        creds = " --username="+auth
                    netloc = host
                    # NOTE(review): the full `url` is passed as the *path*
                    # component here, which looks suspicious -- kept as-is.
                    url = urlunparse((scheme, netloc, url, p, q, f))
        self.info("Doing subversion checkout from %s to %s", url, filename)
        os.system("svn checkout%s -q %s %s" % (creds, url, filename))
        return filename

    @staticmethod
    def _vcs_split_rev_from_url(url, pop_prefix=False):
        # Split "scheme+vcs://host/path@rev#frag" into (clean url, rev).
        scheme, netloc, path, query, frag = urlsplit(url)

        scheme = scheme.split('+', 1)[-1]

        # Some fragment identification fails
        path = path.split('#',1)[0]

        rev = None
        if '@' in path:
            path, rev = path.rsplit('@', 1)

        # Also, discard fragment
        url = urlunsplit((scheme, netloc, path, query, ''))

        return url, rev

    def _download_git(self, url, filename):
        filename = filename.split('#',1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)

        self.info("Doing git clone from %s to %s", url, filename)
        os.system("git clone --quiet %s %s" % (url, filename))

        if rev is not None:
            self.info("Checking out %s", rev)
            os.system("(cd %s && git checkout --quiet %s)" % (
                filename,
                rev,
            ))

        return filename

    def _download_hg(self, url, filename):
        filename = filename.split('#',1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)

        self.info("Doing hg clone from %s to %s", url, filename)
        os.system("hg clone --quiet %s %s" % (url, filename))

        if rev is not None:
            self.info("Updating to %s", rev)
            os.system("(cd %s && hg up -C -r %s >&-)" % (
                filename,
                rev,
            ))

        return filename

    def debug(self, msg, *args):
        log.debug(msg, *args)

    def info(self, msg, *args):
        log.info(msg, *args)

    def warn(self, msg, *args):
        log.warn(msg, *args)
# This pattern matches a character entity reference: a decimal numeric
# reference, a hexadecimal numeric reference, or a named reference.
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
    """Return the character for codepoint *c*; non-int values pass through."""
    if isinstance(c, int):
        # unichr (from the compat layer) handles codepoints beyond latin-1.
        return unichr(c) if c > 255 else chr(c)
    return c
def decode_entity(match):
    """Decode one entity-reference regex match into its character."""
    ref = match.group(1)
    if ref.startswith('#x'):
        codepoint = int(ref[2:], 16)
    elif ref.startswith('#'):
        codepoint = int(ref[1:])
    else:
        # Named entity; unknown names fall back to the raw matched text.
        codepoint = name2codepoint.get(ref, match.group(0))
    return uchr(codepoint)
def htmldecode(text):
    """Decode HTML entities in the given text."""
    # entity_sub is re.sub bound to the entity pattern defined above.
    return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
    """Decorator factory: run the wrapped callable with the global socket
    default timeout set to *timeout*, restoring the previous value after."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            previous = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(previous)
        return wrapper
    return decorator
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','')
class Credential(object):
    """
    A username/password pair. Use like a namedtuple.
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __iter__(self):
        # Iteration order matches positional construction order.
        return iter((self.username, self.password))

    def __str__(self):
        return '{0}:{1}'.format(self.username, self.password)
class PyPIConfig(ConfigParser.ConfigParser):
    """Parsed view of the user's ``~/.pypirc`` repository credentials."""

    def __init__(self):
        """
        Load from ~/.pypirc
        """
        # Empty-string defaults so ``get`` never raises for missing keys.
        defaults = dict.fromkeys(['username', 'password', 'repository'], '')
        ConfigParser.ConfigParser.__init__(self, defaults)
        rc = os.path.join(os.path.expanduser('~'), '.pypirc')
        if os.path.exists(rc):
            self.read(rc)

    @property
    def creds_by_repository(self):
        # Only sections that actually name a repository URL count.
        sections_with_repositories = [
            section for section in self.sections()
            if self.get(section, 'repository').strip()
        ]
        return dict(map(self._get_repo_cred, sections_with_repositories))

    def _get_repo_cred(self, section):
        # Map one config section to a (repository URL, Credential) pair.
        repo = self.get(section, 'repository').strip()
        return repo, Credential(
            self.get(section, 'username').strip(),
            self.get(section, 'password').strip(),
        )

    def find_credential(self, url):
        """
        If the URL indicated appears to be a repository defined in this
        config, return the credential for that repository.
        """
        # Prefix match: any URL under a configured repository matches.
        for repository, cred in self.creds_by_repository.items():
            if url.startswith(repository):
                return cred
def open_with_auth(url, opener=urllib2.urlopen):
    """Open a urllib2 request, handling HTTP authentication"""
    scheme, netloc, path, params, query, frag = urlparse(url)
    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise httplib.InvalidURL("nonnumeric port: ''")
    if scheme in ('http', 'https'):
        # Split any ``user:password@`` component out of the netloc.
        auth, host = splituser(netloc)
    else:
        auth = None
    if not auth:
        # No credentials in the URL itself: fall back to ~/.pypirc.
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)' % info)
    if auth:
        auth = "Basic " + _encode_auth(auth)
        # Rebuild the URL without the user:password@ part before sending.
        new_url = urlunparse((scheme,host,path,params,query,frag))
        request = urllib2.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib2.Request(url)
    request.add_header('User-Agent', user_agent)
    fp = opener(request)
    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urlparse(fp.url)
        if s2==scheme and h2==host:
            fp.url = urlunparse((s2,netloc,path2,param2,query2,frag2))
    return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
    # Historical SourceForge URL rewriting; now a no-op kept so existing
    # callers keep working.
    return url # backward compatibility
def local_open(url):
    """Read a local path, with special support for directories"""
    scheme, server, path, param, query, frag = urlparse(url)
    filename = url2pathname(path)
    if os.path.isfile(filename):
        return urllib2.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        for f in os.listdir(filename):
            if f=='index.html':
                # An explicit index page wins over the generated listing.
                with open(os.path.join(filename,f),'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(os.path.join(filename,f)):
                f+='/'
            files.append("<a href=%r>%s</a>" % (f,f))
        else:
            # for/else: only build the synthetic listing when no
            # index.html was found (i.e. the loop did not break).
            body = ("<html><head><title>%s</title>" % url) + \
                "</head><body>%s</body></html>" % '\n'.join(files)
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    # Success is also wrapped in an HTTPError-shaped response object.
    return HTTPError(url, status, message, headers, StringIO(body))
| mit |
brianhouse/48to16 | data_compilation/compile_audio.py | 1 | 2894 | #!/usr/bin/env python3
import sys, os, datetime, calendar, compile_gpx, pytz
import numpy as np
from scipy.io import wavfile
from housepy import log, config, util, drawing, science, crashdb
# Per-video media length in seconds, used to clip breaths to the video.
MEDIA_LENGTH = 2090.43 # hack per video to get everything to match correctly
log.info("Starting...")
# Expect one argument: a data folder holding one .gpx and one .wav file.
if len(sys.argv) < 2 or not os.path.isdir(sys.argv[1]):
    print("[data_folder]")
    exit()
directory = sys.argv[1]
gpx_filename = None
wav_filename = None
for filename in os.listdir(directory):
    if filename[-4:] == ".gpx":
        gpx_filename = os.path.join(directory, filename)
    if filename[-4:] == ".wav":
        wav_filename = os.path.join(directory, filename)
log.info("GPX %s" % gpx_filename)
log.info("WAV %s" % wav_filename)
# The WAV basename encodes the recording start time as "YYYYmmdd HHMMSS".
audio_start_dt = datetime.datetime.strptime(wav_filename.split('.')[0].split('/')[-1].replace('_smp', ''), "%Y%m%d %H%M%S")
audio_start_dt = util.to_utc(audio_start_dt)
# get video times
video_start_t, video_end_t = compile_gpx.get_video_times(gpx_filename)
log.info("AUDIO START %s" % audio_start_dt)
audio_start_t = float(calendar.timegm(audio_start_dt.timetuple()))
sample_rate, data = wavfile.read(wav_filename)
log.info("AUDIO SAMPLE RATE %s" % sample_rate)
log.info("AUDIO LENGTH (samples) %s" % len(data))
seconds = float(len(data)) / sample_rate
log.info("AUDIO DURATION %s" % util.format_time(seconds))
# Offset between audio-recording start and video start (may be negative).
skip = video_start_t - audio_start_t
log.info("AUDIO SKIP %s%s" % ('-' if skip < 0 else '', util.format_time(abs(skip))))
# downsample to 60hz
target_sample_rate = 60.0
signal = science.downsample(data, int(sample_rate / target_sample_rate))
log.info("NEW LENGTH (samples) %s" % len(signal))
# Remove the mean, keep positive excursions only, then smooth and
# normalize so peak detection sees a clean 0..1 envelope.
average = np.average(signal)
reduced = signal - average
reduced = [x if x >= 0 else 0 for x in reduced]
reduced = science.smooth(reduced, window_len=50)
reduced = science.normalize(reduced)
signal = science.normalize(signal)
log.info("DETECTING PEAKS")
# the lookahead is key. dont want to see two peaks, but not too small
# in this case, a breath a second?
max_peaks, min_peaks = science.detect_peaks(reduced, lookahead=60)
breaths = []
for peak in max_peaks:
    sample, y = peak
    # Convert sample index to seconds, then shift onto the video timeline.
    t = sample / target_sample_rate
    t -= skip
    if t < 0:
        continue
    if t > MEDIA_LENGTH:
        continue
    breaths.append(t)
log.info("SAVING")
db = crashdb.load("data.json")
db['breaths'] = breaths
db.close()
# Render the normalized signal, the smoothed envelope, and the detected
# peaks to an image for visual sanity checking.
num_samples = len(signal)
ctx = drawing.Context(10000, 500, relative=True, flip=True, hsv=True)
ctx.line([(float(i) / num_samples, signal[i]) for i in range(num_samples)], stroke=(0., 0., 0.85), thickness=2)
ctx.line([(float(i) / num_samples, reduced[i]) for i in range(num_samples)], stroke=(0.55, 1., 1.), thickness=2)
for peak in max_peaks:
    sample, y = peak
    ctx.arc(float(sample) / num_samples, y, 5. / ctx.width, 5. / ctx.height, thickness=0., fill=(0., 1., 1.))
ctx.show()
ctx.image.save("breaths.png", 'PNG')
| gpl-3.0 |
Snuzzo/vigor_mofokernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex(2) operation codes and flag bits.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask off the flag bits to recover the bare futex command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000  # nanoseconds per second
def avg(total, n):
    """Mean of *total* accumulated over *n* samples."""
    return total / n
def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into total nanoseconds."""
    return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
    """Whole-second part of a nanosecond timestamp."""
    # Floor division keeps integer results on both Python 2 and 3.
    return nsecs // NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    """Sub-second remainder (in ns) of a nanosecond timestamp."""
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond timestamp as 'SSSSS.NNNNNNNNN'."""
    # Bug fix: the original assigned with a stray trailing comma, so it
    # returned a 1-tuple instead of the string; it also shadowed the
    # builtin ``str``.
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold *value* into dict[key] = (min, max, avg, count).

    Note: ``avg`` is a running midpoint ((avg + value) / 2), not a true
    arithmetic mean -- preserved for compatibility with existing reports.
    """
    # ``in`` instead of the Python-2-only dict.has_key().
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        # Renamed locals to stop shadowing the min/max builtins.
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    """Clear the screen: ANSI 'cursor home' + 'erase entire display'."""
    ansi_clear = "\x1b[H" + "\x1b[2J"
    print(ansi_clear)
audit_package_warned = False
try:
    # Optional dependency: audit-libs-python provides syscall-name lookup.
    import audit
    # Map uname machine strings to audit machine-type constants.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is not present in all audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    if not audit_package_warned:
        audit_package_warned = True
        # Python 2 print statement: this module targets Python 2.
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Best-effort syscall-number -> name; falls back to the number."""
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        # audit module or machine_id may be unavailable; degrade to str.
        return str(id)
def strerror(nr):
    """Symbolic errno name (e.g. 'ENOENT') for *nr*, sign-insensitive."""
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare except: only an unknown errno is expected.
        return "Unknown %d errno" % nr
| gpl-2.0 |
schlueter/ansible | lib/ansible/modules/network/avi/avi_vsdatascriptset.py | 41 | 4759 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_vsdatascriptset
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of VSDataScriptSet Avi RESTful Object
description:
- This module is used to configure VSDataScriptSet object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
created_by:
description:
- Creator name.
- Field introduced in 17.1.11,17.2.4.
version_added: "2.5"
datascript:
description:
- Datascripts to execute.
description:
description:
- User defined description for the object.
ipgroup_refs:
description:
- Uuid of ip groups that could be referred by vsdatascriptset objects.
- It is a reference to an object of type ipaddrgroup.
name:
description:
- Name for the virtual service datascript collection.
required: true
pool_group_refs:
description:
- Uuid of pool groups that could be referred by vsdatascriptset objects.
- It is a reference to an object of type poolgroup.
pool_refs:
description:
- Uuid of pools that could be referred by vsdatascriptset objects.
- It is a reference to an object of type pool.
string_group_refs:
description:
- Uuid of string groups that could be referred by vsdatascriptset objects.
- It is a reference to an object of type stringgroup.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the virtual service datascript collection.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create VSDataScriptSet object
avi_vsdatascriptset:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_vsdatascriptset
"""
RETURN = '''
obj:
description: VSDataScriptSet (api/vsdatascriptset) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Module entry point: declare the argument spec and run the Avi API."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        created_by=dict(type='str',),
        datascript=dict(type='list',),
        description=dict(type='str',),
        ipgroup_refs=dict(type='list',),
        name=dict(type='str', required=True),
        pool_group_refs=dict(type='list',),
        pool_refs=dict(type='list',),
        string_group_refs=dict(type='list',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Merge in the shared Avi connection options (controller, creds, ...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Create/update/delete the vsdatascriptset object via the shared helper.
    return avi_ansible_api(module, 'vsdatascriptset',
                           set([]))
if __name__ == '__main__':
    main()
| gpl-3.0 |
40223220/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/long_int1/__init__.py | 503 | 3858 | from browser import html, document, window
import javascript
#memorize/cache?
def _get_value(other):
    """Unwrap a LongInt to its underlying JS number object; pass plain
    Python values through unchanged."""
    if isinstance(other, LongInt):
        return other.value
    return other
class BigInt:
    """Arithmetic/comparison dunders shared by the JS-backed big-number
    wrappers below.  ``self.value`` is the wrapped JS object (Decimal /
    BigNumber / Big) supplied by the concrete subclass.
    """
    def __init__(self):
        pass
    def __abs__(self):
        return LongInt(self.value.abs())
    def __add__(self, other):
        return LongInt(self.value.plus(_get_value(other)))
    def __and__(self, other):
        pass  # bitwise AND not implemented
    def __divmod__(self, other):
        _value=_get_value(other)
        return LongInt(self.value.div(_value)), LongInt(self.value.mod(_value))
    def __div__(self, other):
        return LongInt(self.value.div(_get_value(other)))
    def __eq__(self, other):
        return bool(self.value.eq(_get_value(other)))
    def __floordiv__(self, other):
        return LongInt(self.value.div(_get_value(other)).floor())
    def __ge__(self, other):
        return bool(self.value.gte(_get_value(other)))
    def __gt__(self, other):
        return bool(self.value.gt(_get_value(other)))
    def __index__(self):
        # Allow use in slicing/hex()/etc. for integral values only.
        if self.value.isInt():
            return int(self.value.toNumber())
        raise TypeError("This is not an integer")
    def __le__(self, other):
        return bool(self.value.lte(_get_value(other)))
    def __lt__(self, other):
        return bool(self.value.lt(_get_value(other)))
    def __lshift__(self, shift):
        # Left shift == multiply by 2**shift (only int shifts supported).
        if isinstance(shift, int):
            _v=LongInt(2)**shift
            return LongInt(self.value.times(_v.value))
    def __mod__(self, other):
        return LongInt(self.value.mod(_get_value(other)))
    def __mul__(self, other):
        return LongInt(self.value.times(_get_value(other)))
    def __neg__(self):
        # Bug fix: __neg__ is unary -- the original declared an ``other``
        # parameter (and passed it to neg()), so ``-x`` raised TypeError.
        return LongInt(self.value.neg())
    def __or__(self, other):
        pass  # bitwise OR not implemented
    def __pow__(self, other):
        return LongInt(self.value.pow(_get_value(other)))
    def __rshift__(self, other):
        pass  # right shift not implemented
    def __sub__(self, other):
        return LongInt(self.value.minus(_get_value(other)))
    def __repr__(self):
        # Bug fix: instances have no ``__name__``; use the class name.
        return "%s(%s)" % (type(self).__name__, self.value.toString(10))
    def __str__(self):
        return "%s(%s)" % (type(self).__name__, self.value.toString(10))
    def __xor__(self, other):
        pass  # bitwise XOR not implemented
# Current decimal-digit precision shared by the DecimalJS backend.
_precision=20
def get_precision(value):
    """Number of decimal digits in *value* (LongInt or plain number)."""
    if isinstance(value, LongInt):
        return len(str(value.value.toString(10)))
    return len(str(value))
class DecimalJS(BigInt):
    """BigInt backed by the decimal.js ``Decimal`` JS constructor."""
    def __init__(self, value=0, base=10):
        global _precision
        # Grow the shared decimal.js precision so this value fits exactly.
        _prec=get_precision(value)
        if _prec > _precision:
            _precision=_prec
            window.eval('Decimal.precision=%s' % _precision)
        self.value=javascript.JSConstructor(window.Decimal)(value, base)
class BigNumberJS(BigInt):
    """BigInt backed by the bignumber.js ``BigNumber`` JS constructor."""
    def __init__(self, value=0, base=10):
        self.value=javascript.JSConstructor(window.BigNumber)(value, base)
class BigJS(BigInt):
    """BigInt backed by big.js ``Big``; overrides the operations whose
    big.js API differs from the other backends."""
    def __init__(self, value=0, base=10):
        self.value=javascript.JSConstructor(window.Big)(value, base)
    def __floordiv__(self, other):
        _v=LongInt(self.value.div(_get_value(other)))
        # Floor semantics: round toward negative infinity on both signs.
        if _v >= 0:
            return LongInt(_v.value.round(0, 0)) #round down
        return LongInt(_v.value.round(0, 3)) #round up
    def __pow__(self, other):
        # big.js pow() needs a plain int exponent.
        if isinstance(other, LongInt):
            _value=int(other.value.toString(10))
        elif isinstance(other, str):
            _value=int(other)
        else:
            # Bug fix: plain-int exponents previously left ``_value``
            # unbound, raising NameError.
            _value=int(other)
        return LongInt(self.value.pow(_value))
#_path = __file__[:__file__.rfind('/')]+'/'
_path = __BRYTHON__.brython_path + 'Lib/long_int1/'
#to use decimal.js library uncomment these 2 lines
#javascript.load(_path+'decimal.min.js', ['Decimal'])
#LongInt=DecimalJS
#to use bignumber.js library uncomment these 2 lines
# Default backend: bignumber.js.
javascript.load(_path+'bignumber.min.js', ['BigNumber'])
LongInt=BigNumberJS
#big.js does not have a "base" so only base 10 stuff works.
#to use big.js library uncomment these 2 lines
#javascript.load(_path+'big.min.js', ['Big'])
#LongInt=BigJS
| gpl-3.0 |
Titan-C/helpful_scripts | bank.py | 1 | 1745 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
Parse Bank statement
====================
From CSV to ledger
"""
# Created: Sat May 18 18:32:47 2019
# Author: Óscar Nájera
# License: GPL-3
import argparse
import csv
import shlex
import subprocess
from datetime import datetime
from collections import namedtuple
# One row of the bank's semicolon-separated CSV export, in the column
# order produced by the bank.
entry = namedtuple(
    "Statement_entry",
    [
        "entry",
        "saved",
        "type",
        "info",
        "amount",
        "curr",
        "payee",
        "pa_Auftraggeberkonto",
        "IBAN_Auftraggeberkonto",
        "Kategorie",
    ],
)
def translate_bankstatement(filename):
    """Convert the bank's semicolon-separated CSV export into /tmp/exit
    as a date,payee,amount CSV suitable for ``ledger convert``."""
    with open(filename) as csvfile, open("/tmp/exit", "w") as fi:
        data = csv.reader(csvfile, delimiter=";")
        next(data)  # skip the header row
        fi.write("date,payee,amount\n")
        for row in data:
            en = entry(*row)
            # Fall back to "now" when the booking date is empty.
            date = (
                datetime.strptime(en.saved, "%d.%m.%Y") if en.saved else datetime.now()
            )
            date = date.strftime("%Y-%m-%d")
            # German decimal comma -> dot.
            amount = en.amount.replace(",", ".")
            # Commas in the payee field are flattened to keep the CSV valid.
            fi.write(f"""{date},{en.info.replace(',',' ')},{amount} {en.curr}\n""")
def convert_to_ledger():
    """Run ``ledger convert`` on /tmp/exit and write stdout to /tmp/led."""
    lo = shlex.split(
        'ledger convert /tmp/exit -f ~/dev/journal/accout_setup.ledger --account "Assets:Commerzbank Vorteil" --invert --rich-data -y %Y-%m-%d'
    )
    yo = subprocess.run(lo, capture_output=True)
    with open("/tmp/led", "wb") as fi:
        fi.write(yo.stdout)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create Ledger entries from bank")
    # NOTE(review): help text says "ndarray" but the argument is the bank
    # CSV export -- looks like copy/paste from another script; confirm.
    parser.add_argument("file", help="File containing the ndarray")
    args = parser.parse_args()
    translate_bankstatement(args.file)
    convert_to_ledger()
| gpl-2.0 |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/mgtv.py | 1 | 1936 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import int_or_none
class MGTVIE(InfoExtractor):
    """youtube-dl extractor for mgtv.com (Mango TV) video pages."""
    _VALID_URL = r'https?://(?:www\.)?mgtv\.com/(v|b)/(?:[^/]+/)*(?P<id>\d+)\.html'
    IE_DESC = '芒果TV'
    _TESTS = [{
        'url': 'http://www.mgtv.com/v/1/290525/f/3116640.html',
        'md5': 'b1ffc0fc163152acf6beaa81832c9ee7',
        'info_dict': {
            'id': '3116640',
            'ext': 'mp4',
            'title': '我是歌手第四季双年巅峰会:韩红李玟“双王”领军对抗',
            'description': '我是歌手第四季双年巅峰会',
            'duration': 7461,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://www.mgtv.com/b/301817/3826653.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Player API: returns stream metadata plus a list of CDN hosts.
        api_data = self._download_json(
            'http://pcweb.api.mgtv.com/player/video', video_id,
            query={'video_id': video_id},
            headers=self.geo_verification_headers())['data']
        info = api_data['info']
        title = info['title'].strip()
        stream_domain = api_data['stream_domain'][0]
        formats = []
        for idx, stream in enumerate(api_data['stream']):
            stream_path = stream.get('url')
            if not stream_path:
                continue
            # Each stream entry needs a second request to resolve the
            # actual playlist URL.
            format_data = self._download_json(
                stream_domain + stream_path, video_id,
                note='Download video info for format #%d' % idx)
            format_url = format_data.get('info')
            if not format_url:
                continue
            # Bitrate is encoded in the URL path, e.g. "..._1200_mp4/".
            tbr = int_or_none(self._search_regex(
                r'_(\d+)_mp4/', format_url, 'tbr', default=None))
            formats.append({
                'format_id': compat_str(tbr or idx),
                'url': format_url,
                'ext': 'mp4',
                'tbr': tbr,
                'protocol': 'm3u8_native',
            })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': info.get('desc'),
            'duration': int_or_none(info.get('duration')),
            'thumbnail': info.get('thumb'),
        }
| gpl-3.0 |
toshywoshy/ansible | lib/ansible/modules/network/fortios/fortios_log_fortiguard_override_setting.py | 7 | 10836 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_fortiguard_override_setting
short_description: Override global FortiCloud logging settings for this VDOM in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify log_fortiguard feature and override_setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
log_fortiguard_override_setting:
description:
- Override global FortiCloud logging settings for this VDOM.
default: null
type: dict
suboptions:
override:
description:
- Overriding FortiCloud settings for this VDOM or use global settings.
type: str
choices:
- enable
- disable
status:
description:
- Enable/disable logging to FortiCloud.
type: str
choices:
- enable
- disable
upload_day:
description:
- Day of week to roll logs.
type: str
upload_interval:
description:
- Frequency of uploading log files to FortiCloud.
type: str
choices:
- daily
- weekly
- monthly
upload_option:
description:
- Configure how log messages are sent to FortiCloud.
type: str
choices:
- store-and-upload
- realtime
- 1-minute
- 5-minute
upload_time:
description:
- "Time of day to roll logs (hh:mm)."
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Override global FortiCloud logging settings for this VDOM.
fortios_log_fortiguard_override_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_fortiguard_override_setting:
override: "enable"
status: "enable"
upload_day: "<your_own_value>"
upload_interval: "daily"
upload_option: "store-and-upload"
upload_time: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate the fortiosapi handle *fos* using the module params."""
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']
    fos.debug('on')
    # HTTPS defaults to on; only an explicit https=False disables it.
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')
    fos.login(host, username, password, verify=ssl_verify)
def filter_log_fortiguard_override_setting_data(json):
    """Keep only the recognised override-setting options that are set."""
    option_list = ['override', 'status', 'upload_day',
                   'upload_interval', 'upload_option', 'upload_time']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    Lists are converted in place; dicts are rebuilt; scalars pass through.
    """
    if isinstance(data, list):
        for index, item in enumerate(data):
            data[index] = underscore_to_hyphen(item)
    elif isinstance(data, dict):
        data = {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def log_fortiguard_override_setting(data, fos):
    """Apply the filtered override-setting payload via the FortiGate API."""
    vdom = data['vdom']
    log_fortiguard_override_setting_data = data['log_fortiguard_override_setting']
    # Drop unset options, then convert key underscores to API hyphens.
    filtered_data = underscore_to_hyphen(filter_log_fortiguard_override_setting_data(log_fortiguard_override_setting_data))
    return fos.set('log.fortiguard',
                   'override-setting',
                   data=filtered_data,
                   vdom=vdom)
def is_successful_status(status):
    """True for an API success, or a DELETE that 404'd (already absent)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_log_fortiguard(data, fos):
    """Dispatch to the override-setting handler.

    Returns (is_error, changed, response_dict).
    """
    if data['log_fortiguard_override_setting']:
        resp = log_fortiguard_override_setting(data, fos)
    # NOTE(review): if the key is missing/empty, ``resp`` is unbound and
    # the lines below raise NameError -- confirm callers always set it.
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point: parse arguments and apply the configuration."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "log_fortiguard_override_setting": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "override": {"required": False, "type": "str",
                             "choices": ["enable", "disable"]},
                "status": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]},
                "upload_day": {"required": False, "type": "str"},
                "upload_interval": {"required": False, "type": "str",
                                    "choices": ["daily", "weekly", "monthly"]},
                "upload_option": {"required": False, "type": "str",
                                  "choices": ["store-and-upload", "realtime", "1-minute",
                                              "5-minute"]},
                "upload_time": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
        'username' in module.params and module.params['username'] is not None and \
        'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI transport: reuse Ansible's persistent connection.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_log_fortiguard(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: log in directly through fortiosapi.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_log_fortiguard(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
maleficarium/youtube-dl | youtube_dl/extractor/fczenit.py | 57 | 1471 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
class FczenitIE(InfoExtractor):
    """youtube-dl extractor for fc-zenit.ru video pages."""
    _VALID_URL = r'https?://(?:www\.)?fc-zenit\.ru/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://fc-zenit.ru/video/41044/',
        'md5': '0e3fab421b455e970fa1aa3891e57df0',
        'info_dict': {
            'id': '41044',
            'ext': 'mp4',
            'title': 'Так пишется история: казанский разгром ЦСКА на «Зенит-ТВ»',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_title = self._html_search_regex(
            r'<[^>]+class=\"photoalbum__title\">([^<]+)', webpage, 'title')
        # The page embeds {tbr: url} mappings as JSON in ``arrPath``.
        video_items = self._parse_json(self._search_regex(
            r'arrPath\s*=\s*JSON\.parse\(\'(.+)\'\)', webpage, 'video items'),
            video_id)

        def merge_dicts(*dicts):
            # Fold a sequence of dicts into one (later keys win).
            ret = {}
            for a_dict in dicts:
                ret.update(a_dict)
            return ret

        formats = [{
            'url': compat_urlparse.urljoin(url, video_url),
            'tbr': int(tbr),
        } for tbr, video_url in merge_dicts(*video_items).items()]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
        }
| unlicense |
csachs/openmicroscopy | components/tools/OmeroPy/src/omero/util/figureUtil.py | 15 | 10940 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2009 University of Dundee. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
A collection of utility methods used by Figure scripts for producing
publication type of figures.
@author William Moore
<a href="mailto:will@lifesci.dundee.ac.uk">will@lifesci.dundee.ac.uk</a>
@author Jean-Marie Burel
<a href="mailto:j.burel@dundee.ac.uk">j.burel@dundee.ac.uk</a>
@author Donald MacDonald
<a href="mailto:donald@lifesci.dundee.ac.uk">donald@lifesci.dundee.ac.uk</a>
@version 3.0
<small>
(<b>Internal version:</b> $Revision: $Date: $)
</small>
@since 3.0-Beta4.1
"""
try:
from PIL import Image, ImageDraw # see ticket:2597
except ImportError:
import Image
import ImageDraw
WHITE = (255, 255, 255)  # RGB white
# Symbolic names for the supported time-label formats.
SECS_MILLIS = "SECS_MILLIS"
SECS = "SECS"
MINS = "MINS"
HOURS = "HOURS"
MINS_SECS = "MINS_SECS"
HOURS_MINS = "HOURS_MINS"
HOURS_MINS_SECS = "HOURS_MINS_SECS"
HOURS_MINS_SECS_MILLIS = "HOURS_MINS_SECS_MILLIS"
TIME_UNITS = [SECS_MILLIS, SECS, MINS, HOURS, MINS_SECS,
              HOURS_MINS, HOURS_MINS_SECS, HOURS_MINS_SECS_MILLIS]
def getDatasetsProjectsFromImages(queryService, imageIds):
    """
    Query returns a map where each key is the imageId and the value is a list
    of (projectName, datasetName) tuples.
    If the image does not have a Dataset AND Project, the map will hold an
    empty list for that imageId.
    @param queryService: The Omero query service
    @param imageIds: A list of image IDs. [long]
    @return: A map imageId:[(projectName, datasetName)]
    """
    ids = ",".join([str(i) for i in imageIds])
    # Eagerly fetch Image -> Dataset -> Project links in one HQL query.
    query_string = "select i from Image i join fetch i.datasetLinks idl join " \
        "fetch idl.parent d join fetch d.projectLinks pl join " \
        "fetch pl.parent where i.id in (%s)" % ids
    images = queryService.findAllByQuery(query_string, None)
    results = {}
    for i in images: # order of images not same as imageIds
        pdList = []
        imageId = i.getId().getValue()
        for link in i.iterateDatasetLinks():
            dataset = link.parent
            dName = dataset.getName().getValue()
            if dataset.sizeOfProjectLinks() == 0:
                # Orphan dataset: record it with an empty project name.
                pdList.append(("", dName))
            for dpLink in dataset.iterateProjectLinks():
                project = dpLink.parent
                pName = project.getName().getValue()
                pdList.append((pName, dName))
        results[imageId] = pdList
    # make sure the map contains all the imageIds
    for iId in imageIds:
        if iId not in results:
            results[iId] = []
    return results
def getTagsFromImages(metadataService, imageIds):
    """
    Query returns a map of key = imageId, value = [tagNames] for the image
    @param metadataService: The Omero metadata service
    @param imageIds: A list of image IDs. [long]
    @return: A map of imageId:[tagName]
    """
    annotationTypes = ["ome.model.annotations.TagAnnotation"]
    annsByImage = metadataService.loadAnnotations(
        "Image", imageIds, annotationTypes, None, None)
    # Extract the text value of each tag annotation, keyed by image id.
    return dict(
        (imageId,
         [ann.getTextValue().getValue() for ann in annsByImage[imageId]])
        for imageId in imageIds)
def getTimes(queryService, pixelsId, tIndexes, theZ=None, theC=None):
    """
    Get the time in seconds (float) for the first plane (C = 0 & Z = 0) at
    each time-point for the defined pixels.
    Returns a map of tIndex: timeInSecs
    @param queryService: The Omero queryService
    @param pixelsId: The ID of the pixels object. long
    @param tIndexes: List of time indexes. [int]
    @param theZ: The Z plane index. Default is 0
    @param theC: The Channel index. Default is 0
    @return: A map of tIndex: timeInSecs
    """
    zIndex = theZ if theZ is not None else 0
    cIndex = theC if theC is not None else 0
    tCsv = ",".join(str(t) for t in tIndexes)
    hql = "from PlaneInfo as Info where Info.theT in (%s) and Info.theZ "\
        "in (%d) and Info.theC in (%d) and pixels.id='%d'" \
        % (tCsv, zIndex, cIndex, pixelsId)
    times = {}
    for planeInfo in queryService.findAllByQuery(hql, None):
        times[planeInfo.theT.getValue()] = planeInfo.deltaT.getValue()
    return times
def formatTime(seconds, timeUnits):
    """
    Returns a string formatting of the time (in seconds)
    according to the chosen timeUnits:
    "SECS_MILLIS", "SECS", "MINS", "HOURS", "MINS_SECS",
    "HOURS_MINS", "HOURS_MINS_SECS", "HOURS_MINS_SECS_MILLIS"

    Any unrecognised format falls back to "<seconds>.00 sec".
    Negative times are formatted as the positive value with a "-" prefix.

    @param seconds: Time in seconds. float or int
    @param timeUnits: A string denoting the format. One of the choices above.
    @return: A string, such as "10" or "3:20"
    """
    neg = seconds < 0
    if neg:
        seconds = -seconds
    if timeUnits == "SECS_MILLIS":
        label = "%.2f" % seconds
    elif timeUnits == "SECS":
        label = "%d" % int(round(seconds))
    elif timeUnits == "MINS":
        mins = float(seconds) / float(60)
        label = "%d" % int(round(mins))
    elif timeUnits == "HOURS":
        hrs = float(seconds) / float(3600)
        label = "%d" % int(round(hrs))
    elif timeUnits == "MINS_SECS":
        mins = int(seconds // 60)
        secs = round(seconds % 60)
        label = "%d:%02d" % (mins, secs)
    elif timeUnits == "HOURS_MINS":
        hrs = int(seconds // 3600)
        # True division before rounding so 3700s -> "1:02" on both py2/py3.
        mins = round((seconds % 3600) / 60.0)
        label = "%d:%02d" % (hrs, mins)
    elif timeUnits == "HOURS_MINS_SECS":
        hrs = int(seconds // 3600)
        mins = int((seconds % 3600) // 60)
        # Bug fix: was "seconds % (3600 * 60)" (== seconds % 216000), which
        # printed the whole elapsed time in the seconds slot. The seconds
        # within the current minute are simply seconds % 60.
        secs = round(seconds % 60)
        label = "%d:%02d:%02d" % (hrs, mins, secs)
    elif timeUnits == "HOURS_MINS_SECS_MILLIS":
        hrs = int(seconds // 3600)
        mins = int((seconds % 3600) // 60)
        # Bug fix: same wrong modulus as above; keep fractional seconds here.
        secs = seconds % 60
        label = "%d:%02d:%05.2f" % (hrs, mins, secs)
    else:
        label = "%.2f sec" % seconds
    return "-%s" % label if neg else label
def getTimeLabels(queryService, pixelsId, tIndexes, sizeT,
                  timeUnits=None, showRoiDuration=False):
    """
    Returns a list of time labels e.g. "10", "20" for the first plane at
    each t-index (C=0 and Z=0). If no planeInfo is available,
    returns plane number/total e.g "3/10"
    If time units are not specified, the most suitable units are chosen
    based on the max time.
    The list of label returned includes the timeUnits as the last string
    in the list, in case you didn't specify it.
    @param queryService: The Omero query service
    @param pixelsId: The ID of the pixels you want info for
    @param tIndexes: List of t-index to get the times for.
    Assumed to be in t order.
    @param sizeT: The T dimension size of the pixels.
    Used if no plane info
    @param timeUnits: Format choice, see formatTime(). String
    @param showRoiDuration: If true, times shown are from the start of
    the ROI frames, otherwise use movie timestamp.
    @return: A list of strings, ordered same as tIndexes
    """
    timesByIndex = getTimes(queryService, pixelsId, tIndexes)
    # Pick the most readable units from the largest timestamp when the
    # caller did not specify any.
    if timeUnits is None and len(timesByIndex) > 0:
        longest = max(timesByIndex.values())
        if longest > 3600:
            timeUnits = HOURS_MINS
        elif longest > 60:
            timeUnits = MINS_SECS
        else:
            timeUnits = SECS_MILLIS
    labels = []
    for tIndex in tIndexes:
        try:
            secs = timesByIndex[tIndex]
        except KeyError:
            # No plane info available: fall back to "plane/total".
            labels.append("%d/%d" % (tIndex + 1, sizeT))
            continue
        if showRoiDuration:
            secs = secs - timesByIndex[tIndexes[0]]
        labels.append(formatTime(secs, timeUnits))
    labels.append(timeUnits)
    return labels
def addScalebar(scalebar, xIndent, yIndent, image, pixels, colour):
    """
    Adds a scalebar at the bottom right of an image, No text.

    Returns a (success, message) tuple; drawing is done in place on *image*.

    @param scalebar length of scalebar in microns
    @param xIndent indent from the right of the image
    @param yIndent indent from the bottom of the image
    @param image the PIL image to add scalebar to
    @param pixels the pixels object
    @param colour colour of the overlay as r,g,b tuple
    """
    draw = ImageDraw.Draw(image)
    # Pixel size (microns per pixel) is needed to convert the requested
    # physical length into a pixel length.
    if pixels.getPhysicalSizeX() is None:
        return False, " Failed to add scale bar: Pixel size not defined."
    pixelSizeX = pixels.getPhysicalSizeX().getValue()
    if pixelSizeX <= 0:
        return False, " Failed to add scale bar: Pixel size not defined."
    iWidth, iHeight = image.size
    # Thickness scales with image height (at least 1 px).
    lineThickness = (iHeight // 100) + 1
    scaleBarY = iHeight - yIndent
    # Left end of the bar: physical length divided by microns-per-pixel.
    scaleBarX = iWidth - scalebar // pixelSizeX - xIndent
    scaleBarX2 = iWidth - xIndent
    # Reject bars that would fall outside the image bounds.
    if (scaleBarX <= 0 or scaleBarX2 <= 0
            or scaleBarY <= 0 or scaleBarX2 > iWidth):
        return False, " Failed to add scale bar: Scale bar is too large."
    # Draw the bar as stacked 1-px lines, growing upwards from scaleBarY.
    for l in range(lineThickness):
        draw.line(
            [(scaleBarX, scaleBarY), (scaleBarX2, scaleBarY)], fill=colour)
        scaleBarY -= 1
    return True, " Scalebar added to the image."
def getVerticalLabels(labels, font, textGap):
    """
    Returns an image with the labels written vertically with the given font,
    black on white background

    @param labels: Sequence of strings to render, top to bottom (pre-rotation)
    @param font: PIL ImageFont used both for measuring and drawing
    @param textGap: Vertical gap in pixels between consecutive labels
    @return: A PIL Image rotated 90 degrees so the text reads vertically
    """
    maxWidth = 0
    height = 0
    # All labels share one line height; measured from a sample string with
    # ascenders and a descender.
    textHeight = font.getsize("testq")[1]
    # First pass: compute the canvas size needed for all labels.
    for label in labels:
        maxWidth = max(maxWidth, font.getsize(label)[0])
        if height > 0:
            height += textGap
        height += textHeight
    size = (maxWidth, height)
    textCanvas = Image.new("RGB", size, WHITE)
    textdraw = ImageDraw.Draw(textCanvas)
    py = 0
    # Second pass: draw each label centred horizontally, stacked vertically.
    for label in labels:
        indent = (maxWidth - font.getsize(label)[0]) / 2
        textdraw.text((indent, py), label, font=font, fill=(0, 0, 0))
        py += textHeight + textGap
    # Rotate so the stacked horizontal text becomes a vertical column.
    return textCanvas.rotate(90)
| gpl-2.0 |
AlmostBetterNetwork/podmaster-host | assets/management/commands/migrate_podcasts_to_assets.py | 3 | 1849 | from django.core.management.base import BaseCommand
from django.db import models
from assets.helpers.factory import create_asset_from_url
from podcasts.models import Podcast, PodcastEpisode
class Command(BaseCommand):
    """One-off data migration: copy podcast/episode media URLs into Asset
    records created via create_asset_from_url.

    Failures are logged per object and never abort the run; already-migrated
    objects (non-null asset fields) are skipped by the querysets.
    """
    help = 'Migrates podcasts from model fields to assets'

    def handle(self, *args, **options):
        # Podcast cover art: only podcasts that have no cover_art asset yet.
        for pod in Podcast.objects.filter(cover_art=None):
            try:
                pod.cover_art = create_asset_from_url(
                    'podcast_cover_art', pod.cover_image, owner=pod.owner, foreign_uuid=str(pod.id))
            except Exception as e:
                # Leave this podcast untouched and move on.
                print('Could not update cover art for <<{}>> :: {}'.format(pod, e))
                continue
            else:
                # Old field deliberately kept for now (rollback safety);
                # uncomment to clear it once the migration is verified.
                # pod.cover_image = None
                pod.save()
        # Episodes missing either artwork or audio assets.
        for ep in PodcastEpisode.objects.filter(models.Q(audio=None) | models.Q(artwork=None)).select_related('podcast'):
            owner = ep.podcast.owner
            try:
                ep.artwork = create_asset_from_url(
                    'episode_artwork', ep.image_url, owner=owner, foreign_uuid=str(ep.id))
            except Exception as e:
                print('Could not update episode artwork for <<{}>> :: {}'.format(ep, e))
            else:
                # ep.image_url = None
                pass
            # Audio is migrated independently of artwork: an artwork failure
            # above does not prevent the audio asset from being created.
            try:
                ep.audio = create_asset_from_url(
                    'episode_audio',
                    ep.audio_url,
                    owner=owner,
                    foreign_uuid=str(ep.id),
                    content_type=ep.audio_type,
                    content_size=ep.audio_size,
                )
            except Exception as e:
                print('Could not update audio for <<{}>> :: {}'.format(ep, e))
            else:
                # ep.audio_url = None
                pass
            # NOTE(review): ep.save() runs even when both migrations failed;
            # harmless but re-saves the row — confirm this is intended.
            ep.save()
| apache-2.0 |
sharkykh/SickRage | lib/tornado/testing.py | 19 | 28046 | #!/usr/bin/env python
"""Support classes for automated testing.
* `AsyncTestCase` and `AsyncHTTPTestCase`: Subclasses of unittest.TestCase
with additional support for testing asynchronous (`.IOLoop`-based) code.
* `ExpectLog` and `LogTrapTestCase`: Make test logs less spammy.
* `main()`: A simple test runner (wrapper around unittest.main()) with support
for the tornado.autoreload module to rerun the tests when code changes.
"""
from __future__ import absolute_import, division, print_function
try:
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.ioloop import IOLoop, TimeoutError
from tornado import netutil
from tornado.process import Subprocess
except ImportError:
# These modules are not importable on app engine. Parts of this module
# won't work, but e.g. LogTrapTestCase and main() will.
AsyncHTTPClient = None # type: ignore
gen = None # type: ignore
HTTPServer = None # type: ignore
IOLoop = None # type: ignore
netutil = None # type: ignore
SimpleAsyncHTTPClient = None # type: ignore
Subprocess = None # type: ignore
from tornado.log import gen_log, app_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import raise_exc_info, basestring_type, PY3
import functools
import inspect
import logging
import os
import re
import signal
import socket
import sys
if PY3:
from io import StringIO
else:
from cStringIO import StringIO
try:
from collections.abc import Generator as GeneratorType # type: ignore
except ImportError:
from types import GeneratorType # type: ignore
if sys.version_info >= (3, 5):
iscoroutine = inspect.iscoroutine # type: ignore
iscoroutinefunction = inspect.iscoroutinefunction # type: ignore
else:
iscoroutine = iscoroutinefunction = lambda f: False
# Tornado's own test suite requires the updated unittest module
# (either py27+ or unittest2) so tornado.test.util enforces
# this requirement, but for other users of tornado.testing we want
# to allow the older version if unitest2 is not available.
if PY3:
# On python 3, mixing unittest2 and unittest (including doctest)
# doesn't seem to work, so always use unittest.
import unittest
else:
# On python 2, prefer unittest2 when available.
try:
import unittest2 as unittest # type: ignore
except ImportError:
import unittest # type: ignore
_next_port = 10000
def get_unused_port():
    """Returns a (hopefully) unused port number.

    This function does not guarantee that the port it returns is available,
    only that a series of get_unused_port calls in a single process return
    distinct ports.

    .. deprecated::
       Use bind_unused_port instead, which is guaranteed to find an unused
       port.
    """
    global _next_port
    allocated = _next_port
    _next_port += 1
    return allocated
def bind_unused_port(reuse_port=False):
    """Binds a server socket to an available port on localhost.

    Returns a tuple (socket, port).

    .. versionchanged:: 4.4
       Always binds to ``127.0.0.1`` without resolving the name
       ``localhost``.
    """
    bound = netutil.bind_sockets(None, '127.0.0.1', family=socket.AF_INET,
                                 reuse_port=reuse_port)
    sock = bound[0]
    return sock, sock.getsockname()[1]
def get_async_test_timeout():
    """Get the global timeout setting for async tests.

    Returns a float, the timeout in seconds.

    .. versionadded:: 3.1
    """
    env_value = os.environ.get('ASYNC_TEST_TIMEOUT')
    if env_value is not None:
        try:
            return float(env_value)
        except ValueError:
            # Unparsable value: fall through to the default.
            pass
    return 5
class _TestMethodWrapper(object):
    """Wraps a test method to raise an error if it returns a value.

    This is mainly used to detect undecorated generators (if a test
    method yields it must use a decorator to consume the generator),
    but will also detect other kinds of return values (these are not
    necessarily errors, but we alert anyway since there is no good
    reason to return a value from a test).
    """
    def __init__(self, orig_method):
        # The undecorated test method being wrapped.
        self.orig_method = orig_method

    def __call__(self, *args, **kwargs):
        result = self.orig_method(*args, **kwargs)
        # A generator/coroutine result means the test body never ran: the
        # author forgot @gen_test, so fail loudly instead of silently passing.
        if isinstance(result, GeneratorType) or iscoroutine(result):
            raise TypeError("Generator and coroutine test methods should be"
                            " decorated with tornado.testing.gen_test")
        elif result is not None:
            raise ValueError("Return value from test method ignored: %r" %
                             result)

    def __getattr__(self, name):
        """Proxy all unknown attributes to the original method.

        This is important for some of the decorators in the `unittest`
        module, such as `unittest.skipIf`.
        """
        return getattr(self.orig_method, name)
class AsyncTestCase(unittest.TestCase):
    """`~unittest.TestCase` subclass for testing `.IOLoop`-based
    asynchronous code.

    The unittest framework is synchronous, so the test must be
    complete by the time the test method returns.  This means that
    asynchronous code cannot be used in quite the same way as usual.
    To write test functions that use the same ``yield``-based patterns
    used with the `tornado.gen` module, decorate your test methods
    with `tornado.testing.gen_test` instead of
    `tornado.gen.coroutine`.  This class also provides the `stop()`
    and `wait()` methods for a more manual style of testing.  The test
    method itself must call ``self.wait()``, and asynchronous
    callbacks should call ``self.stop()`` to signal completion.

    By default, a new `.IOLoop` is constructed for each test and is available
    as ``self.io_loop``.  This `.IOLoop` should be used in the construction of
    HTTP clients/servers, etc.  If the code being tested requires a
    global `.IOLoop`, subclasses should override `get_new_ioloop` to return it.

    The `.IOLoop`'s ``start`` and ``stop`` methods should not be
    called directly.  Instead, use `self.stop <stop>` and `self.wait
    <wait>`.  Arguments passed to ``self.stop`` are returned from
    ``self.wait``.  It is possible to have multiple ``wait``/``stop``
    cycles in the same test.

    Example::

        # This test uses coroutine style.
        class MyTestCase(AsyncTestCase):
            @tornado.testing.gen_test
            def test_http_fetch(self):
                client = AsyncHTTPClient(self.io_loop)
                response = yield client.fetch("http://www.tornadoweb.org")
                # Test contents of response
                self.assertIn("FriendFeed", response.body)

        # This test uses argument passing between self.stop and self.wait.
        class MyTestCase2(AsyncTestCase):
            def test_http_fetch(self):
                client = AsyncHTTPClient(self.io_loop)
                client.fetch("http://www.tornadoweb.org/", self.stop)
                response = self.wait()
                # Test contents of response
                self.assertIn("FriendFeed", response.body)

        # This test uses an explicit callback-based style.
        class MyTestCase3(AsyncTestCase):
            def test_http_fetch(self):
                client = AsyncHTTPClient(self.io_loop)
                client.fetch("http://www.tornadoweb.org/", self.handle_fetch)
                self.wait()

            def handle_fetch(self, response):
                # Test contents of response (failures and exceptions here
                # will cause self.wait() to throw an exception and end the
                # test).
                # Exceptions thrown here are magically propagated to
                # self.wait() in test_http_fetch() via stack_context.
                self.assertIn("FriendFeed", response.body)
                self.stop()
    """
    def __init__(self, methodName='runTest'):
        super(AsyncTestCase, self).__init__(methodName)
        # __stopped/__running track the wait()/stop() handshake state;
        # __failure holds the first (exc_type, exc_value, tb) seen.
        self.__stopped = False
        self.__running = False
        self.__failure = None
        self.__stop_args = None
        self.__timeout = None

        # It's easy to forget the @gen_test decorator, but if you do
        # the test will silently be ignored because nothing will consume
        # the generator.  Replace the test method with a wrapper that will
        # make sure it's not an undecorated generator.
        setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))

    def setUp(self):
        super(AsyncTestCase, self).setUp()
        # Fresh IOLoop per test, installed as the "current" loop so code
        # using IOLoop.current() picks it up.
        self.io_loop = self.get_new_ioloop()
        self.io_loop.make_current()

    def tearDown(self):
        # Clean up Subprocess, so it can be used again with a new ioloop.
        Subprocess.uninitialize()
        self.io_loop.clear_current()
        if (not IOLoop.initialized() or
                self.io_loop is not IOLoop.instance()):
            # Try to clean up any file descriptors left open in the ioloop.
            # This avoids leaks, especially when tests are run repeatedly
            # in the same process with autoreload (because curl does not
            # set FD_CLOEXEC on its file descriptors)
            self.io_loop.close(all_fds=True)
        super(AsyncTestCase, self).tearDown()
        # In case an exception escaped or the StackContext caught an exception
        # when there wasn't a wait() to re-raise it, do so here.
        # This is our last chance to raise an exception in a way that the
        # unittest machinery understands.
        self.__rethrow()

    def get_new_ioloop(self):
        """Creates a new `.IOLoop` for this test.  May be overridden in
        subclasses for tests that require a specific `.IOLoop` (usually
        the singleton `.IOLoop.instance()`).
        """
        return IOLoop()

    def _handle_exception(self, typ, value, tb):
        # Invoked by the ExceptionStackContext installed in run(): record
        # the first failure (extra ones are only logged) and stop the loop
        # so wait() can re-raise it.
        if self.__failure is None:
            self.__failure = (typ, value, tb)
        else:
            app_log.error("multiple unhandled exceptions in test",
                          exc_info=(typ, value, tb))
        self.stop()
        return True

    def __rethrow(self):
        # Re-raise (once) any failure captured by _handle_exception.
        if self.__failure is not None:
            failure = self.__failure
            self.__failure = None
            raise_exc_info(failure)

    def run(self, result=None):
        with ExceptionStackContext(self._handle_exception):
            super(AsyncTestCase, self).run(result)
        # As a last resort, if an exception escaped super.run() and wasn't
        # re-raised in tearDown, raise it here.  This will cause the
        # unittest run to fail messily, but that's better than silently
        # ignoring an error.
        self.__rethrow()

    def stop(self, _arg=None, **kwargs):
        """Stops the `.IOLoop`, causing one pending (or future) call to `wait()`
        to return.

        Keyword arguments or a single positional argument passed to `stop()` are
        saved and will be returned by `wait()`.
        """
        # Positional and keyword results are mutually exclusive.
        assert _arg is None or not kwargs
        self.__stop_args = kwargs or _arg
        if self.__running:
            self.io_loop.stop()
            self.__running = False
        self.__stopped = True

    def wait(self, condition=None, timeout=None):
        """Runs the `.IOLoop` until stop is called or timeout has passed.

        In the event of a timeout, an exception will be thrown. The
        default timeout is 5 seconds; it may be overridden with a
        ``timeout`` keyword argument or globally with the
        ``ASYNC_TEST_TIMEOUT`` environment variable.

        If ``condition`` is not None, the `.IOLoop` will be restarted
        after `stop()` until ``condition()`` returns true.

        .. versionchanged:: 3.1
           Added the ``ASYNC_TEST_TIMEOUT`` environment variable.
        """
        if timeout is None:
            timeout = get_async_test_timeout()
        if not self.__stopped:
            if timeout:
                def timeout_func():
                    # Raise-then-catch so __failure carries a real traceback.
                    try:
                        raise self.failureException(
                            'Async operation timed out after %s seconds' %
                            timeout)
                    except Exception:
                        self.__failure = sys.exc_info()
                    self.stop()
                self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, timeout_func)
            while True:
                self.__running = True
                self.io_loop.start()
                # Keep restarting the loop until there is a failure or the
                # caller's condition (if any) is satisfied.
                if (self.__failure is not None or
                        condition is None or condition()):
                    break
            if self.__timeout is not None:
                self.io_loop.remove_timeout(self.__timeout)
                self.__timeout = None
        assert self.__stopped
        self.__stopped = False
        self.__rethrow()
        result = self.__stop_args
        self.__stop_args = None
        return result
class AsyncHTTPTestCase(AsyncTestCase):
    """A test case that starts up an HTTP server.

    Subclasses must override `get_app()`, which returns the
    `tornado.web.Application` (or other `.HTTPServer` callback) to be tested.
    Tests will typically use the provided ``self.http_client`` to fetch
    URLs from this server.

    Example, assuming the "Hello, world" example from the user guide is in
    ``hello.py``::

        import hello

        class TestHelloApp(AsyncHTTPTestCase):
            def get_app(self):
                return hello.make_app()

            def test_homepage(self):
                response = self.fetch('/')
                self.assertEqual(response.code, 200)
                self.assertEqual(response.body, 'Hello, world')

    That call to ``self.fetch()`` is equivalent to ::

        self.http_client.fetch(self.get_url('/'), self.stop)
        response = self.wait()

    which illustrates how AsyncTestCase can turn an asynchronous operation,
    like ``http_client.fetch()``, into a synchronous operation. If you need
    to do other asynchronous operations in tests, you'll probably need to use
    ``stop()`` and ``wait()`` yourself.
    """
    def setUp(self):
        super(AsyncHTTPTestCase, self).setUp()
        # Bind to an OS-assigned free port and hand the already-bound socket
        # to the server, avoiding bind races between tests.
        sock, port = bind_unused_port()
        self.__port = port

        self.http_client = self.get_http_client()
        self._app = self.get_app()
        self.http_server = self.get_http_server()
        self.http_server.add_sockets([sock])

    def get_http_client(self):
        return AsyncHTTPClient(io_loop=self.io_loop)

    def get_http_server(self):
        return HTTPServer(self._app, io_loop=self.io_loop,
                          **self.get_httpserver_options())

    def get_app(self):
        """Should be overridden by subclasses to return a
        `tornado.web.Application` or other `.HTTPServer` callback.
        """
        raise NotImplementedError()

    def fetch(self, path, **kwargs):
        """Convenience method to synchronously fetch a url.

        The given path will be appended to the local server's host and
        port.  Any additional kwargs will be passed directly to
        `.AsyncHTTPClient.fetch` (and so could be used to pass
        ``method="POST"``, ``body="..."``, etc).
        """
        self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
        return self.wait()

    def get_httpserver_options(self):
        """May be overridden by subclasses to return additional
        keyword arguments for the server.
        """
        return {}

    def get_http_port(self):
        """Returns the port used by the server.

        A new port is chosen for each test.
        """
        return self.__port

    def get_protocol(self):
        return 'http'

    def get_url(self, path):
        """Returns an absolute url for the given path on the test server."""
        return '%s://localhost:%s%s' % (self.get_protocol(),
                                        self.get_http_port(), path)

    def tearDown(self):
        self.http_server.stop()
        # Drain outstanding connections before closing the loop so sockets
        # do not leak between tests.
        self.io_loop.run_sync(self.http_server.close_all_connections,
                              timeout=get_async_test_timeout())
        if (not IOLoop.initialized() or
                self.http_client.io_loop is not IOLoop.instance()):
            self.http_client.close()
        super(AsyncHTTPTestCase, self).tearDown()
class AsyncHTTPSTestCase(AsyncHTTPTestCase):
    """A test case that starts an HTTPS server.

    Interface is generally the same as `AsyncHTTPTestCase`.
    """
    def get_http_client(self):
        # The test certificate is self-signed, so certificate validation
        # must be disabled for the client.
        return AsyncHTTPClient(io_loop=self.io_loop, force_instance=True,
                               defaults=dict(validate_cert=False))

    def get_httpserver_options(self):
        return dict(ssl_options=self.get_ssl_options())

    def get_ssl_options(self):
        """May be overridden by subclasses to select SSL options.

        By default includes a self-signed testing certificate.
        """
        # Testing keys were generated with:
        # openssl req -new -keyout tornado/test/test.key -out tornado/test/test.crt -nodes -days 3650 -x509
        module_dir = os.path.dirname(__file__)
        return dict(
            certfile=os.path.join(module_dir, 'test', 'test.crt'),
            keyfile=os.path.join(module_dir, 'test', 'test.key'))

    def get_protocol(self):
        return 'https'
def gen_test(func=None, timeout=None):
    """Testing equivalent of ``@gen.coroutine``, to be applied to test methods.

    ``@gen.coroutine`` cannot be used on tests because the `.IOLoop` is not
    already running.  ``@gen_test`` should be applied to test methods
    on subclasses of `AsyncTestCase`.

    Example::

        class MyTest(AsyncHTTPTestCase):
            @gen_test
            def test_something(self):
                response = yield gen.Task(self.fetch('/'))

    By default, ``@gen_test`` times out after 5 seconds. The timeout may be
    overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable,
    or for each test with the ``timeout`` keyword argument::

        class MyTest(AsyncHTTPTestCase):
            @gen_test(timeout=10)
            def test_something_slow(self):
                response = yield gen.Task(self.fetch('/'))

    .. versionadded:: 3.1
       The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
       variable.

    .. versionchanged:: 4.0
       The wrapper now passes along ``*args, **kwargs`` so it can be used
       on functions with arguments.
    """
    if timeout is None:
        timeout = get_async_test_timeout()

    def wrap(f):
        # Stack up several decorators to allow us to access the generator
        # object itself.  In the innermost wrapper, we capture the generator
        # and save it in an attribute of self.  Next, we run the wrapped
        # function through @gen.coroutine.  Finally, the coroutine is
        # wrapped again to make it synchronous with run_sync.
        #
        # This is a good case study arguing for either some sort of
        # extensibility in the gen decorators or cancellation support.
        @functools.wraps(f)
        def pre_coroutine(self, *args, **kwargs):
            result = f(self, *args, **kwargs)
            if isinstance(result, GeneratorType) or iscoroutine(result):
                self._test_generator = result
            else:
                self._test_generator = None
            return result

        # Native coroutines are already awaitable; only generator-based
        # tests need the gen.coroutine wrapper.
        if iscoroutinefunction(f):
            coro = pre_coroutine
        else:
            coro = gen.coroutine(pre_coroutine)

        @functools.wraps(coro)
        def post_coroutine(self, *args, **kwargs):
            try:
                return self.io_loop.run_sync(
                    functools.partial(coro, self, *args, **kwargs),
                    timeout=timeout)
            except TimeoutError as e:
                # run_sync raises an error with an unhelpful traceback.
                # Throw it back into the generator or coroutine so the stack
                # trace is replaced by the point where the test is stopped.
                self._test_generator.throw(e)
                # In case the test contains an overly broad except clause,
                # we may get back here.  In this case re-raise the original
                # exception, which is better than nothing.
                raise
        return post_coroutine

    if func is not None:
        # Used like:
        #     @gen_test
        #     def f(self):
        #         pass
        return wrap(func)
    else:
        # Used like @gen_test(timeout=10)
        return wrap


# Without this attribute, nosetests will try to run gen_test as a test
# anywhere it is imported.
gen_test.__test__ = False  # type: ignore
class LogTrapTestCase(unittest.TestCase):
    """A test case that captures and discards all logging output
    if the test passes.

    Some libraries can produce a lot of logging output even when
    the test succeeds, so this class can be useful to minimize the noise.
    Simply use it as a base class for your test case.  It is safe to combine
    with AsyncTestCase via multiple inheritance
    (``class MyTestCase(AsyncHTTPTestCase, LogTrapTestCase):``)

    This class assumes that only one log handler is configured and
    that it is a `~logging.StreamHandler`.  This is true for both
    `logging.basicConfig` and the "pretty logging" configured by
    `tornado.options`.  It is not compatible with other log buffering
    mechanisms, such as those provided by some test runners.

    .. deprecated:: 4.1
       Use the unittest module's ``--buffer`` option instead, or `.ExpectLog`.
    """
    def run(self, result=None):
        logger = logging.getLogger()
        if not logger.handlers:
            logging.basicConfig()
        handler = logger.handlers[0]
        if (len(logger.handlers) > 1 or
                not isinstance(handler, logging.StreamHandler)):
            # Logging has been configured in a way we don't recognize,
            # so just leave it alone.
            super(LogTrapTestCase, self).run(result)
            return
        # Temporarily divert the handler's stream into a buffer; replay
        # the buffer to the real stream only if the test failed/errored.
        old_stream = handler.stream
        try:
            handler.stream = StringIO()
            gen_log.info("RUNNING TEST: " + str(self))
            old_error_count = len(result.failures) + len(result.errors)
            super(LogTrapTestCase, self).run(result)
            new_error_count = len(result.failures) + len(result.errors)
            if new_error_count != old_error_count:
                old_stream.write(handler.stream.getvalue())
        finally:
            handler.stream = old_stream
class ExpectLog(logging.Filter):
    """Context manager to capture and suppress expected log output.

    Useful to make tests of error conditions less noisy, while still
    leaving unexpected log entries visible.  *Not thread safe.*

    The attribute ``logged_stack`` is set to true if any exception
    stack trace was logged.

    Usage::

        with ExpectLog('tornado.application', "Uncaught exception"):
            error_response = self.fetch("/some_page")

    .. versionchanged:: 4.3
       Added the ``logged_stack`` attribute.
    """
    def __init__(self, logger, regex, required=True):
        """Constructs an ExpectLog context manager.

        :param logger: Logger object (or name of logger) to watch.  Pass
            an empty string to watch the root logger.
        :param regex: Regular expression to match.  Any log entries on
            the specified logger that match this regex will be suppressed.
        :param required: If true, an exception will be raised if the end of
            the ``with`` statement is reached without matching any log entries.
        """
        if isinstance(logger, basestring_type):
            logger = logging.getLogger(logger)
        self.logger = logger
        self.regex = re.compile(regex)
        self.required = required
        self.matched = False
        self.logged_stack = False

    def filter(self, record):
        # logging.Filter protocol: returning False suppresses the record.
        if record.exc_info:
            self.logged_stack = True
        message = record.getMessage()
        if self.regex.match(message):
            self.matched = True
            return False
        return True

    def __enter__(self):
        self.logger.addFilter(self)
        return self

    def __exit__(self, typ, value, tb):
        self.logger.removeFilter(self)
        # Only complain about a missing match if the body didn't already
        # raise (typ is None on clean exit).
        if not typ and self.required and not self.matched:
            raise Exception("did not get expected log message")
def main(**kwargs):
    """A simple test runner.

    This test runner is essentially equivalent to `unittest.main` from
    the standard library, but adds support for tornado-style option
    parsing and log formatting.  It is *not* necessary to use this
    `main` function to run tests using `AsyncTestCase`; these tests
    are self-contained and can run with any test runner.

    The easiest way to run a test is via the command line::

        python -m tornado.testing tornado.test.stack_context_test

    See the standard library unittest module for ways in which tests can
    be specified.

    Projects with many tests may wish to define a test script like
    ``tornado/test/runtests.py``.  This script should define a method
    ``all()`` which returns a test suite and then call
    `tornado.testing.main()`.  Note that even when a test script is
    used, the ``all()`` test suite may be overridden by naming a
    single test on the command line::

        # Runs all tests
        python -m tornado.test.runtests
        # Runs one test
        python -m tornado.test.runtests tornado.test.stack_context_test

    Additional keyword arguments passed through to ``unittest.main()``.
    For example, use ``tornado.testing.main(verbosity=2)``
    to show many test details as they are run.
    See http://docs.python.org/library/unittest.html#unittest.main
    for full argument list.
    """
    from tornado.options import define, options, parse_command_line

    define('exception_on_interrupt', type=bool, default=True,
           help=("If true (default), ctrl-c raises a KeyboardInterrupt "
                 "exception.  This prints a stack trace but cannot interrupt "
                 "certain operations.  If false, the process is more reliably "
                 "killed, but does not print a stack trace."))

    # support the same options as unittest's command-line interface
    define('verbose', type=bool)
    define('quiet', type=bool)
    define('failfast', type=bool)
    define('catch', type=bool)
    define('buffer', type=bool)

    argv = [sys.argv[0]] + parse_command_line(sys.argv)

    if not options.exception_on_interrupt:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    # Translate the tornado-style flags into unittest.main() kwargs.
    if options.verbose is not None:
        kwargs['verbosity'] = 2
    if options.quiet is not None:
        kwargs['verbosity'] = 0
    if options.failfast is not None:
        kwargs['failfast'] = True
    if options.catch is not None:
        kwargs['catchbreak'] = True
    if options.buffer is not None:
        kwargs['buffer'] = True

    if __name__ == '__main__' and len(argv) == 1:
        print("No tests specified", file=sys.stderr)
        sys.exit(1)
    try:
        # In order to be able to run tests by their fully-qualified name
        # on the command line without importing all tests here,
        # module must be set to None.  Python 3.2's unittest.main ignores
        # defaultTest if no module is given (it tries to do its own
        # test discovery, which is incompatible with auto2to3), so don't
        # set module if we're not asking for a specific test.
        if len(argv) > 1:
            unittest.main(module=None, argv=argv, **kwargs)
        else:
            unittest.main(defaultTest="all", argv=argv, **kwargs)
    except SystemExit as e:
        # unittest.main always exits; log a summary line before re-raising.
        if e.code == 0:
            gen_log.info('PASS')
        else:
            gen_log.error('FAIL')
        raise


if __name__ == '__main__':
    main()
| gpl-3.0 |
crvv/shadowsocks | utils/autoban.py | 1033 | 2156 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import argparse
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='See README')
    parser.add_argument('-c', '--count', default=3, type=int,
                        help='with how many failure times it should be '
                             'considered as an attack')
    config = parser.parse_args()
    ips = {}        # failure count per source address
    banned = set()  # addresses already handed to iptables
    # Characters that may legally appear in an IPv4/IPv6 address.  The
    # address is parsed out of an *untrusted* log line and interpolated
    # into a shell command below, so anything outside this set is rejected
    # to prevent shell command injection via a crafted log entry.
    addr_chars = frozenset('0123456789abcdefABCDEF.:')
    for line in sys.stdin:
        if 'can not parse header when' in line:
            ip = line.split()[-1].split(':')[0]
            if not ip or not all(c in addr_chars for c in ip):
                # Malformed or hostile token -- never let it reach the shell.
                continue
            if ip not in ips:
                # First failure from this address: remember and report it.
                ips[ip] = 1
                print(ip)
                sys.stdout.flush()
            else:
                ips[ip] += 1
                if ip not in banned and ips[ip] >= config.count:
                    # Threshold reached: drop all further traffic from it.
                    banned.add(ip)
                    cmd = 'iptables -A INPUT -s %s -j DROP' % ip
                    print(cmd, file=sys.stderr)
                    sys.stderr.flush()
                    os.system(cmd)
| apache-2.0 |
s20121035/rk3288_android5.1_repo | external/chromium_org/components/cronet/tools/cr_cronet.py | 25 | 1974 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
cr_cronet.py - cr - like helper tool for cronet developers
"""
import argparse
import os
import sys
def run(command):
    # Echo the shell command before executing it (Python 2 print statement)
    # so the developer sees exactly what is being run; returns the
    # os.system() exit status so callers can propagate it via sys.exit().
    print command
    return os.system(command)
def main():
    # Parse the cr-style sub-command and dispatch to the matching shell
    # command chain.  Returns the shell exit status, or 1 for a command
    # that has no handler (falls through to print_help below).
    parser = argparse.ArgumentParser()
    parser.add_argument('command',
                        choices=['gyp',
                                 'sync',
                                 'build',
                                 'install',
                                 'proguard',
                                 'test',
                                 'debug'])
    parser.add_argument('-r', '--release', action='store_true',
                        help='use release configuration')
    options = parser.parse_args()
    print options
    # GYP variables shared by 'gyp' and 'sync': cronet is configured without
    # websockets/file/ftp support and with the ICU alternatives on Android.
    gyp_defines = 'GYP_DEFINES="OS=android enable_websockets=0 '+ \
        'disable_file_support=1 disable_ftp_support=1 '+ \
        'use_icu_alternatives_on_android=1" '
    out_dir = 'out/Debug'
    release_arg = ''
    if options.release:
        out_dir = 'out/Release'
        release_arg = ' --release'
    if (options.command=='gyp'):
        return run (gyp_defines + ' gclient runhooks')
    if (options.command=='sync'):
        return run ('git pull --rebase && ' + gyp_defines + ' gclient sync')
    if (options.command=='build'):
        return run ('ninja -C ' + out_dir + ' cronet_test_instrumentation_apk')
    if (options.command=='install'):
        return run ('build/android/adb_install_apk.py ' + release_arg + \
                    ' --apk=CronetTest.apk')
    if (options.command=='proguard'):
        return run ('ninja -C ' + out_dir + ' cronet_sample_proguard_apk')
    if (options.command=='test'):
        return run ('build/android/test_runner.py instrumentation '+ \
                    release_arg + ' --test-apk=CronetTestInstrumentation')
    # NOTE(review): 'debug' is an accepted choice above but has no handler
    # here, so it reaches this fallback and exits with status 1.
    parser.print_help()
    return 1

if __name__ == '__main__':
    sys.exit(main())
| gpl-3.0 |
napkindrawing/ansible | lib/ansible/modules/cloud/google/gce_eip.py | 70 | 6857 | #!/usr/bin/python
# Copyright 2017 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: gce_eip
version_added: "2.3"
short_description: Create or Destroy Global or Regional External IP addresses.
description:
- Create (reserve) or Destroy (release) Regional or Global IP Addresses. See
U(https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) for more on reserving static addresses.
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.19.0"
notes:
- Global addresses can only be used with Global Forwarding Rules.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
name:
description:
- Name of Address.
required: true
region:
description:
- Region to create the address in. Set to 'global' to create a global address.
required: true
state:
description: The state the address should be in. C(present) or C(absent) are the only valid options.
default: present
required: false
choices: [present, absent]
'''
EXAMPLES = '''
# Create a Global external IP address
gce_eip:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
name: my-global-ip
region: global
state: present
# Create a Regional external IP address
gce_eip:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
name: my-global-ip
region: us-east1
state: present
'''
RETURN = '''
address:
description: IP address being operated on
returned: always
type: string
sample: "35.186.222.233"
name:
description: name of the address being operated on
returned: always
type: string
sample: "my-address"
region:
description: Which region an address belongs.
returned: always
type: string
sample: "global"
'''
# User agent reported with every GCE API request so Google can attribute
# traffic to this Ansible module.
USER_AGENT_VERSION = 'v1'
USER_AGENT_PRODUCT = 'Ansible-gce_eip'

try:
    import libcloud
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
        ResourceExistsError, ResourceInUseError, ResourceNotFoundError
    from libcloud.compute.drivers.gce import GCEAddress
    # Probe that this libcloud build actually ships the GCE provider.
    _ = Provider.GCE
    HAS_LIBCLOUD = True
except ImportError:
    HAS_LIBCLOUD = False

try:
    # ast.literal_eval only exists on Python 2.6+, so a successful import
    # doubles as the minimum-interpreter-version check used in main().
    from ast import literal_eval
    HAS_PYTHON26 = True
except ImportError:
    HAS_PYTHON26 = False
def get_address(gce, name, region):
    """Look up a GCE static address by name.

    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`

    :param name: Name of the Address.
    :type name: ``str``

    :param region: Region the address lives in ('global' for global ones).
    :type region: ``str``

    :return: The matching GCEAddress, or None when no such address exists.
    :rtype: :class: `GCEAddress` or None
    """
    try:
        found = gce.ex_get_address(name=name, region=region)
    except ResourceNotFoundError:
        found = None
    return found
def create_address(gce, params):
    """Reserve (create) a new static Address.

    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`

    :param params: Module parameters; 'name' and 'region' are used here.
    :type params: ``dict``

    :return: Tuple of (changed, ip-address-string).
    :rtype: tuple in the format of (bool, str)
    """
    created = gce.ex_create_address(
        name=params['name'], region=params['region'])
    if not created:
        # Driver reported nothing created: signal "no change" to Ansible.
        return (False, [])
    return (True, created.address)
def delete_address(address):
    """Release (destroy) a reserved Address.

    :param address: The address object to destroy.
    :type address: :class: `GCEAddress`

    :return: Tuple of (changed, ip-address-string); (False, []) when the
             destroy call reports failure.
    :rtype: tuple in the format of (bool, str)
    """
    if not address.destroy():
        return (False, [])
    return (True, address.address)
def main():
    # Ansible entry point: declare the module interface, validate the
    # runtime prerequisites, then reserve or release the requested address.
    module = AnsibleModule(argument_spec=dict(
        name=dict(required=True),
        state=dict(choices=['absent', 'present'], default='present'),
        region=dict(required=True),
        service_account_email=dict(),
        service_account_permissions=dict(type='list'),
        pem_file=dict(type='path'),
        credentials_file=dict(type='path'),
        project_id=dict(), ), )

    # Hard requirements checked up front; fail_json() exits the module.
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(
            msg='libcloud with GCE support (+0.19) required for this module.')

    gce = gcp_connect(module, Provider.GCE, get_driver,
                      USER_AGENT_PRODUCT, USER_AGENT_VERSION)

    params = {}
    params['state'] = module.params.get('state')
    params['name'] = module.params.get('name')
    params['region'] = module.params.get('region')

    changed = False
    json_output = {'state': params['state']}
    address = get_address(gce, params['name'], region=params['region'])
    if params['state'] == 'absent':
        if not address:
            # Doesn't exist in GCE, and state==absent.
            changed = False
            module.fail_json(
                msg="Cannot delete unknown address: %s" %
                (params['name']))
        else:
            # Delete
            (changed, json_output['address']) = delete_address(address)
    else:
        if not address:
            # Create
            (changed, json_output['address']) = create_address(gce,
                                                               params)
        else:
            # Already present: idempotent no-op, report the existing IP.
            changed = False
            json_output['address'] = address.address

    json_output['changed'] = changed
    json_output.update(params)
    module.exit_json(**json_output)
# import module snippets
# NOTE: Ansible modules conventionally pull module_utils in at the bottom of
# the file; the wildcard import supplies AnsibleModule used by main().
from ansible.module_utils.basic import *
from ansible.module_utils.gcp import gcp_connect

if __name__ == '__main__':
    main()
| gpl-3.0 |
shepherd44/kernel_3.14.4 | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# Map of pid -> last seen command name; pid 0 is always the idle task.
threads = { 0 : "idle"}

def thread_name(pid):
    # Human readable "comm:pid" label used throughout the GUI output.
    return "%s:%d" % (threads[pid], pid)
# Event marker classes attached to runqueue snapshots.  color() returns an
# RGB tuple the wx GUI uses to highlight the cpu lane where the event fired.

class RunqueueEventUnknown:
    """Placeholder when nothing notable happened on a runqueue."""
    @staticmethod
    def color():
        # No dedicated color: the GUI falls back to the load-based shade.
        return None

    def __repr__(self):
        return "unknown"

class RunqueueEventSleep:
    """A task left the runqueue because it went to sleep."""
    @staticmethod
    def color():
        # blue
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        self.sleeper = sleeper

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)

class RunqueueEventWakeup:
    """A task was woken up onto this runqueue."""
    @staticmethod
    def color():
        # yellow
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        self.wakee = wakee

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)

class RunqueueEventFork:
    """A freshly forked task appeared on this runqueue."""
    @staticmethod
    def color():
        # green
        return (0, 0xff, 0)

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)

class RunqueueMigrateIn:
    """A task migrated onto this cpu's runqueue."""
    @staticmethod
    def color():
        # cyan
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        self.new = new

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)

class RunqueueMigrateOut:
    """A task migrated away from this cpu's runqueue."""
    @staticmethod
    def color():
        # magenta
        return (0xff, 0, 0xff)

    def __init__(self, old):
        self.old = old

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
    """Snapshot of one cpu's runqueue: a tuple of runnable pids plus the
    event that produced this state.  Mutating operations return a *new*
    snapshot (or self when nothing changed)."""
    def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
        # The mutable default list is safe here because it is immediately
        # copied into a tuple; pid 0 (idle) is always present.
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        # Apply a sched_switch trace event and return the resulting snapshot.
        event = RunqueueEventUnknown()

        # prev stays runnable and both tasks already tracked: state unchanged.
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        # Drop a task that migrated to another cpu.
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]

        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        # NOTE(review): when the task is already present this mutates
        # self.event in place, so snapshots are not strictly immutable.
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])

        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        ret = self.tasks.__repr__()
        # NOTE(review): origin_tostring() is not defined anywhere in this
        # file, so calling __repr__ raises AttributeError -- it appears to
        # be dead/leftover code.
        ret += self.origin_tostring()

        return ret
class TimeSlice:
    """One interval of the trace timeline: per-cpu runqueue snapshots plus
    the total load, carried forward (copied) from the previous slice."""
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Keep the aggregate load in sync when one cpu's runqueue changes.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)

        if old_rq is new_rq:
            # Snapshot unchanged: do not record this slice.
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        if old_cpu == new_cpu:
            return
        # Remove from the source cpu ...
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        # ... and add to the destination cpu.
        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and start the successor slice there.
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlice objects; also acts as the model queried by
    the SchedGui root window (set via set_root_win)."""
    def __init__(self, arg = []):
        # NOTE(review): mutable default argument -- callers always pass their
        # own list or nothing, and the default is only read, so it is benign.
        self.data = arg

    def get_time_slice(self, ts):
        # Return a slice open at timestamp ts: either a brand new one (empty
        # list) or the successor of the most recent slice.
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        # Binary search for the slice containing timestamp ts; -1 if none.
        # (Python 2: '/' below is integer division.)
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        # GUI callback: show a textual summary of the clicked (cpu, time).
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        # Paint one (slice, cpu) cell: redder means a larger share of the
        # total load; the top stripe marks the event type, if any.
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        # Repaint every slice intersecting [start, end].
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        # (first start, last end) of the recorded timeline.
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        # Highest cpu index seen -- the GUI sizes its lanes from this.
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Receives decoded scheduler trace events and folds them into the
    per-cpu timeslice model."""
    def __init__(self):
        # Last pid known to be running on each cpu; -1 means "unknown yet".
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
            next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]

        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            # Consistency warning only -- the event is still processed.
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        # Failed wakeups do not change any runqueue.
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    # perf-script hook: called once before any event; create the collector.
    global parser
    parser = SchedEventProxy()

def trace_end():
    # perf-script hook: called after the last event; launch the wx GUI
    # (RootFrame comes from SchedGui) over the collected timeslices.
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# Per-event callbacks invoked by perf script (one per trace event type).
# Only the events that change runqueue contents (migrate, switch, wakeup)
# are forwarded to the proxy; the rest are accepted and ignored.

def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, orig_cpu,
        dest_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):

    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    # fork=1: a sched_wakeup_new event marks a freshly forked task.
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)

def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    pass
| gpl-2.0 |
mengxn/tensorflow | tensorflow/python/ops/image_grad.py | 101 | 3785 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains Gradient functions for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
@ops.RegisterGradient("ResizeNearestNeighbor")
def _ResizeNearestNeighborGrad(op, grad):
  """The derivatives for nearest neighbor resizing.

  Args:
    op: The ResizeNearestNeighbor op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input and the output.
  """
  image = op.inputs[0]
  static_hw = image.get_shape()[1:3]
  # Prefer the statically known spatial extent; otherwise fall back to a
  # runtime shape op.
  if static_hw.is_fully_defined():
    input_hw = static_hw
  else:
    input_hw = array_ops.shape(image)[1:3]
  # pylint: disable=protected-access
  input_grad = gen_image_ops._resize_nearest_neighbor_grad(
      grad,
      input_hw,
      align_corners=op.get_attr("align_corners"))
  # pylint: enable=protected-access
  # The (integer) size input receives no gradient.
  return [input_grad, None]
@ops.RegisterGradient("ResizeBilinear")
def _ResizeBilinearGrad(op, grad):
  """The derivatives for bilinear resizing.

  Args:
    op: The ResizeBilinear op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  # Gradients are only produced for real floating point image inputs.
  if op.inputs[0].dtype not in (dtypes.float32, dtypes.float64):
    return [None, None]
  # pylint: disable=protected-access
  input_grad = gen_image_ops._resize_bilinear_grad(
      grad,
      op.inputs[0],
      align_corners=op.get_attr("align_corners"))
  # pylint: enable=protected-access
  return [input_grad, None]
@ops.RegisterGradient("CropAndResize")
def _CropAndResizeGrad(op, grad):
  """The derivatives for crop_and_resize.

  We back-propagate to the image only when the input image tensor has floating
  point dtype but we always back-propagate to the input boxes tensor.

  Args:
    op: The CropAndResize op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input image, boxes, as well as the always-None
    gradients w.r.t. box_ind and crop_size.
  """
  image, boxes, box_ind = op.inputs[0], op.inputs[1], op.inputs[2]
  static_shape = image.get_shape()
  # Use the static shape when fully known, a runtime shape op otherwise.
  if static_shape.is_fully_defined():
    image_shape = static_shape.as_list()
  else:
    image_shape = array_ops.shape(image)
  if image.dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
    image_grad = gen_image_ops.crop_and_resize_grad_image(
        grad, boxes, box_ind, image_shape, T=op.get_attr("T"))
  else:
    image_grad = None
  boxes_grad = gen_image_ops.crop_and_resize_grad_boxes(
      grad, image, boxes, box_ind)
  return [image_grad, boxes_grad, None, None]
| apache-2.0 |
aakhundov/tf-rnn-adaptive | train_sort.py | 1 | 5275 | import sys
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
from act_wrapper import ACTWrapper
from act_model import ACTModel
from data import generate_sort_data
def echo(message, file):
    """Print *message* to stdout and append it, newline-terminated, to *file*."""
    print(message)
    file.write("%s\n" % message)
# Experiment defaults; SEED, TIME_PENALTY and WITH_ACT can be overridden
# from the command line (see the __main__ block below).
SEED = 0

TRAIN_STEPS = 250000
BATCH_SIZE = 16
VAL_SIZE = 1000

# A sorting instance of k numbers is presented over 2*k time steps
# (presumably k inputs followed by k outputs -- see generate_sort_data).
MIN_NUMBERS = 2
MAX_NUMBERS = 10
MIN_TIME_STEPS = MIN_NUMBERS * 2
MAX_TIME_STEPS = MAX_NUMBERS * 2

INPUT_SIZE = 2
NUM_CLASSES = MAX_NUMBERS
NUM_OUTPUTS = 1
NUM_HIDDEN = 512

# Adaptive Computation Time: hard cap on ponder steps per input, and the
# weight of the ponder cost in the training loss.
PONDER_LIMIT = 10
TIME_PENALTY = 0.001
LEARNING_RATE = 0.001

WITH_ACT = True
if __name__ == "__main__":
    # Minimal hand-rolled flag parsing: consume "-name value" pairs from argv.
    while len(sys.argv) > 1:
        option = sys.argv[1]; del sys.argv[1]

        if option == "-seed":
            SEED = int(sys.argv[1]); del sys.argv[1]
        elif option == "-penalty":
            TIME_PENALTY = float(sys.argv[1]); del sys.argv[1]
        elif option == "-act":
            WITH_ACT = bool(int(sys.argv[1])); del sys.argv[1]
        else:
            print(sys.argv[0], ": invalid option", option)
            sys.exit(1)

    # Model name encodes task, penalty (or "x" without ACT) and the seed;
    # used for the log file and checkpoint paths below.
    model_name = "{0}_{1}_{2}".format(
        "sort",
        TIME_PENALTY if WITH_ACT else "x",
        SEED
    )

    np.random.seed(SEED)
    tf.set_random_seed(SEED)

    print(model_name)
    print()
    print("min numbers", MIN_NUMBERS)
    print("max numbers", MAX_NUMBERS)
    print("min time steps", MIN_TIME_STEPS)
    print("max time steps", MAX_TIME_STEPS)
    print("time penalty", TIME_PENALTY)
    print("ponder limit", PONDER_LIMIT)
    print("learning rate", LEARNING_RATE)
    print("with ACT" if WITH_ACT else "without ACT")
    print()

    # Base recurrent cell, optionally wrapped with Adaptive Computation Time.
    cell = rnn.BasicLSTMCell(NUM_HIDDEN)
    if WITH_ACT:
        cell = ACTWrapper(cell, ponder_limit=PONDER_LIMIT)

    inputs = tf.placeholder(tf.float32, [None, MAX_TIME_STEPS, INPUT_SIZE])
    targets = tf.placeholder(tf.int64, [None, MAX_TIME_STEPS, NUM_OUTPUTS])
    seq_length = tf.placeholder(tf.int64, [None])

    print("Creating model...")
    model = ACTModel(
        inputs, targets, MAX_TIME_STEPS, NUM_CLASSES, cell, NUM_OUTPUTS, TIME_PENALTY,
        seq_length=seq_length, target_offset=seq_length // 2,
        optimizer=tf.train.AdamOptimizer(LEARNING_RATE)
    )

    log_path = "./results/logs/" + model_name + ".txt"
    model_path = "./results/models/" + model_name + ".ckpt"
    saver = tf.train.Saver()
    log = open(log_path, "w")

    # Keep the GPU footprint small so several runs can share one device.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.2

    with tf.Session(config=config) as sess:
        print("Initializing variables...")
        sess.run(tf.global_variables_initializer())

        print("Training...")
        print()

        # Table header (stdout + log file) differs with/without ACT columns.
        if WITH_ACT:
            echo("{:10}{:<10}{:<15}{:<17}{:<30}".format(
                "steps", "error",
                "softmax loss", "ponder loss",
                "min / avg / std / max ponder"
            ), log)
            echo("-" * 83, log)
        else:
            echo("{:10}{:<10}{:<15}".format(
                "steps", "error", "softmax loss"
            ), log)
            echo("-" * 32, log)

        # Fixed validation set (fixed seed) reused for every evaluation.
        val_xs, val_ys, val_seq = generate_sort_data(
            VAL_SIZE, min_numbers=MIN_NUMBERS, max_numbers=MAX_NUMBERS, seed=12345)

        for step in range(TRAIN_STEPS):
            batch_xs, batch_ys, batch_seq = generate_sort_data(
                BATCH_SIZE, min_numbers=MIN_NUMBERS, max_numbers=MAX_NUMBERS)

            sess.run(model.training, feed_dict={
                inputs: batch_xs,
                targets: batch_ys,
                seq_length: batch_seq
            })

            # Evaluate on the validation set every 1000 steps.
            if (step + 1) % 1000 == 0:
                if WITH_ACT:
                    val_error, val_soft_loss, val_pond_loss, val_ponder = sess.run(
                        [model.evaluation, model.softmax_loss, model.ponder_loss, model.ponder_steps],
                        feed_dict={
                            inputs: val_xs,
                            targets: val_ys,
                            seq_length: val_seq
                        }
                    )

                    # Drop padded (zero) entries before the ponder statistics.
                    val_ponder = np.ravel(val_ponder)
                    val_ponder = val_ponder[np.nonzero(val_ponder)]

                    echo("{:<10d}{:<10.2f}{:<15.6}{:<17.6}{:<30}".format(
                        step + 1, 100 * val_error,
                        val_soft_loss, val_pond_loss,
                        "{:.2f} / {:.2f} / {:.2f} / {:.2f}".format(
                            np.min(val_ponder), np.mean(val_ponder),
                            np.std(val_ponder), np.max(val_ponder)
                        )
                    ), log)
                else:
                    val_error, val_loss = sess.run(
                        [model.evaluation, model.softmax_loss],
                        feed_dict={
                            inputs: val_xs,
                            targets: val_ys,
                            seq_length: val_seq
                        }
                    )

                    echo("{:<10d}{:<10.2f}{:<15.6}".format(
                        step + 1, 100 * val_error, val_loss
                    ), log)

        print()
        print("Saving model...")
        saver.save(sess, model_path)

    log.close()
| apache-2.0 |
seungjin/app5-seungjin-net.appspot.com | django/contrib/gis/geos/prototypes/topology.py | 311 | 2226 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
__all__ = ['geos_boundary', 'geos_buffer', 'geos_centroid', 'geos_convexhull',
'geos_difference', 'geos_envelope', 'geos_intersection',
'geos_linemerge', 'geos_pointonsurface', 'geos_preservesimplify',
'geos_simplify', 'geos_symdifference', 'geos_union', 'geos_relate']
from ctypes import c_char_p, c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
def topology(func, *args):
    """Configure *func* as a GEOS topology routine.

    The first argument is always a geometry pointer, any extra ctypes in
    *args* follow it, and the routine returns a geometry pointer that is
    validated by check_geom.  Returns the configured function.
    """
    func.argtypes = [GEOM_PTR] + list(args)
    func.restype = GEOM_PTR
    func.errcheck = check_geom
    return func
### Topology Routines ###
geos_boundary = topology(GEOSFunc('GEOSBoundary'))
# Extra args per the argtypes below: buffer takes (width, quadsegs).
geos_buffer = topology(GEOSFunc('GEOSBuffer'), c_double, c_int)
geos_centroid = topology(GEOSFunc('GEOSGetCentroid'))
geos_convexhull = topology(GEOSFunc('GEOSConvexHull'))
geos_difference = topology(GEOSFunc('GEOSDifference'), GEOM_PTR)
geos_envelope = topology(GEOSFunc('GEOSEnvelope'))
geos_intersection = topology(GEOSFunc('GEOSIntersection'), GEOM_PTR)
geos_linemerge = topology(GEOSFunc('GEOSLineMerge'))
geos_pointonsurface = topology(GEOSFunc('GEOSPointOnSurface'))
geos_preservesimplify = topology(GEOSFunc('GEOSTopologyPreserveSimplify'), c_double)
geos_simplify = topology(GEOSFunc('GEOSSimplify'), c_double)
geos_symdifference = topology(GEOSFunc('GEOSSymDifference'), GEOM_PTR)
geos_union = topology(GEOSFunc('GEOSUnion'), GEOM_PTR)

# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFunc('GEOSRelate')
geos_relate.argtypes = [GEOM_PTR, GEOM_PTR]
geos_relate.restype = geos_char_p
geos_relate.errcheck = check_string

# Routines only in GEOS 3.1+
if GEOS_PREPARE:
    geos_cascaded_union = GEOSFunc('GEOSUnionCascaded')
    geos_cascaded_union.argtypes = [GEOM_PTR]
    geos_cascaded_union.restype = GEOM_PTR
    # Only exported when the underlying GEOS library supports it.
    __all__.append('geos_cascaded_union')
| bsd-3-clause |
wolfskaempf/ga_statistics | lib/python2.7/site-packages/django/contrib/auth/migrations/0001_initial.py | 143 | 4370 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.auth.models
from django.core import validators
from django.db import migrations, models
from django.utils import timezone
class Migration(migrations.Migration):
    """Initial schema for django.contrib.auth: the Permission, Group and
    (swappable) User models.  Historical migration -- the field definitions
    below must stay frozen; do not edit them to match later model changes."""

    dependencies = [
        ('contenttypes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='Permission',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50, verbose_name='name')),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType', to_field='id')),
                ('codename', models.CharField(max_length=100, verbose_name='codename')),
            ],
            options={
                'ordering': ('content_type__app_label', 'content_type__model', 'codename'),
                'unique_together': set([('content_type', 'codename')]),
                'verbose_name': 'permission',
                'verbose_name_plural': 'permissions',
            },
            managers=[
                ('objects', django.contrib.auth.models.PermissionManager()),
            ],
        ),
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=80, verbose_name='name')),
                ('permissions', models.ManyToManyField(to='auth.Permission', verbose_name='permissions', blank=True)),
            ],
            options={
                'verbose_name': 'group',
                'verbose_name_plural': 'groups',
            },
            managers=[
                ('objects', django.contrib.auth.models.GroupManager()),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(to='auth.Group', verbose_name='groups', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user')),
                ('user_permissions', models.ManyToManyField(to='auth.Permission', verbose_name='user permissions', blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user')),
            ],
            options={
                # AUTH_USER_MODEL lets projects substitute their own user model.
                'swappable': 'AUTH_USER_MODEL',
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| mit |
bleib1dj/boto | tests/unit/glacier/test_response.py | 100 | 1656 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import AWSMockServiceTestCase
from boto.glacier.layer1 import Layer1
from boto.glacier.response import GlacierResponse
class TestResponse(AWSMockServiceTestCase):
    """Regression tests for GlacierResponse parsing of Layer1 HTTP replies."""

    connection_class = Layer1

    def test_204_body_isnt_passed_to_json(self):
        # A 204 "No Content" reply carries an empty body; GlacierResponse
        # must not attempt to JSON-decode it even though the Content-Type
        # header claims application/json.
        response = self.create_response(
            status_code=204, header=[('Content-Type', 'application/json')])
        result = GlacierResponse(response, response.getheaders())
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(result.status, response.status)
if __name__ == '__main__':
    # ``unittest`` was referenced here without ever being imported, so
    # running this file directly raised NameError; import it locally.
    import unittest
    unittest.main()
| mit |
whereismyjetpack/ansible | lib/ansible/modules/windows/win_iis_webapppool.py | 15 | 2611 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata consumed by ansible-doc and the module index.
ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'community',
    'version': '1.0',
}
DOCUMENTATION = r'''
---
module: win_iis_webapppool
version_added: "2.0"
short_description: Configures a IIS Web Application Pool.
description:
- Creates, Removes and configures a IIS Web Application Pool
options:
name:
description:
- Names of application pool
required: true
default: null
aliases: []
state:
description:
- State of the binding
choices:
- absent
- stopped
- started
- restarted
required: false
default: null
aliases: []
attributes:
description:
- Application Pool attributes from string where attributes are separated by a pipe and attribute name/values by colon Ex. "foo:1|bar:2"
required: false
default: null
aliases: []
author: Henrik Wallström
'''
# Usage examples rendered by ansible-doc.  The "Restart AppPool" example
# previously used ``state: restart``, which is not one of the documented
# choices (absent/stopped/started/restarted); corrected to ``restarted``.
EXAMPLES = r'''
- name: Return information about an existing application pool
  win_iis_webapppool:
    name: DefaultAppPool
- name: Ensure AppPool is started
  win_iis_webapppool:
    name: AppPool
    state: started
- name: Ensure AppPool is stopped
  win_iis_webapppool:
    name: AppPool
    state: stopped
- name: Restart AppPool
  win_iis_webapppool:
    name: AppPool
    state: restarted
- name: Change application pool attributes without touching state
  win_iis_webapppool:
    name: AppPool
    attributes: managedRuntimeVersion:v4.0|autoStart:false
- name: Create AnotherAppPool and start it using .NET 4.0 and disabling autostart
  win_iis_webapppool:
    name: AnotherAppPool
    state: started
    attributes: managedRuntimeVersion:v4.0|autoStart:false
- name: Create AppPool and start it using .NET 4.0
  win_iis_webapppool:
    name: AppPool
    state: started
    attributes: managedRuntimeVersion:v4.0
  register: webapppool
'''
| gpl-3.0 |
vipmike007/avocado-vt | virttest/remote_commander/remote_runner.py | 1 | 26020 | #!/usr/bin/env python
'''
Created on Dec 6, 2013
:author: jzupka
'''
import os
import sys
import select
import time
import stat
import gc
import logging
import traceback
import subprocess
import string
import random
import shutil
import signal
import remote_interface
import messenger as ms
from .. import data_dir
def daemonize(pipe_root_path=data_dir.get_tmp_dir()):
    """
    Detach the current process into a daemon (classic double fork) and set
    up the named FIFOs used to communicate with it.

    NOTE(review): the default for ``pipe_root_path`` is evaluated once at
    import time, not per call -- confirm this is intended.

    :param pipe_root_path: path to directory where the FIFOs are created.
    :return: tuple (child_flag, <two message paths>, stdin_path,
             stdout_path, stderr_path); the order of the message paths is
             swapped between the child side and the parent side so each
             side reads the other's writes.
    """
    def is_file_open(path):
        """
        Determine which processes have *path* open, by scanning /proc.

        :param path: Path to file.
        :return: [[pid, mode], ... ] for every fd found linking to *path*.
        """
        opens = []
        pids = os.listdir('/proc')
        for pid in sorted(pids):
            try:
                int(pid)
            except ValueError:
                # Not a process directory (e.g. /proc/net).
                continue
            fd_dir = os.path.join('/proc', pid, 'fd')
            try:
                for filepath in os.listdir(fd_dir):
                    try:
                        p = os.path.join(fd_dir, filepath)
                        link = os.readlink(os.path.join(fd_dir, filepath))
                        if link == path:
                            mode = os.lstat(p).st_mode
                            opens.append([pid, mode])
                    except OSError:
                        continue
            except OSError, e:
                # errno 2: the process exited between listdir and readlink.
                if e.errno == 2:
                    continue
                raise
        return opens

    def daemonize():
        """
        Double-fork the current process into a detached daemon.
        Returns 0 in the original parent, 1 in the daemonized child.
        """
        gc_was_enabled = gc.isenabled()
        # Disable gc to avoid bug where gc -> file_dealloc ->
        # write to stderr -> hang. http://bugs.python.org/issue1336
        gc.disable()
        try:
            pid = os.fork()
            if gc_was_enabled:
                gc.enable()
            if pid > 0:  # If parent return False
                os.waitpid(pid, 0)
                return 0
        except OSError, e:
            sys.stderr.write("Daemonize failed: %s\n" % (e))
            sys.exit(1)
        # Detach from the controlling terminal and filesystem.
        os.chdir("/")
        os.setsid()
        os.umask(0)
        try:
            pid = os.fork()
            if gc_was_enabled:
                gc.enable()
            if pid > 0:  # If parent Exit
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("Daemonize failed: %s\n" % (e))
            sys.exit(1)
        if gc_was_enabled:
            gc.enable()
        sys.stdout.flush()
        sys.stderr.flush()
        return 1

    # FIFO endpoints shared between the daemon and its controller.
    stdin_path = os.path.join(pipe_root_path, "stdin")
    stdout_path = os.path.join(pipe_root_path, "stdout")
    stderr_path = os.path.join(pipe_root_path, "stderr")
    results_path = os.path.join(pipe_root_path, "results")
    inputs_path = os.path.join(pipe_root_path, "inputs")
    for f in [stdin_path, stdout_path, stderr_path, results_path, inputs_path]:
        try:
            os.mkfifo(f)
        except OSError, e:
            # errno 17 (EEXIST): FIFO already created by a previous run.
            if e.errno == 17:
                pass
    # Check for a pidfile to see if the daemon already runs
    openers = is_file_open(stdout_path)
    rundaemon = False
    if len(openers) > 0:
        for i in openers:
            # A writer on the stdout FIFO means a daemon is already alive.
            if i[1] & stat.S_IWUSR:
                rundaemon = True
                openers.remove(i)
        if len(openers) > 0:
            # Kill stale readers left over from a previous controller.
            for i in openers:
                os.kill(int(i[0]), 9)
        time.sleep(0.3)
    # Start the daemon
    child = False
    if not rundaemon:
        child = daemonize()
    if child == 0:
        return (child,
                inputs_path,
                results_path,
                stdin_path,
                stdout_path,
                stderr_path)
    else:
        signal.signal(signal.SIGIO, signal.SIG_DFL)
        return (child,
                results_path,
                inputs_path,
                stdin_path,
                stdout_path,
                stderr_path)
def create_process_cmd():
    """
    Fork a child process that keeps the parent's interpreter state, so
    functions and classes defined in this process can be called from the
    child.  Five pipe pairs are created: a bidirectional message channel
    plus redirected stdin/stdout/stderr for the child.

    :return: in the child (0, read_fd, write_fd, None, None, None);
             in the parent (pid, read_fd, write_fd, child_stdin_fd,
             child_stdout_fd, child_stderr_fd).
    """
    # Message channel (both directions) and the child's three std streams.
    r_c, w_p = os.pipe()
    r_p, w_c = os.pipe()
    r_si, w_si = os.pipe()
    r_so, w_so = os.pipe()
    r_se, w_se = os.pipe()
    gc_was_enabled = gc.isenabled()
    # Disable gc to avoid bug where gc -> file_dealloc ->
    # write to stderr -> hang. http://bugs.python.org/issue1336
    gc.disable()
    pid = os.fork()
    if pid == 0:  # Child process
        # Drop the parent's ends, then rebind the std streams (unbuffered)
        # to the child's ends of the pipes.
        os.close(r_p)
        os.close(w_p)
        os.close(w_si)
        os.close(r_so)
        os.close(r_se)
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()
        sys.stdin = os.fdopen(r_si, 'r', 0)
        sys.stdout = os.fdopen(w_so, 'w', 0)
        sys.stderr = os.fdopen(w_se, 'w', 0)
        if gc_was_enabled:
            gc.enable()
        return (0, r_c, w_c, None, None, None)
    else:
        # Parent: drop the child's ends and keep its own.
        os.close(r_c)
        os.close(w_c)
        os.close(r_si)
        os.close(w_so)
        os.close(w_se)
        if gc_was_enabled:
            gc.enable()
        return (pid, r_p, w_p, w_si, r_so, r_se)
def gen_tmp_dir(root_path):
    """
    Create a uniquely named temporary directory under *root_path*.

    The name is "runner" plus four random letters; names are retried
    until an unused one is successfully created, so the function is safe
    against races with concurrent callers.

    :param root_path: directory in which the new directory is created.
    :return: path of the newly created directory.
    """
    while True:
        # string.ascii_letters instead of the py2-only, locale-dependent
        # string.letters; works identically on Python 2 and 3.
        rname = "runner" + "".join(random.sample(string.ascii_letters, 4))
        path = os.path.join(root_path, rname)
        try:
            if not os.path.exists(path):
                os.mkdir(path)
                return path
        except OSError:
            # Lost a race with another process creating the same name, or
            # mkdir failed transiently; pick a new name.  (Was a bare
            # ``except:`` which also swallowed KeyboardInterrupt.)
            continue
def clean_tmp_dir(path):
    """Recursively delete *path*, silently ignoring any errors."""
    shutil.rmtree(path, ignore_errors=True)
def sort_fds_event(fds):
    """
    Split poll() results into per-event fd lists.

    :param fds: iterable of (fd, eventmask) pairs as returned by poll().
    :return: (hup, read, write) lists; an fd may appear in several lists
             when its mask combines multiple events.
    """
    hup = []
    read = []
    write = []
    for fd, events in fds:
        if events & select.POLLHUP:
            hup.append(fd)
        if events & select.POLLIN:
            read.append(fd)
        if events & select.POLLOUT:
            write.append(fd)
    return hup, read, write
def close_unused_fds(fds):
    """
    Close every file descriptor in *fds*; this process no longer needs
    them.

    :param fds: file descriptors
    :type fds: builtin.list
    """
    for descriptor in fds:
        os.close(descriptor)
class CmdFinish(object):
    """
    Sentinel message sent over the command pipe to announce that a
    process has finished.  It carries the pid the receiver should wait
    on (the original docstring was truncated mid-sentence).
    """
    __slots__ = ["pid"]

    def __init__(self, parent=False):
        """
        :param parent: if True record the parent's pid (os.getppid());
                       otherwise record this process's own pid.
        """
        # The original ended with a redundant no-op ``self.pid = self.pid``;
        # it has been removed.
        if not parent:
            self.pid = os.getpid()
        else:
            self.pid = os.getppid()
class CmdSlave(object):
    """
    Representation of BaseCmd on slave side.

    Wraps a BaseCmd received from the master and runs it either in the
    main process ("manage"), in a forked child ("async"), or in a fully
    daemonized child that survives the controller ("nohup").

    NOTE(review): this module is Python 2 only (``except E, e`` syntax,
    ``print`` statements); the attribute name ``async`` is a reserved
    keyword from Python 3.7 on.
    """
    def __init__(self, baseCmd):
        """
        :param baseCmd: basecmd for encapsulation.
        """
        self.basecmd = baseCmd
        self.cmd_id = baseCmd.cmd_id
        # Callable resolved from basecmd.func; pid of the worker child.
        self.obj = None
        self.pid = None
        # Message channel to the worker plus its redirected std streams.
        self.r_pipe = None
        self.w_pipe = None
        self.stdin_pipe = None
        self.stdout_pipe = None
        self.stderr_pipe = None
        # Execution-mode flags parsed from the command's function name.
        self.async = False
        self.nohup = False
        self.manage = False
        self.msg = None
    def close_pipes(self):
        """
        Close command communication pipe.
        """
        if self.r_pipe is not None:
            os.close(self.r_pipe)
        if self.w_pipe is not None:
            os.close(self.w_pipe)
        if self.stdin_pipe is not None:
            os.close(self.stdin_pipe)
        if self.stdout_pipe is not None:
            os.close(self.stdout_pipe)
        if self.stderr_pipe is not None:
            os.close(self.stderr_pipe)
    def parse_func_name(self, func_name, commander):
        """
        Parse the function name list sent from the master and resolve it
        to a callable.

        format: ``["manage|async|nohup| ", "fnname1", "fnname2", ...]``

        Leading mode markers set the matching execution flag; the rest is
        looked up on the commander, then in its globals/locals, then in
        this module's globals, with any remaining names resolved as
        attribute accesses.

        :param func_name: Function name
        :param commander: Where to execute the command (remote or local)
        """
        if func_name[0] == "manage":  # start command in main process.
            self.manage = True
            func_name = func_name[1:]
        if func_name[0] == "async":  # start command in new process.
            self.async = True
            func_name = func_name[1:]
        if func_name[0] == "nohup":  # start command in new daemon process.
            self.nohup = True
            func_name = func_name[1:]
        if hasattr(commander, func_name[0]):
            obj = getattr(commander, func_name[0])
        elif func_name[0] in commander.globals:
            obj = commander.globals[func_name[0]]
        elif func_name[0] in commander.locals:
            obj = commander.locals[func_name[0]]
        else:
            obj = globals()[func_name[0]]
        if len(func_name) > 1:
            for name in func_name[1:]:
                obj = getattr(obj, name)
        return obj
    def __call__(self, commander):
        """
        Call command cmd(*args, **kargs) in the mode selected by the
        manage/async/nohup flags.
        """
        self.obj = self.parse_func_name(self.basecmd.func, commander)
        if self.manage:  # start command in main process
            self.basecmd.results = self.obj(*self.basecmd.args,
                                            **self.basecmd.kargs)
            self.basecmd._finished = True
            self.finish(commander)
        elif self.async:  # start command in new process
            self.basecmd.results = self.__call_async__(commander)
            self.basecmd._async = True
        elif self.nohup:  # start command in new daemon process
            if self.basecmd.cmd_hash is None:
                self.basecmd.cmd_hash = gen_tmp_dir(data_dir.get_tmp_dir())
            self.basecmd.results = self.__call_nohup__(commander)
            self.basecmd._async = True
        else:  # start command in new process but wait for input.
            self.basecmd.results = self.__call_async__(commander)
    def __call_async__(self, commander):
        # Fork a worker; the child runs the callable and streams results
        # back over the message pipe, the parent just keeps the pipes.
        (self.pid, self.r_pipe, self.w_pipe, self.stdin_pipe,
         self.stdout_pipe, self.stderr_pipe) = create_process_cmd()
        if self.pid == 0:  # Child process make commands
            commander._close_cmds_stdios(self)
            self.msg = ms.Messenger(ms.StdIOWrapperIn(self.r_pipe),
                                    ms.StdIOWrapperOut(self.w_pipe))
            try:
                self.basecmd.results = self.obj(*self.basecmd.args,
                                                **self.basecmd.kargs)
            except Exception:
                err_msg = traceback.format_exc()
                self.msg.write_msg(remote_interface.CmdTraceBack(err_msg))
                sys.exit(-1)
            finally:
                # Always report the result and the finish sentinel, even
                # when the callable raised.
                self.msg.write_msg(self.basecmd.results)
                self.msg.write_msg(CmdFinish())
                sys.exit(0)
        else:  # Parent process create communication interface to child process
            self.msg = ms.Messenger(ms.StdIOWrapperIn(self.r_pipe),
                                    ms.StdIOWrapperOut(self.w_pipe))
    def __call_nohup__(self, commander):
        # Daemonize, then fork again: the grandchild runs the callable,
        # while the intermediate child relays data between the daemon-side
        # anonymous pipes and the named FIFOs the controller connects to.
        (pid, self.r_path, self.w_path, self.stdin_path, self.stdout_path,
         self.stderr_path) = daemonize(self.basecmd.cmd_hash)
        if pid == 1:  # Child process make commands
            commander._close_cmds_stdios(self)
            (self.pid, r_pipe, w_pipe, stdin_pipe,
             stdout_pipe, stderr_pipe) = create_process_cmd()
            if self.pid == 0:  # Child process make commands
                self.msg = ms.Messenger(ms.StdIOWrapperIn(r_pipe),
                                        ms.StdIOWrapperOut(w_pipe))
                try:
                    self.basecmd.results = self.obj(*self.basecmd.args,
                                                    **self.basecmd.kargs)
                except Exception:
                    err_msg = traceback.format_exc()
                    self.msg.write_msg(remote_interface.CmdTraceBack(err_msg))
                    sys.exit(-1)
                finally:
                    self.msg.write_msg(self.basecmd.results)
                    sys.exit(0)
            else:
                # helper child process open communication pipes.
                # This process is able to manage problem with connection width
                # main parent process. It allows start unchanged child process.
                self.r_pipe = os.open(self.r_path, os.O_RDONLY)
                self.w_pipe = os.open(self.w_path, os.O_WRONLY)
                sys.stdout = os.fdopen(os.open(self.stdout_path, os.O_WRONLY),
                                       "w",
                                       0)
                sys.stderr = os.fdopen(os.open(self.stderr_path, os.O_WRONLY),
                                       "w",
                                       0)
                sys.stdin = os.fdopen(os.open(self.stdin_path, os.O_RDONLY),
                                      "r",
                                      0)
                # w_fds: worker-side fds; m_fds: FIFO-side fds.
                w_fds = [r_pipe, w_pipe, stdin_pipe, stdout_pipe, stderr_pipe]
                m_fds = [self.r_pipe,
                         self.w_pipe,
                         sys.stdin.fileno(),
                         sys.stdout.fileno(),
                         sys.stderr.fileno()]
                p = select.poll()
                p.register(r_pipe)
                p.register(w_pipe)
                # p.register(stdin_pipe)
                p.register(stdout_pipe)
                p.register(stderr_pipe)
                p.register(self.r_pipe)
                # p.register(self.w_pipe)
                p.register(sys.stdin.fileno())
                # p.register(sys.stdout.fileno())
                # p.register(sys.stderr.fileno())
                # Map each readable fd to the fd its data is forwarded to.
                io_map = {r_pipe: self.w_pipe,
                          self.r_pipe: w_pipe,
                          sys.stdin.fileno(): stdin_pipe,
                          stdout_pipe: sys.stdout.fileno(),
                          stderr_pipe: sys.stderr.fileno()}
                while 1:
                    d = p.poll()
                    w_ev = [x for x in d if x[0] in w_fds]
                    m_ev = [x for x in d if x[0] in m_fds]
                    w_hup, w_read, _ = sort_fds_event(w_ev)
                    m_hup, m_read, _ = sort_fds_event(m_ev)
                    if m_hup:
                        # Controller side went away; back off and keep the
                        # worker running (nohup semantics).
                        time.sleep(0.1)
                    if w_hup:  # child process finished
                        for r in w_read:
                            data = os.read(r, 16384)
                            os.write(io_map[r], data)
                        break
                    for r in w_read:
                        data = os.read(r, 16384)
                        os.write(io_map[r], data)
                    for r in m_read:
                        data = os.read(r, 16384)
                        os.write(io_map[r], data)
                self.msg = ms.Messenger(ms.StdIOWrapperIn(self.r_pipe),
                                        ms.StdIOWrapperOut(self.w_pipe))
                self.msg.write_msg(CmdFinish())
                exit(0)
        else:  # main process open communication named pipes.
            self.w_pipe = os.open(self.w_path, os.O_WRONLY)
            self.r_pipe = os.open(self.r_path, os.O_RDONLY)
            self.stdout_pipe = os.open(self.stdout_path, os.O_RDONLY)
            self.stderr_pipe = os.open(self.stderr_path, os.O_RDONLY)
            self.stdin_pipe = os.open(self.stdin_path, os.O_WRONLY)
            self.msg = ms.Messenger(ms.StdIOWrapperIn(self.r_pipe),
                                    ms.StdIOWrapperOut(self.w_pipe))
    def work(self):
        """
        Process one message from the running child.

        :return: True when the child announced completion (CmdFinish) and
                 was reaped; False when the finish sentinel's pid did not
                 match; None when the message was an intermediate result.
        """
        succ, msg = self.msg.read_msg()
        if isinstance(msg, CmdFinish):
            try:
                pid, _ = os.waitpid(msg.pid, 0)
            except OSError:
                # Child is not ours to reap (nohup helper already did).
                pid = msg.pid
            if (succ is False or pid == msg.pid):
                self.basecmd._finished = True
                return True
            else:
                return False
        else:
            self.basecmd.results = msg
    def recover_paths(self):
        """
        Helper function for reconnect to daemon/nohup process.
        Recomputes the FIFO paths from the command's hash directory.
        """
        self.stdin_path = os.path.join(self.basecmd.cmd_hash, "stdin")
        self.stdout_path = os.path.join(self.basecmd.cmd_hash, "stdout")
        self.stderr_path = os.path.join(self.basecmd.cmd_hash, "stderr")
        self.w_path = os.path.join(self.basecmd.cmd_hash, "results")
        self.r_path = os.path.join(self.basecmd.cmd_hash, "inputs")
    def recover_fds(self):
        """
        Helper function for reconnect to daemon/nohup process.
        Reopens all FIFOs and rebuilds the messenger.
        """
        if self.r_pipe is None:
            self.recover_paths()
            self.w_pipe = os.open(self.w_path, os.O_WRONLY)
            self.r_pipe = os.open(self.r_path, os.O_RDONLY)
            self.stdin_pipe = os.open(self.stdin_path, os.O_WRONLY)
            self.stdout_pipe = os.open(self.stdout_path, os.O_RDONLY)
            self.stderr_pipe = os.open(self.stderr_path, os.O_RDONLY)
            self.msg = ms.Messenger(ms.StdIOWrapperIn(self.r_pipe),
                                    ms.StdIOWrapperOut(self.w_pipe))
    def finish(self, commander):
        """
        Remove cmd from commander commands on finish of process and clean
        up its pipes and temporary hash directory.
        """
        self.close_pipes()
        if self.basecmd.cmd_hash:
            clean_tmp_dir(self.basecmd.cmd_hash)
            self.basecmd.cmd_hash = None
        del commander.cmds[self.cmd_id]
class CommanderSlave(ms.Messenger):
    """
    Class commander slave is responsible for communication with commander
    master. It invokes commands on the slave side and receives messages
    from them. For communication only stdin and stdout are used, which
    are streams from the slave part.
    """
    def __init__(self, stdin, stdout, o_stdout, o_stderr):
        super(CommanderSlave, self).__init__(stdin, stdout)
        self._exit = False
        # Running commands keyed by cmd_id; namespaces used by
        # CmdSlave.parse_func_name to resolve callables.
        self.cmds = {}
        self.globals = {}
        self.locals = {}
        # Read ends of this process's captured stdout/stderr.
        self.o_stdout = o_stdout
        self.o_stderr = o_stderr
    def cmd_loop(self):
        """
        Wait for commands from master and receive results and outputs from
        commands.
        """
        try:
            while (not self._exit):
                # Multiplex the master channel, our own captured std
                # streams, and every running command's pipes.
                stdios = [self.stdin, self.o_stdout, self.o_stderr]
                r_pipes = [cmd.r_pipe for cmd in self.cmds.values()
                           if cmd.r_pipe is not None]
                stdouts = [cmd.stdout_pipe for cmd in self.cmds.values()
                           if cmd.stdout_pipe is not None]
                stderrs = [cmd.stderr_pipe for cmd in self.cmds.values()
                           if cmd.stderr_pipe is not None]
                r, _, _ = select.select(
                    stdios + r_pipes + stdouts + stderrs, [], [])
                if self.stdin in r:  # command from controller
                    cmd = CmdSlave(self.read_msg()[1])
                    self.cmds[cmd.cmd_id] = cmd
                    try:
                        cmd(self)
                        self.write_msg(cmd.basecmd)
                    except Exception:
                        err_msg = traceback.format_exc()
                        self.write_msg(
                            remote_interface.CommanderError(err_msg))
                if self.o_stdout in r:  # Send message from stdout
                    msg = os.read(self.o_stdout, 16384)
                    self.write_msg(remote_interface.StdOut(msg))
                if self.o_stderr in r:  # Send message from stderr
                    msg = os.read(self.o_stderr, 16384)
                    self.write_msg(remote_interface.StdErr(msg))
                # test all commands for io
                for cmd in self.cmds.values():
                    if cmd.stdout_pipe in r:  # command stdout
                        data = os.read(cmd.stdout_pipe, 16384)
                        if data != "":  # pipe is not closed on another side.
                            self.write_msg(remote_interface.StdOut(data,
                                                                   cmd.cmd_id))
                        else:
                            os.close(cmd.stdout_pipe)
                            cmd.stdout_pipe = None
                    if cmd.stderr_pipe in r:  # command stderr
                        data = os.read(cmd.stderr_pipe, 16384)
                        if data != "":  # pipe is not closed on another side.
                            self.write_msg(remote_interface.StdErr(data,
                                                                   cmd.cmd_id))
                        else:
                            os.close(cmd.stderr_pipe)
                            cmd.stderr_pipe = None
                    if cmd.r_pipe in r:  # command results
                        if cmd.work():
                            cmd.finish(self)
                        self.write_msg(cmd.basecmd)
        except Exception:
            # Top-level boundary: forward the traceback to the master
            # instead of dying silently.
            err_msg = traceback.format_exc()
            self.write_msg(remote_interface.CommanderError(err_msg))
    def _close_cmds_stdios(self, exclude_cmd):
        # Called in freshly forked children so they do not inherit the
        # pipes of sibling commands.
        for cmd in self.cmds.values():
            if cmd is not exclude_cmd:
                cmd.close_pipes()
class CommanderSlaveCmds(CommanderSlave):
    """
    Class extends CommanderSlave and adds special commands like
    shell process, interactive python, send_msg to cmd.
    """
    def __init__(self, stdin, stdout, o_stdout, o_stderr):
        super(CommanderSlaveCmds, self).__init__(stdin, stdout,
                                                 o_stdout, o_stderr)
        # Handshake: block until the master sends "start", then ack.
        while (1):
            succ, data = self.read_msg()
            if succ and data == "start":
                break
        self.write_msg("Started")
    def shell(self, cmd):
        """
        Starts shell process. Stdout is automatically copied to
        basecmd.stdout.

        NOTE(review): runs with shell=True on a string received from the
        master -- acceptable only because the master is trusted.

        :param cmd: Command which should be started.
        :return: return code of cmd.
        """
        process = subprocess.Popen(cmd,
                                   shell=True,
                                   stdin=sys.stdin,
                                   stdout=sys.stdout,
                                   stderr=sys.stderr)
        return process.wait()
    def interactive(self):
        """
        Starts interactive python: reads lines from stdin and exec()s
        them until an empty line is received.
        """
        while 1:
            out = raw_input()
            if out == "":
                return
            try:
                # SECURITY NOTE: exec of master-supplied code; only safe
                # because the master side is trusted.
                exec out
            except Exception:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                print "On Guest exception from: \n" + "".join(
                    traceback.format_exception(exc_type,
                                               exc_value,
                                               exc_traceback))
                print "FAIL: Guest command exception."
    def send_msg(self, msg, cmd_id):
        """
        Send msg to cmd with id == cmd_id

        :param msg: message passed to cmd over the stdin
        :type msg: str
        :param cmd_id: id of cmd.
        """
        os.write(self.cmds[cmd_id].stdin_pipe, msg)
    def register_cmd(self, basecmd, basecmd_cmd_id):
        """
        Second side of set_commander cmd from master. It registers an
        existing cmd into the CommandSlave dict (used to reconnect to a
        still-running nohup command).

        :param basecmd: cmd which should be added to CommandSlave dict
        :type basecmd: BaseCmd
        :param basecmd_cmd_id: number under which it should be stored
        :type basecmd_cmd_id: int
        """
        remote_interface.BaseCmd.single_cmd_id = basecmd_cmd_id
        cmd = CmdSlave(basecmd)
        self.cmds[basecmd.cmd_id] = cmd
        if cmd.basecmd.cmd_hash is not None:
            cmd.recover_fds()
        return basecmd
    def add_function(self, f_code):
        """
        Adds function to client code by exec-ing it into module globals.

        :param f_code: Code of function.
        :type f_code: str.
        """
        exec(f_code, globals(), globals())
    def copy_file(self, name, path, content):
        """
        Really naive implementation of copying files (whole content held
        in memory). Should be used only for short files.
        """
        f = open(os.path.join(path, name), "w")
        f.write(content)
        f.close()
    def import_src(self, name, path=None):
        """
        Import file to running python session, optionally extending
        sys.path first, and publish it in globals and sys.modules.
        """
        if path:
            if path not in sys.path:
                sys.path.append(path)
        mod = __import__(name, globals(), locals())
        globals()[name] = mod
        sys.modules[name] = mod
    def exit(self):
        """
        Method for killing command slave (ends cmd_loop on next pass).
        """
        self._exit = True
        return "bye"
def remote_agent(in_stream_cls, out_stream_cls):
    """
    Connect file descriptors to the right pipes and start the slave
    command loop.  Any failure is formatted as a traceback and written to
    the original stderr so the master can catch it.

    :params in_stream_cls: Class encapsulating the input stream.
    :params out_stream_cls: Class encapsulating the output stream.
    """
    try:
        # Keep the raw fds for the master channel, then replace this
        # process's stdout/stderr with pipes we can multiplex.
        fd_stdout = sys.stdout.fileno()
        fd_stderr = sys.stderr.fileno()
        fd_stdin = sys.stdin.fileno()
        soutr, soutw = os.pipe()
        serrr, serrw = os.pipe()
        sys.stdout = os.fdopen(soutw, 'w', 0)
        sys.stderr = os.fdopen(serrw, 'w', 0)
        # "#" marks the start of the protocol stream for the master.
        os.write(fd_stdout, "#")
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
        w_stdin = None
        w_stdout = out_stream_cls(fd_stdout)
        w_stdin = in_stream_cls(fd_stdin)
        cmd = CommanderSlaveCmds(w_stdin,
                                 w_stdout,
                                 soutr,
                                 serrr)
        cmd.cmd_loop()
    except SystemExit:
        pass
    except:
        # NOTE(review): deliberate bare except at the process boundary so
        # every failure (including non-Exception ones) reaches the master.
        e = traceback.format_exc()
        sys.stderr.write(e)
        # traceback.print_exc()
if __name__ == '__main__':
    # Dispatch on the requested transport encoding for the agent loop.
    if len(sys.argv) > 1:
        mode = sys.argv[1]
        if mode == "agent":
            remote_agent(ms.StdIOWrapperIn, ms.StdIOWrapperOut)
        elif mode == "agent_base64":
            remote_agent(ms.StdIOWrapperInBase64, ms.StdIOWrapperOutBase64)
| gpl-2.0 |
girving/tensorflow | tensorflow/python/framework/framework_lib.py | 39 | 3589 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Classes and functions for building TensorFlow graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Classes used when building a Graph.
from tensorflow.python.framework.device import DeviceSpec
from tensorflow.python.framework.ops import Graph
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.framework.ops import IndexedSlices
from tensorflow.python.framework.sparse_tensor import SparseTensor
from tensorflow.python.framework.sparse_tensor import SparseTensorValue
# Utilities used when building a Graph.
from tensorflow.python.framework.ops import device
from tensorflow.python.framework.ops import container
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.framework.ops import op_scope
from tensorflow.python.framework.ops import colocate_with
from tensorflow.python.framework.ops import control_dependencies
from tensorflow.python.framework.ops import get_default_graph
from tensorflow.python.framework.ops import reset_default_graph
from tensorflow.python.framework.ops import GraphKeys
from tensorflow.python.framework.ops import add_to_collection
from tensorflow.python.framework.ops import add_to_collections
from tensorflow.python.framework.ops import get_collection
from tensorflow.python.framework.ops import get_collection_ref
from tensorflow.python.framework.ops import convert_to_tensor
from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices
from tensorflow.python.framework.random_seed import get_seed
from tensorflow.python.framework.random_seed import set_random_seed
from tensorflow.python.framework.sparse_tensor import convert_to_tensor_or_sparse_tensor
from tensorflow.python.framework.importer import import_graph_def
# Utilities for working with Tensors
from tensorflow.python.framework.tensor_util import make_tensor_proto
from tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray
# Needed when you defined a new Op in C++.
from tensorflow.python.framework.ops import RegisterGradient
from tensorflow.python.framework.ops import NotDifferentiable
from tensorflow.python.framework.ops import NoGradient
from tensorflow.python.framework.ops import RegisterShape
from tensorflow.python.framework.tensor_shape import Dimension
from tensorflow.python.framework.tensor_shape import TensorShape
# Needed when interfacing tensorflow to new array libraries
from tensorflow.python.framework.ops import register_tensor_conversion_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.framework.dtypes import * # pylint: disable=redefined-builtin
# Load a TensorFlow plugin
from tensorflow.python.framework.load_library import *
# pylint: enable=wildcard-import
| apache-2.0 |
cranmer/parametrized-learning | taylor/rootnotes.py | 1 | 1776 | """
Helper module for displaying ROOT canvases in ipython notebooks
Usage example:
# Save this file as rootnotes.py to your working directory.
import rootnotes
c1 = rootnotes.default_canvas()
fun1 = TF1( 'fun1', 'abs(sin(x)/x)', 0, 10)
c1.SetGridx()
c1.SetGridy()
fun1.Draw()
c1
More examples: http://mazurov.github.io/webfest2013/
@author alexander.mazurov@cern.ch
@author andrey.ustyuzhanin@cern.ch
@date 2013-08-09
"""
import ROOT
# Put ROOT in batch mode so canvases render off-screen (no GUI windows).
ROOT.gROOT.SetBatch()
import tempfile
from IPython.core import display
def canvas(name="icanvas", size=(800, 600)):
    """Return the ROOT canvas *name*, creating it if it does not exist.

    :param name: ROOT object name used to look up / create the canvas.
    :param size: (width, height) in pixels for a newly created canvas.
    :raises ValueError: if *size* is not a two-element sequence.
    """
    # Validate with a real exception: the original ``assert`` would be
    # stripped under ``python -O`` and let a bad size through.
    if len(size) != 2:
        raise ValueError("size must be a (width, height) pair")
    # Local renamed from ``canvas`` (it shadowed this function's name).
    existing = ROOT.gROOT.FindObject(name)
    if existing:
        return existing
    return ROOT.TCanvas(name, name, size[0], size[1])
def default_canvas(name="icanvas", size=(800, 600)):
    """Deprecated alias kept for backward compatibility; use :func:`canvas`."""
    return canvas(name=name, size=size)
def _display_canvas(canvas):
    """Render *canvas* to a temporary PNG and return the raw PNG bytes.

    Registered below with IPython's PNG formatter so a TCanvas displays
    inline in a notebook.
    """
    # Context manager closes (and deletes) the temp file deterministically;
    # the original leaked the handle until GC and shadowed builtin ``file``.
    with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
        canvas.SaveAs(tmp.name)
        # embed=True reads the file's bytes at construction time, so the
        # temporary file may be removed afterwards.
        ip_img = display.Image(filename=tmp.name, format='png', embed=True)
    return ip_img._repr_png_()
def _display_any(obj):
    """Draw *obj* on the current ROOT pad and return the raw PNG bytes."""
    # Context manager closes (and deletes) the temp file deterministically;
    # the original leaked the handle until GC and shadowed builtin ``file``.
    with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
        obj.Draw()
        ROOT.gPad.SaveAs(tmp.name)
        # embed=True reads the file's bytes at construction time.
        ip_img = display.Image(filename=tmp.name, format='png', embed=True)
    return ip_img._repr_png_()
# register display function with PNG formatter:
# NOTE: get_ipython() exists only inside a running IPython session, so this
# module can only be imported from IPython/Jupyter.
png_formatter = get_ipython().display_formatter.formatters['image/png']  # noqa
# Register ROOT types in ipython
#
# In [1]: canvas = rootnotes.canvas()
# In [2]: canvas
# Out [2]: [image will be here]
png_formatter.for_type(ROOT.TCanvas, _display_canvas)
png_formatter.for_type(ROOT.TF1, _display_any)
| bsd-2-clause |
balloob/home-assistant | homeassistant/components/homeassistant/__init__.py | 11 | 6982 | """Integration providing core pieces of infrastructure."""
import asyncio
import itertools as it
import logging
import voluptuous as vol
from homeassistant.auth.permissions.const import CAT_ENTITIES, POLICY_CONTROL
import homeassistant.config as conf_util
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
RESTART_EXIT_CODE,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
import homeassistant.core as ha
from homeassistant.exceptions import HomeAssistantError, Unauthorized, UnknownUser
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.service import async_extract_entity_ids
_LOGGER = logging.getLogger(__name__)
# Re-exported core domain under which all services below are registered.
DOMAIN = ha.DOMAIN
# Service name constants registered in async_setup.
SERVICE_RELOAD_CORE_CONFIG = "reload_core_config"
SERVICE_CHECK_CONFIG = "check_config"
SERVICE_UPDATE_ENTITY = "update_entity"
SERVICE_SET_LOCATION = "set_location"
# Call schema for homeassistant.update_entity: requires valid entity_id(s).
SCHEMA_UPDATE_ENTITY = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
async def async_setup(hass: ha.HomeAssistant, config: dict) -> bool:
"""Set up general services related to Home Assistant."""
async def async_handle_turn_service(service):
"""Handle calls to homeassistant.turn_on/off."""
entity_ids = await async_extract_entity_ids(hass, service)
# Generic turn on/off method requires entity id
if not entity_ids:
_LOGGER.error(
"homeassistant/%s cannot be called without entity_id", service.service
)
return
# Group entity_ids by domain. groupby requires sorted data.
by_domain = it.groupby(
sorted(entity_ids), lambda item: ha.split_entity_id(item)[0]
)
tasks = []
for domain, ent_ids in by_domain:
# This leads to endless loop.
if domain == DOMAIN:
_LOGGER.warning(
"Called service homeassistant.%s with invalid entity IDs %s",
service.service,
", ".join(ent_ids),
)
continue
# We want to block for all calls and only return when all calls
# have been processed. If a service does not exist it causes a 10
# second delay while we're blocking waiting for a response.
# But services can be registered on other HA instances that are
# listening to the bus too. So as an in between solution, we'll
# block only if the service is defined in the current HA instance.
blocking = hass.services.has_service(domain, service.service)
# Create a new dict for this call
data = dict(service.data)
# ent_ids is a generator, convert it to a list.
data[ATTR_ENTITY_ID] = list(ent_ids)
tasks.append(
hass.services.async_call(
domain, service.service, data, blocking, context=service.context
)
)
if tasks:
await asyncio.gather(*tasks)
service_schema = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids}, extra=vol.ALLOW_EXTRA)
hass.services.async_register(
ha.DOMAIN, SERVICE_TURN_OFF, async_handle_turn_service, schema=service_schema
)
hass.services.async_register(
ha.DOMAIN, SERVICE_TURN_ON, async_handle_turn_service, schema=service_schema
)
hass.services.async_register(
ha.DOMAIN, SERVICE_TOGGLE, async_handle_turn_service, schema=service_schema
)
async def async_handle_core_service(call):
"""Service handler for handling core services."""
if call.service == SERVICE_HOMEASSISTANT_STOP:
hass.async_create_task(hass.async_stop())
return
try:
errors = await conf_util.async_check_ha_config_file(hass)
except HomeAssistantError:
return
if errors:
_LOGGER.error(errors)
hass.components.persistent_notification.async_create(
"Config error. See [the logs](/config/logs) for details.",
"Config validating",
f"{ha.DOMAIN}.check_config",
)
return
if call.service == SERVICE_HOMEASSISTANT_RESTART:
hass.async_create_task(hass.async_stop(RESTART_EXIT_CODE))
async def async_handle_update_service(call):
"""Service handler for updating an entity."""
if call.context.user_id:
user = await hass.auth.async_get_user(call.context.user_id)
if user is None:
raise UnknownUser(
context=call.context,
permission=POLICY_CONTROL,
user_id=call.context.user_id,
)
for entity in call.data[ATTR_ENTITY_ID]:
if not user.permissions.check_entity(entity, POLICY_CONTROL):
raise Unauthorized(
context=call.context,
permission=POLICY_CONTROL,
user_id=call.context.user_id,
perm_category=CAT_ENTITIES,
)
tasks = [
hass.helpers.entity_component.async_update_entity(entity)
for entity in call.data[ATTR_ENTITY_ID]
]
if tasks:
await asyncio.wait(tasks)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_HOMEASSISTANT_STOP, async_handle_core_service
)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_HOMEASSISTANT_RESTART, async_handle_core_service
)
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_CHECK_CONFIG, async_handle_core_service
)
hass.services.async_register(
ha.DOMAIN,
SERVICE_UPDATE_ENTITY,
async_handle_update_service,
schema=SCHEMA_UPDATE_ENTITY,
)
async def async_handle_reload_config(call):
"""Service handler for reloading core config."""
try:
conf = await conf_util.async_hass_config_yaml(hass)
except HomeAssistantError as err:
_LOGGER.error(err)
return
# auth only processed during startup
await conf_util.async_process_ha_core_config(hass, conf.get(ha.DOMAIN) or {})
hass.helpers.service.async_register_admin_service(
ha.DOMAIN, SERVICE_RELOAD_CORE_CONFIG, async_handle_reload_config
)
    async def async_set_location(call):
        """Service handler to set location (updates core config lat/long)."""
        await hass.config.async_update(
            latitude=call.data[ATTR_LATITUDE], longitude=call.data[ATTR_LONGITUDE]
        )
hass.helpers.service.async_register_admin_service(
ha.DOMAIN,
SERVICE_SET_LOCATION,
async_set_location,
vol.Schema({ATTR_LATITUDE: cv.latitude, ATTR_LONGITUDE: cv.longitude}),
)
return True
| apache-2.0 |
yapdns/yapdnsbeat | vendor/github.com/elastic/beats/packetbeat/tests/system/test_0015_udpjson.py | 10 | 2365 | from packetbeat import BaseTest
from nose.tools import nottest
import socket
class Test(BaseTest):
    """System tests for the udpjson input plugin, alone and with the sniffer."""

    @nottest
    def test_udpjson_config(self):
        """
        Should start with sniffer and udpjson inputs configured.
        """
        self.render_config_template(
            mysql_ports=[3306],
            input_plugins=["sniffer", "udpjson"]
        )
        self.run_packetbeat(pcap="mysql_with_whitespaces.pcap")
        results = self.read_output()
        assert all(entry["type"] == "mysql" for entry in results)
        assert len(results) == 7

    @nottest
    def test_only_udpjson_config(self):
        """
        It should be possible to start without the sniffer configured.
        """
        self.render_config_template(input_plugins=["udpjson"])
        proc = self.start_packetbeat(debug_selectors=["udpjson"])
        self.wait_until(
            lambda: self.log_contains(
                msg="UDPJson plugin listening on 127.0.0.1:9712"),
            max_timeout=2)
        proc.kill_and_wait()

    @nottest
    def test_send_udpjson_msg(self):
        """
        It should be possible to send a UDP message and read it from
        the output.
        """
        self.render_config_template(input_plugins=["udpjson"])
        proc = self.start_packetbeat(debug_selectors=["udpjson"])
        self.wait_until(
            lambda: self.log_contains(
                msg="UDPJson plugin listening on 127.0.0.1:9712"),
            max_timeout=2,
            name="Log contains listening")
        # Fire two JSON datagrams at the plugin's listening port.
        sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sender.sendto("""{"type": "http", "status": "OK"}""",
                      ("127.0.0.1", 9712))
        sender.sendto("""{"type": "mysql", "status": "Error"}""",
                      ("127.0.0.1", 9712))
        self.wait_until(
            lambda: self.output_has(lines=2),
            max_timeout=2,
            name="Output has 2 lines")
        results = self.read_output()
        expected_fields = ["shipper", "status", "type", "@timestamp", "count"]
        self.all_have_only_fields(results, expected_fields)
        assert results[0]["type"] == "http"
        assert results[0]["status"] == "OK"
        assert results[1]["type"] == "mysql"
        assert results[1]["status"] == "Error"
        proc.kill_and_wait()
| mit |
LiangfengD/code-for-blog | 2008/google_web_search.py | 13 | 3674 | # GoogleWebSearch
#
# Useful for executing web searches on Google and returning
# result URLs.
#
# Depends on the following modules:
# - BeautifulSoup:
# For parsing the HTML results returned by Google
# - mechanize:
# For executing Google search queries
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain, given that you won't use it
# for swamping Google with search queries, since this is against
# Google's terms of service.
# It is to serve as an example and for limited personal use only.
#
import logging
import pprint
import sys
import socket
import urllib
from urllib2 import HTTPError
from BeautifulSoup import BeautifulSoup
import mechanize
class GoogleWebSearch(object):
    """ An interface to the Google web search.

        Usage:
        Call search() with your search string and the page number
        for Google. The page number is the number of the results
        page returned by Google for the search. If makes sense to
        set it to 1 the first time you issue a search.

        Then, call get_result_count() to get the total amount of
        results found by Google.

        get_result_urls() will return the URLs found in the last
        call to search()

        NOTE: Python 2 code (urllib.urlencode, xrange, print statement in
        the demo). Parsing relies on Google's 2008-era HTML layout and will
        break if that layout changes.
    """
    def __init__(self):
        # In order not to hang forever if the server doesn't reply
        #
        socket.setdefaulttimeout(5)
        # You can insert your browser's header here, if you want
        # Find the header by placing:
        # javascript:document.writeln(navigator.userAgent)
        #
        # In the address bar
        #
        browser_header = ' '.join([
            'Mozilla/5.0',
            '(Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.1)',
            'Gecko/2008070208 Firefox/3.0.1'])
        self.browser = mechanize.Browser()
        self.browser.addheaders = [('User-agent', browser_header)]
        self.browser.set_handle_robots(False)
        # Raw HTML of the last fetched results page ('' before any search).
        self.result_page = ''
    def search(self, search_for, page=1):
        """ Search for the string, and fetch the specified result
            page. page must be >= 1
        """
        self.result_page = ''
        assert page > 0
        # Google paginates 10 results per page; 'start' is the result offset.
        start = (page - 1) * 10
        query = urllib.urlencode(
            {
                'q': search_for,
                'start': start,
                'hl': 'en',
                'sa': 'N',
            }
        )
        base = 'http://www.google.com/search?'
        query_url = base + query
        logging.info('Executing: ' + query_url)
        self.browser.open(query_url)
        self.result_page = self.browser.response().read()
    def get_result_count(self):
        """Return Google's total result count from the last search, or 0.

        Assumes the count follows the word 'about' in the div#ssb paragraph
        -- TODO confirm against current markup.
        """
        soup = BeautifulSoup(self.result_page)
        ssb = soup.findAll('div', attrs={'id': 'ssb'})
        lines = ssb[0].p.contents
        for i in xrange(len(lines)):
            if lines[i].find('about') > 0 and i < len(lines) - 1:
                # The element after 'about' holds the comma-separated number.
                return int(lines[i + 1].contents[0].replace(',', ''))
        return 0
    def get_result_urls(self):
        """Return the list of result URLs from the last search page.

        Assumes each result is an <li class="g"> with an <h3><a href=...>.
        """
        soup = BeautifulSoup(self.result_page)
        res = soup.findAll('div', attrs={'id': 'res'})[0]
        glist = res.findAll('li', attrs={'class': 'g'})
        urls = []
        for g in glist:
            link = g.h3.a
            urls.append(link['href'])
        return urls
if __name__ == "__main__":
    # Demo (Python 2): fetch result page 2 for 'python game', print the
    # total count and the result URLs.
    #~ logging.basicConfig(level=logging.INFO)
    gws = GoogleWebSearch()
    gws.search('python game', 2)
    print gws.get_result_count()
    pprint.pprint(gws.get_result_urls())
| unlicense |
mclois/iteexe | twisted/test/proto_helpers.py | 16 | 1647 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from twisted.protocols import basic
from twisted.internet import error
class LineSendingProtocol(basic.LineReceiver):
    """Protocol that sends a fixed list of lines and records what it hears.

    With start=True the lines go out as soon as the connection is made;
    otherwise they are flushed on the first received line. Every received
    line is appended to self.response.
    """

    lostConn = False

    def __init__(self, lines, start = True):
        self.lines = lines[:]
        self.response = []
        self.start = start

    def connectionMade(self):
        if self.start:
            for outgoing in self.lines:
                self.sendLine(outgoing)

    def lineReceived(self, line):
        if not self.start:
            for outgoing in self.lines:
                self.sendLine(outgoing)
            self.lines = []
        self.response.append(line)

    def connectionLost(self, reason):
        # Flag checked by tests to confirm the connection went away.
        self.lostConn = True
class FakeDatagramTransport:
    """Datagram transport stub that records writes instead of sending them."""

    # Sentinel distinguishing "no address given" from an explicit None.
    noAddr = object()

    def __init__(self):
        self.written = []

    def write(self, packet, addr=noAddr):
        # Keep (packet, destination) pairs for later inspection by tests.
        self.written.append((packet, addr))
class StringTransport:
    """Transport that accumulates everything written into an in-memory buffer."""

    disconnecting = 0

    def __init__(self):
        self.clear()

    def clear(self):
        # Start over with a fresh, empty buffer.
        self.io = StringIO()

    def value(self):
        """Return everything written since the last clear()."""
        return self.io.getvalue()

    def write(self, data):
        self.io.write(data)

    def writeSequence(self, data):
        for chunk in data:
            self.io.write(chunk)

    def loseConnection(self):
        # Nothing to tear down for an in-memory transport.
        pass

    def getPeer(self):
        return ('StringIO', repr(self.io))

    def getHost(self):
        return ('StringIO', repr(self.io))
class StringTransportWithDisconnection(StringTransport):
    # Unlike the base class, loseConnection notifies the attached protocol
    # (callers must set .protocol first) with a ConnectionDone failure.
    def loseConnection(self):
        self.protocol.connectionLost(error.ConnectionDone("Bye."))
| gpl-2.0 |
LeMinaw/minaw.net | register/views.py | 1 | 2544 | from django.shortcuts import render
from django.http import HttpResponse
from django.db import IntegrityError
from urllib.request import urlopen
from hashlib import sha256
from os import environ
from register.models import *
from register.forms import *
from register.data import ACTIVATION_KEYS
import requests
import re
def main(request):
    """Activation-code registration view.

    On POST: validates the form, confirms the username exists on the
    Skywanderers forums, looks up the sha256-hashed activation code, and
    if the code is still active creates the Member, burns the code, and
    pushes the new rank to the remote setrank endpoint. Renders
    register/main.html with `form`, `error` and `success` via locals().
    """
    error = None
    success = False
    if request.method == "POST":
        form = RegisterForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            code = form.cleaned_data['code']
            # NOTE(review): blocking network fetch inside the request cycle.
            userlist = urlopen("https://www.skywanderersgame.com/userlist.php").read().decode()
            if "<span>%s</span>" % username in userlist:
                # Codes are stored hashed; hash the submitted one to compare.
                code_crypt = code.encode('utf-8')
                code_crypt = sha256(code_crypt).hexdigest()
                try:
                    db_code = ActivationCode.objects.get(code=code_crypt)
                    if db_code.active:
                        user = Member(username=username, user_rank=db_code.rank)
                        try:
                            user.save()
                            # Burn the code only after the member row exists.
                            db_code.active = False
                            db_code.save()
                            success = True
                            data = {'username':username, 'user_rank':db_code.rank}
                            r = requests.post("https://www.skywanderersgame.com/private/setrank.php", data=data, auth=('skywdr', environ.get("RANKS_PWD")))
                        except IntegrityError:
                            error = "This username is already using an activation code."
                    else:
                        error = "This activation code has already been used."
                except ActivationCode.DoesNotExist:
                    error = "Invalid activation code!"
            else:
                error = "This username is not registered on the Skywanderers forums!"
    else:
        form = RegisterForm()
    return render(request, "register/main.html", locals())
def load(request):
    """One-shot loader that inserts every ACTIVATION_KEYS entry into the DB.

    Only acts when the LOAD_KEYS environment variable is exactly 'TRUE';
    otherwise answers "nope". Duplicate codes are reported and skipped.
    """
    if environ.get("LOAD_KEYS") != 'TRUE':
        return HttpResponse("nope")
    for key, key_rank in ACTIVATION_KEYS.items():
        entry = ActivationCode(code=key, rank=key_rank, active=True, active_disc=True)
        try:
            entry.save()
        except IntegrityError:
            # The unique code column already holds this key.
            print("The code %s already exists in base." % key)
    return HttpResponse("done")
| gpl-3.0 |
seung-lab/cloud-volume | cloudvolume/sharedmemory.py | 1 | 6678 | from collections import defaultdict
import errno
import math
import mmap
import os
import sys
import time
import multiprocessing as mp
from six.moves import range
import numpy as np
from .lib import Bbox, Vec, mkdir
SHM_DIRECTORY = '/dev/shm/'
EMULATED_SHM_DIRECTORY = '/tmp/cloudvolume-shm'
EMULATE_SHM = not os.path.isdir(SHM_DIRECTORY)
PLATFORM_SHM_DIRECTORY = SHM_DIRECTORY if not EMULATE_SHM else EMULATED_SHM_DIRECTORY
class SharedMemoryReadError(Exception):
  """Raised when a segment opened readonly is missing or has the wrong size."""
  pass

class SharedMemoryAllocationError(Exception):
  """Raised when a shared memory segment cannot be allocated (e.g. OOM)."""
  pass
def ndarray(shape, dtype, location, order='F', readonly=False, lock=None, **kwargs):
  """
  Create a shared memory numpy array.

  Dispatches to real POSIX shared memory when /dev/shm exists, otherwise
  to a file-backed emulation under EMULATED_SHM_DIRECTORY.

  The lock is only used by the filesystem emulation, where file
  allocation must be serialized across processes.

  Allocating the shared array requires cleanup on your part: a file will
  live at sharedmemory.PLATFORM_SHM_DIRECTORY + location and must be
  unlinked when you're done. Also call .close() on the returned mmap
  handle (the OS closes it at process exit regardless).

  Parameters:
    shape: same as numpy.ndarray
    dtype: same as numpy.ndarray
    location: the shared memory filename
    lock: (optional) multiprocessing.Lock

  Returns: (mmap filehandle, shared ndarray)
  """
  if not EMULATE_SHM:
    return ndarray_shm(shape, dtype, location, readonly, order, **kwargs)
  # No /dev/shm: fall back to an ordinary file-backed mmap.
  return ndarray_fs(
    shape, dtype, location, lock,
    readonly, order, emulate_shm=True, **kwargs
  )
def ndarray_fs(
    shape, dtype, location, lock,
    readonly=False, order='F', emulate_shm=False,
    **kwargs
  ):
  """Emulate shared memory using the filesystem.

  When emulate_shm is True the file lives under EMULATED_SHM_DIRECTORY;
  otherwise `location` is treated as a full path. `lock` (if given)
  serializes the allocate-or-resize step across processes.

  Returns: (mmap filehandle, ndarray backed by that mapping)
  """
  dbytes = np.dtype(dtype).itemsize
  nbytes = Vec(*shape).rectVolume() * dbytes
  if emulate_shm:
    directory = mkdir(EMULATED_SHM_DIRECTORY)
    filename = os.path.join(directory, location)
  else:
    filename = location
  if lock:
    lock.acquire()
  try:
    # Ensure the backing file exists with exactly nbytes (sparse).
    allocate_shm_file(filename, nbytes, dbytes, readonly)
  finally:
    if lock:
      lock.release()
  with open(filename, 'r+b') as f:
    array_like = mmap.mmap(f.fileno(), 0) # map entire file
    renderbuffer = np.ndarray(buffer=array_like, dtype=dtype, shape=shape, order=order, **kwargs)
  # Enforce readonly at the numpy level; the mapping itself stays r+.
  renderbuffer.setflags(write=(not readonly))
  return array_like, renderbuffer
def allocate_shm_file(filename, nbytes, dbytes, readonly):
  """Ensure `filename` exists as a (sparse) file of exactly `nbytes` bytes.

  readonly: the file must already exist with exactly `nbytes` bytes,
    otherwise SharedMemoryReadError is raised.
  dbytes: element size in bytes; not used here -- presumably kept for
    interface parity with callers (TODO confirm).
  """
  exists = os.path.exists(filename)
  size = 0 if not exists else os.path.getsize(filename)
  if readonly and not exists:
    raise SharedMemoryReadError(filename + " has not been allocated. Requested " + str(nbytes) + " bytes.")
  elif readonly and size != nbytes:
    raise SharedMemoryReadError("{} exists, but the allocation size ({} bytes) does not match the request ({} bytes).".format(
      filename, size, nbytes
    ))
  if exists:
    if size > nbytes:
      # NOTE(review): mode 'wb' truncates to zero before ftruncate grows the
      # file back, so shrinking discards all existing contents -- presumably
      # acceptable because the writer repopulates the buffer; confirm before
      # relying on data surviving a shrink.
      with open(filename, 'wb') as f:
        os.ftruncate(f.fileno(), nbytes)
    elif size < nbytes:
      # too small? just remake it below
      os.unlink(filename)
    exists = os.path.exists(filename)
  if not exists:
    # Previously we were writing out real files full of zeros,
    # but a) that takes forever and b) modern OSes support sparse
    # files (i.e. gigabytes of zeros that take up only a few real bytes).
    #
    # The following should take advantage of this functionality and be faster.
    # It should work on Python 2.7 Unix, and Python 3.5+ on Unix and Windows.
    #
    # References:
    #   https://stackoverflow.com/questions/8816059/create-file-of-particular-size-in-python
    #   https://docs.python.org/3/library/os.html#os.ftruncate
    #   https://docs.python.org/2/library/os.html#os.ftruncate
    #
    with open(filename, 'wb') as f:
      os.ftruncate(f.fileno(), nbytes)
def ndarray_shm(shape, dtype, location, readonly=False, order='F', **kwargs):
  """Create a shared memory numpy array. Requires /dev/shm to exist.

  Validates the existing segment size for readonly opens, and refuses to
  allocate more than the currently available virtual memory.

  Returns: (mmap filehandle, ndarray backed by the POSIX shm segment)
  """
  import posix_ipc
  from posix_ipc import O_CREAT
  import psutil
  nbytes = Vec(*shape).rectVolume() * np.dtype(dtype).itemsize
  available = psutil.virtual_memory().available
  preexisting = 0
  # This might only work on Ubuntu
  shmloc = os.path.join(SHM_DIRECTORY, location)
  if os.path.exists(shmloc):
    preexisting = os.path.getsize(shmloc)
  elif readonly:
    raise SharedMemoryReadError(shmloc + " has not been allocated. Requested " + str(nbytes) + " bytes.")
  if readonly and preexisting != nbytes:
    raise SharedMemoryReadError("{} exists, but the allocation size ({} bytes) does not match the request ({} bytes).".format(
      shmloc, preexisting, nbytes
    ))
  # Only the *growth* beyond the preexisting segment consumes new memory.
  if (nbytes - preexisting) > available:
    overallocated = nbytes - preexisting - available
    overpercent = (100 * overallocated / (preexisting + available))
    raise SharedMemoryAllocationError("""
    Requested more memory than is available.
    Shared Memory Location: {}
    Shape: {}
    Requested Bytes: {}
    Available Bytes: {}
    Preexisting Bytes*: {}
    Overallocated Bytes*: {} (+{:.2f}%)
    * Preexisting is only correct on linux systems that support /dev/shm/""" \
    .format(location, shape, nbytes, available, preexisting, overallocated, overpercent))
  # This might seem like we're being "extra safe" but consider
  # a threading condition where the condition of the shared memory
  # was adjusted between the check above and now. Better to make sure
  # that we don't accidently change anything if readonly is set.
  flags = 0 if readonly else O_CREAT
  size = 0 if readonly else int(nbytes)
  try:
    shared = posix_ipc.SharedMemory(location, flags=flags, size=size)
    array_like = mmap.mmap(shared.fd, shared.size)
    os.close(shared.fd)  # the mmap keeps its own reference to the segment
    renderbuffer = np.ndarray(buffer=array_like, dtype=dtype, shape=shape, order=order, **kwargs)
  except OSError as err:
    if err.errno == errno.ENOMEM: # Out of Memory
      # Don't leave a half-created segment behind on OOM.
      posix_ipc.unlink_shared_memory(location)
    raise
  renderbuffer.setflags(write=(not readonly))
  return array_like, renderbuffer
def unlink(location):
  """Remove the shared segment at `location` using the active backend."""
  if not EMULATE_SHM:
    return unlink_shm(location)
  return unlink_fs(location)
def unlink_shm(location):
  # Unlink a POSIX shared memory segment; False if it never existed.
  import posix_ipc
  try:
    posix_ipc.unlink_shared_memory(location)
  except posix_ipc.ExistentialError:
    return False
  return True
def unlink_fs(location):
  """Delete the emulated (file-backed) segment; True on success."""
  directory = mkdir(EMULATED_SHM_DIRECTORY)
  target = os.path.join(directory, location)
  try:
    os.unlink(target)
  except OSError:
    # Missing file (or permission problem) -- nothing was removed.
    return False
  return True
| bsd-3-clause |
evinstk/TantechEngineOriginal | lib/googletest-release-1.7.0/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Bug fix: the original `IS_WINDOWS = os.name = 'nt'` was a chained
# assignment -- it overwrote os.name with 'nt' and left IS_WINDOWS as the
# truthy string 'nt' on every platform, so the non-Windows test branches
# never ran. The comparison below is what was intended.
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is None:
    # pop with a default is a no-op when the variable isn't set.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code.

  term / color_env_var: values for the TERM and GTEST_COLOR environment
    variables (None unsets them). color_flag: value for --gtest_color,
    or None to omit the flag. The child exits nonzero iff it would use
    colored output, so the return value is truthy when color is used.
  """
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  if color_flag is None:
    args = []
  else:
    args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Checks color decisions for every combination of TERM, env var and flag."""

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Terminals without color support must not trigger color.
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))
  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('emacs', None, 'auto'))
    self.assert_(UsesColor('xterm', None, 'auto'))
    # 'yes' forces color regardless of terminal capability.
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))
  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
    self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))
  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))
  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
  gtest_test_utils.Main()  # delegate to gtest's shared Python test runner
| mit |
alash3al/rethinkdb | external/v8_3.30.33.16/testing/gmock/gtest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
  print "Usage: versiongenerate.py input_dir output_dir"
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
# (assumption 1 in the module docstring: AC_INIT appears in this prefix).
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
#   Extract three integers separated by periods and surrounded by squre
#   brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
#   (*? is the non-greedy flag) since that would pull in everything between
#   the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
# NOTE(review): search() returns None (and the next line raises) when
# configure.ac violates the documented assumptions -- intentional fail-fast.
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| agpl-3.0 |
supriyasingh01/github_basics | Internetworking Distributed Project/finalProject/ovs/pox-master/pox/forwarding/l2_multi.py | 5 | 11994 | # Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
A messy first shot at a standalone L2 switch that learns ethernet
addresses across the entire network and picks short paths between
them.
Depends on openflow.discovery
Works with openflow.spanning_tree
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.revent import *
from collections import defaultdict
from pox.openflow.discovery import Discovery
from pox.lib.util import dpidToStr
log = core.getLogger()
# Adjacency map. [sw1][sw2] -> port from sw1 to sw2
adjacency = defaultdict(lambda:defaultdict(lambda:None))
# Switches we know of. [dpid] -> Switch
switches = {}
# ethaddr -> (switch, port)
mac_map = {}
# [sw1][sw2] -> (distance, intermediate)
path_map = defaultdict(lambda:defaultdict(lambda:(None,None)))
def _calc_paths ():
  """
  Essentially Floyd-Warshall algorithm

  Rebuilds the module-level path_map from the adjacency table:
  path_map[a][b] = (hop distance, intermediate switch or None).
  """
  sws = switches.values()
  path_map.clear()
  # Initialize: directly-adjacent switches are one hop apart, self is zero.
  for k in sws:
    for j,port in adjacency[k].iteritems():
      if port is None: continue
      path_map[k][j] = (1,None)
    path_map[k][k] = (0,None) # distance, intermediate
  # Debug dump left disabled as a string literal (has no runtime effect):
  """
  for i in sws:
    for j in sws:
      a = path_map[i][j][0]
      #a = adjacency[i][j]
      if a is None: a = "*"
      print a,
    print
  """
  # Relaxation: try routing every (i, j) pair through each intermediate k.
  for k in sws:
    for i in sws:
      for j in sws:
        if path_map[i][k][0] is not None:
          if path_map[k][j][0] is not None:
            # i -> k -> j exists
            ikj_dist = path_map[i][k][0]+path_map[k][j][0]
            if path_map[i][j][0] is None or ikj_dist < path_map[i][j][0]:
              # i -> k -> j is better than existing
              path_map[i][j] = (ikj_dist, k)
  # Second disabled debug dump (string literal, no runtime effect):
  """
  print "--------------------"
  for i in sws:
    for j in sws:
      print path_map[i][j][0],
    print
  """
def _get_raw_path (src, dst):
  """
  Return the list of intermediate switches between src and dst
  (excluding the endpoints), or None when no path exists.
  Lazily recomputes path_map on first use.
  """
  if len(path_map) == 0: _calc_paths()
  if src is dst:
    # We're here!
    return []
  if path_map[src][dst][0] is None:
    return None
  intermediate = path_map[src][dst][1]
  if intermediate is None:
    # Directly connected
    return []
  # Recurse on both halves around the recorded intermediate switch.
  return _get_raw_path(src, intermediate) + [intermediate] + \
         _get_raw_path(intermediate, dst)
def _check_path (p):
  """
  True iff every hop in p is still consistent with the adjacency table.

  p is a list of (switch, out_port) pairs; each recorded out_port must be
  the port that currently leads from its switch to the next hop's switch.
  """
  for (sw_a, out_port), (sw_b, _unused) in zip(p, p[1:]):
    if adjacency[sw_a][sw_b] != out_port:
      return False
  return True
def _get_path (src, dst, final_port):
  """
  Build a "cooked" path: a list of (switch, out_port) hops from src to
  dst, ending with (dst, final_port). Returns None when unreachable.
  """
  #print "path from",src,"to",dst
  if src == dst:
    path = [src]
  else:
    path = _get_raw_path(src, dst)
    if path is None: return None
    path = [src] + path + [dst]
  # print "raw: ",path
  # Pair each switch with the port leading to the next switch on the path.
  r = []
  for s1,s2 in zip(path[:-1],path[1:]):
    port = adjacency[s1][s2]
    r.append((s1,port))
  r.append((dst, final_port))
  # print "cooked: ",r
  assert _check_path(r)
  return r
class PathInstalled (Event):
  """
  Fired when a path is installed
  """
  def __init__ (self, path):
    Event.__init__(self)
    self.path = path  # ordered list of (switch, out_port) hops
class Switch (EventMixin):
  """Per-datapath handler: learns MAC locations and installs flow paths."""

  def __init__ (self):
    self.connection = None
    self.ports = None
    self.dpid = None
    self._listeners = None

  def __repr__ (self):
    return dpidToStr(self.dpid)

  def _install (self, switch, port, match, buf = -1):
    # Install a single flow entry on `switch` forwarding `match` out `port`.
    msg = of.ofp_flow_mod()
    msg.match = match
    msg.idle_timeout = 10
    msg.hard_timeout = 30
    msg.actions.append(of.ofp_action_output(port = port))
    msg.buffer_id = buf
    switch.connection.send(msg)

  def _install_path (self, p, match, buffer_id = -1):
    # Install downstream hops first so the packet never outruns the flows;
    # the first hop gets the buffer id so the queued packet is released.
    for sw,port in p[1:]:
      self._install(sw, port, match)
    self._install(p[0][0], p[0][1], match, buffer_id)
    core.l2_multi.raiseEvent(PathInstalled(p))

  def install_path (self, dst_sw, last_port, match, event):#buffer_id, packet):
    """Install flows along the path to dst_sw; on failure, answer IP
    traffic with an ICMP destination-unreachable."""
    p = _get_path(self, dst_sw, last_port)
    if p is None:
      log.warning("Can't get from %s to %s", match.dl_src, match.dl_dst)
      import pox.lib.packet as pkt
      if (match.dl_type == pkt.ethernet.IP_TYPE and
          event.parsed.find('ipv4')):
        # It's IP -- let's send a destination unreachable
        log.debug("Dest unreachable (%s -> %s)",
                  match.dl_src, match.dl_dst)
        from pox.lib.addresses import EthAddr
        e = pkt.ethernet()
        e.src = EthAddr(dpidToStr(self.dpid)) #FIXME: Hmm...
        e.dst = match.dl_src
        e.type = e.IP_TYPE
        ipp = pkt.ipv4()
        ipp.protocol = ipp.ICMP_PROTOCOL
        ipp.srcip = match.nw_dst #FIXME: Ridiculous
        ipp.dstip = match.nw_src
        icmp = pkt.icmp()
        icmp.type = pkt.ICMP.TYPE_DEST_UNREACH
        icmp.code = pkt.ICMP.CODE_UNREACH_HOST
        orig_ip = event.parsed.find('ipv4')
        # ICMP payload: original IP header plus first 8 data bytes.
        d = orig_ip.pack()
        d = d[:orig_ip.hl * 4 + 8]
        import struct
        d = struct.pack("!HH", 0,0) + d #FIXME: MTU
        icmp.payload = d
        ipp.payload = icmp
        e.payload = ipp
        msg = of.ofp_packet_out()
        msg.actions.append(of.ofp_action_output(port = event.port))
        msg.data = e.pack()
        self.connection.send(msg)
      return
    self._install_path(p, match, event.ofp.buffer_id)
    log.debug("Installing path for %s -> %s %04x (%i hops)", match.dl_src, match.dl_dst, match.dl_type, len(p))
    #log.debug("installing path for %s.%i -> %s.%i" %
    #          (src[0].dpid, src[1], dst[0].dpid, dst[1]))

  def _handle_PacketIn (self, event):
    """Learn the source MAC's location, then flood or install a path."""
    def flood ():
      """ Floods the packet """
      msg = of.ofp_packet_out()
      msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
      msg.buffer_id = event.ofp.buffer_id
      msg.in_port = event.port
      self.connection.send(msg)

    def drop ():
      # Kill the buffer
      if event.ofp.buffer_id != -1:
        msg = of.ofp_packet_out()
        msg.buffer_id = event.ofp.buffer_id
        event.ofp.buffer_id = -1 # Mark is dead
        msg.in_port = event.port
        self.connection.send(msg)

    packet = event.parsed
    loc = (self, event.port) # Place we saw this ethaddr
    oldloc = mac_map.get(packet.src) # Place we last saw this ethaddr

    if packet.type == packet.LLDP_TYPE:
      # LLDP is handled by discovery; never learn from or forward it.
      drop()
      return

    #print packet.src,"*",loc,oldloc
    if oldloc is None:
      if packet.src.isMulticast() == False:
        mac_map[packet.src] = loc # Learn position for ethaddr
        log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1])
    elif oldloc != loc:
      # ethaddr seen at different place!
      if loc[1] not in adjacency[loc[0]].values():
        # New place is another "plain" port (probably)
        log.debug("%s moved from %s.%i to %s.%i?", packet.src,
                  dpidToStr(oldloc[0].connection.dpid), oldloc[1],
                  dpidToStr( loc[0].connection.dpid), loc[1])
        if packet.src.isMulticast() == False:
          mac_map[packet.src] = loc # Learn position for ethaddr
          log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1])
      elif packet.dst.isMulticast() == False:
        # New place is a switch-to-switch port!
        #TODO: This should be a flood.  It'd be nice if we knew.  We could
        #      check if the port is in the spanning tree if it's available.
        #      Or maybe we should flood more carefully?
        log.warning("Packet from %s arrived at %s.%i without flow",# -- dropping",
                    packet.src, dpidToStr(self.dpid), event.port)
        #drop()
        #return

    if packet.dst.isMulticast():
      log.debug("Flood multicast from %s", packet.src)
      flood()
    else:
      if packet.dst not in mac_map:
        log.debug("%s unknown -- flooding" % (packet.dst,))
        flood()
      else:
        # Destination location known: install flows all along the path.
        dest = mac_map[packet.dst]
        #print packet.dst, "is on", dest
        match = of.ofp_match.from_packet(packet)
        self.install_path(dest[0], dest[1], match, event)

  def disconnect (self):
    if self.connection is not None:
      log.debug("Disconnect %s" % (self.connection,))
      self.connection.removeListeners(self._listeners)
      self.connection = None
      self._listeners = None

  def connect (self, connection):
    # First connect pins the dpid; reconnects must present the same dpid.
    if self.dpid is None:
      self.dpid = connection.dpid
    assert self.dpid == connection.dpid
    if self.ports is None:
      self.ports = connection.features.ports
    self.disconnect()
    log.debug("Connect %s" % (connection,))
    self.connection = connection
    self._listeners = self.listenTo(connection)

  def _handle_ConnectionDown (self, event):
    self.disconnect()
    pass
class l2_multi (EventMixin):
    """Multi-switch L2 forwarding component.

    Listens to openflow and discovery events, keeps the module-level
    switch/adjacency/path tables up to date, and creates a Switch wrapper
    for each datapath that connects.  Raises PathInstalled events (declared
    below) when paths are programmed.
    """

    _eventMixin_events = set([
        PathInstalled,
    ])

    def __init__ (self):
        # Priority 0 so other openflow listeners (e.g. discovery) run first.
        self.listenTo(core.openflow, priority=0)
        self.listenTo(core.openflow_discovery)

    def _handle_LinkEvent (self, event):
        """Update the adjacency map when a link appears or disappears."""
        def flip (link):
            # The same physical link, viewed from the opposite direction.
            return Discovery.Link(link[2], link[3], link[0], link[1])

        l = event.link
        sw1 = switches[l.dpid1]
        sw2 = switches[l.dpid2]

        # Invalidate all flows and path info.
        # For link adds, this makes sure that if a new link leads to an
        # improved path, we use it.
        # For link removals, this makes sure that we don't use a
        # path that may have been broken.
        # NOTE: This could be radically improved! (e.g., not *ALL* paths break)
        clear = of.ofp_flow_mod(match=of.ofp_match(), command=of.OFPFC_DELETE)
        for sw in switches.itervalues():
            sw.connection.send(clear)
        path_map.clear()

        if event.removed:
            # This link no longer okay
            if sw2 in adjacency[sw1]: del adjacency[sw1][sw2]
            if sw1 in adjacency[sw2]: del adjacency[sw2][sw1]

            # But maybe there's another way to connect these...
            for ll in core.openflow_discovery.adjacency:
                if ll.dpid1 == l.dpid1 and ll.dpid2 == l.dpid2:
                    if flip(ll) in core.openflow_discovery.adjacency:
                        # Yup, link goes both ways
                        adjacency[sw1][sw2] = ll.port1
                        adjacency[sw2][sw1] = ll.port2
                        # Fixed -- new link chosen to connect these
                        break
        else:
            # If we already consider these nodes connected, we can
            # ignore this link up.
            # Otherwise, we might be interested...
            if adjacency[sw1][sw2] is None:
                # These previously weren't connected.  If the link
                # exists in both directions, we consider them connected now.
                if flip(l) in core.openflow_discovery.adjacency:
                    # Yup, link goes both ways -- connected!
                    adjacency[sw1][sw2] = l.port1
                    adjacency[sw2][sw1] = l.port2

            # If we have learned a MAC on this port which we now know to
            # be connected to a switch, unlearn it.
            bad_macs = set()
            for mac, (sw, port) in mac_map.iteritems():
                if sw is sw1 and port == l.port1:
                    if mac not in bad_macs:
                        log.debug("Unlearned %s", mac)
                        bad_macs.add(mac)
                if sw is sw2 and port == l.port2:
                    if mac not in bad_macs:
                        log.debug("Unlearned %s", mac)
                        bad_macs.add(mac)
            for mac in bad_macs:
                del mac_map[mac]

    def _handle_ConnectionUp (self, event):
        """Create (or re-attach) the Switch wrapper for a new connection."""
        sw = switches.get(event.dpid)
        if sw is None:
            # New switch
            sw = Switch()
            switches[event.dpid] = sw
            sw.connect(event.connection)
        else:
            sw.connect(event.connection)
def launch ():
    """POX component entry point: ensure discovery is running, then register l2_multi."""
    if 'openflow_discovery' not in core.components:
        import pox.openflow.discovery as discovery
        core.registerNew(discovery.Discovery)
    core.registerNew(l2_multi)
| cc0-1.0 |
vasyarv/edx-platform | common/test/acceptance/tests/lms/test_lms_dashboard.py | 64 | 3835 | # -*- coding: utf-8 -*-
"""
End-to-end tests for the main LMS Dashboard (aka, Student Dashboard).
"""
from ..helpers import UniqueCourseTest
from ...fixtures.course import CourseFixture
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.dashboard import DashboardPage
class BaseLmsDashboardTest(UniqueCourseTest):
    """ Base test suite for the LMS Student Dashboard """

    def setUp(self):
        """
        Initializes the components (page objects, courses, users) for this test suite
        """
        # Some parameters are provided by the parent setUp() routine, such as the following:
        # self.course_id, self.course_info, self.unique_id
        super(BaseLmsDashboardTest, self).setUp()

        # Load page objects for use by the tests
        self.dashboard_page = DashboardPage(self.browser)

        # Configure some aspects of the test course and install the settings into the course
        self.course_fixture = CourseFixture(
            self.course_info["org"],
            self.course_info["number"],
            self.course_info["run"],
            self.course_info["display_name"],
        )
        # social_sharing_url is read by the social sharing widget tests below.
        self.course_fixture.add_advanced_settings({
            u"social_sharing_url": {u"value": "http://custom/course/url"}
        })
        self.course_fixture.install()

        # Create the test user, register them for the course, and authenticate
        self.username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        self.email = "{user}@example.com".format(user=self.username)
        AutoAuthPage(
            self.browser,
            username=self.username,
            email=self.email,
            course_id=self.course_id
        ).visit()

        # Navigate the authenticated, enrolled user to the dashboard page and get testing!
        self.dashboard_page.visit()
class LmsDashboardPageTest(BaseLmsDashboardTest):
    """ Test suite for the LMS Student Dashboard page """

    def test_dashboard_course_listings(self):
        """
        Perform a general validation of the course listings section
        """
        course_listings = self.dashboard_page.get_course_listings()
        # The user was enrolled in exactly one course during setUp().
        self.assertEqual(len(course_listings), 1)

    def test_dashboard_social_sharing_feature(self):
        """
        Validate the behavior of the social sharing feature
        """
        # Twitter widget: share URL is built from the course's
        # social_sharing_url advanced setting installed in setUp().
        twitter_widget = self.dashboard_page.get_course_social_sharing_widget('twitter')
        twitter_url = "https://twitter.com/intent/tweet?text=Testing+feature%3A%20http%3A%2F%2Fcustom%2Fcourse%2Furl"  # pylint: disable=line-too-long
        self.assertEqual(twitter_widget.attrs('title')[0], 'Share on Twitter')
        self.assertEqual(twitter_widget.attrs('data-tooltip')[0], 'Share on Twitter')
        self.assertEqual(twitter_widget.attrs('aria-haspopup')[0], 'true')
        self.assertEqual(twitter_widget.attrs('aria-expanded')[0], 'false')
        self.assertEqual(twitter_widget.attrs('target')[0], '_blank')
        self.assertIn(twitter_url, twitter_widget.attrs('href')[0])
        self.assertIn(twitter_url, twitter_widget.attrs('onclick')[0])

        # Facebook widget: same attribute checks against the Facebook share URL.
        facebook_widget = self.dashboard_page.get_course_social_sharing_widget('facebook')
        facebook_url = "https://www.facebook.com/sharer/sharer.php?u=http%3A%2F%2Fcustom%2Fcourse%2Furl"
        self.assertEqual(facebook_widget.attrs('title')[0], 'Share on Facebook')
        self.assertEqual(facebook_widget.attrs('data-tooltip')[0], 'Share on Facebook')
        self.assertEqual(facebook_widget.attrs('aria-haspopup')[0], 'true')
        self.assertEqual(facebook_widget.attrs('aria-expanded')[0], 'false')
        self.assertEqual(facebook_widget.attrs('target')[0], '_blank')
        self.assertIn(facebook_url, facebook_widget.attrs('href')[0])
        self.assertIn(facebook_url, facebook_widget.attrs('onclick')[0])
| agpl-3.0 |
kennedyshead/home-assistant | homeassistant/components/meteo_france/config_flow.py | 2 | 4663 | """Config flow to configure the Meteo-France integration."""
import logging
from meteofrance_api.client import MeteoFranceClient
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_MODE
from homeassistant.core import callback
from .const import CONF_CITY, DOMAIN, FORECAST_MODE, FORECAST_MODE_DAILY
_LOGGER = logging.getLogger(__name__)
class MeteoFranceFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a Meteo-France config flow."""

    VERSION = 1

    def __init__(self):
        """Init MeteoFranceFlowHandler."""
        # Search results from the Meteo-France places API; filled in
        # async_step_user and consumed by async_step_cities.
        self.places = []

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return MeteoFranceOptionsFlowHandler(config_entry)

    @callback
    def _show_setup_form(self, user_input=None, errors=None):
        """Show the setup form to the user."""
        if user_input is None:
            user_input = {}
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {vol.Required(CONF_CITY, default=user_input.get(CONF_CITY, "")): str}
            ),
            errors=errors or {},
        )

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        errors = {}

        if user_input is None:
            return self._show_setup_form(user_input, errors)

        city = user_input[CONF_CITY]  # Might be a city name or a postal code
        latitude = user_input.get(CONF_LATITUDE)
        longitude = user_input.get(CONF_LONGITUDE)

        if not latitude:
            # No coordinates yet: look the city up (blocking client call, run
            # in the executor) and let the user pick a match in the next step.
            client = MeteoFranceClient()
            self.places = await self.hass.async_add_executor_job(
                client.search_places, city
            )
            _LOGGER.debug("Places search result: %s", self.places)
            if not self.places:
                errors[CONF_CITY] = "empty"
                return self._show_setup_form(user_input, errors)
            return await self.async_step_cities()

        # Check if already configured
        await self.async_set_unique_id(f"{latitude}, {longitude}")
        self._abort_if_unique_id_configured()

        return self.async_create_entry(
            title=city,
            data={CONF_LATITUDE: latitude, CONF_LONGITUDE: longitude},
        )

    async def async_step_import(self, user_input):
        """Import a config entry."""
        return await self.async_step_user(user_input)

    async def async_step_cities(self, user_input=None):
        """Step where the user chooses the city from the API search results."""
        if not user_input:
            if len(self.places) > 1 and self.source != SOURCE_IMPORT:
                # Several matches: ask the user which one was meant.  Keys
                # encode "name;lat;lon" (see _build_place_key below).
                places_for_form = {}
                for place in self.places:
                    places_for_form[_build_place_key(place)] = f"{place}"
                return self.async_show_form(
                    step_id="cities",
                    data_schema=vol.Schema(
                        {
                            vol.Required(CONF_CITY): vol.All(
                                vol.Coerce(str), vol.In(places_for_form)
                            )
                        }
                    ),
                )
            # Single match (or YAML import): take the first result directly.
            user_input = {CONF_CITY: _build_place_key(self.places[0])}

        # Decode the "name;lat;lon" key and re-enter the user step with
        # explicit coordinates, which creates the entry.
        city_infos = user_input[CONF_CITY].split(";")
        return await self.async_step_user(
            {
                CONF_CITY: city_infos[0],
                CONF_LATITUDE: city_infos[1],
                CONF_LONGITUDE: city_infos[2],
            }
        )
class MeteoFranceOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle an options flow (forecast mode selection)."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Initialize options flow."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Handle options flow."""
        if user_input is not None:
            # Persist the chosen forecast mode as entry options.
            return self.async_create_entry(title="", data=user_input)

        # Pre-select the currently configured mode (daily by default).
        data_schema = vol.Schema(
            {
                vol.Optional(
                    CONF_MODE,
                    default=self.config_entry.options.get(
                        CONF_MODE, FORECAST_MODE_DAILY
                    ),
                ): vol.In(FORECAST_MODE)
            }
        )
        return self.async_show_form(step_id="init", data_schema=data_schema)
def _build_place_key(place) -> str:
return f"{place};{place.latitude};{place.longitude}"
| apache-2.0 |
johnzhd/gpsmap | gpsmap/db_build.py | 1 | 2339 | import pymongo
import backend
def clear_db():
    """Drop every gpsmap collection so the database can be rebuilt from scratch."""
    client = pymongo.MongoClient(backend.global_db_url)
    db = client.get_database(backend.global_db_name)
    collections = (
        backend.global_db_origin_collection,
        backend.global_db_user_collection,
        backend.global_db_device_collection,
        backend.global_db_calc_collection,
    )
    for name in collections:
        try:
            db.drop_collection(name)
        except Exception as exc:
            # Best effort: report and keep dropping the remaining collections.
            print("{0} --- {1}".format(__name__, exc))
def _create_indexed_collection(db, name, index_specs):
    """Create collection *name* in *db* and ensure the given indexes.

    index_specs is an iterable of key-spec lists suitable for
    Collection.ensure_index().  Errors (e.g. the collection already exists)
    are printed and swallowed, preserving the original best-effort behaviour.
    """
    try:
        db.create_collection(name)
        collection = db.get_collection(name)
        for spec in index_specs:
            # NOTE(review): ensure_index() is deprecated in modern pymongo;
            # create_index() is the forward-compatible replacement.
            collection.ensure_index(spec)
    except Exception as e:
        print("{0} --- {1}".format(__name__, e))


def build_db():
    """Create all gpsmap collections and their indexes (best-effort, idempotent).

    The origin/device/calc collections carry time + geospatial indexes; the
    user collection has no location index.
    """
    connect = pymongo.MongoClient(backend.global_db_url)
    db = connect.get_database(backend.global_db_name)

    time_and_loc = [
        [("time", pymongo.DESCENDING)],
        [("loc", pymongo.GEOSPHERE)],
    ]
    _create_indexed_collection(db, backend.global_db_origin_collection,
                               [[("id", pymongo.ASCENDING)]] + time_and_loc)
    _create_indexed_collection(db, backend.global_db_user_collection,
                               [[("id", pymongo.ASCENDING)],
                                [("time", pymongo.DESCENDING)]])
    _create_indexed_collection(db, backend.global_db_device_collection,
                               [[("device", pymongo.ASCENDING)]] + time_and_loc)
    _create_indexed_collection(db, backend.global_db_calc_collection,
                               [[("id", pymongo.ASCENDING)]] + time_and_loc)
# Script entry point: wipe and recreate the gpsmap database schema.
if __name__ == "__main__":
    clear_db()
    build_db()
| mit |
wujinjun/TFbook | chapter5/mnist_lenet5.py | 1 | 3795 | from tensorflow.examples.tutorials.mnist import input_data
# LeNet-5-style convolutional network trained on MNIST (TF 1.x API).
# NOTE(review): combining tf.nn.softmax with a hand-written log cross-entropy
# is numerically unstable; tf.nn.softmax_cross_entropy_with_logits is the
# stable alternative -- behaviour left unchanged here.
import tensorflow as tf

mnist = input_data.read_data_sets("MNIST_data/",one_hot = True) # load the MNIST data set
sess = tf.InteractiveSession() # open an interactive Session

def weight_variable(shape): # create a weight variable
    initial = tf.truncated_normal(shape,stddev=0.1) # truncated-normal init, stddev 0.1
    return tf.Variable(initial)

def bias_variable(shape): # create a bias variable
    initial = tf.constant(0.1,shape=shape) # small positive constant, avoids dead neurons
    return tf.Variable(initial)

def conv2d(x,W): # convolution layer taking (input x, weights W)
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME') # 2-D convolution, stride 1, SAME padding

def max_pool_2x2(x): # pooling layer
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME') # 2x2 max pooling, stride 2

x = tf.placeholder(tf.float32,[None,784]) # input: any batch size, 784 features per image
y_ = tf.placeholder(tf.float32,[None,10]) # labels: any batch size, 10 classes (one-hot)
x_image = tf.reshape(x,[-1,28,28,1]) # reshape flat rows to 28x28 images, 1 channel; -1 = variable batch

W_conv1 = weight_variable([5,5,1,32]) # first conv layer: 5x5 kernels, 1 input channel, 32 filters
b_conv1 = bias_variable([32]) # one bias per filter
h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1) # first conv layer with ReLU activation
h_pool1 = max_pool_2x2(h_conv1) # pooling after the first conv layer

W_conv2 = weight_variable([5,5,32,64]) # second conv layer: 5x5 kernels, 32 input channels, 64 filters
b_conv2 = bias_variable([64]) # biases for the 64 filters
h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

W_fc1 = weight_variable([7*7*64,1024]) # first fully connected layer: 7*7*64 inputs, 1024 hidden units
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)

keep_prob = tf.placeholder(tf.float32) # placeholder for the dropout keep probability
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob) # dropout applied to the fc1 output

W_fc2 = weight_variable([1024,10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2) # softmax output layer on top of dropout

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y_conv),reduction_indices=[1])) # cross-entropy loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) # Adam optimizer minimizing the loss
correct_prediction = tf.equal(tf.argmax(y_conv,1),tf.argmax(y_,1)) # compare predictions with true labels
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # mean accuracy
tf.global_variables_initializer().run()

for i in range(20000): # 20000 training iterations
    batch = mnist.train.next_batch(50) # mini-batches of 50 samples
    if i%100==0: # report training accuracy every 100 iterations
        train_accuracy = accuracy.eval(feed_dict={x:batch[0],y_:batch[1],keep_prob:1.0}) # keep_prob 1.0 when evaluating
        print("step %d ,training accuracy %g" %(i,train_accuracy))
    train_step.run(feed_dict={x:batch[0],y_:batch[1],keep_prob:0.5}) # keep_prob 0.5 when training
print("test accuracy %g" %accuracy.eval(feed_dict={x:mnist.test.images,y_:mnist.test.labels,keep_prob:1.0})) # final test-set accuracy, keep_prob 1.0
charbeljc/OCB | addons/hw_posbox_upgrade/controllers/main.py | 172 | 4161 | # -*- coding: utf-8 -*-
import logging
import os
import time
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import threading
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
upgrade_template = """
<!DOCTYPE HTML>
<html>
<head>
<title>Odoo's PosBox - Software Upgrade</title>
<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>
<script>
$(function(){
var upgrading = false;
$('#upgrade').click(function(){
console.log('click');
if(!upgrading){
upgrading = true;
$('#upgrade').text('Upgrading, Please Wait');
$.ajax({
url:'/hw_proxy/perform_upgrade/'
}).then(function(status){
$('#upgrade').html('Upgrade Successful<br \\>Click to Restart the PosBox');
$('#upgrade').off('click');
$('#upgrade').click(function(){
$.ajax({ url:'/hw_proxy/perform_restart' })
$('#upgrade').text('Restarting');
$('#upgrade').off('click');
setTimeout(function(){
window.location = '/'
},30*1000);
});
},function(){
$('#upgrade').text('Upgrade Failed');
});
}
});
});
</script>
<style>
body {
width: 480px;
margin: 60px auto;
font-family: sans-serif;
text-align: justify;
color: #6B6B6B;
}
.centering{
text-align: center;
}
#upgrade {
padding: 20px;
background: rgb(121, 197, 107);
color: white;
border-radius: 3px;
text-align: center;
margin: 30px;
text-decoration: none;
display: inline-block;
}
</style>
</head>
<body>
<h1>PosBox Software Upgrade</h1>
<p>
This tool will help you perform an upgrade of the PosBox's software.
However the preferred method to upgrade the posbox is to flash the sd-card with
the <a href='http://nightly.openerp.com/trunk/posbox/'>latest image</a>. The upgrade
procedure is explained into to the <a href='/hw_proxy/static/doc/manual.pdf'>PosBox manual</a>
</p>
<p>
To upgrade the posbox, click on the upgrade button. The upgrade will take a few minutes. <b>Do not reboot</b> the PosBox during the upgrade.
</p>
<div class='centering'>
<a href='#' id='upgrade'>Upgrade</a>
</div>
</body>
</html>
"""
class PosboxUpgrader(hw_proxy.Proxy):
    """HTTP endpoints backing the PosBox software-upgrade page.

    A lock plus a 30-second cool-down guard against concurrent or rapidly
    repeated upgrade/restart requests.
    """

    def __init__(self):
        super(PosboxUpgrader,self).__init__()
        self.upgrading = threading.Lock()  # serializes upgrade/restart requests
        self.last_upgrade = 0              # epoch time of the last upgrade/restart

    @http.route('/hw_proxy/upgrade', type='http', auth='none', )
    def upgrade(self):
        """Serve the static upgrade page (upgrade_template above)."""
        return upgrade_template

    @http.route('/hw_proxy/perform_upgrade', type='http', auth='none')
    def perform_upgrade(self):
        """Run the on-device update script, at most once per 30 seconds."""
        self.upgrading.acquire()
        if time.time() - self.last_upgrade < 30:
            self.upgrading.release()
            return 'UPTODATE'
        else:
            # Blocking call: the HTTP request only returns once the script ends.
            os.system('/bin/bash /home/pi/openerp/update.sh')
            self.last_upgrade = time.time()
            self.upgrading.release()
            return 'SUCCESS'

    @http.route('/hw_proxy/perform_restart', type='http', auth='none')
    def perform_restart(self):
        """Restart the PosBox services, at most once per 30 seconds."""
        self.upgrading.acquire()
        if time.time() - self.last_upgrade < 30:
            self.upgrading.release()
            return 'RESTARTED'
        else:
            os.system('/bin/bash /home/pi/openerp/restart.sh')
            self.last_upgrade = time.time()
            self.upgrading.release()
            return 'SUCCESS'
| agpl-3.0 |
jaraco/pytest | _pytest/vendored_packages/pluggy.py | 19 | 30741 | """
PluginManager, basic initialization and tracing.
pluggy is the crystallized core of plugin management as used
by some 150 plugins for pytest.
Pluggy uses semantic versioning. Breaking changes are only foreseen for
Major releases (incremented X in "X.Y.Z"). If you want to use pluggy in
your project you should thus use a dependency restriction like
"pluggy>=0.1.0,<1.0" to avoid surprises.
pluggy is concerned with hook specification, hook implementations and hook
calling. For any given hook specification a hook call invokes up to N implementations.
A hook implementation can influence its position and type of execution:
if attributed "tryfirst" or "trylast" it will be tried to execute
first or last. However, if attributed "hookwrapper" an implementation
can wrap all calls to non-hookwrapper implementations. A hookwrapper
can thus execute some code ahead and after the execution of other hooks.
Hook specification is done by way of a regular python function where
both the function name and the names of all its arguments are significant.
Each hook implementation function is verified against the original specification
function, including the names of all its arguments. To allow for hook specifications
to evolve over the lifetime of a project, hook implementations can
accept less arguments. One can thus add new arguments and semantics to
a hook specification by adding another argument typically without breaking
existing hook implementations.
The chosen approach is meant to let a hook designer think carefully about
which objects are needed by an extension writer. By contrast, subclass-based
extension mechanisms often expose a lot more state and behaviour than needed,
thus restricting future developments.
Pluggy currently consists of functionality for:
- a way to register new hook specifications. Without a hook
specification no hook calling can be performed.
- a registry of plugins which contain hook implementation functions. It
is possible to register plugins for which a hook specification is not yet
known and validate all hooks when the system is in a more referentially
consistent state. Setting an "optionalhook" attribution to a hook
implementation will avoid PluginValidationError's if a specification
is missing. This allows to have optional integration between plugins.
- a "hook" relay object from which you can launch 1:N calls to
registered hook implementation functions
- a mechanism for ordering hook implementation functions
- mechanisms for two different type of 1:N calls: "firstresult" for when
the call should stop when the first implementation returns a non-None result.
And the other (default) way of guaranteeing that all hook implementations
will be called and their non-None result collected.
- mechanisms for "historic" extension points such that all newly
registered functions will receive all hook calls that happened
before their registration.
- a mechanism for discovering plugin objects which are based on
setuptools based entry points.
- a simple tracing mechanism, including tracing of plugin calls and
their arguments.
"""
import sys
import inspect
__version__ = '0.4.0'
__all__ = ["PluginManager", "PluginValidationError", "HookCallError",
"HookspecMarker", "HookimplMarker"]
_py3 = sys.version_info > (3, 0)
class HookspecMarker:
    """Decorator factory that tags functions as hook specifications.

    Instances are parameterized by a project name; a PluginManager created
    with the same project name will discover the marked functions through
    add_hookspecs().
    """

    def __init__(self, project_name):
        self.project_name = project_name

    def __call__(self, function=None, firstresult=False, historic=False):
        """Mark *function* as a hook spec, or return a configured decorator.

        firstresult: the 1:N hook call stops at the first non-None result.
        historic: calls are memorized and replayed for plugins registered
        later.  A hook cannot be both historic and firstresult.
        """
        def mark(func):
            if historic and firstresult:
                raise ValueError("cannot have a historic firstresult hook")
            setattr(func, self.project_name + "_spec",
                    dict(firstresult=firstresult, historic=historic))
            return func

        # Support both @marker and @marker(...) decoration styles.
        return mark(function) if function is not None else mark
class HookimplMarker:
    """Decorator factory that tags functions as hook implementations.

    Instances are parameterized by a project name; a PluginManager created
    with the same project name will discover the marked functions through
    register().
    """

    def __init__(self, project_name):
        self.project_name = project_name

    def __call__(self, function=None, hookwrapper=False, optionalhook=False,
                 tryfirst=False, trylast=False):
        """Mark *function* as a hook impl, or return a configured decorator.

        optionalhook: no error if no matching hook specification exists.
        tryfirst / trylast: run as early / as late as possible in the chain
        of implementations for a specification.
        hookwrapper: the impl is a generator yielding exactly once; code
        before the yield runs before all non-wrapper impls, code after the
        yield runs after them, and the yield receives a _CallOutcome.
        """
        def mark(func):
            setattr(func, self.project_name + "_impl",
                    dict(hookwrapper=hookwrapper, optionalhook=optionalhook,
                         tryfirst=tryfirst, trylast=trylast))
            return func

        # Support both @marker and @marker(...) decoration styles.
        return mark if function is None else mark(function)
def normalize_hookimpl_opts(opts):
    """Fill in False for any hookimpl option missing from the *opts* dict."""
    for key in ("tryfirst", "trylast", "hookwrapper", "optionalhook"):
        opts.setdefault(key, False)
class _TagTracer:
def __init__(self):
self._tag2proc = {}
self.writer = None
self.indent = 0
def get(self, name):
return _TagTracerSub(self, (name,))
def format_message(self, tags, args):
if isinstance(args[-1], dict):
extra = args[-1]
args = args[:-1]
else:
extra = {}
content = " ".join(map(str, args))
indent = " " * self.indent
lines = [
"%s%s [%s]\n" % (indent, content, ":".join(tags))
]
for name, value in extra.items():
lines.append("%s %s: %s\n" % (indent, name, value))
return lines
def processmessage(self, tags, args):
if self.writer is not None and args:
lines = self.format_message(tags, args)
self.writer(''.join(lines))
try:
self._tag2proc[tags](tags, args)
except KeyError:
pass
def setwriter(self, writer):
self.writer = writer
def setprocessor(self, tags, processor):
if isinstance(tags, str):
tags = tuple(tags.split(":"))
else:
assert isinstance(tags, tuple)
self._tag2proc[tags] = processor
class _TagTracerSub:
def __init__(self, root, tags):
self.root = root
self.tags = tags
def __call__(self, *args):
self.root.processmessage(self.tags, args)
def setmyprocessor(self, processor):
self.root.setprocessor(self.tags, processor)
def get(self, name):
return self.__class__(self.root, self.tags + (name,))
def _raise_wrapfail(wrap_controller, msg):
co = wrap_controller.gi_code
raise RuntimeError("wrap_controller at %r %s:%d %s" %
(co.co_name, co.co_filename, co.co_firstlineno, msg))
def _wrapped_call(wrap_controller, func):
    """Drive a hookwrapper generator around the invocation of *func*.

    The generator must yield exactly once: code before the yield runs first,
    the yield receives a _CallOutcome for func(), and code after the yield
    runs last.  Returns the (possibly overridden) call result or re-raises.
    """
    try:
        next(wrap_controller)  # run the pre-yield part
    except StopIteration:
        _raise_wrapfail(wrap_controller, "did not yield")
    outcome = _CallOutcome(func)
    try:
        # Resume after the yield; the generator must now finish.
        wrap_controller.send(outcome)
        _raise_wrapfail(wrap_controller, "has second yield")
    except StopIteration:
        pass
    return outcome.get_result()
class _CallOutcome:
    """ Outcome of a function call, either an exception or a proper result.
    Calling the ``get_result`` method will return the result or reraise
    the exception raised when the function was called. """

    # sys.exc_info() triple if the call raised, else None.
    excinfo = None

    def __init__(self, func):
        try:
            self.result = func()
        except BaseException:
            self.excinfo = sys.exc_info()

    def force_result(self, result):
        """Replace the outcome with *result*, discarding any stored exception."""
        self.result = result
        self.excinfo = None

    def get_result(self):
        """Return the stored result, or re-raise the stored exception."""
        if self.excinfo is None:
            return self.result
        else:
            ex = self.excinfo
            if _py3:
                # Python 3: re-raise preserving the original traceback.
                raise ex[1].with_traceback(ex[2])
            # Python 2 path: _reraise is defined via exec elsewhere in this
            # module because its syntax is invalid on Python 3.
            _reraise(*ex)  # noqa
# Python 2 only: define _reraise via exec, because the three-argument
# ``raise`` statement is a syntax error under Python 3 and may not appear
# literally in this file.
if not _py3:
    exec("""
def _reraise(cls, val, tb):
    raise cls, val, tb
""")
class _TracedHookExecution:
    """Wraps a PluginManager's inner hook executor so that *before* and
    *after* callbacks run around every hook call."""

    def __init__(self, pluginmanager, before, after):
        self.pluginmanager = pluginmanager
        self.before = before
        self.after = after
        # Remember the executor we replace so undo() can restore it;
        # stacking two tracers is not supported.
        self.oldcall = pluginmanager._inner_hookexec
        assert not isinstance(self.oldcall, _TracedHookExecution)
        self.pluginmanager._inner_hookexec = self

    def __call__(self, hook, hook_impls, kwargs):
        self.before(hook.name, hook_impls, kwargs)
        outcome = _CallOutcome(lambda: self.oldcall(hook, hook_impls, kwargs))
        # after() always runs, even when the hook call raised; get_result()
        # then re-raises the stored exception.
        self.after(outcome, hook.name, hook_impls, kwargs)
        return outcome.get_result()

    def undo(self):
        """Restore the original (untraced) hook executor."""
        self.pluginmanager._inner_hookexec = self.oldcall
class PluginManager(object):
""" Core Pluginmanager class which manages registration
of plugin objects and 1:N hook calling.
You can register new hooks by calling ``add_hookspec(module_or_class)``.
You can register plugin objects (which contain hooks) by calling
``register(plugin)``. The Pluginmanager is initialized with a
prefix that is searched for in the names of the dict of registered
plugin objects. An optional excludefunc allows to blacklist names which
are not considered as hooks despite a matching prefix.
For debugging purposes you can call ``enable_tracing()``
which will subsequently send debug information to the trace helper.
"""
def __init__(self, project_name, implprefix=None):
""" if implprefix is given implementation functions
will be recognized if their name matches the implprefix. """
self.project_name = project_name
self._name2plugin = {}
self._plugin2hookcallers = {}
self._plugin_distinfo = []
self.trace = _TagTracer().get("pluginmanage")
self.hook = _HookRelay(self.trace.root.get("hook"))
self._implprefix = implprefix
self._inner_hookexec = lambda hook, methods, kwargs: \
_MultiCall(methods, kwargs, hook.spec_opts).execute()
def _hookexec(self, hook, methods, kwargs):
# called from all hookcaller instances.
# enable_tracing will set its own wrapping function at self._inner_hookexec
return self._inner_hookexec(hook, methods, kwargs)
def register(self, plugin, name=None):
""" Register a plugin and return its canonical name or None if the name
is blocked from registering. Raise a ValueError if the plugin is already
registered. """
plugin_name = name or self.get_canonical_name(plugin)
if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
if self._name2plugin.get(plugin_name, -1) is None:
return # blocked plugin, return None to indicate no registration
raise ValueError("Plugin already registered: %s=%s\n%s" %
(plugin_name, plugin, self._name2plugin))
# XXX if an error happens we should make sure no state has been
# changed at point of return
self._name2plugin[plugin_name] = plugin
# register matching hook implementations of the plugin
self._plugin2hookcallers[plugin] = hookcallers = []
for name in dir(plugin):
hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
if hookimpl_opts is not None:
normalize_hookimpl_opts(hookimpl_opts)
method = getattr(plugin, name)
hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
hook = getattr(self.hook, name, None)
if hook is None:
hook = _HookCaller(name, self._hookexec)
setattr(self.hook, name, hook)
elif hook.has_spec():
self._verify_hook(hook, hookimpl)
hook._maybe_apply_history(hookimpl)
hook._add_hookimpl(hookimpl)
hookcallers.append(hook)
return plugin_name
def parse_hookimpl_opts(self, plugin, name):
method = getattr(plugin, name)
try:
res = getattr(method, self.project_name + "_impl", None)
except Exception:
res = {}
if res is not None and not isinstance(res, dict):
# false positive
res = None
elif res is None and self._implprefix and name.startswith(self._implprefix):
res = {}
return res
def unregister(self, plugin=None, name=None):
""" unregister a plugin object and all its contained hook implementations
from internal data structures. """
if name is None:
assert plugin is not None, "one of name or plugin needs to be specified"
name = self.get_name(plugin)
if plugin is None:
plugin = self.get_plugin(name)
# if self._name2plugin[name] == None registration was blocked: ignore
if self._name2plugin.get(name):
del self._name2plugin[name]
for hookcaller in self._plugin2hookcallers.pop(plugin, []):
hookcaller._remove_plugin(plugin)
return plugin
def set_blocked(self, name):
""" block registrations of the given name, unregister if already registered. """
self.unregister(name=name)
self._name2plugin[name] = None
def is_blocked(self, name):
""" return True if the name blogs registering plugins of that name. """
return name in self._name2plugin and self._name2plugin[name] is None
def add_hookspecs(self, module_or_class):
""" add new hook specifications defined in the given module_or_class.
Functions are recognized if they have been decorated accordingly. """
names = []
for name in dir(module_or_class):
spec_opts = self.parse_hookspec_opts(module_or_class, name)
if spec_opts is not None:
hc = getattr(self.hook, name, None)
if hc is None:
hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
setattr(self.hook, name, hc)
else:
# plugins registered this hook without knowing the spec
hc.set_specification(module_or_class, spec_opts)
for hookfunction in (hc._wrappers + hc._nonwrappers):
self._verify_hook(hc, hookfunction)
names.append(name)
if not names:
raise ValueError("did not find any %r hooks in %r" %
(self.project_name, module_or_class))
def parse_hookspec_opts(self, module_or_class, name):
    """Return the spec options attached to ``module_or_class.<name>``,
    or None if the attribute is not marked as a hook specification."""
    candidate = getattr(module_or_class, name)
    marker = self.project_name + "_spec"
    return getattr(candidate, marker, None)
def get_plugins(self):
    """Return the set of all registered plugin objects."""
    return set(self._plugin2hookcallers.keys())
def is_registered(self, plugin):
    """Return True if ``plugin`` has already been registered."""
    registry = self._plugin2hookcallers
    return plugin in registry
def get_canonical_name(self, plugin):
    """Return the canonical name for a plugin object.

    Note that a plugin may have been registered under a different,
    caller-supplied name; use ``get_name(plugin)`` to obtain the name a
    registered plugin is actually known by.
    """
    name = getattr(plugin, "__name__", None)
    return name or str(id(plugin))
def get_plugin(self, name):
    """Return the plugin registered under ``name``, or None."""
    registry = self._name2plugin
    return registry.get(name)
def has_plugin(self, name):
    """Return True if some plugin is registered under ``name``."""
    return self.get_plugin(name) is not None
def get_name(self, plugin):
    """Return the registration name for ``plugin``, or None if it is
    not registered."""
    for registered_name, registered_plugin in self._name2plugin.items():
        if plugin == registered_plugin:
            return registered_name
    return None
def _verify_hook(self, hook, hookimpl):
if hook.is_historic() and hookimpl.hookwrapper:
raise PluginValidationError(
"Plugin %r\nhook %r\nhistoric incompatible to hookwrapper" %
(hookimpl.plugin_name, hook.name))
for arg in hookimpl.argnames:
if arg not in hook.argnames:
raise PluginValidationError(
"Plugin %r\nhook %r\nargument %r not available\n"
"plugin definition: %s\n"
"available hookargs: %s" %
(hookimpl.plugin_name, hook.name, arg,
_formatdef(hookimpl.function), ", ".join(hook.argnames)))
def check_pending(self):
    """ Verify that all hooks which have not been verified against
    a hook specification are optional, otherwise raise PluginValidationError"""
    for name in self.hook.__dict__:
        if name[0] != "_":
            hook = getattr(self.hook, name)
            if not hook.has_spec():
                # caller was created by a plugin registration without any
                # spec; that is only acceptable for impls marked optionalhook
                for hookimpl in (hook._wrappers + hook._nonwrappers):
                    if not hookimpl.optionalhook:
                        raise PluginValidationError(
                            "unknown hook %r in plugin %r" %
                            (name, hookimpl.plugin))
def load_setuptools_entrypoints(self, entrypoint_name):
    """ Load modules from querying the specified setuptools entrypoint name.
    Return the number of loaded plugins.

    NOTE(review): the return value is the length of the accumulated
    ``_plugin_distinfo`` list, i.e. it counts plugins loaded across all
    calls, not only this one.
    """
    # imported lazily so pkg_resources is only required when entrypoint
    # loading is actually used
    from pkg_resources import (iter_entry_points, DistributionNotFound,
                               VersionConflict)
    for ep in iter_entry_points(entrypoint_name):
        # is the plugin registered or blocked?
        if self.get_plugin(ep.name) or self.is_blocked(ep.name):
            continue
        try:
            plugin = ep.load()
        except DistributionNotFound:
            # unmet requirements: skip silently rather than fail startup
            continue
        except VersionConflict as e:
            raise PluginValidationError(
                "Plugin %r could not be loaded: %s!" % (ep.name, e))
        self.register(plugin, name=ep.name)
        self._plugin_distinfo.append((plugin, ep.dist))
    return len(self._plugin_distinfo)
def list_plugin_distinfo(self):
    """Return a fresh list of (plugin, distinfo) pairs for all
    setuptools-registered plugins."""
    return self._plugin_distinfo[:]
def list_name_plugin(self):
    """Return a list of (name, plugin) pairs for all registered plugins."""
    return [(name, plugin) for name, plugin in self._name2plugin.items()]
def get_hookcallers(self, plugin):
    """Return the hook callers for ``plugin``, or None if unknown."""
    callers = self._plugin2hookcallers
    return callers.get(plugin)
def add_hookcall_monitoring(self, before, after):
    """ add before/after tracing functions for all hooks
    and return an undo function which, when called,
    will remove the added tracers.

    ``before(hook_name, hook_impls, kwargs)`` will be called ahead
    of all hook calls and receive a hookcaller instance, a list
    of HookImpl instances and the keyword arguments for the hook call.

    ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
    same arguments as ``before`` but also a :py:class:`_CallOutcome`` object
    which represents the result of the overall hook call.
    """
    # _TracedHookExecution (defined elsewhere in this module) swaps out
    # self._hookexec on construction; .undo restores the original
    return _TracedHookExecution(self, before, after).undo
def enable_tracing(self):
    """ enable tracing of hook calls and return an undo function. """
    hooktrace = self.hook._trace

    def before(hook_name, methods, kwargs):
        # indent so nested hook calls render as a tree in the trace output
        hooktrace.root.indent += 1
        hooktrace(hook_name, kwargs)

    def after(outcome, hook_name, methods, kwargs):
        # only log the result when the call did not raise
        if outcome.excinfo is None:
            hooktrace("finish", hook_name, "-->", outcome.result)
        hooktrace.root.indent -= 1

    return self.add_hookcall_monitoring(before, after)
def subset_hook_caller(self, name, remove_plugins):
    """ Return a new _HookCaller instance for the named method
    which manages calls to all registered plugins except the
    ones from remove_plugins. """
    orig = getattr(self.hook, name)
    # only plugins that actually implement this hook need excluding;
    # if none do, the original caller can be reused as-is
    plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
    if plugins_to_remove:
        hc = _HookCaller(orig.name, orig._hookexec, orig._specmodule_or_class,
                         orig.spec_opts)
        for hookimpl in (orig._wrappers + orig._nonwrappers):
            plugin = hookimpl.plugin
            if plugin not in plugins_to_remove:
                hc._add_hookimpl(hookimpl)
                # we also keep track of this hook caller so it
                # gets properly removed on plugin unregistration
                self._plugin2hookcallers.setdefault(plugin, []).append(hc)
        return hc
    return orig
class _MultiCall:
    """ execute a call into multiple python functions/methods. """

    # XXX note that the __multicall__ argument is supported only
    # for pytest compatibility reasons.  It was never officially
    # supported there and is explicitly deprecated since 2.8
    # so we can remove it soon, allowing to avoid the below recursion
    # in execute() and simplify/speed up the execute loop.

    def __init__(self, hook_impls, kwargs, specopts={}):
        # NOTE(review): the mutable default is harmless only because
        # specopts is never mutated (read via .get() in execute()).
        self.hook_impls = hook_impls
        self.kwargs = kwargs
        self.kwargs["__multicall__"] = self
        self.specopts = specopts

    def execute(self):
        """Run the remaining hook impls in LIFO order.

        Returns the first non-None result if the spec was declared with
        ``firstresult``; otherwise the list of all non-None results.
        A hookwrapper impl receives control via _wrapped_call, which
        re-enters execute() for the remaining implementations.
        """
        all_kwargs = self.kwargs
        self.results = results = []
        firstresult = self.specopts.get("firstresult")

        while self.hook_impls:
            hook_impl = self.hook_impls.pop()
            try:
                args = [all_kwargs[argname] for argname in hook_impl.argnames]
            except KeyError:
                # re-scan to name the exact missing argument in the error
                for argname in hook_impl.argnames:
                    if argname not in all_kwargs:
                        raise HookCallError(
                            "hook call must provide argument %r" % (argname,))
            if hook_impl.hookwrapper:
                return _wrapped_call(hook_impl.function(*args), self.execute)
            res = hook_impl.function(*args)
            if res is not None:
                if firstresult:
                    return res
                results.append(res)

        if not firstresult:
            return results

    def __repr__(self):
        status = "%d meths" % (len(self.hook_impls),)
        if hasattr(self, "results"):
            status = ("%d results, " % len(self.results)) + status
        return "<_MultiCall %s, kwargs=%r>" % (status, self.kwargs)
def varnames(func, startindex=None):
    """Return the tuple of positional argument names for a callable.

    Classes are inspected through their ``__init__`` (dropping ``self``);
    for methods the ``self`` parameter is likewise excluded.  Arguments
    carrying default values are trimmed off the end.  The result is
    memoized on ``func.__dict__`` whenever that mapping is writable.
    """
    # grab the cache from the *original* object before any reassignment
    memo = getattr(func, "__dict__", {})
    if "_varnames" in memo:
        return memo["_varnames"]

    if inspect.isclass(func):
        try:
            func = func.__init__
        except AttributeError:
            return ()
        startindex = 1
    else:
        if not inspect.isfunction(func) and not inspect.ismethod(func):
            # arbitrary callables: inspect their __call__ instead
            try:
                func = getattr(func, '__call__', func)
            except Exception:
                return ()
        if startindex is None:
            # bound methods carry an implicit first parameter to skip
            startindex = int(inspect.ismethod(func))

    try:
        code = func.__code__
    except AttributeError:
        return ()
    try:
        names = code.co_varnames[startindex:code.co_argcount]
    except AttributeError:
        names = ()
    else:
        defaults = func.__defaults__
        if defaults:
            names = names[:-len(defaults)]

    try:
        memo["_varnames"] = names
    except TypeError:
        # e.g. a class __dict__ is a read-only mappingproxy: skip caching
        pass
    return names
class _HookRelay:
""" hook holder object for performing 1:N hook calls where N is the number
of registered plugins.
"""
def __init__(self, trace):
self._trace = trace
class _HookCaller(object):
    """Manages calls into all registered implementations of one hook.

    Wrapper implementations (``_wrappers``) and regular ones
    (``_nonwrappers``) are kept in separate lists; __call__ hands
    ``_nonwrappers + _wrappers`` to the executor, which pops from the
    end, so each list effectively runs in LIFO order with wrappers
    outermost.
    """

    def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None):
        self.name = name
        self._wrappers = []
        self._nonwrappers = []
        self._hookexec = hook_execute
        if specmodule_or_class is not None:
            assert spec_opts is not None
            self.set_specification(specmodule_or_class, spec_opts)

    def has_spec(self):
        # set_specification() records the spec holder; its presence marks
        # the hook as specified
        return hasattr(self, "_specmodule_or_class")

    def set_specification(self, specmodule_or_class, spec_opts):
        assert not self.has_spec()
        self._specmodule_or_class = specmodule_or_class
        specfunc = getattr(specmodule_or_class, self.name)
        # for class-based specs skip the implicit first ("self") parameter
        argnames = varnames(specfunc, startindex=inspect.isclass(specmodule_or_class))
        assert "self" not in argnames  # sanity check
        self.argnames = ["__multicall__"] + list(argnames)
        self.spec_opts = spec_opts
        if spec_opts.get("historic"):
            self._call_history = []

    def is_historic(self):
        return hasattr(self, "_call_history")

    def _remove_plugin(self, plugin):
        def remove(wrappers):
            for i, method in enumerate(wrappers):
                if method.plugin == plugin:
                    del wrappers[i]
                    return True
        # try wrappers first, then nonwrappers; remove() returns None when
        # the plugin was not found in the given list
        if remove(self._wrappers) is None:
            if remove(self._nonwrappers) is None:
                raise ValueError("plugin %r not found" % (plugin,))

    def _add_hookimpl(self, hookimpl):
        # lists are executed back-to-front, so appending means "runs first";
        # tryfirst/trylast steer the position within the proper list
        if hookimpl.hookwrapper:
            methods = self._wrappers
        else:
            methods = self._nonwrappers
        if hookimpl.trylast:
            methods.insert(0, hookimpl)
        elif hookimpl.tryfirst:
            methods.append(hookimpl)
        else:
            # find last non-tryfirst method
            i = len(methods) - 1
            while i >= 0 and methods[i].tryfirst:
                i -= 1
            methods.insert(i + 1, hookimpl)

    def __repr__(self):
        return "<_HookCaller %r>" % (self.name,)

    def __call__(self, **kwargs):
        # historic hooks must be invoked via call_historic()
        assert not self.is_historic()
        return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)

    def call_historic(self, proc=None, kwargs=None):
        # remember the call so later registrations can be replayed
        self._call_history.append((kwargs or {}, proc))
        # historizing hooks don't return results
        self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)

    def call_extra(self, methods, kwargs):
        """ Call the hook with some additional temporarily participating
        methods using the specified kwargs as call parameters. """
        old = list(self._nonwrappers), list(self._wrappers)
        for method in methods:
            opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
            hookimpl = HookImpl(None, "<temp>", method, opts)
            self._add_hookimpl(hookimpl)
        try:
            return self(**kwargs)
        finally:
            # restore the implementation lists captured above
            self._nonwrappers, self._wrappers = old

    def _maybe_apply_history(self, method):
        # replay every earlier call_historic() invocation on a newly
        # registered implementation
        if self.is_historic():
            for kwargs, proc in self._call_history:
                res = self._hookexec(self, [method], kwargs)
                if res and proc is not None:
                    proc(res[0])
class HookImpl:
    """Record describing one registered hook implementation function."""

    def __init__(self, plugin, plugin_name, function, hook_impl_opts):
        self.function = function
        # positional argument names, derived from the function's signature
        self.argnames = varnames(self.function)
        self.plugin = plugin
        self.opts = hook_impl_opts
        self.plugin_name = plugin_name
        # expose option flags (hookwrapper/tryfirst/trylast/...) as attributes
        self.__dict__.update(hook_impl_opts)
class PluginValidationError(Exception):
    """Raised when a plugin or hook implementation fails validation."""
class HookCallError(Exception):
    """Raised when a hook call is performed with missing arguments."""
if hasattr(inspect, 'signature'):
def _formatdef(func):
return "%s%s" % (
func.__name__,
str(inspect.signature(func))
)
else:
def _formatdef(func):
return "%s%s" % (
func.__name__,
inspect.formatargspec(*inspect.getargspec(func))
)
| mit |
Pablo126/SSBW | Entrega1/lib/python3.5/site-packages/rest_framework_mongoengine/serializers.py | 1 | 26652 | import copy
from collections import OrderedDict
from mongoengine import fields as me_fields
from mongoengine.errors import ValidationError as me_ValidationError
from rest_framework import fields as drf_fields
from rest_framework import serializers
from rest_framework.compat import unicode_to_repr
from rest_framework.utils.field_mapping import ClassLookupDict
from rest_framework_mongoengine import fields as drfm_fields
from rest_framework_mongoengine.validators import (
UniqueTogetherValidator, UniqueValidator
)
from .repr import serializer_repr
from .utils import (
COMPOUND_FIELD_TYPES, get_field_info, get_field_kwargs,
get_generic_embedded_kwargs, get_nested_embedded_kwargs,
get_nested_relation_kwargs, get_relation_kwargs, has_default,
is_abstract_model
)
def raise_errors_on_nested_writes(method_name, serializer, validated_data):
    """Assert that validated_data contains no writable nested data the
    default create()/update() cannot handle.

    EmbeddedDocumentSerializer fields are exempt because
    DocumentSerializer.recursive_save knows how to persist them.
    Raises AssertionError with an explanatory message otherwise.
    """
    # *** inherited from DRF 3, altered for EmbeddedDocumentSerializer to pass ***
    # Guard 1: writable nested serializers (other than embedded ones).
    # BUG FIX: the first message was missing a space between the implicitly
    # concatenated literals ("...writable nestedfields...").
    assert not any(
        isinstance(field, serializers.BaseSerializer) and
        not isinstance(field, EmbeddedDocumentSerializer) and
        (key in validated_data)
        for key, field in serializer.fields.items()
    ), (
        'The `.{method_name}()` method does not support writable nested '
        'fields by default.\nWrite an explicit `.{method_name}()` method for '
        'serializer `{module}.{class_name}`, or set `read_only=True` on '
        'nested serializer fields.'.format(
            method_name=method_name,
            module=serializer.__class__.__module__,
            class_name=serializer.__class__.__name__
        )
    )

    # Guard 2: writable dotted-source fields holding nested structures.
    assert not any(
        '.' in field.source and (key in validated_data) and
        isinstance(validated_data[key], (list, dict))
        for key, field in serializer.fields.items()
    ), (
        'The `.{method_name}()` method does not support writable dotted-source '
        'fields by default.\nWrite an explicit `.{method_name}()` method for '
        'serializer `{module}.{class_name}`, or set `read_only=True` on '
        'dotted-source serializer fields.'.format(
            method_name=method_name,
            module=serializer.__class__.__module__,
            class_name=serializer.__class__.__name__
        )
    )
class DocumentSerializer(serializers.ModelSerializer):
    """ Serializer for Documents.

    Recognized primitive fields:

        * ``StringField``
        * ``URLField``
        * ``EmailField``
        * ``IntField``
        * ``LongField``
        * ``FloatField``
        * ``DecimalField``
        * ``BooleanField``
        * ``DateTimeField``
        * ``ComplexDateTimeField``
        * ``ObjectIdField``
        * ``SequenceField`` (assumes it has integer counter)
        * ``UUIDField``
        * ``GeoPointField``
        * ``GeoJsonBaseField`` (all those fields)

    Compound fields: ``ListField`` and ``DictField`` are mapped to
    corresponding DRF fields, with respect to nested field specification.

    The ``ReferenceField`` is handled like ``ForeignKey`` in DRF: there
    nested serializer autogenerated if serializer depth greater then 0,
    otherwise it's handled by it's own (results as ``str(id)``).

    For ``EmbeddedDocumentField`` also nested serializer autogenerated for
    non-zero depth, otherwise it is skipped. TODO: THIS IS PROBABLY WRONG
    AND SHOULD BE FIXED.

    Generic fields ``GenericReferenceField`` and
    ``GenericEmbeddedDocumentField`` are handled by their own with
    corresponding serializer fields.

    Not well supported or untested:

        ``FileField``
        ``ImageField``
        ``BinaryField``

    All other fields are mapped to ``DocumentField`` and probably will work
    wrong.
    """
    # mongoengine field class -> DRF serializer field class; consulted via
    # ClassLookupDict in build_standard_field(), so subclass matches apply
    serializer_field_mapping = {
        me_fields.StringField: drf_fields.CharField,
        me_fields.URLField: drf_fields.URLField,
        me_fields.EmailField: drf_fields.EmailField,
        me_fields.IntField: drf_fields.IntegerField,
        me_fields.LongField: drf_fields.IntegerField,
        me_fields.FloatField: drf_fields.FloatField,
        me_fields.DecimalField: drf_fields.DecimalField,
        me_fields.BooleanField: drf_fields.BooleanField,
        me_fields.DateTimeField: drf_fields.DateTimeField,
        me_fields.ComplexDateTimeField: drf_fields.DateTimeField,
        me_fields.ObjectIdField: drfm_fields.ObjectIdField,
        me_fields.FileField: drfm_fields.FileField,
        me_fields.ImageField: drfm_fields.ImageField,
        me_fields.SequenceField: drf_fields.IntegerField,
        me_fields.UUIDField: drf_fields.UUIDField,
        me_fields.GeoPointField: drfm_fields.GeoPointField,
        me_fields.GeoJsonBaseField: drfm_fields.GeoJSONField,
        me_fields.DynamicField: drfm_fields.DynamicField,
        me_fields.BaseField: drfm_fields.DocumentField
    }

    # induct failure if they occasionally used somewhere:
    # DRF's relational machinery does not apply to mongoengine documents
    serializer_related_field = None
    serializer_related_to_field = None
    serializer_url_field = None

    " class to create fields for references "
    serializer_reference_field = drfm_fields.ReferenceField
    " class to create fields for generic references "
    serializer_reference_generic = drfm_fields.GenericReferenceField
    " class to create nested serializers for references (defaults to DocumentSerializer) "
    serializer_reference_nested = None
    " class to create fields for generic embedded "
    serializer_embedded_generic = drfm_fields.GenericEmbeddedDocumentField
    " class to create nested serializers for embedded (defaults to EmbeddedDocumentSerializer) "
    serializer_embedded_nested = None
    " class to create nested serializers for embedded at max recursion "
    serializer_embedded_bottom = drf_fields.HiddenField

    # whether recursive_save() calls instance.save(); embedded-document
    # serializers override this with False
    _saving_instances = True
def create(self, validated_data):
    """Create and save a new document instance from ``validated_data``.

    Embedded documents are instantiated recursively before the top-level
    document; TypeError and mongoengine ValidationError are re-raised
    with more actionable messages.
    """
    raise_errors_on_nested_writes('create', self, validated_data)

    ModelClass = self.Meta.model
    try:
        # recursively create EmbeddedDocuments from their validated data
        # before creating the document instance itself
        instance = self.recursive_save(validated_data)
    except TypeError as exc:
        msg = (
            'Got a `TypeError` when calling `%s.objects.create()`. '
            'This may be because you have a writable field on the '
            'serializer class that is not a valid argument to '
            '`%s.objects.create()`. You may need to make the field '
            'read-only, or override the %s.create() method to handle '
            'this correctly.\nOriginal exception text was: %s.' %
            (
                ModelClass.__name__,
                ModelClass.__name__,
                type(self).__name__,
                exc
            )
        )
        raise TypeError(msg)
    except me_ValidationError as exc:
        msg = (
            'Got a `ValidationError` when calling `%s.objects.create()`. '
            'This may be because request data satisfies serializer validations '
            'but not Mongoengine`s. You may need to check consistency between '
            '%s and %s.\nIf that is not the case, please open a ticket '
            'regarding this issue on https://github.com/umutbozkurt/django-rest-framework-mongoengine/issues'
            '\nOriginal exception was: %s' %
            (
                ModelClass.__name__,
                ModelClass.__name__,
                type(self).__name__,
                exc
            )
        )
        raise me_ValidationError(msg)

    return instance
def to_internal_value(self, data):
    """
    Calls super() from DRF, but with an addition.

    Creates initial_data and _validated_data for nested
    EmbeddedDocumentSerializers, so that recursive_save could make
    use of them.
    """
    # for EmbeddedDocumentSerializers create initial data
    # so that _get_dynamic_data could use them
    for field in self._writable_fields:
        if isinstance(field, EmbeddedDocumentSerializer) and field.field_name in data:
            field.initial_data = data[field.field_name]

    ret = super(DocumentSerializer, self).to_internal_value(data)

    # for EmbeddedDocumentSerializers create _validated_data
    # so that create()/update() could use them
    for field in self._writable_fields:
        if isinstance(field, EmbeddedDocumentSerializer) and field.field_name in ret:
            field._validated_data = ret[field.field_name]

    return ret
def recursive_save(self, validated_data, instance=None):
    '''Recursively traverses validated_data and creates EmbeddedDocuments
    of the appropriate subtype from them.

    Returns Mongoengine model instance.
    '''
    # me_data is an analogue of validated_data, but contains
    # mongoengine EmbeddedDocument instances for nested data structures,
    # instead of OrderedDicts, for example:
    # validated_data = {'id:, "1", 'embed': OrderedDict({'a': 'b'})}
    # me_data = {'id': "1", 'embed': <EmbeddedDocument>}
    me_data = dict()

    for key, value in validated_data.items():
        try:
            field = self.fields[key]

            # for EmbeddedDocumentSerializers, call recursive_save
            if isinstance(field, EmbeddedDocumentSerializer):
                me_data[key] = field.recursive_save(value)

            # same for lists of EmbeddedDocumentSerializers i.e.
            # ListField(EmbeddedDocumentField) or EmbeddedDocumentListField
            elif ((isinstance(field, serializers.ListSerializer) or
                   isinstance(field, serializers.ListField)) and
                  isinstance(field.child, EmbeddedDocumentSerializer)):
                me_data[key] = []
                for datum in value:
                    me_data[key].append(field.child.recursive_save(datum))

            # same for dicts of EmbeddedDocumentSerializers (or, speaking
            # in Mongoengine terms, MapField(EmbeddedDocument(Embed))
            elif (isinstance(field, drfm_fields.DictField) and
                  hasattr(field, "child") and
                  isinstance(field.child, EmbeddedDocumentSerializer)):
                me_data[key] = {}
                for datum_key, datum_value in value.items():
                    me_data[key][datum_key] = field.child.recursive_save(datum_value)
            else:
                me_data[key] = value
        except KeyError:  # this is dynamic data: pass it through unchanged
            me_data[key] = value

    # create (if needed), save (if needed) and return mongoengine instance
    if not instance:
        instance = self.Meta.model(**me_data)
    else:
        for key, value in me_data.items():
            setattr(instance, key, value)

    if self._saving_instances:
        instance.save()

    return instance
def update(self, instance, validated_data):
    """Apply ``validated_data`` to ``instance`` and save it recursively."""
    # same nested-write guard as create(); embedded serializers are exempt
    raise_errors_on_nested_writes('update', self, validated_data)
    instance = self.recursive_save(validated_data, instance)
    return instance
def get_model(self):
    """Return the Document class this serializer maps (``Meta.model``)."""
    meta = self.Meta
    return meta.model
def get_fields(self):
    """Build the serializer's field dict by introspecting Meta.model.

    Explicitly declared fields take precedence; everything else is
    generated through build_field() and adjusted by extra_kwargs and
    uniqueness-derived kwargs.
    """
    assert hasattr(self, 'Meta'), (
        'Class {serializer_class} missing "Meta" attribute'.format(
            serializer_class=self.__class__.__name__
        )
    )
    assert hasattr(self.Meta, 'model'), (
        'Class {serializer_class} missing "Meta.model" attribute'.format(
            serializer_class=self.__class__.__name__
        )
    )

    depth = getattr(self.Meta, 'depth', 0)
    # separate recursion limit for embedded documents, which may self-nest
    depth_embedding = getattr(self.Meta, 'depth_embedding', 5)

    if depth is not None:
        assert depth >= 0, "'depth' may not be negative."
        assert depth <= 10, "'depth' may not be greater than 10."

    declared_fields = copy.deepcopy(self._declared_fields)

    model = self.get_model()
    if model is None:
        return {}

    if is_abstract_model(model):
        raise ValueError(
            'Cannot use ModelSerializer with Abstract Models.'
        )

    # Retrieve metadata about fields & relationships on the model class.
    self.field_info = get_field_info(model)
    field_names = self.get_field_names(declared_fields, self.field_info)

    # Determine any extra field arguments and hidden fields that
    # should be included
    extra_kwargs = self.get_extra_kwargs()
    extra_kwargs, hidden_fields = self.get_uniqueness_extra_kwargs(field_names, extra_kwargs)

    # Determine the fields that should be included on the serializer.
    fields = OrderedDict()

    for field_name in field_names:
        # If the field is explicitly declared on the class then use that.
        if field_name in declared_fields:
            fields[field_name] = declared_fields[field_name]
            continue

        # Determine the serializer field class and keyword arguments.
        field_class, field_kwargs = self.build_field(
            field_name, self.field_info, model, depth, depth_embedding
        )

        extra_field_kwargs = extra_kwargs.get(field_name, {})
        field_kwargs = self.include_extra_kwargs(
            field_kwargs, extra_field_kwargs
        )

        # Create the serializer field.
        fields[field_name] = field_class(**field_kwargs)

    # Add in any hidden fields.
    fields.update(hidden_fields)

    return fields
def get_field_names(self, declared_fields, model_info):
    """Return serializer field names, excluding compound-child entries."""
    names = super(DocumentSerializer, self).get_field_names(declared_fields, model_info)
    # compound fields expose their element type under "<name>.child";
    # those are internal and must not become serializer fields themselves
    return [n for n in names if '.child' not in n]
def get_default_field_names(self, declared_fields, model_info):
    """Return the default field set: pk, declared, plain, reference and
    embedded field names, in that order."""
    names = [model_info.pk.name]
    names.extend(declared_fields)
    names.extend(model_info.fields)
    names.extend(model_info.references)
    names.extend(model_info.embedded)
    return names
def build_field(self, field_name, info, model_class, nested_depth, embedded_depth):
    """Dispatch to the appropriate build_* method for ``field_name``.

    Order of resolution: plain/compound model fields, references,
    embedded documents, model properties, then unknown fields.
    """
    if field_name in info.fields_and_pk:
        model_field = info.fields_and_pk[field_name]
        if isinstance(model_field, COMPOUND_FIELD_TYPES):
            # compound fields register their element type as "<name>.child"
            child_name = field_name + '.child'
            if child_name in info.fields or child_name in info.embedded or child_name in info.references:
                child_class, child_kwargs = self.build_field(child_name, info, model_class, nested_depth, embedded_depth)
                child_field = child_class(**child_kwargs)
            else:
                child_field = None
            return self.build_compound_field(field_name, model_field, child_field)
        else:
            return self.build_standard_field(field_name, model_field)

    if field_name in info.references:
        relation_info = info.references[field_name]
        # nested serializer only while depth budget remains and the target
        # model is known (generic references have no related_model)
        if nested_depth and relation_info.related_model:
            return self.build_nested_reference_field(field_name, relation_info, nested_depth)
        else:
            return self.build_reference_field(field_name, relation_info, nested_depth)

    if field_name in info.embedded:
        relation_info = info.embedded[field_name]
        if not relation_info.related_model:
            return self.build_generic_embedded_field(field_name, relation_info, embedded_depth)
        if embedded_depth:
            return self.build_nested_embedded_field(field_name, relation_info, embedded_depth)
        else:
            # embedding recursion exhausted: emit a hidden bottom field
            return self.build_bottom_embedded_field(field_name, relation_info, embedded_depth)

    if hasattr(model_class, field_name):
        return self.build_property_field(field_name, model_class)

    return self.build_unknown_field(field_name, model_class)
def build_standard_field(self, field_name, model_field):
    """Map a primitive mongoengine field to a (DRF field class, kwargs) pair."""
    field_mapping = ClassLookupDict(self.serializer_field_mapping)
    field_class = field_mapping[model_field]
    field_kwargs = get_field_kwargs(field_name, model_field)

    if 'choices' in field_kwargs:
        # Fields with choices get coerced into `ChoiceField`
        # instead of using their regular typed field.
        field_class = self.serializer_choice_field
        # Some model fields may introduce kwargs that would not be valid
        # for the choice field. We need to strip these out.
        # Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)
        valid_kwargs = set((
            'read_only', 'write_only',
            'required', 'default', 'initial', 'source',
            'label', 'help_text', 'style',
            'error_messages', 'validators', 'allow_null', 'allow_blank',
            'choices'
        ))
        for key in list(field_kwargs.keys()):
            if key not in valid_kwargs:
                field_kwargs.pop(key)

    if 'regex' in field_kwargs:
        # a regex constraint forces a RegexField regardless of base type
        field_class = drf_fields.RegexField

    if not issubclass(field_class, drfm_fields.DocumentField):
        # `model_field` is only valid for the fallback case of
        # `ModelField`, which is used when no other typed field
        # matched to the model field.
        field_kwargs.pop('model_field', None)

    if not issubclass(field_class, drf_fields.CharField) and not issubclass(field_class, drf_fields.ChoiceField):
        # `allow_blank` is only valid for textual fields.
        field_kwargs.pop('allow_blank', None)

    if field_class is drf_fields.BooleanField and field_kwargs.get('allow_null', False):
        # nullable booleans need the dedicated NullBooleanField
        field_kwargs.pop('allow_null', None)
        field_kwargs.pop('default', None)
        field_class = drf_fields.NullBooleanField

    return field_class, field_kwargs
def build_compound_field(self, field_name, model_field, child_field):
    """Map a ListField/DictField to the matching DRF compound field,
    wiring in the already-built ``child_field`` when present."""
    if isinstance(model_field, me_fields.ListField):
        field_class = drf_fields.ListField
    elif isinstance(model_field, me_fields.DictField):
        field_class = drfm_fields.DictField
    else:
        # not a compound type we know how to handle
        return self.build_unknown_field(field_name, model_field.owner_document)

    field_kwargs = get_field_kwargs(field_name, model_field)
    field_kwargs.pop('model_field', None)

    if child_field is not None:
        field_kwargs['child'] = child_field

    return field_class, field_kwargs
def build_reference_field(self, field_name, relation_info, nested_depth):
    """Build a flat (non-nested) field for a reference.

    Generic references (no related_model) use the generic reference
    field class; concrete ones use serializer_reference_field.
    """
    if not relation_info.related_model:
        field_class = self.serializer_reference_generic
        field_kwargs = get_relation_kwargs(field_name, relation_info)
        if not issubclass(field_class, drfm_fields.DocumentField):
            # model_field kwarg only applies to DocumentField subclasses
            field_kwargs.pop('model_field', None)
    else:
        field_class = self.serializer_reference_field
        field_kwargs = get_relation_kwargs(field_name, relation_info)
    return field_class, field_kwargs
def build_nested_reference_field(self, field_name, relation_info, nested_depth):
    """Build an auto-generated nested serializer for a reference,
    consuming one level of the nesting depth budget."""
    subclass = self.serializer_reference_nested or DocumentSerializer

    class NestedSerializer(subclass):
        class Meta:
            model = relation_info.related_model
            fields = '__all__'
            depth = nested_depth - 1

    field_class = NestedSerializer
    field_kwargs = get_nested_relation_kwargs(field_name, relation_info)
    return field_class, field_kwargs
def build_generic_embedded_field(self, field_name, relation_info, embedded_depth):
    """Build a field for a GenericEmbeddedDocumentField (no fixed model)."""
    field_class = self.serializer_embedded_generic
    field_kwargs = get_generic_embedded_kwargs(field_name, relation_info)
    return field_class, field_kwargs
def build_nested_embedded_field(self, field_name, relation_info, embedded_depth):
    """Build an auto-generated nested serializer for an embedded document,
    consuming one level of the embedding depth budget."""
    subclass = self.serializer_embedded_nested or EmbeddedDocumentSerializer

    class EmbeddedSerializer(subclass):
        class Meta:
            model = relation_info.related_model
            fields = '__all__'
            depth_embedding = embedded_depth - 1

    field_class = EmbeddedSerializer
    field_kwargs = get_nested_embedded_kwargs(field_name, relation_info)
    return field_class, field_kwargs
def build_bottom_embedded_field(self, field_name, relation_info, embedded_depth):
    """Build the field used when embedding recursion is exhausted
    (a hidden field by default, with a None default)."""
    field_class = self.serializer_embedded_bottom
    field_kwargs = get_nested_embedded_kwargs(field_name, relation_info)
    field_kwargs['default'] = None
    return field_class, field_kwargs
def get_uniqueness_extra_kwargs(self, field_names, extra_kwargs):
    """Derive extra field kwargs from the model's unique indexes.

    Single-field unique indexes yield ``required=True`` plus a
    UniqueValidator; fields of multi-field unique indexes get
    ``required=True`` (or their model default) so that
    UniqueTogetherValidator always receives a value.  Only indexes whose
    fields are all present on the serializer are considered.

    Returns a ``(extra_kwargs, hidden_fields)`` pair; ``hidden_fields``
    is currently always empty.
    """
    model = self.Meta.model
    uniq_extra_kwargs = {}
    hidden_fields = {}

    field_names = set(field_names)
    unique_fields = set()
    unique_together_fields = set()

    # collect unique indexes whose fields are all exposed on the serializer
    uniq_indexes = filter(lambda i: i.get('unique', False), model._meta.get('index_specs', []))
    for idx in uniq_indexes:
        field_set = set(map(lambda e: e[0], idx['fields']))
        if field_names.issuperset(field_set):
            if len(field_set) == 1:
                unique_fields |= field_set
            else:
                unique_together_fields |= field_set

    for field_name in unique_fields:
        uniq_extra_kwargs[field_name] = {
            'required': True,
            'validators': [UniqueValidator(queryset=model.objects)]
        }
    for field_name in unique_together_fields:
        fld = model._fields[field_name]
        if has_default(fld):
            uniq_extra_kwargs[field_name] = {'default': fld.default}
        else:
            uniq_extra_kwargs[field_name] = {'required': True}

    # Merge the derived kwargs into any caller-supplied extra_kwargs.
    # BUG FIX: the previous code compared the *field name* against the
    # literal 'validators' (a dead branch) and then clobbered existing
    # per-field options via dict.update(); validators lists from both
    # sides are now concatenated instead of replaced.
    for key, value in uniq_extra_kwargs.items():
        if key in extra_kwargs:
            existing = extra_kwargs[key]
            if 'validators' in existing and 'validators' in value:
                value = dict(value)
                value['validators'] = list(existing['validators']) + list(value['validators'])
            existing.update(value)
        else:
            extra_kwargs[key] = value

    return extra_kwargs, hidden_fields
def get_unique_together_validators(self):
    """Return a UniqueTogetherValidator for every multi-field unique index
    whose fields are all present on the serializer."""
    model = self.Meta.model
    validators = []
    field_names = set(self.get_field_names(self._declared_fields, self.field_info))
    # the filter below already restricts to unique indexes; the old code
    # redundantly re-checked idx['unique'] inside the loop (dead code)
    uniq_indexes = filter(lambda i: i.get('unique', False), model._meta.get('index_specs', []))
    for idx in uniq_indexes:
        field_set = tuple(map(lambda e: e[0], idx['fields']))
        if len(field_set) > 1 and field_names.issuperset(set(field_set)):
            validators.append(UniqueTogetherValidator(
                queryset=model.objects,
                fields=field_set
            ))
    return validators
def get_unique_for_date_validators(self):
    """Always return no validators: unique_for_date is not supported
    on MongoDB."""
    return []
def __repr__(self):
    # delegate to the shared serializer_repr helper for an indented,
    # nested view of the serializer's fields
    return unicode_to_repr(serializer_repr(self, indent=1))
class EmbeddedDocumentSerializer(DocumentSerializer):
    """ Serializer for EmbeddedDocuments.

    Skips id field and uniqueness validation.
    When saving, skips calling instance.save
    """
    # embedded documents are persisted by their parent document's save()
    _saving_instances = False

    def get_default_field_names(self, declared_fields, model_info):
        # skip id field: embedded documents have no primary key
        return (
            list(declared_fields.keys()) +
            list(model_info.fields.keys()) +
            list(model_info.references.keys()) +
            list(model_info.embedded.keys())
        )

    def get_unique_together_validators(self):
        # skip the validators: uniqueness is not enforced inside a parent doc
        return []
class DynamicDocumentSerializer(DocumentSerializer):
    """ Serializer for DynamicDocuments.

    Maps all undefined fields to :class:`fields.DynamicField`.
    """
    def to_internal_value(self, data):
        '''
        Updates _validated_data with dynamic data, i.e. data,
        not listed in fields.
        '''
        ret = super(DynamicDocumentSerializer, self).to_internal_value(data)
        dynamic_data = self._get_dynamic_data(ret)
        ret.update(dynamic_data)
        return ret

    def _get_dynamic_data(self, validated_data):
        '''
        Returns dict of data, not declared in serializer fields.
        Should be called after self.is_valid().
        '''
        result = {}

        for key in self.initial_data:
            if key not in validated_data:
                try:
                    field = self.fields[key]
                    # no exception? this is either SkipField or error
                    # in particular, this might be a read-only field
                    # that was mistakenly given a value
                    if not isinstance(field, drf_fields.SkipField):
                        msg = (
                            'Field %s is missing from validated data,'
                            'but is not a SkipField!'
                        ) % key
                        raise AssertionError(msg)
                except KeyError:  # ok, this is dynamic data
                    result[key] = self.initial_data[key]
        return result

    def to_representation(self, instance):
        # serialize declared fields first, then append the document's
        # dynamic fields using ad-hoc DynamicField instances
        ret = super(DynamicDocumentSerializer, self).to_representation(instance)
        for field_name, field in self._map_dynamic_fields(instance).items():
            ret[field_name] = field.to_representation(field.get_attribute(instance))
        return ret

    def _map_dynamic_fields(self, document):
        # build a bound DynamicField for every dynamic attribute present
        # on this particular document instance
        dynamic_fields = {}
        if document._dynamic:
            for name, field in document._dynamic_fields.items():
                dfield = drfm_fields.DynamicField(model_field=field, required=False)
                dfield.bind(name, self)
                dynamic_fields[name] = dfield
        return dynamic_fields
| gpl-3.0 |
drayanaindra/shoop | doc/_ext/djangodocs.py | 321 | 12049 | """
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import __version__ as sphinx_ver, addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util.compat import Directive
from sphinx.util.console import bold
from sphinx.util.nodes import set_source_info
from sphinx.writers.html import SmartyPantsHTMLTranslator
# RE for option descriptions without a '--' prefix; captures the option
# name and its (optional) argument text up to the next option or the end.
simple_option_desc_re = re.compile(
    r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
    """Register the Django-specific roles, directives, node handlers and
    builder with the Sphinx application."""
    # cross-reference types: each gets a directive and a matching role
    app.add_crossref_type(
        directivename="setting",
        rolename="setting",
        indextemplate="pair: %s; setting",
    )
    app.add_crossref_type(
        directivename="templatetag",
        rolename="ttag",
        indextemplate="pair: %s; template tag"
    )
    app.add_crossref_type(
        directivename="templatefilter",
        rolename="tfilter",
        indextemplate="pair: %s; template filter"
    )
    app.add_crossref_type(
        directivename="fieldlookup",
        rolename="lookup",
        indextemplate="pair: %s; field lookup type",
    )
    # django-admin command/option descriptions with custom signature parsers
    app.add_description_unit(
        directivename="django-admin",
        rolename="djadmin",
        indextemplate="pair: %s; django-admin command",
        parse_node=parse_django_admin_node,
    )
    app.add_description_unit(
        directivename="django-admin-option",
        rolename="djadminopt",
        indextemplate="pair: %s; django-admin command-line option",
        parse_node=parse_django_adminopt_node,
    )
    app.add_config_value('django_next_version', '0.0', True)
    app.add_directive('versionadded', VersionDirective)
    app.add_directive('versionchanged', VersionDirective)
    app.add_builder(DjangoStandaloneHTMLBuilder)

    # register the snippet directive
    app.add_directive('snippet', SnippetWithFilename)
    # register a node for snippet directive so that the xml parser
    # knows how to handle the enter/exit parsing event
    app.add_node(snippet_with_filename,
                 html=(visit_snippet, depart_snippet_literal),
                 latex=(visit_snippet_latex, depart_snippet_latex),
                 man=(visit_snippet_literal, depart_snippet_literal),
                 text=(visit_snippet_literal, depart_snippet_literal),
                 texinfo=(visit_snippet_literal, depart_snippet_literal))
class snippet_with_filename(nodes.literal_block):
    """
    Subclass the literal_block to override the visit/depart event handlers
    """
    # No behavior of its own: the subclass only exists so app.add_node()
    # can attach format-specific visitors to it.
    pass
def visit_snippet_literal(self, node):
    """
    default literal block handler
    """
    # Delegate to the writer's normal literal_block handling (man/text/...).
    self.visit_literal_block(node)
def depart_snippet_literal(self, node):
    """
    default literal block handler
    """
    # Delegate to the writer's normal literal_block handling.
    self.depart_literal_block(node)
def visit_snippet(self, node):
    """
    HTML document generator visit handler.

    Highlights the snippet body and wraps it in a ``highlight-<lang>``
    div preceded by a ``snippet-filename`` header div.  Raises
    ``nodes.SkipNode`` because the node is rendered entirely here.
    """
    lang = self.highlightlang
    # enable line numbers when the snippet is long enough
    linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
    fname = node['filename']
    highlight_args = node.get('highlight_args', {})
    if 'language' in node:
        # code-block directives
        lang = node['language']
        highlight_args['force'] = True
    if 'linenos' in node:
        linenos = node['linenos']

    def warner(msg):
        self.builder.warn(msg, (self.builder.current_docname, node.line))

    highlighted = self.highlighter.highlight_block(node.rawsource, lang,
                                                   warn=warner,
                                                   linenos=linenos,
                                                   **highlight_args)
    starttag = self.starttag(node, 'div', suffix='',
                             CLASS='highlight-%s' % lang)
    self.body.append(starttag)
    # fix: drop the stray empty-string literal ('''...''') that was
    # accidentally concatenated onto this line; output is unchanged.
    self.body.append('<div class="snippet-filename">%s</div>\n' % (fname,))
    self.body.append(highlighted)
    self.body.append('</div>\n')
    raise nodes.SkipNode
def visit_snippet_latex(self, node):
    """
    Latex document generator visit handler
    """
    # Start collecting verbatim text; the real work happens on depart.
    self.verbatim = ''
def depart_snippet_latex(self, node):
    """
    Latex document generator depart handler.
    """
    # Everything collected since visit_snippet_latex is the snippet body.
    code = self.verbatim.rstrip('\n')
    lang = self.hlsettingstack[-1][0]
    linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
    fname = node['filename']
    highlight_args = node.get('highlight_args', {})
    if 'language' in node:
        # code-block directives
        lang = node['language']
        highlight_args['force'] = True
    if 'linenos' in node:
        linenos = node['linenos']

    def warner(msg):
        self.builder.warn(msg, (self.curfilestack[-1], node.line))

    hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
                                              linenos=linenos,
                                              **highlight_args)
    # render the filename above the code in a grey box
    self.body.append('\n{\\colorbox[rgb]{0.9,0.9,0.9}'
                     '{\\makebox[\\textwidth][l]'
                     '{\\small\\texttt{%s}}}}\n' % (fname,))

    if self.table:
        # inside tables Verbatim must become OriginalVerbatim
        hlcode = hlcode.replace('\\begin{Verbatim}',
                                '\\begin{OriginalVerbatim}')
        self.table.has_problematic = True
        self.table.has_verbatim = True

    hlcode = hlcode.rstrip()[:-14]  # strip \end{Verbatim}
    hlcode = hlcode.rstrip() + '\n'
    # re-append the matching \end{...Verbatim} for the environment used
    self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
                     (self.table and 'Original' or ''))
    self.verbatim = None
class SnippetWithFilename(Directive):
    """
    The 'snippet' directive that allows to add the filename (optional)
    of a code snippet in the document. This is modeled after CodeBlock.
    """
    has_content = True
    optional_arguments = 1        # the (optional) highlight language
    option_spec = {'filename': directives.unchanged_required}

    def run(self):
        """Build a snippet_with_filename node from the directive content."""
        code = '\n'.join(self.content)
        literal = snippet_with_filename(code, code)
        if self.arguments:
            literal['language'] = self.arguments[0]
        literal['filename'] = self.options['filename']
        set_source_info(self, literal)
        return [literal]
class VersionDirective(Directive):
    """Implements ``versionadded``/``versionchanged`` with Django-specific
    handling of the development version number."""
    has_content = True
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {}

    def run(self):
        if len(self.arguments) > 1:
            msg = """Only one argument accepted for directive '{directive_name}::'.
            Comments should be provided as content,
            not as an extra argument.""".format(directive_name=self.name)
            raise self.error(msg)

        env = self.state.document.settings.env
        ret = []
        node = addnodes.versionmodified()
        ret.append(node)

        # the version matching django_next_version is shown as
        # "Development version" instead of the raw number
        if self.arguments[0] == env.config.django_next_version:
            node['version'] = "Development version"
        else:
            node['version'] = self.arguments[0]

        node['type'] = self.name
        if self.content:
            self.state.nested_parse(self.content, self.content_offset, node)
        env.note_versionchange(node['type'], node['version'], node, self.lineno)
        return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
    """
    Django-specific reST to HTML tweaks.
    """

    # Don't use border=1, which docutils does by default.
    def visit_table(self, node):
        self.context.append(self.compact_p)
        self.compact_p = True
        self._table_row_index = 0  # Needed by Sphinx
        self.body.append(self.starttag(node, 'table', CLASS='docutils'))

    def depart_table(self, node):
        self.compact_p = self.context.pop()
        self.body.append('</table>\n')

    def visit_desc_parameterlist(self, node):
        self.body.append('(')  # by default sphinx puts <big> around the "("
        self.first_param = 1
        self.optional_param_level = 0
        self.param_separator = node.child_text_separator
        self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
                                         for c in node.children])

    def depart_desc_parameterlist(self, node):
        self.body.append(')')

    if sphinx_ver < '1.0.8':
        #
        # Don't apply smartypants to literal blocks
        #
        def visit_literal_block(self, node):
            self.no_smarty += 1
            SmartyPantsHTMLTranslator.visit_literal_block(self, node)

        def depart_literal_block(self, node):
            SmartyPantsHTMLTranslator.depart_literal_block(self, node)
            self.no_smarty -= 1

    #
    # Turn the "new in version" stuff (versionadded/versionchanged) into a
    # better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious that I'd like.
    #
    # FIXME: these messages are all hardcoded in English. We need to change
    # that to accommodate other language docs, but I can't work out how to make
    # that work.
    #
    version_text = {
        'versionchanged': 'Changed in Django %s',
        'versionadded': 'New in Django %s',
    }

    def visit_versionmodified(self, node):
        self.body.append(
            self.starttag(node, 'div', CLASS=node['type'])
        )
        version_text = self.version_text.get(node['type'])
        if version_text:
            title = "%s%s" % (
                version_text % node['version'],
                ":" if len(node) else "."
            )
            self.body.append('<span class="title">%s</span> ' % title)

    def depart_versionmodified(self, node):
        self.body.append("</div>\n")

    # Give each section a unique ID -- nice for custom CSS hooks
    def visit_section(self, node):
        # temporarily prefix the ids with 's-' for rendering, then restore
        old_ids = node.get('ids', [])
        node['ids'] = ['s-' + i for i in old_ids]
        node['ids'].extend(old_ids)
        SmartyPantsHTMLTranslator.visit_section(self, node)
        node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
    """Record the current django-admin command on the build environment and
    render the full ``django-admin <sig>`` title into the signature node."""
    # the first whitespace-delimited token is the command name itself
    env._django_curr_admin_command = sig.split(' ')[0]
    full_title = "django-admin %s" % sig
    signode += addnodes.desc_name(full_title, full_title)
    return sig
def parse_django_adminopt_node(env, sig, signode):
    """A copy of sphinx.directives.CmdoptionDesc.parse_signature()

    Parses an option signature, appending desc_name/desc_addname nodes to
    *signode* for each option found, and returns the first option name.
    Falls back to a simpler regex for options without a '--' prefix.
    Raises ValueError when no option can be parsed at all.
    """
    from sphinx.domains.std import option_desc_re

    def _append_options(regex):
        # Append one name/args node pair per match of *regex* in *sig*,
        # separating successive options with ', '.  Returns (first option
        # name, number of matches).
        first = ''
        count = 0
        for m in regex.finditer(sig):
            optname, args = m.groups()
            if count:
                signode += addnodes.desc_addname(', ', ', ')
            signode += addnodes.desc_name(optname, optname)
            signode += addnodes.desc_addname(args, args)
            if not count:
                first = optname
            count += 1
        return first, count

    firstname, count = _append_options(option_desc_re)
    if not count:
        # no '--'-style options found: retry with the bare-name pattern
        firstname, count = _append_options(simple_option_desc_re)
    if not firstname:
        raise ValueError
    return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
    """
    Subclass to add some extra things we need.
    """

    name = 'djangohtml'

    def finish(self):
        """After the normal HTML build, dump the built-in template tags and
        filters to templatebuiltins.js for use by the docs' JavaScript."""
        super(DjangoStandaloneHTMLBuilder, self).finish()
        self.info(bold("writing templatebuiltins.js..."))
        xrefs = self.env.domaindata["std"]["objects"]
        # collect only the tags/filters documented in ref/templates/builtins
        templatebuiltins = {
            "ttags": [n for ((t, n), (l, a)) in xrefs.items()
                      if t == "templatetag" and l == "ref/templates/builtins"],
            "tfilters": [n for ((t, n), (l, a)) in xrefs.items()
                         if t == "templatefilter" and l == "ref/templates/builtins"],
        }
        outfilename = os.path.join(self.outdir, "templatebuiltins.js")
        with open(outfilename, 'w') as fp:
            fp.write('var django_template_builtins = ')
            json.dump(templatebuiltins, fp)
            fp.write(';\n')
| agpl-3.0 |
hsum/sqlalchemy | test/orm/test_cycles.py | 22 | 40070 | """Tests cyclical mapper relationships.
We might want to try an automated generate of much of this, all combos of
T1<->T2, with o2m or m2o between them, and a third T3 with o2m/m2o to one/both
T1/T2.
"""
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, backref, \
create_session, sessionmaker
from sqlalchemy.testing import eq_
from sqlalchemy.testing.assertsql import RegexSQL, CompiledSQL, AllOf
from sqlalchemy.testing import fixtures
class SelfReferentialTest(fixtures.MappedTest):
    """A self-referential mapper with an additional list of child objects."""

    @classmethod
    def define_tables(cls, metadata):
        # t1 refers to itself via parent_c1; t2 rows belong to a t1 row
        Table('t1', metadata,
              Column('c1', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('parent_c1', Integer, ForeignKey('t1.c1')),
              Column('data', String(20)))
        Table('t2', metadata,
              Column('c1', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('c1id', Integer, ForeignKey('t1.c1')),
              Column('data', String(20)))

    @classmethod
    def setup_classes(cls):
        class C1(cls.Basic):
            def __init__(self, data=None):
                self.data = data

        class C2(cls.Basic):
            def __init__(self, data=None):
                self.data = data

    def test_single(self):
        """Flush and delete a small self-referential tree."""
        C1, t1 = self.classes.C1, self.tables.t1

        mapper(C1, t1, properties={
            'c1s': relationship(C1, cascade="all"),
            'parent': relationship(C1,
                                   primaryjoin=t1.c.parent_c1 == t1.c.c1,
                                   remote_side=t1.c.c1,
                                   lazy='select',
                                   uselist=False)})
        a = C1('head c1')
        a.c1s.append(C1('another c1'))

        sess = create_session()
        sess.add(a)
        sess.flush()
        sess.delete(a)
        sess.flush()

    def test_many_to_one_only(self):
        """
        test that the circular dependency sort can assemble a many-to-one
        dependency processor when only the object on the "many" side is
        actually in the list of modified objects.
        """
        C1, t1 = self.classes.C1, self.tables.t1

        mapper(C1, t1, properties={
            'parent': relationship(C1,
                                   primaryjoin=t1.c.parent_c1 == t1.c.c1,
                                   remote_side=t1.c.c1)})

        c1 = C1()
        sess = create_session()
        sess.add(c1)
        sess.flush()
        sess.expunge_all()
        c1 = sess.query(C1).get(c1.c1)
        c2 = C1()
        c2.parent = c1
        sess.add(c2)
        sess.flush()
        assert c2.parent_c1 == c1.c1

    def test_cycle(self):
        """Flush/delete a deeper tree mixing self-referential C1 children
        and C2 children (delete-orphan cascade)."""
        C2, C1, t2, t1 = (self.classes.C2,
                          self.classes.C1,
                          self.tables.t2,
                          self.tables.t1)

        mapper(C1, t1, properties={
            'c1s': relationship(C1, cascade="all"),
            'c2s': relationship(mapper(C2, t2), cascade="all, delete-orphan")})

        a = C1('head c1')
        a.c1s.append(C1('child1'))
        a.c1s.append(C1('child2'))
        a.c1s[0].c1s.append(C1('subchild1'))
        a.c1s[0].c1s.append(C1('subchild2'))
        a.c1s[1].c2s.append(C2('child2 data1'))
        a.c1s[1].c2s.append(C2('child2 data2'))
        sess = create_session()
        sess.add(a)
        sess.flush()

        sess.delete(a)
        sess.flush()

    def test_setnull_ondelete(self):
        """Deleting a parent nulls the child's FK rather than deleting it."""
        C1, t1 = self.classes.C1, self.tables.t1

        mapper(C1, t1, properties={
            'children': relationship(C1)
        })

        sess = create_session()
        c1 = C1()
        c2 = C1()
        c1.children.append(c2)
        sess.add(c1)
        sess.flush()
        assert c2.parent_c1 == c1.c1

        sess.delete(c1)
        sess.flush()
        assert c2.parent_c1 is None

        sess.expire_all()
        assert c2.parent_c1 is None
class SelfReferentialNoPKTest(fixtures.MappedTest):
    """A self-referential relationship that joins on a column other than the primary key column"""

    @classmethod
    def define_tables(cls, metadata):
        # the self-referential join is on 'uuid', not the surrogate 'id'
        Table('item', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('uuid', String(32), unique=True, nullable=False),
              Column('parent_uuid', String(32), ForeignKey('item.uuid'),
                     nullable=True))

    @classmethod
    def setup_classes(cls):
        class TT(cls.Basic):
            def __init__(self):
                # derive a unique uuid from the instance's identity
                self.uuid = hex(id(self))

    @classmethod
    def setup_mappers(cls):
        item, TT = cls.tables.item, cls.classes.TT

        mapper(TT, item, properties={
            'children': relationship(
                TT,
                remote_side=[item.c.parent_uuid],
                backref=backref('parent', remote_side=[item.c.uuid]))})

    def test_basic(self):
        """Children flushed through a non-PK join get the parent's uuid."""
        TT = self.classes.TT

        t1 = TT()
        t1.children.append(TT())
        t1.children.append(TT())

        s = create_session()
        s.add(t1)
        s.flush()
        s.expunge_all()

        t = s.query(TT).filter_by(id=t1.id).one()
        eq_(t.children[0].parent_uuid, t1.uuid)

    def test_lazy_clause(self):
        """Lazy-loading 'parent' resolves correctly through the uuid join."""
        TT = self.classes.TT

        s = create_session()
        t1 = TT()
        t2 = TT()
        t1.children.append(t2)
        s.add(t1)
        s.flush()
        s.expunge_all()

        t = s.query(TT).filter_by(id=t2.id).one()
        eq_(t.uuid, t2.uuid)
        eq_(t.parent.uuid, t1.uuid)
class InheritTestOne(fixtures.MappedTest):
    """Joined-table inheritance where one subclass references another
    subclass through a non-nullable FK."""

    @classmethod
    def define_tables(cls, metadata):
        Table("parent", metadata,
              Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
              Column("parent_data", String(50)),
              Column("type", String(10)))

        Table("child1", metadata,
              Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
              Column("child1_data", String(50)))

        Table("child2", metadata,
              Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
              # non-nullable: forces the flush to order child1 before child2
              Column("child1_id", Integer, ForeignKey("child1.id"),
                     nullable=False),
              Column("child2_data", String(50)))

    @classmethod
    def setup_classes(cls):
        class Parent(cls.Basic):
            pass

        class Child1(Parent):
            pass

        class Child2(Parent):
            pass

    @classmethod
    def setup_mappers(cls):
        child1, child2, parent, Parent, Child1, Child2 = (cls.tables.child1,
                                                          cls.tables.child2,
                                                          cls.tables.parent,
                                                          cls.classes.Parent,
                                                          cls.classes.Child1,
                                                          cls.classes.Child2)

        mapper(Parent, parent)
        mapper(Child1, child1, inherits=Parent)
        mapper(Child2, child2, inherits=Parent, properties=dict(
            child1=relationship(Child1,
                                primaryjoin=child2.c.child1_id == child1.c.id)))

    def test_many_to_one_only(self):
        """test similar to SelfReferentialTest.testmanytooneonly"""

        Child1, Child2 = self.classes.Child1, self.classes.Child2

        session = create_session()

        c1 = Child1()
        c1.child1_data = "qwerty"
        session.add(c1)
        session.flush()
        session.expunge_all()

        c1 = session.query(Child1).filter_by(child1_data="qwerty").one()
        c2 = Child2()
        c2.child1 = c1
        c2.child2_data = "asdfgh"
        session.add(c2)

        # the flush will fail if the UOW does not set up a many-to-one DP
        # attached to a task corresponding to c1, since "child1_id" is not
        # nullable
        session.flush()
class InheritTestTwo(fixtures.MappedTest):
    """
    The fix in BiDirectionalManyToOneTest raised this issue, regarding the
    'circular sort' containing UOWTasks that were still polymorphic, which
    could create duplicate entries in the final sort
    """

    @classmethod
    def define_tables(cls, metadata):
        # a <-> c reference each other; b inherits from a (joined table)
        Table('a', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('cid', Integer, ForeignKey('c.id')))

        Table('b', metadata,
              Column('id', Integer, ForeignKey("a.id"), primary_key=True),
              )

        Table('c', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('aid', Integer,
                     ForeignKey('a.id', name="foo")))

    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass

        class B(A):
            pass

        class C(cls.Basic):
            pass

    def test_flush(self):
        a, A, c, b, C, B = (self.tables.a,
                            self.classes.A,
                            self.tables.c,
                            self.tables.b,
                            self.classes.C,
                            self.classes.B)

        mapper(A, a, properties={
            'cs': relationship(C, primaryjoin=a.c.cid == c.c.id)})

        mapper(B, b, inherits=A, inherit_condition=b.c.id == a.c.id)

        mapper(C, c, properties={
            'arel': relationship(A, primaryjoin=a.c.id == c.c.aid)})

        sess = create_session()
        bobj = B()
        sess.add(bobj)
        cobj = C()
        sess.add(cobj)
        sess.flush()
class BiDirectionalManyToOneTest(fixtures.MappedTest):
    """Two mappers with many-to-one relationships to each other, plus a
    third mapper with non-nullable FKs to both."""

    # rebuild the tables for each test since the mappings vary
    run_define_tables = 'each'

    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('data', String(30)),
              Column('t2id', Integer, ForeignKey('t2.id')))
        Table('t2', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('data', String(30)),
              Column('t1id', Integer,
                     ForeignKey('t1.id', name="foo_fk")))
        Table('t3', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('data', String(30)),
              Column('t1id', Integer, ForeignKey('t1.id'), nullable=False),
              Column('t2id', Integer, ForeignKey('t2.id'), nullable=False))

    @classmethod
    def setup_classes(cls):
        class T1(cls.Basic):
            pass

        class T2(cls.Basic):
            pass

        class T3(cls.Basic):
            pass

    @classmethod
    def setup_mappers(cls):
        t2, T2, T3, t1, t3, T1 = (cls.tables.t2,
                                  cls.classes.T2,
                                  cls.classes.T3,
                                  cls.tables.t1,
                                  cls.tables.t3,
                                  cls.classes.T1)

        mapper(T1, t1, properties={
            't2': relationship(T2, primaryjoin=t1.c.t2id == t2.c.id)})
        mapper(T2, t2, properties={
            't1': relationship(T1, primaryjoin=t2.c.t1id == t1.c.id)})
        mapper(T3, t3, properties={
            't1': relationship(T1),
            't2': relationship(T2)})

    def test_reflush(self):
        T2, T3, T1 = (self.classes.T2,
                      self.classes.T3,
                      self.classes.T1)

        o1 = T1()
        o1.t2 = T2()
        sess = create_session()
        sess.add(o1)
        sess.flush()

        # the bug here is that the dependency sort comes up with T1/T2 in a
        # cycle, but there are no T1/T2 objects to be saved. therefore no
        # "cyclical subtree" gets generated, and one or the other of T1/T2
        # gets lost, and processors on T3 don't fire off. the test will then
        # fail because the FK's on T3 are not nullable.
        o3 = T3()
        o3.t1 = o1
        o3.t2 = o1.t2
        sess.add(o3)
        sess.flush()

    def test_reflush_2(self):
        """A variant on test_reflush()"""

        T2, T3, T1 = (self.classes.T2,
                      self.classes.T3,
                      self.classes.T1)

        o1 = T1()
        o1.t2 = T2()
        sess = create_session()
        sess.add(o1)
        sess.flush()

        # in this case, T1, T2, and T3 tasks will all be in the cyclical
        # tree normally. the dependency processors for T3 are part of the
        # 'extradeps' collection so they all get assembled into the tree
        # as well.
        o1a = T1()
        o2a = T2()
        sess.add(o1a)
        sess.add(o2a)
        o3b = T3()
        o3b.t1 = o1a
        o3b.t2 = o2a
        sess.add(o3b)

        o3 = T3()
        o3.t1 = o1
        o3.t2 = o1.t2
        sess.add(o3)
        sess.flush()
class BiDirectionalOneToManyTest(fixtures.MappedTest):
    """tests two mappers with a one-to-many relationship to each other."""

    # rebuild tables per test; mappings are configured inside the test
    run_define_tables = 'each'

    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata,
              Column('c1', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('c2', Integer, ForeignKey('t2.c1')))

        Table('t2', metadata,
              Column('c1', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('c2', Integer,
                     ForeignKey('t1.c1', name='t1c1_fk')))

    @classmethod
    def setup_classes(cls):
        class C1(cls.Basic):
            pass

        class C2(cls.Basic):
            pass

    def test_cycle(self):
        C2, C1, t2, t1 = (self.classes.C2,
                          self.classes.C1,
                          self.tables.t2,
                          self.tables.t1)

        # each side has a one-to-many collection of the other
        mapper(C2, t2, properties={
            'c1s': relationship(C1,
                                primaryjoin=t2.c.c1 == t1.c.c2,
                                uselist=True)})
        mapper(C1, t1, properties={
            'c2s': relationship(C2,
                                primaryjoin=t1.c.c1 == t2.c.c2,
                                uselist=True)})

        a = C1()
        b = C2()
        c = C1()
        d = C2()
        e = C2()
        f = C2()
        a.c2s.append(b)
        d.c1s.append(c)
        b.c1s.append(c)
        sess = create_session()
        sess.add_all((a, b, c, d, e, f))
        sess.flush()
class BiDirectionalOneToManyTest2(fixtures.MappedTest):
    """Two mappers with a one-to-many relationship to each other,
    with a second one-to-many on one of the mappers"""

    run_define_tables = 'each'

    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata,
              Column('c1', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('c2', Integer, ForeignKey('t2.c1')),
              test_needs_autoincrement=True)

        Table('t2', metadata,
              Column('c1', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('c2', Integer,
                     ForeignKey('t1.c1', name='t1c1_fq')),
              test_needs_autoincrement=True)

        # extra one-to-many target hanging off of t1
        Table('t1_data', metadata,
              Column('c1', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('t1id', Integer, ForeignKey('t1.c1')),
              Column('data', String(20)),
              test_needs_autoincrement=True)

    @classmethod
    def setup_classes(cls):
        class C1(cls.Basic):
            pass

        class C2(cls.Basic):
            pass

        class C1Data(cls.Basic):
            pass

    @classmethod
    def setup_mappers(cls):
        t2, t1, C1Data, t1_data, C2, C1 = (cls.tables.t2,
                                           cls.tables.t1,
                                           cls.classes.C1Data,
                                           cls.tables.t1_data,
                                           cls.classes.C2,
                                           cls.classes.C1)

        mapper(C2, t2, properties={
            'c1s': relationship(C1,
                                primaryjoin=t2.c.c1 == t1.c.c2,
                                uselist=True)})
        mapper(C1, t1, properties={
            'c2s': relationship(C2,
                                primaryjoin=t1.c.c1 == t2.c.c2,
                                uselist=True),
            'data': relationship(mapper(C1Data, t1_data))})

    def test_cycle(self):
        C2, C1, C1Data = (self.classes.C2,
                          self.classes.C1,
                          self.classes.C1Data)

        a = C1()
        b = C2()
        c = C1()
        d = C2()
        e = C2()
        f = C2()
        a.c2s.append(b)
        d.c1s.append(c)
        b.c1s.append(c)
        a.data.append(C1Data(data='c1data1'))
        a.data.append(C1Data(data='c1data2'))
        c.data.append(C1Data(data='c1data3'))
        sess = create_session()
        sess.add_all((a, b, c, d, e, f))
        sess.flush()

        sess.delete(d)
        sess.delete(c)
        sess.flush()
class OneToManyManyToOneTest(fixtures.MappedTest):
    """
    Tests two mappers, one has a one-to-many on the other mapper, the other
    has a separate many-to-one relationship to the first. two tests will have
    a row for each item that is dependent on the other. without the
    "post_update" flag, such relationships raise an exception when
    dependencies are sorted.
    """

    run_define_tables = 'each'

    @classmethod
    def define_tables(cls, metadata):
        Table('ball', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('person_id', Integer,
                     ForeignKey('person.id', name='fk_person_id')),
              Column('data', String(30)))

        Table('person', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('favorite_ball_id', Integer, ForeignKey('ball.id')),
              Column('data', String(30)))

    @classmethod
    def setup_classes(cls):
        class Person(cls.Basic):
            pass

        class Ball(cls.Basic):
            pass

    def test_cycle(self):
        """
        This test has a peculiar aspect in that it doesn't create as many
        dependent relationships as the other tests, and revealed a small
        glitch in the circular dependency sorting.
        """

        person, ball, Ball, Person = (self.tables.person,
                                      self.tables.ball,
                                      self.classes.Ball,
                                      self.classes.Person)

        mapper(Ball, ball)
        mapper(Person, person, properties=dict(
            balls=relationship(Ball,
                               primaryjoin=ball.c.person_id == person.c.id,
                               remote_side=ball.c.person_id),
            favorite=relationship(Ball,
                                  primaryjoin=person.c.favorite_ball_id == ball.c.id,
                                  remote_side=ball.c.id)))

        b = Ball()
        p = Person()
        p.balls.append(b)
        sess = create_session()
        sess.add(p)
        sess.flush()

    def test_post_update_m2o(self):
        """A cycle between two rows, with a post_update on the many-to-one"""

        person, ball, Ball, Person = (self.tables.person,
                                      self.tables.ball,
                                      self.classes.Ball,
                                      self.classes.Person)

        mapper(Ball, ball)
        mapper(Person, person, properties=dict(
            balls=relationship(Ball,
                               primaryjoin=ball.c.person_id == person.c.id,
                               remote_side=ball.c.person_id,
                               post_update=False,
                               cascade="all, delete-orphan"),
            favorite=relationship(Ball,
                                  primaryjoin=person.c.favorite_ball_id == ball.c.id,
                                  remote_side=person.c.favorite_ball_id,
                                  post_update=True)))

        b = Ball(data='some data')
        p = Person(data='some data')
        p.balls.append(b)
        p.balls.append(Ball(data='some data'))
        p.balls.append(Ball(data='some data'))
        p.balls.append(Ball(data='some data'))
        p.favorite = b
        sess = create_session()
        sess.add(b)
        sess.add(p)

        # 'favorite' is set by a separate UPDATE after the inserts,
        # breaking the row cycle
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            RegexSQL("^INSERT INTO person", {'data': 'some data'}),
            RegexSQL("^INSERT INTO ball", lambda c: {'person_id': p.id, 'data': 'some data'}),
            RegexSQL("^INSERT INTO ball", lambda c: {'person_id': p.id, 'data': 'some data'}),
            RegexSQL("^INSERT INTO ball", lambda c: {'person_id': p.id, 'data': 'some data'}),
            RegexSQL("^INSERT INTO ball", lambda c: {'person_id': p.id, 'data': 'some data'}),
            CompiledSQL("UPDATE person SET favorite_ball_id=:favorite_ball_id "
                        "WHERE person.id = :person_id",
                        lambda ctx: {'favorite_ball_id': p.favorite.id, 'person_id': p.id}
                        ),
        )

        sess.delete(p)

        # on delete, the FK is nulled out first, again via post_update
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL("UPDATE person SET favorite_ball_id=:favorite_ball_id "
                        "WHERE person.id = :person_id",
                        lambda ctx: {'person_id': p.id, 'favorite_ball_id': None}),
            CompiledSQL("DELETE FROM ball WHERE ball.id = :id", None),  # lambda ctx:[{'id': 1L}, {'id': 4L}, {'id': 3L}, {'id': 2L}])
            CompiledSQL("DELETE FROM person WHERE person.id = :id", lambda ctx: [{'id': p.id}])
        )

    def test_post_update_backref(self):
        """test bidirectional post_update."""

        person, ball, Ball, Person = (self.tables.person,
                                      self.tables.ball,
                                      self.classes.Ball,
                                      self.classes.Person)

        mapper(Ball, ball)
        mapper(Person, person, properties=dict(
            balls=relationship(Ball,
                               primaryjoin=ball.c.person_id == person.c.id,
                               remote_side=ball.c.person_id, post_update=True,
                               backref=backref('person', post_update=True)
                               ),
            favorite=relationship(Ball,
                                  primaryjoin=person.c.favorite_ball_id == ball.c.id,
                                  remote_side=person.c.favorite_ball_id)
        ))

        sess = sessionmaker()()
        p1 = Person(data='p1')
        p2 = Person(data='p2')
        p3 = Person(data='p3')

        b1 = Ball(data='b1')

        b1.person = p1
        sess.add_all([p1, p2, p3])
        sess.commit()

        # switch here. the post_update
        # on ball.person can't get tripped up
        # by the fact that there's a "reverse" prop.
        b1.person = p2
        sess.commit()
        eq_(
            p2, b1.person
        )

        # do it the other way
        p3.balls.append(b1)
        sess.commit()
        eq_(
            p3, b1.person
        )

    def test_post_update_o2m(self):
        """A cycle between two rows, with a post_update on the one-to-many"""

        person, ball, Ball, Person = (self.tables.person,
                                      self.tables.ball,
                                      self.classes.Ball,
                                      self.classes.Person)

        mapper(Ball, ball)
        mapper(Person, person, properties=dict(
            balls=relationship(Ball,
                               primaryjoin=ball.c.person_id == person.c.id,
                               remote_side=ball.c.person_id,
                               cascade="all, delete-orphan",
                               post_update=True,
                               backref='person'),
            favorite=relationship(Ball,
                                  primaryjoin=person.c.favorite_ball_id == ball.c.id,
                                  remote_side=person.c.favorite_ball_id)))

        b = Ball(data='some data')
        p = Person(data='some data')
        p.balls.append(b)
        b2 = Ball(data='some data')
        p.balls.append(b2)
        b3 = Ball(data='some data')
        p.balls.append(b3)
        b4 = Ball(data='some data')
        p.balls.append(b4)
        p.favorite = b
        sess = create_session()
        sess.add_all((b, p, b2, b3, b4))

        # balls are inserted with NULL person_id, then the one-to-many
        # post_update fills them in after the person row exists
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL("INSERT INTO ball (person_id, data) "
                        "VALUES (:person_id, :data)",
                        {'person_id': None, 'data': 'some data'}),

            CompiledSQL("INSERT INTO ball (person_id, data) "
                        "VALUES (:person_id, :data)",
                        {'person_id': None, 'data': 'some data'}),

            CompiledSQL("INSERT INTO ball (person_id, data) "
                        "VALUES (:person_id, :data)",
                        {'person_id': None, 'data': 'some data'}),

            CompiledSQL("INSERT INTO ball (person_id, data) "
                        "VALUES (:person_id, :data)",
                        {'person_id': None, 'data': 'some data'}),

            CompiledSQL("INSERT INTO person (favorite_ball_id, data) "
                        "VALUES (:favorite_ball_id, :data)",
                        lambda ctx: {'favorite_ball_id': b.id, 'data': 'some data'}),

            CompiledSQL("UPDATE ball SET person_id=:person_id "
                        "WHERE ball.id = :ball_id",
                        lambda ctx: [
                            {'person_id': p.id, 'ball_id': b.id},
                            {'person_id': p.id, 'ball_id': b2.id},
                            {'person_id': p.id, 'ball_id': b3.id},
                            {'person_id': p.id, 'ball_id': b4.id}
                        ]
                        ),
        )

        sess.delete(p)

        self.assert_sql_execution(testing.db, sess.flush,
                                  CompiledSQL("UPDATE ball SET person_id=:person_id "
                                              "WHERE ball.id = :ball_id",
                                              lambda ctx: [
                                                  {'person_id': None, 'ball_id': b.id},
                                                  {'person_id': None, 'ball_id': b2.id},
                                                  {'person_id': None, 'ball_id': b3.id},
                                                  {'person_id': None, 'ball_id': b4.id}
                                              ]
                                              ),
                                  CompiledSQL("DELETE FROM person WHERE person.id = :id",
                                              lambda ctx: [{'id': p.id}]),

                                  CompiledSQL("DELETE FROM ball WHERE ball.id = :id",
                                              lambda ctx: [{'id': b.id},
                                                           {'id': b2.id},
                                                           {'id': b3.id},
                                                           {'id': b4.id}])
                                  )
class SelfReferentialPostUpdateTest(fixtures.MappedTest):
    """Post_update on a single self-referential mapper.
    """

    @classmethod
    def define_tables(cls, metadata):
        # a tree with sibling links: parent_id plus prev/next sibling FKs
        Table('node', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('path', String(50), nullable=False),
              Column('parent_id', Integer,
                     ForeignKey('node.id'), nullable=True),
              Column('prev_sibling_id', Integer,
                     ForeignKey('node.id'), nullable=True),
              Column('next_sibling_id', Integer,
                     ForeignKey('node.id'), nullable=True))

    @classmethod
    def setup_classes(cls):
        class Node(cls.Basic):
            def __init__(self, path=''):
                self.path = path

    def test_one(self):
        """Post_update only fires off when needed.

        This test case used to produce many superfluous update statements,
        particularly upon delete

        """

        node, Node = self.tables.node, self.classes.Node

        mapper(Node, node, properties={
            'children': relationship(
                Node,
                primaryjoin=node.c.id == node.c.parent_id,
                cascade="all",
                backref=backref("parent", remote_side=node.c.id)
            ),
            'prev_sibling': relationship(
                Node,
                primaryjoin=node.c.prev_sibling_id == node.c.id,
                remote_side=node.c.id,
                uselist=False),
            'next_sibling': relationship(
                Node,
                primaryjoin=node.c.next_sibling_id == node.c.id,
                remote_side=node.c.id,
                uselist=False,
                post_update=True)})

        session = create_session()

        def append_child(parent, child):
            # maintain the doubly-linked sibling list while appending
            if parent.children:
                parent.children[-1].next_sibling = child
                child.prev_sibling = parent.children[-1]
            parent.children.append(child)

        def remove_child(parent, child):
            # unlink the child from the sibling chain, then delete it
            child.parent = None
            node = child.next_sibling
            node.prev_sibling = child.prev_sibling
            child.prev_sibling.next_sibling = node
            session.delete(child)

        root = Node('root')

        about = Node('about')
        cats = Node('cats')
        stories = Node('stories')
        bruce = Node('bruce')

        append_child(root, about)
        assert(about.prev_sibling is None)
        append_child(root, cats)
        assert(cats.prev_sibling is about)
        assert(cats.next_sibling is None)
        assert(about.next_sibling is cats)
        assert(about.prev_sibling is None)
        append_child(root, stories)
        append_child(root, bruce)
        session.add(root)
        session.flush()

        remove_child(root, cats)

        # pre-trigger lazy loader on 'cats' to make the test easier
        cats.children

        # only the three sibling-pointer updates and one delete should occur
        self.assert_sql_execution(
            testing.db,
            session.flush,
            AllOf(
                CompiledSQL("UPDATE node SET prev_sibling_id=:prev_sibling_id "
                            "WHERE node.id = :node_id",
                            lambda ctx: {'prev_sibling_id': about.id, 'node_id': stories.id}),

                CompiledSQL("UPDATE node SET next_sibling_id=:next_sibling_id "
                            "WHERE node.id = :node_id",
                            lambda ctx: {'next_sibling_id': stories.id, 'node_id': about.id}),

                CompiledSQL("UPDATE node SET next_sibling_id=:next_sibling_id "
                            "WHERE node.id = :node_id",
                            lambda ctx: {'next_sibling_id': None, 'node_id': cats.id}),
            ),
            CompiledSQL("DELETE FROM node WHERE node.id = :id",
                        lambda ctx: [{'id': cats.id}])
        )

        session.delete(root)

        self.assert_sql_execution(
            testing.db,
            session.flush,
            CompiledSQL("UPDATE node SET next_sibling_id=:next_sibling_id "
                        "WHERE node.id = :node_id",
                        lambda ctx: [
                            {'node_id': about.id, 'next_sibling_id': None},
                            {'node_id': stories.id, 'next_sibling_id': None}
                        ]
                        ),
            AllOf(
                CompiledSQL("DELETE FROM node WHERE node.id = :id",
                            lambda ctx: {'id': about.id}
                            ),
                CompiledSQL("DELETE FROM node WHERE node.id = :id",
                            lambda ctx: {'id': stories.id}
                            ),
                CompiledSQL("DELETE FROM node WHERE node.id = :id",
                            lambda ctx: {'id': bruce.id}
                            ),
            ),
            CompiledSQL("DELETE FROM node WHERE node.id = :id",
                        lambda ctx: {'id': root.id}
                        ),
        )
        about = Node('about')
        cats = Node('cats')
        about.next_sibling = cats
        cats.prev_sibling = about
        session.add(about)
        session.flush()
        session.delete(about)
        cats.prev_sibling = None
        session.flush()
class SelfReferentialPostUpdateTest2(fixtures.MappedTest):
    # Regression test for a self-referential FK ("a_table.b" -> "a_table.id")
    # combined with post_update on the relationship.

    @classmethod
    def define_tables(cls, metadata):
        # Single table with a nullable self-referential foreign key column "b".
        Table("a_table", metadata,
              Column("id", Integer(), primary_key=True, test_needs_autoincrement=True),
              Column("fui", String(128)),
              Column("b", Integer(), ForeignKey("a_table.id")))

    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass

    def test_one(self):
        """
        Test that post_update remembers to be involved in update operations as
        well, since it replaces the normal dependency processing completely
        [ticket:413]
        """

        A, a_table = self.classes.A, self.tables.a_table

        # 'foo' points at another A row; post_update=True means the FK is
        # populated via a second UPDATE after the INSERT.
        mapper(A, a_table, properties={
            'foo': relationship(A,
                    remote_side=[a_table.c.id],
                    post_update=True)})

        session = create_session()

        f1 = A(fui="f1")
        session.add(f1)
        session.flush()

        f2 = A(fui="f2", foo=f1)

        # at this point f1 is already inserted. but we need post_update
        # to fire off anyway
        session.add(f2)
        session.flush()
        session.expunge_all()

        # Reload both rows and verify the self-referential link persisted.
        f1 = session.query(A).get(f1.id)
        f2 = session.query(A).get(f2.id)
        assert f2.foo is f1
class SelfReferentialPostUpdateTest3(fixtures.MappedTest):
    # Exercises a parent/child schema where "parent" holds an FK to "child"
    # and "child" holds FKs both to itself and back to "parent", so flushes
    # require post_update to break the circular dependency.

    @classmethod
    def define_tables(cls, metadata):
        Table('parent', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50), nullable=False),
              Column('child_id', Integer,
                     ForeignKey('child.id', name='c1'), nullable=True))

        Table('child', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50), nullable=False),
              Column('child_id', Integer,
                     ForeignKey('child.id')),
              Column('parent_id', Integer,
                     ForeignKey('parent.id'), nullable=True))

    @classmethod
    def setup_classes(cls):
        class Parent(cls.Basic):
            def __init__(self, name=''):
                self.name = name

        class Child(cls.Basic):
            def __init__(self, name=''):
                self.name = name

    def test_one(self):
        # Builds a cyclic object graph (parent <-> children, child -> child)
        # and verifies inserts, a delete of a linked child, and FK null-outs
        # all flush without circular-dependency errors.
        Child, Parent, parent, child = (self.classes.Child,
                                        self.classes.Parent,
                                        self.tables.parent,
                                        self.tables.child)

        mapper(Parent, parent, properties={
            'children':relationship(Child, primaryjoin=parent.c.id==child.c.parent_id),
            'child':relationship(Child, primaryjoin=parent.c.child_id==child.c.id, post_update=True)
        })
        mapper(Child, child, properties={
            'parent':relationship(Child, remote_side=child.c.id)
        })

        session = create_session()
        p1 = Parent('p1')
        c1 = Child('c1')
        c2 = Child('c2')
        p1.children =[c1, c2]
        c2.parent = c1
        p1.child = c2

        session.add_all([p1, c1, c2])
        session.flush()

        p2 = Parent('p2')
        c3 = Child('c3')
        p2.children = [c3]
        p2.child = c3
        session.add(p2)

        # Delete c2 while it is still referenced; post_update must null the
        # parent's FK before the DELETE can succeed.
        session.delete(c2)
        p1.children.remove(c2)
        p1.child = None
        session.flush()

        p2.child = None
        session.flush()
class PostUpdateBatchingTest(fixtures.MappedTest):
    """test that lots of post update cols batch together into a single UPDATE."""

    @classmethod
    def define_tables(cls, metadata):
        # One parent table carrying three independent post_update FK columns,
        # each pointing into its own child table.
        Table('parent', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50), nullable=False),
              Column('c1_id', Integer,
                     ForeignKey('child1.id', name='c1'), nullable=True),
              Column('c2_id', Integer,
                     ForeignKey('child2.id', name='c2'), nullable=True),
              Column('c3_id', Integer,
                     ForeignKey('child3.id', name='c3'), nullable=True)
        )

        Table('child1', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50), nullable=False),
              Column('parent_id', Integer,
                     ForeignKey('parent.id'), nullable=False))

        Table('child2', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50), nullable=False),
              Column('parent_id', Integer,
                     ForeignKey('parent.id'), nullable=False))

        Table('child3', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50), nullable=False),
              Column('parent_id', Integer,
                     ForeignKey('parent.id'), nullable=False))

    @classmethod
    def setup_classes(cls):
        class Parent(cls.Basic):
            def __init__(self, name=''):
                self.name = name

        class Child1(cls.Basic):
            def __init__(self, name=''):
                self.name = name

        class Child2(cls.Basic):
            def __init__(self, name=''):
                self.name = name

        class Child3(cls.Basic):
            def __init__(self, name=''):
                self.name = name

    def test_one(self):
        child1, child2, child3, Parent, parent, Child1, Child2, Child3 = (self.tables.child1,
                                self.tables.child2,
                                self.tables.child3,
                                self.classes.Parent,
                                self.tables.parent,
                                self.classes.Child1,
                                self.classes.Child2,
                                self.classes.Child3)

        # Three collection relationships plus three scalar post_update
        # relationships; the latter are what must batch into one UPDATE.
        mapper(Parent, parent, properties={
            'c1s':relationship(Child1, primaryjoin=child1.c.parent_id==parent.c.id),
            'c2s':relationship(Child2, primaryjoin=child2.c.parent_id==parent.c.id),
            'c3s':relationship(Child3, primaryjoin=child3.c.parent_id==parent.c.id),
            'c1':relationship(Child1, primaryjoin=child1.c.id==parent.c.c1_id, post_update=True),
            'c2':relationship(Child2, primaryjoin=child2.c.id==parent.c.c2_id, post_update=True),
            'c3':relationship(Child3, primaryjoin=child3.c.id==parent.c.c3_id, post_update=True),
        })
        mapper(Child1, child1)
        mapper(Child2, child2)
        mapper(Child3, child3)

        sess = create_session()

        p1 = Parent('p1')
        c11, c12, c13 = Child1('c1'), Child1('c2'), Child1('c3')
        c21, c22, c23 = Child2('c1'), Child2('c2'), Child2('c3')
        c31, c32, c33 = Child3('c1'), Child3('c2'), Child3('c3')

        p1.c1s = [c11, c12, c13]
        p1.c2s = [c21, c22, c23]
        p1.c3s = [c31, c32, c33]
        sess.add(p1)
        sess.flush()

        p1.c1 = c12
        p1.c2 = c23
        p1.c3 = c31

        # Setting all three FKs must emit exactly one combined UPDATE.
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, "
                "c3_id=:c3_id WHERE parent.id = :parent_id",
                lambda ctx: {'c2_id': c23.id, 'parent_id': p1.id, 'c1_id': c12.id, 'c3_id': c31.id}
            )
        )

        p1.c1 = p1.c2 = p1.c3 = None

        # Clearing all three FKs must also batch into a single UPDATE.
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, "
                "c3_id=:c3_id WHERE parent.id = :parent_id",
                lambda ctx: {'c2_id': None, 'parent_id': p1.id, 'c1_id': None, 'c3_id': None}
            )
        )
| mit |
ishank08/scikit-learn | sklearn/ensemble/bagging.py | 23 | 38399 | """Bagging meta-estimator."""
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
from __future__ import division
import itertools
import numbers
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..metrics import r2_score, accuracy_score
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_random_state, check_X_y, check_array, column_or_1d
from ..utils.random import sample_without_replacement
from ..utils.validation import has_fit_parameter, check_is_fitted
from ..utils import indices_to_mask, check_consistent_length
from ..utils.fixes import bincount
from ..utils.metaestimators import if_delegate_has_method
from ..utils.multiclass import check_classification_targets
from .base import BaseEnsemble, _partition_estimators
__all__ = ["BaggingClassifier",
"BaggingRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _generate_indices(random_state, bootstrap, n_population, n_samples):
"""Draw randomly sampled indices."""
# Draw sample indices
if bootstrap:
indices = random_state.randint(0, n_population, n_samples)
else:
indices = sample_without_replacement(n_population, n_samples,
random_state=random_state)
return indices
def _generate_bagging_indices(random_state, bootstrap_features,
                              bootstrap_samples, n_features, n_samples,
                              max_features, max_samples):
    """Randomly draw feature and sample indices.

    Returns a ``(feature_indices, sample_indices)`` pair drawn from the
    given ``random_state``.
    """
    rng = check_random_state(random_state)

    # Feature indices are drawn *before* sample indices; the consumption
    # order of the RNG stream must stay fixed so replays are reproducible.
    drawn_features = _generate_indices(rng, bootstrap_features,
                                       n_features, max_features)
    drawn_samples = _generate_indices(rng, bootstrap_samples,
                                      n_samples, max_samples)

    return drawn_features, drawn_samples
def _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight,
                               seeds, total_n_estimators, verbose):
    """Private function used to build a batch of estimators within a job.

    One joblib worker fits ``n_estimators`` clones of
    ``ensemble.base_estimator_``, each on its own random subset of rows
    and columns, seeded from ``seeds``. Returns the pair
    ``(estimators, estimators_features)`` for this batch.
    """
    # Retrieve settings
    n_samples, n_features = X.shape
    max_features = ensemble._max_features
    max_samples = ensemble._max_samples
    bootstrap = ensemble.bootstrap
    bootstrap_features = ensemble.bootstrap_features
    # Whether the base estimator's fit() accepts a sample_weight argument.
    support_sample_weight = has_fit_parameter(ensemble.base_estimator_,
                                              "sample_weight")
    if not support_sample_weight and sample_weight is not None:
        raise ValueError("The base estimator doesn't support sample weight")

    # Build estimators
    estimators = []
    estimators_features = []

    for i in range(n_estimators):
        if verbose > 1:
            print("Building estimator %d of %d for this parallel run "
                  "(total %d)..." % (i + 1, n_estimators, total_n_estimators))

        # NOTE(review): _make_estimator is called with this RandomState
        # before the bagging indices are drawn, while
        # BaseBagging._get_estimators_indices replays only the
        # _generate_bagging_indices call from the same seed. If
        # _make_estimator consumes the RNG, the replayed indices would
        # diverge from the ones used here — confirm against BaseEnsemble.
        random_state = np.random.RandomState(seeds[i])
        estimator = ensemble._make_estimator(append=False,
                                             random_state=random_state)

        # Draw random feature, sample indices
        features, indices = _generate_bagging_indices(random_state,
                                                      bootstrap_features,
                                                      bootstrap, n_features,
                                                      n_samples, max_features,
                                                      max_samples)

        # Draw samples, using sample weights, and then fit
        if support_sample_weight:
            if sample_weight is None:
                curr_sample_weight = np.ones((n_samples,))
            else:
                curr_sample_weight = sample_weight.copy()

            if bootstrap:
                # With replacement: weight each row by how often it was drawn.
                sample_counts = bincount(indices, minlength=n_samples)
                curr_sample_weight *= sample_counts
            else:
                # Without replacement: zero out the rows that were not drawn.
                not_indices_mask = ~indices_to_mask(indices, n_samples)
                curr_sample_weight[not_indices_mask] = 0

            estimator.fit(X[:, features], y, sample_weight=curr_sample_weight)

        # Draw samples, using a mask, and then fit
        else:
            estimator.fit((X[indices])[:, features], y[indices])

        estimators.append(estimator)
        estimators_features.append(features)

    return estimators, estimators_features
def _parallel_predict_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute (proba-)predictions within a job."""
n_samples = X.shape[0]
proba = np.zeros((n_samples, n_classes))
for estimator, features in zip(estimators, estimators_features):
if hasattr(estimator, "predict_proba"):
proba_estimator = estimator.predict_proba(X[:, features])
if n_classes == len(estimator.classes_):
proba += proba_estimator
else:
proba[:, estimator.classes_] += \
proba_estimator[:, range(len(estimator.classes_))]
else:
# Resort to voting
predictions = estimator.predict(X[:, features])
for i in range(n_samples):
proba[i, predictions[i]] += 1
return proba
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute log probabilities within a job."""
n_samples = X.shape[0]
log_proba = np.empty((n_samples, n_classes))
log_proba.fill(-np.inf)
all_classes = np.arange(n_classes, dtype=np.int)
for estimator, features in zip(estimators, estimators_features):
log_proba_estimator = estimator.predict_log_proba(X[:, features])
if n_classes == len(estimator.classes_):
log_proba = np.logaddexp(log_proba, log_proba_estimator)
else:
log_proba[:, estimator.classes_] = np.logaddexp(
log_proba[:, estimator.classes_],
log_proba_estimator[:, range(len(estimator.classes_))])
missing = np.setdiff1d(all_classes, estimator.classes_)
log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
-np.inf)
return log_proba
def _parallel_decision_function(estimators, estimators_features, X):
"""Private function used to compute decisions within a job."""
return sum(estimator.decision_function(X[:, features])
for estimator, features in zip(estimators,
estimators_features))
def _parallel_predict_regression(estimators, estimators_features, X):
"""Private function used to compute predictions within a job."""
return sum(estimator.predict(X[:, features])
for estimator, features in zip(estimators,
estimators_features))
class BaseBagging(with_metaclass(ABCMeta, BaseEnsemble)):
    """Base class for Bagging meta-estimator.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator=None,
                 n_estimators=10,
                 max_samples=1.0,
                 max_features=1.0,
                 bootstrap=True,
                 bootstrap_features=False,
                 oob_score=False,
                 warm_start=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        # Parameters are stored verbatim; validation happens in _fit so
        # that get_params/set_params round-trip cleanly (sklearn convention).
        super(BaseBagging, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators)

        self.max_samples = max_samples
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.bootstrap_features = bootstrap_features
        self.oob_score = oob_score
        self.warm_start = warm_start
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose

    def fit(self, X, y, sample_weight=None):
        """Build a Bagging ensemble of estimators from the training
           set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        y : array-like, shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.

        Returns
        -------
        self : object
            Returns self.
        """
        return self._fit(X, y, self.max_samples, sample_weight=sample_weight)

    def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
        """Build a Bagging ensemble of estimators from the training
           set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        y : array-like, shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        max_samples : int or float, optional (default=None)
            Argument to use instead of self.max_samples.

        max_depth : int, optional (default=None)
            Override value used when constructing base estimator. Only
            supported if the base estimator has a max_depth parameter.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Convert data
        X, y = check_X_y(X, y, ['csr', 'csc'])
        if sample_weight is not None:
            sample_weight = check_array(sample_weight, ensure_2d=False)
            check_consistent_length(y, sample_weight)

        # Remap output
        n_samples, self.n_features_ = X.shape
        self._n_samples = n_samples
        y = self._validate_y(y)

        # Check parameters
        self._validate_estimator()

        if max_depth is not None:
            self.base_estimator_.max_depth = max_depth

        # Validate max_samples: a float is interpreted as a fraction of the
        # number of rows; an int is used as-is.
        if max_samples is None:
            max_samples = self.max_samples
        elif not isinstance(max_samples, (numbers.Integral, np.integer)):
            max_samples = int(max_samples * X.shape[0])

        if not (0 < max_samples <= X.shape[0]):
            raise ValueError("max_samples must be in (0, n_samples]")

        # Store validated integer row sampling value
        self._max_samples = max_samples

        # Validate max_features: same int/fraction convention as max_samples.
        if isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            max_features = int(self.max_features * self.n_features_)

        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")

        # Store validated integer feature sampling value
        self._max_features = max_features

        # Other checks
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")

        if self.warm_start and self.oob_score:
            raise ValueError("Out of bag estimate only available"
                             " if warm_start=False")

        if hasattr(self, "oob_score_") and self.warm_start:
            del self.oob_score_

        if not self.warm_start or not hasattr(self, 'estimators_'):
            # Free allocated memory, if any
            self.estimators_ = []
            self.estimators_features_ = []

        # With warm_start, only the estimators beyond the already-fitted
        # ones are trained in this call.
        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))

        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
            return self

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(n_more_estimators,
                                                             self.n_jobs)
        total_n_estimators = sum(n_estimators)

        # Advance random state to state after training
        # the first n_estimators
        if self.warm_start and len(self.estimators_) > 0:
            random_state.randint(MAX_INT, size=len(self.estimators_))

        seeds = random_state.randint(MAX_INT, size=n_more_estimators)
        # _seeds is kept so _get_estimators_indices can replay the draws.
        self._seeds = seeds

        all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_build_estimators)(
                n_estimators[i],
                self,
                X,
                y,
                sample_weight,
                seeds[starts[i]:starts[i + 1]],
                total_n_estimators,
                verbose=self.verbose)
            for i in range(n_jobs))

        # Reduce: concatenate the per-job batches in job order.
        self.estimators_ += list(itertools.chain.from_iterable(
            t[0] for t in all_results))
        self.estimators_features_ += list(itertools.chain.from_iterable(
            t[1] for t in all_results))

        if self.oob_score:
            self._set_oob_score(X, y)

        return self

    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""

    def _validate_y(self, y):
        # Default implementation
        return column_or_1d(y, warn=True)

    def _get_estimators_indices(self):
        # Get drawn indices along both sample and feature axes
        for seed in self._seeds:
            # Operations accessing random_state must be performed identically
            # to those in `_parallel_build_estimators()`
            # NOTE(review): the replay here calls _generate_bagging_indices
            # directly from RandomState(seed), whereas the fit path first
            # passes the same RandomState through _make_estimator — verify
            # the two consume the RNG stream identically.
            random_state = np.random.RandomState(seed)
            feature_indices, sample_indices = _generate_bagging_indices(
                random_state, self.bootstrap_features, self.bootstrap,
                self.n_features_, self._n_samples, self._max_features,
                self._max_samples)
            yield feature_indices, sample_indices

    @property
    def estimators_samples_(self):
        """The subset of drawn samples for each base estimator.

        Returns a dynamically generated list of boolean masks identifying
        the samples used for for fitting each member of the ensemble, i.e.,
        the in-bag samples.

        Note: the list is re-created at each call to the property in order
        to reduce the object memory footprint by not storing the sampling
        data. Thus fetching the property may be slower than expected.
        """
        sample_masks = []
        for _, sample_indices in self._get_estimators_indices():
            mask = indices_to_mask(sample_indices, self._n_samples)
            sample_masks.append(mask)

        return sample_masks
class BaggingClassifier(BaseBagging, ClassifierMixin):
    """A Bagging classifier.

    A Bagging classifier is an ensemble meta-estimator that fits base
    classifiers each on random subsets of the original dataset and then
    aggregate their individual predictions (either by voting or by averaging)
    to form a final prediction. Such a meta-estimator can typically be used as
    a way to reduce the variance of a black-box estimator (e.g., a decision
    tree), by introducing randomization into its construction procedure and
    then making an ensemble out of it.

    This algorithm encompasses several works from the literature. When random
    subsets of the dataset are drawn as random subsets of the samples, then
    this algorithm is known as Pasting [1]_. If samples are drawn with
    replacement, then the method is known as Bagging [2]_. When random subsets
    of the dataset are drawn as random subsets of the features, then the method
    is known as Random Subspaces [3]_. Finally, when base estimators are built
    on subsets of both samples and features, then the method is known as
    Random Patches [4]_.

    Read more in the :ref:`User Guide <bagging>`.

    Parameters
    ----------
    base_estimator : object or None, optional (default=None)
        The base estimator to fit on random subsets of the dataset.
        If None, then the base estimator is a decision tree.

    n_estimators : int, optional (default=10)
        The number of base estimators in the ensemble.

    max_samples : int or float, optional (default=1.0)
        The number of samples to draw from X to train each base estimator.

        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.

    max_features : int or float, optional (default=1.0)
        The number of features to draw from X to train each base estimator.

        - If int, then draw `max_features` features.
        - If float, then draw `max_features * X.shape[1]` features.

    bootstrap : boolean, optional (default=True)
        Whether samples are drawn with replacement.

    bootstrap_features : boolean, optional (default=False)
        Whether features are drawn with replacement.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    warm_start : bool, optional (default=False)
        When set to True, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit
        a whole new ensemble.

        .. versionadded:: 0.17
           *warm_start* constructor parameter.

    n_jobs : int, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the building process.

    Attributes
    ----------
    base_estimator_ : estimator
        The base estimator from which the ensemble is grown.

    estimators_ : list of estimators
        The collection of fitted base estimators.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator. Each subset is defined by a boolean mask.

    estimators_features_ : list of arrays
        The subset of drawn features for each base estimator.

    classes_ : array of shape = [n_classes]
        The classes labels.

    n_classes_ : int or list
        The number of classes.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    References
    ----------

    .. [1] L. Breiman, "Pasting small votes for classification in large
           databases and on-line", Machine Learning, 36(1), 85-103, 1999.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
           1996.

    .. [3] T. Ho, "The random subspace method for constructing decision
           forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
           1998.

    .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
           Learning and Knowledge Discovery in Databases, 346-361, 2012.
    """
    def __init__(self,
                 base_estimator=None,
                 n_estimators=10,
                 max_samples=1.0,
                 max_features=1.0,
                 bootstrap=True,
                 bootstrap_features=False,
                 oob_score=False,
                 warm_start=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):

        super(BaggingClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            bootstrap=bootstrap,
            bootstrap_features=bootstrap_features,
            oob_score=oob_score,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(BaggingClassifier, self)._validate_estimator(
            default=DecisionTreeClassifier())

    def _set_oob_score(self, X, y):
        n_samples = y.shape[0]
        n_classes_ = self.n_classes_
        classes_ = self.classes_

        predictions = np.zeros((n_samples, n_classes_))

        for estimator, samples, features in zip(self.estimators_,
                                                self.estimators_samples_,
                                                self.estimators_features_):
            # Create mask for OOB samples
            mask = ~samples

            if hasattr(estimator, "predict_proba"):
                predictions[mask, :] += estimator.predict_proba(
                    (X[mask, :])[:, features])

            else:
                # No probability support: count one hard vote per prediction.
                p = estimator.predict((X[mask, :])[:, features])
                j = 0

                for i in range(n_samples):
                    if mask[i]:
                        predictions[i, p[j]] += 1
                        j += 1

        if (predictions.sum(axis=1) == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few estimators were used "
                 "to compute any reliable oob estimates.")

        oob_decision_function = (predictions /
                                 predictions.sum(axis=1)[:, np.newaxis])
        oob_score = accuracy_score(y, classes_.take(np.argmax(predictions,
                                                              axis=1)))

        self.oob_decision_function_ = oob_decision_function
        self.oob_score_ = oob_score

    def _validate_y(self, y):
        # Encode labels as integer indices into classes_.
        y = column_or_1d(y, warn=True)
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)

        return y

    def predict(self, X):
        """Predict class for X.

        The predicted class of an input sample is computed as the class with
        the highest mean predicted probability. If base estimators do not
        implement a ``predict_proba`` method, then it resorts to voting.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        predicted_probability = self.predict_proba(X)
        return self.classes_.take((np.argmax(predicted_probability, axis=1)),
                                  axis=0)

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the base estimators in the
        ensemble. If base estimators do not implement a ``predict_proba``
        method, then it resorts to voting and the predicted class probabilities
        of an input sample represents the proportion of estimators predicting
        each class.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, "classes_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1}."
                             "".format(self.n_features_, X.shape[1]))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_)
            for i in range(n_jobs))

        # Reduce
        proba = sum(all_proba) / self.n_estimators

        return proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the base
        estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, "classes_")
        if hasattr(self.base_estimator_, "predict_log_proba"):
            # Check data
            X = check_array(X, accept_sparse=['csr', 'csc'])

            if self.n_features_ != X.shape[1]:
                raise ValueError("Number of features of the model must "
                                 "match the input. Model n_features is {0} "
                                 "and input n_features is {1} "
                                 "".format(self.n_features_, X.shape[1]))

            # Parallel loop
            n_jobs, n_estimators, starts = _partition_estimators(
                self.n_estimators, self.n_jobs)

            all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
                delayed(_parallel_predict_log_proba)(
                    self.estimators_[starts[i]:starts[i + 1]],
                    self.estimators_features_[starts[i]:starts[i + 1]],
                    X,
                    self.n_classes_)
                for i in range(n_jobs))

            # Reduce: combine per-job results in log space, then divide the
            # underlying probabilities by n_estimators (subtract the log).
            log_proba = all_log_proba[0]

            for j in range(1, len(all_log_proba)):
                log_proba = np.logaddexp(log_proba, all_log_proba[j])

            log_proba -= np.log(self.n_estimators)

            return log_proba

        else:
            # Base estimator cannot produce log-probabilities directly;
            # fall back to the (possibly vote-based) probabilities.
            return np.log(self.predict_proba(X))

    @if_delegate_has_method(delegate='base_estimator')
    def decision_function(self, X):
        """Average of the decision functions of the base classifiers.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The columns correspond
            to the classes in sorted order, as they appear in the attribute
            ``classes_``. Regression and binary classification are special
            cases with ``k == 1``, otherwise ``k==n_classes``.

        """
        check_is_fitted(self, "classes_")

        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            # BUGFIX: the format indices were {1}/{2} with only two
            # .format() arguments, so raising this error crashed with
            # IndexError instead of the intended ValueError message.
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1} "
                             "".format(self.n_features_, X.shape[1]))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_decision_function)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X)
            for i in range(n_jobs))

        # Reduce
        decisions = sum(all_decisions) / self.n_estimators

        return decisions
class BaggingRegressor(BaseBagging, RegressorMixin):
"""A Bagging regressor.
A Bagging regressor is an ensemble meta-estimator that fits base
regressors each on random subsets of the original dataset and then
aggregate their individual predictions (either by voting or by averaging)
to form a final prediction. Such a meta-estimator can typically be used as
a way to reduce the variance of a black-box estimator (e.g., a decision
tree), by introducing randomization into its construction procedure and
then making an ensemble out of it.
This algorithm encompasses several works from the literature. When random
subsets of the dataset are drawn as random subsets of the samples, then
this algorithm is known as Pasting [1]_. If samples are drawn with
replacement, then the method is known as Bagging [2]_. When random subsets
of the dataset are drawn as random subsets of the features, then the method
is known as Random Subspaces [3]_. Finally, when base estimators are built
on subsets of both samples and features, then the method is known as
Random Patches [4]_.
Read more in the :ref:`User Guide <bagging>`.
Parameters
----------
base_estimator : object or None, optional (default=None)
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=1.0)
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=True)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
warm_start : bool, optional (default=False)
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
Attributes
----------
estimators_ : list of estimators
The collection of fitted sub-estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by a boolean mask.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_prediction_` might contain NaN.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
"""
    def __init__(self,
                 base_estimator=None,
                 n_estimators=10,
                 max_samples=1.0,
                 max_features=1.0,
                 bootstrap=True,
                 bootstrap_features=False,
                 oob_score=False,
                 warm_start=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        """Build a bagging ensemble regressor.

        All arguments are forwarded unchanged to the bagging base class;
        this subclass only supplies regression-specific behaviour
        (prediction averaging, default base estimator, OOB scoring).
        """
        super(BaggingRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            bootstrap=bootstrap,
            bootstrap_features=bootstrap_features,
            oob_score=oob_score,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)
    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        check_is_fitted(self, "estimators_features_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])
        # Parallel loop: split estimators into contiguous chunks, one chunk
        # per job; `starts` holds the chunk boundary indices.
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_regression)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X)
            for i in range(n_jobs))
        # Reduce: summing the per-chunk results and dividing by the total
        # estimator count yields the ensemble mean (assumes each chunk
        # returns summed predictions -- see _parallel_predict_regression).
        y_hat = sum(all_y_hat) / self.n_estimators
        return y_hat
    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        # Fall back to a decision tree when no base estimator was supplied.
        super(BaggingRegressor, self)._validate_estimator(
            default=DecisionTreeRegressor())
    def _set_oob_score(self, X, y):
        """Compute out-of-bag predictions and the OOB R^2 score.

        For every sample, averages the predictions of only those estimators
        that did not see the sample during fitting.
        """
        n_samples = y.shape[0]
        # Running sum of OOB predictions and the number of estimators that
        # contributed to each sample.
        predictions = np.zeros((n_samples,))
        n_predictions = np.zeros((n_samples,))
        for estimator, samples, features in zip(self.estimators_,
                                                self.estimators_samples_,
                                                self.estimators_features_):
            # Create mask for OOB samples
            mask = ~samples
            predictions[mask] += estimator.predict((X[mask, :])[:, features])
            n_predictions[mask] += 1
        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few estimators were used "
                 "to compute any reliable oob estimates.")
            # Avoid division by zero for samples that were never out-of-bag.
            n_predictions[n_predictions == 0] = 1
        predictions /= n_predictions
        self.oob_prediction_ = predictions
        self.oob_score_ = r2_score(y, predictions)
| bsd-3-clause |
tima/ansible | test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py | 39 | 15021 | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import pytest
import os
from . placebo_fixtures import placeboify, maybe_sleep
from ansible.modules.cloud.amazon import ec2_vpc_vpn
from ansible.module_utils._text import to_text
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, boto3_tag_list_to_ansible_dict
class FakeModule(object):
    """Minimal stand-in for AnsibleModule used by these tests.

    Records the arguments passed to ``exit_json``/``fail_json`` instead of
    terminating the process; ``fail_json`` raises so that module failures
    surface as test errors.
    """

    def __init__(self, **kwargs):
        # The keyword arguments play the role of the parsed module params.
        self.params = kwargs

    def _record_exit(self, args, kwargs):
        # Remember how the module tried to exit, for later inspection.
        self.exit_args = args
        self.exit_kwargs = kwargs

    def fail_json(self, *args, **kwargs):
        self._record_exit(args, kwargs)
        raise Exception('FAIL')

    def exit_json(self, *args, **kwargs):
        self._record_exit(args, kwargs)
def get_vgw(connection):
    """Return ids of two test VPN gateways, creating and tagging them if absent."""
    # see if two vgw exist and return them if so
    vgw = connection.describe_vpn_gateways(Filters=[{'Name': 'tag:Ansible_VPN', 'Values': ['Test']}])
    if len(vgw['VpnGateways']) >= 2:
        return [vgw['VpnGateways'][0]['VpnGatewayId'], vgw['VpnGateways'][1]['VpnGatewayId']]
    # otherwise create two and return them
    vgw_1 = connection.create_vpn_gateway(Type='ipsec.1')
    vgw_2 = connection.create_vpn_gateway(Type='ipsec.1')
    for resource in (vgw_1, vgw_2):
        connection.create_tags(Resources=[resource['VpnGateway']['VpnGatewayId']], Tags=[{'Key': 'Ansible_VPN', 'Value': 'Test'}])
    return [vgw_1['VpnGateway']['VpnGatewayId'], vgw_2['VpnGateway']['VpnGatewayId']]
def get_cgw(connection):
    """Return ids of two test customer gateways, creating and tagging them if absent."""
    # see if two cgw exist and return them if so
    cgw = connection.describe_customer_gateways(DryRun=False, Filters=[{'Name': 'state', 'Values': ['available']},
                                                                       {'Name': 'tag:Name', 'Values': ['Ansible-CGW']}])
    if len(cgw['CustomerGateways']) >= 2:
        return [cgw['CustomerGateways'][0]['CustomerGatewayId'], cgw['CustomerGateways'][1]['CustomerGatewayId']]
    # otherwise create and return them
    cgw_1 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='9.8.7.6', BgpAsn=65000)
    cgw_2 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='5.4.3.2', BgpAsn=65000)
    for resource in (cgw_1, cgw_2):
        connection.create_tags(Resources=[resource['CustomerGateway']['CustomerGatewayId']], Tags=[{'Key': 'Ansible-CGW', 'Value': 'Test'}])
    return [cgw_1['CustomerGateway']['CustomerGatewayId'], cgw_2['CustomerGateway']['CustomerGatewayId']]
def get_dependencies():
    """Return ``(cgw_ids, vgw_ids)`` needed by the tests.

    When recording placebo fixtures (``PLACEBO_RECORD`` set) the gateways are
    looked up or created against real AWS; otherwise the ids baked into the
    recorded fixtures are returned.
    """
    if os.getenv('PLACEBO_RECORD'):
        module = FakeModule(**{})
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        vgw = get_vgw(connection)
        cgw = get_cgw(connection)
    else:
        # Ids hard-coded into the recorded placebo fixtures.
        vgw = ["vgw-35d70c2b", "vgw-32d70c2c"]
        cgw = ["cgw-6113c87f", "cgw-9e13c880"]
    return cgw, vgw
def setup_mod_conn(placeboify, params):
    """Return a ``(FakeModule, ec2 client)`` pair built from *params*."""
    conn = placeboify.client('ec2')
    m = FakeModule(**params)
    return m, conn
def make_params(cgw, vgw, tags=None, filters=None, routes=None):
    """Return a module-parameter dict for a VPN connection between *cgw* and *vgw*."""
    # Normalize the mutable defaults without sharing state between calls.
    if tags is None:
        tags = {}
    if filters is None:
        filters = {}
    if routes is None:
        routes = []
    params = {
        'customer_gateway_id': cgw,
        'static_only': True,
        'vpn_gateway_id': vgw,
        'connection_type': 'ipsec.1',
        'purge_tags': True,
        'tags': tags,
        'filters': filters,
        'routes': routes,
    }
    return params
def make_conn(placeboify, module, connection):
    """Create a VPN connection from *module*'s params; return ``(changed, vpn)``.

    BUG FIX: this previously read ``module.params['check_mode']``, a key that
    ``make_params()`` never sets (raising KeyError), and the value was unused.
    The dead lookup has been removed.
    """
    customer_gateway_id = module.params['customer_gateway_id']
    static_only = module.params['static_only']
    vpn_gateway_id = module.params['vpn_gateway_id']
    connection_type = module.params['connection_type']
    changed = True
    vpn = ec2_vpc_vpn.create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type)
    return changed, vpn
def tear_down_conn(placeboify, connection, vpn_connection_id):
    """Delete the given VPN connection (test cleanup helper)."""
    ec2_vpc_vpn.delete_connection(connection, vpn_connection_id)
def test_find_connection_vpc_conn_id(placeboify, maybe_sleep):
    """find_connection should locate a connection by its explicit id."""
    # setup dependencies for 2 vpn connections
    dependencies = setup_req(placeboify, 2)
    dep1, dep2 = dependencies[0], dependencies[1]
    params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection']
    params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection']
    # find the connection with a vpn_connection_id and assert it is the expected one
    assert vpn1['VpnConnectionId'] == ec2_vpc_vpn.find_connection(conn1, params1, vpn1['VpnConnectionId'])['VpnConnectionId']
    # delete both connections
    tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId'])
    tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId'])
def test_find_connection_filters(placeboify, maybe_sleep):
    """find_connection should pick the single connection matching a tag filter."""
    # setup dependencies for 2 vpn connections
    dependencies = setup_req(placeboify, 2)
    dep1, dep2 = dependencies[0], dependencies[1]
    params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection']
    params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection']
    # update to different tags
    params1.update(tags={'Wrong': 'Tag'})
    params2.update(tags={'Correct': 'Tag'})
    ec2_vpc_vpn.ensure_present(conn1, params1)
    ec2_vpc_vpn.ensure_present(conn2, params2)
    # create some new parameters for a filter
    params = {'filters': {'tags': {'Correct': 'Tag'}}}
    # find the connection that has the parameters above
    found = ec2_vpc_vpn.find_connection(conn1, params)
    # assert the correct connection was found
    assert found['VpnConnectionId'] == vpn2['VpnConnectionId']
    # delete the connections
    tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId'])
    tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId'])
def test_find_connection_insufficient_filters(placeboify, maybe_sleep):
    """find_connection must refuse to choose between multiple matches."""
    # get list of customer gateways and virtual private gateways
    cgw, vgw = get_dependencies()
    # create two connections with the same tags
    params = make_params(cgw[0], vgw[0], tags={'Correct': 'Tag'})
    params2 = make_params(cgw[1], vgw[1], tags={'Correct': 'Tag'})
    m, conn = setup_mod_conn(placeboify, params)
    m2, conn2 = setup_mod_conn(placeboify, params2)
    _, vpn1 = ec2_vpc_vpn.ensure_present(conn, m.params)
    _, vpn2 = ec2_vpc_vpn.ensure_present(conn2, m2.params)
    # reset the parameters so only filtering by tags will occur
    m.params = {'filters': {'tags': {'Correct': 'Tag'}}}
    # assert that multiple matching connections have been found
    with pytest.raises(Exception) as error_message:
        ec2_vpc_vpn.find_connection(conn, m.params)
    # BUG FIX: the assertion previously compared the pytest ExceptionInfo
    # object itself to a string, which can never be equal; compare the raised
    # exception's message instead.
    assert str(error_message.value) == "More than one matching VPN connection was found.To modify or delete a VPN please specify vpn_connection_id or add filters."
    # delete the connections
    tear_down_conn(placeboify, conn, vpn1['VpnConnectionId'])
    tear_down_conn(placeboify, conn, vpn2['VpnConnectionId'])
def test_find_connection_nonexistent(placeboify, maybe_sleep):
    """find_connection returns None when no connection matches the filters."""
    # create parameters but don't create a connection with them
    params = {'filters': {'tags': {'Correct': 'Tag'}}}
    m, conn = setup_mod_conn(placeboify, params)
    # try to find a connection with matching parameters and assert None are found
    assert ec2_vpc_vpn.find_connection(conn, m.params) is None
def test_create_connection(placeboify, maybe_sleep):
    """ensure_present creates a new VPN connection and reports changed."""
    # get list of customer gateways and virtual private gateways
    cgw, vgw = get_dependencies()
    # create a connection
    params = make_params(cgw[0], vgw[0])
    m, conn = setup_mod_conn(placeboify, params)
    changed, vpn = ec2_vpc_vpn.ensure_present(conn, m.params)
    # assert that changed is true and that there is a connection id
    assert changed is True
    assert 'VpnConnectionId' in vpn
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_create_connection_that_exists(placeboify, maybe_sleep):
    """ensure_present on an existing connection is idempotent."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # try to recreate the same connection
    changed, vpn2 = ec2_vpc_vpn.ensure_present(conn, params)
    # nothing should have changed
    assert changed is False
    assert vpn['VpnConnectionId'] == vpn2['VpnConnectionId']
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_modify_deleted_connection(placeboify, maybe_sleep):
    """Updating a connection that has been deleted must raise."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # delete it
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
    # try to update the deleted connection
    m.params.update(vpn_connection_id=vpn['VpnConnectionId'])
    with pytest.raises(Exception) as error_message:
        ec2_vpc_vpn.ensure_present(conn, m.params)
    # BUG FIX: the assertion previously compared the pytest ExceptionInfo
    # object itself to a string, which can never be equal; compare the raised
    # exception's message instead.
    assert str(error_message.value) == "There is no VPN connection available or pending with that id. Did you delete it?"
def test_delete_connection(placeboify, maybe_sleep):
    """ensure_absent deletes an existing connection and reports changed."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # delete it
    changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params)
    assert changed is True
    assert vpn == {}
def test_delete_nonexistent_connection(placeboify, maybe_sleep):
    """ensure_absent on a non-existent connection is a no-op."""
    # create parameters and ensure any connection matching (None) is deleted
    params = {'filters': {'tags': {'ThisConnection': 'DoesntExist'}}}
    m, conn = setup_mod_conn(placeboify, params)
    changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params)
    assert changed is False
    assert vpn == {}
def test_check_for_update_tags(placeboify, maybe_sleep):
    """check_for_update reports the tag additions and removals needed."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # add and remove a number of tags
    m.params['tags'] = {'One': 'one', 'Two': 'two'}
    ec2_vpc_vpn.ensure_present(conn, m.params)
    m.params['tags'] = {'Two': 'two', 'Three': 'three', 'Four': 'four'}
    changes = ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId'])
    # compare as dicts so tag-list ordering does not matter
    flat_dict_changes = boto3_tag_list_to_ansible_dict(changes['tags_to_add'])
    correct_changes = boto3_tag_list_to_ansible_dict([{'Key': 'Three', 'Value': 'three'}, {'Key': 'Four', 'Value': 'four'}])
    assert flat_dict_changes == correct_changes
    assert changes['tags_to_remove'] == ['One']
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_check_for_update_nonmodifiable_attr(placeboify, maybe_sleep):
    """Changing a non-modifiable attribute must raise a descriptive error."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    current_vgw = params['vpn_gateway_id']
    # update a parameter that isn't modifiable
    m.params.update(vpn_gateway_id="invalidchange")
    err = 'You cannot modify vpn_gateway_id, the current value of which is {0}. Modifiable VPN connection attributes are tags.'.format(current_vgw)
    # NOTE(review): the (m, conn) argument order differs from
    # check_for_update(conn, m.params, ...) used elsewhere -- confirm intended.
    with pytest.raises(Exception) as error_message:
        ec2_vpc_vpn.check_for_update(m, conn, vpn['VpnConnectionId'])
    # BUG FIX: the assertion previously compared the pytest ExceptionInfo
    # object itself to a string, which can never be equal; compare the raised
    # exception's message instead.
    assert str(error_message.value) == err
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_add_tags(placeboify, maybe_sleep):
    """add_tags attaches the given tag to the connection."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # add a tag to the connection
    ec2_vpc_vpn.add_tags(conn, vpn['VpnConnectionId'], add=[{'Key': 'Ansible-Test', 'Value': 'VPN'}])
    # assert tag is there
    current_vpn = ec2_vpc_vpn.find_connection(conn, params)
    assert current_vpn['Tags'] == [{'Key': 'Ansible-Test', 'Value': 'VPN'}]
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_remove_tags(placeboify, maybe_sleep):
    """remove_tags strips the named tag key from the connection."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # remove a tag from the connection
    ec2_vpc_vpn.remove_tags(conn, vpn['VpnConnectionId'], remove=['Ansible-Test'])
    # assert the tag is gone
    current_vpn = ec2_vpc_vpn.find_connection(conn, params)
    assert 'Tags' not in current_vpn
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_add_routes(placeboify, maybe_sleep):
    """add_routes attaches the given static routes to the connection."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # create connection with a route
    ec2_vpc_vpn.add_routes(conn, vpn['VpnConnectionId'], ['195.168.2.0/24', '196.168.2.0/24'])
    # assert both routes are there (compare as sets; route order is unspecified)
    current_vpn = ec2_vpc_vpn.find_connection(conn, params)
    assert set(each['DestinationCidrBlock'] for each in current_vpn['Routes']) == set(['195.168.2.0/24', '196.168.2.0/24'])
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def setup_req(placeboify, number_of_results=1):
    ''' returns dependencies for VPN connections

    For each requested connection, builds params from the shared gateways,
    creates the connection, and returns a dict with keys 'module',
    'connection', 'vpn' and 'params'. Returns a single dict when
    number_of_results == 1, otherwise a pair of dicts.
    '''
    assert number_of_results in (1, 2)
    results = []
    cgw, vgw = get_dependencies()
    for each in range(0, number_of_results):
        params = make_params(cgw[each], vgw[each])
        m, conn = setup_mod_conn(placeboify, params)
        _, vpn = ec2_vpc_vpn.ensure_present(conn, params)
        results.append({'module': m, 'connection': conn, 'vpn': vpn, 'params': params})
    if number_of_results == 1:
        return results[0]
    else:
        return results[0], results[1]
| gpl-3.0 |
GabrielBrascher/cloudstack | plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmVifModule.py | 8 | 1969 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on May 17, 2011
'''
from OvmCommonModule import *
class OvmVifDecoder(json.JSONDecoder):
    """JSON decoder turning a serialized VIF string into an OvmVif."""
    def decode(self, jStr):
        # Only 'mac' and 'bridge' are read from the JSON; the remaining
        # OvmVif attributes keep their class-level defaults.
        deDict = asciiLoads(jStr)
        vif = OvmVif()
        vif.mac = deDict['mac']
        vif.bridge = deDict['bridge']
        return vif
class OvmVifEncoder(json.JSONEncoder):
    """JSON encoder serializing the mac/bridge/type/name fields of an OvmVif."""
    def default(self, obj):
        if not isinstance(obj, OvmVif): raise Exception("%s is not instance of OvmVif"%type(obj))
        dct = {}
        # safeDictSet copies the attribute into the dict (skipping unset
        # values -- see OvmCommonModule).
        safeDictSet(obj, dct, 'mac')
        safeDictSet(obj, dct, 'bridge')
        safeDictSet(obj, dct, 'type')
        safeDictSet(obj, dct, 'name')
        return dct
def fromOvmVif(vif):
    """Serialize an OvmVif to a Gson-normalized JSON string."""
    return normalizeToGson(json.dumps(vif, cls=OvmVifEncoder))
def fromOvmVifList(vifList):
    """Serialize every OvmVif in *vifList* (see fromOvmVif)."""
    return [fromOvmVif(v) for v in vifList]
def toOvmVif(jStr):
    """Deserialize a JSON string into an OvmVif instance."""
    return json.loads(jStr, cls=OvmVifDecoder)
def toOvmVifList(jStr):
    """Deserialize each JSON string in *jStr* into an OvmVif instance."""
    return [toOvmVif(entry) for entry in jStr]
class OvmVif(OvmObject):
    """Virtual network interface description exchanged with the OVM agent."""
    name = ''    # interface name
    mac = ''     # MAC address
    bridge = ''  # bridge the VIF attaches to
    type = ''    # VIF type string
    mode = ''    # NOTE(review): never serialized by OvmVifEncoder -- confirm intended
    def toXenString(self):
        """Return the "mac,bridge,type" triple used as a Xen vif spec."""
        return "%s,%s,%s"%(self.mac, self.bridge, self.type)
| apache-2.0 |
matthewzimmer/carnd-behavioral-cloning | zimpy/networks/tensorflow/single_layer_linear.py | 2 | 12563 | import tensorflow as tf
import numpy as np
import math
import os
from zimpy.networks.base_neural_network import BaseNeuralNetwork, ConfigurationContext, HyperParametersContext
class SingleLayerHyperParametersContext(HyperParametersContext):
    """Hyper-parameters for SingleLayerLinear, adding the hidden layer width."""
    def __init__(
            self,
            hidden_layer_neuron_count=512,
            **kwargs
    ):
        """
        :param hidden_layer_neuron_count: number of neurons for the hidden layer
        :param kwargs: Arguments to pass into the super constructor
        """
        super(SingleLayerHyperParametersContext, self).__init__(**kwargs)
        self.hidden_layer_neuron_count = hidden_layer_neuron_count
class SingleLayerLinear(BaseNeuralNetwork):
    """One-hidden-layer fully connected classifier (TensorFlow 1.x API).

    ReLU hidden layer followed by a linear output layer, trained with
    softmax cross-entropy.
    """
    def fit(self):
        """Train on self.config.data, tracking accuracy/loss and saving the
        best-performing weights (via evaluate_accuracy) as it goes."""
        data = self.config.data
        hyper_parameters = self.config.hyper_parameters
        features, labels, logits = self.__build_graph()
        # Feed dicts for training, validation, test and prediction
        train_feed_dict = {features: data.train_flat, labels: data.train_labels}
        valid_feed_dict = {features: data.validate_flat, labels: data.validate_labels}
        test_feed_dict = {features: data.test_flat, labels: data.test_labels}
        predict_feed_dict = {features: data.predict_flat, labels: data.predict_labels}
        # Passing global_step to minimize() will increment it at each step.
        global_step = tf.Variable(0, trainable=False)
        # Define loss
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, labels))
        # Configure optimizer
        if self.config.optimizer_type == ConfigurationContext.OPTIMIZER_TYPE_GRADIENT_DESCENT:
            # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
            learning_rate = tf.train.exponential_decay(learning_rate=hyper_parameters.start_learning_rate,
                                                       global_step=global_step,
                                                       decay_steps=75000, decay_rate=0.96, staircase=True)
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss,
                                                                                                global_step=global_step)
        elif self.config.optimizer_type == ConfigurationContext.OPTIMIZER_TYPE_ADAGRAD:
            # Adagrad adapts per-parameter rates itself; keep the base rate constant.
            learning_rate = tf.constant(hyper_parameters.start_learning_rate)
            optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(loss)
        training_epochs = hyper_parameters.epochs
        batch_size = hyper_parameters.batch_size
        num_training = data.num_training
        batch_count = int(math.ceil(num_training / batch_size))
        display_step = 1
        # Launch the graph
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333, allow_growth=True)
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
        gpu_options = tf.GPUOptions(allow_growth=True)
        config = tf.ConfigProto(gpu_options=gpu_options)
        # config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(tf.initialize_all_variables())
            for epoch in range(training_epochs):
                for i in range(batch_count):
                    x_batch, y_batch, batch_start, batch_end = data.next_batch(batch_size)
                    batch_feed_dict = {features: x_batch, labels: y_batch}
                    # ImagePlotter.plot_images(ImageJitterer.jitter_images(data.train_orig[batch_start:batch_end]), batch_y)
                    # ImagePlotter.plot_images(data.train_orig[batch_start:batch_end], np.argmax(batch_y, axis=1))
                    # Run optimization op (backprop) and loss op (to get loss value)
                    sess.run(optimizer, feed_dict=batch_feed_dict)
                    # _, current_loss = sess.run([optimizer, loss], feed_dict=batch_feed_dict)
                    # self.track_loss(current_loss)
                    # Display logs per epoch step and very last batch iteration
                    if epoch % display_step == 0 or (epoch == (training_epochs - 1) and i == (batch_count - 1)):
                        total_iterations = (epoch + 1)
                        print("Epoch:", '%04d' % total_iterations, 'of', '%04d' % training_epochs)
                        self.config.hyper_parameters.end_learning_rate = sess.run(learning_rate)
                        # Calculate accuracy
                        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
                        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                        # store accuracies
                        self.train_accuracy = accuracy.eval(train_feed_dict)
                        self.validate_accuracy = accuracy.eval(valid_feed_dict)
                        self.test_accuracy = accuracy.eval(test_feed_dict)
                        self.predict_accuracy = accuracy.eval(predict_feed_dict)
                        # store predictions
                        self.train_predictions = tf.cast(correct_prediction.eval(train_feed_dict), "float").eval()
                        self.test_predictions = tf.cast(correct_prediction.eval(test_feed_dict), "float").eval()
                        self.predict_predictions = tf.cast(correct_prediction.eval(predict_feed_dict), "float").eval()
                        self.validate_predictions = tf.cast(correct_prediction.eval(valid_feed_dict), "float").eval()
                        self.loss = sess.run(loss, feed_dict=valid_feed_dict)
                        self.track_loss(self.loss)
                        print("  loss:               ", "{:.9f}".format(self.loss))
                        print("  batch accuracy:     ", accuracy.eval(batch_feed_dict))
                        print("  train accuracy:     ", accuracy.eval(train_feed_dict))
                        print("  validate accuracy:  ", accuracy.eval(valid_feed_dict))
                        print("  test accuracy:      ", accuracy.eval(test_feed_dict))
                        print("  predict accuracy:   ", accuracy.eval(predict_feed_dict))
                        print("  batch size:         ", batch_size)
                        print("  learning rate:      ", sess.run(learning_rate))
                        print('')
                        y_pred = tf.nn.softmax(logits)
                        top_5_op = tf.nn.top_k(y_pred, 5)
                        self.top_5 = sess.run(top_5_op,
                                              feed_dict={features: data.predict_flat, labels: data.predict_labels})
                        saved = self.evaluate_accuracy(sess, accuracy.eval(valid_feed_dict), total_iterations)
                        if saved == True:
                            # store the final results for later analysis and prediction runs
                            # NOTE: I wrote the serializer mechanic prior to discovering tf.train.Saver.
                            self.weights = {
                                'hidden_layer': self.weight_variables['hidden_layer'].eval(),
                                'out': self.weight_variables['out'].eval()
                            }
                            self.biases = {
                                'hidden_layer': self.bias_variables['hidden_layer'].eval(),
                                'out': self.bias_variables['out'].eval()
                            }
                            os.system('say "{:.002f}%"'.format(self.validate_accuracy * 100))
                        if total_iterations - self.last_improvement > hyper_parameters.required_accuracy_improvement:
                            msg = 'No improvement found in a while, stopping optimization after {} iterations. Final accuracy, {}% at iteration {}.'.format(
                                total_iterations, str(int(self.validate_accuracy * 100)), self.last_improvement)
                            print(msg)
                            os.system('say "{}"'.format(msg))
                            # NOTE(review): this break only exits the inner batch
                            # loop; the epoch loop continues -- confirm whether
                            # full early stopping was intended.
                            break
        print("Optimization Finished!")
    def top_k(self, x, y, model_name, k=5):
        """Restore *model_name* and print/return the top-k softmax predictions for x."""
        features, labels, logits = self.__build_graph()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            self.saver = tf.train.Saver()
            self.saver.restore(sess, self.save_dir + '/' + model_name)
            y_pred = tf.nn.softmax(logits)
            # Calculate predictions.
            # in_top_k_op = tf.nn.in_top_k(logits, true_labels, k)
            # top_1_op = tf.nn.top_k(logits, 1)
            # top_1_op = tf.nn.top_k(y_pred, 1)
            # top_1 = sess.run(top_1_op, feed_dict={features: images})
            top_k_op = tf.nn.top_k(y_pred, k)
            top_k = sess.run(top_k_op, feed_dict={features: x, labels: y})
            print('top {}:'.format(k))
            print('')
            print(top_k)
            print('')
            print(top_k.values)
            print('')
            print(top_k.indices)
            print('')
            print(top_k.values.shape)
            print('')
            print(top_k.indices.shape)
            return top_k
    def predict(self, images, true_labels, model_name):
        """Restore *model_name* and classify *images*.

        Returns (correct, cls_pred): a boolean array marking correct
        predictions and the predicted class indices.
        """
        features, labels, logits = self.__build_graph()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # This seems to take A LOOOOOOONG time so not doing it right now.
            # self.saver = tf.train.import_meta_graph(self.save_dir + '/' + model_name + '.meta')
            # self.saver.restore(sess, self.save_dir + '/' + model_name)
            self.saver = tf.train.Saver()
            self.saver.restore(sess, self.save_dir + '/' + model_name)
            # Number of images.
            num_images = len(images)
            # Allocate an array for the predicted classes which
            # will be calculated in batches and filled into this array.
            cls_pred = np.zeros(shape=num_images, dtype=np.int)
            feed_dict = {features: images, labels: true_labels}
            y_pred_cls = tf.argmax(logits, dimension=1)
            # y_true_cls = tf.argmax(labels, 1)
            # correct_prediction = tf.equal(y_pred_cls, y_true_cls)
            cls_pred = sess.run(y_pred_cls, feed_dict=feed_dict)
            # predicted_labels = tf.argmax(sess.run(labels, feed_dict=feed_dict), dimension=1).eval()
            # sign_names = [self.config.data.sign_names_map[label] for label in predicted_labels]
            # print(predicted_labels)
            # print(sign_names)
            # Calculate accuracy
            correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
            # predictions = tf.cast(correct_prediction.eval(feed_dict), "bool").eval()
            # print(predictions)
            correct = (np.argmax(true_labels, axis=1) == cls_pred)
            print("  predict accuracy:   {:004f}%".format(accuracy.eval(feed_dict) * 100))
            return correct, cls_pred
    def __build_graph(self):
        """
        Builds a 2 layer network with a single hidden layer with n hidden layer neurons.

        Returns the (features, labels, logits) graph tensors; also stores the
        weight/bias Variables on self for later serialization.
        :return:
        """
        data = self.config.data
        hyper_parameters = self.config.hyper_parameters
        image_size = data.train_flat.shape[1]
        num_classes = data.num_classes
        n_hidden_layer = hyper_parameters.hidden_layer_neuron_count
        self.top_5 = {}
        # Store layers weight & bias
        self.weight_variables = {
            'hidden_layer': tf.Variable(tf.random_normal([image_size, n_hidden_layer]), name='weights_hidden_layer'),
            'out': tf.Variable(tf.random_normal([n_hidden_layer, num_classes]), name='weights_out')
        }
        self.bias_variables = {
            'hidden_layer': tf.Variable(tf.zeros([n_hidden_layer]), name='biases_hidden_layer'),
            'out': tf.Variable(tf.zeros([num_classes]), name='biases_out')
        }
        features = tf.placeholder("float", [None, image_size])
        labels = tf.placeholder("float", [None, num_classes])
        # Hidden layer with RELU activation
        layer_1 = tf.add(
            tf.matmul(
                features,
                self.weight_variables['hidden_layer']
            ),
            self.bias_variables['hidden_layer']
        )
        layer_1 = tf.nn.relu(layer_1)
        # Output layer with linear activation
        logits = tf.matmul(layer_1, self.weight_variables['out']) + self.bias_variables['out']
        return features, labels, logits
| mit |
petewarden/tensorflow_makefile | tensorflow/python/ops/state_grad.py | 26 | 1080 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in state_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
# Variable assignment and scatter-update ops mutate state in place; no
# gradient is defined for them, so register them as non-differentiable.
ops.NoGradient("Assign")
ops.NoGradient("AssignAdd")
ops.NoGradient("AssignSub")
ops.NoGradient("ScatterAdd")
ops.NoGradient("ScatterSub")
| apache-2.0 |
buddly27/champollion | source/champollion/parser/__init__.py | 1 | 4953 | # :coding: utf-8
"""Parser to fetch all information from a :term:`Javascript` API in order
to document each element from a simple identifier.
"""
import os
from .js_module import fetch_environment as fetch_module_environment
from .js_file import fetch_environment as fetch_file_environment
def fetch_environment(path):
    """Return :term:`Javascript` environment dictionary from *path* structure.

    Raises :exc:`OSError` if the directory is incorrect.

    The environment is in the form of::

        {
            "module": {
                "module.id": {
                    "id": "module.id",
                    "name": "module_name",
                    "file_id": "file/id/index.js",
                    "description": "A module."
                    ...
                },
                ...
            },
            "file": {
                "file/id/index.js": {
                    "id": "file/id/index.js",
                    "module_id": "module_id",
                    "content": "...",
                    ...
                },
                ...
            },
            "class": {
                "class_id": {
                    "id": "class_id",
                    "module_id": "module_id",
                    "description": "A class."
                    ...
                },
                ...
            },
            "method": {
                "method_id": {
                    "id": "method_id",
                    "class_id": "class_id",
                    "module_id": "module_id",
                    "description": "A method."
                    ...
                },
                ...
            },
            "attribute": {
                "attribute_id": {
                    "id": "attribute_id",
                    "class_id": "class_id",
                    "module_id": "module_id",
                    "description": "An attribute."
                    ...
                },
                ...
            },
            "function": {
                "function_id": {
                    "id": "function_id",
                    "module_id": "module_id",
                    "description": "A function."
                    ...
                },
                ...
            },
            "data": {
                "data_id": {
                    "id": "data_id",
                    "module_id": "module_id",
                    "description": "A variable."
                    ...
                },
                ...
            }
        }

    """
    if not os.path.isdir(path) or not os.access(path, os.R_OK):
        raise OSError(
            "The javascript package directory is incorrect: {0}".format(path)
        )
    environment = {
        "module": {},
        "class": {},
        "method": {},
        "attribute": {},
        "function": {},
        "data": {},
        "file": {}
    }
    repository_name = os.path.basename(path)
    extensions = [".js", ".jsx"]
    for root, dirs, files in os.walk(path):
        # Folder chain from the repository root down to *root*, used to build
        # the "/"-joined file ids.
        root_folders = (
            [repository_name] + root.split(path)[-1].split(os.sep)[1:]
        )
        # Keep only javascript sources; skip hidden files.
        files[:] = [
            f for f in files
            if os.path.splitext(f)[1] in extensions
            and not f.startswith(".")
        ]
        # Prune hidden directories in place so os.walk does not descend into
        # them.
        dirs[:] = [
            d for d in dirs
            if not d.startswith(".")
        ]
        for _file in files:
            file_id = "/".join(root_folders + [_file])
            file_path = os.path.join(root, _file)
            # Fetch module environment. Already-registered module names are
            # passed so duplicate names can be disambiguated.
            _module_environment = fetch_module_environment(
                file_id, files, module_names=[
                    _module["name"] for _module in
                    environment["module"].values()
                ]
            )
            module_id = _module_environment["id"]
            environment["module"][module_id] = _module_environment
            # Fetch file environment
            _file_environment = fetch_file_environment(
                file_path, file_id, _module_environment["id"]
            )
            file_id = _file_environment["id"]
            method_environment = {}
            attribute_environment = {}
            # Extract methods and attributes from class environment to set it
            # in the top level environment.
            for _class in _file_environment["class"].values():
                method_environment.update(_class["method"].copy())
                attribute_environment.update(_class["attribute"].copy())
            environment["file"][file_id] = _file_environment
            environment["function"].update(_file_environment["function"])
            environment["data"].update(_file_environment["data"])
            environment["class"].update(_file_environment["class"])
            environment["method"].update(method_environment)
            environment["attribute"].update(attribute_environment)
    return environment
| apache-2.0 |
mit-crpg/openmc | tests/regression_tests/source_file/test.py | 11 | 2869 | #!/usr/bin/env python
import glob
import os
from tests.testing_harness import *
settings1="""<?xml version="1.0"?>
<settings>
<state_point batches="10" />
<source_point separate="true" />
<eigenvalue>
<batches>10</batches>
<inactive>5</inactive>
<particles>1000</particles>
</eigenvalue>
<source>
<space type="box">
<parameters>-4 -4 -4 4 4 4</parameters>
</space>
</source>
</settings>
"""
settings2 = """<?xml version="1.0"?>
<settings>
<eigenvalue>
<batches>10</batches>
<inactive>5</inactive>
<particles>1000</particles>
</eigenvalue>
<source>
<file> source.10.{0} </file>
</source>
</settings>
"""
class SourceFileTestHarness(TestHarness):
    """Harness that runs OpenMC once, then restarts it from the source file
    written by the first run, and compares the combined results.

    Relies on the base TestHarness for running OpenMC (_run_openmc),
    collecting results (_get_results) and comparing/overwriting them.
    """

    def execute_test(self):
        """Run OpenMC with the appropriate arguments and check the outputs."""
        try:
            self._run_openmc()
            self._test_output_created()
            # Second run: restart from the just-written source file.
            self._run_openmc_restart()
            results = self._get_results()
            self._write_results(results)
            self._compare_results()
        finally:
            self._cleanup()

    def update_results(self):
        """Update the results_true using the current version of OpenMC."""
        try:
            self._run_openmc()
            self._test_output_created()
            self._run_openmc_restart()
            results = self._get_results()
            self._write_results(results)
            self._overwrite_results()
        finally:
            self._cleanup()

    def _test_output_created(self):
        """Make sure statepoint and source files have been created."""
        statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))
        assert len(statepoint) == 1, 'Either multiple or no statepoint files ' \
            'exist.'
        assert statepoint[0].endswith('h5'), \
            'Statepoint file is not a HDF5 file.'
        source = glob.glob(os.path.join(os.getcwd(), 'source.10.*'))
        assert len(source) == 1, 'Either multiple or no source files exist.'
        assert source[0].endswith('h5'), \
            'Source file is not a HDF5 file.'

    def _run_openmc_restart(self):
        """Rewrite settings.xml to point at the source file, then re-run."""
        # Get the name of the source file.
        source = glob.glob(os.path.join(os.getcwd(), 'source.10.*'))
        # Write the new settings.xml file.
        with open('settings.xml','w') as fh:
            fh.write(settings2.format(source[0].split('.')[-1]))
        # Run OpenMC.
        self._run_openmc()

    def _cleanup(self):
        """Remove source files and restore the original settings.xml."""
        TestHarness._cleanup(self)
        output = glob.glob(os.path.join(os.getcwd(), 'source.*'))
        for f in output:
            if os.path.exists(f):
                os.remove(f)
        with open('settings.xml','w') as fh:
            fh.write(settings1)
def test_source_file():
    """Exercise the statepoint/source-file write-and-restart cycle."""
    SourceFileTestHarness('statepoint.10.h5').main()
| mit |
oppia/oppia | scripts/setup.py | 2 | 9861 | # Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python execution environent set up for all scripts."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import os
import subprocess
import sys
import tarfile
import python_utils
from . import clean
from . import common
_PARSER = argparse.ArgumentParser(
description="""
Python execution environent set up for all scripts.
""")
def create_directory(directory_path):
    """Creates a new directory (and intermediate directories). Does not do
    anything if the directory already exists.

    Args:
        directory_path: str. Directory path to be created.
    """
    try:
        os.makedirs(directory_path)
    except OSError:
        # The exists-then-create pattern is racy: another process may create
        # the path between the check and makedirs. Attempt the creation
        # unconditionally and treat "already exists" as success, mirroring
        # the original no-op behavior; re-raise genuine failures.
        if os.path.exists(directory_path):
            return
        raise
# Checks that the interpreter running this script is Python 2.7; prints
# platform-specific guidance and raises an exception otherwise.
def test_python_version():
    """Raise an Exception unless the running interpreter is Python 2.7."""
    running_python_version = '{0[0]}.{0[1]}'.format(sys.version_info)
    if running_python_version != '2.7':
        python_utils.PRINT('Please use Python2.7. Exiting...')
        # If OS is Windows, print helpful error message about adding Python to
        # path.
        if common.is_windows_os():
            common.print_each_string_after_two_new_lines([
                'It looks like you are using Windows. If you have Python '
                'installed,',
                'make sure it is in your PATH and that PYTHONPATH is set.',
                'If you have two versions of Python (ie, Python 2.7 and 3), '
                'specify 2.7 before other versions of Python when setting the '
                'PATH.',
                'Here are some helpful articles:',
                'http://docs.python-guide.org/en/latest/starting/install/win/',
                'https://stackoverflow.com/questions/3701646/how-to-add-to-the-'
                'pythonpath-in-windows-7'])
        # Exit when no suitable Python environment can be found.
        raise Exception('No suitable python version found.')
def download_and_install_package(url_to_retrieve, filename):
    """Downloads and installs package in Oppia tools directory.

    Args:
        url_to_retrieve: string. The url from which package is to be
            downloaded.
        filename: string. The name of the tar file.
    """
    python_utils.url_retrieve(url_to_retrieve, filename=filename)
    # Use a context manager so the tar handle is closed even if extraction
    # fails partway (the original leaked the handle on error).
    # NOTE(review): extractall trusts member paths inside the archive; this
    # is acceptable only because the archives come from fixed release URLs.
    # Do not reuse this helper for untrusted downloads.
    with tarfile.open(name=filename) as tar:
        tar.extractall(path=common.OPPIA_TOOLS_DIR)
    rename_yarn_folder(filename, common.OPPIA_TOOLS_DIR)
    os.remove(filename)
def rename_yarn_folder(filename, path):
    """Removes the `v` from the yarn folder name.

    E.g. 'yarn-v1.22.4.tar.gz' extracts to a folder 'yarn-v1.22.4', which
    is renamed to 'yarn-1.22.4'. Filenames without 'yarn' are left alone.

    Args:
        filename: string. The name of the tar file.
        path: string. The path of the yarn file.
    """
    if 'yarn' in filename:
        old_name = filename.split('.tar.gz')[0]
        # Remove only the first 'v' (the version prefix); joining on every
        # 'v' would corrupt any name containing additional 'v' characters.
        new_name = old_name.replace('v', '', 1)
        # os.path.join instead of '+ "/"' so the rename also works on the
        # Windows paths this script otherwise supports.
        os.rename(os.path.join(path, old_name), os.path.join(path, new_name))
def download_and_install_node():
    """Download and install node to Oppia tools directory.

    On Windows the zip release is fetched and expanded via powershell; on
    macOS/Linux x64 a prebuilt tarball is used; otherwise the source
    tarball is downloaded and built with configure/make.
    """
    outfile_name = 'node-download'

    if common.is_windows_os():
        if common.is_x64_architecture():
            architecture = 'x64'
        else:
            architecture = 'x86'
        extension = '.zip'
        node_file_name = 'node-v%s-win-%s' % (
            common.NODE_VERSION, architecture)
        url_to_retrieve = 'https://nodejs.org/dist/v%s/%s%s' % (
            common.NODE_VERSION, node_file_name, extension)
        python_utils.url_retrieve(url_to_retrieve, filename=outfile_name)
        subprocess.check_call(
            ['powershell.exe', '-c', 'expand-archive',
             outfile_name, '-DestinationPath',
             common.OPPIA_TOOLS_DIR])
    else:
        extension = '.tar.gz'
        if common.is_x64_architecture():
            # NOTE(review): on a 64-bit OS that is neither macOS nor Linux,
            # node_file_name is never assigned and the os.rename below would
            # raise NameError -- confirm whether other platforms are expected.
            if common.is_mac_os():
                node_file_name = 'node-v%s-darwin-x64' % (common.NODE_VERSION)
            elif common.is_linux_os():
                node_file_name = 'node-v%s-linux-x64' % (common.NODE_VERSION)
        else:
            # Non-x64: fall back to the plain source tarball.
            node_file_name = 'node-v%s' % common.NODE_VERSION
        download_and_install_package(
            'https://nodejs.org/dist/v%s/%s%s' % (
                common.NODE_VERSION, node_file_name, extension),
            outfile_name)
    os.rename(
        os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),
        common.NODE_PATH)

    # The plain source tarball (no prebuilt binaries) must be compiled.
    if node_file_name == 'node-v%s' % common.NODE_VERSION:
        with common.CD(common.NODE_PATH):
            subprocess.check_call(['./configure'])
            subprocess.check_call(['make'])
def main(args=None):
    """Runs the script to setup Oppia.

    Creates the tools/third-party/node_modules directories, installs
    Node.js and yarn if missing, locates a Chrome binary, and exports
    CHROME_BIN for the test runners.
    """
    unused_parsed_args = _PARSER.parse_args(args=args)
    test_python_version()

    # The second option allows this script to also be run from deployment
    # folders.
    if not os.getcwd().endswith('oppia') and not os.getcwd().endswith(
            'deploy-'):
        python_utils.PRINT('')
        python_utils.PRINT(
            'WARNING This script should be run from the oppia/ root folder.')
        python_utils.PRINT('')
        raise Exception('Invalid root directory.')

    # Set COMMON_DIR to the absolute path of the directory above OPPIA_DIR.
    # This is necessary because COMMON_DIR (or subsequent variables which
    # refer to it) may use it in a situation where relative paths won't work
    # as expected (such as $PYTHONPATH).
    create_directory(common.OPPIA_TOOLS_DIR)
    create_directory(common.THIRD_PARTY_DIR)
    common.create_readme(
        common.THIRD_PARTY_DIR,
        'This folder contains third party libraries used in Oppia codebase.\n'
        'You can regenerate this folder by deleting it and then running '
        'the start.py script.\n')
    create_directory(common.NODE_MODULES_PATH)
    common.create_readme(
        common.NODE_MODULES_PATH,
        'This folder contains node utilities used in Oppia codebase.\n'
        'You can regenerate this folder by deleting it and then running '
        'the start.py script.\n')

    # Download and install node.js.
    python_utils.PRINT(
        'Checking if node.js is installed in %s' % common.OPPIA_TOOLS_DIR)
    if not os.path.exists(common.NODE_PATH):
        python_utils.PRINT('Installing Node.js')
        download_and_install_node()
    # Change ownership of node_modules.
    # Note: on some machines, these commands seem to take quite a long time.
    if not common.is_windows_os():
        common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)
        common.recursive_chmod(common.NODE_MODULES_PATH, 0o744)

    # Download and install yarn.
    python_utils.PRINT(
        'Checking if yarn is installed in %s' % common.OPPIA_TOOLS_DIR)
    if not os.path.exists(common.YARN_PATH):
        python_utils.PRINT('Removing package-lock.json')
        clean.delete_file('package-lock.json')
        common.print_each_string_after_two_new_lines([
            'Installing yarn',
            'WARNING: Please note that Oppia uses Yarn to manage node packages',
            'do *NOT* use npm. For more information on how to use yarn,',
            'visit https://yarnpkg.com/en/docs/usage.'])

        # NB: Update .yarnrc if the yarn version below is changed.
        yarn_file_name = 'yarn-v%s.tar.gz' % common.YARN_VERSION
        download_and_install_package(
            'https://github.com/yarnpkg/yarn/releases/download/v%s/%s'
            % (common.YARN_VERSION, yarn_file_name), yarn_file_name)

    # Adjust path to support the default Chrome locations for Unix, Windows
    # and Mac OS. The first existing candidate wins, so order matters.
    if os.path.isfile('/usr/bin/google-chrome'):
        # Unix.
        chrome_bin = '/usr/bin/google-chrome'
    elif os.path.isfile('/usr/bin/chromium-browser'):
        # Unix.
        chrome_bin = '/usr/bin/chromium-browser'
    elif os.path.isfile('/usr/bin/chromium'):
        # Arch Linux.
        chrome_bin = '/usr/bin/chromium'
    elif os.path.isfile(
            '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):
        # Windows.
        chrome_bin = (
            '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')
    elif os.path.isfile(
            'c:\\Program Files (x86)\\Google\\Chrome\\Application\\Chrome.exe'):
        chrome_bin = (
            'c:\\Program Files (x86)\\Google\\Chrome\\Application\\Chrome.exe')
    elif os.path.isfile(
            '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):
        # WSL.
        chrome_bin = (
            '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')
    elif os.path.isfile(
            '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'):
        # Mac OS.
        chrome_bin = (
            '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
    else:
        python_utils.PRINT('Chrome is not found, stopping ...')
        raise Exception('Chrome not found.')

    os.environ['CHROME_BIN'] = chrome_bin
    python_utils.PRINT('Environment setup completed.')
# The 'no coverage' pragma is used as this line is un-testable. This is
# because it will only be called when setup.py is used as a script.
if __name__ == '__main__':  # pragma: no cover
    main()
| apache-2.0 |
icejoywoo/tornado | tornado/test/httpserver_test.py | 68 | 41032 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado import netutil
from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
from tornado import gen
from tornado.http1connection import HTTP1Connection
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado.netutil import ssl_options_to_context
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
from contextlib import closing
import datetime
import gzip
import os
import shutil
import socket
import ssl
import sys
import tempfile
from io import BytesIO
def read_stream_body(stream, callback):
    """Parse one HTTP response from ``stream``.

    When the response is complete, ``callback`` is invoked with a
    ``(headers, body)`` tuple, the body chunks having been joined into a
    single byte string.
    """
    received = []

    class _Collector(HTTPMessageDelegate):
        def headers_received(self, start_line, headers):
            self.headers = headers

        def data_received(self, chunk):
            received.append(chunk)

        def finish(self):
            body = b''.join(received)
            callback((self.headers, body))

    HTTP1Connection(stream, True).read_response(_Collector())
class HandlerBaseTestCase(AsyncHTTPTestCase):
    """Base case that serves the subclass's ``Handler`` at '/' and adds a
    JSON-decoding fetch helper."""

    def get_app(self):
        handler_cls = self.__class__.Handler
        return Application([('/', handler_cls)])

    def fetch_json(self, *args, **kwargs):
        resp = self.fetch(*args, **kwargs)
        resp.rethrow()
        return json_decode(resp.body)
class HelloWorldRequestHandler(RequestHandler):
    """Trivial handler that also asserts which protocol served the request."""

    def initialize(self, protocol="http"):
        # Protocol ("http" or "https") this handler expects to be served over.
        self.expected_protocol = protocol

    def get(self):
        actual = self.request.protocol
        if actual != self.expected_protocol:
            raise Exception("unexpected protocol")
        self.finish("Hello world")

    def post(self):
        body_size = len(self.request.body)
        self.finish("Got %d bytes in POST" % body_size)
# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
# ClientHello messages, which are rejected by SSLv3 and TLSv1
# servers. Note that while the OPENSSL_VERSION_INFO was formally
# introduced in python3.2, it was present but undocumented in
# python 2.7
# Decorator that skips a test when the linked OpenSSL is older than 1.0.
skipIfOldSSL = unittest.skipIf(
    getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
    "old version of ssl module and/or openssl")
class BaseSSLTest(AsyncHTTPSTestCase):
    """HTTPS test case whose handler expects the 'https' protocol."""

    def get_app(self):
        routes = [('/', HelloWorldRequestHandler, dict(protocol="https"))]
        return Application(routes)
class SSLTestMixin(object):
    """Shared SSL tests, parameterized by get_ssl_version() in subclasses."""

    def get_ssl_options(self):
        # Merge the protocol version under test into the standard test
        # certificate/key options.
        return dict(ssl_version=self.get_ssl_version(),
                    **AsyncHTTPSTestCase.get_ssl_options())

    def get_ssl_version(self):
        # Subclasses return an ssl.PROTOCOL_* constant.
        raise NotImplementedError()

    def test_ssl(self):
        response = self.fetch('/')
        self.assertEqual(response.body, b"Hello world")

    def test_large_post(self):
        response = self.fetch('/',
                              method='POST',
                              body='A' * 5000)
        self.assertEqual(response.body, b"Got 5000 bytes in POST")

    def test_non_ssl_request(self):
        # Make sure the server closes the connection when it gets a non-ssl
        # connection, rather than waiting for a timeout or otherwise
        # misbehaving.
        with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
            with ExpectLog(gen_log, 'Uncaught exception', required=False):
                self.http_client.fetch(
                    self.get_url("/").replace('https:', 'http:'),
                    self.stop,
                    request_timeout=3600,
                    connect_timeout=3600)
                response = self.wait()
        self.assertEqual(response.code, 599)

    def test_error_logging(self):
        # No stack traces are logged for SSL errors.
        with ExpectLog(gen_log, 'SSL Error') as expect_log:
            self.http_client.fetch(
                self.get_url("/").replace("https:", "http:"),
                self.stop)
            response = self.wait()
        self.assertEqual(response.code, 599)
        self.assertFalse(expect_log.logged_stack)
# Python's SSL implementation differs significantly between versions.
# For example, SSLv3 and TLSv1 throw an exception if you try to read
# from the socket before the handshake is complete, but the default
# of SSLv23 allows it.
class SSLv23Test(BaseSSLTest, SSLTestMixin):
    """Runs the shared SSL tests with the auto-negotiating SSLv23 protocol."""

    def get_ssl_version(self):
        return ssl.PROTOCOL_SSLv23
@skipIfOldSSL
class SSLv3Test(BaseSSLTest, SSLTestMixin):
    """Runs the shared SSL tests pinned to SSLv3."""

    def get_ssl_version(self):
        return ssl.PROTOCOL_SSLv3
@skipIfOldSSL
class TLSv1Test(BaseSSLTest, SSLTestMixin):
    """Runs the shared SSL tests pinned to TLSv1."""

    def get_ssl_version(self):
        return ssl.PROTOCOL_TLSv1
@unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
class SSLContextTest(BaseSSLTest, SSLTestMixin):
    """Runs the SSL tests passing an ssl.SSLContext instead of an options
    dict, exercising the ssl_options_to_context conversion path."""

    def get_ssl_options(self):
        context = ssl_options_to_context(
            AsyncHTTPSTestCase.get_ssl_options(self))
        assert isinstance(context, ssl.SSLContext)
        return context
class BadSSLOptionsTest(unittest.TestCase):
    """Invalid ssl_options must fail fast at HTTPServer construction time,
    not later when a connection arrives."""

    def test_missing_arguments(self):
        application = Application()
        # certfile is mandatory when ssl_options is a dict.
        self.assertRaises(KeyError, HTTPServer, application, ssl_options={
            "keyfile": "/__missing__.crt",
        })

    def test_missing_key(self):
        """A missing SSL key should cause an immediate exception."""

        application = Application()
        module_dir = os.path.dirname(__file__)
        existing_certificate = os.path.join(module_dir, 'test.crt')
        existing_key = os.path.join(module_dir, 'test.key')

        self.assertRaises((ValueError, IOError),
                          HTTPServer, application, ssl_options={
                              "certfile": "/__mising__.crt",
                          })
        self.assertRaises((ValueError, IOError),
                          HTTPServer, application, ssl_options={
                              "certfile": existing_certificate,
                              "keyfile": "/__missing__.key"
                          })

        # This actually works because both files exist
        HTTPServer(application, ssl_options={
            "certfile": existing_certificate,
            "keyfile": existing_key,
        })
class MultipartTestHandler(RequestHandler):
    """Echoes selected pieces of a multipart/form-data POST as JSON."""

    def post(self):
        upload = self.request.files["files"][0]
        payload = {
            "header": self.request.headers["X-Header-Encoding-Test"],
            "argument": self.get_argument("argument"),
            "filename": upload.filename,
            "filebody": _unicode(upload["body"]),
        }
        self.finish(payload)
# This test is also called from wsgi_test
class HTTPConnectionTest(AsyncHTTPTestCase):
    """Low-level tests that write raw HTTP bytes directly to the server
    socket (multipart encoding, newline variants, 100-continue)."""

    def get_handlers(self):
        return [("/multipart", MultipartTestHandler),
                ("/hello", HelloWorldRequestHandler)]

    def get_app(self):
        return Application(self.get_handlers())

    def raw_fetch(self, headers, body, newline=b"\r\n"):
        # Sends the given header lines plus a computed Content-Length and
        # the body, then returns the raw response body.
        with closing(IOStream(socket.socket())) as stream:
            stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
            self.wait()
            stream.write(
                newline.join(headers +
                             [utf8("Content-Length: %d" % len(body))]) +
                newline + newline + body)
            read_stream_body(stream, self.stop)
            headers, body = self.wait()
            return body

    def test_multipart_form(self):
        # Encodings here are tricky: Headers are latin1, bodies can be
        # anything (we use utf8 by default).
        response = self.raw_fetch([
            b"POST /multipart HTTP/1.0",
            b"Content-Type: multipart/form-data; boundary=1234567890",
            b"X-Header-encoding-test: \xe9",
        ],
            b"\r\n".join([
                b"Content-Disposition: form-data; name=argument",
                b"",
                u("\u00e1").encode("utf-8"),
                b"--1234567890",
                u('Content-Disposition: form-data; name="files"; filename="\u00f3"').encode("utf8"),
                b"",
                u("\u00fa").encode("utf-8"),
                b"--1234567890--",
                b"",
            ]))
        data = json_decode(response)
        self.assertEqual(u("\u00e9"), data["header"])
        self.assertEqual(u("\u00e1"), data["argument"])
        self.assertEqual(u("\u00f3"), data["filename"])
        self.assertEqual(u("\u00fa"), data["filebody"])

    def test_newlines(self):
        # We support both CRLF and bare LF as line separators.
        for newline in (b"\r\n", b"\n"):
            response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"",
                                      newline=newline)
            self.assertEqual(response, b'Hello world')

    def test_100_continue(self):
        # Run through a 100-continue interaction by hand:
        # When given Expect: 100-continue, we get a 100 response after the
        # headers, and then the real response after the body.
        stream = IOStream(socket.socket(), io_loop=self.io_loop)
        stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop)
        self.wait()
        stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
                                   b"Content-Length: 1024",
                                   b"Expect: 100-continue",
                                   b"Connection: close",
                                   b"\r\n"]), callback=self.stop)
        self.wait()
        stream.read_until(b"\r\n\r\n", self.stop)
        data = self.wait()
        self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
        stream.write(b"a" * 1024)
        stream.read_until(b"\r\n", self.stop)
        first_line = self.wait()
        self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
        stream.read_until(b"\r\n\r\n", self.stop)
        header_data = self.wait()
        headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
        stream.read_bytes(int(headers["Content-Length"]), self.stop)
        body = self.wait()
        self.assertEqual(body, b"Got 1024 bytes in POST")
        stream.close()
class EchoHandler(RequestHandler):
    """Writes the request arguments back as JSON, for GET and POST alike."""

    def get(self):
        args = recursive_unicode(self.request.arguments)
        self.write(args)

    # POST behaves identically to GET.
    post = get
class TypeCheckHandler(RequestHandler):
    """Records an error for each request attribute whose runtime type
    differs from the expected native str/bytes type; the handler then
    writes the (ideally empty) error dict back to the client."""

    def prepare(self):
        # Maps field name -> error message; empty means all types were OK.
        self.errors = {}
        fields = [
            ('method', str),
            ('uri', str),
            ('version', str),
            ('remote_ip', str),
            ('protocol', str),
            ('host', str),
            ('path', str),
            ('query', str),
        ]
        for field, expected_type in fields:
            self.check_type(field, getattr(self.request, field), expected_type)

        self.check_type('header_key', list(self.request.headers.keys())[0], str)
        self.check_type('header_value', list(self.request.headers.values())[0], str)

        self.check_type('cookie_key', list(self.request.cookies.keys())[0], str)
        self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str)
        # secure cookies

        self.check_type('arg_key', list(self.request.arguments.keys())[0], str)
        self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes)

    def post(self):
        self.check_type('body', self.request.body, bytes)
        self.write(self.errors)

    def get(self):
        self.write(self.errors)

    def check_type(self, name, obj, expected_type):
        # Exact type match intentionally (not isinstance): these tests pin
        # down native-string handling across Python 2/3.
        actual_type = type(obj)
        if expected_type != actual_type:
            self.errors[name] = "expected %s, got %s" % (expected_type,
                                                         actual_type)
class HTTPServerTest(AsyncHTTPTestCase):
    """Argument parsing and URL edge cases handled by HTTPServer."""

    def get_app(self):
        return Application([("/echo", EchoHandler),
                            ("/typecheck", TypeCheckHandler),
                            ("//doubleslash", EchoHandler),
                            ])

    def test_query_string_encoding(self):
        response = self.fetch("/echo?foo=%C3%A9")
        data = json_decode(response.body)
        self.assertEqual(data, {u("foo"): [u("\u00e9")]})

    def test_empty_query_string(self):
        response = self.fetch("/echo?foo=&foo=")
        data = json_decode(response.body)
        self.assertEqual(data, {u("foo"): [u(""), u("")]})

    def test_empty_post_parameters(self):
        response = self.fetch("/echo", method="POST", body="foo=&bar=")
        data = json_decode(response.body)
        self.assertEqual(data, {u("foo"): [u("")], u("bar"): [u("")]})

    def test_types(self):
        headers = {"Cookie": "foo=bar"}
        response = self.fetch("/typecheck?foo=bar", headers=headers)
        data = json_decode(response.body)
        self.assertEqual(data, {})

        response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
        data = json_decode(response.body)
        self.assertEqual(data, {})

    def test_double_slash(self):
        # urlparse.urlsplit (which tornado.httpserver used to use
        # incorrectly) would parse paths beginning with "//" as
        # protocol-relative urls.
        response = self.fetch("//doubleslash")
        self.assertEqual(200, response.code)
        self.assertEqual(json_decode(response.body), {})

    def test_malformed_body(self):
        # parse_qs is pretty forgiving, but it will fail on python 3
        # if the data is not utf8. On python 2 parse_qs will work,
        # but then the recursive_unicode call in EchoHandler will
        # fail.
        if str is bytes:
            return
        with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
            response = self.fetch(
                '/echo', method="POST",
                headers={'Content-Type': 'application/x-www-form-urlencoded'},
                body=b'\xe9')
        self.assertEqual(200, response.code)
        self.assertEqual(b'{}', response.body)
class HTTPServerRawTest(AsyncHTTPTestCase):
    """Tests that bypass the HTTP client and write raw (possibly
    malformed) bytes directly to the server socket."""

    def get_app(self):
        return Application([
            ('/echo', EchoHandler),
        ])

    def setUp(self):
        super(HTTPServerRawTest, self).setUp()
        self.stream = IOStream(socket.socket())
        self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
        self.wait()

    def tearDown(self):
        self.stream.close()
        super(HTTPServerRawTest, self).tearDown()

    def test_empty_request(self):
        # Opening and closing a connection without sending anything must not
        # crash the server.
        self.stream.close()
        self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
        self.wait()

    def test_malformed_first_line(self):
        with ExpectLog(gen_log, '.*Malformed HTTP request line'):
            self.stream.write(b'asdf\r\n\r\n')
            # TODO: need an async version of ExpectLog so we don't need
            # hard-coded timeouts here.
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
                                     self.stop)
            self.wait()

    def test_malformed_headers(self):
        with ExpectLog(gen_log, '.*Malformed HTTP headers'):
            self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
                                     self.stop)
            self.wait()

    def test_chunked_request_body(self):
        # Chunked requests are not widely supported and we don't have a way
        # to generate them in AsyncHTTPClient, but HTTPServer will read them.
        self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: chunked
Content-Type: application/x-www-form-urlencoded

4
foo=
3
bar
0

""".replace(b"\n", b"\r\n"))
        read_stream_body(self.stream, self.stop)
        headers, response = self.wait()
        self.assertEqual(json_decode(response), {u('foo'): [u('bar')]})
class XHeaderTest(HandlerBaseTestCase):
    """With xheaders=True, X-Real-IP / X-Forwarded-For override remote_ip
    and X-Scheme / X-Forwarded-Proto override the protocol; invalid values
    fall back to the connection's real properties."""

    class Handler(RequestHandler):
        def get(self):
            self.write(dict(remote_ip=self.request.remote_ip,
                            remote_protocol=self.request.protocol))

    def get_httpserver_options(self):
        return dict(xheaders=True)

    def test_ip_headers(self):
        self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")

        valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
        self.assertEqual(
            self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
            "4.4.4.4")

        # For X-Forwarded-For the last entry in the list wins.
        valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
        self.assertEqual(
            self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
            "4.4.4.4")

        valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
        self.assertEqual(
            self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
            "2620:0:1cfe:face:b00c::3")

        valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
        self.assertEqual(
            self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
            "2620:0:1cfe:face:b00c::3")

        # Values that are not valid IP addresses are ignored.
        invalid_chars = {"X-Real-IP": "4.4.4.4<script>"}
        self.assertEqual(
            self.fetch_json("/", headers=invalid_chars)["remote_ip"],
            "127.0.0.1")

        invalid_chars_list = {"X-Forwarded-For": "4.4.4.4, 5.5.5.5<script>"}
        self.assertEqual(
            self.fetch_json("/", headers=invalid_chars_list)["remote_ip"],
            "127.0.0.1")

        invalid_host = {"X-Real-IP": "www.google.com"}
        self.assertEqual(
            self.fetch_json("/", headers=invalid_host)["remote_ip"],
            "127.0.0.1")

    def test_scheme_headers(self):
        self.assertEqual(self.fetch_json("/")["remote_protocol"], "http")

        https_scheme = {"X-Scheme": "https"}
        self.assertEqual(
            self.fetch_json("/", headers=https_scheme)["remote_protocol"],
            "https")

        https_forwarded = {"X-Forwarded-Proto": "https"}
        self.assertEqual(
            self.fetch_json("/", headers=https_forwarded)["remote_protocol"],
            "https")

        # Only recognized scheme values are honored.
        bad_forwarded = {"X-Forwarded-Proto": "unknown"}
        self.assertEqual(
            self.fetch_json("/", headers=bad_forwarded)["remote_protocol"],
            "http")
class SSLXHeaderTest(AsyncHTTPSTestCase, HandlerBaseTestCase):
    """Like XHeaderTest but over HTTPS: the default protocol is 'https'
    and only recognized X-Scheme values may downgrade it."""

    def get_app(self):
        return Application([('/', XHeaderTest.Handler)])

    def get_httpserver_options(self):
        output = super(SSLXHeaderTest, self).get_httpserver_options()
        output['xheaders'] = True
        return output

    def test_request_without_xprotocol(self):
        self.assertEqual(self.fetch_json("/")["remote_protocol"], "https")

        http_scheme = {"X-Scheme": "http"}
        self.assertEqual(
            self.fetch_json("/", headers=http_scheme)["remote_protocol"], "http")

        bad_scheme = {"X-Scheme": "unknown"}
        self.assertEqual(
            self.fetch_json("/", headers=bad_scheme)["remote_protocol"], "https")
class ManualProtocolTest(HandlerBaseTestCase):
    """A manually configured server protocol overrides auto-detection."""

    class Handler(RequestHandler):
        def get(self):
            self.write({'protocol': self.request.protocol})

    def get_httpserver_options(self):
        return {'protocol': 'https'}

    def test_manual_protocol(self):
        reported = self.fetch_json('/')['protocol']
        self.assertEqual(reported, 'https')
@unittest.skipIf(not hasattr(socket, 'AF_UNIX') or sys.platform == 'cygwin',
                 "unix sockets not supported on this platform")
class UnixSocketTest(AsyncTestCase):
    """HTTPServers can listen on Unix sockets too.

    Why would you want to do this? Nginx can proxy to backends listening
    on unix sockets, for one thing (and managing a namespace for unix
    sockets can be easier than managing a bunch of TCP port numbers).

    Unfortunately, there's no way to specify a unix socket in a url for
    an HTTP client, so we have to test this by hand.
    """
    def setUp(self):
        super(UnixSocketTest, self).setUp()
        # Bind the server to a socket file in a throwaway temp directory and
        # open a raw client stream to it.
        self.tmpdir = tempfile.mkdtemp()
        self.sockfile = os.path.join(self.tmpdir, "test.sock")
        sock = netutil.bind_unix_socket(self.sockfile)
        app = Application([("/hello", HelloWorldRequestHandler)])
        self.server = HTTPServer(app, io_loop=self.io_loop)
        self.server.add_socket(sock)
        self.stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
        self.stream.connect(self.sockfile, self.stop)
        self.wait()

    def tearDown(self):
        self.stream.close()
        self.server.stop()
        shutil.rmtree(self.tmpdir)
        super(UnixSocketTest, self).tearDown()

    def test_unix_socket(self):
        self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
        self.stream.read_until(b"\r\n", self.stop)
        response = self.wait()
        self.assertEqual(response, b"HTTP/1.1 200 OK\r\n")
        self.stream.read_until(b"\r\n\r\n", self.stop)
        headers = HTTPHeaders.parse(self.wait().decode('latin1'))
        self.stream.read_bytes(int(headers["Content-Length"]), self.stop)
        body = self.wait()
        self.assertEqual(body, b"Hello world")

    def test_unix_socket_bad_request(self):
        # Unix sockets don't have remote addresses so they just return an
        # empty string.
        with ExpectLog(gen_log, "Malformed HTTP message from"):
            self.stream.write(b"garbage\r\n\r\n")
            self.stream.read_until_close(self.stop)
            response = self.wait()
        self.assertEqual(response, b"")
class KeepAliveTest(AsyncHTTPTestCase):
"""Tests various scenarios for HTTP 1.1 keep-alive support.
These tests don't use AsyncHTTPClient because we want to control
connection reuse and closing.
"""
def get_app(self):
class HelloHandler(RequestHandler):
def get(self):
self.finish('Hello world')
def post(self):
self.finish('Hello world')
class LargeHandler(RequestHandler):
def get(self):
# 512KB should be bigger than the socket buffers so it will
# be written out in chunks.
self.write(''.join(chr(i % 256) * 1024 for i in range(512)))
class FinishOnCloseHandler(RequestHandler):
@asynchronous
def get(self):
self.flush()
def on_connection_close(self):
# This is not very realistic, but finishing the request
# from the close callback has the right timing to mimic
# some errors seen in the wild.
self.finish('closed')
return Application([('/', HelloHandler),
('/large', LargeHandler),
('/finish_on_close', FinishOnCloseHandler)])
def setUp(self):
super(KeepAliveTest, self).setUp()
self.http_version = b'HTTP/1.1'
def tearDown(self):
# We just closed the client side of the socket; let the IOLoop run
# once to make sure the server side got the message.
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
self.wait()
if hasattr(self, 'stream'):
self.stream.close()
super(KeepAliveTest, self).tearDown()
# The next few methods are a crude manual http client
def connect(self):
self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
def read_headers(self):
self.stream.read_until(b'\r\n', self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(b'HTTP/1.1 200'), first_line)
self.stream.read_until(b'\r\n\r\n', self.stop)
header_bytes = self.wait()
headers = HTTPHeaders.parse(header_bytes.decode('latin1'))
return headers
def read_response(self):
self.headers = self.read_headers()
self.stream.read_bytes(int(self.headers['Content-Length']), self.stop)
body = self.wait()
self.assertEqual(b'Hello world', body)
def close(self):
self.stream.close()
del self.stream
def test_two_requests(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.close()
def test_request_close(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\nConnection: close\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.close()
# keepalive is supported for http 1.0 too, but it's opt-in
def test_http10(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.assertTrue('Connection' not in self.headers)
self.close()
def test_http10_keepalive(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
def test_http10_keepalive_extra_crlf(self):
    """A stray CRLF after an HTTP/1.0 request must not break keep-alive."""
    self.http_version = b'HTTP/1.0'
    self.connect()
    # First request carries a trailing blank line; the server must skip it.
    self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n\r\n')
    self.read_response()
    self.assertEqual(self.headers['Connection'], 'Keep-Alive')
    self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
    self.read_response()
    self.assertEqual(self.headers['Connection'], 'Keep-Alive')
    self.close()
def test_pipelined_requests(self):
    """Two requests written back-to-back both receive responses."""
    self.connect()
    self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
    self.read_response()
    self.read_response()
    self.close()
def test_pipelined_cancel(self):
    """Closing mid-pipeline (after reading only one response) is safe."""
    self.connect()
    self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
    # only read once
    self.read_response()
    self.close()
def test_cancel_during_download(self):
    """Closing the connection mid-body must not wedge the server."""
    self.connect()
    self.stream.write(b'GET /large HTTP/1.1\r\n\r\n')
    self.read_headers()
    # Read only part of the (large) body, then drop the connection.
    self.stream.read_bytes(1024, self.stop)
    self.wait()
    self.close()
def test_finish_while_closed(self):
    """The handler may finish after the client has already disconnected."""
    self.connect()
    self.stream.write(b'GET /finish_on_close HTTP/1.1\r\n\r\n')
    self.read_headers()
    # Hang up before the handler finishes; the server must tolerate it.
    self.close()
def test_keepalive_chunked(self):
    """A chunked (empty) HTTP/1.0 body still allows connection reuse."""
    self.http_version = b'HTTP/1.0'
    self.connect()
    # Zero-length chunked body: the terminating "0" chunk ends the request.
    self.stream.write(b'POST / HTTP/1.0\r\nConnection: keep-alive\r\n'
                      b'Transfer-Encoding: chunked\r\n'
                      b'\r\n0\r\n')
    self.read_response()
    self.assertEqual(self.headers['Connection'], 'Keep-Alive')
    self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
    self.read_response()
    self.assertEqual(self.headers['Connection'], 'Keep-Alive')
    self.close()
class GzipBaseTest(object):
    """Shared fixture for request-body decompression tests.

    Subclasses mix this into AsyncHTTPTestCase and decide (via
    ``get_httpserver_options``) whether the server decompresses requests.
    """
    def get_app(self):
        return Application([('/', EchoHandler)])

    def post_gzip(self, body):
        """POST *body* gzip-compressed, with ``Content-Encoding: gzip`` set."""
        bytesio = BytesIO()
        gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio)
        gzip_file.write(utf8(body))
        gzip_file.close()
        compressed_body = bytesio.getvalue()
        return self.fetch('/', method='POST', body=compressed_body,
                          headers={'Content-Encoding': 'gzip'})

    def test_uncompressed(self):
        # Plain bodies must work regardless of the decompress option.
        response = self.fetch('/', method='POST', body='foo=bar')
        self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
    """With decompress_request=True, gzipped bodies are transparently decoded."""
    def get_httpserver_options(self):
        return dict(decompress_request=True)

    def test_gzip(self):
        response = self.post_gzip('foo=bar')
        self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase):
    """Without decompression enabled, gzipped bodies fail to parse."""
    def test_gzip_unsupported(self):
        # Gzip support is opt-in; without it the server fails to parse
        # the body (but parsing form bodies is currently just a log message,
        # not a fatal error).
        with ExpectLog(gen_log, "Unsupported Content-Encoding"):
            response = self.post_gzip('foo=bar')
        self.assertEquals(json_decode(response.body), {})
class StreamingChunkSizeTest(AsyncHTTPTestCase):
    """Request bodies must reach ``data_received`` in pieces no larger than
    the server's configured ``chunk_size``, for plain, gzipped and
    chunked-encoded uploads alike.
    """
    # 50 characters long, and repetitive so it can be compressed.
    BODY = b'01234567890123456789012345678901234567890123456789'
    CHUNK_SIZE = 16

    def get_http_client(self):
        # body_producer doesn't work on curl_httpclient, so override the
        # configured AsyncHTTPClient implementation.
        return SimpleAsyncHTTPClient(io_loop=self.io_loop)

    def get_httpserver_options(self):
        return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True)

    class MessageDelegate(HTTPMessageDelegate):
        """Records the length of every body chunk and echoes the list as JSON."""
        def __init__(self, connection):
            self.connection = connection

        def headers_received(self, start_line, headers):
            self.chunk_lengths = []

        def data_received(self, chunk):
            self.chunk_lengths.append(len(chunk))

        def finish(self):
            response_body = utf8(json_encode(self.chunk_lengths))
            self.connection.write_headers(
                ResponseStartLine('HTTP/1.1', 200, 'OK'),
                HTTPHeaders({'Content-Length': str(len(response_body))}))
            self.connection.write(response_body)
            self.connection.finish()

    def get_app(self):
        # Bypass Application/RequestHandler: talk to the connection delegate
        # directly so raw chunk boundaries are observable.
        class App(HTTPServerConnectionDelegate):
            def start_request(self, server_conn, request_conn):
                return StreamingChunkSizeTest.MessageDelegate(request_conn)
        return App()

    def fetch_chunk_sizes(self, **kwargs):
        """POST with *kwargs*, return the chunk sizes seen by the server.

        Asserts every chunk is non-empty, no larger than CHUNK_SIZE, and
        that the sizes sum to the full body length.
        """
        response = self.fetch('/', method='POST', **kwargs)
        response.rethrow()
        chunks = json_decode(response.body)
        self.assertEqual(len(self.BODY), sum(chunks))
        for chunk_size in chunks:
            self.assertLessEqual(chunk_size, self.CHUNK_SIZE,
                                 'oversized chunk: ' + str(chunks))
            self.assertGreater(chunk_size, 0,
                               'empty chunk: ' + str(chunks))
        return chunks

    def compress(self, body):
        """Gzip *body*, asserting compression actually shrank it."""
        bytesio = BytesIO()
        gzfile = gzip.GzipFile(mode='w', fileobj=bytesio)
        gzfile.write(body)
        gzfile.close()
        compressed = bytesio.getvalue()
        if len(compressed) >= len(body):
            raise Exception("body did not shrink when compressed")
        return compressed

    def test_regular_body(self):
        chunks = self.fetch_chunk_sizes(body=self.BODY)
        # Without compression we know exactly what to expect.
        self.assertEqual([16, 16, 16, 2], chunks)

    def test_compressed_body(self):
        self.fetch_chunk_sizes(body=self.compress(self.BODY),
                               headers={'Content-Encoding': 'gzip'})
        # Compression creates irregular boundaries so the assertions
        # in fetch_chunk_sizes are as specific as we can get.

    def test_chunked_body(self):
        def body_producer(write):
            write(self.BODY[:20])
            write(self.BODY[20:])
        chunks = self.fetch_chunk_sizes(body_producer=body_producer)
        # HTTP chunk boundaries translate to application-visible breaks
        self.assertEqual([16, 4, 16, 14], chunks)

    def test_chunked_compressed(self):
        compressed = self.compress(self.BODY)
        self.assertGreater(len(compressed), 20)
        def body_producer(write):
            write(compressed[:20])
            write(compressed[20:])
        self.fetch_chunk_sizes(body_producer=body_producer,
                               headers={'Content-Encoding': 'gzip'})
class MaxHeaderSizeTest(AsyncHTTPTestCase):
    """Requests whose header block exceeds ``max_header_size`` are rejected."""
    def get_app(self):
        return Application([('/', HelloWorldRequestHandler)])

    def get_httpserver_options(self):
        return dict(max_header_size=1024)

    def test_small_headers(self):
        response = self.fetch("/", headers={'X-Filler': 'a' * 100})
        response.rethrow()
        self.assertEqual(response.body, b"Hello world")

    def test_large_headers(self):
        # The server refuses to read past the limit, so the client sees a
        # connection-level error (599) instead of an HTTP response.
        with ExpectLog(gen_log, "Unsatisfiable read"):
            response = self.fetch("/", headers={'X-Filler': 'a' * 1000})
        self.assertEqual(response.code, 599)
@skipOnTravis
class IdleTimeoutTest(AsyncHTTPTestCase):
    """Connections idle longer than ``idle_connection_timeout`` are closed."""
    def get_app(self):
        return Application([('/', HelloWorldRequestHandler)])

    def get_httpserver_options(self):
        return dict(idle_connection_timeout=0.1)

    def setUp(self):
        super(IdleTimeoutTest, self).setUp()
        self.streams = []

    def tearDown(self):
        super(IdleTimeoutTest, self).tearDown()
        for stream in self.streams:
            stream.close()

    def connect(self):
        """Open a raw client connection, registered for cleanup in tearDown."""
        stream = IOStream(socket.socket())
        stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
        self.wait()
        self.streams.append(stream)
        return stream

    def test_unused_connection(self):
        # A connection that never sends a request is reaped by the timeout.
        stream = self.connect()
        stream.set_close_callback(self.stop)
        self.wait()

    def test_idle_after_use(self):
        stream = self.connect()
        stream.set_close_callback(lambda: self.stop("closed"))
        # Use the connection twice to make sure keep-alives are working
        for i in range(2):
            stream.write(b"GET / HTTP/1.1\r\n\r\n")
            stream.read_until(b"\r\n\r\n", self.stop)
            self.wait()
            stream.read_bytes(11, self.stop)
            data = self.wait()
            self.assertEqual(data, b"Hello world")
        # Now let the timeout trigger and close the connection.
        data = self.wait()
        self.assertEqual(data, "closed")
class BodyLimitsTest(AsyncHTTPTestCase):
    """Enforcement of ``max_body_size`` / ``body_timeout`` and their
    per-request overrides, for both buffered and streaming handlers.
    """
    def get_app(self):
        class BufferedHandler(RequestHandler):
            def put(self):
                self.write(str(len(self.request.body)))

        @stream_request_body
        class StreamingHandler(RequestHandler):
            def initialize(self):
                self.bytes_read = 0

            def prepare(self):
                # Query arguments let individual tests raise the body-size
                # limit or shrink the body timeout for just this request.
                if 'expected_size' in self.request.arguments:
                    self.request.connection.set_max_body_size(
                        int(self.get_argument('expected_size')))
                if 'body_timeout' in self.request.arguments:
                    self.request.connection.set_body_timeout(
                        float(self.get_argument('body_timeout')))

            def data_received(self, data):
                self.bytes_read += len(data)

            def put(self):
                self.write(str(self.bytes_read))

        return Application([('/buffered', BufferedHandler),
                            ('/streaming', StreamingHandler)])

    def get_httpserver_options(self):
        return dict(body_timeout=3600, max_body_size=4096)

    def get_http_client(self):
        # body_producer doesn't work on curl_httpclient, so override the
        # configured AsyncHTTPClient implementation.
        return SimpleAsyncHTTPClient(io_loop=self.io_loop)

    def test_small_body(self):
        response = self.fetch('/buffered', method='PUT', body=b'a' * 4096)
        self.assertEqual(response.body, b'4096')
        response = self.fetch('/streaming', method='PUT', body=b'a' * 4096)
        self.assertEqual(response.body, b'4096')

    def test_large_body_buffered(self):
        with ExpectLog(gen_log, '.*Content-Length too long'):
            response = self.fetch('/buffered', method='PUT', body=b'a' * 10240)
        self.assertEqual(response.code, 599)

    def test_large_body_buffered_chunked(self):
        with ExpectLog(gen_log, '.*chunked body too large'):
            response = self.fetch('/buffered', method='PUT',
                                  body_producer=lambda write: write(b'a' * 10240))
        self.assertEqual(response.code, 599)

    def test_large_body_streaming(self):
        with ExpectLog(gen_log, '.*Content-Length too long'):
            response = self.fetch('/streaming', method='PUT', body=b'a' * 10240)
        self.assertEqual(response.code, 599)

    def test_large_body_streaming_chunked(self):
        with ExpectLog(gen_log, '.*chunked body too large'):
            response = self.fetch('/streaming', method='PUT',
                                  body_producer=lambda write: write(b'a' * 10240))
        self.assertEqual(response.code, 599)

    def test_large_body_streaming_override(self):
        response = self.fetch('/streaming?expected_size=10240', method='PUT',
                              body=b'a' * 10240)
        self.assertEqual(response.body, b'10240')

    def test_large_body_streaming_chunked_override(self):
        response = self.fetch('/streaming?expected_size=10240', method='PUT',
                              body_producer=lambda write: write(b'a' * 10240))
        self.assertEqual(response.body, b'10240')

    @gen_test
    def test_timeout(self):
        stream = IOStream(socket.socket())
        try:
            yield stream.connect(('127.0.0.1', self.get_http_port()))
            # Use a raw stream because AsyncHTTPClient won't let us read a
            # response without finishing a body.
            stream.write(b'PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n'
                         b'Content-Length: 42\r\n\r\n')
            with ExpectLog(gen_log, 'Timeout reading body'):
                response = yield stream.read_until_close()
            self.assertEqual(response, b'')
        finally:
            stream.close()

    @gen_test
    def test_body_size_override_reset(self):
        # The max_body_size override is reset between requests.
        stream = IOStream(socket.socket())
        try:
            yield stream.connect(('127.0.0.1', self.get_http_port()))
            # Use a raw stream so we can make sure it's all on one connection.
            stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n'
                         b'Content-Length: 10240\r\n\r\n')
            stream.write(b'a' * 10240)
            headers, response = yield gen.Task(read_stream_body, stream)
            self.assertEqual(response, b'10240')
            # Without the ?expected_size parameter, we get the old default value
            stream.write(b'PUT /streaming HTTP/1.1\r\n'
                         b'Content-Length: 10240\r\n\r\n')
            with ExpectLog(gen_log, '.*Content-Length too long'):
                data = yield stream.read_until_close()
            self.assertEqual(data, b'')
        finally:
            stream.close()
class LegacyInterfaceTest(AsyncHTTPTestCase):
    """The pre-delegate ``request_callback`` interface still works."""
    def get_app(self):
        # The old request_callback interface does not implement the
        # delegate interface, and writes its response via request.write
        # instead of request.connection.write_headers.
        def handle_request(request):
            self.http1 = request.version.startswith("HTTP/1.")
            if not self.http1:
                # This test will be skipped if we're using HTTP/2,
                # so just close it out cleanly using the modern interface.
                request.connection.write_headers(
                    ResponseStartLine('', 200, 'OK'),
                    HTTPHeaders())
                request.connection.finish()
                return
            # Legacy style: the status line and headers are written by hand.
            message = b"Hello world"
            request.write(utf8("HTTP/1.1 200 OK\r\n"
                               "Content-Length: %d\r\n\r\n" % len(message)))
            request.write(message)
            request.finish()
        return handle_request

    def test_legacy_interface(self):
        response = self.fetch('/')
        if not self.http1:
            self.skipTest("requires HTTP/1.x")
        self.assertEqual(response.body, b"Hello world")
| apache-2.0 |
Sup3Roque/Pancas | plugin.video.loganaddon/resources/lib/sources/mvsnap_mv_tv.py | 20 | 4443 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,json
from resources.lib.libraries import client
from resources.lib import resolvers
class source:
    """Specto scraper for mvsnap.com (movies and TV episodes).

    Every lookup method returns None (or an empty source list) on failure
    so the calling framework simply skips this source.
    """

    def __init__(self):
        self.base_link = 'http://mvsnap.com'
        self.search_link = '/v1/api/search?query=%s'

    def get_movie(self, imdb, title, year):
        """Return the site-relative movie URL for *imdb*, or None on failure."""
        try:
            query = self.search_link % imdb
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = json.loads(result)
            # First search hit's slug identifies the movie page.
            result = result['movies'][0]['slug']

            url = '/movies/%s' % result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        # FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            return

    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Return an opaque show token ("title (year)"), or None on failure."""
        try:
            url = '%s (%s)' % (tvshowtitle, year)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except Exception:
            return

    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Return '/tv-shows/<slug>?SxxEyy' for the episode, or None."""
        try:
            query = self.search_link % imdb
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = json.loads(result)
            result = result['movies']

            season = '%02d' % int(season)
            episode = '%02d' % int(episode)

            # Pick the entry whose long_title ends with the requested
            # season number.
            result = [(i['slug'], i['long_title']) for i in result]
            result = [(i[0], re.compile('(\d*)$').findall(i[1])) for i in result]
            result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
            result = [i[0] for i in result if season == i[1]][0]

            url = '/tv-shows/%s?S%sE%s' % (result, season, episode)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except Exception:
            return

    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Scrape playable sources for *url*; returns a (possibly empty) list."""
        # Bound outside the try so the final `return sources` is always safe.
        sources = []
        try:
            if url == None: return sources

            # An episode URL carries the episode number in its query string.
            query = urlparse.urlparse(url).query
            try: query = '%02d' % int(re.compile('E(\d*)$').findall(query)[0])
            except Exception: query = ''

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = client.parseDOM(result, 'select', attrs = {'id': 'myDropdown'})[0]
            result = zip(client.parseDOM(result, 'option', ret='value'), client.parseDOM(result, 'option'))
            result = [i[0] for i in result if i[1].endswith(query) or query == ''][0]

            # "label|direct-url," entries are hosted directly by the site.
            direct = re.compile('(.+)[|](.+?)[,]').findall(result)

            if len(direct) > 0:
                quality = 'HD' if 'hd' in direct[0][0].lower() else 'SD'
                sources.append({'source': 'MVsnap', 'quality': quality, 'provider': 'MVsnap', 'url': direct[0][1]})
                return sources

            # Otherwise the option points at a Google Video redirect.
            url = urlparse.urljoin(self.base_link, result)
            url = client.source(url, output='geturl')
            if not 'google' in url: raise Exception()
            url = url.split('get_video_info')[0]

            url = resolvers.request(url)
            for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'MVsnap', 'url': i['url']})
            return sources
        except Exception:
            return sources

    def resolve(self, url):
        """Resolve *url* to its final playable location, fixing the scheme."""
        try:
            if url.startswith('stack://'): return url

            url = client.request(url, output='geturl')
            # Honor Google's requiressl flag; otherwise force plain http.
            if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
            else: url = url.replace('https://', 'http://')
            return url
        except Exception:
            return
| gpl-2.0 |
OscarPDR/projects_morelab | semantic_search/views.py | 1 | 3334 | # coding: utf-8
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.db.models import Sum, Min, Max
from funding_programs.models import FundingProgram
from projects.models import Project, AssignedEmployee
from semantic_search.forms import SemanticSearchForm
# Create your views here.
#########################
# View: semantic_search
#########################
def semantic_search(request):
    """Render the project search page.

    Filters projects by title substring, status, funding-program scope,
    year range and assigned researchers; `and_or` selects whether ALL or
    ANY of the chosen researchers must be assigned to a project.
    """
    active = False
    title = ''
    researchers = None
    status = 'Any'
    scope = 'All'
    start_year = 2004
    end_year = 2013
    and_or = 'OR'
    projects = None

    if request.method == 'POST':
        form = SemanticSearchForm(request.POST)
        if form.is_valid():
            active = True
            cd = form.cleaned_data
            title = cd['title']
            researchers = cd['researchers']
            status = cd['status']
            scope = cd['scope']
            start_year = int(cd['start_year'])
            end_year = int(cd['end_year'])
            and_or = cd['and_or']

            probable_projects = Project.objects.all()
            if title != '':
                probable_projects = probable_projects.filter(title__contains = title)
            if status != 'Any':
                probable_projects = probable_projects.filter(status = status)
            if scope != 'All':
                funding_program_ids = FundingProgram.objects.filter(geographical_scope = scope).values("id")
                probable_projects = probable_projects.filter(funding_program__in = funding_program_ids)
            probable_projects = probable_projects.filter(start_year__gte = start_year)
            probable_projects = probable_projects.filter(end_year__lte = end_year)
            probable_projects = probable_projects.order_by('title')

            # BUG FIX: the old test `researchers == []` was never true for a
            # Django QuerySet, so an empty researcher selection combined with
            # and_or == 'OR' returned no projects at all instead of skipping
            # the researcher filter. Use truthiness instead.
            if not researchers:
                projects = probable_projects
            else:
                researcher_ids = set(r.id for r in researchers)
                projects = []
                for project in probable_projects:
                    assigned_employees = AssignedEmployee.objects.filter(project_id = project.id)
                    employee_ids = set(e.employee_id for e in assigned_employees)
                    # AND: every selected researcher is assigned;
                    # OR: at least one selected researcher is assigned.
                    if and_or == 'AND' and researcher_ids.issubset(employee_ids):
                        projects.append(project)
                    elif and_or == 'OR' and researcher_ids & employee_ids:
                        projects.append(project)
    else:
        form = SemanticSearchForm()

    return render_to_response("semantic_search/searcher.html", {
            'active': active,
            'form': form,
            'title': title,
            'researchers': researchers,
            'status': status,
            'scope': scope,
            'start_year': start_year,
            'end_year': end_year,
            'and_or': and_or,
            'projects': projects,
        },
        context_instance = RequestContext(request))
| gpl-3.0 |
creasyw/IMTAphy | sandbox/default/lib/python2.4/site-packages/wnsbase/playground/plugins/Command.py | 3 | 2085 | ###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import optparse
import wnsbase.playground.Core
class Command(object):
    """Base class for playground commands.

    A command carries a name, a one-line rationale and a usage string;
    option handling is delegated to an optparse.OptionParser.
    """

    def __init__(self, name, rationale, usage):
        self.name = name
        self.rationale = rationale
        self.usage = usage
        self.numberOfArgs = 0
        self.optParser = optparse.OptionParser(usage=usage)

    def addOption(self, *args, **kwargs):
        """Forward an option definition to the underlying OptionParser."""
        self.optParser.add_option(*args, **kwargs)

    def startup(self, args):
        """Parse *args*; abort the playground on an argument-count mismatch."""
        self.options, self.args = self.optParser.parse_args(args)
        actual = len(self.args)
        expected = self.numberOfArgs
        if actual != expected:
            print("ERROR: Wrong number of arguments to command '%s' (%d instead of %d)" % (self.name, actual, expected))
            print(" args: %s" % (self.args))
            self.optParser.print_help()
            wnsbase.playground.Core.getCore().shutdown(1)

    def run(self):
        """Execute the command; subclasses override this."""
        pass

    def shutdown(self):
        """Teardown hook; subclasses may override this."""
        pass
| gpl-2.0 |
stackforge/poppy | poppy/manager/base/providers.py | 2 | 3390 | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from poppy.common import errors
class ProviderWrapper(object):
""""ProviderWrapper class."""
def create(self, ext, service_obj):
"""Create a provider
:param ext
:param service_obj
:returns: ext.obj.service_controller.create(service_obj)
"""
return ext.obj.service_controller.create(service_obj)
def update(self, ext, provider_details, service_obj):
"""Update a provider
:param ext
:param provider_details
:param service_obj
"""
try:
provider_detail = provider_details[ext.obj.provider_name]
except KeyError:
raise errors.BadProviderDetail(
"No provider detail information."
"Perhaps service has not been created")
provider_service_id = provider_detail.provider_service_id
return ext.obj.service_controller.update(
provider_service_id, service_obj)
def delete(self, ext, provider_details, project_id):
try:
provider_detail = provider_details[ext.obj.provider_name]
except KeyError:
raise errors.BadProviderDetail(
"No provider detail information."
"Perhaps service has not been created")
provider_service_id = provider_detail.provider_service_id
return ext.obj.service_controller.delete(project_id,
provider_service_id)
def purge(self, ext, service_obj, provider_details,
hard=False, purge_url=None):
try:
provider_detail = provider_details[ext.obj.provider_name]
except KeyError:
raise errors.BadProviderDetail(
"No provider detail information."
"Perhaps service has not been created")
provider_service_id = provider_detail.provider_service_id
return ext.obj.service_controller.purge(
provider_service_id,
service_obj,
hard,
purge_url)
def create_certificate(self, ext, cert_obj, enqueue, https_upgrade):
"""Create a certificate.
:param ext
:param cert_obj
:param enqueue
:param https_upgrade
:returns: ext.obj.certificate_controller.create_certificate(cert_obj,
enqueue, https_upgrade)
"""
return ext.obj.certificate_controller.create_certificate(
cert_obj,
enqueue,
https_upgrade
)
def delete_certificate(self, ext, cert_obj):
"""Delete a certificate.
:param ext
:param cert_obj
:returns: ext.obj.service_controller.delete_certificate(cert_obj)
"""
return ext.obj.certificate_controller.delete_certificate(cert_obj)
| apache-2.0 |
jendap/tensorflow | tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py | 21 | 11027 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vectorized Laplace distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import laplace
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"VectorLaplaceLinearOperator"
]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorLaplaceLinearOperator(
transformed_distribution.TransformedDistribution):
"""The vectorization of the Laplace distribution on `R^k`.
The vector laplace distribution is defined over `R^k`, and parameterized by
a (batch of) length-`k` `loc` vector (the means) and a (batch of) `k x k`
`scale` matrix: `covariance = 2 * scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-||y||_1) / Z,
y = inv(scale) @ (x - loc),
Z = 2**k |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||_1` denotes the `l1` norm of `y`, `sum_i |y_i|.
The VectorLaplace distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Laplace(loc=0, scale=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorLaplace` and `Vector` distributions in TensorFlow.
The `VectorLaplace` is a non-standard distribution that has useful properties.
The marginals `Y_1, ..., Y_k` are *not* Laplace random variables, due to
the fact that the sum of Laplace random variables is not Laplace.
Instead, `Y` is a vector whose components are linear combinations of Laplace
random variables. Thus, `Y` lives in the vector space generated by `vectors`
of Laplace distributions. This allows the user to decide the mean and
covariance (by setting `loc` and `scale`), while preserving some properties of
the Laplace distribution. In particular, the tails of `Y_i` will be (up to
polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Laplace random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate VectorLaplace with some desired covariance.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
# Divide scale by sqrt(2) so that the final covariance will be what we want.
vla = tfd.VectorLaplaceLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorLowerTriangular(scale / tf.sqrt(2.)))
# Covariance agrees with cholesky(cov) parameterization.
vla.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
# Compute the pdf of an`R^3` observation; return a scalar.
vla.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Vector Laplace's.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vla = tfd.VectorLaplaceLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
vla.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="VectorLaplaceLinearOperator"):
"""Construct Vector Laplace distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = 2 * scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents):
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(VectorLaplaceLinearOperator, self).__init__(
distribution=laplace.Laplace(
loc=array_ops.zeros([], dtype=scale.dtype),
scale=array_ops.ones([], dtype=scale.dtype)),
bijector=bijectors.AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(VectorLaplaceLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(VectorLaplaceLinearOperator, self)._prob(x)
def _mean(self):
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
if self.loc is None:
return array_ops.zeros(shape, self.dtype)
if has_static_shape and shape == self.loc.get_shape():
return array_ops.identity(self.loc)
# Add dummy tensor of zeros to broadcast. This is only necessary if shape
# != self.loc.shape, but we could not determine if this is the case.
return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype)
def _covariance(self):
# Let
# W = (w1,...,wk), with wj ~ iid Laplace(0, 1).
# Then this distribution is
# X = loc + LW,
# and since E[X] = loc,
# Cov(X) = E[LW W^T L^T] = L E[W W^T] L^T.
# Since E[wi wj] = 0 if i != j, and 2 if i == j, we have
# Cov(X) = 2 LL^T
if distribution_util.is_diagonal_scale(self.scale):
return 2. * array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return 2. * self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
  # Variance is diag(Cov(X)) = diag(2 * scale @ scale^T); each branch
  # computes it with as little materialization as the scale structure allows.
  if distribution_util.is_diagonal_scale(self.scale):
    return 2. * math_ops.square(self.scale.diag_part())
  elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
        self.scale.is_self_adjoint):
    # Self-adjoint operator: scale @ scale == scale @ scale^T, so the
    # adjoint argument can be skipped.
    return array_ops.matrix_diag_part(
        2. * self.scale.matmul(self.scale.to_dense()))
  else:
    return 2. * array_ops.matrix_diag_part(
        self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
  # stddev = sqrt(variance); the three branches mirror _variance() for the
  # same scale structures.
  if distribution_util.is_diagonal_scale(self.scale):
    return np.sqrt(2) * math_ops.abs(self.scale.diag_part())
  elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
        self.scale.is_self_adjoint):
    return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
        self.scale.matmul(self.scale.to_dense())))
  else:
    return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
        self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
  # The Laplace family is symmetric about loc, so the mode equals the mean.
  return self._mean()
| apache-2.0 |
hsuchie4/TACTIC | src/pyasm/deprecated/flash/widget/flash_background_upload_wdg.py | 6 | 1239 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['FlashBackgroundUploadWdg']
from pyasm.web import Table, DivWdg
from pyasm.widget import BaseInputWdg, SelectWdg
class FlashBackgroundUploadWdg(BaseInputWdg):
    """Input widget for uploading a Flash background.

    Renders a small three-row table (file chooser, context selector and
    description textarea) wrapped in a div.  All field names are derived
    from this widget's input name so the server can route posted values.
    """

    def get_display(my):
        div = DivWdg()
        input_name = my.get_input_name()

        table = Table()
        table.set_class("minimal")
        table.add_style("font-size: 0.8em")

        # Row 1: the file to upload.
        table.add_row()
        table.add_cell("File")
        table.add_cell('<input type="file" name="%s"/>' % input_name)

        # Row 2: which publish context the file belongs to.
        table.add_row()
        table.add_cell("Context")
        context_select = SelectWdg("%s|context" % input_name)
        context_select.set_option("values", "publish|roughDesign|colorFinal|colorKey")
        table.add_cell(context_select)

        # Row 3: free-form description.
        table.add_row()
        table.add_cell("Description")
        table.add_cell('<textarea name="%s|description"></textarea>' % input_name)

        div.add(table)
        return div
| epl-1.0 |
jsteemann/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/tests/test_dist.py | 49 | 10335 | # -*- coding: latin-1 -*-
"""Tests for distutils.dist."""
import distutils.cmd
import distutils.dist
import os
import StringIO
import sys
import unittest
import warnings
from test.test_support import TESTFN
class test_dist(distutils.cmd.Command):
    """Sample distutils extension command."""

    # Options understood by this command:
    # (long name with trailing '=' meaning "takes a value", short name, help).
    user_options = [
        ("sample-option=", "S", "help text"),
    ]

    def initialize_options(self):
        # Required Command hook: establish defaults before option parsing.
        self.sample_option = None
class TestDistribution(distutils.dist.Distribution):
    """Distribution subclasses that avoids the default search for
    configuration files.

    The ._config_files attribute must be set before
    .parse_config_files() is called.
    """

    def find_config_files(self):
        # Overridden so the tests fully control which config files get parsed.
        return self._config_files
class DistributionTestCase(unittest.TestCase):
    """Tests for distutils.dist.Distribution command/option handling."""

    def setUp(self):
        # Preserve sys.argv: every test mutates it to simulate a command line.
        self.argv = sys.argv[:]
        del sys.argv[1:]

    def tearDown(self):
        sys.argv[:] = self.argv

    def create_distribution(self, configfiles=()):
        # Build a Distribution restricted to the given config files, then
        # parse both the config files and the (test-controlled) command line.
        d = TestDistribution()
        d._config_files = configfiles
        d.parse_config_files()
        d.parse_command_line()
        return d

    def test_command_packages_unspecified(self):
        sys.argv.append("build")
        d = self.create_distribution()
        self.assertEqual(d.get_command_packages(), ["distutils.command"])

    def test_command_packages_cmdline(self):
        from distutils.tests.test_dist import test_dist
        sys.argv.extend(["--command-packages",
                         "foo.bar,distutils.tests",
                         "test_dist",
                         "-Ssometext",
                         ])
        d = self.create_distribution()
        # let's actually try to load our test command:
        self.assertEqual(d.get_command_packages(),
                         ["distutils.command", "foo.bar", "distutils.tests"])
        cmd = d.get_command_obj("test_dist")
        self.assert_(isinstance(cmd, test_dist))
        self.assertEqual(cmd.sample_option, "sometext")

    def test_command_packages_configfile(self):
        sys.argv.append("build")
        f = open(TESTFN, "w")
        try:
            print >>f, "[global]"
            print >>f, "command_packages = foo.bar, splat"
            f.close()
            d = self.create_distribution([TESTFN])
            self.assertEqual(d.get_command_packages(),
                             ["distutils.command", "foo.bar", "splat"])

            # ensure command line overrides config:
            sys.argv[1:] = ["--command-packages", "spork", "build"]
            d = self.create_distribution([TESTFN])
            self.assertEqual(d.get_command_packages(),
                             ["distutils.command", "spork"])

            # Setting --command-packages to '' should cause the default to
            # be used even if a config file specified something else:
            sys.argv[1:] = ["--command-packages", "", "build"]
            d = self.create_distribution([TESTFN])
            self.assertEqual(d.get_command_packages(), ["distutils.command"])

        finally:
            os.unlink(TESTFN)

    def test_write_pkg_file(self):
        # Check DistributionMetadata handling of Unicode fields
        my_file = os.path.join(os.path.dirname(__file__), 'f')
        klass = distutils.dist.Distribution

        dist = klass(attrs={'author': u'Mister Café',
                            'name': 'my.package',
                            'maintainer': u'Café Junior',
                            'description': u'Café torréfié',
                            'long_description': u'Héhéhé'})

        # let's make sure the file can be written
        # with Unicode fields. they are encoded with
        # PKG_INFO_ENCODING
        try:
            dist.metadata.write_pkg_file(open(my_file, 'w'))
        finally:
            if os.path.exists(my_file):
                os.remove(my_file)

        # regular ascii is of course always usable
        dist = klass(attrs={'author': 'Mister Cafe',
                            'name': 'my.package',
                            'maintainer': 'Cafe Junior',
                            'description': 'Cafe torrefie',
                            'long_description': 'Hehehe'})

        try:
            dist.metadata.write_pkg_file(open(my_file, 'w'))
        finally:
            if os.path.exists(my_file):
                os.remove(my_file)

    def test_empty_options(self):
        # an empty options dictionary should not stay in the
        # list of attributes
        klass = distutils.dist.Distribution

        # catching warnings: Distribution warns about unknown attrs, an empty
        # 'options' dict should not trigger any warning.
        warns = []
        def _warn(msg):
            warns.append(msg)

        old_warn = warnings.warn
        warnings.warn = _warn
        try:
            dist = klass(attrs={'author': 'xxx',
                                'name': 'xxx',
                                'version': 'xxx',
                                'url': 'xxxx',
                                'options': {}})
        finally:
            warnings.warn = old_warn

        self.assertEquals(len(warns), 0)
class MetadataTestCase(unittest.TestCase):
    """Tests for DistributionMetadata: PKG-INFO rendering and the
    provides/requires/obsoletes fields."""

    def test_simple_metadata(self):
        attrs = {"name": "package",
                 "version": "1.0"}
        dist = distutils.dist.Distribution(attrs)
        meta = self.format_metadata(dist)
        self.assert_("Metadata-Version: 1.0" in meta)
        self.assert_("provides:" not in meta.lower())
        self.assert_("requires:" not in meta.lower())
        self.assert_("obsoletes:" not in meta.lower())

    def test_provides(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "provides": ["package", "package.sub"]}
        dist = distutils.dist.Distribution(attrs)
        self.assertEqual(dist.metadata.get_provides(),
                         ["package", "package.sub"])
        self.assertEqual(dist.get_provides(),
                         ["package", "package.sub"])
        meta = self.format_metadata(dist)
        # Any of provides/requires/obsoletes bumps the metadata version to 1.1.
        self.assert_("Metadata-Version: 1.1" in meta)
        self.assert_("requires:" not in meta.lower())
        self.assert_("obsoletes:" not in meta.lower())

    def test_provides_illegal(self):
        # A version restriction in 'provides' is rejected.
        self.assertRaises(ValueError,
                          distutils.dist.Distribution,
                          {"name": "package",
                           "version": "1.0",
                           "provides": ["my.pkg (splat)"]})

    def test_requires(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "requires": ["other", "another (==1.0)"]}
        dist = distutils.dist.Distribution(attrs)
        self.assertEqual(dist.metadata.get_requires(),
                         ["other", "another (==1.0)"])
        self.assertEqual(dist.get_requires(),
                         ["other", "another (==1.0)"])
        meta = self.format_metadata(dist)
        self.assert_("Metadata-Version: 1.1" in meta)
        self.assert_("provides:" not in meta.lower())
        self.assert_("Requires: other" in meta)
        self.assert_("Requires: another (==1.0)" in meta)
        self.assert_("obsoletes:" not in meta.lower())

    def test_requires_illegal(self):
        # 'requires' versions must be parenthesized comparisons; a bare
        # word inside parens is invalid.
        self.assertRaises(ValueError,
                          distutils.dist.Distribution,
                          {"name": "package",
                           "version": "1.0",
                           "requires": ["my.pkg (splat)"]})

    def test_obsoletes(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "obsoletes": ["other", "another (<1.0)"]}
        dist = distutils.dist.Distribution(attrs)
        self.assertEqual(dist.metadata.get_obsoletes(),
                         ["other", "another (<1.0)"])
        self.assertEqual(dist.get_obsoletes(),
                         ["other", "another (<1.0)"])
        meta = self.format_metadata(dist)
        self.assert_("Metadata-Version: 1.1" in meta)
        self.assert_("provides:" not in meta.lower())
        self.assert_("requires:" not in meta.lower())
        self.assert_("Obsoletes: other" in meta)
        self.assert_("Obsoletes: another (<1.0)" in meta)

    def test_obsoletes_illegal(self):
        self.assertRaises(ValueError,
                          distutils.dist.Distribution,
                          {"name": "package",
                           "version": "1.0",
                           "obsoletes": ["my.pkg (splat)"]})

    def format_metadata(self, dist):
        # Helper (not a test): render the distribution's PKG-INFO to a string.
        sio = StringIO.StringIO()
        dist.metadata.write_pkg_file(sio)
        return sio.getvalue()

    def test_custom_pydistutils(self):
        # fixes #2166
        # make sure pydistutils.cfg is found
        old = {}
        # Clear HOME-related variables so find_config_files() looks where
        # the test plants its file; restored in the finally block.
        for env in ('HOME', 'HOMEPATH', 'HOMEDRIVE'):
            value = os.environ.get(env)
            old[env] = value
            if value is not None:
                del os.environ[env]

        if os.name == 'posix':
            user_filename = ".pydistutils.cfg"
        else:
            user_filename = "pydistutils.cfg"

        curdir = os.path.dirname(__file__)
        user_filename = os.path.join(curdir, user_filename)
        f = open(user_filename, 'w')
        f.write('.')
        f.close()

        try:
            dist = distutils.dist.Distribution()

            # linux-style
            if sys.platform in ('linux', 'darwin'):
                os.environ['HOME'] = curdir
                files = dist.find_config_files()
                self.assert_(user_filename in files)

            # win32-style
            if sys.platform == 'win32':
                # home drive should be found
                os.environ['HOME'] = curdir
                files = dist.find_config_files()
                self.assert_(user_filename in files,
                             '%r not found in %r' % (user_filename, files))
        finally:
            for key, value in old.items():
                if value is None:
                    continue
                os.environ[key] = value
            os.remove(user_filename)
def test_suite():
    """Collect every test case class in this module into a single suite."""
    suite = unittest.TestSuite()
    for case in (DistributionTestCase, MetadataTestCase):
        suite.addTest(unittest.makeSuite(case))
    return suite
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
| apache-2.0 |
ojengwa/django-1 | tests/utils_tests/test_autoreload.py | 23 | 3566 | import os
import tempfile
from importlib import import_module
from django import conf
from django.contrib import admin
from django.test import TestCase, override_settings
from django.test.utils import extend_sys_path
from django.utils._os import npath, upath
from django.utils.autoreload import gen_filenames
LOCALE_PATH = os.path.join(os.path.dirname(__file__), 'locale')
class TestFilenameGenerator(TestCase):
    """Tests for django.utils.autoreload.gen_filenames()."""

    def setUp(self):
        # Empty cached variables so each test observes a fresh scan.
        from django.utils import autoreload
        autoreload._cached_modules = set()
        autoreload._cached_filenames = []

    def test_django_locales(self):
        """
        Test that gen_filenames() also yields the built-in django locale files.
        """
        filenames = list(gen_filenames())
        self.assertIn(os.path.join(os.path.dirname(conf.__file__), 'locale',
                                   'nl', 'LC_MESSAGES', 'django.mo'),
                      filenames)

    @override_settings(LOCALE_PATHS=[LOCALE_PATH])
    def test_locale_paths_setting(self):
        """
        Test that gen_filenames also yields from LOCALE_PATHS locales.
        """
        filenames = list(gen_filenames())
        self.assertIn(os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),
                      filenames)

    @override_settings(INSTALLED_APPS=[])
    def test_project_root_locale(self):
        """
        Test that gen_filenames also yields from the current directory (project
        root).
        """
        # gen_filenames scans the cwd, so temporarily chdir into the test dir.
        old_cwd = os.getcwd()
        os.chdir(os.path.dirname(__file__))
        try:
            filenames = list(gen_filenames())
            self.assertIn(
                os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),
                filenames)
        finally:
            os.chdir(old_cwd)

    @override_settings(INSTALLED_APPS=['django.contrib.admin'])
    def test_app_locales(self):
        """
        Test that gen_filenames also yields from locale dirs in installed apps.
        """
        filenames = list(gen_filenames())
        self.assertIn(
            os.path.join(os.path.dirname(upath(admin.__file__)), 'locale', 'nl', 'LC_MESSAGES', 'django.mo'),
            filenames
        )

    @override_settings(USE_I18N=False)
    def test_no_i18n(self):
        """
        If i18n machinery is disabled, there is no need for watching the
        locale files.
        """
        filenames = list(gen_filenames())
        self.assertNotIn(
            os.path.join(os.path.dirname(upath(conf.__file__)), 'locale', 'nl', 'LC_MESSAGES', 'django.mo'),
            filenames
        )

    def test_only_new_files(self):
        """
        When calling a second time gen_filenames with only_new = True, only
        files from newly loaded modules should be given.
        """
        list(gen_filenames())
        from fractions import Fraction  # NOQA
        filenames2 = list(gen_filenames(only_new=True))
        self.assertEqual(len(filenames2), 1)
        self.assertTrue(filenames2[0].endswith('fractions.py'))
        self.assertFalse(any(f.endswith('.pyc') for f in gen_filenames()))

    def test_deleted_removed(self):
        # A module file deleted from disk must disappear from the watch list.
        dirname = tempfile.mkdtemp()
        filename = os.path.join(dirname, 'test_deleted_removed_module.py')
        with open(filename, 'w'):
            pass
        with extend_sys_path(dirname):
            import_module('test_deleted_removed_module')
        self.assertIn(npath(filename), gen_filenames())
        os.unlink(filename)
        self.assertNotIn(filename, gen_filenames())
SkyPicker/Skywall | skywall/core/frontend.py | 1 | 3011 | import os
import html
import json
import subprocess
import aiohttp.web
from skywall.core.config import config
from skywall.core.constants import API_ROUTE, BUILD_ROUTE
from skywall.core.modules import import_enabled_modules
def get_frontend(request):
    """Serve the single-page-app bootstrap HTML.

    In devel mode the bundle URL points at the live webpack dev server and
    no stylesheet link is emitted; in production the prebuilt app.css and
    app.js from the build route are referenced instead.
    """
    devel = config.get('devel')
    host = config.get('webpack.host')
    port = config.get('webpack.port')
    # Data handed to the client app through the <body data="..."> attribute.
    data = dict(
        devel=devel,
        api=API_ROUTE,
    )
    if devel:
        style = ''
        script = 'http://{host}:{port}{build}/app.js'.format(host=host, port=port, build=BUILD_ROUTE)
    else:
        style = '<link href="{build}/app.css" rel="stylesheet" />'.format(build=html.escape(BUILD_ROUTE))
        script = '{build}/app.js'.format(build=BUILD_ROUTE)
    # NOTE(review): 'script' is JSON-encoded and then html.escape()d before
    # being interpolated inside a <script> block; entities are *not* decoded
    # in script context, so this relies on the URL never containing &, < or >
    # -- confirm that assumption holds for all configured hosts/routes.
    content = """
        <!DOCTYPE html>
        <html>
          <head>
            <meta charset="utf-8">
            <meta content="IE=edge" http-equiv="X-UA-Compatible">
            <meta name="viewport" content="width=device-width, initial-scale=1.0">
            <title>Skywall</title>
            {style}
          </head>
          <body data="{data}">
            <div id="app"></div>
            <script>
              (function() {{
                var body = document.getElementsByTagName('body')[0];
                var app = document.createElement('script');
                app.type = 'text/javascript';
                app.async = true;
                app.src = {script};
                body.appendChild(app);
              }})()
            </script>
          </body>
        </html>
    """.format(
        style=style,
        script=html.escape(json.dumps(script), quote=False),
        data=html.escape(json.dumps(data)),
    )
    return aiohttp.web.Response(text=content, content_type='text/html')
def install_frontend():
    """Run ``npm install`` over every javascript/ directory that ships a package.json."""
    candidates = [os.path.join(os.path.dirname(__file__), '../javascript')]
    candidates.extend(
        os.path.join(os.path.dirname(module.__file__), 'javascript')
        for module in import_enabled_modules()
    )
    # Only directories that actually contain a package.json are installable.
    paths = [p for p in candidates if os.path.isfile(os.path.join(p, 'package.json'))]
    if paths:
        subprocess.run(['npm', 'install'] + paths)
def build_frontend():
    """Run the webpack production build once, in a node subprocess."""
    entries = ['skywall']
    entries.extend(module.__name__ for module in import_enabled_modules())
    # webpack reads its configuration from these environment variables.
    env = dict(
        os.environ,
        WEBPACK_HOST=config.get('webpack.host'),
        WEBPACK_PORT=str(config.get('webpack.port')),
        FRONTEND_ENTRIES=','.join(entries),
    )
    subprocess.run(['node', '-e', 'require("skywall/webpack/build")'], env=env)
def run_webpack():
    """Start the webpack dev server in the background; returns the Popen handle."""
    entries = ['skywall']
    entries.extend(module.__name__ for module in import_enabled_modules())
    # webpack reads its configuration from these environment variables.
    env = dict(
        os.environ,
        WEBPACK_HOST=config.get('webpack.host'),
        WEBPACK_PORT=str(config.get('webpack.port')),
        FRONTEND_ENTRIES=','.join(entries),
    )
    return subprocess.Popen(['node', '-e', 'require("skywall/webpack/server")'], env=env)
| gpl-3.0 |
suizokukan/anceps | anceps/hypotheses/hypothesis.py | 1 | 2479 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
################################################################################
# Anceps Copyright (C) 2014 Suizokukan
# Contact: suizokukan _A.T._ orange dot fr
#
# This file is part of Anceps.
# Anceps is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Anceps is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Anceps. If not, see <http://www.gnu.org/licenses/>.
################################################################################
"""
❏Anceps❏ : anceps/anceps/hypotheses/hypothesis.py
o Hypothesis class : base class
"""
import treelib
################################################################################
class Hypothesis(treelib.Node):
    """
    Hypothesis class : mother class of H0, H1, ...
    """

    #///////////////////////////////////////////////////////////////////////////
    def __init__(self,
                 tag,
                 identifier,
                 add_a_new_hypothesis,
                 parent):
        """
        Hypothesis.__init__

        PARAMETERS :
        o tag : (str) somehow equivalent to "identifier"
                but used to display informations.
                -> debug oriented.
        o identifier : (str) key in the tree, must be unique.
        o add_a_new_hypothesis : (func) see the Scansion class
        o parent : (str) parent's identifier(=key) in the tree
        """
        treelib.Node.__init__(self,
                              tag,
                              identifier)

        # setting the parent :
        # NOTE(review): this writes treelib's private parent pointer
        # directly; confirm against treelib's public update_bpointer() API.
        self._bpointer = parent

        # see Scansion class
        self.add_a_new_hypothesis = add_a_new_hypothesis

        # score of this hypothesis, as a percentage (100 = best).
        self.note = 100  # percentage

        # (bool) True if the hypothesis may produce new hypotheses (=add soons)
        self.active = True
| gpl-3.0 |
leppa/home-assistant | homeassistant/components/influxdb/sensor.py | 1 | 6880 | """InfluxDB component which allows you to get data from an Influx database."""
from datetime import timedelta
import logging
from influxdb import InfluxDBClient, exceptions
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
STATE_UNKNOWN,
)
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from . import CONF_DB_NAME
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8086
DEFAULT_DATABASE = "home_assistant"
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DEFAULT_GROUP_FUNCTION = "mean"
DEFAULT_FIELD = "value"
CONF_QUERIES = "queries"
CONF_GROUP_FUNCTION = "group_function"
CONF_FIELD = "field"
CONF_MEASUREMENT_NAME = "measurement"
CONF_WHERE = "where"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
_QUERY_SCHEME = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_MEASUREMENT_NAME): cv.string,
vol.Required(CONF_WHERE): cv.template,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
vol.Optional(CONF_GROUP_FUNCTION, default=DEFAULT_GROUP_FUNCTION): cv.string,
vol.Optional(CONF_FIELD, default=DEFAULT_FIELD): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_QUERIES): [_QUERY_SCHEME],
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the InfluxDB component."""
    # Map sensor-side connection keys to their configuration options.
    option_map = (
        ("host", CONF_HOST),
        ("password", CONF_PASSWORD),
        ("port", CONF_PORT),
        ("ssl", CONF_SSL),
        ("username", CONF_USERNAME),
        ("verify_ssl", CONF_VERIFY_SSL),
    )
    influx_conf = {key: config.get(option) for key, option in option_map}

    # Only sensors that managed to reach their database are added.
    sensors = []
    for query in config.get(CONF_QUERIES):
        sensor = InfluxSensor(hass, influx_conf, query)
        if sensor.connected:
            sensors.append(sensor)

    add_entities(sensors, True)
class InfluxSensor(Entity):
    """Implementation of a Influxdb sensor."""

    def __init__(self, hass, influx_conf, query):
        """Initialize the sensor.

        Opens a client for the query's database and probes it once; on
        failure `self.connected` stays False so setup_platform skips
        this sensor.
        """
        self._name = query.get(CONF_NAME)
        self._unit_of_measurement = query.get(CONF_UNIT_OF_MEASUREMENT)
        value_template = query.get(CONF_VALUE_TEMPLATE)
        if value_template is not None:
            self._value_template = value_template
            # Templates need a hass reference before they can render.
            self._value_template.hass = hass
        else:
            self._value_template = None
        database = query.get(CONF_DB_NAME)
        self._state = None
        self._hass = hass
        where_clause = query.get(CONF_WHERE)
        where_clause.hass = hass

        influx = InfluxDBClient(
            host=influx_conf["host"],
            port=influx_conf["port"],
            username=influx_conf["username"],
            password=influx_conf["password"],
            database=database,
            ssl=influx_conf["ssl"],
            verify_ssl=influx_conf["verify_ssl"],
        )
        try:
            # Cheap probe to verify connectivity and read access.
            influx.query("SHOW SERIES LIMIT 1;")
            self.connected = True
            self.data = InfluxSensorData(
                influx,
                query.get(CONF_GROUP_FUNCTION),
                query.get(CONF_FIELD),
                query.get(CONF_MEASUREMENT_NAME),
                where_clause,
            )
        except exceptions.InfluxDBClientError as exc:
            _LOGGER.error(
                "Database host is not accessible due to '%s', please"
                " check your entries in the configuration file and"
                " that the database exists and is READ/WRITE",
                exc,
            )
            self.connected = False

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def should_poll(self):
        """Return the polling state."""
        return True

    def update(self):
        """Get the latest data from Influxdb and updates the states."""
        self.data.update()
        value = self.data.value
        if value is None:
            value = STATE_UNKNOWN
        if self._value_template is not None:
            # Post-process the raw value through the user-supplied template;
            # falls back to STATE_UNKNOWN on render failure.
            value = self._value_template.render_with_possible_json_value(
                str(value), STATE_UNKNOWN
            )

        self._state = value
class InfluxSensorData:
    """Class for handling the data retrieval."""

    def __init__(self, influx, group, field, measurement, where):
        """Initialize the data object."""
        self.influx = influx          # connected InfluxDBClient
        self.group = group            # aggregate function, e.g. 'mean'
        self.field = field            # field to aggregate
        self.measurement = measurement
        self.where = where            # WHERE-clause template
        self.value = None             # latest query result (None = unknown)
        self.query = None             # last rendered query string

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data with a shell command."""
        _LOGGER.info("Rendering where: %s", self.where)
        try:
            where_clause = self.where.render()
        except TemplateError as ex:
            # Keep the previous value when the template cannot be rendered.
            _LOGGER.error("Could not render where clause template: %s", ex)
            return

        self.query = "select {}({}) as value from {} where {}".format(
            self.group, self.field, self.measurement, where_clause
        )

        _LOGGER.info("Running query: %s", self.query)

        points = list(self.influx.query(self.query).get_points())
        if not points:
            _LOGGER.warning(
                "Query returned no points, sensor state set " "to UNKNOWN: %s",
                self.query,
            )
            self.value = None
        else:
            if len(points) > 1:
                _LOGGER.warning(
                    "Query returned multiple points, only first " "one shown: %s",
                    self.query,
                )
            self.value = points[0].get("value")
| apache-2.0 |
mrshu/scikit-learn | sklearn/covariance/__init__.py | 10 | 1197 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope, EllipticEnvelop
# Public names re-exported by ``from sklearn.covariance import *``.
# NOTE(review): both 'EllipticEnvelop' and 'EllipticEnvelope' are listed --
# the former appears to be a legacy alias; confirm before removing it.
__all__ = ['EllipticEnvelop',
           'EllipticEnvelope',
           'EmpiricalCovariance',
           'GraphLasso',
           'GraphLassoCV',
           'LedoitWolf',
           'MinCovDet',
           'OAS',
           'ShrunkCovariance',
           'empirical_covariance',
           'fast_mcd',
           'graph_lasso',
           'ledoit_wolf',
           'ledoit_wolf_shrinkage',
           'log_likelihood',
           'oas',
           'shrunk_covariance']
| bsd-3-clause |
BlackstoneEngineering/yotta | yotta/version.py | 3 | 2082 | # Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import argparse
import logging
import os
# version, , represent versions and specifications, internal
from .lib import version
# Component, , represents an installed component, internal
from .lib import component
# Target, , represents an installed target, internal
from .lib import target
# vcs, , represent version controlled directories, internal
from .lib import vcs
def addOptions(parser):
    """Add the positional version-action argument to *parser*.

    The argument accepts one of the bump keywords ('patch', 'minor',
    'major', case-insensitive) or an explicit version string such as
    '1.2.3'; it is optional (nargs='?'), defaulting to None.
    """
    def patchType(s):
        # argparse 'type' callable: normalize bump keywords, otherwise try
        # to parse an explicit semantic version.
        if s.lower() in ('major', 'minor', 'patch'):
            return s.lower()
        try:
            return version.Version(s)
        # Was a bare 'except:', which would also swallow KeyboardInterrupt
        # and SystemExit; only parse failures should become argument errors.
        except Exception:
            raise argparse.ArgumentTypeError(
                '"%s" is not a valid version (expected patch, major, minor, or something like 1.2.3)' % s
            )
    parser.add_argument('action', type=patchType, nargs='?', help='[patch | minor | major | <version>]')
def execCommand(args, following_args):
    """Show the current version, or bump/set it and tag it in the VCS.

    With no action argument the current version is printed.  With 'patch',
    'minor', 'major' or an explicit version, the working copy must be
    clean; the new version is written to the description file, committed,
    and tagged as 'v<version>'.  Returns 1 on error, None otherwise.
    """
    wd = os.getcwd()
    c = component.Component(wd)
    # skip testing for target if we already found a component
    t = None if c else target.Target(wd)
    if not (c or t):
        # Both objects evaluate falsy when invalid but still expose getError().
        logging.debug(str(c.getError()))
        logging.debug(str(t.getError()))
        logging.error('The current directory does not contain a valid module or target.')
        return 1
    else:
        # only needed separate objects in order to display errors
        p = (c or t)
    if args.action:
        try:
            if not p.vcsIsClean():
                logging.error('The working directory is not clean')
                return 1
            v = p.getVersion()
            if args.action in ('major', 'minor', 'patch'):
                v.bump(args.action)
            else:
                # An explicit version object was supplied on the command line.
                v = args.action
            logging.info('@%s' % v)
            p.setVersion(v)
            p.writeDescription()
            p.commitVCS(tag='v'+str(v))
        except vcs.VCSError as e:
            logging.error(e)
    else:
        logging.info(str(p.getVersion()))
eahneahn/free | lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/storage.py | 211 | 2794 | from datetime import datetime
from django.http import HttpRequest
from django.conf import settings
from django.utils.importlib import import_module
from django.contrib.auth.models import User
def get_request():
    """Build a bare HttpRequest carrying a fresh, unsaved session."""
    engine = import_module(settings.SESSION_ENGINE)
    request = HttpRequest()
    # A session key of None yields a brand-new, empty session store.
    request.session = engine.SessionStore(None)
    return request
class TestStorage(object):
    """Mixin exercising a wizard storage backend.

    Concrete test classes mix this in and provide get_storage() returning
    the storage class under test.
    """

    def setUp(self):
        self.testuser, created = User.objects.get_or_create(username='testuser1')

    def test_current_step(self):
        request = get_request()
        storage = self.get_storage()('wizard1', request, None)
        my_step = 2

        self.assertEqual(storage.current_step, None)

        storage.current_step = my_step
        self.assertEqual(storage.current_step, my_step)

        storage.reset()
        self.assertEqual(storage.current_step, None)

        storage.current_step = my_step

        # A second wizard on the same request must not see the first one's step.
        storage2 = self.get_storage()('wizard2', request, None)
        self.assertEqual(storage2.current_step, None)

    def test_step_data(self):
        request = get_request()
        storage = self.get_storage()('wizard1', request, None)
        step1 = 'start'
        step_data1 = {'field1': 'data1',
                      'field2': 'data2',
                      'field3': datetime.now(),
                      'field4': self.testuser}

        self.assertEqual(storage.get_step_data(step1), None)

        storage.set_step_data(step1, step_data1)
        self.assertEqual(storage.get_step_data(step1), step_data1)

        storage.reset()
        self.assertEqual(storage.get_step_data(step1), None)

        storage.set_step_data(step1, step_data1)

        # Step data must be isolated per wizard prefix.
        storage2 = self.get_storage()('wizard2', request, None)
        self.assertEqual(storage2.get_step_data(step1), None)

    def test_extra_context(self):
        request = get_request()
        storage = self.get_storage()('wizard1', request, None)
        extra_context = {'key1': 'data1',
                         'key2': 'data2',
                         'key3': datetime.now(),
                         'key4': self.testuser}

        self.assertEqual(storage.extra_data, {})

        storage.extra_data = extra_context
        self.assertEqual(storage.extra_data, extra_context)

        storage.reset()
        self.assertEqual(storage.extra_data, {})

        storage.extra_data = extra_context

        # Extra data must also be isolated per wizard prefix.
        storage2 = self.get_storage()('wizard2', request, None)
        self.assertEqual(storage2.extra_data, {})

    def test_extra_context_key_persistence(self):
        # Mutating the extra_data mapping in place must persist.
        request = get_request()
        storage = self.get_storage()('wizard1', request, None)

        self.assertFalse('test' in storage.extra_data)

        storage.extra_data['test'] = True

        self.assertTrue('test' in storage.extra_data)
acsone/odoo | openerp/service/websrv_lib.py | 380 | 7780 | # -*- coding: utf-8 -*-
#
# Copyright P. Christeas <p_christ@hol.gr> 2008-2010
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
###############################################################################
""" Framework for generic http servers
This library contains *no* OpenERP-specific functionality. It should be
usable in other projects, too.
"""
import logging
import SocketServer
from BaseHTTPServer import *
from SimpleHTTPServer import SimpleHTTPRequestHandler
_logger = logging.getLogger(__name__)
class AuthRequiredExc(Exception):
    """Signals that the request must authenticate before proceeding.

    Carries the challenge scheme (`atype`, e.g. 'Basic') and the `realm`
    string to present to the client.
    """

    def __init__(self, atype, realm):
        super(AuthRequiredExc, self).__init__()
        self.atype = atype
        self.realm = realm
class AuthRejectedExc(Exception):
    """Raised when the supplied credentials are refused outright."""
    pass
class AuthProvider:
    """Base class for pluggable HTTP authentication back-ends.

    Subclasses are expected to override authenticate() (and optionally
    checkRequest()) for their specific scheme.
    """

    def __init__(self, realm):
        self.realm = realm

    def authenticate(self, user, passwd, client_address):
        # Default policy: reject everybody; subclasses implement real checks.
        return False

    def log(self, msg):
        print(msg)

    def checkRequest(self, handler, path='/'):
        """ Check if we are allowed to process that request
        """
        pass
class HTTPHandler(SimpleHTTPRequestHandler):
    """Request handler whose setup/handle/finish are stubbed to no-ops, so
    that constructing an instance does not process the request."""

    def __init__(self, request, client_address, server):
        SimpleHTTPRequestHandler.__init__(self, request, client_address, server)
        # print "Handler for %s inited" % str(client_address)
        # NOTE(review): BaseHTTPRequestHandler.__init__ normally processes
        # the request before returning; assigning protocol_version here is
        # presumably only effective because setup()/handle() are no-ops
        # below -- confirm the intended dispatch flow.
        self.protocol_version = 'HTTP/1.1'
        self.connection = dummyconn()

    def handle(self):
        """ Classes here should NOT handle inside their constructor
        """
        pass

    def finish(self):
        pass

    def setup(self):
        pass
# A list of HTTPDir.
# Registered virtual directories, kept in match-priority order (more
# specific paths first); maintained by reg_http_service() below.
handlers = []
class HTTPDir:
    """ A dispatcher class, like a virtual folder in httpd.

    Associates a URL path prefix with a handler class, an optional
    authentication provider, and a secure-only flag.
    """

    def __init__(self, path, handler, auth_provider=None, secure_only=False):
        self.path = path
        self.handler = handler
        self.auth_provider = auth_provider
        self.secure_only = secure_only

    def matches(self, request):
        """Return our path when *request* falls under it, else False."""
        return self.path if request.startswith(self.path) else False

    def instanciate_handler(self, request, client_address, server):
        """Create a handler instance for this path; attach a fresh
        auth provider instance when one was configured."""
        handler = self.handler(noconnection(request), client_address, server)
        if self.auth_provider:
            handler.auth_provider = self.auth_provider()
        return handler
def reg_http_service(path, handler, auth_provider=None, secure_only=False):
    """ Register a HTTP handler at a given path.

    The auth_provider will be instanciated and set on the handler instances.
    """
    global handlers
    service = HTTPDir(path, handler, auth_provider, secure_only)
    pos = len(handlers)
    lastpos = pos
    # Scan from the end towards the start and remember the topmost existing
    # entry whose path is a prefix of ours; inserting there keeps longer,
    # more specific paths ahead of the prefixes that would shadow them.
    while pos > 0:
        pos -= 1
        if handlers[pos].matches(service.path):
            lastpos = pos
            # we won't break here, but search all way to the top, to
            # ensure there is no lesser entry that will shadow the one
            # we are inserting.
    handlers.insert(lastpos, service)
def list_http_services(protocol=None):
    """Return (path, handler-repr) pairs for services reachable over *protocol*.

    *protocol* may be None (list everything), 'http' or 'https'.  A service
    registered with secure_only=True is not served over plain http, so it
    is omitted from the 'http' listing; everything else is reachable over
    both protocols.  This mirrors the availability rule enforced by
    find_http_service().
    """
    global handlers
    ret = []
    for svc in handlers:
        # The previous condition ('http' listed everything; 'https' listed
        # only secure-only services) was inverted w.r.t. find_http_service().
        if protocol == 'http' and svc.secure_only:
            continue
        ret.append((svc.path, str(svc.handler)))
    return ret
def find_http_service(path, secure=False):
    """Return the first registered HTTPDir matching *path*, or None.

    Secure-only services are skipped unless *secure* is true.
    """
    global handlers
    for candidate in handlers:
        matched = candidate.matches(path)
        if matched == False or (candidate.secure_only and not secure):
            continue
        return candidate
    return None
class noconnection(object):
    """Stand-in for a socket when the request is handled out-of-band."""

    def __init__(self, realsocket=None):
        self.__hidden_socket = realsocket

    def makefile(self, mode, bufsize):
        """No underlying stream is available for a no-connection."""
        return None

    def close(self):
        """Nothing to release."""
        pass

    def getsockname(self):
        """Proxy to the real socket's local address, if one was supplied.

        Raises AttributeError when no real socket is attached.
        """
        sock = self.__hidden_socket
        if not sock:
            raise AttributeError("No-connection class cannot tell real socket")
        return sock.getsockname()
class dummyconn:
    """Connection stub whose shutdown() is a harmless no-op."""

    def shutdown(self, tru):
        pass
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class FixSendError:
    """Mixin overriding BaseHTTPRequestHandler.send_error() so the error
    reply always carries a Content-Length header (needed by HTTP/1.1
    keep-alive clients) and escapes the message against XSS."""
    #error_message_format = """ """
    def send_error(self, code, message=None):
        #overriden from BaseHTTPRequestHandler, we also send the content-length
        try:
            short, long = self.responses[code]
        except KeyError:
            short, long = '???', '???'
        if message is None:
            message = short
        explain = long
        # NOTE(review): _logger is expected to be defined at module level,
        # outside this excerpt -- confirm before reuse.
        _logger.error("code %d, message %s", code, message)
        # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
        content = (self.error_message_format %
            {'code': code, 'message': _quote_html(message), 'explain': explain})
        self.send_response(code, message)
        self.send_header("Content-Type", self.error_content_type)
        self.send_header('Connection', 'close')
        self.send_header('Content-Length', len(content) or 0)
        self.end_headers()
        # _flush is optional on the concrete handler; call it if present.
        if hasattr(self, '_flush'):
            self._flush()
        # No body for HEAD requests, 1xx, 204 and 304 per the HTTP spec.
        if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
            self.wfile.write(content)
class HttpOptions:
    """Mixin implementing the HTTP OPTIONS verb from a class-level table."""
    _HTTP_OPTIONS = {'Allow': ['OPTIONS' ] }

    def do_OPTIONS(self):
        """return the list of capabilities """
        opts = self._HTTP_OPTIONS
        nopts = self._prep_OPTIONS(opts)
        if nopts:
            opts = nopts

        self.send_response(200)
        self.send_header("Content-Length", 0)
        if 'Microsoft' in self.headers.get('User-Agent', ''):
            self.send_header('MS-Author-Via', 'DAV')
            # Microsoft's webdav lib ass-umes that the server would
            # be a FrontPage(tm) one, unless we send a non-standard
            # header that we are not an elephant.
            # http://www.ibm.com/developerworks/rational/library/2089.html
        for key, value in opts.items():
            # NOTE(review): basestring is Python 2 only; this module predates
            # Python 3 support.
            if isinstance(value, basestring):
                self.send_header(key, value)
            elif isinstance(value, (tuple, list)):
                self.send_header(key, ', '.join(value))
        self.end_headers()

    def _prep_OPTIONS(self, opts):
        """Prepare the OPTIONS response, if needed

        Sometimes, like in special DAV folders, the OPTIONS may contain
        extra keywords, perhaps also dependant on the request url.

        :param opts: MUST be copied before being altered
        :returns: the updated options.
        """
        return opts
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
daniponi/django | django/contrib/admin/sites.py | 3 | 19794 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
system_check_errors = []
class AlreadyRegistered(Exception):
    """Raised by AdminSite.register() when a model is registered twice."""
    pass
class NotRegistered(Exception):
    """Raised by AdminSite.unregister() when a model was never registered."""
    pass
class AdminSite(object):
    """
    An AdminSite object encapsulates an instance of the Django admin application, ready
    to be hooked in to your URLconf. Models are registered with the AdminSite using the
    register() method, and the get_urls() method can then be used to access Django view
    functions that present a full admin interface for the collection of registered
    models.
    """
    # Text to put at the end of each page's <title>.
    site_title = ugettext_lazy('Django site admin')

    # Text to put in each page's <h1>.
    site_header = ugettext_lazy('Django administration')

    # Text to put at the top of the admin index page.
    index_title = ugettext_lazy('Site administration')

    # URL for the "View site" link at the top of each admin page.
    site_url = '/'

    # Backing value for the empty_value_display property below.
    _empty_value_display = '-'

    # Per-site customization hooks; None means "use the default".
    login_form = None
    index_template = None
    app_index_template = None
    login_template = None
    logout_template = None
    password_change_template = None
    password_change_done_template = None

    def __init__(self, name='admin'):
        self._registry = {}  # model_class class -> admin_class instance
        # Instance namespace used by reverse() lookups (e.g. 'admin:index').
        self.name = name
        self._actions = {'delete_selected': actions.delete_selected}
        # _global_actions keeps every action ever registered, even if later
        # disabled from _actions.
        self._global_actions = self._actions.copy()
    def register(self, model_or_iterable, admin_class=None, **options):
        """
        Registers the given model(s) with the given admin class.

        The model(s) should be Model classes, not instances.

        If an admin class isn't given, it will use ModelAdmin (the default
        admin options). If keyword arguments are given -- e.g., list_display --
        they'll be applied as options to the admin class.

        If a model is already registered, this will raise AlreadyRegistered.

        If a model is abstract, this will raise ImproperlyConfigured.
        """
        if not admin_class:
            admin_class = ModelAdmin
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model._meta.abstract:
                raise ImproperlyConfigured(
                    'The model %s is abstract, so it cannot be registered with admin.' % model.__name__
                )

            if model in self._registry:
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)

            # Ignore the registration if the model has been
            # swapped out.
            if not model._meta.swapped:
                # If we got **options then dynamically construct a subclass of
                # admin_class with those **options.
                if options:
                    # For reasons I don't quite understand, without a __module__
                    # the created class appears to "live" in the wrong place,
                    # which causes issues later on.
                    options['__module__'] = __name__
                    # NOTE(review): admin_class is rebound here, so when several
                    # models are registered together with options, later models
                    # subclass the earlier dynamic class -- confirm intended.
                    admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)

                # Instantiate the admin class to save in the registry
                admin_obj = admin_class(model, self)
                if admin_class is not ModelAdmin and settings.DEBUG:
                    system_check_errors.extend(admin_obj.check())

                self._registry[model] = admin_obj
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
    def is_registered(self, model):
        """
        Check if a model class is registered with this `AdminSite`.
        """
        # Membership test against the model -> ModelAdmin registry.
        return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
    def disable_action(self, name):
        """
        Disable a globally-registered action. Raises KeyError for invalid names.
        """
        del self._actions[name]

    def get_action(self, name):
        """
        Explicitly get a registered global action whether it's enabled or
        not. Raises KeyError for invalid names.
        """
        # _global_actions retains every action ever registered, so actions
        # removed via disable_action() remain retrievable here.
        return self._global_actions[name]
    @property
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).
        """
        return six.iteritems(self._actions)

    @property
    def empty_value_display(self):
        # Display value used in change lists for empty fields.
        return self._empty_value_display

    @empty_value_display.setter
    def empty_value_display(self, empty_value_display):
        self._empty_value_display = empty_value_display
    def has_permission(self, request):
        """
        Returns True if the given HttpRequest has permission to view
        *at least one* page in the admin site.
        """
        # The staff flag gates admin access; per-object permissions are
        # checked by the individual views.
        return request.user.is_active and request.user.is_staff
    def admin_view(self, view, cacheable=False):
        """
        Decorator to create an admin view attached to this ``AdminSite``. This
        wraps the view and provides permission checking by calling
        ``self.has_permission``.

        You'll want to use this from within ``AdminSite.get_urls()``:

            class MyAdminSite(AdminSite):

                def get_urls(self):
                    from django.conf.urls import url

                    urls = super(MyAdminSite, self).get_urls()
                    urls += [
                        url(r'^my_view/$', self.admin_view(some_view))
                    ]
                    return urls

        By default, admin_views are marked non-cacheable using the
        ``never_cache`` decorator. If the view can be safely cached, set
        cacheable=True.
        """
        def inner(request, *args, **kwargs):
            if not self.has_permission(request):
                if request.path == reverse('admin:logout', current_app=self.name):
                    # An unauthenticated logout hit lands on the index instead
                    # of bouncing through the login form.
                    index_path = reverse('admin:index', current_app=self.name)
                    return HttpResponseRedirect(index_path)
                # Inner import to prevent django.contrib.admin (app) from
                # importing django.contrib.auth.models.User (unrelated model).
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(
                    request.get_full_path(),
                    reverse('admin:login', current_app=self.name)
                )
            return view(request, *args, **kwargs)
        if not cacheable:
            inner = never_cache(inner)
        # We add csrf_protect here so this function can be used as a utility
        # function for any view, without having to repeat 'csrf_protect'.
        if not getattr(view, 'csrf_exempt', False):
            inner = csrf_protect(inner)
        return update_wrapper(inner, view)
    def get_urls(self):
        """Build the urlpattern list for this site: the site-wide views plus
        one include() per registered ModelAdmin."""
        from django.conf.urls import url, include
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.contenttypes.views imports ContentType.
        from django.contrib.contenttypes import views as contenttype_views

        def wrap(view, cacheable=False):
            # Bind a bound view through admin_view() for permission checking.
            def wrapper(*args, **kwargs):
                return self.admin_view(view, cacheable)(*args, **kwargs)
            wrapper.admin_site = self
            return update_wrapper(wrapper, view)

        # Admin-site-wide views. Note that self.login is deliberately left
        # unwrapped: it must be reachable without permissions.
        urlpatterns = [
            url(r'^$', wrap(self.index), name='index'),
            url(r'^login/$', self.login, name='login'),
            url(r'^logout/$', wrap(self.logout), name='logout'),
            url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
            url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
                name='password_change_done'),
            url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
            url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
                name='view_on_site'),
        ]

        # Add in each model's views, and create a list of valid URLS for the
        # app_index
        valid_app_labels = []
        for model, model_admin in self._registry.items():
            urlpatterns += [
                url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
            ]
            if model._meta.app_label not in valid_app_labels:
                valid_app_labels.append(model._meta.app_label)

        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view,
        if valid_app_labels:
            regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
            urlpatterns += [
                url(regex, wrap(self.app_index), name='app_list'),
            ]
        return urlpatterns
    @property
    def urls(self):
        # (urlpatterns, app_namespace, instance_namespace) triple suitable for
        # django.conf.urls.include().
        return self.get_urls(), 'admin', self.name
    def each_context(self, request):
        """
        Returns a dictionary of variables to put in the template context for
        *every* page in the admin site.

        For sites running on a subpath, use the SCRIPT_NAME value if site_url
        hasn't been customized.
        """
        script_name = request.META['SCRIPT_NAME']
        # Only substitute SCRIPT_NAME when site_url is still the default '/'.
        site_url = script_name if self.site_url == '/' and script_name else self.site_url
        return {
            'site_title': self.site_title,
            'site_header': self.site_header,
            'site_url': site_url,
            'has_permission': self.has_permission(request),
            'available_apps': self.get_app_list(request),
        }
    def password_change(self, request, extra_context=None):
        """
        Handles the "change password" task -- both form display and validation.
        """
        from django.contrib.admin.forms import AdminPasswordChangeForm
        from django.contrib.auth.views import password_change
        url = reverse('admin:password_change_done', current_app=self.name)
        defaults = {
            'password_change_form': AdminPasswordChangeForm,
            'post_change_redirect': url,
            'extra_context': dict(self.each_context(request), **(extra_context or {})),
        }
        if self.password_change_template is not None:
            defaults['template_name'] = self.password_change_template
        # current_app makes reverse() resolve names inside this site instance.
        request.current_app = self.name
        return password_change(request, **defaults)
    def password_change_done(self, request, extra_context=None):
        """
        Displays the "success" page after a password change.
        """
        from django.contrib.auth.views import password_change_done
        defaults = {
            'extra_context': dict(self.each_context(request), **(extra_context or {})),
        }
        if self.password_change_done_template is not None:
            defaults['template_name'] = self.password_change_done_template
        # current_app makes reverse() resolve names inside this site instance.
        request.current_app = self.name
        return password_change_done(request, **defaults)
    def i18n_javascript(self, request):
        """
        Displays the i18n JavaScript that the Django admin requires.

        This takes into account the USE_I18N setting. If it's set to False, the
        generated JavaScript will be leaner and faster.
        """
        if settings.USE_I18N:
            from django.views.i18n import javascript_catalog
        else:
            # The null catalog serves an empty translation set.
            from django.views.i18n import null_javascript_catalog as javascript_catalog
        return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
    @never_cache
    def logout(self, request, extra_context=None):
        """
        Logs out the user for the given HttpRequest.

        This should *not* assume the user is already logged in.
        """
        # Delegate to the auth app's logout view with our context/template.
        from django.contrib.auth.views import logout
        defaults = {
            'extra_context': dict(
                self.each_context(request),
                # Since the user isn't logged out at this point, the value of
                # has_permission must be overridden.
                has_permission=False,
                **(extra_context or {})
            ),
        }
        if self.logout_template is not None:
            defaults['template_name'] = self.logout_template
        request.current_app = self.name
        return logout(request, **defaults)
    @never_cache
    def login(self, request, extra_context=None):
        """
        Displays the login form for the given HttpRequest.
        """
        if request.method == 'GET' and self.has_permission(request):
            # Already logged-in, redirect to admin index
            index_path = reverse('admin:index', current_app=self.name)
            return HttpResponseRedirect(index_path)

        from django.contrib.auth.views import login
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.admin.forms eventually imports User.
        from django.contrib.admin.forms import AdminAuthenticationForm
        context = dict(
            self.each_context(request),
            title=_('Log in'),
            app_path=request.get_full_path(),
        )
        # Default the post-login redirect to the admin index when the client
        # didn't request a specific destination.
        if (REDIRECT_FIELD_NAME not in request.GET and
                REDIRECT_FIELD_NAME not in request.POST):
            context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
        context.update(extra_context or {})

        defaults = {
            'extra_context': context,
            'authentication_form': self.login_form or AdminAuthenticationForm,
            'template_name': self.login_template or 'admin/login.html',
        }
        request.current_app = self.name
        return login(request, **defaults)
    def _build_app_dict(self, request, label=None):
        """
        Builds the app dictionary. Takes an optional label parameters to filter
        models of a specific app.
        """
        app_dict = {}

        if label:
            models = {
                m: m_a for m, m_a in self._registry.items()
                if m._meta.app_label == label
            }
        else:
            models = self._registry

        for model, model_admin in models.items():
            app_label = model._meta.app_label

            has_module_perms = model_admin.has_module_permission(request)
            if not has_module_perms:
                # Asking for a specific app the user can't see is an error;
                # during a full scan the model is simply skipped.
                if label:
                    raise PermissionDenied
                continue

            perms = model_admin.get_model_perms(request)

            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True not in perms.values():
                continue

            info = (app_label, model._meta.model_name)
            model_dict = {
                'name': capfirst(model._meta.verbose_name_plural),
                'object_name': model._meta.object_name,
                'perms': perms,
            }
            if perms.get('change'):
                try:
                    model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                except NoReverseMatch:
                    pass
            if perms.get('add'):
                try:
                    model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                except NoReverseMatch:
                    pass
            if app_label in app_dict:
                app_dict[app_label]['models'].append(model_dict)
            else:
                app_dict[app_label] = {
                    'name': apps.get_app_config(app_label).verbose_name,
                    'app_label': app_label,
                    'app_url': reverse(
                        'admin:app_list',
                        kwargs={'app_label': app_label},
                        current_app=self.name,
                    ),
                    'has_module_perms': has_module_perms,
                    'models': [model_dict],
                }

        if label:
            # May be None when the label exists but no model was visible.
            return app_dict.get(label)
        return app_dict
def get_app_list(self, request):
"""
Returns a sorted list of all the installed apps that have been
registered in this site.
"""
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically.
app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
return app_list
    @never_cache
    def index(self, request, extra_context=None):
        """
        Displays the main admin index page, which lists all of the installed
        apps that have been registered in this site.
        """
        app_list = self.get_app_list(request)
        context = dict(
            self.each_context(request),
            title=self.index_title,
            app_list=app_list,
        )
        # extra_context wins over the defaults built above.
        context.update(extra_context or {})
        request.current_app = self.name
        return TemplateResponse(request, self.index_template or 'admin/index.html', context)
    def app_index(self, request, app_label, extra_context=None):
        """Render the index page for a single app; 404 when the user cannot
        see any of its models."""
        app_dict = self._build_app_dict(request, app_label)
        if not app_dict:
            raise Http404('The requested admin page does not exist.')
        # Sort the models alphabetically within each app.
        app_dict['models'].sort(key=lambda x: x['name'])
        app_name = apps.get_app_config(app_label).verbose_name
        context = dict(
            self.each_context(request),
            title=_('%(app)s administration') % {'app': app_name},
            app_list=[app_dict],
            app_label=app_label,
        )
        context.update(extra_context or {})
        request.current_app = self.name
        # Prefer a per-app template when one exists.
        return TemplateResponse(request, self.app_index_template or [
            'admin/%s/app_index.html' % app_label,
            'admin/app_index.html'
        ], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
| bsd-3-clause |
x303597316/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/update_permissions.py | 35 | 1239 | from django.core.management.base import BaseCommand
from django.contrib.auth.management import create_permissions as _create_permissions
from django_extensions.management.utils import signalcommand
try:
    # Django >= 1.7: use the app registry. create_permissions() no longer
    # takes a models argument there, so the shim drops it; get_models is a
    # placeholder returning None purely to keep the call site uniform.
    from django.apps import apps as django_apps
    get_models = lambda: None
    get_app = django_apps.get_app_config
    get_all_apps = django_apps.get_app_configs

    def create_permissions(app, models, verbosity):
        _create_permissions(app, verbosity)
except ImportError:
    # Older Django: fall back to the legacy model/app helpers and collect the
    # app set by walking every installed model.
    from django.db.models import get_models, get_app
    django_apps = None

    def get_all_apps():
        apps = set()
        for model in get_models():
            apps.add(get_app(model._meta.app_label))
        return apps
    create_permissions = _create_permissions
class Command(BaseCommand):
    args = '<app app ...>'
    help = 'reloads permissions for specified apps, or all apps if no args are specified'

    @signalcommand
    def handle(self, *args, **options):
        # Resolve the requested apps, defaulting to every installed app.
        apps = set()
        if not args:
            apps = get_all_apps()
        else:
            for arg in args:
                apps.add(get_app(arg))
        for app in apps:
            # NOTE(review): verbosity defaults to 3 here although Django's own
            # default is 1 -- confirm that is intentional. On Django >= 1.7 the
            # get_models() value is ignored by the create_permissions shim.
            create_permissions(app, get_models(), int(options.get('verbosity', 3)))
| apache-2.0 |
ThorbenJensen/wifi-locator | src/utils_classification.py | 1 | 2437 | """Module provides classification of signals and evaluates models."""
from random import randrange
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble.weight_boosting import AdaBoostClassifier
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
# classification models
# Candidate classification models, keyed by a human-readable name. The keys
# double as display labels in predict() below.
classifiers = {'K-Nearest Neighbors (Braycurtis norm)':
               KNeighborsClassifier(n_neighbors=3, algorithm='auto',
                                    metric='braycurtis'),
               'Random Forest':
               RandomForestClassifier(n_estimators=80, n_jobs=1),
               'SVM': SVC(gamma=2, C=1),
               'Linear Support Vector Machine': SVC(kernel="linear", C=0.025),
               'Decision Tree': DecisionTreeClassifier(max_depth=5),
               'Ada Boost': AdaBoostClassifier(n_estimators=80,
                                               learning_rate=0.4),
               'Naive Bayes': GaussianNB(),
               }

# Hard-voting ensemble over all of the candidate models above.
vc = VotingClassifier(estimators=list(classifiers.items()), voting='hard')
def evaluate_model(model_name, model, x, y):
    """Evaluate model accuracy via cross validation and print the result."""
    print('%s:' % model_name)
    labels = y.values.ravel()
    model.fit(x, labels)
    scores = cross_val_score(model, x, labels, cv=5, scoring='f1_micro')
    print('CV f1_micro (not reusing data): %s' % np.mean(scores))
def predict(x, y, signal_matrix, verbose=1):
    """Predict current location, based on hard voting among classifiers."""
    # TODO: classify based on *balanced* sample (repeated sampling strategy)
    labels = y.values.ravel()

    # Fit and report every model inside the VotingClassifier.
    for model_name, model in classifiers.items():
        model.fit(x, labels)
        prediction = model.predict(signal_matrix)[0]
        if verbose > 0:
            print('Model "%s": %s' % (model_name, prediction))

    # Report for the VotingClassifier itself.
    vc.fit(x, labels)
    candidates = vc.predict(signal_matrix)
    # In case the VotingClassifier returns more than one result: draw random.
    chosen = candidates[randrange(0, len(candidates))]
    if verbose > 0:
        print('VotingClassifier result: %s' % chosen)
    return chosen
| apache-2.0 |
dhalleine/tensorflow | tensorflow/python/platform/logging_test.py | 211 | 1133 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
class EventLoaderTest(googletest.TestCase):
  """Smoke test for the tf_logging shim."""

  def test_log(self):
    # Just check that logging works without raising an exception.
    logging.error("test log message")
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
GoogleCloudPlatform/PerfKitBenchmarker | tests/windows_packages/psping_test.py | 1 | 3148 | # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for psping_benchmark."""
import collections
import json
import unittest
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.windows_packages import psping
# Captured PsPing console output used as a parsing fixture. NOTE: "Maxiumum"
# is (sic) -- it presumably mirrors the tool's own spelling, which the parser
# must accept; verify against real output before "correcting" it here.
psping_results = """
PsPing v2.10 - PsPing - ping, latency, bandwidth measurement utility
Copyright (C) 2012-2016 Mark Russinovich
Sysinternals - www.sysinternals.com
TCP latency test connecting to 10.138.0.2:47001: Connected
15 iterations (warmup 5) sending 8192 bytes TCP latency test: 0%
Connected
15 iterations (warmup 5) sending 8192 bytes TCP latency test: 100%
TCP roundtrip latency statistics (post warmup):
Sent = 10, Size = 8192, Total Bytes: 81920,
Minimum = 0.19ms, Maxiumum = 0.58ms, Average = 0.27ms
Latency Count
0.30\t688
0.51\t292
0.71\t15
0.92\t2
1.13\t0
"""
class PspingBenchmarkTestCase(unittest.TestCase, test_util.SamplesTestMixin):
  """Checks that ParsePspingResults extracts stats from the fixture above."""

  def testPspingParsing(self):
    # Expected summary statistics, taken from the fixture's summary line.
    minimum = 0.19
    maximum = 0.58
    average = 0.27
    use_internal_ip = True

    machine = collections.namedtuple('machine', 'zone machine_type')

    client = machine(machine_type='cA', zone='cZ')
    server = machine(machine_type='sB', zone='sZ')

    samples = psping.ParsePspingResults(psping_results, client, server,
                                        use_internal_ip)

    expected_metadata = {
        'internal_ip_used': use_internal_ip,
        'sending_zone': client.zone,
        'sending_machine_type': client.machine_type,
        'receiving_zone': server.zone,
        'receiving_machine_type': server.machine_type,
    }

    # One histogram bucket per "latency<TAB>count" line in the fixture.
    histogram = json.dumps([
        {'latency': 0.3, 'count': 688, 'bucket_number': 1},
        {'latency': 0.51, 'count': 292, 'bucket_number': 2},
        {'latency': 0.71, 'count': 15, 'bucket_number': 3},
        {'latency': 0.92, 'count': 2, 'bucket_number': 4},
        {'latency': 1.13, 'count': 0, 'bucket_number': 5},
    ])

    expected_samples = [
        sample.Sample('latency', average, 'ms', expected_metadata),
        sample.Sample('latency:maximum', maximum, 'ms', expected_metadata),
        sample.Sample('latency:minimum', minimum, 'ms', expected_metadata),
    ]

    # The histogram sample carries the JSON blob in its metadata, not in the
    # sample value.
    expected_histogram_metadata = expected_metadata.copy()
    expected_histogram_metadata['histogram'] = histogram
    expected_samples.append(sample.Sample('latency:histogram', 0, 'ms',
                                          expected_histogram_metadata))

    self.assertSampleListsEqualUpToTimestamp(expected_samples, samples)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.