| code (string, 3–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 values) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import csv
import itertools
import os.path
import re
import numpy as np
import pandas as pd
from qiime2.core.util import find_duplicates
from .base import SUPPORTED_COLUMN_TYPES, FORMATTED_ID_HEADERS, is_id_header
from .metadata import Metadata, MetadataColumn
class MetadataFileError(Exception):
_suffix = (
"There may be more errors present in the metadata file. To get a full "
"report, sample/feature metadata files can be validated with Keemei: "
"https://keemei.qiime2.org\n\nFind details on QIIME 2 metadata "
"requirements here: https://docs.qiime2.org/%s/tutorials/metadata/")
def __init__(self, message, include_suffix=True):
# Lazy import because `qiime2.__release__` is available at runtime but
# not at import time (otherwise the release value could be interpolated
# into `_suffix` in the class definition above).
import qiime2
if include_suffix:
message = message + '\n\n' + self._suffix % qiime2.__release__
super().__init__(message)
class MetadataReader:
def __init__(self, filepath):
if not os.path.isfile(filepath):
raise MetadataFileError(
"Metadata file path doesn't exist, or the path points to "
"something other than a file. Please check that the path "
"exists, has read permissions, and points to a regular file "
"(not a directory): %s" % filepath)
self._filepath = filepath
# Used by `read()` to store an iterator yielding rows with
# leading/trailing whitespace stripped from their cells (this is a
# preprocessing step that should happen with *every* row). The iterator
# protocol is the only guaranteed API on this object.
self._reader = None
def read(self, into, column_types=None):
if column_types is None:
column_types = {}
try:
# Newline settings based on recommendation from csv docs:
# https://docs.python.org/3/library/csv.html#id3
# Ignore BOM on read (but do not write BOM)
with open(self._filepath,
'r', newline='', encoding='utf-8-sig') as fh:
tsv_reader = csv.reader(fh, dialect='excel-tab', strict=True)
self._reader = (self._strip_cell_whitespace(row)
for row in tsv_reader)
header = self._read_header()
directives = self._read_directives(header)
ids, data = self._read_data(header)
except UnicodeDecodeError as e:
if ('0xff in position 0' in str(e)
or '0xfe in position 0' in str(e)):
raise MetadataFileError(
"Metadata file must be encoded as UTF-8 or ASCII, found "
"UTF-16. If this file is from Microsoft Excel, save "
"as a plain text file, not 'UTF-16 Unicode'")
raise MetadataFileError(
"Metadata file must be encoded as UTF-8 or ASCII. The "
"following error occurred when decoding the file:\n\n%s" % e)
finally:
self._reader = None
index = pd.Index(ids, name=header[0], dtype=object)
df = pd.DataFrame(data, columns=header[1:], index=index, dtype=object)
for name, type in column_types.items():
if name not in df.columns:
raise MetadataFileError(
"Column name %r specified in `column_types` is not a "
"column in the metadata file." % name)
if type not in SUPPORTED_COLUMN_TYPES:
fmt_column_types = ', '.join(
repr(e) for e in sorted(SUPPORTED_COLUMN_TYPES))
raise MetadataFileError(
"Column name %r specified in `column_types` has an "
"unrecognized column type %r. Supported column types: %s" %
(name, type, fmt_column_types))
resolved_column_types = directives.get('types', {})
resolved_column_types.update(column_types)
try:
# Cast each column to the appropriate dtype based on column type.
df = df.apply(self._cast_column, axis='index',
column_types=resolved_column_types)
except MetadataFileError as e:
# HACK: If an exception is raised within `DataFrame.apply`, pandas
# adds an extra tuple element to `e.args`, making the original
# error message difficult to read because a tuple is repr'd instead
# of a string. To work around this, we catch and reraise a
# MetadataFileError with the original error message. We use
# `include_suffix=False` to avoid adding another suffix to the
# error message we're reraising.
msg = e.args[0]
raise MetadataFileError(msg, include_suffix=False)
try:
return into(df)
except Exception as e:
raise MetadataFileError(
"There was an issue with loading the metadata file:\n\n%s" % e)
def _read_header(self):
header = None
for row in self._reader:
if self._is_header(row):
header = row
break
elif self._is_comment(row):
continue
elif self._is_empty(row):
continue
elif self._is_directive(row):
raise MetadataFileError(
"Found directive %r while searching for header. "
"Directives may only appear immediately after the header."
% row[0])
else:
raise MetadataFileError(
"Found unrecognized ID column name %r while searching for "
"header. The first column name in the header defines the "
"ID column, and must be one of these values:\n\n%s\n\n"
"NOTE: Metadata files must contain tab-separated values." %
(row[0], FORMATTED_ID_HEADERS))
if header is None:
raise MetadataFileError(
"Failed to locate header. The metadata file may be empty, or "
"consists only of comments or empty rows.")
# Trim trailing empty cells from header.
data_extent = None
for idx, cell in enumerate(header):
if cell != '':
data_extent = idx
header = header[:data_extent+1]
# Basic validation to 1) fail early before processing entire file; and
# 2) make some basic guarantees about the header for things in this
# class that use the header as part of reading the file.
column_names = set(header)
if '' in column_names:
raise MetadataFileError(
"Found at least one column without a name in the header. Each "
"column must be named.")
elif len(header) != len(column_names):
duplicates = find_duplicates(header)
raise MetadataFileError(
"Column names must be unique. The following column names are "
"duplicated: %s" %
(', '.join(repr(e) for e in sorted(duplicates))))
# Skip the first element of the header because we know it is a valid ID
# header. The other column names are validated to ensure they *aren't*
# valid ID headers.
for column_name in header[1:]:
if is_id_header(column_name):
raise MetadataFileError(
"Metadata column name %r conflicts with a name reserved "
"for the ID column header. Reserved ID column headers:"
"\n\n%s" % (column_name, FORMATTED_ID_HEADERS))
return header
def _read_directives(self, header):
directives = {}
for row in self._reader:
if not self._is_directive(row):
self._reader = itertools.chain([row], self._reader)
break
if not self._is_column_types_directive(row):
raise MetadataFileError(
"Unrecognized directive %r. Only the #q2:types "
"directive is supported at this time." % row[0])
if 'types' in directives:
raise MetadataFileError(
"Found duplicate directive %r. Each directive may "
"only be specified a single time." % row[0])
row = self._match_header_len(row, header)
column_types = {}
for column_name, column_type in zip(header[1:], row[1:]):
if column_type:
type_nocase = column_type.lower()
if type_nocase in SUPPORTED_COLUMN_TYPES:
column_types[column_name] = type_nocase
else:
fmt_column_types = ', '.join(
repr(e) for e in sorted(SUPPORTED_COLUMN_TYPES))
raise MetadataFileError(
"Column %r has an unrecognized column type %r "
"specified in its #q2:types directive. "
"Supported column types (case-insensitive): %s"
% (column_name, column_type, fmt_column_types))
directives['types'] = column_types
return directives
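# A #q2:types directive row, as it appears in the TSV file, has
# '#q2:types' in the ID column and a (possibly empty) type per metadata
# column. An illustrative two-column file (values assumed, tabs shown
# as <TAB>):
#
#   id<TAB>body-site<TAB>days-elapsed
#   #q2:types<TAB>categorical<TAB>numeric
#   sample-1<TAB>gut<TAB>3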
def _read_data(self, header):
ids = []
data = []
for row in self._reader:
if self._is_comment(row):
continue
elif self._is_empty(row):
continue
elif self._is_directive(row):
raise MetadataFileError(
"Found directive %r outside of the directives section of "
"the file. Directives may only appear immediately after "
"the header." % row[0])
elif self._is_header(row):
raise MetadataFileError(
"Metadata ID %r conflicts with a name reserved for the ID "
"column header. Reserved ID column headers:\n\n%s" %
(row[0], FORMATTED_ID_HEADERS))
row = self._match_header_len(row, header)
ids.append(row[0])
data.append(row[1:])
return ids, data
def _strip_cell_whitespace(self, row):
return [cell.strip() for cell in row]
def _match_header_len(self, row, header):
row_len = len(row)
header_len = len(header)
if row_len < header_len:
# Pad row with empty cells to match header length.
row = row + [''] * (header_len - row_len)
elif row_len > header_len:
trailing_row = row[header_len:]
if not self._is_empty(trailing_row):
raise MetadataFileError(
"Metadata row contains more cells than are declared by "
"the header. The row has %d cells, while the header "
"declares %d cells." % (row_len, header_len))
row = row[:header_len]
return row
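# Sketch of the padding/trimming behavior above (illustrative values):
#
#   header = ['id', 'col1', 'col2']
#   _match_header_len(['s1', 'a'], header)            -> ['s1', 'a', '']
#   _match_header_len(['s1', 'a', 'b', ''], header)   -> ['s1', 'a', 'b']
#   _match_header_len(['s1', 'a', 'b', 'c'], header)  -> MetadataFileError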
def _is_empty(self, row):
# `all` returns True for an empty iterable, so this check works for a
# row of zero elements (corresponds to a blank line in the file).
return all((cell == '' for cell in row))
def _is_comment(self, row):
return (
len(row) > 0 and
row[0].startswith('#') and
not self._is_directive(row) and
not self._is_header(row)
)
def _is_header(self, row):
if len(row) == 0:
return False
return is_id_header(row[0])
def _is_directive(self, row):
return len(row) > 0 and row[0].startswith('#q2:')
def _is_column_types_directive(self, row):
return len(row) > 0 and row[0] == '#q2:types'
def _cast_column(self, series, column_types):
if series.name in column_types:
if column_types[series.name] == 'numeric':
return self._to_numeric(series)
else: # 'categorical'
return self._to_categorical(series)
else:
# Infer type
try:
return self._to_numeric(series)
except MetadataFileError:
return self._to_categorical(series)
def _to_categorical(self, series):
# Replace empty strings with `None` to force the series to remain
# dtype=object (this only matters if the series consists solely of
# missing data). Replacing with np.nan and casting to dtype=object
# won't retain the correct dtype in the resulting dataframe
# (`DataFrame.apply` seems to force series consisting solely of np.nan
# to dtype=float64, even if dtype=object is specified).
#
# To replace a value with `None`, the following invocation of
# `Series.replace` must be used because `None` is a sentinel:
# https://stackoverflow.com/a/17097397/3776794
return series.replace([''], [None])
def _to_numeric(self, series):
series = series.replace('', np.nan)
is_numeric = series.apply(self._is_numeric)
if is_numeric.all():
return pd.to_numeric(series, errors='raise')
else:
non_numerics = series[~is_numeric].unique()
raise MetadataFileError(
"Cannot convert metadata column %r to numeric. The following "
"values could not be interpreted as numeric: %s" %
(series.name,
', '.join(repr(e) for e in sorted(non_numerics))))
def _is_numeric(self, value):
return (isinstance(value, float) or
len(_numeric_regex.findall(value)) == 1)
class MetadataWriter:
def __init__(self, metadata):
self._metadata = metadata
def write(self, filepath):
# Newline settings based on recommendation from csv docs:
# https://docs.python.org/3/library/csv.html#id3
# Do NOT write a BOM, hence utf-8 not utf-8-sig
with open(filepath, 'w', newline='', encoding='utf-8') as fh:
tsv_writer = csv.writer(fh, dialect='excel-tab', strict=True)
md = self._metadata
header = [md.id_header]
types_directive = ['#q2:types']
if isinstance(md, Metadata):
for name, props in md.columns.items():
header.append(name)
types_directive.append(props.type)
elif isinstance(md, MetadataColumn):
header.append(md.name)
types_directive.append(md.type)
else:
raise NotImplementedError
tsv_writer.writerow(header)
tsv_writer.writerow(types_directive)
df = md.to_dataframe()
df.fillna('', inplace=True)
df = df.applymap(self._format)
tsv_writer.writerows(df.itertuples(index=True))
def _format(self, value):
if isinstance(value, str):
return value
elif isinstance(value, float):
# Use fixed precision or scientific notation as necessary (both are
# roundtrippable in the metadata file format), with up to 15 digits
# *total* precision (i.e. before and after the decimal point),
# rounding if necessary. Trailing zeros or decimal points will not
# be included in the formatted string (e.g. 42.0 will be formatted
# as "42"). A precision of 15 digits is used because that is within
# the 64-bit floating point spec (things get weird after that).
#
# Using repr() and str() each have their own predefined precision
# which varies across Python versions. Using the string formatting
# presentation types (e.g. %g, %f) without specifying a precision
# will usually default to 6 digits past the decimal point, which
# seems a little low.
#
# References:
#
# - https://stackoverflow.com/a/2440786/3776794
# - https://stackoverflow.com/a/2440708/3776794
# - https://docs.python.org/3/library/string.html#
# format-specification-mini-language
# - https://stackoverflow.com/a/20586479/3776794
# - https://drj11.wordpress.com/2007/07/03/python-poor-printing-
# of-floating-point/
return '{0:.15g}'.format(value)
else:
raise NotImplementedError
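# Illustrative outputs of the '{0:.15g}' formatting above (a sketch;
# values chosen for demonstration, not from the QIIME 2 test suite):
#
#   '{0:.15g}'.format(42.0)     -> '42'
#   '{0:.15g}'.format(0.1)      -> '0.1'
#   '{0:.15g}'.format(1/3)      -> '0.333333333333333'
#   '{0:.15g}'.format(1.5e-20)  -> '1.5e-20'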
# Credit: https://stackoverflow.com/a/4703508/3776794
_numeric_pattern = r"""
^[-+]? # optional sign
(?:
(?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
|
(?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc
)
# followed by optional exponent part if desired
(?: [Ee] [+-]? \d+ ) ?$
"""
_numeric_regex = re.compile(_numeric_pattern, re.VERBOSE)
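# Examples of what the pattern accepts/rejects (illustrative):
#
#   _numeric_regex.match('4.2')     -> match
#   _numeric_regex.match('-.5e10')  -> match
#   _numeric_regex.match('+3.')     -> match
#   _numeric_regex.match('abc')     -> None
#   _numeric_regex.match('1.2.3')   -> None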
| qiime2/qiime2 | qiime2/metadata/io.py | Python | bsd-3-clause | 17,510 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
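# In a git-archive tarball git expands the $Format$ placeholders; a
# hypothetical expanded result (illustrative values only):
#
#   git_refnames = " (HEAD -> master, tag: v1.0.0)"
#   git_full = "3f786850e387550fdab836ed7e6dc881de23001b"
#   git_date = "2017-06-28 20:27:28 -0600"
#
# In a plain checkout they stay literal, which is how
# git_versions_from_keywords() detects that case below.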
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "metpy-"
cfg.versionfile_source = "metpy/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601-compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601-like"
# string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
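# e.g. "2017-06-28 20:27:28 -0600" -> "2017-06-28T20:27:28-0600"
# (illustrative input)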
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
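# Illustrative renderings (pieces values assumed, not from a real repo):
#
#   closest-tag "1.2", distance 0, clean          -> "1.2"
#   closest-tag "1.2", distance 3, short abc1234  -> "1.2+3.gabc1234"
#   closest-tag "1.2", distance 0, dirty          -> "1.2+0.gabc1234.dirty"
#   no tag, distance 7, short abc1234             -> "0+untagged.7.gabc1234"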
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| jrleeman/MetPy | metpy/_version.py | Python | bsd-3-clause | 18,449 |
from __future__ import division
from . import opnames
def parseInstructions(bytestream, isConstructor):
data = bytestream
assert(data.off == 0)
instructions = {}
while data.size() > 0:
address = data.off
inst = getNextInstruction(data, address)
#replace constructor invocations with synthetic op invokeinit to simplify things later
if inst[0] == opnames.INVOKESPECIAL and isConstructor(inst[1]):
inst = (opnames.INVOKEINIT,) + inst[1:]
instructions[address] = inst
assert(data.size() == 0)
return instructions
simpleOps = {0x00:opnames.NOP, 0x01:opnames.CONSTNULL, 0x94:opnames.LCMP,
0xbe:opnames.ARRLEN, 0xbf:opnames.THROW, 0xc2:opnames.MONENTER,
0xc3:opnames.MONEXIT, 0x57:opnames.POP, 0x58:opnames.POP2, 0x59:opnames.DUP,
0x5a:opnames.DUPX1, 0x5b:opnames.DUPX2, 0x5c:opnames.DUP2,
0x5d:opnames.DUP2X1, 0x5e:opnames.DUP2X2, 0x5f:opnames.SWAP}
singleIndexOps = {0xb2:opnames.GETSTATIC,0xb3:opnames.PUTSTATIC,0xb4:opnames.GETFIELD,
0xb5:opnames.PUTFIELD,0xb6:opnames.INVOKEVIRTUAL,0xb7:opnames.INVOKESPECIAL,
0xb8:opnames.INVOKESTATIC, 0xbb:opnames.NEW,0xbd:opnames.ANEWARRAY,
0xc0:opnames.CHECKCAST,0xc1:opnames.INSTANCEOF}
def getNextInstruction(data, address):
byte = data.get('>B')
#typecode - B,C,S, and Bool are only used for array types and sign extension
A,B,C,D,F,I,L,S = "ABCDFIJS"
Bool = "Z"
if byte in simpleOps:
inst = (simpleOps[byte],)
elif byte in singleIndexOps:
inst = (singleIndexOps[byte], data.get('>H'))
elif byte <= 0x11:
op = opnames.CONST
if byte <= 0x08:
t, val = I, byte - 0x03
elif byte <= 0x0a:
t, val = L, byte - 0x09
elif byte <= 0x0d:
t, val = F, float(byte - 0x0b)
elif byte <= 0x0f:
t, val = D, float(byte - 0x0e)
elif byte == 0x10:
t, val = I, data.get('>b')
else:
t, val = I, data.get('>h')
inst = op, t, val
elif byte == 0x12:
inst = opnames.LDC, data.get('>B'), 1
elif byte == 0x13:
inst = opnames.LDC, data.get('>H'), 1
elif byte == 0x14:
inst = opnames.LDC, data.get('>H'), 2
elif byte <= 0x2d:
op = opnames.LOAD
if byte <= 0x19:
t = [I,L,F,D,A][byte - 0x15]
val = data.get('>B')
else:
temp = byte - 0x1a
t = [I,L,F,D,A][temp // 4]
val = temp % 4
inst = op, t, val
elif byte <= 0x35:
op = opnames.ARRLOAD
t = [I,L,F,D,A,B,C,S][byte - 0x2e]
inst = (op, t) if t != A else (opnames.ARRLOAD_OBJ,) #split object case into separate op name to simplify things later
elif byte <= 0x4e:
op = opnames.STORE
if byte <= 0x3a:
t = [I,L,F,D,A][byte - 0x36]
val = data.get('>B')
else:
temp = byte - 0x3b
t = [I,L,F,D,A][temp // 4]
val = temp % 4
inst = op, t, val
elif byte <= 0x56:
op = opnames.ARRSTORE
t = [I,L,F,D,A,B,C,S][byte - 0x4f]
inst = (op, t) if t != A else (opnames.ARRSTORE_OBJ,) #split object case into separate op name to simplify things later
elif byte <= 0x77:
temp = byte - 0x60
opt = (opnames.ADD,opnames.SUB,opnames.MUL,opnames.DIV,opnames.REM,opnames.NEG)[temp//4]
t = (I,L,F,D)[temp % 4]
inst = opt, t
elif byte <= 0x83:
temp = byte - 0x78
opt = (opnames.SHL,opnames.SHR,opnames.USHR,opnames.AND,opnames.OR,opnames.XOR)[temp//2]
t = (I,L)[temp % 2]
inst = opt, t
elif byte == 0x84:
inst = opnames.IINC, data.get('>B'), data.get('>b')
elif byte <= 0x90:
op = opnames.CONVERT
pairs = ((I,L),(I,F),(I,D),(L,I),(L,F),(L,D),(F,I),(F,L),(F,D),
(D,I),(D,L),(D,F))
src_t, dest_t = pairs[byte - 0x85]
inst = op, src_t, dest_t
elif byte <= 0x93:
op = opnames.TRUNCATE
dest_t = [B,C,S][byte - 0x91]
inst = op, dest_t
elif byte <= 0x98:
op = opnames.FCMP
temp = byte - 0x95
t = (F,D)[temp//2]
NaN_val = (-1,1)[temp % 2]
inst = op, t, NaN_val
elif byte <= 0x9e:
op = opnames.IF_I
cmp_t = ('eq','ne','lt','ge','gt','le')[byte - 0x99]
jumptarget = data.get('>h') + address
inst = op, cmp_t, jumptarget
elif byte <= 0xa4:
op = opnames.IF_ICMP
cmp_t = ('eq','ne','lt','ge','gt','le')[byte - 0x9f]
jumptarget = data.get('>h') + address
inst = op, cmp_t, jumptarget
elif byte <= 0xa6:
op = opnames.IF_ACMP
cmp_t = ('eq','ne')[byte - 0xa5]
jumptarget = data.get('>h') + address
inst = op, cmp_t, jumptarget
elif byte == 0xa7:
inst = opnames.GOTO, data.get('>h') + address
elif byte == 0xa8:
inst = opnames.JSR, data.get('>h') + address
elif byte == 0xa9:
inst = opnames.RET, data.get('>B')
elif byte == 0xaa: #Table Switch
padding = (3-address) % 4
padding = data.getRaw(padding)
#OpenJDK requires padding to be 0
default = data.get('>i') + address
low = data.get('>i')
high = data.get('>i')
assert(high >= low)
numpairs = high - low + 1
offsets = [data.get('>i') + address for _ in range(numpairs)]
jumps = zip(range(low, high+1), offsets)
inst = opnames.SWITCH, default, jumps, padding
elif byte == 0xab: #Lookup Switch
padding = (3-address) % 4
padding = data.getRaw(padding)
#OpenJDK requires padding to be 0
default = data.get('>i') + address
numpairs = data.get('>i')
assert(numpairs >= 0)
pairs = [data.get('>ii') for _ in range(numpairs)]
keys = [k for k,v in pairs]
jumps = [(x,(y + address)) for x,y in pairs]
inst = opnames.SWITCH, default, jumps, padding
elif byte <= 0xb1:
op = opnames.RETURN
t = (I,L,F,D,A,None)[byte - 0xac]
inst = op, t
elif byte == 0xb9:
op = opnames.INVOKEINTERFACE
index = data.get('>H')
count, zero = data.get('>B'), data.get('>B')
inst = op, index, count, zero
elif byte == 0xba:
op = opnames.INVOKEDYNAMIC
index = data.get('>H')
zero = data.get('>H')
inst = op, index, zero
elif byte == 0xbc:
typecode = data.get('>b')
types = {4:Bool, 5:C, 6:F, 7:D, 8:B, 9:S, 10:I, 11:L}
t = types.get(typecode)
inst = opnames.NEWARRAY, t
elif byte == 0xc4: #wide
realbyte = data.get('>B')
if realbyte >= 0x15 and realbyte < 0x1a:
t = [I,L,F,D,A][realbyte - 0x15]
inst = opnames.LOAD, t, data.get('>H')
elif realbyte >= 0x36 and realbyte < 0x3b:
t = [I,L,F,D,A][realbyte - 0x36]
inst = opnames.STORE, t, data.get('>H')
elif realbyte == 0xa9:
inst = opnames.RET, data.get('>H')
elif realbyte == 0x84:
inst = opnames.IINC, data.get('>H'), data.get('>h')
else:
assert(0)
elif byte == 0xc5:
op = opnames.MULTINEWARRAY
index = data.get('>H')
dim = data.get('>B')
inst = op, index, dim
elif byte <= 0xc7:
op = opnames.IF_A
cmp_t = ('eq','ne')[byte - 0xc6]
jumptarget = data.get('>h') + address
inst = op, cmp_t, jumptarget
elif byte == 0xc8:
inst = opnames.GOTO, data.get('>i') + address
elif byte == 0xc9:
inst = opnames.JSR, data.get('>i') + address
else:
assert(0)
return inst
def printInstruction(instr):
if len(instr) == 1:
return instr[0]
elif len(instr) == 2:
return '{}({})'.format(*instr)
else:
return '{}{}'.format(instr[0], instr[1:])
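# Sketch of the output shapes (assuming opnames constants are plain
# strings; values illustrative):
#
#   printInstruction((opnames.NOP,))          -> the bare op name
#   printInstruction((opnames.GOTO, 42))      -> "goto(42)"-style
#   printInstruction((opnames.LOAD, 'I', 2))  -> "load('I', 2)"-style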
| alexkasko/krakatau-java | krakatau-lib/src/main/resources/Lib/Krakatau/bytecode.py | Python | gpl-3.0 | 7,976 |
#!/usr/bin/env python
# Copyright (c) 2007 Ruben Reifenberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
@summary:
Demonstrates the basic usage.
@author: Ruben Reifenberg
"""
from rrlog.server import filewriter
logServer = filewriter.createRotatingServer(
filePathPattern = "./demo-log-%s.txt", # "pattern" because %s (or %d) is required for the rotate-number
rotateCount=3,
rotateLineMin=10,
)
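# With rotateCount=3 the %s in filePathPattern is filled with the rotate
# number, so files like ./demo-log-1.txt ... ./demo-log-3.txt are written
# in turn (the numbering here is an assumption for illustration).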
# Start the server as an XMLRPC server:
from rrlog.server import xmlrpc
xmlrpc.startServer(
logServer,
ports=(9804,9805,9806,), # try in this order, use the first port available
)
# The server waits for requests now.
| shful/python-rrlog | doc/demo/demo_xmlrpcserverfiles.py | Python | mit | 1,657 |
# -*- coding: utf-8 -*-
"""Utility functions to get information about the
git or mercurial repository in the working directory
"""
import subprocess
import os
def get_current_git_branch():
command = ["git", "symbolic-ref", "--short", "-q", "HEAD"]
try:
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, _ = proc.communicate()
return out.strip().decode("utf-8")
except subprocess.CalledProcessError:
pass
return ""
# We'll avoid shelling out to hg for speed.
def find_hg_root():
def get_parent_dir(directory):
return os.path.abspath(os.path.join(directory, os.pardir))
cwd = os.getcwd()
while True:
pardir = get_parent_dir(cwd)
hgroot = os.path.join(cwd, ".hg")
if os.path.isdir(hgroot):
return hgroot
if cwd == pardir:
break
cwd = pardir
return ""
def get_current_hg_branch():
try:
hgroot = find_hg_root()
with open(os.path.join(hgroot, "branch")) as f:
branch = f.read().rstrip()
except IOError:
branch = ""
return branch
def get_current_hg_bookmark():
try:
hgroot = find_hg_root()
with open(os.path.join(hgroot, "bookmarks.current")) as f:
bookmark = f.read()
except IOError:
bookmark = ""
return bookmark
def get_current_hg_id():
branch = get_current_hg_branch()
bookmark = get_current_hg_bookmark()
if bookmark:
# If we have a bookmark, the default branch is no longer
# an interesting name.
if branch == "default":
branch = ""
branch += " " + bookmark
return branch
def get_current_vcs_branch():
return get_current_git_branch() + get_current_hg_id()
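# Usage sketch (hypothetical output, depends on the working directory):
#
#   >>> get_current_vcs_branch()
#   'master'   # in a git checkout on branch master
#   ''         # outside any repository
#
# Because the two results are concatenated, a directory that is both a
# git and an hg checkout would yield both names joined together.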
| sloria/doitlive | doitlive/version_control.py | Python | mit | 1,805 |
from pyedas.portal.edas import *
import traceback  # used in the except branch below
request_port = 5670
response_port = 5671
edas_server = "localhost"
portal = None
try:
portal = EDASPortal( edas_server, request_port, response_port)
rId = portal.sendMessage( "quit", [] )
except Exception, err:
traceback.print_exc()
finally:
if( portal ): portal.shutdown()
| nasa-nccs-cds/EDAS | python/src/pyedas/shutdownServer.py | Python | gpl-2.0 | 332 |
import hashlib
from functools import wraps
from flask import request, Response, current_app
def check_auth(username, password):
pwd_hash = hashlib.md5(password.encode()).hexdigest()
return (
username == current_app.config['USERNAME'] and
pwd_hash == current_app.config['PASSWORD_HASH']
)
def authenticate():
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'}
)
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
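# Usage sketch (hypothetical route; assumes USERNAME and PASSWORD_HASH
# are set in the Flask app config):
#
#   @app.route('/admin')
#   @requires_auth
#   def admin():
#       return 'authenticated'
#
# PASSWORD_HASH is compared against an MD5 hex digest, e.g.
# hashlib.md5(b'secret').hexdigest().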
| MrLeeh/flaskhab | app/auth.py | Python | mit | 816 |
import typing
from falsy.netboy.curl_loop import CurlLoop
from falsy.netboy.fetch import net_boy
from falsy.netboy.run import run
import pycurl
class NetBoy:
class Exception(Exception):
pass
class Dict(typing.Dict[str, typing.Any]):
def __getattr__(self, name):
# type: (str) -> Any
try:
return self[name]
except KeyError:
# raise NetBoy.Exception('netboy key error: ' + name)
return None # '!netboy key [' + name + '] does not exist'
except Exception:
raise NetBoy.Exception('netboy exception: ' + name)
def __setattr__(self, name, value):
# type: (str, Any) -> None
self[name] = value
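# Sketch of the attribute-style access this Dict provides (illustrative):
#
#   d = NetBoy.Dict({'url': 'http://example.com'})
#   d.url      -> 'http://example.com'
#   d.missing  -> None (missing keys return None rather than raising)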
def __init__(self, payload=None, share=None):
self.payload = payload
if share:
s = pycurl.CurlShare()
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_SSL_SESSION)
self.share = s
else:
self.share = None
def run(self, payload=None, loop=None):
real_payload = payload
if self.payload is None:
real_payload = payload
elif payload is None:
real_payload = self.payload
else:
real_payload = self.payload + payload
ress = run(net_boy(real_payload, self.share), loop=loop)
obj_ress = []
for v in ress:
if type(v) == CurlLoop.CurlException:
boy = NetBoy.Dict(v.data)
# boy['payload'] = real_payload
obj_ress.append(boy)
elif type(v) == dict:
boy = NetBoy.Dict(v)
obj_ress.append(boy)
# else:
# boy = NetBoy.Dict({
# 'state': 'critical',
# 'spider': 'pycurl',
# 'error_code': -1,
# 'error_desc': "{} - {}".format(type(v), str(v)),
# 'payload': real_payload
# })
# obj_ress.append(boy)
return obj_ress
| pingf/falsy | falsy/netboy/netboy.py | Python | mit | 2,251 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script for a testing an existing SDK.
This script is normally run immediately after build_sdk.py.
"""
import argparse
import os
import subprocess
import sys
import buildbot_common
import build_projects
import build_sdk
import build_version
import parse_dsc
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SDK_SRC_DIR = os.path.dirname(SCRIPT_DIR)
SDK_LIBRARY_DIR = os.path.join(SDK_SRC_DIR, 'libraries')
SDK_DIR = os.path.dirname(SDK_SRC_DIR)
SRC_DIR = os.path.dirname(SDK_DIR)
OUT_DIR = os.path.join(SRC_DIR, 'out')
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import getos
def StepBuildExamples(pepperdir):
for config in ('Debug', 'Release'):
build_sdk.BuildStepMakeAll(pepperdir, 'getting_started',
'Build Getting Started (%s)' % config,
deps=False, config=config)
build_sdk.BuildStepMakeAll(pepperdir, 'examples',
'Build Examples (%s)' % config,
deps=False, config=config)
def StepCopyTests(pepperdir, toolchains, build_experimental):
buildbot_common.BuildStep('Copy Tests')
# Update test libraries and test apps
filters = {
'DEST': ['tests']
}
if not build_experimental:
filters['EXPERIMENTAL'] = False
tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
build_projects.UpdateHelpers(pepperdir, clobber=False)
build_projects.UpdateProjects(pepperdir, tree, clobber=False,
toolchains=toolchains)
def StepBuildLibraries(pepperdir, sanitizer):
for config in ('Debug', 'Release'):
title = 'Build Libs (%s)[sanitizer=%s]' % (config, sanitizer)
build_sdk.BuildStepMakeAll(pepperdir, 'src', title, config=config,
args=GetSanitizerArgs(sanitizer))
def StepBuildTests(pepperdir, sanitizer):
for config in ('Debug', 'Release'):
title = 'Build Tests (%s)' % config
if sanitizer:
title += '[sanitizer=%s]' % sanitizer
build_sdk.BuildStepMakeAll(pepperdir, 'tests', title, deps=False,
config=config, args=GetSanitizerArgs(sanitizer))
def GetSanitizerArgs(sanitizer):
if sanitizer == 'valgrind':
return ['TOOLCHAIN=linux', 'RUN_UNDER=valgrind']
elif sanitizer == 'address':
return ['TOOLCHAIN=linux', 'ASAN=1']
elif sanitizer == 'thread':
return ['TOOLCHAIN=linux', 'TSAN=1']
return []
def StepRunSelLdrTests(pepperdir, sanitizer):
filters = {
'SEL_LDR': True
}
tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
def RunTest(test, toolchain, config, arch=None):
args = ['STANDALONE=1', 'TOOLCHAIN=%s' % toolchain]
args += GetSanitizerArgs(sanitizer)
if arch is not None:
args.append('NACL_ARCH=%s' % arch)
build_projects.BuildProjectsBranch(pepperdir, test, clean=False,
deps=False, config=config,
args=args + ['run'])
if getos.GetPlatform() == 'win':
# On win32 we only support running on the system
# arch
archs = (getos.GetSystemArch('win'),)
elif getos.GetPlatform() == 'mac':
# We only ship a 32-bit version of sel_ldr on mac.
archs = ('x86_32',)
else:
# On linux we can run both 32 and 64-bit, and arm (via qemu)
archs = ('x86_64', 'x86_32', 'arm')
for root, projects in tree.iteritems():
for project in projects:
if sanitizer:
sanitizer_name = '[sanitizer=%s]' % sanitizer
else:
sanitizer_name = ''
title = 'standalone test%s: %s' % (sanitizer_name,
os.path.basename(project['NAME']))
location = os.path.join(root, project['NAME'])
buildbot_common.BuildStep(title)
configs = ('Debug', 'Release')
# On linux we can run the standalone tests natively using the host
# compiler.
if getos.GetPlatform() == 'linux':
if sanitizer:
configs = ('Debug',)
for config in configs:
RunTest(location, 'linux', config)
if sanitizer:
continue
for toolchain in ('clang-newlib', 'glibc', 'pnacl'):
for arch in archs:
for config in configs:
RunTest(location, toolchain, config, arch)
def StepRunBrowserTests(toolchains, experimental):
buildbot_common.BuildStep('Run Tests')
args = [
sys.executable,
os.path.join(SCRIPT_DIR, 'test_projects.py'),
'--retry-times=3',
]
if experimental:
args.append('-x')
for toolchain in toolchains:
args.extend(['-t', toolchain])
try:
subprocess.check_call(args)
except subprocess.CalledProcessError:
buildbot_common.ErrorExit('Error running tests.')
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--experimental', help='build experimental tests',
action='store_true')
parser.add_argument('--sanitizer',
help='Run sanitizer (asan/tsan/valgrind) tests',
action='store_true')
parser.add_argument('--verbose', '-v', help='Verbose output',
action='store_true')
parser.add_argument('phases', nargs="*")
if 'NACL_SDK_ROOT' in os.environ:
# We don't want the currently configured NACL_SDK_ROOT to have any effect
# on the build.
del os.environ['NACL_SDK_ROOT']
# To setup bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete test_sdk.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
options = parser.parse_args(args)
pepper_ver = str(int(build_version.ChromeMajorVersion()))
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
toolchains = ['clang-newlib', 'glibc', 'pnacl']
toolchains.append(getos.GetPlatform())
if options.verbose:
build_projects.verbose = True
phases = [
('build_examples', StepBuildExamples, pepperdir),
('copy_tests', StepCopyTests, pepperdir, toolchains, options.experimental),
('build_tests', StepBuildTests, pepperdir, None),
]
if options.sanitizer:
if getos.GetPlatform() != 'linux':
buildbot_common.ErrorExit('sanitizer tests only run on linux.')
clang_dir = os.path.join(SRC_DIR, 'third_party', 'llvm-build',
'Release+Asserts', 'bin')
os.environ['PATH'] = clang_dir + os.pathsep + os.environ['PATH']
phases += [
('build_libs_asan', StepBuildLibraries, pepperdir, 'address'),
('build_libs_tsan', StepBuildLibraries, pepperdir, 'thread'),
('build_tests_asan', StepBuildTests, pepperdir, 'address'),
('build_tests_tsan', StepBuildTests, pepperdir, 'thread'),
('sel_ldr_tests_asan', StepRunSelLdrTests, pepperdir, 'address'),
('sel_ldr_tests_tsan', StepRunSelLdrTests, pepperdir, 'thread'),
# TODO(sbc): get valgrind installed on the bots to enable this
# configuration
#('sel_ldr_tests_valgrind', StepRunSelLdrTests, pepperdir, 'valgrind')
]
else:
phases += [
('sel_ldr_tests', StepRunSelLdrTests, pepperdir, None),
('browser_tests', StepRunBrowserTests, toolchains, options.experimental),
]
if options.phases:
phase_names = [p[0] for p in phases]
for arg in options.phases:
if arg not in phase_names:
msg = 'Invalid argument: %s\n' % arg
msg += 'Possible arguments:\n'
for name in phase_names:
msg += ' %s\n' % name
parser.error(msg.strip())
for phase in phases:
phase_name = phase[0]
if options.phases and phase_name not in options.phases:
continue
phase_func = phase[1]
phase_args = phase[2:]
phase_func(*phase_args)
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
buildbot_common.ErrorExit('test_sdk: interrupted')
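# Example invocations (hypothetical; phase names come from the `phases`
# list assembled in main()):
#
#   ./test_sdk.py build_examples sel_ldr_tests
#   ./test_sdk.py --sanitizer   # linux-only sanitizer phases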
| heke123/chromium-crosswalk | native_client_sdk/src/build_tools/test_sdk.py | Python | bsd-3-clause | 8,061 |
#!/usr/bin/env python2
"""poisson.py
Author: Jonah Miller (jonah.maxwell.miller@gmail.com)
Time-stamp: <2017-06-28 20:27:28 (jmiller)>
This is an example script that solves the Poisson equation using
pyballd.
"""
from __future__ import print_function
import matplotlib as mpl
mpl.use("Agg")
from matplotlib import pyplot as plt
import pyballd
import numpy as np
r_h = 1.0
k = 4
a = 2
order_X = 36
order_theta = 12
exclude_last=1
theta_max = np.pi/2
rmax = 1.5
USE_FIGS_DIR=True
def residual(r,theta,u,d):
out = np.empty_like(u)
out[0] = (2*np.sin(theta)*r*d(u[0],1,0)
+ r*r*np.sin(theta)*d(u[0],2,0)
+ np.cos(theta)*d(u[0],0,1)
+ np.sin(theta)*d(u[1],0,2))
out[1] = (2*np.sin(theta)*r*d(u[1],1,0)
+ r*r*np.sin(theta)*d(u[1],2,0)
+ np.cos(theta)*d(u[1],0,1)
+ np.sin(theta)*d(u[1],0,2))
return out
def bdry_X_inner(theta,u,d):
out = u - np.array([a*np.cos(k*theta),
a*np.cos(k*theta)])
return out
def initial_guess(r,theta):
out = np.array([1./r,1./r])
return out
if __name__ == "__main__":
SOLN,s = pyballd.pde_solve_once(residual,
r_h = r_h,
order_X = order_X,
order_theta = order_theta,
bdry_X_inner = bdry_X_inner,
initial_guess = initial_guess,
theta_min = 0,
theta_max = theta_max,
method = 'hybr',
f_tol=1e-13)
SOLN = SOLN[1]
r = np.linspace(r_h,2*rmax,200)
theta = np.linspace(0,theta_max,200)
R,THETA = np.meshgrid(r,theta,indexing='ij')
mx,mz = R*np.sin(THETA), R*np.cos(THETA)
s_interpolator = s.get_interpolator_of_r(SOLN)
soln_interp = s_interpolator(R,THETA)
plt.pcolor(mx,
mz,
soln_interp)
plt.xlabel('x',fontsize=16)
plt.ylabel('z',fontsize=16)
cb = plt.colorbar()
cb.set_label(label='solution to Poisson Eqn',
fontsize=16)
plt.axis('scaled')
plt.xlim(0,rmax)
plt.ylim(0,rmax)
#plt.ylim(-rmax/,rmax/2)
for postfix in ['.png','.pdf']:
name = 'poisson_vec_solution'+postfix
if USE_FIGS_DIR:
name = 'figs/' + name
plt.savefig(name,
bbox_inches='tight')
plt.clf()
| Yurlungur/pyballd | examples/poisson_vec.py | Python | lgpl-3.0 | 2,537 |
__doc__ = """
An easy to use app that provides Stack Overflow style badges with a minimum amount of effort in django
See the README file for details, usage info, and a list of gotchas.
"""
from setuptools import setup
setup(
name='django-badges',
version='0.1.9',
author='James Robert',
author_email='jiaaro@gmail.com',
description=('An easy to use app that provides Stack Overflow style badges '
'with a minimum amount of effort in django'),
license='GPLv3',
keywords='django badges social',
url='http://djangobadges.com',
packages=['badges', 'badges.templatetags'],
package_data={'badges': ['badges/templates/badges/*.html']},
install_requires=[
"django >= 1.0",
"Pillow",
],
long_description=__doc__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
'Framework :: Django',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Utilities'
]
)
| igorkramaric/django-badges | setup.py | Python | gpl-3.0 | 1,215 |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
import unittest
import tempfile
from mock.mock import patch, MagicMock, call
import StringIO
import sys
import multiprocessing
from ambari_agent.RecoveryManager import RecoveryManager
from ambari_agent.StatusCommandsExecutor import SingleProcessStatusCommandsExecutor
from ambari_commons import subprocess32
with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
from ambari_agent.Hardware import Hardware
from ambari_agent.Heartbeat import Heartbeat
from ambari_agent.ActionQueue import ActionQueue
from ambari_agent.LiveStatus import LiveStatus
from ambari_agent import AmbariConfig
from ambari_agent.HostInfo import HostInfoLinux
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestHeartbeat(TestCase):
def setUp(self):
# disable stdout
out = StringIO.StringIO()
sys.stdout = out
def tearDown(self):
# enable stdout
sys.stdout = sys.__stdout__
def test_build(self):
config = AmbariConfig.AmbariConfig()
config.set('agent', 'prefix', 'tmp')
config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
config.set('agent', 'tolerate_download_failures', "true")
dummy_controller = MagicMock()
dummy_controller.recovery_manager.recovery_timestamp = -1
actionQueue = ActionQueue(config, dummy_controller)
heartbeat = Heartbeat(actionQueue)
result = heartbeat.build(100)
print "Heartbeat: " + str(result)
self.assertEquals(result['hostname'] != '', True, "hostname should not be empty")
self.assertEquals(result['responseId'], 100)
self.assertEquals(result['componentStatus'] is not None, True, "Heartbeat should contain componentStatus")
self.assertEquals(result['reports'] is not None, True, "Heartbeat should contain reports")
self.assertEquals(result['timestamp'] >= 1353679373880L, True)
self.assertEquals(result['recoveryTimestamp'], -1)
self.assertEquals(len(result['nodeStatus']), 2)
self.assertEquals(result['nodeStatus']['cause'], "NONE")
self.assertEquals(result['nodeStatus']['status'], "HEALTHY")
# result may or may NOT have an agentEnv structure in it
self.assertEquals((len(result) == 7) or (len(result) == 8), True)
self.assertEquals(not heartbeat.reports, True, "Heartbeat should not contain task in progress")
@patch.object(subprocess32, "Popen")
@patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
@patch.object(ActionQueue, "result")
@patch.object(HostInfoLinux, "register")
def test_no_mapping(self, register_mock, result_mock, Popen_mock):
result_mock.return_value = {
'reports': [{'status': 'IN_PROGRESS',
'stderr': 'Read from /tmp/errors-3.txt',
'stdout': 'Read from /tmp/output-3.txt',
'clusterName': u'cc',
'roleCommand': u'INSTALL',
'serviceName': u'HDFS',
'role': u'DATANODE',
'actionId': '1-1',
'taskId': 3,
'exitCode': 777}],
'componentStatus': [{'status': 'HEALTHY', 'componentName': 'NAMENODE'}]
}
config = AmbariConfig.AmbariConfig()
config.set('agent', 'prefix', 'tmp')
config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
config.set('agent', 'tolerate_download_failures', "true")
dummy_controller = MagicMock()
actionQueue = ActionQueue(config, dummy_controller)
heartbeat = Heartbeat(actionQueue)
hb = heartbeat.build(id = 10, add_state=True, componentsMapped=True)
self.assertEqual(register_mock.call_args_list[0][0][1], True)
register_mock.reset_mock()
hb = heartbeat.build(id = 0, add_state=True, componentsMapped=True)
self.assertEqual(register_mock.call_args_list[0][0][1], False)
@patch.object(ActionQueue, "result")
def test_build_long_result(self, result_mock):
config = AmbariConfig.AmbariConfig()
config.set('agent', 'prefix', 'tmp')
config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
config.set('agent', 'tolerate_download_failures', "true")
dummy_controller = MagicMock()
dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())
actionQueue = ActionQueue(config, dummy_controller)
result_mock.return_value = {
'reports': [{'status': 'IN_PROGRESS',
'stderr': 'Read from /tmp/errors-3.txt',
'stdout': 'Read from /tmp/output-3.txt',
'clusterName': u'cc',
'roleCommand': u'INSTALL',
'serviceName': u'HDFS',
'role': u'DATANODE',
'actionId': '1-1',
'taskId': 3,
'exitCode': 777},
{'status': 'COMPLETED',
'stderr': 'stderr',
'stdout': 'out',
'clusterName': 'clusterName',
'roleCommand': 'UPGRADE',
'serviceName': 'serviceName',
'role': 'role',
'actionId': 17,
'taskId': 'taskId',
'exitCode': 0},
{'status': 'FAILED',
'stderr': 'stderr',
'stdout': 'out',
'clusterName': u'cc',
'roleCommand': u'INSTALL',
'serviceName': u'HDFS',
'role': u'DATANODE',
'actionId': '1-1',
'taskId': 3,
'exitCode': 13},
{'status': 'COMPLETED',
'stderr': 'stderr',
'stdout': 'out',
'clusterName': u'cc',
'configurationTags': {'global': {'tag': 'v1'}},
'roleCommand': u'INSTALL',
'serviceName': u'HDFS',
'role': u'DATANODE',
'actionId': '1-1',
'taskId': 3,
'exitCode': 0}
],
'componentStatus': [
{'status': 'HEALTHY', 'componentName': 'DATANODE'},
{'status': 'UNHEALTHY', 'componentName': 'NAMENODE'},
],
}
heartbeat = Heartbeat(actionQueue)
hb = heartbeat.build(10)
hb['hostname'] = 'hostname'
hb['timestamp'] = 'timestamp'
expected = {'nodeStatus':
{'status': 'HEALTHY',
'cause': 'NONE'},
'recoveryReport': {'summary': 'DISABLED'},
'recoveryTimestamp': -1,
'timestamp': 'timestamp', 'hostname': 'hostname',
'responseId': 10, 'reports': [
{'status': 'IN_PROGRESS', 'roleCommand': u'INSTALL',
'serviceName': u'HDFS', 'role': u'DATANODE', 'actionId': '1-1',
'stderr': 'Read from /tmp/errors-3.txt',
'stdout': 'Read from /tmp/output-3.txt', 'clusterName': u'cc',
'taskId': 3, 'exitCode': 777},
{'status': 'COMPLETED', 'roleCommand': 'UPGRADE',
'serviceName': 'serviceName', 'role': 'role', 'actionId': 17,
'stderr': 'stderr', 'stdout': 'out', 'clusterName': 'clusterName',
'taskId': 'taskId', 'exitCode': 0},
{'status': 'FAILED', 'roleCommand': u'INSTALL', 'serviceName': u'HDFS',
'role': u'DATANODE', 'actionId': '1-1', 'stderr': 'stderr',
'stdout': 'out', 'clusterName': u'cc', 'taskId': 3, 'exitCode': 13},
{'status': 'COMPLETED', 'stdout': 'out',
'configurationTags': {'global': {'tag': 'v1'}}, 'taskId': 3,
'exitCode': 0, 'roleCommand': u'INSTALL', 'clusterName': u'cc',
'serviceName': u'HDFS', 'role': u'DATANODE', 'actionId': '1-1',
'stderr': 'stderr'}], 'componentStatus': [
{'status': 'HEALTHY', 'componentName': 'DATANODE'},
{'status': 'UNHEALTHY', 'componentName': 'NAMENODE'}]}
self.maxDiff = None
self.assertEquals(hb, expected)
@patch.object(subprocess32, "Popen")
@patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
@patch.object(HostInfoLinux, 'register')
def test_heartbeat_no_host_check_cmd_in_queue(self, register_mock, Popen_mock):
config = AmbariConfig.AmbariConfig()
config.set('agent', 'prefix', 'tmp')
config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
config.set('agent', 'tolerate_download_failures', "true")
dummy_controller = MagicMock()
actionQueue = ActionQueue(config, dummy_controller)
actionQueue.statusCommandQueue = multiprocessing.Queue()
statusCommand = {
"serviceName" : 'HDFS',
"commandType" : "STATUS_COMMAND",
"clusterName" : "c1",
"componentName" : "DATANODE",
"role" : "DATANODE",
'configurations':{'global' : {}}
}
actionQueue.put_status([statusCommand])
heartbeat = Heartbeat(actionQueue)
heartbeat.build(12, 6)
self.assertTrue(register_mock.called)
args, kwargs = register_mock.call_args_list[0]
self.assertFalse(args[2])
self.assertFalse(args[1])
@patch.object(subprocess32, "Popen")
@patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
@patch.object(HostInfoLinux, 'register')
def test_status_commands_does_not_stack_up(self, register_mock, Popen_mock):
config = AmbariConfig.AmbariConfig()
config.set('agent', 'prefix', 'tmp')
config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
config.set('agent', 'tolerate_download_failures', "true")
dummy_controller = MagicMock()
actionQueue = ActionQueue(config, dummy_controller)
dummy_controller.statusCommandsExecutor = SingleProcessStatusCommandsExecutor(config, actionQueue)
statusCommands = [{
"serviceName" : 'HDFS',
"commandType" : "STATUS_COMMAND",
"clusterName" : "c1",
"componentName" : "DATANODE",
"role" : "DATANODE",
'configurations':{'cluster-env' : {}}
},
{
"serviceName" : 'HDFS',
"commandType" : "STATUS_COMMAND",
"clusterName" : "c1",
"componentName" : "NAMENODE",
"role" : "NAMENODE",
'configurations':{'cluster-env' : {}}
}
]
# add commands ten times
for i in range(10):
actionQueue.put_status(statusCommands)
# status commands should not stack up. Size should be 2 not 20.
self.assertEquals(len(dummy_controller.statusCommandsExecutor.statusCommandQueue.queue), 2)
@patch.object(subprocess32, "Popen")
@patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
@patch.object(HostInfoLinux, 'register')
def test_heartbeat_host_check_no_cmd(self, register_mock, Popen_mock):
config = AmbariConfig.AmbariConfig()
config.set('agent', 'prefix', 'tmp')
config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
config.set('agent', 'tolerate_download_failures', "true")
dummy_controller = MagicMock()
actionQueue = ActionQueue(config, dummy_controller)
heartbeat = Heartbeat(actionQueue)
heartbeat.build(12, 6)
self.assertTrue(register_mock.called)
args, kwargs = register_mock.call_args_list[0]
self.assertFalse(args[1])
self.assertFalse(args[2])
if __name__ == "__main__":
unittest.main(verbosity=2)
|
arenadata/ambari
|
ambari-agent/src/test/python/ambari_agent/TestHeartbeat.py
|
Python
|
apache-2.0
| 11,829
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The deep heap profiler script for Chrome."""
import copy
import datetime
import json
import logging
import optparse
import os
import re
import subprocess
import sys
import tempfile
import time
import zipfile
from range_dict import ExclusiveRangeDict
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
FIND_RUNTIME_SYMBOLS_PATH = os.path.join(
BASE_PATH, os.pardir, 'find_runtime_symbols')
sys.path.append(FIND_RUNTIME_SYMBOLS_PATH)
import find_runtime_symbols
import prepare_symbol_info
import proc_maps
from find_runtime_symbols import FUNCTION_SYMBOLS
from find_runtime_symbols import SOURCEFILE_SYMBOLS
from find_runtime_symbols import TYPEINFO_SYMBOLS
BUCKET_ID = 5
VIRTUAL = 0
COMMITTED = 1
ALLOC_COUNT = 2
FREE_COUNT = 3
NULL_REGEX = re.compile('')
LOGGER = logging.getLogger('dmprof')
POLICIES_JSON_PATH = os.path.join(BASE_PATH, 'policies.json')
CHROME_SRC_PATH = os.path.join(BASE_PATH, os.pardir, os.pardir)
# Heap Profile Dump versions
# DUMP_DEEP_[1-4] are obsolete.
# DUMP_DEEP_2+ distinguish mmap regions and malloc chunks.
# DUMP_DEEP_3+ don't include allocation functions in their stack dumps.
# DUMP_DEEP_4+ support comments with '#' and global stats "nonprofiled-*".
# DUMP_DEEP_[1-2] should be processed by POLICY_DEEP_1.
# DUMP_DEEP_[3-4] should be processed by POLICY_DEEP_2 or POLICY_DEEP_3.
DUMP_DEEP_1 = 'DUMP_DEEP_1'
DUMP_DEEP_2 = 'DUMP_DEEP_2'
DUMP_DEEP_3 = 'DUMP_DEEP_3'
DUMP_DEEP_4 = 'DUMP_DEEP_4'
DUMP_DEEP_OBSOLETE = (DUMP_DEEP_1, DUMP_DEEP_2, DUMP_DEEP_3, DUMP_DEEP_4)
# DUMP_DEEP_5 doesn't separate sections for malloc and mmap.
# malloc and mmap are identified in bucket files.
# DUMP_DEEP_5 should be processed by POLICY_DEEP_4.
DUMP_DEEP_5 = 'DUMP_DEEP_5'
# DUMP_DEEP_6 adds a mmap list to DUMP_DEEP_5.
DUMP_DEEP_6 = 'DUMP_DEEP_6'
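# For reference, _parse_version() below recognizes current dumps by a header
# line such as (sketch; the version token is one of the constants above):
#   heap profile: DUMP_DEEP_6
# while obsolete dumps instead start directly with 'STACKTRACES:' or
# 'MMAP_STACKTRACES:'.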
# Heap Profile Policy versions
# POLICY_DEEP_1 DOES NOT include allocation_type columns.
# mmap regions are distinguished by mmap frames in the pattern column.
POLICY_DEEP_1 = 'POLICY_DEEP_1'
# POLICY_DEEP_2 DOES include allocation_type columns.
# mmap regions are distinguished by the allocation_type column.
POLICY_DEEP_2 = 'POLICY_DEEP_2'
# POLICY_DEEP_3 is in JSON format.
POLICY_DEEP_3 = 'POLICY_DEEP_3'
# POLICY_DEEP_4 contains typeinfo.
POLICY_DEEP_4 = 'POLICY_DEEP_4'
class EmptyDumpException(Exception):
def __init__(self, value=''):
super(EmptyDumpException, self).__init__()
self.value = value
def __str__(self):
return repr(self.value)
class ParsingException(Exception):
def __init__(self, value=''):
super(ParsingException, self).__init__()
self.value = value
def __str__(self):
return repr(self.value)
class InvalidDumpException(ParsingException):
def __init__(self, value):
super(InvalidDumpException, self).__init__()
self.value = value
def __str__(self):
return "invalid heap profile dump: %s" % repr(self.value)
class ObsoleteDumpVersionException(ParsingException):
def __init__(self, value):
super(ObsoleteDumpVersionException, self).__init__()
self.value = value
def __str__(self):
return "obsolete heap profile dump version: %s" % repr(self.value)
class ListAttribute(ExclusiveRangeDict.RangeAttribute):
"""Represents a list for an attribute in range_dict.ExclusiveRangeDict."""
def __init__(self):
super(ListAttribute, self).__init__()
self._list = []
def __str__(self):
return str(self._list)
def __repr__(self):
return 'ListAttribute' + str(self._list)
def __len__(self):
return len(self._list)
def __iter__(self):
for x in self._list:
yield x
def __getitem__(self, index):
return self._list[index]
def __setitem__(self, index, value):
if index >= len(self._list):
self._list.extend([None] * (index + 1 - len(self._list)))
self._list[index] = value
def copy(self):
new_list = ListAttribute()
for index, item in enumerate(self._list):
new_list[index] = copy.deepcopy(item)
return new_list
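# Illustrative sketch (not part of the original tool): __setitem__ above pads
# the backing list with None, so sparse assignment is safe.
#
#   attr = ListAttribute()
#   attr[3] = 'x'
#   # Now len(attr) == 4 and attr[0] is None.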
class ProcMapsEntryAttribute(ExclusiveRangeDict.RangeAttribute):
"""Represents an entry of /proc/maps in range_dict.ExclusiveRangeDict."""
_DUMMY_ENTRY = proc_maps.ProcMapsEntry(
0, # begin
0, # end
'-', # readable
'-', # writable
'-', # executable
'-', # private
0, # offset
'00', # major
'00', # minor
0, # inode
'' # name
)
def __init__(self):
super(ProcMapsEntryAttribute, self).__init__()
self._entry = self._DUMMY_ENTRY.as_dict()
def __str__(self):
return str(self._entry)
def __repr__(self):
return 'ProcMapsEntryAttribute' + str(self._entry)
def __getitem__(self, key):
return self._entry[key]
def __setitem__(self, key, value):
if key not in self._entry:
raise KeyError(key)
self._entry[key] = value
def copy(self):
new_entry = ProcMapsEntryAttribute()
for key, value in self._entry.iteritems():
new_entry[key] = copy.deepcopy(value)
return new_entry
def skip_while(index, max_index, skipping_condition):
"""Increments |index| until |skipping_condition|(|index|) is False.
Returns:
A pair of (the first index at which |skipping_condition| is False, and a
boolean which is True if such an index was found before reaching
|max_index|).
"""
while skipping_condition(index):
index += 1
if index >= max_index:
return index, False
return index, True
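# A minimal usage sketch for skip_while (the data here is made up):
#
#   lines = ['', '', 'GLOBAL_STATS:\n']
#   ln, found = skip_while(0, len(lines), lambda n: not lines[n])
#   # ln == 2 and found == True: lines[2] is the first non-empty line.
#
# When no such line exists, it returns (max_index, False).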
class SymbolDataSources(object):
"""Manages symbol data sources in a process.
The symbol data sources consist of maps (/proc/<pid>/maps), nm, readelf and
so on. They are collected into a directory '|prefix|.symmap' from the binary
files by 'prepare()' with tools/find_runtime_symbols/prepare_symbol_info.py.
The binaries themselves are not required for profiling: the prepared data
sources work in place of a binary even if it has been overwritten by
another binary.
Note that loading the symbol data sources takes a long time. They are often
very big. So, the 'dmprof' profiler is designed to use 'SymbolMappingCache'
which caches actually used symbols.
"""
def __init__(self, prefix, alternative_dirs=None):
self._prefix = prefix
self._prepared_symbol_data_sources_path = None
self._loaded_symbol_data_sources = None
self._alternative_dirs = alternative_dirs or {}
def prepare(self):
"""Prepares symbol data sources by extracting mapping from a binary.
The prepared symbol data sources are stored in a directory. The directory
name is stored in |self._prepared_symbol_data_sources_path|.
Returns:
True if succeeded.
"""
LOGGER.info('Preparing symbol mapping...')
self._prepared_symbol_data_sources_path, used_tempdir = (
prepare_symbol_info.prepare_symbol_info(
self._prefix + '.maps',
output_dir_path=self._prefix + '.symmap',
alternative_dirs=self._alternative_dirs,
use_tempdir=True,
use_source_file_name=True))
if self._prepared_symbol_data_sources_path:
LOGGER.info(' Prepared symbol mapping.')
if used_tempdir:
LOGGER.warn(' Using a temporary directory for symbol mapping.')
LOGGER.warn(' Delete it by yourself.')
LOGGER.warn(' Or, move the directory by yourself to use it later.')
return True
else:
LOGGER.warn(' Failed to prepare symbol mapping.')
return False
def get(self):
"""Returns the prepared symbol data sources.
Returns:
The prepared symbol data sources. None if failed.
"""
if not self._prepared_symbol_data_sources_path and not self.prepare():
return None
if not self._loaded_symbol_data_sources:
LOGGER.info('Loading symbol mapping...')
self._loaded_symbol_data_sources = (
find_runtime_symbols.RuntimeSymbolsInProcess.load(
self._prepared_symbol_data_sources_path))
return self._loaded_symbol_data_sources
def path(self):
"""Returns the path of the prepared symbol data sources if possible."""
if not self._prepared_symbol_data_sources_path and not self.prepare():
return None
return self._prepared_symbol_data_sources_path
class SymbolFinder(object):
"""Finds corresponding symbols from addresses.
This class only looks up ('find()'s) symbols for a specified |address_list|.
It exists to make the finder mockable.
"""
def __init__(self, symbol_type, symbol_data_sources):
self._symbol_type = symbol_type
self._symbol_data_sources = symbol_data_sources
def find(self, address_list):
return find_runtime_symbols.find_runtime_symbols(
self._symbol_type, self._symbol_data_sources.get(), address_list)
class SymbolMappingCache(object):
"""Caches mapping from actually used addresses to symbols.
'update()' updates the cache from the original symbol data sources via
'SymbolFinder'. Symbols can be looked up by the method 'lookup()'.
"""
def __init__(self):
self._symbol_mapping_caches = {
FUNCTION_SYMBOLS: {},
SOURCEFILE_SYMBOLS: {},
TYPEINFO_SYMBOLS: {},
}
def update(self, symbol_type, bucket_set, symbol_finder, cache_f):
"""Updates symbol mapping cache on memory and in a symbol cache file.
It reads the cached symbol mapping from a symbol cache file |cache_f| if it
exists. Unresolved addresses are then resolved with 'SymbolFinder' and
added to the cache, both in memory and in the symbol cache file.
A cache file is formatted as follows:
<Address> <Symbol>
<Address> <Symbol>
<Address> <Symbol>
...
Args:
symbol_type: A type of symbols to update. It should be one of
FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS and TYPEINFO_SYMBOLS.
bucket_set: A BucketSet object.
symbol_finder: A SymbolFinder object to find symbols.
cache_f: A readable and writable IO object of the symbol cache file.
"""
cache_f.seek(0, os.SEEK_SET)
self._load(cache_f, symbol_type)
unresolved_addresses = sorted(
address for address in bucket_set.iter_addresses(symbol_type)
if address not in self._symbol_mapping_caches[symbol_type])
if not unresolved_addresses:
LOGGER.info('No need to resolve any more addresses.')
return
cache_f.seek(0, os.SEEK_END)
LOGGER.info('Loading %d unresolved addresses.' %
len(unresolved_addresses))
symbol_dict = symbol_finder.find(unresolved_addresses)
for address, symbol in symbol_dict.iteritems():
stripped_symbol = symbol.strip() or '?'
self._symbol_mapping_caches[symbol_type][address] = stripped_symbol
cache_f.write('%x %s\n' % (address, stripped_symbol))
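# For reference, each line written to |cache_f| above has the form
# '<address> <symbol>' with the address in bare hex ('%x', no '0x' prefix),
# e.g. (the symbol name here is made up):
#   7f3a2c0de100 MallocDispatch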
def lookup(self, symbol_type, address):
"""Looks up a symbol for a given |address|.
Args:
symbol_type: A type of symbols to look up. It should be one of
FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS and TYPEINFO_SYMBOLS.
address: An integer that represents an address.
Returns:
A string that represents a symbol.
"""
return self._symbol_mapping_caches[symbol_type].get(address)
def _load(self, cache_f, symbol_type):
try:
for line in cache_f:
items = line.rstrip().split(None, 1)
if len(items) == 1:
items.append('??')
self._symbol_mapping_caches[symbol_type][int(items[0], 16)] = items[1]
LOGGER.info('Loaded %d entries from symbol cache.' %
len(self._symbol_mapping_caches[symbol_type]))
except IOError as e:
LOGGER.info('The symbol cache file is invalid: %s' % e)
class Rule(object):
"""Represents one matching rule in a policy file."""
def __init__(self,
name,
allocator_type,
stackfunction_pattern=None,
stacksourcefile_pattern=None,
typeinfo_pattern=None,
mappedpathname_pattern=None,
mappedpermission_pattern=None):
self._name = name
self._allocator_type = allocator_type
self._stackfunction_pattern = None
if stackfunction_pattern:
self._stackfunction_pattern = re.compile(
stackfunction_pattern + r'\Z')
self._stacksourcefile_pattern = None
if stacksourcefile_pattern:
self._stacksourcefile_pattern = re.compile(
stacksourcefile_pattern + r'\Z')
self._typeinfo_pattern = None
if typeinfo_pattern:
self._typeinfo_pattern = re.compile(typeinfo_pattern + r'\Z')
self._mappedpathname_pattern = None
if mappedpathname_pattern:
self._mappedpathname_pattern = re.compile(mappedpathname_pattern + r'\Z')
self._mappedpermission_pattern = None
if mappedpermission_pattern:
self._mappedpermission_pattern = re.compile(
mappedpermission_pattern + r'\Z')
@property
def name(self):
return self._name
@property
def allocator_type(self):
return self._allocator_type
@property
def stackfunction_pattern(self):
return self._stackfunction_pattern
@property
def stacksourcefile_pattern(self):
return self._stacksourcefile_pattern
@property
def typeinfo_pattern(self):
return self._typeinfo_pattern
@property
def mappedpathname_pattern(self):
return self._mappedpathname_pattern
@property
def mappedpermission_pattern(self):
return self._mappedpermission_pattern
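# A hedged construction sketch (the rule name and pattern are made up). Note
# that every pattern is compiled with a trailing r'\Z' in __init__, so it
# must match the whole string rather than just a prefix:
#
#   rule = Rule('tc-std-string', 'malloc',
#               stackfunction_pattern=r'.*std::basic_string.*')
#   # rule.stackfunction_pattern.match('std::basic_string grew') succeeds;
#   # the bare pattern 'std::basic_string' would not, because of r'\Z'.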
class Policy(object):
"""Represents a policy, a content of a policy file."""
def __init__(self, rules, version, components):
self._rules = rules
self._version = version
self._components = components
@property
def rules(self):
return self._rules
@property
def version(self):
return self._version
@property
def components(self):
return self._components
def find_rule(self, component_name):
"""Finds a rule whose name is |component_name|. """
for rule in self._rules:
if rule.name == component_name:
return rule
return None
def find_malloc(self, bucket):
"""Finds a matching component name which a given |bucket| belongs to.
Args:
bucket: A Bucket object to be searched for.
Returns:
A string representing a component name.
"""
assert not bucket or bucket.allocator_type == 'malloc'
if not bucket:
return 'no-bucket'
if bucket.component_cache:
return bucket.component_cache
stackfunction = bucket.symbolized_joined_stackfunction
stacksourcefile = bucket.symbolized_joined_stacksourcefile
typeinfo = bucket.symbolized_typeinfo
if typeinfo.startswith('0x'):
typeinfo = bucket.typeinfo_name
for rule in self._rules:
if (rule.allocator_type == 'malloc' and
(not rule.stackfunction_pattern or
rule.stackfunction_pattern.match(stackfunction)) and
(not rule.stacksourcefile_pattern or
rule.stacksourcefile_pattern.match(stacksourcefile)) and
(not rule.typeinfo_pattern or rule.typeinfo_pattern.match(typeinfo))):
bucket.component_cache = rule.name
return rule.name
assert False
def find_mmap(self, region, bucket_set):
"""Finds a matching component which a given mmap |region| belongs to.
It uses |bucket_set| to match with backtraces.
NOTE: Don't use Bucket's |component_cache| for mmap regions because they're
classified not only with bucket information (mappedpathname for example).
Args:
region: A tuple representing a memory region.
bucket_set: A BucketSet object to look up backtraces.
Returns:
A string representing a component name.
"""
assert region[0] == 'hooked'
bucket = bucket_set.get(region[1]['bucket_id'])
assert not bucket or bucket.allocator_type == 'mmap'
if not bucket:
return 'no-bucket', None
stackfunction = bucket.symbolized_joined_stackfunction
stacksourcefile = bucket.symbolized_joined_stacksourcefile
for rule in self._rules:
if (rule.allocator_type == 'mmap' and
(not rule.stackfunction_pattern or
rule.stackfunction_pattern.match(stackfunction)) and
(not rule.stacksourcefile_pattern or
rule.stacksourcefile_pattern.match(stacksourcefile)) and
(not rule.mappedpathname_pattern or
rule.mappedpathname_pattern.match(region[1]['vma']['name'])) and
(not rule.mappedpermission_pattern or
rule.mappedpermission_pattern.match(
region[1]['vma']['readable'] +
region[1]['vma']['writable'] +
region[1]['vma']['executable'] +
region[1]['vma']['private']))):
return rule.name, bucket
assert False
def find_unhooked(self, region):
"""Finds a matching component which a given unhooked |region| belongs to.
Args:
region: A tuple representing a memory region.
Returns:
A string representing a component name.
"""
assert region[0] == 'unhooked'
for rule in self._rules:
if (rule.allocator_type == 'unhooked' and
(not rule.mappedpathname_pattern or
rule.mappedpathname_pattern.match(region[1]['vma']['name'])) and
(not rule.mappedpermission_pattern or
rule.mappedpermission_pattern.match(
region[1]['vma']['readable'] +
region[1]['vma']['writable'] +
region[1]['vma']['executable'] +
region[1]['vma']['private']))):
return rule.name
assert False
@staticmethod
def load(filename, filetype):
"""Loads a policy file of |filename| in a |format|.
Args:
filename: A filename to be loaded.
filetype: A string to specify a type of the file. Only 'json' is
supported for now.
Returns:
A loaded Policy object.
"""
with open(os.path.join(BASE_PATH, filename)) as policy_f:
return Policy.parse(policy_f, filetype)
@staticmethod
def parse(policy_f, filetype):
"""Parses a policy file content in a |format|.
Args:
policy_f: An IO object to be loaded.
filetype: A string to specify a type of the file. Only 'json' is
supported for now.
Returns:
A loaded Policy object.
"""
if filetype == 'json':
return Policy._parse_json(policy_f)
else:
return None
@staticmethod
def _parse_json(policy_f):
"""Parses policy file in json format.
A policy file contains component names and their stacktrace patterns,
written as regular expressions. The patterns are matched against the
symbols of each stacktrace in the order they appear in the policy file.
Args:
policy_f: A File/IO object to read.
Returns:
A loaded policy object.
"""
policy = json.load(policy_f)
rules = []
for rule in policy['rules']:
stackfunction = rule.get('stackfunction') or rule.get('stacktrace')
stacksourcefile = rule.get('stacksourcefile')
rules.append(Rule(
rule['name'],
rule['allocator'], # allocator_type
stackfunction,
stacksourcefile,
rule['typeinfo'] if 'typeinfo' in rule else None,
rule.get('mappedpathname'),
rule.get('mappedpermission')))
return Policy(rules, policy['version'], policy['components'])
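# A minimal policy-file sketch accepted by _parse_json (the field names are
# the ones read above; the concrete values are made up):
#
#   {
#     "version": "POLICY_DEEP_4",
#     "components": ["tc-std-string", "mmap-other"],
#     "rules": [
#       {"name": "tc-std-string", "allocator": "malloc",
#        "stackfunction": ".*std::basic_string.*"},
#       {"name": "mmap-other", "allocator": "mmap", "mappedpathname": ".*"}
#     ]
#   }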
class PolicySet(object):
"""Represents a set of policies."""
def __init__(self, policy_directory):
self._policy_directory = policy_directory
@staticmethod
def load(labels=None):
"""Loads a set of policies via the "default policy directory".
The "default policy directory" contains pairs of policies and their labels.
For example, a policy "policy.l0.json" is labeled "l0" in the default
policy directory "policies.json".
All policies in the directory are loaded by default. Policies can be
limited by |labels|.
Args:
labels: An array that contains policy labels to be loaded.
Returns:
A PolicySet object.
"""
default_policy_directory = PolicySet._load_default_policy_directory()
if labels:
specified_policy_directory = {}
for label in labels:
if label in default_policy_directory:
specified_policy_directory[label] = default_policy_directory[label]
# TODO(dmikurube): Load an un-labeled policy file.
return PolicySet._load_policies(specified_policy_directory)
else:
return PolicySet._load_policies(default_policy_directory)
def __len__(self):
return len(self._policy_directory)
def __iter__(self):
for label in self._policy_directory:
yield label
def __getitem__(self, label):
return self._policy_directory[label]
@staticmethod
def _load_default_policy_directory():
with open(POLICIES_JSON_PATH, mode='r') as policies_f:
default_policy_directory = json.load(policies_f)
return default_policy_directory
@staticmethod
def _load_policies(directory):
LOGGER.info('Loading policy files.')
policies = {}
for label in directory:
LOGGER.info(' %s: %s' % (label, directory[label]['file']))
loaded = Policy.load(directory[label]['file'], directory[label]['format'])
if loaded:
policies[label] = loaded
return PolicySet(policies)
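# The default policy directory (policies.json) is assumed to map each label
# to an entry with 'file' and 'format' keys, as read above. A minimal sketch:
#
#   {"l0": {"file": "policy.l0.json", "format": "json"}}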
class Bucket(object):
"""Represents a bucket, which is a unit of memory block classification."""
def __init__(self, stacktrace, allocator_type, typeinfo, typeinfo_name):
self._stacktrace = stacktrace
self._allocator_type = allocator_type
self._typeinfo = typeinfo
self._typeinfo_name = typeinfo_name
self._symbolized_stackfunction = stacktrace
self._symbolized_joined_stackfunction = ''
self._symbolized_stacksourcefile = stacktrace
self._symbolized_joined_stacksourcefile = ''
self._symbolized_typeinfo = typeinfo_name
self.component_cache = ''
def __str__(self):
result = []
result.append(self._allocator_type)
if self._symbolized_typeinfo == 'no typeinfo':
result.append('tno_typeinfo')
else:
result.append('t' + self._symbolized_typeinfo)
result.append('n' + self._typeinfo_name)
result.extend(['%s(@%s)' % (function, sourcefile)
for function, sourcefile
in zip(self._symbolized_stackfunction,
self._symbolized_stacksourcefile)])
return ' '.join(result)
def symbolize(self, symbol_mapping_cache):
"""Makes a symbolized stacktrace and typeinfo with |symbol_mapping_cache|.
Args:
symbol_mapping_cache: A SymbolMappingCache object.
"""
# TODO(dmikurube): Fill explicitly with numbers if symbol not found.
self._symbolized_stackfunction = [
symbol_mapping_cache.lookup(FUNCTION_SYMBOLS, address)
for address in self._stacktrace]
self._symbolized_joined_stackfunction = ' '.join(
self._symbolized_stackfunction)
self._symbolized_stacksourcefile = [
symbol_mapping_cache.lookup(SOURCEFILE_SYMBOLS, address)
for address in self._stacktrace]
self._symbolized_joined_stacksourcefile = ' '.join(
self._symbolized_stacksourcefile)
if not self._typeinfo:
self._symbolized_typeinfo = 'no typeinfo'
else:
self._symbolized_typeinfo = symbol_mapping_cache.lookup(
TYPEINFO_SYMBOLS, self._typeinfo)
if not self._symbolized_typeinfo:
self._symbolized_typeinfo = 'no typeinfo'
def clear_component_cache(self):
self.component_cache = ''
@property
def stacktrace(self):
return self._stacktrace
@property
def allocator_type(self):
return self._allocator_type
@property
def typeinfo(self):
return self._typeinfo
@property
def typeinfo_name(self):
return self._typeinfo_name
@property
def symbolized_stackfunction(self):
return self._symbolized_stackfunction
@property
def symbolized_joined_stackfunction(self):
return self._symbolized_joined_stackfunction
@property
def symbolized_stacksourcefile(self):
return self._symbolized_stacksourcefile
@property
def symbolized_joined_stacksourcefile(self):
return self._symbolized_joined_stacksourcefile
@property
def symbolized_typeinfo(self):
return self._symbolized_typeinfo
class BucketSet(object):
"""Represents a set of bucket."""
def __init__(self):
self._buckets = {}
self._code_addresses = set()
self._typeinfo_addresses = set()
def load(self, prefix):
"""Loads all related bucket files.
Args:
prefix: A prefix string for bucket file names.
"""
LOGGER.info('Loading bucket files.')
n = 0
skipped = 0
while True:
path = '%s.%04d.buckets' % (prefix, n)
if not os.path.exists(path) or not os.stat(path).st_size:
if skipped > 10:
break
n += 1
skipped += 1
continue
LOGGER.info(' %s' % path)
with open(path, 'r') as f:
self._load_file(f)
n += 1
skipped = 0
def _load_file(self, bucket_f):
for line in bucket_f:
words = line.split()
typeinfo = None
typeinfo_name = ''
stacktrace_begin = 2
for index, word in enumerate(words):
if index < 2:
continue
if word[0] == 't':
typeinfo = int(word[1:], 16)
self._typeinfo_addresses.add(typeinfo)
elif word[0] == 'n':
typeinfo_name = word[1:]
else:
stacktrace_begin = index
break
stacktrace = [int(address, 16) for address in words[stacktrace_begin:]]
for frame in stacktrace:
self._code_addresses.add(frame)
self._buckets[int(words[0])] = Bucket(
stacktrace, words[1], typeinfo, typeinfo_name)
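# For reference, _load_file above parses bucket lines of the form
# '<bucket-id> <allocator-type> [t<typeinfo-addr>] [n<typeinfo-name>]
# <stack-addr> ...', with all addresses in hex. A made-up example:
#   25 malloc t7f1234 nstd::string 7fabc0 7fabd8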
def __iter__(self):
for bucket_id, bucket_content in self._buckets.iteritems():
yield bucket_id, bucket_content
def __getitem__(self, bucket_id):
return self._buckets[bucket_id]
def get(self, bucket_id):
return self._buckets.get(bucket_id)
def symbolize(self, symbol_mapping_cache):
for bucket_content in self._buckets.itervalues():
bucket_content.symbolize(symbol_mapping_cache)
def clear_component_cache(self):
for bucket_content in self._buckets.itervalues():
bucket_content.clear_component_cache()
def iter_addresses(self, symbol_type):
if symbol_type in [FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS]:
for function in self._code_addresses:
yield function
else:
for function in self._typeinfo_addresses:
yield function
class Dump(object):
"""Represents a heap profile dump."""
_PATH_PATTERN = re.compile(r'^(.*)\.([0-9]+)\.([0-9]+)\.heap$')
_HOOK_PATTERN = re.compile(
r'^ ([ \(])([a-f0-9]+)([ \)])-([ \(])([a-f0-9]+)([ \)])\s+'
r'(hooked|unhooked)\s+(.+)$', re.IGNORECASE)
_HOOKED_PATTERN = re.compile(r'(?P<TYPE>.+ )?(?P<COMMITTED>[0-9]+) / '
'(?P<RESERVED>[0-9]+) @ (?P<BUCKETID>[0-9]+)')
_UNHOOKED_PATTERN = re.compile(r'(?P<TYPE>.+ )?(?P<COMMITTED>[0-9]+) / '
'(?P<RESERVED>[0-9]+)')
_OLD_HOOKED_PATTERN = re.compile(r'(?P<TYPE>.+) @ (?P<BUCKETID>[0-9]+)')
_OLD_UNHOOKED_PATTERN = re.compile(r'(?P<TYPE>.+) (?P<COMMITTED>[0-9]+)')
_TIME_PATTERN_FORMAT = re.compile(
r'^Time: ([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+)(\.[0-9]+)?')
_TIME_PATTERN_SECONDS = re.compile(r'^Time: ([0-9]+)$')
def __init__(self, path, modified_time):
self._path = path
matched = self._PATH_PATTERN.match(path)
self._pid = int(matched.group(2))
self._count = int(matched.group(3))
self._time = modified_time
self._map = {}
self._procmaps = ExclusiveRangeDict(ProcMapsEntryAttribute)
self._stacktrace_lines = []
self._global_stats = {} # used only in apply_policy
self._version = ''
self._lines = []
@property
def path(self):
return self._path
@property
def count(self):
return self._count
@property
def time(self):
return self._time
@property
def iter_map(self):
for region in sorted(self._map.iteritems()):
yield region[0], region[1]
def iter_procmaps(self):
# self._map is a plain dict; the range iteration lives in self._procmaps.
for begin, end, attr in self._procmaps.iter_range():
yield begin, end, attr
@property
def iter_stacktrace(self):
for line in self._stacktrace_lines:
yield line
def global_stat(self, name):
return self._global_stats[name]
@staticmethod
def load(path, log_header='Loading a heap profile dump: '):
"""Loads a heap profile dump.
Args:
path: A file path string to load.
log_header: A preceding string for log messages.
Returns:
A loaded Dump object.
Raises:
ParsingException for invalid heap profile dumps.
"""
dump = Dump(path, os.stat(path).st_mtime)
with open(path, 'r') as f:
dump.load_file(f, log_header)
return dump
def load_file(self, f, log_header):
self._lines = [line for line in f
if line and not line.startswith('#')]
try:
self._version, ln = self._parse_version()
self._parse_meta_information()
if self._version == DUMP_DEEP_6:
self._parse_mmap_list()
self._parse_global_stats()
self._extract_stacktrace_lines(ln)
except EmptyDumpException:
LOGGER.info('%s%s ...ignored an empty dump.' % (log_header, self._path))
except ParsingException, e:
LOGGER.error('%s%s ...error %s' % (log_header, self._path, e))
raise
else:
LOGGER.info('%s%s (version:%s)' % (log_header, self._path, self._version))
def _parse_version(self):
"""Parses a version string in self._lines.
Returns:
A pair of (a string representing a version of the stacktrace dump,
and an integer indicating a line number next to the version string).
Raises:
ParsingException for invalid dump versions.
"""
version = ''
# Skip until an identifiable line.
headers = ('STACKTRACES:\n', 'MMAP_STACKTRACES:\n', 'heap profile: ')
if not self._lines:
raise EmptyDumpException('Empty heap dump file.')
(ln, found) = skip_while(
0, len(self._lines),
lambda n: not self._lines[n].startswith(headers))
if not found:
raise InvalidDumpException('No version header.')
# Identify a version.
if self._lines[ln].startswith('heap profile: '):
version = self._lines[ln][13:].strip()
if version in (DUMP_DEEP_5, DUMP_DEEP_6):
(ln, _) = skip_while(
ln, len(self._lines),
lambda n: self._lines[n] != 'STACKTRACES:\n')
elif version in DUMP_DEEP_OBSOLETE:
raise ObsoleteDumpVersionException(version)
else:
raise InvalidDumpException('Invalid version: %s' % version)
elif self._lines[ln] == 'STACKTRACES:\n':
raise ObsoleteDumpVersionException(DUMP_DEEP_1)
elif self._lines[ln] == 'MMAP_STACKTRACES:\n':
raise ObsoleteDumpVersionException(DUMP_DEEP_2)
return (version, ln)
def _parse_global_stats(self):
"""Parses lines in self._lines as global stats."""
(ln, _) = skip_while(
0, len(self._lines),
lambda n: self._lines[n] != 'GLOBAL_STATS:\n')
global_stat_names = [
'total', 'absent', 'file-exec', 'file-nonexec', 'anonymous', 'stack',
'other', 'nonprofiled-absent', 'nonprofiled-anonymous',
'nonprofiled-file-exec', 'nonprofiled-file-nonexec',
'nonprofiled-stack', 'nonprofiled-other',
'profiled-mmap', 'profiled-malloc']
for prefix in global_stat_names:
(ln, _) = skip_while(
ln, len(self._lines),
lambda n: self._lines[n].split()[0] != prefix)
words = self._lines[ln].split()
self._global_stats[prefix + '_virtual'] = int(words[-2])
self._global_stats[prefix + '_committed'] = int(words[-1])
def _parse_meta_information(self):
"""Parses lines in self._lines for meta information."""
(ln, found) = skip_while(
0, len(self._lines),
lambda n: self._lines[n] != 'META:\n')
if not found:
return
ln += 1
while True:
if self._lines[ln].startswith('Time:'):
matched_seconds = self._TIME_PATTERN_SECONDS.match(self._lines[ln])
matched_format = self._TIME_PATTERN_FORMAT.match(self._lines[ln])
if matched_format:
self._time = time.mktime(datetime.datetime.strptime(
matched_format.group(1), '%Y/%m/%d %H:%M:%S').timetuple())
if matched_format.group(2):
self._time += float(matched_format.group(2)[1:]) / 1000.0
elif matched_seconds:
self._time = float(matched_seconds.group(1))
elif self._lines[ln].startswith('Reason:'):
pass # Nothing to do for 'Reason:'
else:
break
ln += 1
def _parse_mmap_list(self):
"""Parses lines in self._lines as a mmap list."""
(ln, found) = skip_while(
0, len(self._lines),
lambda n: self._lines[n] != 'MMAP_LIST:\n')
if not found:
return {}
ln += 1
self._map = {}
current_vma = dict()
while True:
entry = proc_maps.ProcMaps.parse_line(self._lines[ln])
if entry:
current_vma = dict()
for _, _, attr in self._procmaps.iter_range(entry.begin, entry.end):
for key, value in entry.as_dict().iteritems():
attr[key] = value
current_vma[key] = value
ln += 1
continue
matched = self._HOOK_PATTERN.match(self._lines[ln])
if not matched:
break
# 2: starting address
# 5: end address
# 7: hooked or unhooked
# 8: additional information
if matched.group(7) == 'hooked':
submatched = self._HOOKED_PATTERN.match(matched.group(8))
if not submatched:
submatched = self._OLD_HOOKED_PATTERN.match(matched.group(8))
elif matched.group(7) == 'unhooked':
submatched = self._UNHOOKED_PATTERN.match(matched.group(8))
if not submatched:
submatched = self._OLD_UNHOOKED_PATTERN.match(matched.group(8))
else:
assert matched.group(7) in ['hooked', 'unhooked']
submatched_dict = submatched.groupdict()
region_info = { 'vma': current_vma }
if submatched_dict.get('TYPE'):
region_info['type'] = submatched_dict['TYPE'].strip()
if submatched_dict.get('COMMITTED'):
region_info['committed'] = int(submatched_dict['COMMITTED'])
if submatched_dict.get('RESERVED'):
region_info['reserved'] = int(submatched_dict['RESERVED'])
if submatched_dict.get('BUCKETID'):
region_info['bucket_id'] = int(submatched_dict['BUCKETID'])
if matched.group(1) == '(':
start = current_vma['begin']
else:
start = int(matched.group(2), 16)
if matched.group(4) == '(':
end = current_vma['end']
else:
end = int(matched.group(5), 16)
self._map[(start, end)] = (matched.group(7), region_info)
ln += 1
def _extract_stacktrace_lines(self, line_number):
"""Extracts the position of stacktrace lines.
Valid stacktrace lines are stored into self._stacktrace_lines.
Args:
line_number: A line number to start parsing in lines.
Raises:
ParsingException for invalid dump versions.
"""
if self._version in (DUMP_DEEP_5, DUMP_DEEP_6):
(line_number, _) = skip_while(
line_number, len(self._lines),
lambda n: not self._lines[n].split()[0].isdigit())
stacktrace_start = line_number
(line_number, _) = skip_while(
line_number, len(self._lines),
lambda n: self._check_stacktrace_line(self._lines[n]))
self._stacktrace_lines = self._lines[stacktrace_start:line_number]
elif self._version in DUMP_DEEP_OBSOLETE:
raise ObsoleteDumpVersionException(self._version)
else:
raise InvalidDumpException('Invalid version: %s' % self._version)
@staticmethod
def _check_stacktrace_line(stacktrace_line):
"""Checks if a given stacktrace_line is valid as stacktrace.
Args:
stacktrace_line: A string to be checked.
Returns:
True if the given stacktrace_line is valid.
"""
words = stacktrace_line.split()
if len(words) < BUCKET_ID + 1:
return False
if words[BUCKET_ID - 1] != '@':
return False
return True
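# For reference, with BUCKET_ID == 5 a valid stacktrace line looks like
# '<virtual> <committed> <alloc-count> <free-count> @ <bucket-id> ...',
# e.g. (made-up numbers):
#   4096 4096 2 1 @ 25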
class DumpList(object):
"""Represents a sequence of heap profile dumps."""
def __init__(self, dump_list):
self._dump_list = dump_list
@staticmethod
def load(path_list):
LOGGER.info('Loading heap dump profiles.')
dump_list = []
for path in path_list:
dump_list.append(Dump.load(path, ' '))
return DumpList(dump_list)
def __len__(self):
return len(self._dump_list)
def __iter__(self):
for dump in self._dump_list:
yield dump
def __getitem__(self, index):
return self._dump_list[index]
class Command(object):
"""Subclasses are a subcommand for this executable.
See COMMANDS in main().
"""
_DEVICE_LIB_BASEDIRS = ['/data/data/', '/data/app-lib/', '/data/local/tmp']
def __init__(self, usage):
self._parser = optparse.OptionParser(usage)
@staticmethod
def load_basic_files(
dump_path, multiple, no_dump=False, alternative_dirs=None):
prefix = Command._find_prefix(dump_path)
# If the target process appears to have been running on Android, convert
# paths on the Android device to the estimated corresponding paths on the
# host. Use --alternative-dirs to specify the conversion manually.
if not alternative_dirs:
alternative_dirs = Command._estimate_alternative_dirs(prefix)
if alternative_dirs:
for device, host in alternative_dirs.iteritems():
LOGGER.info('Assuming %s on device as %s on host' % (device, host))
symbol_data_sources = SymbolDataSources(prefix, alternative_dirs)
symbol_data_sources.prepare()
bucket_set = BucketSet()
bucket_set.load(prefix)
if not no_dump:
if multiple:
dump_list = DumpList.load(Command._find_all_dumps(dump_path))
else:
dump = Dump.load(dump_path)
symbol_mapping_cache = SymbolMappingCache()
with open(prefix + '.cache.function', 'a+') as cache_f:
symbol_mapping_cache.update(
FUNCTION_SYMBOLS, bucket_set,
SymbolFinder(FUNCTION_SYMBOLS, symbol_data_sources), cache_f)
with open(prefix + '.cache.typeinfo', 'a+') as cache_f:
symbol_mapping_cache.update(
TYPEINFO_SYMBOLS, bucket_set,
SymbolFinder(TYPEINFO_SYMBOLS, symbol_data_sources), cache_f)
with open(prefix + '.cache.sourcefile', 'a+') as cache_f:
symbol_mapping_cache.update(
SOURCEFILE_SYMBOLS, bucket_set,
SymbolFinder(SOURCEFILE_SYMBOLS, symbol_data_sources), cache_f)
bucket_set.symbolize(symbol_mapping_cache)
if no_dump:
return bucket_set
elif multiple:
return (bucket_set, dump_list)
else:
return (bucket_set, dump)
@staticmethod
def _find_prefix(path):
return re.sub(r'\.[0-9][0-9][0-9][0-9]\.heap', '', path)
@staticmethod
def _estimate_alternative_dirs(prefix):
"""Estimates a path in host from a corresponding path in target device.
For Android, dmprof.py should find symbol information from binaries in
the host instead of the Android device because dmprof.py doesn't run on
the Android device. This method estimates a path in the host
corresponding to a path in the Android device.
Returns:
A dict that maps a path in the Android device to a path in the host.
If a file in Command._DEVICE_LIB_BASEDIRS is found in /proc/maps, it
assumes the process was running on Android and maps the path to
"out/Debug/lib" in the Chromium directory. An empty dict is returned
unless Android.
"""
device_lib_path_candidates = set()
with open(prefix + '.maps') as maps_f:
maps = proc_maps.ProcMaps.load(maps_f)
for entry in maps:
name = entry.as_dict()['name']
if any([base_dir in name for base_dir in Command._DEVICE_LIB_BASEDIRS]):
device_lib_path_candidates.add(os.path.dirname(name))
if len(device_lib_path_candidates) == 1:
return {device_lib_path_candidates.pop(): os.path.join(
CHROME_SRC_PATH, 'out', 'Debug', 'lib')}
else:
return {}
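# For example (the package directory here is made up): if /proc/maps shows
# libraries under '/data/app-lib/com.example-1/', the returned dict maps that
# directory to os.path.join(CHROME_SRC_PATH, 'out', 'Debug', 'lib').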
@staticmethod
def _find_all_dumps(dump_path):
prefix = Command._find_prefix(dump_path)
dump_path_list = [dump_path]
n = int(dump_path[len(dump_path) - 9 : len(dump_path) - 5])
n += 1
skipped = 0
while True:
p = '%s.%04d.heap' % (prefix, n)
if os.path.exists(p) and os.stat(p).st_size:
dump_path_list.append(p)
else:
if skipped > 10:
break
skipped += 1
n += 1
return dump_path_list
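# Sketch of the naming scheme walked above (numbers made up): for
# 'chrome.12345.0002.heap' the prefix is 'chrome.12345', and
# 'chrome.12345.0003.heap', 'chrome.12345.0004.heap', ... are collected,
# stopping once more than ten missing indices have accumulated.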
@staticmethod
def _find_all_buckets(dump_path):
prefix = Command._find_prefix(dump_path)
bucket_path_list = []
n = 0
while True:
path = '%s.%04d.buckets' % (prefix, n)
if not os.path.exists(path):
if n > 10:
break
n += 1
continue
bucket_path_list.append(path)
n += 1
return bucket_path_list
def _parse_args(self, sys_argv, required):
options, args = self._parser.parse_args(sys_argv)
if len(args) != required + 1:
self._parser.error('needs %d argument(s).\n' % required)
return None
return (options, args)
@staticmethod
def _parse_policy_list(options_policy):
if options_policy:
return options_policy.split(',')
else:
return None
class BucketsCommand(Command):
def __init__(self):
super(BucketsCommand, self).__init__('Usage: %prog buckets <first-dump>')
def do(self, sys_argv, out=sys.stdout):
_, args = self._parse_args(sys_argv, 1)
dump_path = args[1]
bucket_set = Command.load_basic_files(dump_path, True, True)
BucketsCommand._output(bucket_set, out)
return 0
@staticmethod
def _output(bucket_set, out):
"""Prints all buckets with resolving symbols.
Args:
bucket_set: A BucketSet object.
out: An IO object to output.
"""
for bucket_id, bucket in sorted(bucket_set):
out.write('%d: %s\n' % (bucket_id, bucket))
class StacktraceCommand(Command):
def __init__(self):
super(StacktraceCommand, self).__init__(
'Usage: %prog stacktrace <dump>')
def do(self, sys_argv):
_, args = self._parse_args(sys_argv, 1)
dump_path = args[1]
(bucket_set, dump) = Command.load_basic_files(dump_path, False)
StacktraceCommand._output(dump, bucket_set, sys.stdout)
return 0
@staticmethod
def _output(dump, bucket_set, out):
"""Outputs a given stacktrace.
Args:
bucket_set: A BucketSet object.
out: A file object to output.
"""
for line in dump.iter_stacktrace:
words = line.split()
bucket = bucket_set.get(int(words[BUCKET_ID]))
if not bucket:
continue
for i in range(0, BUCKET_ID - 1):
out.write(words[i] + ' ')
for frame in bucket.symbolized_stackfunction:
out.write(frame + ' ')
out.write('\n')
class PolicyCommands(Command):
def __init__(self, command):
super(PolicyCommands, self).__init__(
'Usage: %%prog %s [-p POLICY] <first-dump>' % command)
self._parser.add_option('-p', '--policy', type='string', dest='policy',
help='profile with POLICY', metavar='POLICY')
self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
metavar='/path/on/target@/path/on/host[:...]',
help='Read files in /path/on/host/ instead of '
'files in /path/on/target/.')
def _set_up(self, sys_argv):
options, args = self._parse_args(sys_argv, 1)
dump_path = args[1]
alternative_dirs_dict = {}
if options.alternative_dirs:
for alternative_dir_pair in options.alternative_dirs.split(':'):
target_path, host_path = alternative_dir_pair.split('@', 1)
alternative_dirs_dict[target_path] = host_path
(bucket_set, dumps) = Command.load_basic_files(
dump_path, True, alternative_dirs=alternative_dirs_dict)
policy_set = PolicySet.load(Command._parse_policy_list(options.policy))
return policy_set, dumps, bucket_set
@staticmethod
def _apply_policy(dump, policy, bucket_set, first_dump_time):
"""Aggregates the total memory size of each component.
Iterates through all stacktraces and attributes each to one of the
components based on the policy. It is important to apply the policy rules
in the right order.
Args:
dump: A Dump object.
policy: A Policy object.
bucket_set: A BucketSet object.
first_dump_time: An integer representing time when the first dump is
dumped.
Returns:
A dict mapping components and their corresponding sizes.
"""
LOGGER.info(' %s' % dump.path)
sizes = dict((c, 0) for c in policy.components)
PolicyCommands._accumulate_malloc(dump, policy, bucket_set, sizes)
verify_global_stats = PolicyCommands._accumulate_maps(
dump, policy, bucket_set, sizes)
# TODO(dmikurube): Remove the verifying code when GLOBAL_STATS is removed.
# http://crbug.com/245603.
for verify_key, verify_value in verify_global_stats.iteritems():
dump_value = dump.global_stat('%s_committed' % verify_key)
if dump_value != verify_value:
LOGGER.warn('%25s: %12d != %d (%d)' % (
verify_key, dump_value, verify_value, dump_value - verify_value))
sizes['mmap-no-log'] = (
dump.global_stat('profiled-mmap_committed') -
sizes['mmap-total-log'])
sizes['mmap-total-record'] = dump.global_stat('profiled-mmap_committed')
sizes['mmap-total-record-vm'] = dump.global_stat('profiled-mmap_virtual')
sizes['tc-no-log'] = (
dump.global_stat('profiled-malloc_committed') -
sizes['tc-total-log'])
sizes['tc-total-record'] = dump.global_stat('profiled-malloc_committed')
sizes['tc-unused'] = (
sizes['mmap-tcmalloc'] -
dump.global_stat('profiled-malloc_committed'))
if sizes['tc-unused'] < 0:
LOGGER.warn(' Assuming tc-unused=0 as it is negative: %d (bytes)' %
sizes['tc-unused'])
sizes['tc-unused'] = 0
sizes['tc-total'] = sizes['mmap-tcmalloc']
# TODO(dmikurube): global_stat will be deprecated.
# See http://crbug.com/245603.
for key, value in {
'total': 'total_committed',
'filemapped': 'file_committed',
'absent': 'absent_committed',
'file-exec': 'file-exec_committed',
'file-nonexec': 'file-nonexec_committed',
'anonymous': 'anonymous_committed',
'stack': 'stack_committed',
'other': 'other_committed',
'unhooked-absent': 'nonprofiled-absent_committed',
'total-vm': 'total_virtual',
'filemapped-vm': 'file_virtual',
'anonymous-vm': 'anonymous_virtual',
'other-vm': 'other_virtual' }.iteritems():
if key in sizes:
sizes[key] = dump.global_stat(value)
if 'mustbezero' in sizes:
removed_list = (
'profiled-mmap_committed',
'nonprofiled-absent_committed',
'nonprofiled-anonymous_committed',
'nonprofiled-file-exec_committed',
'nonprofiled-file-nonexec_committed',
'nonprofiled-stack_committed',
'nonprofiled-other_committed')
sizes['mustbezero'] = (
dump.global_stat('total_committed') -
sum(dump.global_stat(removed) for removed in removed_list))
if 'total-exclude-profiler' in sizes:
sizes['total-exclude-profiler'] = (
dump.global_stat('total_committed') -
(sizes['mmap-profiler'] + sizes['mmap-type-profiler']))
if 'hour' in sizes:
sizes['hour'] = (dump.time - first_dump_time) / 60.0 / 60.0
if 'minute' in sizes:
sizes['minute'] = (dump.time - first_dump_time) / 60.0
if 'second' in sizes:
sizes['second'] = dump.time - first_dump_time
return sizes
@staticmethod
def _accumulate_malloc(dump, policy, bucket_set, sizes):
for line in dump.iter_stacktrace:
words = line.split()
bucket = bucket_set.get(int(words[BUCKET_ID]))
if not bucket or bucket.allocator_type == 'malloc':
component_match = policy.find_malloc(bucket)
elif bucket.allocator_type == 'mmap':
continue
else:
assert False
sizes[component_match] += int(words[COMMITTED])
assert not component_match.startswith('mmap-')
if component_match.startswith('tc-'):
sizes['tc-total-log'] += int(words[COMMITTED])
else:
sizes['other-total-log'] += int(words[COMMITTED])
@staticmethod
def _accumulate_maps(dump, policy, bucket_set, sizes):
# TODO(dmikurube): Remove the dict when GLOBAL_STATS is removed.
# http://crbug.com/245603.
global_stats = {
'total': 0,
'file-exec': 0,
'file-nonexec': 0,
'anonymous': 0,
'stack': 0,
'other': 0,
'nonprofiled-file-exec': 0,
'nonprofiled-file-nonexec': 0,
'nonprofiled-anonymous': 0,
'nonprofiled-stack': 0,
'nonprofiled-other': 0,
'profiled-mmap': 0,
}
for _, value in dump.iter_map:
# TODO(dmikurube): Remove the subtotal code when GLOBAL_STATS is removed.
# It's temporary verification code for transition described in
# http://crbug.com/245603.
committed = 0
if 'committed' in value[1]:
committed = value[1]['committed']
global_stats['total'] += committed
key = 'other'
name = value[1]['vma']['name']
if name.startswith('/'):
if value[1]['vma']['executable'] == 'x':
key = 'file-exec'
else:
key = 'file-nonexec'
elif name == '[stack]':
key = 'stack'
elif name == '':
key = 'anonymous'
global_stats[key] += committed
if value[0] == 'unhooked':
global_stats['nonprofiled-' + key] += committed
if value[0] == 'hooked':
global_stats['profiled-mmap'] += committed
if value[0] == 'unhooked':
component_match = policy.find_unhooked(value)
sizes[component_match] += int(value[1]['committed'])
elif value[0] == 'hooked':
component_match, _ = policy.find_mmap(value, bucket_set)
sizes[component_match] += int(value[1]['committed'])
assert not component_match.startswith('tc-')
if component_match.startswith('mmap-'):
sizes['mmap-total-log'] += int(value[1]['committed'])
else:
sizes['other-total-log'] += int(value[1]['committed'])
else:
LOGGER.error('Unrecognized mapping status: %s' % value[0])
return global_stats
class CSVCommand(PolicyCommands):
def __init__(self):
super(CSVCommand, self).__init__('csv')
def do(self, sys_argv):
policy_set, dumps, bucket_set = self._set_up(sys_argv)
return CSVCommand._output(policy_set, dumps, bucket_set, sys.stdout)
@staticmethod
def _output(policy_set, dumps, bucket_set, out):
max_components = 0
for label in policy_set:
max_components = max(max_components, len(policy_set[label].components))
for label in sorted(policy_set):
components = policy_set[label].components
if len(policy_set) > 1:
out.write('%s%s\n' % (label, ',' * (max_components - 1)))
out.write('%s%s\n' % (
','.join(components), ',' * (max_components - len(components))))
LOGGER.info('Applying a policy %s to...' % label)
for dump in dumps:
component_sizes = PolicyCommands._apply_policy(
dump, policy_set[label], bucket_set, dumps[0].time)
s = []
for c in components:
if c in ('hour', 'minute', 'second'):
s.append('%05.5f' % (component_sizes[c]))
else:
s.append('%05.5f' % (component_sizes[c] / 1024.0 / 1024.0))
out.write('%s%s\n' % (
','.join(s), ',' * (max_components - len(components))))
bucket_set.clear_component_cache()
return 0
class JSONCommand(PolicyCommands):
def __init__(self):
super(JSONCommand, self).__init__('json')
def do(self, sys_argv):
policy_set, dumps, bucket_set = self._set_up(sys_argv)
return JSONCommand._output(policy_set, dumps, bucket_set, sys.stdout)
@staticmethod
def _output(policy_set, dumps, bucket_set, out):
json_base = {
'version': 'JSON_DEEP_2',
'policies': {},
}
for label in sorted(policy_set):
json_base['policies'][label] = {
'legends': policy_set[label].components,
'snapshots': [],
}
LOGGER.info('Applying a policy %s to...' % label)
for dump in dumps:
component_sizes = PolicyCommands._apply_policy(
dump, policy_set[label], bucket_set, dumps[0].time)
component_sizes['dump_path'] = dump.path
component_sizes['dump_time'] = datetime.datetime.fromtimestamp(
dump.time).strftime('%Y-%m-%d %H:%M:%S')
json_base['policies'][label]['snapshots'].append(component_sizes)
bucket_set.clear_component_cache()
json.dump(json_base, out, indent=2, sort_keys=True)
return 0
class ListCommand(PolicyCommands):
def __init__(self):
super(ListCommand, self).__init__('list')
def do(self, sys_argv):
policy_set, dumps, bucket_set = self._set_up(sys_argv)
return ListCommand._output(policy_set, dumps, bucket_set, sys.stdout)
@staticmethod
def _output(policy_set, dumps, bucket_set, out):
for label in sorted(policy_set):
LOGGER.info('Applying a policy %s to...' % label)
for dump in dumps:
component_sizes = PolicyCommands._apply_policy(
dump, policy_set[label], bucket_set, dump.time)
out.write('%s for %s:\n' % (label, dump.path))
for c in policy_set[label].components:
if c in ['hour', 'minute', 'second']:
out.write('%40s %12.3f\n' % (c, component_sizes[c]))
else:
out.write('%40s %12d\n' % (c, component_sizes[c]))
bucket_set.clear_component_cache()
return 0
class MapCommand(Command):
def __init__(self):
super(MapCommand, self).__init__('Usage: %prog map <first-dump> <policy>')
def do(self, sys_argv, out=sys.stdout):
_, args = self._parse_args(sys_argv, 2)
dump_path = args[1]
target_policy = args[2]
(bucket_set, dumps) = Command.load_basic_files(dump_path, True)
policy_set = PolicySet.load(Command._parse_policy_list(target_policy))
MapCommand._output(dumps, bucket_set, policy_set[target_policy], out)
return 0
@staticmethod
def _output(dumps, bucket_set, policy, out):
"""Prints all stacktraces in a given component of given depth.
Args:
dumps: A list of Dump objects.
bucket_set: A BucketSet object.
policy: A Policy object.
out: An IO object to output.
"""
max_dump_count = 0
range_dict = ExclusiveRangeDict(ListAttribute)
for dump in dumps:
max_dump_count = max(max_dump_count, dump.count)
for key, value in dump.iter_map:
for begin, end, attr in range_dict.iter_range(key[0], key[1]):
attr[dump.count] = value
max_dump_count_digit = len(str(max_dump_count))
for begin, end, attr in range_dict.iter_range():
out.write('%x-%x\n' % (begin, end))
if len(attr) < max_dump_count:
attr[max_dump_count] = None
for index, value in enumerate(attr[1:]):
out.write(' #%0*d: ' % (max_dump_count_digit, index + 1))
if not value:
out.write('None\n')
elif value[0] == 'hooked':
component_match, _ = policy.find_mmap(value, bucket_set)
out.write('hooked %s: %s @ %d\n' % (
value[1]['type'] if 'type' in value[1] else 'None',
component_match, value[1]['bucket_id']))
else:
region_info = value[1]
size = region_info['committed']
out.write('unhooked %s: %d bytes committed\n' % (
region_info['type'] if 'type' in region_info else 'None', size))
class ExpandCommand(Command):
def __init__(self):
super(ExpandCommand, self).__init__(
'Usage: %prog expand <dump> <policy> <component> <depth>')
def do(self, sys_argv):
_, args = self._parse_args(sys_argv, 4)
dump_path = args[1]
target_policy = args[2]
component_name = args[3]
depth = args[4]
(bucket_set, dump) = Command.load_basic_files(dump_path, False)
policy_set = PolicySet.load(Command._parse_policy_list(target_policy))
ExpandCommand._output(dump, policy_set[target_policy], bucket_set,
component_name, int(depth), sys.stdout)
return 0
@staticmethod
def _output(dump, policy, bucket_set, component_name, depth, out):
"""Prints all stacktraces in a given component of given depth.
Args:
dump: A Dump object.
policy: A Policy object.
bucket_set: A BucketSet object.
component_name: A name of component for filtering.
depth: An integer representing depth to be printed.
out: An IO object to output.
"""
sizes = {}
ExpandCommand._accumulate(
dump, policy, bucket_set, component_name, depth, sizes)
sorted_sizes_list = sorted(
sizes.iteritems(), key=(lambda x: x[1]), reverse=True)
total = 0
# TODO(dmikurube): Better formatting.
for size_pair in sorted_sizes_list:
out.write('%10d %s\n' % (size_pair[1], size_pair[0]))
total += size_pair[1]
LOGGER.info('total: %d\n' % total)
@staticmethod
def _add_size(precedence, bucket, depth, committed, sizes):
stacktrace_sequence = precedence
for function, sourcefile in zip(
bucket.symbolized_stackfunction[
0 : min(len(bucket.symbolized_stackfunction), 1 + depth)],
bucket.symbolized_stacksourcefile[
0 : min(len(bucket.symbolized_stacksourcefile), 1 + depth)]):
stacktrace_sequence += '%s(@%s) ' % (function, sourcefile)
if stacktrace_sequence not in sizes:
sizes[stacktrace_sequence] = 0
sizes[stacktrace_sequence] += committed
@staticmethod
def _accumulate(dump, policy, bucket_set, component_name, depth, sizes):
rule = policy.find_rule(component_name)
if not rule:
pass
elif rule.allocator_type == 'malloc':
for line in dump.iter_stacktrace:
words = line.split()
bucket = bucket_set.get(int(words[BUCKET_ID]))
if not bucket or bucket.allocator_type == 'malloc':
component_match = policy.find_malloc(bucket)
elif bucket.allocator_type == 'mmap':
continue
else:
assert False
if component_match == component_name:
precedence = ''
precedence += '(alloc=%d) ' % int(words[ALLOC_COUNT])
precedence += '(free=%d) ' % int(words[FREE_COUNT])
if bucket.typeinfo:
precedence += '(type=%s) ' % bucket.symbolized_typeinfo
precedence += '(type.name=%s) ' % bucket.typeinfo_name
ExpandCommand._add_size(precedence, bucket, depth,
int(words[COMMITTED]), sizes)
elif rule.allocator_type == 'mmap':
for _, region in dump.iter_map:
if region[0] != 'hooked':
continue
component_match, bucket = policy.find_mmap(region, bucket_set)
if component_match == component_name:
ExpandCommand._add_size('', bucket, depth,
region[1]['committed'], sizes)
class PProfCommand(Command):
def __init__(self):
super(PProfCommand, self).__init__(
'Usage: %prog pprof [-c COMPONENT] <dump> <policy>')
self._parser.add_option('-c', '--component', type='string',
dest='component',
help='restrict to COMPONENT', metavar='COMPONENT')
def do(self, sys_argv):
options, args = self._parse_args(sys_argv, 2)
dump_path = args[1]
target_policy = args[2]
component = options.component
(bucket_set, dump) = Command.load_basic_files(dump_path, False)
policy_set = PolicySet.load(Command._parse_policy_list(target_policy))
with open(Command._find_prefix(dump_path) + '.maps', 'r') as maps_f:
maps_lines = maps_f.readlines()
PProfCommand._output(
dump, policy_set[target_policy], bucket_set, maps_lines, component,
sys.stdout)
return 0
@staticmethod
def _output(dump, policy, bucket_set, maps_lines, component_name, out):
"""Converts the heap profile dump so it can be processed by pprof.
Args:
dump: A Dump object.
policy: A Policy object.
bucket_set: A BucketSet object.
      maps_lines: A list of strings read from /proc/.../maps.
      component_name: The name of the component used for filtering.
      out: An IO object to write output to.
"""
out.write('heap profile: ')
com_committed, com_allocs = PProfCommand._accumulate(
dump, policy, bucket_set, component_name)
out.write('%6d: %8s [%6d: %8s] @ heapprofile\n' % (
com_allocs, com_committed, com_allocs, com_committed))
PProfCommand._output_stacktrace_lines(
dump, policy, bucket_set, component_name, out)
out.write('MAPPED_LIBRARIES:\n')
for line in maps_lines:
out.write(line)
@staticmethod
def _accumulate(dump, policy, bucket_set, component_name):
"""Accumulates size of committed chunks and the number of allocated chunks.
Args:
dump: A Dump object.
policy: A Policy object.
bucket_set: A BucketSet object.
      component_name: The name of the component used for filtering.
Returns:
Two integers which are the accumulated size of committed regions and the
number of allocated chunks, respectively.
"""
com_committed = 0
com_allocs = 0
for _, region in dump.iter_map:
if region[0] != 'hooked':
continue
component_match, bucket = policy.find_mmap(region, bucket_set)
if (component_name and component_name != component_match) or (
region[1]['committed'] == 0):
continue
com_committed += region[1]['committed']
com_allocs += 1
for line in dump.iter_stacktrace:
words = line.split()
bucket = bucket_set.get(int(words[BUCKET_ID]))
if not bucket or bucket.allocator_type == 'malloc':
component_match = policy.find_malloc(bucket)
elif bucket.allocator_type == 'mmap':
continue
else:
assert False
if (not bucket or
(component_name and component_name != component_match)):
continue
com_committed += int(words[COMMITTED])
com_allocs += int(words[ALLOC_COUNT]) - int(words[FREE_COUNT])
return com_committed, com_allocs
@staticmethod
def _output_stacktrace_lines(dump, policy, bucket_set, component_name, out):
"""Prints information of stacktrace lines for pprof.
Args:
dump: A Dump object.
policy: A Policy object.
bucket_set: A BucketSet object.
      component_name: The name of the component used for filtering.
      out: An IO object to write output to.
"""
for _, region in dump.iter_map:
if region[0] != 'hooked':
continue
component_match, bucket = policy.find_mmap(region, bucket_set)
if (component_name and component_name != component_match) or (
region[1]['committed'] == 0):
continue
out.write(' 1: %8s [ 1: %8s] @' % (
region[1]['committed'], region[1]['committed']))
for address in bucket.stacktrace:
out.write(' 0x%016x' % address)
out.write('\n')
for line in dump.iter_stacktrace:
words = line.split()
bucket = bucket_set.get(int(words[BUCKET_ID]))
if not bucket or bucket.allocator_type == 'malloc':
component_match = policy.find_malloc(bucket)
elif bucket.allocator_type == 'mmap':
continue
else:
assert False
if (not bucket or
(component_name and component_name != component_match)):
continue
out.write('%6d: %8s [%6d: %8s] @' % (
int(words[ALLOC_COUNT]) - int(words[FREE_COUNT]),
words[COMMITTED],
int(words[ALLOC_COUNT]) - int(words[FREE_COUNT]),
words[COMMITTED]))
for address in bucket.stacktrace:
out.write(' 0x%016x' % address)
out.write('\n')
class UploadCommand(Command):
def __init__(self):
super(UploadCommand, self).__init__(
'Usage: %prog upload [--gsutil path/to/gsutil] '
'<first-dump> <destination-gs-path>')
self._parser.add_option('--gsutil', default='gsutil',
help='path to GSUTIL', metavar='GSUTIL')
def do(self, sys_argv):
options, args = self._parse_args(sys_argv, 2)
dump_path = args[1]
gs_path = args[2]
dump_files = Command._find_all_dumps(dump_path)
bucket_files = Command._find_all_buckets(dump_path)
prefix = Command._find_prefix(dump_path)
symbol_data_sources = SymbolDataSources(prefix)
symbol_data_sources.prepare()
symbol_path = symbol_data_sources.path()
handle_zip, filename_zip = tempfile.mkstemp('.zip', 'dmprof')
os.close(handle_zip)
try:
file_zip = zipfile.ZipFile(filename_zip, 'w', zipfile.ZIP_DEFLATED)
for filename in dump_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
for filename in bucket_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
symbol_basename = os.path.basename(os.path.abspath(symbol_path))
for filename in os.listdir(symbol_path):
if not filename.startswith('.'):
file_zip.write(os.path.join(symbol_path, filename),
os.path.join(symbol_basename, os.path.basename(
os.path.abspath(filename))))
file_zip.close()
returncode = UploadCommand._run_gsutil(
options.gsutil, 'cp', '-a', 'public-read', filename_zip, gs_path)
finally:
os.remove(filename_zip)
return returncode
@staticmethod
def _run_gsutil(gsutil, *args):
"""Run gsutil as a subprocess.
    Args:
      gsutil: Path to the gsutil executable.
      *args: Arguments to pass to gsutil. The first argument should be an
          operation such as ls, cp or cat.
Returns:
The return code from the process.
"""
command = [gsutil] + list(args)
LOGGER.info("Running: %s", command)
try:
return subprocess.call(command)
    except OSError as e:
      LOGGER.error('Failed to run gsutil: %s', e)
      return 1  # Signal failure instead of implicitly returning None.
def main():
COMMANDS = {
'buckets': BucketsCommand,
'csv': CSVCommand,
'expand': ExpandCommand,
'json': JSONCommand,
'list': ListCommand,
'map': MapCommand,
'pprof': PProfCommand,
'stacktrace': StacktraceCommand,
'upload': UploadCommand,
}
  if len(sys.argv) < 2 or sys.argv[1] not in COMMANDS:
sys.stderr.write("""Usage: dmprof <command> [options] [<args>]
Commands:
buckets Dump a bucket list with resolving symbols
csv Classify memory usage in CSV
expand Show all stacktraces contained in the specified component
json Classify memory usage in JSON
list Classify memory usage in simple listing format
map Show history of mapped regions
pprof Format the profile dump so that it can be processed by pprof
stacktrace Convert runtime addresses to symbol names
upload Upload dumped files
Quick Reference:
dmprof buckets <first-dump>
dmprof csv [-p POLICY] <first-dump>
dmprof expand <dump> <policy> <component> <depth>
dmprof json [-p POLICY] <first-dump>
dmprof list [-p POLICY] <first-dump>
dmprof map <first-dump> <policy>
dmprof pprof [-c COMPONENT] <dump> <policy>
dmprof stacktrace <dump>
dmprof upload [--gsutil path/to/gsutil] <first-dump> <destination-gs-path>
""")
sys.exit(1)
action = sys.argv.pop(1)
LOGGER.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
LOGGER.addHandler(handler)
try:
errorcode = COMMANDS[action]().do(sys.argv)
  except ParsingException as e:
    errorcode = 1
    sys.stderr.write('Exiting due to a parsing error: %s\n' % e)
return errorcode
if __name__ == '__main__':
sys.exit(main())
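# Illustrative invocations based on the quick reference above. The policy
# name "l0", the component "tc-webcore", and the dump filename are
# hypothetical examples, not values defined by this tool:
#
#   $ dmprof csv -p l0 dmprof.0001.heap > usage.csv
#   $ dmprof expand dmprof.0001.heap l0 tc-webcore 5
#   $ dmprof pprof -c tc-webcore dmprof.0001.heap l0 > for_pprof.txt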
|
hujiajie/pa-chromium
|
tools/deep_memory_profiler/dmprof.py
|
Python
|
bsd-3-clause
| 68,768
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
from functools import wraps
import six
from six.moves import urllib
from st2client.utils import httpclient
LOG = logging.getLogger(__name__)
def add_auth_token_to_kwargs_from_env(func):
@wraps(func)
def decorate(*args, **kwargs):
if not kwargs.get('token') and os.environ.get('ST2_AUTH_TOKEN', None):
kwargs['token'] = os.environ.get('ST2_AUTH_TOKEN')
return func(*args, **kwargs)
return decorate
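# Illustrative sketch (not part of the original module): with ST2_AUTH_TOKEN
# exported in the environment, the decorator injects it as the `token` kwarg.
# The function name and token value below are hypothetical:
#
#   os.environ['ST2_AUTH_TOKEN'] = 'my-token'
#
#   @add_auth_token_to_kwargs_from_env
#   def whoami(**kwargs):
#       return kwargs.get('token')
#
#   whoami()  # -> 'my-token'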
class Resource(object):
# An alias to use for the resource if different than the class name.
_alias = None
# Display name of the resource. This may be different than its resource
# name specifically when the resource name is composed of multiple words.
_display_name = None
# URL path for the resource.
_url_path = None
# Plural form of the resource name. This will be used to build the
# latter part of the REST URL.
_plural = None
# Plural form of the resource display name.
_plural_display_name = None
# A list of class attributes which will be included in __repr__ return value
_repr_attributes = []
def __init__(self, *args, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
def to_dict(self, exclude_attributes=None):
"""
Return a dictionary representation of this object.
:param exclude_attributes: Optional list of attributes to exclude.
:type exclude_attributes: ``list``
:rtype: ``dict``
"""
exclude_attributes = exclude_attributes or []
attributes = self.__dict__.keys()
attributes = [attr for attr in attributes if not attr.startswith('__') and
attr not in exclude_attributes]
result = {}
for attribute in attributes:
value = getattr(self, attribute, None)
result[attribute] = value
return result
@classmethod
def get_alias(cls):
return cls._alias if cls._alias else cls.__name__
@classmethod
def get_display_name(cls):
return cls._display_name if cls._display_name else cls.__name__
@classmethod
def get_plural_name(cls):
if not cls._plural:
            raise Exception('The %s class is missing the "_plural" class '
                            'attribute in its definition.' % cls.__name__)
return cls._plural
@classmethod
def get_plural_display_name(cls):
return (cls._plural_display_name
if cls._plural_display_name
else cls._plural)
@classmethod
def get_url_path_name(cls):
if cls._url_path:
return cls._url_path
return cls.get_plural_name().lower()
def serialize(self):
return dict((k, v)
for k, v in six.iteritems(self.__dict__)
if not k.startswith('_'))
@classmethod
def deserialize(cls, doc):
        if not isinstance(doc, dict):
doc = json.loads(doc)
return cls(**doc)
def __str__(self):
return str(self.__repr__())
def __repr__(self):
if not self._repr_attributes:
return super(Resource, self).__repr__()
attributes = []
for attribute in self._repr_attributes:
value = getattr(self, attribute, None)
attributes.append('%s=%s' % (attribute, value))
attributes = ','.join(attributes)
class_name = self.__class__.__name__
result = '<%s %s>' % (class_name, attributes)
return result
class ResourceManager(object):
def __init__(self, resource, endpoint, cacert=None, debug=False):
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(endpoint, cacert=cacert, debug=debug)
@staticmethod
def handle_error(response):
try:
content = response.json()
fault = content.get('faultstring', '') if content else ''
if fault:
response.reason += '\nMESSAGE: %s' % fault
except Exception as e:
response.reason += ('\nUnable to retrieve detailed message '
'from the HTTP response. %s\n' % str(e))
response.raise_for_status()
@add_auth_token_to_kwargs_from_env
def get_all(self, **kwargs):
# TODO: This is ugly, stop abusing kwargs
url = '/%s' % self.resource.get_url_path_name()
limit = kwargs.pop('limit', None)
pack = kwargs.pop('pack', None)
prefix = kwargs.pop('prefix', None)
user = kwargs.pop('user', None)
params = {}
if limit and limit <= 0:
limit = None
if limit:
params['limit'] = limit
if pack:
params['pack'] = pack
if prefix:
params['prefix'] = prefix
if user:
params['user'] = user
response = self.client.get(url=url, params=params, **kwargs)
if response.status_code != 200:
self.handle_error(response)
return [self.resource.deserialize(item)
for item in response.json()]
@add_auth_token_to_kwargs_from_env
def get_by_id(self, id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), id)
response = self.client.get(url, **kwargs)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
return self.resource.deserialize(response.json())
@add_auth_token_to_kwargs_from_env
def get_property(self, id_, property_name, self_deserialize=True, **kwargs):
"""
Gets a property of a Resource.
id_ : Id of the resource
property_name: Name of the property
self_deserialize: #Implies use the deserialize method implemented by this resource.
"""
token = None
if kwargs:
token = kwargs.pop('token', None)
url = '/%s/%s/%s/?%s' % (self.resource.get_url_path_name(), id_, property_name,
urllib.parse.urlencode(kwargs))
else:
url = '/%s/%s/%s/' % (self.resource.get_url_path_name(), id_, property_name)
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
if self_deserialize:
return [self.resource.deserialize(item) for item in response.json()]
else:
return response.json()
@add_auth_token_to_kwargs_from_env
def get_by_ref_or_id(self, ref_or_id, **kwargs):
return self.get_by_id(id=ref_or_id, **kwargs)
@add_auth_token_to_kwargs_from_env
def query(self, **kwargs):
        if not kwargs:
            raise Exception('At least one query parameter must be provided.')
if 'limit' in kwargs and kwargs.get('limit') <= 0:
kwargs.pop('limit')
token = kwargs.get('token', None)
params = {}
for k, v in six.iteritems(kwargs):
if k != 'token':
params[k] = v
url = '/%s/?%s' % (self.resource.get_url_path_name(),
urllib.parse.urlencode(params))
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return []
if response.status_code != 200:
self.handle_error(response)
items = response.json()
instances = [self.resource.deserialize(item) for item in items]
return instances
@add_auth_token_to_kwargs_from_env
def get_by_name(self, name_or_id, **kwargs):
instances = self.query(name=name_or_id, **kwargs)
if not instances:
return None
else:
if len(instances) > 1:
                raise Exception('More than one %s named "%s" was found.' %
                                (self.resource.__name__.lower(), name_or_id))
return instances[0]
@add_auth_token_to_kwargs_from_env
def create(self, instance, **kwargs):
url = '/%s' % self.resource.get_url_path_name()
response = self.client.post(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def update(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.put(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def delete(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
return True
@add_auth_token_to_kwargs_from_env
def delete_by_id(self, instance_id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance_id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
try:
resp_json = response.json()
if resp_json:
return resp_json
        except Exception:
            # An empty response body (e.g. HTTP 204) is fine here.
            pass
return True
class ActionAliasResourceManager(ResourceManager):
def __init__(self, resource, endpoint, cacert=None, debug=False):
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(root=endpoint, cacert=cacert, debug=debug)
class LiveActionResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_run(self, execution_id, parameters=None, **kwargs):
url = '/%s/%s/re_run' % (self.resource.get_url_path_name(), execution_id)
data = {}
if parameters:
data['parameters'] = parameters
response = self.client.post(url, data, **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
class TriggerInstanceResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_emit(self, trigger_instance_id, **kwargs):
url = '/%s/%s/re_emit' % (self.resource.get_url_path_name(), trigger_instance_id)
response = self.client.post(url, None)
if response.status_code != 200:
self.handle_error(response)
return response.json()
|
alfasin/st2
|
st2client/st2client/models/core.py
|
Python
|
apache-2.0
| 11,745
|
"""
Tests for the snoozy package.
"""
import unittest
import snoozy
class TestSnoozy(unittest.TestCase):
def test_evaluate(self):
# Attributes should be evaluated on access.
called = []
class Foo(object):
@snoozy.lazy_property
def foo(self):
called.append(True)
return True
foo = Foo()
self.assertEqual(foo.foo, True)
self.assertEqual(len(called), 1)
def test_evaluate_once(self):
# Attributes should be evaluated only once.
called = []
class Foo(object):
@snoozy.lazy_property
def foo(self):
called.append('foo')
return 1
foo = Foo()
self.assertEqual(foo.foo, 1)
self.assertEqual(foo.foo, 1)
self.assertEqual(foo.foo, 1)
self.assertEqual(len(called), 1)
def test_private_attribute(self):
# Create private lazy attributes.
called = []
class Foo(object):
@snoozy.lazy_property
def __foo(self):
called.append('foo')
return 1
def get_foo(self):
return self.__foo
foo = Foo()
self.assertEqual(foo.get_foo(), 1)
self.assertEqual(foo.get_foo(), 1)
self.assertEqual(foo.get_foo(), 1)
self.assertEqual(len(called), 1)
def test_reserved_attribute(self):
# Create reserved lazy attributes.
called = []
class Foo(object):
@snoozy.lazy_property
def __foo__(self):
called.append('foo')
return 1
foo = Foo()
self.assertEqual(foo.__foo__, 1)
self.assertEqual(foo.__foo__, 1)
self.assertEqual(foo.__foo__, 1)
self.assertEqual(len(called), 1)
def test_introspection(self):
# Supports basic introspection.
class Foo(object):
def foo(self):
"""foo func doc"""
@snoozy.lazy_property
def bar(self):
"""bar func doc"""
self.assertEqual(Foo.foo.__name__, "foo")
self.assertEqual(Foo.foo.__doc__, "foo func doc")
self.assertEqual(Foo.foo.__module__, "snoozy.tests.test_snoozy")
self.assertEqual(Foo.bar.__name__, "bar")
self.assertEqual(Foo.bar.__doc__, "bar func doc")
self.assertEqual(Foo.bar.__module__, "snoozy.tests.test_snoozy")
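# A minimal sketch of a descriptor that would satisfy the tests above.
# snoozy's real implementation may differ; this is an illustrative
# assumption, not the package's actual code:
#
#   class lazy_property(object):
#       def __init__(self, func):
#           self.func = func
#           self.__name__ = func.__name__
#           self.__doc__ = func.__doc__
#           self.__module__ = func.__module__
#       def __get__(self, obj, cls=None):
#           if obj is None:
#               return self  # class access keeps introspection working
#           value = self.func(obj)               # evaluate once...
#           obj.__dict__[self.__name__] = value  # ...then cache on the instance
#           return value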
|
cngo-github/snoozy
|
snoozy/tests/test_snoozy.py
|
Python
|
mit
| 2,478
|
# -*- coding: utf-8 -*-
#
# The Biologist's Guide to Computing documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 11 03:59:07 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
numfig = True
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u"The Biologist's Guide to Computing"
copyright = u'2015, Tjelvar S. G. Olsson'
author = u'Tjelvar S. G. Olsson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.10.0'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = "css/bgtc_theme.css"
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'BiologistsGuidetoComputing'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
'preamble': r'''
\usepackage{charter}
\usepackage[defaultsans]{lato}
\usepackage{inconsolata}
\usepackage{perpage}
\MakePerPage{footnote}
''',
# Latex figure (float) alignment
#'figure_align': 'htbp',
'printindex': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'BiologistsGuidetoComputing.tex', u"The Biologist's Guide to Computing",
u'Tjelvar S. G. Olsson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "images/cover-image.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
latex_show_pagerefs = True
# If true, show URL addresses after external links.
latex_show_urls = "footnote"
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'thebiologistsguidetocomputing', u"The Biologist's Guide to Computing",
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'BiologistsGuidetoComputing', u"The Biologist's Guide to Computing",
author, 'BiologistsGuidetoComputing', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
tjelvar-olsson/biologists-guide-to-computing
|
source/conf.py
|
Python
|
cc0-1.0
| 11,737
|
from hooks import hookutils
hiddenimports = ["PyQt4.QtCore", "PyQt4.QtGui", "PyQt4.QtSvg"]
if hookutils.qwt_numpy_support():
hiddenimports.append("numpy")
if hookutils.qwt_numeric_support():
hiddenimports.append("Numeric")
if hookutils.qwt_numarray_support():
hiddenimports.append("numarray")
|
pdubroy/kurt
|
build/MacOS/PyInstaller/pyinstaller-svn-r812/hooks/hook-PyQt4.Qwt5.py
|
Python
|
gpl-2.0
| 307
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ############################################################################
## ##
## This file is part of DPAPIck ##
## Windows DPAPI decryption & forensic toolkit ##
## ##
## ##
## Copyright (C) 2010, 2011 Cassidian SAS. All rights reserved. ##
## This document is the property of Cassidian SAS, it may not be copied or ##
## circulated without prior licence ##
## ##
## Author: Jean-Michel Picod <jmichel.p@gmail.com> ##
## ##
## This program is distributed under GPLv3 licence (see LICENCE.txt) ##
## ##
#############################################################################
import hashlib
import struct
import array
import M2Crypto
class CryptoAlgo(object):
"""This class is used to wrap Microsoft algorithm IDs with M2Crypto"""
class Algo(object):
def __init__(self, data):
self.data = data
def __getattr__(self, attr):
if attr in self.data:
return self.data[attr]
raise AttributeError(attr)
_crypto_data = { }
@classmethod
def add_algo(cls, algnum, **kargs):
cls._crypto_data[algnum] = cls.Algo(kargs)
@classmethod
def get_algo(cls, algnum):
return cls._crypto_data[algnum]
def __init__(self, i):
self.algnum = i
self.algo = CryptoAlgo.get_algo(i)
name = property(lambda self: self.algo.name)
m2name = property(lambda self: self.algo.m2)
keyLength = property(lambda self: self.algo.keyLength / 8)
ivLength = property(lambda self: self.algo.IVLength / 8)
blockSize = property(lambda self: self.algo.blockLength / 8)
digestLength = property(lambda self: self.algo.digestLength / 8)
def do_fixup_key(self, key):
try:
return self.algo.keyFixup.__call__(key)
except AttributeError:
return key
def __repr__(self):
return "%s [%#x]" % (self.algo.name, self.algnum)
def des_set_odd_parity(key):
_lut = [1, 1, 2, 2, 4, 4, 7, 7, 8, 8, 11, 11, 13, 13, 14, 14, 16, 16, 19,
19, 21, 21, 22, 22, 25, 25, 26, 26, 28, 28, 31, 31, 32, 32, 35, 35, 37,
37, 38, 38, 41, 41, 42, 42, 44, 44, 47, 47, 49, 49, 50, 50, 52, 52, 55,
55, 56, 56, 59, 59, 61, 61, 62, 62, 64, 64, 67, 67, 69, 69, 70, 70, 73,
73, 74, 74, 76, 76, 79, 79, 81, 81, 82, 82, 84, 84, 87, 87, 88, 88, 91,
91, 93, 93, 94, 94, 97, 97, 98, 98, 100, 100, 103, 103, 104, 104, 107,
107, 109, 109, 110, 110, 112, 112, 115, 115, 117, 117, 118, 118, 121,
121, 122, 122, 124, 124, 127, 127, 128, 128, 131, 131, 133, 133, 134,
134, 137, 137, 138, 138, 140, 140, 143, 143, 145, 145, 146, 146, 148,
148, 151, 151, 152, 152, 155, 155, 157, 157, 158, 158, 161, 161, 162,
162, 164, 164, 167, 167, 168, 168, 171, 171, 173, 173, 174, 174, 176,
176, 179, 179, 181, 181, 182, 182, 185, 185, 186, 186, 188, 188, 191,
191, 193, 193, 194, 194, 196, 196, 199, 199, 200, 200, 203, 203, 205,
205, 206, 206, 208, 208, 211, 211, 213, 213, 214, 214, 217, 217, 218,
218, 220, 220, 223, 223, 224, 224, 227, 227, 229, 229, 230, 230, 233,
233, 234, 234, 236, 236, 239, 239, 241, 241, 242, 242, 244, 244, 247,
247, 248, 248, 251, 251, 253, 253, 254, 254]
tmp = array.array("B")
tmp.fromstring(key)
for i, v in enumerate(tmp):
tmp[i] = _lut[v]
return tmp.tostring()
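# Example (derived directly from the lookup table above): every output byte
# has odd parity, e.g. des_set_odd_parity("\x00" * 8) returns "\x01" * 8
# because _lut[0] == 1.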
CryptoAlgo.add_algo(0x6603, name="DES3", keyLength=192, IVLength=64, blockLength=64, m2="des_ede3_cbc",
keyFixup=des_set_odd_parity)
CryptoAlgo.add_algo(0x6609, name="DES2", keyLength=128, IVLength=64, blockLength=64, m2="des_ede_cbc",
keyFixup=des_set_odd_parity)
CryptoAlgo.add_algo(0x6611, name="AES", keyLength=128, IVLength=128, blockLength=128, m2="aes_128_cbc")
CryptoAlgo.add_algo(0x660e, name="AES-128", keyLength=128, IVLength=128, blockLength=128, m2="aes_128_cbc")
CryptoAlgo.add_algo(0x660f, name="AES-192", keyLength=192, IVLength=128, blockLength=128, m2="aes_192_cbc")
CryptoAlgo.add_algo(0x6610, name="AES-256", keyLength=256, IVLength=128, blockLength=128, m2="aes_256_cbc")
CryptoAlgo.add_algo(0x6601, name="DES", keyLength=64, IVLength=64, blockLength=64, m2="des_cbc",
keyFixup=des_set_odd_parity)
CryptoAlgo.add_algo(0x8009, name="HMAC", digestLength=160, blockLength=512)
CryptoAlgo.add_algo(0x8001, name="md2", digestLength=128, blockLength=128)
CryptoAlgo.add_algo(0x8002, name="md4", digestLength=128, blockLength=512)
CryptoAlgo.add_algo(0x8003, name="md5", digestLength=128, blockLength=512)
CryptoAlgo.add_algo(0x8004, name="sha1", digestLength=160, blockLength=512)
CryptoAlgo.add_algo(0x800c, name="sha256", digestLength=256, blockLength=512)
CryptoAlgo.add_algo(0x800d, name="sha384", digestLength=384, blockLength=1024)
CryptoAlgo.add_algo(0x800e, name="sha512", digestLength=512, blockLength=1024)
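# Example: the wrapper exposes byte-sized parameters derived from the table
# above. Key lengths are registered in bits, so:
#
#   CryptoAlgo(0x6611).name       # -> "AES"
#   CryptoAlgo(0x6611).keyLength  # -> 16 (bytes, i.e. 128 bits / 8)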
def CryptSessionKeyXP(masterkey, nonce, hashAlgo, entropy=None, strongPassword=None, verifBlob=None):
"""Computes the decryption key for XP DPAPI blob, given the masterkey and optional information.
This implementation relies on a faulty implementation from Microsoft that does not respect the HMAC RFC.
Instead of updating the inner pad, we update the outer pad...
This algorithm is also used when checking the HMAC for integrity after decryption
:param masterkey: decrypted masterkey (should be 64 bytes long)
    :param nonce: this is the nonce contained in the blob or the HMAC in the blob (integrity check)
    :param hashAlgo: a CryptoAlgo wrapper describing the hash algorithm to use
:param entropy: this is the optional entropy from CryptProtectData() API
:param strongPassword: optional password used for decryption or the blob itself
:param verifBlob: optional encrypted blob used for integrity check
:returns: decryption key
:rtype : str
"""
if len(masterkey) > 20:
masterkey = hashlib.sha1(masterkey).digest()
masterkey += "\x00" * hashAlgo.blockSize
ipad = "".join(chr(ord(masterkey[i]) ^ 0x36) for i in range(hashAlgo.blockSize))
opad = "".join(chr(ord(masterkey[i]) ^ 0x5c) for i in range(hashAlgo.blockSize))
digest = hashlib.new(hashAlgo.name)
digest.update(ipad)
digest.update(nonce)
tmp = digest.digest()
digest = hashlib.new(hashAlgo.name)
digest.update(opad)
digest.update(tmp)
if entropy is not None:
digest.update(entropy)
if strongPassword is not None:
strongPassword = hashlib.sha1(strongPassword.rstrip("\x00").encode("UTF-16LE")).digest()
digest.update(strongPassword)
elif verifBlob is not None:
digest.update(verifBlob)
return digest.digest()
def CryptSessionKeyWin7(masterkey, nonce, hashAlgo, entropy=None, strongPassword=None, verifBlob=None):
"""Computes the decryption key for Win7+ DPAPI blob, given the masterkey and optional information.
This implementation relies on an RFC compliant HMAC implementation
This algorithm is also used when checking the HMAC for integrity after decryption
:param masterkey: decrypted masterkey (should be 64 bytes long)
    :param nonce: this is the nonce contained in the blob or the HMAC in the blob (integrity check)
    :param hashAlgo: a CryptoAlgo wrapper describing the hash algorithm to use
:param entropy: this is the optional entropy from CryptProtectData() API
:param strongPassword: optional password used for decryption or the blob itself
:param verifBlob: optional encrypted blob used for integrity check
:returns: decryption key
:rtype : str
"""
if len(masterkey) > 20:
masterkey = hashlib.sha1(masterkey).digest()
digest = M2Crypto.EVP.HMAC(masterkey, hashAlgo.name)
digest.update(nonce)
if entropy is not None:
digest.update(entropy)
if strongPassword is not None:
strongPassword = hashlib.sha512(strongPassword.rstrip("\x00").encode("UTF-16LE")).digest()
digest.update(strongPassword)
elif verifBlob is not None:
digest.update(verifBlob)
return digest.final()
def CryptDeriveKey(h, cipherAlgo, hashAlgo):
"""Internal use. Mimics the corresponding native Microsoft function"""
if len(h) > hashAlgo.blockSize:
h = hashlib.new(hashAlgo.name, h).digest()
if len(h) >= cipherAlgo.keyLength:
return h
h += "\x00" * hashAlgo.blockSize
ipad = "".join(chr(ord(h[i]) ^ 0x36) for i in range(hashAlgo.blockSize))
opad = "".join(chr(ord(h[i]) ^ 0x5c) for i in range(hashAlgo.blockSize))
k = hashlib.new(hashAlgo.name, ipad).digest() + hashlib.new(hashAlgo.name, opad).digest()
k = cipherAlgo.do_fixup_key(k)
return k
def decrypt_lsa_key_nt5(lsakey, syskey):
"""This function decrypts the LSA key using the syskey"""
dg = hashlib.md5()
dg.update(syskey)
for i in xrange(1000):
dg.update(lsakey[60:76])
arcfour = M2Crypto.RC4.RC4(dg.digest())
deskey = arcfour.update(lsakey[12:60]) + arcfour.final()
return [deskey[16 * x:16 * (x + 1)] for x in xrange(3)]
def decrypt_lsa_key_nt6(lsakey, syskey):
"""This function decrypts the LSA keys using the syskey"""
dg = hashlib.sha256()
dg.update(syskey)
for i in xrange(1000):
dg.update(lsakey[28:60])
c = M2Crypto.EVP.Cipher(alg="aes_256_ecb", key=dg.digest(), iv="", op=M2Crypto.decrypt)
c.set_padding(0)
keys = c.update(lsakey[60:]) + c.final()
size = struct.unpack_from("<L", keys)[0]
keys = keys[16:16 + size]
currentkey = "%0x-%0x-%0x-%0x%0x-%0x%0x%0x%0x%0x%0x" % struct.unpack("<L2H8B", keys[4:20])
nb = struct.unpack("<L", keys[24:28])[0]
off = 28
kd = {}
for i in xrange(nb):
g = "%0x-%0x-%0x-%0x%0x-%0x%0x%0x%0x%0x%0x" % struct.unpack("<L2H8B", keys[off:off + 16])
t, l = struct.unpack_from("<2L", keys[off + 16:])
k = keys[off + 24:off + 24 + l]
kd[g] = {"type": t, "key": k}
off += 24 + l
return (currentkey, kd)
def SystemFunction005(secret, key):
"""This function is used to decrypt LSA secrets.
Reproduces the corresponding Windows internal function.
Taken from creddump project https://code.google.com/p/creddump/
"""
decrypted_data = ''
j = 0
algo = CryptoAlgo(0x6603)
for i in range(0, len(secret), 8):
enc_block = secret[i:i + 8]
block_key = key[j:j + 7]
des_key = []
des_key.append(ord(block_key[0]) >> 1)
des_key.append(((ord(block_key[0]) & 0x01) << 6) | (ord(block_key[1]) >> 2))
des_key.append(((ord(block_key[1]) & 0x03) << 5) | (ord(block_key[2]) >> 3))
des_key.append(((ord(block_key[2]) & 0x07) << 4) | (ord(block_key[3]) >> 4))
des_key.append(((ord(block_key[3]) & 0x0F) << 3) | (ord(block_key[4]) >> 5))
des_key.append(((ord(block_key[4]) & 0x1F) << 2) | (ord(block_key[5]) >> 6))
des_key.append(((ord(block_key[5]) & 0x3F) << 1) | (ord(block_key[6]) >> 7))
des_key.append(ord(block_key[6]) & 0x7F)
des_key = algo.do_fixup_key("".join([chr(x << 1) for x in des_key]))
cipher = M2Crypto.EVP.Cipher(alg="des_ecb", key=des_key, iv="", op=M2Crypto.decrypt)
cipher.set_padding(0)
decrypted_data += cipher.update(enc_block) + cipher.final()
j += 7
if len(key[j:j + 7]) < 7:
j = len(key[j:j + 7])
dec_data_len = struct.unpack("<L", decrypted_data[:4])[0]
return decrypted_data[8:8 + dec_data_len]
def decrypt_lsa_secret(secret, lsa_keys):
"""This function replaces SystemFunction005 for newer Windows"""
keyid = "%0x-%0x-%0x-%0x%0x-%0x%0x%0x%0x%0x%0x" % struct.unpack("<L2H8B", secret[4:20])
if keyid not in lsa_keys:
return None
algo = struct.unpack("<L", secret[20:24])[0]
dg = hashlib.sha256()
dg.update(lsa_keys[keyid]["key"])
for i in xrange(1000):
dg.update(secret[28:60])
c = M2Crypto.EVP.Cipher(alg="aes_256_ecb", key=dg.digest(), iv="", op=M2Crypto.decrypt)
c.set_padding(0)
clear = c.update(secret[60:]) + c.final()
size = struct.unpack_from("<L", clear)[0]
return clear[16:16 + size]
def pbkdf2(passphrase, salt, keylen, iterations, digest='sha1'):
"""Implementation of PBKDF2 that allows specifying digest algorithm.
Returns the corresponding expanded key which is keylen long.
"""
buff = ""
i = 1
while len(buff) < keylen:
U = salt + struct.pack("!L", i)
i += 1
derived = M2Crypto.EVP.hmac(passphrase, U, digest)
for r in xrange(iterations - 1):
actual = M2Crypto.EVP.hmac(passphrase, derived, digest)
derived = ''.join([chr(ord(x) ^ ord(y)) for (x, y) in zip(derived, actual)])
buff += derived
return buff[:keylen]
def derivePwdHash(pwdhash, userSID, digest='sha1'):
"""Internal use. Computes the encryption key from a user's password hash"""
return M2Crypto.EVP.hmac(pwdhash, (userSID + "\0").encode("UTF-16LE"), digest)
def dataDecrypt(cipherAlgo, hashAlgo, raw, encKey, iv, rounds):
"""Internal use. Decrypts data stored in DPAPI structures."""
hname = {"HMAC": "sha1"}.get(hashAlgo.name, hashAlgo.name)
derived = pbkdf2(encKey, iv, cipherAlgo.keyLength + cipherAlgo.ivLength, rounds, hname)
key, iv = derived[:cipherAlgo.keyLength], derived[cipherAlgo.keyLength:]
key = key[:cipherAlgo.keyLength]
iv = iv[:cipherAlgo.ivLength]
cipher = M2Crypto.EVP.Cipher(cipherAlgo.m2name, key, iv, M2Crypto.decrypt, 0)
cipher.set_padding(0)
cleartxt = cipher.update(raw) + cipher.final()
return cleartxt
def DPAPIHmac(hashAlgo, pwdhash, hmacSalt, value):
"""Internal function used to compute HMACs of DPAPI structures"""
hname = {"HMAC": "sha1"}.get(hashAlgo.name, hashAlgo.name)
encKey = M2Crypto.EVP.HMAC(pwdhash, hname)
encKey.update(hmacSalt)
encKey = encKey.final()
rv = M2Crypto.EVP.HMAC(encKey, hname)
rv.update(value)
return rv.final()
# vim:ts=4:expandtab:sw=4
|
newsoft/dpapick
|
DPAPI/Core/crypto.py
|
Python
|
gpl-3.0
| 14,519
|
# I will rewrite this file soon.
import os
import re
from re import findall
from string import replace, lower
import dbus
import threading
import cPickle
def dbus_name_running(name):
try:
session = dbus.SessionBus().get_object('org.freedesktop.DBus', '/')
names = session.ListNames()
return name in names
    except Exception:
        return False
def async_call(func, args, callback, exception):
def run(func, args, callback, exception):
res = None
try:
res = func(*args)
        except Exception as e:
exception(e)
else:
callback(res)
thread = threading.Thread(target=run,
args=(func, args, callback, exception))
thread.start()
return thread
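# Illustrative use (the callback names and the `some_lrc_text` variable
# below are hypothetical):
#
#   def on_done(result):
#       print result
#
#   def on_error(exc):
#       print exc
#
#   async_call(lrctolist, (some_lrc_text,), on_done, on_error)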
def lrctolist(lrc):
    #pattern = r'\[(?P<type>.*?):(?P<value>.*?)\](?P<tail>.*)'
    pattern = r'\[.*\](?P<tail>.?)'
    pt_value = r'\[(?P<type>.+?):(?P<value>.+?)\]'
    rpl_tail = r'\g<tail>'
lrc = lrc.splitlines()
#title = ''
#artist = ''
#album = ''
#editor = ''
offset = 0
arrays = []
for line in lrc:
line = replace(line, "\n", "")
line = replace(line, "\r", "")
if line == "":
continue
        if line[0] == '[':  # only parse lines starting with '['; a CJK character here could otherwise crash
value = findall(pt_value, line)
ly = re.sub(pattern, rpl_tail, line)
if value != []:
name = lower(value[0][0])
namev = value[0][1]
if name == "ti":
#title = namev
continue
if name == 'ar':
#artist = namev
continue
if name == 'al':
#album = namev
continue
if name == 'by':
#editor = namev
continue
if name == 'offset':
offset = int(namev)
continue
if name == 'la':
continue
if name == 'encoding':
continue
for j in range(len(value)):
time = offset # offset using here
if '.' in value[j][1]:
time += int(value[j][0])*60*1000
time += int(value[j][1].split('.', 2)[0])*1000 + \
int(value[j][1].split('.', 2)[1])
elif ':' in value[j][1]:
time += int(value[j][0])*60*1000
time += int(value[j][1].split(':', 2)[0])*1000 + \
int(value[j][1].split(':', 2)[1])*100/60
else:
time += int(value[j][0])*60*1000
time += int(value[j][1])*1000
if time < 0:
time = 0
arrays.append((time, ly))
arrays.sort()
return arrays
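# Example: a small LRC fragment parses into (milliseconds, lyric) pairs,
# sorted by time. Note the fractional field is added unscaled, so
# "[00:12.34]" becomes 12*1000 + 34 = 12034 ms:
#
#   lrctolist("[ti:Song]\n[00:12.34]Hello\n[00:01.50]World")
#   # -> [(1050, 'World'), (12034, 'Hello')]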
def makedirs_for_file(filename):
try:
os.makedirs(os.path.dirname(filename))
    except Exception as e:
        print e
def dump_object(obj, filename):
try:
os.makedirs(os.path.dirname(filename))
except:
pass
try:
cPickle.dump(obj, open(filename, 'w'))
except:
pass
def load_object(objtype, filename):
try:
return objtype(cPickle.load(open(filename)))
except:
return None
def dump_text(text, filename):
try:
os.makedirs(os.path.dirname(filename))
except:
pass
try:
open(filename, 'w').write(text.encode('utf-8'))
except Exception:
pass
def load_text(filename):
try:
return open(filename).read()
except:
return None
|
solos/Lyriczilla
|
lyriczilla/util.py
|
Python
|
gpl-2.0
| 3,773
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Haochen Wu (wuhaochen42@gmail.com)
"""Algorithms to calculate reciprocity in a directed graph."""
from networkx import NetworkXError
from ..utils import not_implemented_for
__all__ = ['reciprocity', 'overall_reciprocity']
@not_implemented_for('undirected', 'multigraph')
def reciprocity(G, nodes=None):
r"""Compute the reciprocity in a directed graph.
The reciprocity of a directed graph is defined as the ratio
of the number of edges pointing in both directions to the total
number of edges in the graph.
Formally, $r = |{(u,v) \in G|(v,u) \in G}| / |{(u,v) \in G}|$.
The reciprocity of a single node u is defined similarly,
it is the ratio of the number of edges in both directions to
the total number of edges attached to node u.
Parameters
----------
G : graph
A networkx directed graph
nodes : container of nodes, optional (default=whole graph)
Compute reciprocity for nodes in this container.
Returns
-------
out : dictionary
Reciprocity keyed by node label.
Notes
-----
The reciprocity is not defined for isolated nodes.
In such cases this function will return None.
"""
# If `nodes` is not specified, calculate the reciprocity of the graph.
if nodes is None:
return overall_reciprocity(G)
# If `nodes` represents a single node in the graph, return only its
# reciprocity.
if nodes in G:
reciprocity = next(_reciprocity_iter(G, nodes))[1]
if reciprocity is None:
raise NetworkXError('Not defined for isolated nodes.')
else:
return reciprocity
# Otherwise, `nodes` represents an iterable of nodes, so return a
# dictionary mapping node to its reciprocity.
return dict(_reciprocity_iter(G, nodes))
def _reciprocity_iter(G, nodes):
""" Return an iterator of (node, reciprocity).
"""
n = G.nbunch_iter(nodes)
for node in n:
pred = set(G.predecessors(node))
succ = set(G.successors(node))
overlap = pred & succ
n_total = len(pred) + len(succ)
# Reciprocity is not defined for isolated nodes.
# Return None.
if n_total == 0:
yield (node, None)
else:
reciprocity = 2.0 * float(len(overlap)) / float(n_total)
yield (node, reciprocity)
@not_implemented_for('undirected', 'multigraph')
def overall_reciprocity(G):
"""Compute the reciprocity for the whole graph.
See the doc of reciprocity for the definition.
Parameters
----------
G : graph
A networkx graph
"""
n_all_edge = G.number_of_edges()
n_overlap_edge = (n_all_edge - G.to_undirected().number_of_edges()) * 2
if n_all_edge == 0:
raise NetworkXError("Not defined for empty graphs")
return float(n_overlap_edge) / float(n_all_edge)
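# Example (doctest-style, consistent with the definitions above):
#
#   >>> import networkx as nx
#   >>> G = nx.DiGraph([(1, 2), (2, 1), (1, 3)])
#   >>> nx.overall_reciprocity(G)   # two of three edges are reciprocated
#   0.6666666666666666
#   >>> nx.reciprocity(G, nodes=1)  # node 1: edges (1,2),(2,1),(1,3) -> 2/3
#   0.6666666666666666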
|
sserrot/champion_relationships
|
venv/Lib/site-packages/networkx/algorithms/reciprocity.py
|
Python
|
mit
| 3,089
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
__all__ = ['many_times', 'prog_scope']
def many_times(times):
def __impl__(fn):
def __fn__(*args, **kwargs):
for _ in range(times):
fn(*args, **kwargs)
return __fn__
return __impl__
def prog_scope():
def __impl__(fn):
def __fn__(*args, **kwargs):
prog = fluid.Program()
startup_prog = fluid.Program()
scope = fluid.core.Scope()
with fluid.scope_guard(scope):
with fluid.program_guard(prog, startup_prog):
fn(*args, **kwargs)
return __fn__
return __impl__
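# Illustrative use (the test body below is hypothetical): each of the ten
# repeated runs executes inside a fresh Program and Scope.
#
#   @many_times(10)
#   @prog_scope()
#   def test_once():
#       pass  # build and run a program here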
|
Canpio/Paddle
|
python/paddle/fluid/tests/unittests/decorators.py
|
Python
|
apache-2.0
| 1,266
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The freesurfer module provides basic functions for interfacing with
freesurfer tools.
Currently these tools are supported:
* Dicom2Nifti: using mri_convert
* Resample: using mri_convert
Examples
--------
See the docstrings for the individual classes for 'working' examples.
"""
__docformat__ = 'restructuredtext'
import os
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (CommandLine, Directory,
CommandLineInputSpec, isdefined)
class Info(object):
""" Freesurfer subject directory and version information.
Examples
--------
>>> from nipype.interfaces.freesurfer import Info
>>> Info.version() # doctest: +SKIP
>>> Info.subjectsdir() # doctest: +SKIP
"""
@staticmethod
def version():
"""Check for freesurfer version on system
Find which freesurfer is being used....and get version from
/path/to/freesurfer/build-stamp.txt
Returns
-------
version : string
version number as string
or None if freesurfer version not found
"""
fs_home = os.getenv('FREESURFER_HOME')
if fs_home is None:
return None
versionfile = os.path.join(fs_home, 'build-stamp.txt')
if not os.path.exists(versionfile):
return None
fid = open(versionfile, 'rt')
version = fid.readline()
fid.close()
return version
@classmethod
def subjectsdir(cls):
"""Check the global SUBJECTS_DIR
Parameters
----------
subjects_dir : string
The system defined subjects directory
Returns
-------
subject_dir : string
Represents the current environment setting of SUBJECTS_DIR
"""
if cls.version():
return os.environ['SUBJECTS_DIR']
return None
class FSTraitedSpec(CommandLineInputSpec):
subjects_dir = Directory(exists=True, desc='subjects directory')
class FSCommand(CommandLine):
"""General support for FreeSurfer commands.
Every FS command accepts 'subjects_dir' input.
"""
input_spec = FSTraitedSpec
_subjects_dir = None
def __init__(self, **inputs):
super(FSCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._subjects_dir_update, 'subjects_dir')
if not self._subjects_dir:
self._subjects_dir = Info.subjectsdir()
if not isdefined(self.inputs.subjects_dir) and self._subjects_dir:
self.inputs.subjects_dir = self._subjects_dir
self._subjects_dir_update()
def _subjects_dir_update(self):
if self.inputs.subjects_dir:
self.inputs.environ.update({'SUBJECTS_DIR':
self.inputs.subjects_dir})
@classmethod
def set_default_subjects_dir(cls, subjects_dir):
cls._subjects_dir = subjects_dir
def run(self, **inputs):
if 'subjects_dir' in inputs:
self.inputs.subjects_dir = inputs['subjects_dir']
self._subjects_dir_update()
return super(FSCommand, self).run(**inputs)
def _gen_fname(self, basename, fname=None, cwd=None, suffix='_fs',
use_ext=True):
'''Define a generic mapping for a single outfile
The filename is potentially autogenerated by suffixing inputs.infile
Parameters
----------
basename : string (required)
filename to base the new filename on
fname : string
if not None, just use this fname
cwd : string
prefix paths with cwd, otherwise os.getcwd()
suffix : string
default suffix
'''
if basename == '':
msg = 'Unable to generate filename for command %s. ' % self.cmd
msg += 'basename is not set!'
raise ValueError(msg)
if cwd is None:
cwd = os.getcwd()
fname = fname_presuffix(basename, suffix=suffix,
use_ext=use_ext, newpath=cwd)
return fname
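    # Example (assuming fname_presuffix inserts the suffix before the file
    # extension): self._gen_fname('foo.nii') would yield
    # os.path.join(os.getcwd(), 'foo_fs.nii').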
@property
def version(self):
ver = Info.version()
if ver:
if 'dev' in ver:
return ver.rstrip().split('-')[-1] + '.dev'
else:
return ver.rstrip().split('-v')[-1]
|
mick-d/nipype_source
|
nipype/interfaces/freesurfer/base.py
|
Python
|
bsd-3-clause
| 4,569
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_igw
short_description: Manage an AWS VPC Internet gateway
description:
- Manage an AWS VPC Internet gateway
version_added: "2.0"
author: Robert Estelle (@erydo)
options:
vpc_id:
description:
- The VPC ID for the VPC in which to manage the Internet Gateway.
required: true
tags:
description:
- "A dict of tags to apply to the internet gateway. Any tags currently applied to the internet gateway and not present here will be removed."
aliases: [ 'resource_tags' ]
version_added: "2.4"
state:
description:
- Create or terminate the IGW
default: present
choices: [ 'present', 'absent' ]
extends_documentation_fragment:
- aws
- ec2
requirements:
- botocore
- boto3
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Ensure that the VPC has an Internet Gateway.
# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
ec2_vpc_igw:
vpc_id: vpc-abcdefgh
state: present
register: igw
'''
RETURN = '''
changed:
description: If any changes have been made to the Internet Gateway.
type: bool
returned: always
sample:
changed: false
gateway_id:
description: The unique identifier for the Internet Gateway.
type: str
returned: I(state=present)
sample:
gateway_id: "igw-XXXXXXXX"
tags:
    description: The tags associated with the Internet Gateway.
type: dict
returned: I(state=present)
sample:
tags:
"Ansible": "Test"
vpc_id:
description: The VPC ID associated with the Internet Gateway.
type: str
returned: I(state=present)
sample:
vpc_id: "vpc-XXXXXXXX"
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (
AWSRetry,
boto3_conn,
ec2_argument_spec,
get_aws_connection_info,
camel_dict_to_snake_dict,
boto3_tag_list_to_ansible_dict,
ansible_dict_to_boto3_filter_list,
ansible_dict_to_boto3_tag_list,
compare_aws_tags
)
from ansible.module_utils.six import string_types
class AnsibleEc2Igw(object):
def __init__(self, module, results):
self._module = module
self._results = results
self._connection = self._module.client('ec2')
self._check_mode = self._module.check_mode
def process(self):
vpc_id = self._module.params.get('vpc_id')
state = self._module.params.get('state', 'present')
tags = self._module.params.get('tags')
if state == 'present':
self.ensure_igw_present(vpc_id, tags)
elif state == 'absent':
self.ensure_igw_absent(vpc_id)
def get_matching_igw(self, vpc_id):
filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
igws = []
try:
response = self._connection.describe_internet_gateways(Filters=filters)
igws = response.get('InternetGateways', [])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e)
igw = None
if len(igws) > 1:
self._module.fail_json(
msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'.format(vpc_id))
elif igws:
igw = camel_dict_to_snake_dict(igws[0])
return igw
def check_input_tags(self, tags):
nonstring_tags = [k for k, v in tags.items() if not isinstance(v, string_types)]
if nonstring_tags:
self._module.fail_json(msg='One or more tags contain non-string values: {0}'.format(nonstring_tags))
def ensure_tags(self, igw_id, tags, add_only):
final_tags = []
filters = ansible_dict_to_boto3_filter_list({'resource-id': igw_id, 'resource-type': 'internet-gateway'})
cur_tags = None
try:
cur_tags = self._connection.describe_tags(Filters=filters)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="Couldn't describe tags")
        purge_tags = not add_only
to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags'))
if to_update:
try:
if self._check_mode:
# update tags
final_tags.update(to_update)
else:
AWSRetry.exponential_backoff()(self._connection.create_tags)(
Resources=[igw_id],
Tags=ansible_dict_to_boto3_tag_list(to_update)
)
self._results['changed'] = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="Couldn't create tags")
if to_delete:
try:
if self._check_mode:
                    # delete tags
for key in to_delete:
del final_tags[key]
else:
tags_list = []
for key in to_delete:
tags_list.append({'Key': key})
AWSRetry.exponential_backoff()(self._connection.delete_tags)(Resources=[igw_id], Tags=tags_list)
self._results['changed'] = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="Couldn't delete tags")
if not self._check_mode and (to_update or to_delete):
try:
response = self._connection.describe_tags(Filters=filters)
final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags'))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="Couldn't describe tags")
return final_tags
@staticmethod
def get_igw_info(igw):
return {
'gateway_id': igw['internet_gateway_id'],
'tags': igw['tags'],
'vpc_id': igw['vpc_id']
}
def ensure_igw_absent(self, vpc_id):
igw = self.get_matching_igw(vpc_id)
if igw is None:
return self._results
if self._check_mode:
self._results['changed'] = True
return self._results
try:
self._results['changed'] = True
self._connection.detach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
self._connection.delete_internet_gateway(InternetGatewayId=igw['internet_gateway_id'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway")
return self._results
def ensure_igw_present(self, vpc_id, tags):
self.check_input_tags(tags)
igw = self.get_matching_igw(vpc_id)
if igw is None:
if self._check_mode:
self._results['changed'] = True
self._results['gateway_id'] = None
return self._results
try:
response = self._connection.create_internet_gateway()
igw = camel_dict_to_snake_dict(response['InternetGateway'])
self._connection.attach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
self._results['changed'] = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg='Unable to create Internet Gateway')
igw['vpc_id'] = vpc_id
igw['tags'] = self.ensure_tags(igw_id=igw['internet_gateway_id'], tags=tags, add_only=False)
igw_info = self.get_igw_info(igw)
self._results.update(igw_info)
return self._results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
vpc_id=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(default=dict(), required=False, type='dict', aliases=['resource_tags'])
)
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
results = dict(
changed=False
)
igw_manager = AnsibleEc2Igw(module=module, results=results)
igw_manager.process()
module.exit_json(**results)
if __name__ == '__main__':
main()
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py
|
Python
|
gpl-3.0
| 9,196
|
from __future__ import division
import math
import os
import platform
import sys
from collections import deque
from contextlib import contextmanager
from threading import Event, Thread
from time import sleep
import pyinfra
IS_WINDOWS = platform.system() == 'Windows'
WAIT_TIME = 1 / 5
WAIT_CHARS = deque(('-', '/', '|', '\\'))
# Hacky way of getting terminal size (so we can clear lines)
# Source: http://stackoverflow.com/questions/566746
IS_TTY = sys.stdout.isatty() and sys.stderr.isatty()
TERMINAL_WIDTH = 0
if IS_TTY:
try:
TERMINAL_WIDTH = os.get_terminal_size().columns
except AttributeError:
if not IS_WINDOWS:
terminal_size = os.popen('stty size', 'r').read().split()
if len(terminal_size) == 2:
TERMINAL_WIDTH = int(terminal_size[1])
def _print_spinner(stop_event, progress_queue):
if not IS_TTY or os.environ.get('PYINFRA_PROGRESS') == 'off':
return
progress = ''
text = ''
while True:
        # Stop when asked to
if stop_event.is_set():
break
WAIT_CHARS.rotate(1)
try:
progress = progress_queue[-1]
except IndexError:
pass
text = ' {0}'.format(
' '.join((WAIT_CHARS[0], progress)),
)
text = '{0}\r'.format(text)
sys.stderr.write(text)
sys.stderr.flush()
        # In pyinfra_cli's __main__ we set stdout & stderr to be line buffered,
        # so write this escape code (clear line) into the buffer but don't flush,
        # such that any next print/log/etc clears the line first.
if not IS_WINDOWS:
sys.stderr.write('\033[K')
sleep(WAIT_TIME)
@contextmanager
def progress_spinner(items, prefix_message=None):
    # If we're not in CLI mode there's nothing to render, so just return a
    # noop handler and exit.
if not pyinfra.is_cli:
yield lambda complete_item: None
return
if not isinstance(items, set):
items = set(items)
total_items = len(items)
stop_event = Event()
def make_progress_message(include_items=True):
message_bits = []
# If we only have 1 item, don't show %
if total_items > 1:
            complete = total_items - len(items)
            percentage_complete = int(math.floor(complete / total_items * 100))
message_bits.append('{0}% ({1}/{2})'.format(
percentage_complete,
complete,
total_items,
))
if prefix_message:
message_bits.append(prefix_message)
if include_items and items:
# Plus 3 for the " - " joining below
message_length = sum((len(message) + 3) for message in message_bits)
# -8 for padding left+right, -2 for {} wrapping
items_allowed_width = TERMINAL_WIDTH - 10 - message_length
if items_allowed_width > 0:
items_string = '{%s}' % (', '.join('{0}'.format(i) for i in items))
if len(items_string) >= items_allowed_width:
items_string = '%s...}' % (
# -3 for the ...
items_string[:items_allowed_width - 3],
)
message_bits.append(items_string)
return ' - '.join(message_bits)
progress_queue = deque((make_progress_message(),))
def progress(complete_item):
if complete_item not in items:
raise ValueError('Invalid complete item: {0} not in {1}'.format(
complete_item, items,
))
items.remove(complete_item)
progress_queue.append(make_progress_message())
# Kick off the spinner thread
spinner_thread = Thread(
target=_print_spinner,
args=(stop_event, progress_queue),
)
spinner_thread.daemon = True
spinner_thread.start()
# Yield allowing the actual code the spinner waits for to run
yield progress
# Finally, stop the spinner
stop_event.set()
spinner_thread.join()
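
# Illustrative usage sketch (not part of the original module; host names and
# the deploy() function are hypothetical). The context manager yields a
# callback that marks an item complete, which feeds the spinner's percentage:
#
#     with progress_spinner({'web-1', 'web-2'}, prefix_message='deploying') as progress:
#         for host in ('web-1', 'web-2'):
#             deploy(host)       # hypothetical work function
#             progress(host)     # mark this host as done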
|
Fizzadar/pyinfra
|
pyinfra/progress.py
|
Python
|
mit
| 4,111
|
########################################################################
# File name: test_structs.py
# This file is part of: aioxmpp
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import collections.abc
import enum
import unittest
import warnings
import aioxmpp
import aioxmpp.structs as structs
import aioxmpp.stanza as stanza
ESCAPING_TEST_VECTORS = [
# boring test vectors
(r"foo", r"foo"),
# less boring test vectors
(r"\5c", r"\5c5c"),
(r"\5C", r"\5C"),
# those are straight from XEP-0106
(r"space cadet", r"space\20cadet"),
(r'call me "ishmael"', r"call\20me\20\22ishmael\22"),
(r"at&t guy", r"at\26t\20guy"),
(r"d'artagnan", r"d\27artagnan"),
(r"/.fanboy", r"\2f.fanboy"),
(r"::foo::", r"\3a\3afoo\3a\3a"),
(r"<foo>", r"\3cfoo\3e"),
(r"user@host", r"user\40host"),
(r"c:\net", r"c\3a\net"),
(r"c:\\net", r"c\3a\\net"),
(r"c:\cool stuff", r"c\3a\cool\20stuff"),
(r"c:\5commas", r"c\3a\5c5commas"),
# exceptions
(r"\2plus\2is\4", r"\2plus\2is\4"),
(r"foo\bar", r"foo\bar"),
(r"foo\41r", r"foo\41r"),
# additional test vectors
(r"call\20me", r"call\5c20me"),
]
class DisableCompat:
def __enter__(self):
if aioxmpp.version_info < (1, 0, 0):
structs._USE_COMPAT_ENUM = False
def __exit__(self, exc_type, exc_value, exc_traceback):
if aioxmpp.version_info < (1, 0, 0):
structs._USE_COMPAT_ENUM = True
class TestCompatibilityMixin(unittest.TestCase):
class SomeEnum(structs.CompatibilityMixin, enum.Enum):
X = "foo"
Y = "bar"
Z = None
def test_compares_normally_without_warnings(self):
E = self.SomeEnum
with warnings.catch_warnings(record=True) as w:
self.assertEqual(E.X, E.X)
self.assertNotEqual(E.X, E.Y)
self.assertNotEqual(E.X, E.Z)
self.assertNotEqual(E.Y, E.X)
self.assertEqual(E.Y, E.Y)
self.assertNotEqual(E.Y, E.Z)
self.assertNotEqual(E.Z, E.X)
self.assertNotEqual(E.Z, E.Y)
self.assertEqual(E.Z, E.Z)
self.assertFalse(w)
def test_hashes_to_values(self):
for member in self.SomeEnum:
self.assertEqual(
hash(member),
hash(member.value)
)
def _test_eq_with_warning(self, v1, v2):
with self.assertWarnsRegex(
DeprecationWarning,
r"as of aioxmpp 1.0, SomeEnum members will not compare equal to"
r" their values") as ctx:
self.assertTrue(v1 == v2)
self.assertIn(
"test_structs.py",
ctx.filename,
)
with self.assertWarnsRegex(
DeprecationWarning,
r"as of aioxmpp 1.0, SomeEnum members will not compare equal to"
r" their values") as ctx:
self.assertFalse(v1 != v2)
self.assertIn(
"test_structs.py",
ctx.filename,
)
@unittest.skipIf(aioxmpp.version_info >= (1, 0, 0),
"does not apply to this version of aioxmpp")
def test_compares_equal_to_values_with_DeprecationWarning(self):
for member in self.SomeEnum:
self._test_eq_with_warning(member, member.value)
self._test_eq_with_warning(member.value, member)
@unittest.skipUnless(aioxmpp.version_info >= (1, 0, 0),
"does not apply to this version of aioxmpp")
def test_compares_not_equal_to_values_by_default(self):
for member in self.SomeEnum:
self.assertTrue(member != member.value)
self.assertTrue(member.value != member)
self.assertFalse(member == member.value)
self.assertFalse(member.value == member)
def test_compares_not_equal_to_values_with_compat_disabled(self):
with DisableCompat():
for member in self.SomeEnum:
self.assertTrue(member != member.value)
self.assertTrue(member.value != member)
self.assertFalse(member == member.value)
self.assertFalse(member.value == member)
class TestErrorType(unittest.TestCase):
@unittest.skipIf(aioxmpp.version_info >= (1, 0, 0),
"does not apply to this version of aioxmpp")
def test_uses_compat_mixin(self):
self.assertTrue(
issubclass(
structs.ErrorType,
structs.CompatibilityMixin,
)
)
class TestMessageType(unittest.TestCase):
@unittest.skipIf(aioxmpp.version_info >= (1, 0, 0),
"does not apply to this version of aioxmpp")
def test_uses_compat_mixin(self):
self.assertTrue(
issubclass(
structs.MessageType,
structs.CompatibilityMixin,
)
)
def test_values(self):
self.assertSetEqual(
{v.value for v in structs.MessageType},
{
"chat",
"normal",
"headline",
"groupchat",
"error",
}
)
def test_is_response(self):
for member in structs.MessageType:
self.assertEqual(
member == structs.MessageType.ERROR,
member.is_response,
)
def test_is_error(self):
for member in structs.MessageType:
self.assertEqual(
member == structs.MessageType.ERROR,
member.is_error,
)
def test_is_request(self):
for member in structs.MessageType:
self.assertFalse(member.is_request)
class TestPresenceType(unittest.TestCase):
@unittest.skipIf(aioxmpp.version_info >= (1, 0, 0),
"does not apply to this version of aioxmpp")
def test_uses_compat_mixin(self):
self.assertTrue(
issubclass(
structs.PresenceType,
structs.CompatibilityMixin,
)
)
def test_values(self):
self.assertSetEqual(
{v.value for v in structs.PresenceType},
{
"error",
"probe",
"subscribe",
"subscribed",
"unsubscribe",
"unsubscribed",
"unavailable",
None,
}
)
def test_is_response(self):
for member in structs.PresenceType:
self.assertEqual(
member == structs.PresenceType.ERROR,
member.is_response,
)
def test_is_error(self):
for member in structs.PresenceType:
self.assertEqual(
member == structs.PresenceType.ERROR,
member.is_error,
)
def test_is_request(self):
for member in structs.PresenceType:
self.assertFalse(member.is_request)
def test_is_presence_state(self):
positive = [
structs.PresenceType.AVAILABLE,
structs.PresenceType.UNAVAILABLE,
]
for member in structs.PresenceType:
self.assertEqual(
member in positive,
member.is_presence_state,
)
class TestIQType(unittest.TestCase):
@unittest.skipIf(aioxmpp.version_info >= (1, 0, 0),
"does not apply to this version of aioxmpp")
def test_uses_compat_mixin(self):
self.assertTrue(
issubclass(
structs.IQType,
structs.CompatibilityMixin,
)
)
def test_values(self):
self.assertSetEqual(
{member.value for member in structs.IQType},
{
"get",
"set",
"result",
"error",
}
)
def test_is_error(self):
for member in structs.IQType:
self.assertEqual(
member == structs.IQType.ERROR,
member.is_error
)
def test_is_request(self):
positive = [
structs.IQType.GET,
structs.IQType.SET,
]
for member in structs.IQType:
self.assertEqual(
member in positive,
member.is_request,
)
def test_is_response(self):
positive = [
structs.IQType.ERROR,
structs.IQType.RESULT,
]
for member in structs.IQType:
self.assertEqual(
member in positive,
member.is_response,
)
class TestJID(unittest.TestCase):
def test_init_full(self):
j = structs.JID("foo", "example.com", "bar")
self.assertEqual(
"foo",
j.localpart)
self.assertEqual(
"example.com",
j.domain)
self.assertEqual(
"bar",
j.resource)
def test_init_enforces_stringprep(self):
with self.assertRaises(ValueError):
structs.JID("\u0007", "example.com", "bar")
with self.assertRaises(ValueError):
structs.JID("foo", "\u070f", "bar")
with self.assertRaises(ValueError):
structs.JID("foo", "example.com", "\u0007")
self.assertEqual(
"ssa",
structs.JID("ßA", "example.test", None).localpart)
self.assertEqual(
"ix.test",
structs.JID(None, "IX.test", None).domain)
self.assertEqual(
"IX",
structs.JID(None, "example.test", "\u2168").resource)
def test_init_with_default_strict_errors_on_unassigned(self):
with self.assertRaises(ValueError):
structs.JID("\U0001f601", "example.com", "bar")
with self.assertRaises(ValueError):
structs.JID("foo", "\U0001f601example.com", "bar")
with self.assertRaises(ValueError):
structs.JID("foo", "example.com", "\U0001f601")
def test_init_without_strict_does_not_error_on_unassigned(self):
structs.JID("\U0001f601", "example.com", "bar", strict=False)
structs.JID("foo", "\U0001f601example.com", "bar", strict=False)
structs.JID("foo", "example.com", "\U0001f601", strict=False)
def test_replace(self):
j = structs.JID("foo", "example.com", "bar")
j2 = j.replace(localpart="fnord",
domain="example.invalid",
resource="baz")
self.assertEqual(
"fnord",
j2.localpart)
self.assertEqual(
"example.invalid",
j2.domain)
self.assertEqual(
"baz",
j2.resource)
def test_replace_enforces_stringprep(self):
j = structs.JID("foo", "example.com", "bar")
with self.assertRaises(ValueError):
j.replace(localpart="\u0007")
with self.assertRaises(ValueError):
j.replace(domain="\u070f")
with self.assertRaises(ValueError):
j.replace(resource="\u0007")
self.assertEqual(
"ssa",
j.replace(localpart="ßA").localpart)
self.assertEqual(
"ix.test",
j.replace(domain="IX.test").domain)
self.assertEqual(
"IX",
j.replace(resource="\u2168").resource)
def test_hashable(self):
j1 = structs.JID("foo", "bar", "baz")
j2 = structs.JID("foo", "bar", "baz")
self.assertEqual(
hash(j1),
hash(j2))
def test_eq(self):
j1 = structs.JID("foo", "bar", "baz")
j2 = structs.JID("foo", "bar", "baz")
self.assertEqual(j1, j2)
def test_ne(self):
j1 = structs.JID("foo", "bar", "baz")
j2 = structs.JID("fooo", "bar", "baz")
self.assertNotEqual(j1, j2)
def test_str_full_jid(self):
j = structs.JID("foo", "example.test", "bar")
self.assertEqual(
"foo@example.test/bar",
str(j))
def test_str_bare_jid(self):
j = structs.JID("foo", "example.test", None)
self.assertEqual(
"foo@example.test",
str(j))
def test_str_domain_jid(self):
j = structs.JID(None, "example.test", None)
self.assertEqual(
"example.test",
str(j))
def test_init_bare_jid(self):
j = structs.JID("foo", "example.test", None)
self.assertIsNone(j.resource)
self.assertEqual(
"foo",
j.localpart)
self.assertEqual(
"example.test",
j.domain)
def test_init_domain_jid(self):
j = structs.JID(None, "example.test", None)
self.assertIsNone(j.resource)
self.assertIsNone(j.localpart)
self.assertEqual(
"example.test",
j.domain)
def test_replace_domain_jid(self):
j = structs.JID("foo", "example.test", "bar")
self.assertEqual(
structs.JID(None, "example.test", None),
j.replace(localpart=None, resource=None)
)
def test_replace_require_domainpart(self):
j = structs.JID("foo", "example.test", "bar")
with self.assertRaises(ValueError):
j.replace(domain=None)
def test_require_domainpart(self):
with self.assertRaises(ValueError):
structs.JID(None, None, None)
def test_replace_rejects_surplus_argument(self):
j = structs.JID("foo", "example.test", "bar")
with self.assertRaises(TypeError):
j.replace(foobar="baz")
def test_replace_ignores_problems_on_existing_parts(self):
j = structs.JID(
"\U0001f601foo", "\U0001f601example.test", "\U0001f601bar",
strict=False,
)
j2 = j.replace()
self.assertEqual(j, j2)
def test_replace_checks_replaced_strings(self):
j = structs.JID(
"\U0001f601foo", "\U0001f601example.test", "\U0001f601bar",
strict=False,
)
with self.assertRaises(ValueError):
j.replace(
domain=j.domain
)
def test_replace_nonstrict_allows_unassigned_codepoints(self):
j = structs.JID(
"\U0001f601foo", "\U0001f601example.test", "\U0001f601bar",
strict=False,
)
j2 = j.replace(
domain=j.domain,
strict=False,
)
self.assertEqual(j, j2)
def test_immutable(self):
j = structs.JID(None, "example.test", None)
with self.assertRaises(AttributeError):
j.foo = "bar"
def test_bare(self):
j = structs.JID("foo", "example.test", "bar")
self.assertEqual(
structs.JID("foo", "example.test", None),
j.bare())
def test_is_bare(self):
self.assertFalse(structs.JID("foo", "example.test", "bar").is_bare)
self.assertTrue(structs.JID("foo", "example.test", None).is_bare)
self.assertTrue(structs.JID(None, "example.test", None).is_bare)
def test_is_domain(self):
self.assertFalse(structs.JID("foo", "example.test", "bar").is_domain)
self.assertFalse(structs.JID("foo", "example.test", None).is_domain)
self.assertTrue(structs.JID(None, "example.test", None).is_domain)
def test_fromstr_full(self):
self.assertEqual(
structs.JID("foo", "example.test", "bar"),
structs.JID.fromstr("foo@example.test/bar")
)
self.assertEqual(
structs.JID("ßA", "IX.test", "\u2168"),
structs.JID.fromstr("ssa@ix.test/IX")
)
self.assertEqual(
structs.JID("ßA", "IX.test", "bar@baz/fnord"),
structs.JID.fromstr("ssa@ix.test/bar@baz/fnord")
)
def test_fromstr_bare(self):
self.assertEqual(
structs.JID("foo", "example.test", None),
structs.JID.fromstr("foo@example.test")
)
self.assertEqual(
structs.JID("ßA", "IX.test", None),
structs.JID.fromstr("ssa@ix.test")
)
def test_fromstr_domain(self):
self.assertEqual(
structs.JID(None, "example.test", None),
structs.JID.fromstr("example.test")
)
self.assertEqual(
structs.JID(None, "IX.test", None),
structs.JID.fromstr("ix.test")
)
def test_fromstr_domain_with_funny_resource(self):
self.assertEqual(
structs.JID(None, "example.test", "foo@bar"),
structs.JID.fromstr("example.test/foo@bar")
)
def test_fromstr_domain_nonstrict(self):
self.assertEqual(
structs.JID("\U0001f601", "\U0001f601example.test", "\U0001f601",
strict=False),
structs.JID.fromstr("\U0001f601@\U0001f601example.test/\U0001f601",
strict=False)
)
def test_reject_empty_localpart(self):
with self.assertRaises(ValueError):
structs.JID("", "bar.baz", None)
with self.assertRaises(ValueError):
structs.JID.fromstr("@bar.baz")
def test_reject_empty_domainpart(self):
with self.assertRaises(ValueError):
structs.JID("foo", "", None)
with self.assertRaises(ValueError):
structs.JID.fromstr("foo@")
def test_reject_empty_resource(self):
with self.assertRaises(ValueError):
structs.JID("foo", "bar.baz", "")
with self.assertRaises(ValueError):
structs.JID.fromstr("foo@bar.baz/")
def test_reject_long_localpart(self):
with self.assertRaisesRegex(ValueError, "too long"):
structs.JID("x"*1024, "foo", None)
with self.assertRaisesRegex(ValueError, "too long"):
structs.JID("ü"*512, "foo", None)
with self.assertRaisesRegex(ValueError, "too long"):
structs.JID.fromstr("ü"*512 + "@foo")
def test_reject_long_domainpart(self):
with self.assertRaisesRegex(ValueError, "too long"):
structs.JID(None, "x"*1024, None)
with self.assertRaisesRegex(ValueError, "too long"):
structs.JID(None, "ü"*512, None)
with self.assertRaisesRegex(ValueError, "too long"):
structs.JID.fromstr("ü"*512)
def test_reject_long_resource(self):
with self.assertRaisesRegex(ValueError, "too long"):
structs.JID(None, "foo", "x"*1024)
with self.assertRaisesRegex(ValueError, "too long"):
structs.JID(None, "foo", "ü"*512)
with self.assertRaisesRegex(ValueError, "too long"):
structs.JID.fromstr("foo/" + "ü"*512)
class TestPresenceShow(unittest.TestCase):
def test_aliases(self):
self.assertIs(
structs.PresenceShow.XA,
structs.PresenceShow.EXTENDED_AWAY
)
self.assertIs(
structs.PresenceShow.PLAIN,
structs.PresenceShow.NONE
)
self.assertIs(
structs.PresenceShow.CHAT,
structs.PresenceShow.FREE_FOR_CHAT
)
self.assertIs(
structs.PresenceShow.DND,
structs.PresenceShow.DO_NOT_DISTURB
)
def test_ordering_simple(self):
values = [
structs.PresenceShow.AWAY,
structs.PresenceShow.CHAT,
structs.PresenceShow.PLAIN,
structs.PresenceShow.DND,
structs.PresenceShow.XA,
]
values.sort()
self.assertSequenceEqual(
[
structs.PresenceShow.XA,
structs.PresenceShow.AWAY,
structs.PresenceShow.PLAIN,
structs.PresenceShow.CHAT,
structs.PresenceShow.DND,
],
values,
)
def test_proper_error_message_on_invalid_ordering_operand(self):
with self.assertRaises(TypeError):
structs.PresenceShow.AWAY < 1
def test_value(self):
values = [
"xa",
"away",
None,
"chat",
"dnd"
]
for v in values:
m = structs.PresenceShow(v)
self.assertEqual(m.value, v)
def test_ordering(self):
values = [
structs.PresenceShow("xa"),
structs.PresenceShow("away"),
structs.PresenceShow(None),
structs.PresenceShow("chat"),
structs.PresenceShow("dnd"),
]
for i in range(1, len(values)-1):
for v1, v2 in zip(values[:-i], values[i:]):
self.assertLess(v1, v2)
self.assertLessEqual(v1, v2)
self.assertNotEqual(v1, v2)
self.assertGreater(v2, v1)
self.assertGreaterEqual(v2, v1)
@unittest.skipIf(aioxmpp.version_info >= (1, 0, 0),
"does not apply to this version of aioxmpp")
def test_uses_compat_mixin(self):
self.assertTrue(
issubclass(
structs.PresenceShow,
structs.CompatibilityMixin,
)
)
class TestPresenceState(unittest.TestCase):
def test_immutable(self):
ps = structs.PresenceState()
with self.assertRaises(AttributeError):
ps.foo = "bar"
with self.assertRaises(AttributeError):
ps.available = True
with self.assertRaises(AttributeError):
ps.show = "baz"
def test_init_defaults(self):
ps = structs.PresenceState()
self.assertFalse(ps.available)
self.assertEqual(ps.show, structs.PresenceShow.NONE)
def test_init_compat(self):
with self.assertWarnsRegex(
DeprecationWarning,
"as of aioxmpp 1.0, the show argument must use "
"PresenceShow instead of str") as ctx:
ps = structs.PresenceState(True, "dnd")
self.assertIn(
"test_structs.py",
ctx.filename,
)
self.assertTrue(ps.available)
self.assertEqual(ps.show, structs.PresenceShow.DND)
def test_init_available(self):
ps = structs.PresenceState(available=True)
self.assertTrue(ps.available)
def test_init_normalizes_available(self):
ps = structs.PresenceState(available="foo")
self.assertIs(True, ps.available)
def test_init_available_with_show(self):
ps = structs.PresenceState(available=True,
show=structs.PresenceShow.DND)
self.assertTrue(ps.available)
self.assertIs(structs.PresenceShow.DND, ps.show)
def test_init_available_validate_show(self):
with self.assertRaises(ValueError):
ps = structs.PresenceState(available=True, show="foobar")
for value in ["dnd", "xa", "away", None, "chat"]:
value = structs.PresenceShow(value)
ps = structs.PresenceState(
available=True,
show=value)
self.assertEqual(value, ps.show)
def test_init_unavailable_forbids_show(self):
with self.assertRaises(ValueError):
structs.PresenceState(available=False,
show=structs.PresenceShow.DND)
def test_ordering(self):
values = [
structs.PresenceState(),
structs.PresenceState(available=True,
show=structs.PresenceShow.XA),
structs.PresenceState(available=True,
show=structs.PresenceShow.AWAY),
structs.PresenceState(available=True),
structs.PresenceState(available=True,
show=structs.PresenceShow.CHAT),
structs.PresenceState(available=True,
show=structs.PresenceShow.DND),
]
for i in range(1, len(values)-1):
for v1, v2 in zip(values[:-i], values[i:]):
self.assertLess(v1, v2)
self.assertLessEqual(v1, v2)
self.assertNotEqual(v1, v2)
self.assertGreater(v2, v1)
self.assertGreaterEqual(v2, v1)
def test_proper_exception_on_invalid_ordering_operand(self):
with self.assertRaises(TypeError):
structs.PresenceState() < 1
with self.assertRaises(TypeError):
structs.PresenceState() > 1
with self.assertRaises(TypeError):
structs.PresenceState() >= 1
with self.assertRaises(TypeError):
structs.PresenceState() <= 1
self.assertFalse(structs.PresenceState() == 0)
self.assertTrue(structs.PresenceState() != 0)
def test_equality(self):
self.assertEqual(
structs.PresenceState(),
structs.PresenceState()
)
self.assertEqual(
structs.PresenceState(available=True),
structs.PresenceState(available=True)
)
self.assertEqual(
structs.PresenceState(available=True,
show=structs.PresenceShow.DND),
structs.PresenceState(available=True,
show=structs.PresenceShow.DND),
)
self.assertFalse(
structs.PresenceState(available=True,
show=structs.PresenceShow.DND) !=
structs.PresenceState(available=True,
show=structs.PresenceShow.DND)
)
def test_equality_deals_with_different_types(self):
self.assertNotEqual(structs.PresenceState(), None)
self.assertNotEqual(structs.PresenceState(), "foo")
self.assertNotEqual(structs.PresenceState(), 123)
def test_repr(self):
self.assertEqual(
"<PresenceState>",
repr(structs.PresenceState())
)
self.assertEqual(
"<PresenceState available>",
repr(structs.PresenceState(available=True))
)
self.assertEqual(
"<PresenceState available show=<PresenceShow.DND: 'dnd'>>",
repr(structs.PresenceState(available=True,
show=structs.PresenceShow.DND))
)
def test_apply_to_stanza(self):
stanza_obj = stanza.Presence(type_=structs.PresenceType.PROBE)
self.assertEqual(stanza_obj.show, structs.PresenceShow.NONE)
ps = structs.PresenceState(available=True,
show=structs.PresenceShow.DND)
ps.apply_to_stanza(stanza_obj)
self.assertEqual(
structs.PresenceType.AVAILABLE,
stanza_obj.type_
)
self.assertEqual(
structs.PresenceShow.DND,
stanza_obj.show
)
ps = structs.PresenceState()
ps.apply_to_stanza(stanza_obj)
self.assertEqual(
structs.PresenceType.UNAVAILABLE,
stanza_obj.type_
)
self.assertEqual(
stanza_obj.show,
structs.PresenceShow.NONE,
)
def test_from_stanza(self):
stanza_obj = stanza.Presence(
type_=structs.PresenceType.AVAILABLE
)
stanza_obj.show = structs.PresenceShow.XA
self.assertEqual(
structs.PresenceState(available=True,
show=structs.PresenceShow.XA),
structs.PresenceState.from_stanza(stanza_obj)
)
stanza_obj = stanza.Presence(
type_=structs.PresenceType.UNAVAILABLE,
)
self.assertEqual(
structs.PresenceState(available=False),
structs.PresenceState.from_stanza(stanza_obj)
)
def test_from_stanza_reject_incorrect_types(self):
stanza_obj = stanza.Presence(
type_=structs.PresenceType.PROBE
)
with self.assertRaises(ValueError):
structs.PresenceState.from_stanza(stanza_obj)
def test_from_stanza_nonstrict_by_default(self):
stanza_obj = stanza.Presence(
type_=structs.PresenceType.UNAVAILABLE
)
stanza_obj.show = structs.PresenceShow.AWAY
self.assertEqual(
structs.PresenceState(available=False),
structs.PresenceState.from_stanza(stanza_obj)
)
def test_from_stanza_strict_by_default(self):
stanza_obj = stanza.Presence(
type_=structs.PresenceType.UNAVAILABLE,
)
stanza_obj.show = structs.PresenceShow.DND
with self.assertRaises(ValueError):
structs.PresenceState.from_stanza(stanza_obj, strict=True)
class TestLanguageTag(unittest.TestCase):
def test_init_requires_kwargs(self):
with self.assertRaisesRegex(TypeError,
"takes 1 positional argument"):
structs.LanguageTag("foo")
def test_init_requires_language(self):
with self.assertRaisesRegex(ValueError, "tag cannot be empty"):
structs.LanguageTag()
def test_fromstr_match_str(self):
tag = structs.LanguageTag.fromstr("de-Latn-DE-1999")
self.assertEqual(
"de-latn-de-1999",
tag.match_str
)
def test_fromstr_print_str(self):
tag = structs.LanguageTag.fromstr("de-Latn-DE-1999")
self.assertEqual(
"de-Latn-DE-1999",
tag.print_str
)
def test___str__(self):
tag = structs.LanguageTag.fromstr("zh-Hans")
self.assertEqual(
"zh-Hans",
str(tag)
)
tag = structs.LanguageTag.fromstr("de-Latn-DE-1999")
self.assertEqual(
"de-Latn-DE-1999",
str(tag)
)
def test_compare_case_insensitively(self):
tag1 = structs.LanguageTag.fromstr("de-DE")
tag2 = structs.LanguageTag.fromstr("de-de")
tag3 = structs.LanguageTag.fromstr("fr")
self.assertTrue(tag1 == tag2)
self.assertFalse(tag1 != tag2)
self.assertTrue(tag2 == tag1)
self.assertFalse(tag2 != tag1)
self.assertTrue(tag1 != tag3)
self.assertFalse(tag1 == tag3)
self.assertTrue(tag2 != tag3)
self.assertFalse(tag2 == tag3)
self.assertTrue(tag3 != tag1)
self.assertFalse(tag3 == tag1)
self.assertTrue(tag3 != tag1)
self.assertFalse(tag3 == tag1)
def test_order_case_insensitively(self):
tag1 = structs.LanguageTag.fromstr("de-DE")
tag2 = structs.LanguageTag.fromstr("de-de")
tag3 = structs.LanguageTag.fromstr("en-us")
tag4 = structs.LanguageTag.fromstr("fr")
self.assertLess(tag1, tag3)
self.assertLess(tag1, tag4)
self.assertLess(tag2, tag3)
self.assertLess(tag2, tag4)
self.assertLess(tag3, tag4)
self.assertGreater(tag4, tag3)
self.assertGreater(tag4, tag2)
self.assertGreater(tag4, tag1)
self.assertGreater(tag3, tag2)
self.assertGreater(tag3, tag1)
self.assertFalse(tag1 > tag2)
self.assertFalse(tag2 > tag1)
self.assertFalse(tag1 < tag2)
self.assertFalse(tag2 < tag1)
def test_hash_case_insensitively(self):
tag1 = structs.LanguageTag.fromstr("de-DE")
tag2 = structs.LanguageTag.fromstr("de-de")
self.assertEqual(hash(tag1), hash(tag2))
def test_not_equal_to_None(self):
tag1 = structs.LanguageTag.fromstr("de-DE")
self.assertNotEqual(tag1, None)
def test_dont_compare_with_None(self):
tag1 = structs.LanguageTag.fromstr("de-DE")
with self.assertRaises(TypeError):
tag1 > None
with self.assertRaises(TypeError):
tag1 < None
with self.assertRaises(TypeError):
tag1 >= None
with self.assertRaises(TypeError):
tag1 <= None
def test__repr__(self):
tag1 = structs.LanguageTag.fromstr("de-DE")
tag2 = structs.LanguageTag.fromstr("fr")
self.assertEqual(
"<aioxmpp.structs.LanguageTag.fromstr('de-DE')>",
repr(tag1)
)
self.assertEqual(
"<aioxmpp.structs.LanguageTag.fromstr('fr')>",
repr(tag2)
)
def test_immutable(self):
tag = structs.LanguageTag.fromstr("foo")
with self.assertRaises(AttributeError):
tag.foo = "bar"
class TestLanguageRange(unittest.TestCase):
def test_init_requires_kwargs(self):
with self.assertRaisesRegex(TypeError,
"takes 1 positional argument"):
structs.LanguageRange("foo")
def test_init_requires_language(self):
with self.assertRaisesRegex(ValueError, "range cannot be empty"):
structs.LanguageRange()
def test_fromstr_match_str(self):
tag = structs.LanguageRange.fromstr("de-DE")
self.assertEqual(
"de-de",
tag.match_str
)
def test_fromstr_print_str(self):
tag = structs.LanguageRange.fromstr("de-Latn-DE-1999")
self.assertEqual(
"de-Latn-DE-1999",
tag.print_str
)
def test___str__(self):
tag = structs.LanguageRange.fromstr("zh-Hans")
self.assertEqual(
"zh-Hans",
str(tag)
)
tag = structs.LanguageRange.fromstr("de-Latn-DE-1999")
self.assertEqual(
"de-Latn-DE-1999",
str(tag)
)
def test_compare_case_insensitively(self):
tag1 = structs.LanguageRange.fromstr("de-DE")
tag2 = structs.LanguageRange.fromstr("de-de")
tag3 = structs.LanguageRange.fromstr("fr")
self.assertTrue(tag1 == tag2)
self.assertFalse(tag1 != tag2)
self.assertTrue(tag2 == tag1)
self.assertFalse(tag2 != tag1)
self.assertTrue(tag1 != tag3)
self.assertFalse(tag1 == tag3)
self.assertTrue(tag2 != tag3)
self.assertFalse(tag2 == tag3)
self.assertTrue(tag3 != tag1)
self.assertFalse(tag3 == tag1)
self.assertTrue(tag3 != tag1)
self.assertFalse(tag3 == tag1)
def test_hash_case_insensitively(self):
tag1 = structs.LanguageRange.fromstr("de-DE")
tag2 = structs.LanguageRange.fromstr("de-de")
self.assertEqual(hash(tag1), hash(tag2))
def test_not_equal_to_None(self):
r1 = structs.LanguageRange.fromstr("de-DE")
self.assertNotEqual(r1, None)
def test_wildcard(self):
r1 = structs.LanguageRange.fromstr("*")
r2 = structs.LanguageRange.fromstr("*")
self.assertIs(r1, r2)
def test_strip_rightmost(self):
r = structs.LanguageRange.fromstr("de-Latn-DE-x-foo")
self.assertEqual(
structs.LanguageRange.fromstr("de-Latn-DE"),
r.strip_rightmost()
)
self.assertEqual(
structs.LanguageRange.fromstr("de-Latn"),
r.strip_rightmost().strip_rightmost()
)
self.assertEqual(
structs.LanguageRange.fromstr("de"),
r.strip_rightmost().strip_rightmost().strip_rightmost()
)
with self.assertRaises(ValueError):
r.strip_rightmost().strip_rightmost()\
.strip_rightmost().strip_rightmost()
def test_immutable(self):
r = structs.LanguageRange.fromstr("foo")
with self.assertRaises(AttributeError):
r.foo = "bar"
class Testbasic_filter_languages(unittest.TestCase):
def setUp(self):
self.languages = [
structs.LanguageTag.fromstr("de-Latn-DE-1999"),
structs.LanguageTag.fromstr("de-DE"),
structs.LanguageTag.fromstr("de-Latn"),
structs.LanguageTag.fromstr("fr-CH"),
structs.LanguageTag.fromstr("it"),
]
def test_filter(self):
self.assertSequenceEqual(
[
self.languages[0],
self.languages[1],
self.languages[2],
],
list(structs.basic_filter_languages(
self.languages,
list(map(structs.LanguageRange.fromstr, [
"de",
]))
))
)
self.assertSequenceEqual(
[
self.languages[1],
],
list(structs.basic_filter_languages(
self.languages,
list(map(structs.LanguageRange.fromstr, [
"de-DE",
]))
))
)
self.assertSequenceEqual(
[
self.languages[0],
self.languages[2],
],
list(structs.basic_filter_languages(
self.languages,
list(map(structs.LanguageRange.fromstr, [
"de-Latn",
]))
))
)
def test_filter_no_dupes_and_ordered(self):
self.assertSequenceEqual(
[
self.languages[0],
self.languages[2],
self.languages[1],
],
list(structs.basic_filter_languages(
self.languages,
list(map(structs.LanguageRange.fromstr, [
"de-Latn",
"de",
]))
))
)
def test_filter_wildcard(self):
self.assertSequenceEqual(
self.languages,
list(structs.basic_filter_languages(
self.languages,
list(map(structs.LanguageRange.fromstr, [
"fr",
"*",
]))
))
)
class Testlookup_language(unittest.TestCase):
def setUp(self):
self.languages = [
structs.LanguageTag.fromstr("de-Latn-DE-1999"),
structs.LanguageTag.fromstr("fr-CH"),
structs.LanguageTag.fromstr("it"),
]
def test_match_direct(self):
self.assertEqual(
structs.LanguageTag.fromstr("fr-CH"),
structs.lookup_language(
self.languages,
list(map(structs.LanguageRange.fromstr, [
"en",
"fr-ch",
"de-de"
]))
)
)
self.assertEqual(
structs.LanguageTag.fromstr("it"),
structs.lookup_language(
self.languages,
list(map(structs.LanguageRange.fromstr, [
"it",
]))
)
)
def test_decay(self):
self.assertEqual(
structs.LanguageTag.fromstr("de-Latn-DE-1999"),
structs.lookup_language(
self.languages,
list(map(structs.LanguageRange.fromstr, [
"de-de",
"en-GB",
"en"
]))
)
)
self.assertEqual(
structs.LanguageTag.fromstr("fr-CH"),
structs.lookup_language(
self.languages,
list(map(structs.LanguageRange.fromstr, [
"fr-FR",
"de-DE",
"fr",
]))
)
)
def test_decay_skips_extension_prefixes_properly(self):
self.assertEqual(
structs.LanguageTag.fromstr("de-DE"),
structs.lookup_language(
list(map(structs.LanguageTag.fromstr, [
"de-DE",
"de-x",
])),
list(map(structs.LanguageRange.fromstr, [
"de-x-foobar",
]))
)
)
class TestLanguageMap(unittest.TestCase):
def test_implements_mapping(self):
mapping = structs.LanguageMap()
self.assertIsInstance(
mapping,
collections.abc.MutableMapping
)
def test_mapping_interface(self):
key1 = structs.LanguageTag.fromstr("de-DE")
key2 = structs.LanguageTag.fromstr("en-US")
key3 = structs.LanguageTag.fromstr("en")
mapping = structs.LanguageMap()
self.assertFalse(mapping)
self.assertEqual(0, len(mapping))
mapping[key1] = 10
self.assertIn(key1, mapping)
self.assertEqual(
10,
mapping[key1]
)
self.assertSetEqual(
{key1},
set(mapping)
)
mapping[key2] = 20
self.assertIn(key2, mapping)
self.assertEqual(
20,
mapping[key2]
)
self.assertSetEqual(
{key1, key2},
set(mapping)
)
key2_prime = structs.LanguageTag.fromstr("en-us")
self.assertIn(key2_prime, mapping)
self.assertEqual(
20,
mapping[key2_prime]
)
self.assertNotIn(key3, mapping)
del mapping[key1]
self.assertNotIn(key1, mapping)
mapping.clear()
self.assertNotIn(key2, mapping)
def test_lookup(self):
key1 = structs.LanguageTag.fromstr("de-DE")
key2 = structs.LanguageTag.fromstr("en-US")
key3 = structs.LanguageTag.fromstr("en")
mapping = structs.LanguageMap()
mapping[key1] = 10
mapping[key2] = 20
mapping[key3] = 30
self.assertEqual(
30,
mapping.lookup([structs.LanguageRange.fromstr("en-GB")])
)
def test_values(self):
key1 = structs.LanguageTag.fromstr("de-DE")
key2 = structs.LanguageTag.fromstr("en-US")
key3 = structs.LanguageTag.fromstr("en")
mapping = structs.LanguageMap()
mapping[key1] = 10
mapping[key2] = 20
mapping[key3] = 30
self.assertSetEqual(
{10, 20, 30},
set(mapping.values())
)
def test_keys(self):
key1 = structs.LanguageTag.fromstr("de-DE")
key2 = structs.LanguageTag.fromstr("en-US")
key3 = structs.LanguageTag.fromstr("en")
mapping = structs.LanguageMap()
mapping[key1] = 10
mapping[key2] = 20
mapping[key3] = 30
self.assertSetEqual(
{key1, key2, key3},
set(mapping.keys())
)
def test_items(self):
key1 = structs.LanguageTag.fromstr("de-DE")
key2 = structs.LanguageTag.fromstr("en-US")
key3 = structs.LanguageTag.fromstr("en")
mapping = structs.LanguageMap()
mapping[key1] = 10
mapping[key2] = 20
mapping[key3] = 30
self.assertSetEqual(
{
(key1, 10),
(key2, 20),
(key3, 30),
},
set(mapping.items())
)
def test_equality(self):
mapping1 = structs.LanguageMap()
mapping1[structs.LanguageTag.fromstr("de-de")] = 10
mapping1[structs.LanguageTag.fromstr("en-US")] = 20
mapping2 = structs.LanguageMap()
mapping2[structs.LanguageTag.fromstr("de-DE")] = 10
mapping2[structs.LanguageTag.fromstr("en-US")] = 20
mapping3 = structs.LanguageMap()
mapping3[structs.LanguageTag.fromstr("de-DE")] = 10
mapping3[structs.LanguageTag.fromstr("en-GB")] = 20
self.assertEqual(
mapping1,
mapping2
)
self.assertFalse(mapping1 != mapping2)
self.assertNotEqual(
mapping1,
mapping3
)
self.assertFalse(mapping1 == mapping3)
self.assertNotEqual(
mapping2,
mapping3
)
self.assertFalse(mapping2 == mapping3)
def test_setdefault(self):
l = []
mapping = structs.LanguageMap()
result = mapping.setdefault(structs.LanguageTag.fromstr("de-de"), l)
self.assertIs(result, l)
result = mapping.setdefault(structs.LanguageTag.fromstr("de-de"), [])
self.assertIs(result, l)
def test_lookup_returns_None_key_if_nothing_matches(self):
mapping = structs.LanguageMap()
mapping[None] = "foobar"
mapping[structs.LanguageTag.fromstr("de")] = "Test"
mapping[structs.LanguageTag.fromstr("en")] = "test"
self.assertEqual(
"foobar",
mapping.lookup([structs.LanguageRange.fromstr("it")])
)
def test_any_returns_only_key(self):
m = structs.LanguageMap()
m[None] = "fnord"
self.assertEqual(m.any(), "fnord")
m = structs.LanguageMap()
m[structs.LanguageTag.fromstr("de")] = "Test"
self.assertEqual(m.any(), "Test")
def test_any_raises_ValueError_on_empty_map(self):
m = structs.LanguageMap()
with self.assertRaises(ValueError):
m.any()
def test_any_prefers_None(self):
m = structs.LanguageMap()
m[structs.LanguageTag.fromstr("de")] = "Test"
m[None] = "fnord"
self.assertEqual(m.any(), "fnord")
m = structs.LanguageMap()
m[None] = "fnord"
m[structs.LanguageTag.fromstr("de")] = "Test"
self.assertEqual(m.any(), "fnord")
def test_any_returns_same_key_for_same_keyset(self):
m = structs.LanguageMap()
m[structs.LanguageTag.fromstr("de")] = "Test"
m[structs.LanguageTag.fromstr("fr")] = "fnord"
self.assertEqual(m.any(), "Test")
m = structs.LanguageMap()
m[structs.LanguageTag.fromstr("fr")] = "fnord"
m[structs.LanguageTag.fromstr("de")] = "Test"
self.assertEqual(m.any(), "Test")
class Testjid_escape(unittest.TestCase):
def test_vectors(self):
for unescaped, escaped in ESCAPING_TEST_VECTORS:
self.assertEqual(structs.jid_escape(unescaped),
escaped,
unescaped)
class Testjid_unescape(unittest.TestCase):
def test_vectors(self):
for unescaped, escaped in ESCAPING_TEST_VECTORS:
self.assertEqual(structs.jid_unescape(escaped),
unescaped,
escaped)
|
horazont/aioxmpp
|
tests/test_structs.py
|
Python
|
lgpl-3.0
| 46,947
|
from django.contrib import admin
from polls.models import Choice, Poll
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class PollAdmin(admin.ModelAdmin):
list_display = ('question', 'pub_date', 'was_published_recently')
#fieldsets = [
# (None, {'fields': ['question']}),
# ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
#]
#inlines = [ChoiceInline]
admin.site.register(Poll, PollAdmin)
|
minaevmike/praktica
|
Diplom/mysite/polls/admin.py
|
Python
|
gpl-2.0
| 481
|
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
# To ensure security of users' info
from app import db, login_manager
# Models for my database
class Users(UserMixin, db.Model):
"""
User table for user signup info
"""
# Retains plural of table names
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
# Id for unique reference in the database
email = db.Column(db.String(60), index=True, unique=True)
    # Limiting string length and requiring input to be unique
username = db.Column(db.String(30), index=True, unique=True)
# Ensure input is unique
first_name = db.Column(db.String(30), index=True)
# Ensure input is string
last_name = db.Column(db.String(60), index=True)
password_hash = db.Column(db.String(64))
# limit password string length
    @property
    def password(self):
        # Prevent the plain-text password from being read back
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        """
        Enable the password to be hashed
        """
        self.password_hash = generate_password_hash(password)
def password_verify(self, password):
"""
Check if both passwords match
"""
return check_password_hash(self.password_hash, password)
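
    # Illustrative usage sketch (values hypothetical): the password setter
    # above hashes on assignment and password_verify checks a candidate:
    #
    #     user = Users(email='a@b.test', username='amina')
    #     user.password = 's3cret'          # stored as password_hash
    #     user.password_verify('s3cret')    # -> True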
def __repr__(self):
return '<User: {}>'.format(self.username)
# Formatting the user output in the flask terminal
# Set up user_loader to load the current user from the session
@login_manager.user_loader
def load_user(user_id):
    # Loads the user in by primary key
    return Users.query.get(int(user_id))
|
Maxwell-Icharia/bc-14-pairprog
|
app/models.py
|
Python
|
gpl-3.0
| 1,572
|
from flask_security import RoleMixin

from crm import db  # shared SQLAlchemy instance; exact import path assumed


class Role(RoleMixin, db.Model):
__tablename__ = 'role'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
def __eq__(self, other):
return (self.name == other or
self.name == getattr(other, 'name', None))
def __ne__(self, other):
return (self.name != other and
self.name != getattr(other, 'name', None))
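
    # Illustrative note: the __eq__/__ne__ overrides above let a Role be
    # compared against a plain string (Flask-Security style role checks):
    #
    #     Role(name='admin') == 'admin'   # -> True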
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
|
Elpieich/teamwork
|
crm/models/role.py
|
Python
|
mit
| 638
|
## CSC320 Winter 2016
## Assignment 2
## (c) Kyros Kutulakos
##
## DISTRIBUTION OF THIS CODE ANY FORM (ELECTRONIC OR OTHERWISE,
## AS-IS, MODIFIED OR IN PART), WITHOUT PRIOR WRITTEN AUTHORIZATION
## BY THE INSTRUCTOR IS STRICTLY PROHIBITED. VIOLATION OF THIS
## POLICY WILL BE CONSIDERED AN ACT OF ACADEMIC DISHONESTY
##
## DO NOT MODIFY THIS FILE ANYWHERE EXCEPT AT THE VERY END,
## AS INDICATED
##
##
## This file defines the ImageViewer widget class. This
## class manages all image display functionality of the GUI.
## It is a generic image display widget that does not depend
## on the inpainting algorithm in any way.
##
## This class was adapted from Kivy's Pictures tutorial example
## and relies on Kivy's built-in Scatter widget class.
##
import kivy
kivy.require('1.9.1')
import io
import sys
import numpy as np
import cv2 as cv
from kivy.logger import Logger
from kivy.uix.scatter import Scatter
from kivy.uix.label import Label
from kivy.core.image import Image as CoreImage
from kivy.graphics.texture import Texture
from kivy.properties import StringProperty
from kivy.properties import ObjectProperty
from kivy.graphics import *
#
# Convert from OpenCV's internal memory representation for
# images to Kivy's internal memory representation for images.
# There is likely an easier way to do this but I haven't
# quite found it. For the time being, I convert the image from
# OpenCV to PNG and then from PNG to Kivy. These
# conversions are performed in memory (ie. nothing written to disk)
#
def openCV_to_kivy(cvImage):
# encode the OpenCV image in PNG format
_, imPNG = cv.imencode(".png", cvImage)
# create a binary data stream for reading that data
data = io.BytesIO(imPNG.tobytes())
# create a Kivy Core Image data structure to hold that data
return CoreImage(data, ext="png")
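
# Illustrative usage (file name hypothetical):
#
#     cv_image = cv.imread('photo.png')
#     core_image = openCV_to_kivy(cv_image)
#     widget.texture = core_image.texture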
#
# A user-defined widget class for image display, derived from Kivy's
# built-in Scatter widget class
#
class ImageViewer(Scatter):
source = StringProperty(None)
centerInitialized = False
# initial position of bottom-left corner of image frame when no image has been loaded yet
init_pos = [106,82]
display_list = dict([])
display_list_groups = dict([])
# Display an OpenCV image by creating a Kivy Texture object whose
# texture memory stores the OpenCV image
# I am not using mip-mapped Kivy textures for the time being but this
# really should be done...
def display_opencv_image(self, im = None, name = None):
if im is not None:
# convert an image in OpenCV's native in-memory format to
# kivy's in-memory format
kivyImage = openCV_to_kivy(im)
# display the image over a white background
self.ids.image.color = [1,1,1,1]
# set the kivy Image's texture data
self.ids.image.texture = kivyImage.texture
# make sure the image is displayed at its full size
self.resize(kivyImage.size,0)
# store the initial position of the image so we can
# move the image back there if we wish to do so
self.init_pos = self.pos
# compute the 3x3 openCV to kivy coordinate transformation
# that maps openCV (row,col) coordinates to kivy (x,y) coordinates
self._openCVRowColToKivyXY = np.array([[0, 1, 0],
[-1, 0, im.shape[0]-1],
[0, 0, 1]])
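            # e.g. OpenCV pixel (row=0, col=0) maps to kivy (x=0, y=height-1),
            # since kivy's origin is bottom-left while OpenCV's is top-left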
else:
# if we don't have an image, just display a black background
# in its place
self.ids.image.color = [0,0,0,1]
# Set the size parameter of the Kivy Image object so that the image
# it stores is drawn in its entirety on the canvas. Since the image
# contains a border as well as a shadow region around it, we must
# account for them when deciding on the total size of the image in pixels
def resize(self, size, shadow):
# reduce the image size by the amount needed for shadow rendering
newSize = [size[0]-shadow*2, size[1]-shadow*2]
        # update the image size while preserving aspect ratio
aspect = self.ids.image.image_ratio
if aspect > 1.0:
if newSize[0]/aspect > newSize[1]:
# image fits in the window at full width
newSize2 = [newSize[0], newSize[0]/aspect]
else:
newSize2 = [newSize[1]*aspect, newSize[1]]
else:
if newSize[0]/aspect > newSize[1]:
# image fits in the window at full width
newSize2 = [newSize[1]*aspect, newSize[1]]
else:
newSize2 = [newSize[0], newSize[0]/aspect]
self.ids.image.size = newSize2
# Reposition the Kivy image
def repos(self, pos, shadow):
self.pos = [pos[0]+shadow, pos[1]+shadow]
# function to handle a mouse button pressed event
def on_touch_down_callback(self, touch):
# we are only interested in mouse button events
if 'button' not in touch.profile:
return
# we are only interested in left mouse button events
if (touch.button != 'left'):
return
# if a left-double-tap is detected, we reset the image display
if touch.is_double_tap:
# if the image was zoomed in or out, reset its scale to 1
self.scale=1
# if the image was rotated, reset its rotation angle to zero
self.rotation=0
# move image to the position when it was originally constructed
# and displayed
self.pos = self.init_pos
# delete any axes we've already drawn
            if hasattr(self, 'ud') and 'group' in self.ud:
self.canvas.remove_group(self.ud['group'])
else:
# get the user-defined data dictionary for the touch event
self.ud = touch.ud
# get the unique ID of the touch event and store it in the
# dictionary
self.ud['group'] = str(touch.uid)
# store the list of OpenGL drawing commands for drawing the axes
with self.canvas:
# command to draw the axes in red
Color(1, 0, 0, mode='rgb', group=self.ud['group'])
# commands to draw the lines themselves
self.ud['lines'] = [
Line(points=[touch.pos[0], 0,
touch.pos[0], self.ids.image.height
], group=self.ud['group']),
Line(points=[0, touch.pos[1],
self.ids.image.width, touch.pos[1],
], group=self.ud['group'])
]
# create a label widget that will display the pixel position
self.ud['label'] = Label(size_hint=(None, None))
# update the label's content
self.update_touch_label(self.ud['label'], touch)
# display the widget
self.add_widget(self.ud['label'])
def update_touch_label(self, label, touch):
# create a new text string for the label
label.text = '(x,y) = (%d, %d)' % (touch.x, touch.y)
# trigger a refresh of the label's contents
label.texture_update()
# reposition the label
label.pos = touch.pos
label.size = label.texture_size[0] + 20, label.texture_size[1] + 20
#
# Methods for converting OpenCV (r,c) coordinates to Kivy (x,y) coordinates
#
def openCVRowColToKivyXY(self, r, c):
pt = np.dot(self._openCVRowColToKivyXY, np.array([[r],[c],[1]]))
return pt[0], pt[1]
def openCVRowColToKivyXYPts(self, mat):
return np.dot(self._openCVRowColToKivyXY, mat)
#
# Methods for drawing basic geometric shapes over images in Kivy
#
def draw_set_group(self, group):
        if group not in self.display_list:
self.display_list[group] = []
self.display_list_groups[group] = False
def draw_remove_group(self, group):
        if group in self.display_list_groups:
            del self.display_list_groups[group]
        if group in self.display_list:
self.canvas.remove_group(group)
del self.display_list[group]
def draw_remove_all(self):
for g in self.display_list_groups:
self.canvas.remove_group(g)
self.display_list_groups = dict([])
self.display_list = dict([])
def draw_enable_group(self, group):
self.draw_set_group(group)
self.display_list_groups[group] = True
def draw_disable_group(self, group):
self.draw_set_group(group)
self.display_list_groups[group] = False
def draw_color(self, red=1, green=1, blue=1, group='default'):
self.draw_set_group(group)
c = Color(red, green, blue, mode='rgb', group=group)
self.display_list[group].append(c)
def draw_point(self, r=0, c=0, group='default'):
x, y = self.openCVRowColToKivyXY(r, c)
self.draw_set_group(group)
p = Point(points=[x,y], group=group)
self.display_list[group].append(p)
def draw_line(self, r0=0, c0=0, r1=0, c1=0, group='default'):
x0, y0 = self.openCVRowColToKivyXY(r0, c0)
x1, y1 = self.openCVRowColToKivyXY(r1, c1)
        self._draw_line(x0=x0, y0=y0, x1=x1, y1=y1, group=group)
def _draw_line(self, x0=0, y0=0, x1=0, y1=0, group='default'):
self.draw_set_group(group)
l = Line(points=[x0, y0, x1, y1], group=group)
self.display_list[group].append(l)
def draw_vector(self, r=0, c=0, angle=0, length=10, arrow=0.2, group='default'):
points = np.array([[0, length, length, length-length*arrow, length, length-length*arrow],
[0, 0, 0, length*arrow, 0, -length*arrow],
[1, 1, 1, 1, 1, 1]])
tmat = np.array([[np.cos(angle), -np.sin(angle), r],
[np.sin(angle), np.cos(angle), c],
[0, 0, 1]])
vecPoints = np.dot(tmat,points)
pts = self.openCVRowColToKivyXYPts(vecPoints)
for i in range(0,3):
self._draw_line(x0=pts[0,2*i], y0=pts[1,2*i],
x1=pts[0,2*i+1], y1=pts[1,2*i+1],
group=group)
def draw_rectangle_centered(self, r=0, c=0, radius=0, group='default'):
x, y = self.openCVRowColToKivyXY(r, c)
self.draw_set_group(group)
        rect = Line(rectangle=(x-radius, y-radius, 2*radius+1, 2*radius+1), group=group)
        self.display_list[group].append(rect)
def draw_enabled(self):
for group, enabled in self.display_list_groups.items():
self.canvas.remove_group(group)
if enabled:
for obj in self.display_list[group]:
self.canvas.add(obj)
#########################################
## PLACE YOUR CODE BETWEEN THESE LINES ##
#########################################
# COPY THE CODE YOU IMPLEMENTED IN viewer.py of A1-PartB
# TO THIS SPACE IN ORDER TO HANDLE MOUSE BUTTON RELEASE EVENTS
    def on_touch_up_callback(self, touch):
        # remove the label and axes drawn for the corresponding touch-down
        if hasattr(self, 'ud') and 'label' in self.ud:
            self.remove_widget(self.ud['label'])
            self.canvas.remove_group(self.ud['group'])
#########################################
|
ericxyan/ImageInpainting
|
code/inpaintingui/viewer.py
|
Python
|
mit
| 11,637
|
import time
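# wptserve-style handler: the "ms" query parameter (default 500) sets the
# delay in milliseconds, e.g. GET /delay.py?ms=1000 responds after ~1s.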
def main(request, response):
delay = float(request.GET.first("ms", 500))
    time.sleep(delay / 1E3)
return [("Content-type", "text/plain")], "TEST_DELAY"
|
xiaojunwu/crosswalk-test-suite
|
webapi/tct-xmlhttprequest-w3c-tests/xmlhttprequest-py/w3c/resources/delay.py
|
Python
|
bsd-3-clause
| 177
|
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Outline nodes.
We use them for re-formulations and for inlining of code. They are expressions
that get their value from return statements in their code body. They do not
own anything by themselves. It's just a way of having try/finally for the
expressions.
"""
from .NodeBases import ChildrenHavingMixin, ExpressionChildrenHavingBase
class ExpressionOutlineBody(ExpressionChildrenHavingBase):
""" Outlined code.
This is for a call to a piece of code to be executed in a specific
context. It contains an exclusively owned function body, that has
no other references, and can be considered part of the calling
context.
It must return a value, to use as expression value.
"""
kind = "EXPRESSION_OUTLINE_BODY"
named_children = (
"body",
)
def __init__(self, provider, name, body, source_ref):
assert name != ""
ExpressionChildrenHavingBase.__init__(
self,
values = {
"body" : body
},
source_ref = source_ref
)
self.provider = provider
self.name = name
self.temp_scope = None
        # Hack: This allows some APIs to work although this is not
        # officially a child yet. Important during building.
self.parent = provider
def getDetails(self):
return {
"provider" : self.provider,
"name" : self.name
}
getBody = ChildrenHavingMixin.childGetter("body")
setBody = ChildrenHavingMixin.childSetter("body")
def getOutlineTempScope(self):
# We use our own name as a temp_scope, cached from the parent, if the
# scope is None.
if self.temp_scope is None:
self.temp_scope = self.provider.allocateTempScope(self.name)
return self.temp_scope
def allocateTempVariable(self, temp_scope, name):
if temp_scope is None:
temp_scope = self.getOutlineTempScope()
return self.provider.allocateTempVariable(
temp_scope = temp_scope,
name = name
)
def allocateTempScope(self, name):
# Let's scope the temporary scopes by the outline they come from.
return self.provider.allocateTempScope(
name = self.name + '$' + name
)
def computeExpressionRaw(self, constraint_collection):
owning_module = self.getParentModule()
# Make sure the owning module is added to the used set. This is most
# important for helper functions, or modules, which otherwise have
# become unused.
from nuitka.ModuleRegistry import addUsedModule
addUsedModule(owning_module)
abort_context = constraint_collection.makeAbortStackContext(
catch_breaks = False,
catch_continues = False,
catch_returns = True,
catch_exceptions = False
)
with abort_context:
body = self.getBody()
result = body.computeStatementsSequence(
constraint_collection = constraint_collection
)
if result is not body:
self.setBody(result)
body = result
return_collections = constraint_collection.getFunctionReturnCollections()
constraint_collection.mergeMultipleBranches(return_collections)
if body.getStatements()[0].isStatementReturn():
return (
body.getStatements()[0].getExpression(),
"new_expression",
"Outline is now simple expression, use directly."
)
# TODO: Function outline may become too trivial to outline and return
# collections may tell us something.
return self, None, None
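# Illustrative sketch (conceptual only, no Nuitka API implied): an outline is
# roughly the inline analogue of
#
#     def _outline():
#         # statements produced by a re-formulation or an inlined call
#         return result
#     value = _outline()
#
# except that the body is exclusively owned by the expression node, so its
# return statements become the expression's value without a real function.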
|
wfxiang08/Nuitka
|
nuitka/nodes/OutlineNodes.py
|
Python
|
apache-2.0
| 4,590
|
import cmath
import numpy.testing as npt
import scipy.special
from pymwm.cutoff import Cutoff
def test_cutoffs():
co = Cutoff(2, 2)
num_rr = len(co.r_ratios)
assert co.num_n == 2
assert co.num_m == 2
assert len(co.samples) == co.num_n * (2 * co.num_m + 1) * num_rr
co = Cutoff(2, 1)
assert co.num_m == 1
assert len(co.samples) == co.num_n * (2 * co.num_m + 1) * num_rr
def test_cutoffs_TE_r1_0():
co = Cutoff(3, 3)
num_rr = len(co.r_ratios)
assert co.num_n == 3
assert co.num_m == 3
assert len(co.samples) == co.num_n * (2 * co.num_m + 1) * num_rr
for n in range(3):
bessels = scipy.special.jnp_zeros(n, 3)
for m in range(1, 4):
s = co.samples.query(f"pol=='E' and n=={n} and m=={m} and irr==0").iloc[0]
print(s["val"], bessels[m - 1])
assert s["val"] == bessels[m - 1]
def test_cutoffs_TM_r1_0():
co = Cutoff(3, 3)
s = co.samples.query("pol=='M' and n==0 and m==1 and irr==0").iloc[0]
assert s["val"] == 0.0
bessels = scipy.special.jn_zeros(0, 4)
for m in range(2, 5):
s = co.samples.query(f"pol=='M' and n==0 and m=={m} and irr==0").iloc[0]
print(s["val"], bessels[m - 2])
assert s["val"] == bessels[m - 2]
for n in range(1, 3):
bessels = scipy.special.jn_zeros(n, 4)
for m in range(1, 5):
s = co.samples.query(f"pol=='M' and n=={n} and m=={m} and irr==0").iloc[0]
print(s["val"], bessels[m - 1])
assert s["val"] == bessels[m - 1]
def test_cutoffs_TE():
co = Cutoff(2, 3)
num_rr = len(co.r_ratios)
assert co.num_n == 2
assert co.num_m == 3
assert len(co.samples) == co.num_n * (2 * co.num_m + 1) * num_rr
a = 0.1
c = 0.5
r_ratio = a / c
k0 = 0.3
eps = 1
kc = co(("E", 1, 2), r_ratio) / c
beta = (1 + 1j) * cmath.sqrt(-0.5j * (eps * k0 ** 2 - kc ** 2))
val = beta ** 2
print(val)
npt.assert_allclose(val, -98.35032331623853 + 0j, rtol=1e-6)
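# Illustrative check of the branch trick above: it satisfies
# beta**2 == eps * k0**2 - kc**2 exactly, since
# ((1 + 1j) * cmath.sqrt(-0.5j * w)) ** 2 == (1 + 1j) ** 2 * (-0.5j) * w
#                                         == 2j * (-0.5j) * w == w,
# while the rotated square-root argument presumably selects the branch that
# gives a decaying (physically meaningful) mode below cutoff.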
def test_cutoffs_cython():
co = Cutoff(3, 3)
num_rrs = len(co.r_ratios)
df1 = co.cutoffs()
df2 = co.cutoffs_numpy()
for pol, m_end in [("M", 4), ("E", 4)]:
for n in range(3):
for m in range(1, m_end):
for irr in range(num_rrs):
val1 = df1[
(df1["pol"] == pol)
& (df1["n"] == n)
& (df1["m"] == m)
& (df1["irr"] == irr)
]["val"].iloc[0]
val2 = df2[
(df2["pol"] == pol)
& (df2["n"] == n)
& (df2["m"] == m)
& (df2["irr"] == irr)
]["val"].iloc[0]
try:
npt.assert_almost_equal(val1, val2)
except Exception as e:
print(pol, n, m, irr, num_rrs, val1, val2)
raise e
def test_cutoffs_samples(num_regression):
df = Cutoff(16, 8).samples
d = {}
for pol, m_end in [("M", 10), ("E", 9)]:
for n in range(16):
for m in range(1, m_end):
df1 = df[(df["pol"] == pol) & (df["n"] == n) & (df["m"] == m)]
d[f"{pol}_{n}_{m}_rr"] = df1["rr"].to_numpy()
d[f"{pol}_{n}_{m}_val"] = df1["val"].to_numpy()
num_regression.check(d)
|
mnishida/PyMWM
|
tests/test_cutoff.py
|
Python
|
mit
| 3,542
|
for _ in range(int(input())):
print(2**32 - 1 - int(input()))
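# Illustrative note: for 0 <= n < 2**32 the subtraction never borrows, so
# 2**32 - 1 - n equals the 32-bit bitwise complement n ^ 0xFFFFFFFF, which is
# exactly the "flipping bits" task.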
|
csixteen/HackerRank_Python
|
Algorithms/flipping_bits.py
|
Python
|
mit
| 66
|
# main.py
#
# Copyright 2011 Hugo Teso <hugo.teso@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os, sys
import platform
import lib.bokken_globals as glob
import lib.common as common
# Perform the GTK UI dependency check here
import ui.dependency_check as dependency_check
dependency_check.check_all()
# Now that I know that I have them, import them!
import gtk
import gobject
# This is just general info, to help people know their system
print("Starting bokken {}, running on:".format(glob.version))
print(" Python version:")
print("\n".join(" "+x for x in sys.version.split("\n")))
print(" GTK version: {}".format(".".join(str(x) for x in gtk.gtk_version)))
print(" PyGTK version: {}\n".format(".".join(str(x) for x in gtk.pygtk_version)))
import ui.gtk2.common
import ui.textviews as textviews
import ui.statusbar as statusbar
import ui.file_dialog as file_dialog
MAINTITLE = "Bokken "
class BokkenGTKClient:
'''Main GTK application'''
def __init__(self, target, backend):
import time
# Allow only the main thread to touch the GUI (gtk) part, while letting
# other threads do background work.
gobject.threads_init()
self.target = target
self.backend = backend
self.empty_gui = False
# Variable to hold the Process object in case we choose to disassemble a binary.
self.dasm_process = False
# Check if we have, at least, one available core; otherwise exit.
        # TODO: Should this be removed, now that there is one core and dependency_check does the core check?
if not glob.has_radare:
md = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, None)
md.set_markup("<big><b>No backend engine found!</b></big>")
md.format_secondary_markup((
'Install radare to run bokken:\n\n'
'<b>Radare:</b>\t<a href="http://radare.org/">'
'http://radare.org</a>'))
md.run()
md.destroy()
sys.exit(1)
# Start up the HTTP server.
if glob.http_server:
import lib.http as httpd
http = httpd.BokkenHttpServer(glob.http_server_bind_address,
glob.http_server_port)
print("\nBringing up HTTP server on %s:%d." %
(glob.http_server_bind_address, glob.http_server_port))
# We start the thread.
http.start()
time.sleep(0.2)
if not http.is_alive():
print('Unable to bind to %s:%d.' %
(glob.http_server_bind_address, glob.http_server_port))
return None
# We put the http structure in glob to have it accessible in the
# global __main__ handler.
glob.http = http
# Launch file selection dialog
dialog = file_dialog.FileDialog(self.target, True)
resp = dialog.run()
if resp == gtk.RESPONSE_DELETE_EVENT or resp == gtk.RESPONSE_REJECT:
return None
# Get dialog selected file, backend and options
self.target = dialog.file
self.backend = 'radare' # I'm leaving that as may be needed in the future
# Load core
import ui.radare_core as core
self.uicore = core.Core(dialog)
# Create a global object under glob.
glob.core = self.uicore
if self.target:
# Just open the target if path is correct or an url
if not os.path.isfile(self.target):
print(common.console_color('Incorrect file argument: %s' %
self.target, 'red'))
sys.exit(1)
self.load_file(self.target)
if not self.uicore.file_loaded:
error_msg = "Error opening file " + self.target
md = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, None)
md.set_markup("<big><b>File open error!</b></big>")
md.format_secondary_markup(error_msg)
md.run()
md.destroy()
                print(error_msg)
sys.exit(1)
ui.gtk2.common.repaint()
else:
self.empty_gui = True
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_focus = True
self.window.connect("delete_event", self.quit)
ui.gtk2.common.set_bokken_icon(self.window)
gtk.settings_get_default().set_long_property("gtk-button-images", True, "main")
# Title
self.window.set_title(MAINTITLE + glob.version + " - " + self.target)
# Positions
#self.window.resize(1000, 700)
#self.window.move(25, 25)
self.window.set_position(gtk.WIN_POS_CENTER)
# Maximize window
#self.window.maximize()
# Create VBox to contain top buttons and other VBox
self.supervb = gtk.VBox(False, 1)
# Create top buttons and add to VBox
import ui.radare_toolbar as toolbar
self.topbuttons = toolbar.TopButtons(self.uicore, self)
self.supervb.pack_start(self.topbuttons, False, True, 1)
# Create VBox to contain textviews and statusbar
self.mainvb = gtk.VBox(False, 1)
self.supervb.pack_start(self.mainvb, True, True, 1)
# Initialize and add TextViews
self.tviews = textviews.TextViews(self.uicore, self)
# Create toolbar show/hide tabs menu
self.topbuttons.menu.create_view_menu()
# Initialize and add Statusbar
self.sbar = statusbar.Statusbar(self.uicore, self.tviews)
self.sbar.create_statusbar()
# Add textviews and statusbar to the VBox
self.mainvb.pack_start(self.tviews, True, True, 1)
self.mainvb.pack_start(self.sbar, False, True, 1)
self.window.add(self.supervb)
# Disable all until file loads
self.disable_all()
if self.empty_gui:
self.show_empty_gui()
self.show_file_data()
self.tviews.console.add_message('Bokken ' + glob.version + ' ready')
self.tviews.console.add_message('Starting background analysis')
self.window.show_all()
# Hide left tree for plain or unsupported formats
if self.uicore.core.format == 'Hexdump':
self.tviews.left_scrolled_window.hide()
if not self.uicore.do_anal:
self.topbuttons.diff_tb.set_sensitive(False)
self.topbuttons.sections_tb.set_sensitive(False)
dialog.destroy()
# We make sure that we remove the reference to the scrollbar to avoid errors.
self.uicore.core.progress_bar = None
gtk.main()
# Do all the core stuff of parsing file
def load_file(self, target):
#print "Loading file: %s..." % (target)
self.uicore.load_file(target)
if not self.uicore.file_loaded:
return
if self.uicore.core.format == 'Program' and self.uicore.do_anal:
self.uicore.get_sections()
self.uicore.get_relocs()
def show_empty_gui(self):
self.topbuttons.throbber.running('start')
# Once we have the file info, let's create the GUI
def show_file_data(self):
#print "File format detected: %s" % (self.uicore.core.format)
# Create left buttons depending on file format
self.tviews.update_left_buttons()
# Add data to RIGHT TextView
if self.uicore.core.format == "Program":
self.tviews.update_righttext('Disassembly')
else:
self.tviews.update_righttext('Hexdump')
if platform.system() != 'Windows':
gobject.timeout_add(250, self.merge_dasm_rightextview)
else:
if self.uicore.text_dasm:
self.tviews.update_dasm(self.uicore.text_dasm)
elif self.uicore.fulldasm:
self.tviews.update_dasm(self.uicore.fulldasm)
self.uicore.restore_va()
if self.uicore.core.format == 'Program':
link_name = "0x%08x" % self.uicore.core.num.get('entry0')
if not link_name:
link_name = "0x%08x" % self.uicore.core.num.get('section..text')
if not link_name:
link_name = "0x%08x" % self.uicore.core.num.get('section.' + self.uicore.execsections[0][0])
self.tviews.update_graph(self, link_name)
self.tviews.search(self, link_name)
self.tviews.right_notebook.finish_dasm()
self.topbuttons.throbber.running('')
# Load data to LEFT Tree
if self.uicore.core.format == "Program":
self.tviews.create_model('Functions')
# Update statusbar with file info
info = self.uicore.get_file_info()
self.sbar.add_text(info, glob.version)
self.sbar.hide_all()
self.sbar._statusbar.show_all()
# Create seek entry autocompletion of function names...
self.tviews.create_completion()
# Enable GUI
self.enable_all()
if platform.system() != 'Windows':
if self.uicore.core.format != "Program":
self.topbuttons.throbber.running('')
else:
self.tviews.right_textview.right_scrolled_window.set_sensitive(False)
self.topbuttons.throbber.running('start')
def merge_dasm_rightextview(self):
""" Timeout to make sure we join the spawned process for disassembling a binary. """
if not self.dasm_process:
# We don't need a process for this file.
return False
if not self.dasm_event.is_set():
# Keep retrying.
return True
# Once finished the load, let's fill the UI
#print "DEBUG: DASM finished, reading from queue!"
#print "Process state", self.dasm_process.is_alive()
# We read from the queue the disassembly.
if self.uicore.do_anal:
self.uicore.text_dasm, self.uicore.sections_lines = self.dasm_queue.get()
#print "DEBUG: Got a disassembly of", len(self.uicore.text_dasm), "bytes."
#print "DEBUG: Section lines created", self.uicore.sections_lines
if self.uicore.text_dasm:
self.tviews.update_dasm(self.uicore.text_dasm)
else:
self.tviews.update_dasm(self.uicore.fulldasm)
self.uicore.restore_va()
link_name = "0x%08x" % self.uicore.core.num.get('entry0')
if not link_name:
link_name = "0x%08x" % self.uicore.core.num.get('section..text')
if not link_name:
link_name = "0x%08x" % self.uicore.core.num.get('section.' + self.uicore.execsections[0][0])
self.tviews.update_graph(self, link_name)
self.tviews.search(self, link_name)
self.tviews.right_notebook.finish_dasm()
self.topbuttons.menu._finish_dasm()
self.tviews.right_textview.right_scrolled_window.set_sensitive(True)
self.topbuttons.throbber.running('')
self.tviews.console.add_message('Background analysis finished')
self.tviews.console.add_message('Happy reversing!')
# Insert random r2 quote
self.tviews.console.add_message(self.uicore.execute_command('fo'))
return False
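    # Illustrative note: gobject.timeout_add(250, ...) in show_file_data()
    # re-invokes this callback every 250 ms while it returns True; returning
    # False removes the timeout (standard GLib timeout semantics), hence the
    # True/False returns above.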
def disable_all(self):
self.topbuttons.disable_all()
self.tviews.set_sensitive(False)
def enable_all(self):
self.topbuttons.set_sensitive(True)
self.topbuttons.enable_all()
self.tviews.set_sensitive(True)
def load_new_file(self, dialog, target):
self.window.hide()
self.disable_all()
self.target = target
if self.target:
# Just open the target if path is correct or an url
if not os.path.isfile(self.target):
                print(common.console_color('Incorrect file argument: %s' %
                    self.target, 'red'))
#sys.exit(1)
# Get dialog selected file, backend and options
self.backend = 'radare'
# Set user selected options
self.uicore.set_options(dialog)
self.uicore.backend = self.backend
self.uicore.clean_fullvars()
self.load_file(self.target)
ui.gtk2.common.repaint()
# Clean UI
self.topbuttons.menu.delete_view_menu()
self.tviews.left_buttons.remove_all()
self.tviews.right_notebook.remove_tabs()
self.tviews.left_treeview.remove_columns()
self.sbar.remove_all()
# Add new content
self.tviews.right_notebook.create_tabs()
self.topbuttons.menu.create_view_menu()
self.show_file_data()
self.uicore.core.progress_bar = None
# Hide left tree for plain or unsupported formats
if self.uicore.core.format == 'Hexdump':
self.tviews.left_scrolled_window.hide()
# Show UI
self.enable_all()
self.sbar.show_all()
self.tviews.right_notebook.show_all()
self.window.show()
dialog.destroy()
def quit(self, widget, event=None, data=None):
'''Main quit.
@param widget: who sent the signal.
@param event: the event that happened
@param data: optional data to receive.
'''
msg = ("Do you really want to quit?")
dlg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, msg)
dlg.set_default_response(gtk.RESPONSE_YES)
opt = dlg.run()
dlg.destroy()
if opt != gtk.RESPONSE_YES:
return True
gtk.main_quit()
if self.dasm_process:
self.dasm_process.terminate()
return True
def main(target, backend):
BokkenGTKClient(target, backend)
if glob.http:
glob.http.terminate()
|
8l/bokken
|
ui/main.py
|
Python
|
gpl-2.0
| 14,679
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
"""
from oslo import i18n
from wormhole.common import gettextutils
DOMAIN = 'wormhole'
_translators = i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def translate(value, user_locale):
return i18n.translate(value, user_locale)
def get_available_languages():
return i18n.get_available_languages(DOMAIN)
# Parts in oslo-incubator are still using gettextutils._(), _LI(), etc., from
# oslo-incubator. Until these parts are changed to use oslo.i18n, Keystone
# needs to do something to allow them to work. One option is to continue to
# initialize gettextutils, but with the way that Nova has initialization
# spread out over multiple entry points, we'll monkey-patch
# gettextutils._(), _LI(), etc., to use our oslo.i18n versions.
# FIXME(dims): Remove the monkey-patching and update openstack-common.conf and
# do a sync with oslo-incubator to remove gettextutils once oslo-incubator
# isn't using oslo-incubator gettextutils any more.
gettextutils._ = _
gettextutils._LI = _LI
gettextutils._LW = _LW
gettextutils._LE = _LE
gettextutils._LC = _LC
|
HybridF5/hybrid-jacket
|
wormhole/i18n.py
|
Python
|
apache-2.0
| 2,126
|
"""
Discussion API internal interface
"""
from collections import defaultdict
from urllib import urlencode
from urlparse import urlunparse
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import Http404
from rest_framework.exceptions import PermissionDenied
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locator import CourseKey
from courseware.courses import get_course_with_access
from discussion_api.forms import ThreadActionsForm
from discussion_api.pagination import get_paginated_data
from discussion_api.serializers import CommentSerializer, ThreadSerializer, get_context
from django_comment_client.base.views import (
THREAD_CREATED_EVENT_NAME,
get_comment_created_event_data,
get_comment_created_event_name,
get_thread_created_event_data,
track_forum_event,
)
from django_comment_client.utils import get_accessible_discussion_modules
from lms.lib.comment_client.comment import Comment
from lms.lib.comment_client.thread import Thread
from lms.lib.comment_client.utils import CommentClientRequestError
from openedx.core.djangoapps.course_groups.cohorts import get_cohort_id, is_commentable_cohorted
def _get_course_or_404(course_key, user):
"""
Get the course descriptor, raising Http404 if the course is not found,
the user cannot access forums for the course, or the discussion tab is
disabled for the course.
"""
course = get_course_with_access(user, 'load', course_key, check_if_enrolled=True)
if not any([tab.type == 'discussion' for tab in course.tabs]):
raise Http404
return course
def _get_thread_and_context(request, thread_id, retrieve_kwargs=None):
"""
Retrieve the given thread and build a serializer context for it, returning
both. This function also enforces access control for the thread (checking
both the user's access to the course and to the thread's cohort if
applicable). Raises Http404 if the thread does not exist or the user cannot
access it.
"""
retrieve_kwargs = retrieve_kwargs or {}
try:
if "mark_as_read" not in retrieve_kwargs:
retrieve_kwargs["mark_as_read"] = False
cc_thread = Thread(id=thread_id).retrieve(**retrieve_kwargs)
course_key = CourseKey.from_string(cc_thread["course_id"])
course = _get_course_or_404(course_key, request.user)
context = get_context(course, request, cc_thread)
if (
not context["is_requester_privileged"] and
cc_thread["group_id"] and
is_commentable_cohorted(course.id, cc_thread["commentable_id"])
):
requester_cohort = get_cohort_id(request.user, course.id)
if requester_cohort is not None and cc_thread["group_id"] != requester_cohort:
raise Http404
return cc_thread, context
except CommentClientRequestError:
# params are validated at a higher level, so the only possible request
# error is if the thread doesn't exist
raise Http404
def _get_comment_and_context(request, comment_id):
"""
Retrieve the given comment and build a serializer context for it, returning
both. This function also enforces access control for the comment (checking
both the user's access to the course and to the comment's thread's cohort if
applicable). Raises Http404 if the comment does not exist or the user cannot
access it.
"""
try:
cc_comment = Comment(id=comment_id).retrieve()
        _, context = _get_thread_and_context(request, cc_comment["thread_id"])
return cc_comment, context
except CommentClientRequestError:
raise Http404
def _is_user_author_or_privileged(cc_content, context):
"""
Check if the user is the author of a content object or a privileged user.
Returns:
Boolean
"""
return (
context["is_requester_privileged"] or
context["cc_requester"]["id"] == cc_content["user_id"]
)
def get_thread_list_url(request, course_key, topic_id_list):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = reverse("thread-list")
query_list = [("course_id", unicode(course_key))] + [("topic_id", topic_id) for topic_id in topic_id_list]
return request.build_absolute_uri(urlunparse(("", "", path, "", urlencode(query_list), "")))
def get_course_topics(request, course_key):
"""
Return the course topic listing for the given course and user.
Parameters:
course_key: The key of the course to get topics for
user: The requesting user, for access control
Returns:
A course topic listing dictionary; see discussion_api.views.CourseTopicViews
for more detail.
"""
def get_module_sort_key(module):
"""
Get the sort key for the module (falling back to the discussion_target
setting if absent)
"""
return module.sort_key or module.discussion_target
course = _get_course_or_404(course_key, request.user)
discussion_modules = get_accessible_discussion_modules(course, request.user)
modules_by_category = defaultdict(list)
for module in discussion_modules:
modules_by_category[module.discussion_category].append(module)
def get_sorted_modules(category):
"""Returns key sorted modules by category"""
return sorted(modules_by_category[category], key=get_module_sort_key)
courseware_topics = [
{
"id": None,
"name": category,
"thread_list_url": get_thread_list_url(
request,
course_key,
[item.discussion_id for item in get_sorted_modules(category)]
),
"children": [
{
"id": module.discussion_id,
"name": module.discussion_target,
"thread_list_url": get_thread_list_url(request, course_key, [module.discussion_id]),
"children": [],
}
for module in get_sorted_modules(category)
],
}
for category in sorted(modules_by_category.keys())
]
non_courseware_topics = [
{
"id": entry["id"],
"name": name,
"thread_list_url": get_thread_list_url(request, course_key, [entry["id"]]),
"children": [],
}
for name, entry in sorted(
course.discussion_topics.items(),
key=lambda item: item[1].get("sort_key", item[0])
)
]
return {
"courseware_topics": courseware_topics,
"non_courseware_topics": non_courseware_topics,
}
def get_thread_list(request, course_key, page, page_size, topic_id_list=None):
"""
Return the list of all discussion threads pertaining to the given course
Parameters:
request: The django request objects used for build_absolute_uri
course_key: The key of the course to get discussion threads for
page: The page number (1-indexed) to retrieve
page_size: The number of threads to retrieve per page
topic_id_list: The list of topic_ids to get the discussion threads for
Returns:
A paginated result containing a list of threads; see
discussion_api.views.ThreadViewSet for more detail.
"""
course = _get_course_or_404(course_key, request.user)
context = get_context(course, request)
topic_ids_csv = ",".join(topic_id_list) if topic_id_list else None
threads, result_page, num_pages, _ = Thread.search({
"course_id": unicode(course.id),
"group_id": (
None if context["is_requester_privileged"] else
get_cohort_id(request.user, course.id)
),
"sort_key": "date",
"sort_order": "desc",
"page": page,
"per_page": page_size,
"commentable_ids": topic_ids_csv,
})
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a 404 in that case
if result_page != page:
raise Http404
results = [ThreadSerializer(thread, context=context).data for thread in threads]
return get_paginated_data(request, results, page, num_pages)
def get_comment_list(request, thread_id, endorsed, page, page_size):
"""
Return the list of comments in the given thread.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id of the thread to get comments for.
endorsed: Boolean indicating whether to get endorsed or non-endorsed
comments (or None for all comments). Must be None for a discussion
thread and non-None for a question thread.
page: The page number (1-indexed) to retrieve
page_size: The number of comments to retrieve per page
Returns:
A paginated result containing a list of comments; see
discussion_api.views.CommentViewSet for more detail.
"""
response_skip = page_size * (page - 1)
cc_thread, context = _get_thread_and_context(
request,
thread_id,
retrieve_kwargs={
"recursive": True,
"user_id": request.user.id,
"mark_as_read": True,
"response_skip": response_skip,
"response_limit": page_size,
}
)
# Responses to discussion threads cannot be separated by endorsed, but
# responses to question threads must be separated by endorsed due to the
# existing comments service interface
if cc_thread["thread_type"] == "question":
if endorsed is None:
raise ValidationError({"endorsed": ["This field is required for question threads."]})
elif endorsed:
# CS does not apply resp_skip and resp_limit to endorsed responses
# of a question post
responses = cc_thread["endorsed_responses"][response_skip:(response_skip + page_size)]
resp_total = len(cc_thread["endorsed_responses"])
else:
responses = cc_thread["non_endorsed_responses"]
resp_total = cc_thread["non_endorsed_resp_total"]
else:
if endorsed is not None:
raise ValidationError(
{"endorsed": ["This field may not be specified for discussion threads."]}
)
responses = cc_thread["children"]
resp_total = cc_thread["resp_total"]
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a 404 in that case
if not responses and page != 1:
raise Http404
num_pages = (resp_total + page_size - 1) / page_size if resp_total else 1
results = [CommentSerializer(response, context=context).data for response in responses]
return get_paginated_data(request, results, page, num_pages)
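# Illustrative note: the num_pages computation in get_comment_list is the
# classic ceiling-division idiom; under Python 2 integer division,
# (resp_total + page_size - 1) / page_size == int(math.ceil(resp_total / float(page_size))).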
def _do_extra_thread_actions(api_thread, cc_thread, request_fields, actions_form, context):
"""
Perform any necessary additional actions related to thread creation or
update that require a separate comments service request.
"""
for field, form_value in actions_form.cleaned_data.items():
if field in request_fields and form_value != api_thread[field]:
api_thread[field] = form_value
if field == "following":
if form_value:
context["cc_requester"].follow(cc_thread)
else:
context["cc_requester"].unfollow(cc_thread)
else:
assert field == "voted"
if form_value:
context["cc_requester"].vote(cc_thread, "up")
else:
context["cc_requester"].unvote(cc_thread)
def create_thread(request, thread_data):
"""
Create a thread.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_data: The data for the created thread.
Returns:
The created thread; see discussion_api.views.ThreadViewSet for more
detail.
"""
course_id = thread_data.get("course_id")
if not course_id:
raise ValidationError({"course_id": ["This field is required."]})
try:
course_key = CourseKey.from_string(course_id)
course = _get_course_or_404(course_key, request.user)
except (Http404, InvalidKeyError):
raise ValidationError({"course_id": ["Invalid value."]})
context = get_context(course, request)
serializer = ThreadSerializer(data=thread_data, context=context)
actions_form = ThreadActionsForm(thread_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
serializer.save()
cc_thread = serializer.object
api_thread = serializer.data
_do_extra_thread_actions(api_thread, cc_thread, thread_data.keys(), actions_form, context)
track_forum_event(
request,
THREAD_CREATED_EVENT_NAME,
course,
cc_thread,
get_thread_created_event_data(cc_thread, followed=actions_form.cleaned_data["following"])
)
return api_thread
def create_comment(request, comment_data):
"""
Create a comment.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_data: The data for the created comment.
Returns:
The created comment; see discussion_api.views.CommentViewSet for more
detail.
"""
thread_id = comment_data.get("thread_id")
if not thread_id:
raise ValidationError({"thread_id": ["This field is required."]})
try:
cc_thread, context = _get_thread_and_context(request, thread_id)
except Http404:
raise ValidationError({"thread_id": ["Invalid value."]})
serializer = CommentSerializer(data=comment_data, context=context)
if not serializer.is_valid():
raise ValidationError(serializer.errors)
serializer.save()
cc_comment = serializer.object
track_forum_event(
request,
get_comment_created_event_name(cc_comment),
context["course"],
cc_comment,
get_comment_created_event_data(cc_comment, cc_thread["commentable_id"], followed=False)
)
return serializer.data
_THREAD_EDITABLE_BY_ANY = {"following", "voted"}
_THREAD_EDITABLE_BY_AUTHOR = {"topic_id", "type", "title", "raw_body"} | _THREAD_EDITABLE_BY_ANY
def _get_thread_editable_fields(cc_thread, context):
"""
Get the list of editable fields for the given thread in the given context
"""
if _is_user_author_or_privileged(cc_thread, context):
return _THREAD_EDITABLE_BY_AUTHOR
else:
return _THREAD_EDITABLE_BY_ANY
def update_thread(request, thread_id, update_data):
"""
Update a thread.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to update.
update_data: The data to update in the thread.
Returns:
The updated thread; see discussion_api.views.ThreadViewSet for more
detail.
"""
cc_thread, context = _get_thread_and_context(request, thread_id)
editable_fields = _get_thread_editable_fields(cc_thread, context)
non_editable_errors = {
field: ["This field is not editable."]
for field in update_data.keys()
if field not in editable_fields
}
if non_editable_errors:
raise ValidationError(non_editable_errors)
serializer = ThreadSerializer(cc_thread, data=update_data, partial=True, context=context)
actions_form = ThreadActionsForm(update_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
# Only save thread object if some of the edited fields are in the thread data, not extra actions
if set(update_data) - set(actions_form.fields):
serializer.save()
api_thread = serializer.data
_do_extra_thread_actions(api_thread, cc_thread, update_data.keys(), actions_form, context)
return api_thread
def update_comment(request, comment_id, update_data):
"""
Update a comment.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id for the comment to update.
update_data: The data to update in the comment.
Returns:
The updated comment; see discussion_api.views.CommentViewSet for more
detail.
Raises:
Http404: if the comment does not exist or is not accessible to the
requesting user
PermissionDenied: if the comment is accessible to but not editable by
the requesting user
ValidationError: if there is an error applying the update (e.g. raw_body
is empty or thread_id is included)
"""
cc_comment, context = _get_comment_and_context(request, comment_id)
if not _is_user_author_or_privileged(cc_comment, context):
raise PermissionDenied()
serializer = CommentSerializer(cc_comment, data=update_data, partial=True, context=context)
if not serializer.is_valid():
raise ValidationError(serializer.errors)
# Only save comment object if the comment is actually modified
if update_data:
serializer.save()
return serializer.data
def delete_thread(request, thread_id):
"""
Delete a thread.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to delete
Raises:
PermissionDenied: if user does not have permission to delete thread
"""
cc_thread, context = _get_thread_and_context(request, thread_id)
if _is_user_author_or_privileged(cc_thread, context):
cc_thread.delete()
else:
raise PermissionDenied
def delete_comment(request, comment_id):
"""
Delete a comment.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id of the comment to delete
Raises:
PermissionDenied: if user does not have permission to delete thread
"""
cc_comment, context = _get_comment_and_context(request, comment_id)
if _is_user_author_or_privileged(cc_comment, context):
cc_comment.delete()
else:
raise PermissionDenied
|
shubhdev/openedx
|
lms/djangoapps/discussion_api/api.py
|
Python
|
agpl-3.0
| 19,008
|
import json
import docker
import sys
import subprocess
import re
def run(cmd, returncode=False, echo=True, **kargs):
""" Executes a shell command and prints out STDOUT / STDERROR, exits on failure by default """
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, **kargs)
if echo:
print "$ %s" % cmd
while True:
out = process.stdout.read(1)
        if out == '' and process.poll() is not None:
break
if out != '':
sys.stdout.write(out)
sys.stdout.flush()
if returncode:
return process.returncode
else:
if process.returncode != 0:
print "Something went wrong! returncode=%s" % process.returncode
sys.exit(1)
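# Illustrative usage: run('echo hello') prints "$ echo hello", streams the
# command's combined stdout/stderr one byte at a time, and exits the script on
# a non-zero status unless returncode=True is passed.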
dockConfig = json.load(open('/vagrant/dock.json'))
client = docker.Client(base_url='http://127.0.0.1:5555', version="1.6")
dockName = sys.argv[1]
domainName = dockConfig['docks'][dockName]['domain']
activeContainers = []
for containerInfo in client.containers():
inspectInfo = client.inspect_container(containerInfo['Id'])
activeContainers.append({ 'name': containerInfo['Image'].split(':')[0] , 'ip': inspectInfo['NetworkSettings']['IPAddress'] })
content = open('/vagrant/scripts/dns.tpl').read()
# Compile list of A entries
aEntries = ''
for container in activeContainers:
aEntries = aEntries + '%(name)s IN A %(ip)s\n' % container
replace = {'hostname': dockConfig['docks'][dockName]['domain'], 'aEntries': aEntries}
f = open('/vagrant/temp','w')
f.write(content % replace)
f.close()
# Copy new zone file into bind directory, set ownership / privileges correctly
run('sudo cp /vagrant/temp /etc/bind/%s' % domainName)
run('sudo chown bind:bind /etc/bind/%s' % domainName)
# Check to see if we have the zone included
zoneFile = '/etc/bind/named.conf.default-zones'
zoneContents = open(zoneFile).read()
match = re.search(domainName, zoneContents, re.IGNORECASE)
if not match:
zoneContents = zoneContents + 'zone "%s" { type master; file "/etc/bind/%s"; };\n' % (domainName, domainName)
f = open('/vagrant/temp','w')
f.write(zoneContents)
f.close()
run('sudo cp /vagrant/temp %s' % zoneFile)
run('sudo chown bind:bind %s' % zoneFile)
run('sudo service bind9 reload')
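# Illustrative note: each active container ends up as a BIND A record of the
# form "<name> IN A <ip>" in /etc/bind/<domain>, and the zone is registered
# once in named.conf.default-zones as:
#   zone "<domain>" { type master; file "/etc/bind/<domain>"; };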
|
vormund/web-stack
|
scripts/dns.py
|
Python
|
mit
| 2,304
|
from setuptools import setup, find_packages
requirements = ["pytz"]
setup(
name = "cubes-mixpanel",
version = '0.1',
install_requires = requirements,
packages = find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
package_data = {
},
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Scientific/Engineering',
'Topic :: Utilities'
],
entry_points={
},
test_suite = "tests",
# metadata for upload to PyPI
author = "Stefan Urbanek",
author_email = "stefan.urbanek@gmail.com",
description = "Mixpanel Backend for Cubes Python OLAP",
license = "MIT",
keywords = "olap multidimensional data analysis",
url = "http://cubes.databrewery.org"
)
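# Illustrative note: this is a standard setuptools manifest, so the backend
# installs with e.g. "pip install ." from the repository root (pulling in the
# declared pytz dependency), and test_suite enables "python setup.py test".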
|
DataBrewery/cubes-mixpanel
|
setup.py
|
Python
|
mit
| 1,039
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# (c) Con Radchenko mailto:lankier@gmail.com
#
# $Id: fb2desc.py,v 1.10 2008/09/15 04:18:45 con Exp con $
#
import sys, os
import locale
import getopt
import codecs
import zipfile
from cStringIO import StringIO
import xml.sax
import shutil
import traceback
def get_filename(authors_list, sequence_name, sequence_number, title):
    '''Formats:
    1 - "full author names, comma separated - title (series #number)"
    2 - the same, but transliterated and with spaces replaced
    3 - "author last names, comma separated - title"
    4 - the same, but transliterated and with spaces replaced
    5 - "author's first letter in lower case/comma-separated authors in lower case/comma-separated authors - title (series #number)"
    6 - the same, but transliterated and with spaces replaced
    '''
format = options['fn-format']
out = []
authors = []
full_authors = []
for a in authors_list:
if a[0]:
authors.append(a[0])
fa = ' '.join(i for i in a if i)
if fa:
full_authors.append(fa)
authors = ', '.join(authors)
if not authors:
authors = 'unknown'
full_authors = ', '.join(full_authors)
if not full_authors:
full_authors = 'unknown'
if not title:
title = 'unknown'
seq = ''
if sequence_name:
if sequence_number:
seq = '(%s #%s)' % (sequence_name, sequence_number)
else:
seq = '(%s)' % sequence_name
if format == 3:
out.append(authors)
out.append('-')
out.append(title)
out = ' '.join(out)
else:
out.append(full_authors)
out.append('-')
out.append(title)
if seq:
out.append(seq)
out = ' '.join(out)
if format in (2, 4, 6):
out = translit(out)
full_authors = translit(full_authors)
#out = out.replace('/', '%').replace('\0', '').replace('?', '')
for c in '|\\?*<":>+[]/': # invalid chars in VFAT
out = out.replace(c, '')
if format in (4, 5):
full_authors = full_authors.replace(c, '')
fn_max = 240
if format in (5, 6):
fl = full_authors[0]
if not fl.isalpha():
fl = full_authors[1] # FIXME
out = os.path.join(
fl.lower().encode(options['charset']),
full_authors.lower().encode(options['charset'])[:fn_max],
out.encode(options['charset'])[:fn_max])
else:
out = out.encode(options['charset'])[:fn_max]
return out
##----------------------------------------------------------------------
options = {
'format' : '',
'charset' : 'utf-8',
'zip-charset' : 'cp866',
'elements' : [],
'replace' : False,
'rename' : False,
'slink' : False,
'copy' : False,
'fn-format' : 2,
'show-cover' : False,
'show-content' : False,
'show-tree' : False,
'image-viewer' : 'xv',
'quiet' : False,
'dest-dir' : None,
#
'suffix' : None,
}
##----------------------------------------------------------------------
class StopParsing(Exception):
pass
##----------------------------------------------------------------------
# u'\u2013' -> '--'
# u'\u2014' -> '---'
# u'\xa0' -> non-breaking space
# u'\u2026' -> dots...
# u'\xab' -> '<<'
# u'\xbb' -> '>>'
# u'\u201c' -> ``
# u'\u201d' -> ''
# u'\u201e' -> ,,
def replace_chars(s):
return (s
.replace(u'\u2013', u'--')
.replace(u'\u2014', u'---')
.replace(u'\xa0' , u' ')
.replace(u'\u2026', u'...')
.replace(u'\xab' , u'<<')
.replace(u'\xbb' , u'>>')
.replace(u'\u201c', u'``')
.replace(u'\u201d', u'\'\'')
.replace(u'\u201e', u',,')
)
def translit(s):
trans_tbl = {
u'\u0430': 'a', #а
u'\u0431': 'b', #б
u'\u0432': 'v', #в
u'\u0433': 'g', #г
u'\u0434': 'd', #д
u'\u0435': 'e', #е
u'\u0451': 'yo', #ё
u'\u0436': 'zh', #ж
u'\u0437': 'z', #з
u'\u0438': 'i', #и
u'\u0439': 'y', #й
u'\u043a': 'k', #к
u'\u043b': 'l', #л
u'\u043c': 'm', #м
u'\u043d': 'n', #н
u'\u043e': 'o', #о
u'\u043f': 'p', #п
u'\u0440': 'r', #р
u'\u0441': 's', #с
u'\u0442': 't', #т
u'\u0443': 'u', #у
u'\u0444': 'f', #ф
u'\u0445': 'h', #х
u'\u0446': 'c', #ц
u'\u0447': 'ch', #ч
u'\u0448': 'sh', #ш
u'\u0449': 'sh', #щ
u'\u044a': '', #ъ
u'\u044b': 'y', #ы
u'\u044c': '', #ь
u'\u044d': 'e', #э
u'\u044e': 'ju', #ю
u'\u044f': 'ya', #я
}
alnum = 'abcdefghijklmnopqrstuvwxyz0123456789'
out = []
out_s = ''
for i in s.lower():
if i.isalnum():
if i in trans_tbl:
out_s += trans_tbl[i]
elif i in alnum:
out_s += i
else:
if out_s: out.append(out_s)
out_s = ''
if out_s: out.append(out_s)
return '_'.join(out)
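# Illustrative example, derived from the table above:
# translit(u'Война и мир') == 'voyna_i_mir'; runs of non-alphanumeric
# characters collapse into single underscores and Cyrillic letters map to
# Latin counterparts or digraphs.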
def wrap_line(s):
if len(s) <= 70:
return u' '+s
ss = u' '
sl = []
for word in s.split():
if len(ss+word) > 72:
sl.append(ss)
ss = word
elif ss:
ss += u' ' + word
else:
ss = word
sl.append(ss)
return '\n'.join(sl)
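# Illustrative note: wrap_line() greedily packs whitespace-separated words
# into lines of roughly 72 characters, each carrying the annotation indent;
# inputs of 70 characters or fewer come back as a single indented line.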
##----------------------------------------------------------------------
def show_cover(filename, data, content_type):
if not data:
print >> sys.stderr, '%s: sorry, cover not found' % filename
return
import base64, tempfile
data = base64.decodestring(data)
if content_type and content_type.startswith('image/'):
suffix = '.'+content_type[6:]
else:
suffix = ''
tmp_id, tmp_file = tempfile.mkstemp(suffix)
try:
open(tmp_file, 'w').write(data)
os.system(options['image-viewer']+' '+tmp_file)
finally:
os.close(tmp_id)
os.remove(tmp_file)
def show_content(filename, titles):
    for section_level, data in titles:
        if options['replace']: data = replace_chars(data)
        print ' '*section_level+data.encode(options['charset'], 'replace')
print
def rename(filename, zipfilename, desc, data):
to = pretty_format(filename, zipfilename, len(data), desc, 'filename')
##filename = os.path.abspath(filename)
to += options['suffix']
if options['dest-dir']:
to = os.path.join(options['dest-dir'], to)
to = os.path.abspath(to)
if os.path.exists(to):
print >> sys.stderr, 'file %s already exists' % to
return
dir_name = os.path.dirname(to)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if options['slink']:
os.symlink(filename, to)
return
elif options['copy']:
shutil.copy(filename, to)
return
os.rename(filename, to)
def pretty_format(filename, zipfilename, filesize, desc, format='pretty'):
ann = []
title = ''
authors_list = []
# [last-name, first-name, middle-name, nick-name]
author_name = [None, None, None, None]
genres = []
sequence_name = ''
sequence_number = ''
for elem, data in desc:
## data = data.strip()
## if not data:
## continue
if elem.startswith('/description/title-info/annotation/'):
if not elem.endswith('href'):
ann.append(data) #wrap_line(data))
if elem.endswith('/p'):
ann.append('\n')
elif elem == '/description/title-info/book-title':
title = data
elif elem == '/description/title-info/author/first-name':
author_name[1] = data
elif elem == '/description/title-info/author/middle-name':
author_name[2] = data
elif elem == '/description/title-info/author/last-name':
author_name[0] = data
authors_list.append(author_name)
author_name = [None, None, None, None]
elif elem == '/description/title-info/author/nick-name':
#author_name[3] = data
if not author_name[0]:
author_name[0] = data
else:
author_name[3] = data
authors_list.append(author_name)
author_name = [None, None, None, None]
elif elem == '/description/title-info/genre':
genres.append(data)
elif elem == '/description/title-info/sequence/name':
sequence_name = data
elif elem == '/description/title-info/sequence/number':
sequence_number = data
##authors_list.sort()
authors = u', '.join(' '.join(n for n in a if n) for a in authors_list if a)
annotation = []
ann = ''.join(ann).split('\n')
for s in ann:
annotation.append(wrap_line(s))
annotation = '\n'.join(annotation)
if format == 'single':
if sequence_name and sequence_number:
out = u'%s - %s (%s %s)' % (authors, title,
sequence_name, sequence_number)
elif sequence_name:
out = u'%s - %s (%s)' % (authors, title, sequence_name)
else:
out = u'%s - %s' % (authors, title)
#out = '%s: %s' % (filename, out)
if options['replace']: out = replace_chars(out)
return out.encode(options['charset'], 'replace')
elif format == 'pretty':
out = u'''\
File : %s
''' % filename
if zipfilename:
out += u'''\
Zip Filename : %s
''' % zipfilename
out += u'''\
Size : %d kb
''' % int(filesize/1024)
out += u'''\
Author(s) : %s
Title : %s
Genres : %s
''' % (authors, title, u', '.join(genres))
if sequence_name:
if sequence_number:
sequence = u'%s (%s)' % (sequence_name, sequence_number)
else:
sequence = sequence_name
out += u'''\
Sequence : %s
''' % sequence
if annotation:
out += u'''\
Annotation :
%s
''' % annotation
if options['replace']: out = replace_chars(out)
return out.encode(options['charset'], 'replace')
elif format == 'filename':
return get_filename(authors_list, sequence_name, sequence_number, title)
def raw_format(filename, zipfilename, desc):
if options['quiet']:
out = u''
else:
out = u'filename: %s\n' % filename
if zipfilename:
out += u'zipfilename: %s\n' % zipfilename
for elem, data in desc:
if not data:
continue
t = filter(elem.startswith, options['elements'])
#t = [x for x in options['elements'] if elem.startswith(x)]
if options['elements'] == [] or t:
out += u'%s: %s\n' % (elem, data)
if options['replace']: out = replace_chars(out)
return out.encode(options['charset'], 'replace')
##----------------------------------------------------------------------
class ContentHandler(xml.sax.handler.ContentHandler):
def __init__(self):
self.elem_stack = []
self.is_desc = False
self.is_cover = False
self.cur_data = ''
self.desc = []
self.cover = ''
self.cover_name = ''
self.cover_content_type = ''
self.is_title = False
self.cur_title = []
self.titles = []
self.section_level = 0
self.tree = []
def startElement(self, name, attrs):
if name == 'description': self.is_desc = True
if name == 'section': self.section_level += 1
if self.is_desc or options['show-tree']:
self.elem_stack.append(name)
elem = '/'+'/'.join(self.elem_stack)
if options['show-tree']:
if self.tree and self.tree[-1][0] == elem:
#print self.tree[-1]
self.tree[-1][1] += 1
else:
#if not elem.endswith('/p') and not elem.endswith('/v'):
self.tree.append([elem, 1])
for atr in attrs.getNames():
#t = (elem+u'/'+atr, attrs.getValue(atr))
self.desc.append((elem+u'/'+atr, attrs.getValue(atr)))
if elem == '/description/title-info/coverpage/image' and \
atr.endswith('href'):
self.cover_name = attrs.getValue(atr)[1:]
self.is_cover = False
if options['show-cover'] and name == 'binary':
content_type = ''
for atr in attrs.getNames():
if atr == 'id' and attrs.getValue(atr) == self.cover_name:
self.is_cover = True
elif atr == 'content-type':
content_type = attrs.getValue(atr)
if self.is_cover and content_type:
self.cover_content_type = content_type
if options['show-content'] and name == 'title':
self.is_title = True
self.cur_title = []
def endElement(self, name):
if self.is_desc and self.cur_data:
elem_name = '/'+'/'.join(self.elem_stack)
self.desc.append((elem_name, self.cur_data.strip()))
self.cur_data = ''
if self.is_desc or options['show-tree']:
del self.elem_stack[-1]
if name == 'description':
if not options['show-cover'] \
and not options['show-content'] \
and not options['show-tree']:
raise StopParsing
else:
self.is_desc = False
if options['show-content'] and name == 'title':
self.is_title = False
self.titles.append((self.section_level, ' '.join(self.cur_title)))
self.cur_data = ''
if name == 'section': self.section_level -= 1
def characters(self, data):
if self.is_desc:
#data = data.strip()
data = data.replace('\n', ' ')
if self.cur_data:
self.cur_data += data
else:
self.cur_data = data
if options['show-cover'] and self.is_cover:
self.cover += data
if options['show-content'] and self.is_title:
data = data.strip()
if data: self.cur_title.append(data)
class ErrorHandler(xml.sax.handler.ErrorHandler): pass
class EntityResolver(xml.sax.handler.EntityResolver): pass
class DTDHandler(xml.sax.handler.DTDHandler): pass
##----------------------------------------------------------------------
def fb2parse(filename, zipfilename, data):
if not data.startswith('<?xml') and not data.startswith('\xef\xbb\xbf<?xml'):
print >> sys.stderr, \
'Warning: file %s is not an XML file. Skipped.' % filename
print repr(data[:5])
#shutil.copy(filename, '/home/con/t/')
return
chandler = ContentHandler()
input_source = xml.sax.InputSource()
input_source.setByteStream(StringIO(data))
xml_reader = xml.sax.make_parser()
xml_reader.setContentHandler(chandler)
xml_reader.setErrorHandler(ErrorHandler())
xml_reader.setEntityResolver(EntityResolver())
xml_reader.setDTDHandler(DTDHandler())
try:
xml_reader.parse(input_source)
except StopParsing:
pass
if options['rename']:
rename(filename, zipfilename, chandler.desc, data)
return
if options['show-tree']:
for e, n in chandler.tree:
if n > 1:
print '%s [%d]' % (e, n)
else:
print e
return
if options['format'] == 'pretty':
print pretty_format(filename, zipfilename, len(data), chandler.desc, 'pretty')
elif options['format'] == 'filename':
print pretty_format(filename, zipfilename, len(data), chandler.desc, 'filename')
elif options['format'] == 'single':
print pretty_format(filename, zipfilename, len(data), chandler.desc, 'single')
elif options['format'] == '' \
and not options['show-cover'] \
and not options['show-content']:
print raw_format(filename, zipfilename, chandler.desc)
if options['show-cover'] or options['show-content']:
if options['format'] == 'raw':
print raw_format(filename, zipfilename, chandler.desc)
if options['show-content']:
show_content(filename, chandler.titles)
if options['show-cover']:
show_cover(filename, chandler.cover, chandler.cover_content_type)
##----------------------------------------------------------------------
def main():
#locale.setlocale(locale.LC_ALL, '')
default_charset = locale.getdefaultlocale()[1]
if default_charset:
options['charset'] = default_charset
prog_name = os.path.basename(sys.argv[0])
try:
optlist, args = getopt.getopt(sys.argv[1:], 'c:Ce:f:hlopqrRStvwz:',
['raw', 'pretty',
'single',
'output=',
'rename', 'copy', 'slink',
'fn-format=',
'cover', 'contents', 'tree',
'charset=', 'zip-charset=',
'elements=',
'dest-dir=',
'image-viewer=',
'replace', 'quiet', 'help'])
except getopt.GetoptError, err:
sys.exit('%s: %s\ntry %s --help for more information'
% (prog_name, err, prog_name))
help_msg = '''fb2desc -- show description of FB2 file(s)
Usage: %s [options] files|dir
  -w --raw                 output in raw format (default)
-p --pretty output in pretty format
-l --single output in single format
--output format output in format (raw, pretty, single, filename)
-o --contents show contents
  -t --tree                show the XML element tree
-v --cover show cover
-c --charset <charset> specify output charset (default: %s)
-z --zip-charset <charset>
-r --replace replace any chars
  -e --elements <elements> show only these elements (comma separated)
-R --rename rename mode
-S --slink create softlinks
-C --copy copy files
--fn-format <format> rename pattern (1, 2, 3, 4, 5, 6)
  --dest-dir <dir>         destination directory for rename mode
  --image-viewer <cmd>     external image viewer command (default: xv)
-q --quiet suppress output filename
-h --help display this help''' \
% (prog_name, default_charset)
for i in optlist:
if i[0] == '--help' or i[0] == '-h':
print help_msg
sys.exit()
elif i[0] in ('--charset', '-c'):
charset = i[1]
try:
codecs.lookup(charset)
except LookupError, err:
sys.exit('%s: %s' % (prog_name, err))
options['charset'] = charset
elif i[0] in ('-z', '--zip-charset'):
charset = i[1]
try:
codecs.lookup(charset)
except LookupError, err:
sys.exit('%s: %s' % (prog_name, err))
options['zip-charset'] = charset
elif i[0] == '--elements' or i[0] == '-e':
options['elements'] = i[1].split(',')
elif i[0] == '--output':
f = i[1]
if f not in ('raw', 'pretty', 'single', 'filename'):
sys.exit('''bad option for --output
must be raw, pretty, single, filename
''')
options['format'] = f
elif i[0] == '--raw' or i[0] == '-w':
options['format'] = 'raw'
elif i[0] == '--single' or i[0] == '-l':
options['format'] = 'single'
        elif i[0] == '--pretty' or i[0] == '-p':
options['format'] = 'pretty'
elif i[0] == '--replace' or i[0] == '-r':
options['replace'] = True
elif i[0] == '--rename' or i[0] == '-R':
options['rename'] = True
elif i[0] == '--slink' or i[0] == '-S':
options['rename'] = True
options['slink'] = True
elif i[0] == '--copy' or i[0] == '-C':
options['rename'] = True
options['copy'] = True
elif i[0] in ('--fn-format', '-f'):
f = i[1]
if f not in ('1', '2', '3', '4', '5', '6'):
sys.exit('''bad option for --fn-format
must be 1, 2, 3, 4, 5, 6
''')
options['fn-format'] = int(f)
elif i[0] == '--contents' or i[0] == '-o':
options['show-content'] = True
elif i[0] == '--cover' or i[0] == '-v':
options['show-cover'] = True
elif i[0] == '--tree' or i[0] == '-t':
options['show-tree'] = True
elif i[0] == '--quiet' or i[0] == '-q':
options['quiet'] = True
elif i[0] == '--dest-dir':
options['dest-dir'] = i[1]
elif i[0] == '--image-viewer':
options['image-viewer'] = i[1]
if len(args) == 0:
sys.exit('%s: missing filename\ntry %s --help for more information'
% (prog_name, prog_name))
in_files = []
for fn in args:
if os.path.isdir(fn):
for root, dirs, files in os.walk(fn):
for f in files:
in_files.append(os.path.join(root, f))
else:
in_files.append(fn)
#print in_files
#return
for raw_filename in in_files:
try:
filename = os.path.abspath(raw_filename)
filename = unicode(filename, options['charset'])
except UnicodeDecodeError, err:
#raise
#print >> sys.stderr, 'WARNING: decode filename:', str(err)
#continue
filename = '' # fixme
pass
if zipfile.is_zipfile(raw_filename):
options['suffix'] = '.fb2.zip'
zf = zipfile.ZipFile(raw_filename)
for zip_filename in zf.namelist():
data = zf.read(zip_filename)
try:
##zip_filename = unicode(zip_filename, options['charset'])
zip_filename = unicode(zip_filename, options['zip-charset'])
except UnicodeDecodeError, err:
print >> sys.stderr, 'WARNING: decode zip filename:', str(err)
zip_filename = ''
try:
fb2parse(filename, zip_filename, data)
except:
traceback.print_exc()
##shutil.copy(raw_filename, '/home/con/t/')
else:
if options['rename']:
continue
else:
options['suffix'] = '.fb2'
data = None
try:
data = open(raw_filename).read()
except IOError as e:
data = open(filename).read()
if data.startswith('BZh'):
import bz2
options['suffix'] = '.fb2.bz2'
data = bz2.decompress(data)
elif data.startswith('\x1f\x8b'):
import gzip
options['suffix'] = '.fb2.gz'
data = gzip.GzipFile(fileobj=StringIO(data)).read()
try:
fb2parse(filename, '', data)
except:
traceback.print_exc()
if __name__ == '__main__':
main()
|
vasnake/fb2tools
|
fb2tools/fb2desc.py
|
Python
|
gpl-3.0
| 24,174
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from typing import List, Dict, Set
from base.ddd.utils.business_validator import MultipleBusinessExceptions
from ddd.logic.encodage_des_notes.shared_kernel.domain.model.encoder_notes_rapport import EncoderNotesRapport
from ddd.logic.encodage_des_notes.shared_kernel.domain.service.i_inscription_examen import IInscriptionExamenTranslator
from ddd.logic.encodage_des_notes.shared_kernel.dtos import PeriodeEncodageNotesDTO
from ddd.logic.encodage_des_notes.shared_kernel.repository.i_encoder_notes_rapport import IEncoderNotesRapportRepository
from ddd.logic.encodage_des_notes.soumission.builder.note_etudiant_builder import NoteEtudiantBuilder
from ddd.logic.encodage_des_notes.soumission.builder.note_etudiant_identity_builder import NoteEtudiantIdentityBuilder
from ddd.logic.encodage_des_notes.soumission.commands import EncoderNotesEtudiantCommand
from ddd.logic.encodage_des_notes.soumission.domain.model.note_etudiant import IdentiteNoteEtudiant, NoteEtudiant
from ddd.logic.encodage_des_notes.soumission.domain.service.i_historiser_notes import IHistoriserNotesService
from ddd.logic.encodage_des_notes.soumission.domain.validator.exceptions import \
EncoderNotesEtudiantEnLotLigneBusinessExceptions, EtudiantNonInscritAExamenException
from ddd.logic.encodage_des_notes.soumission.dtos import DesinscriptionExamenDTO
from ddd.logic.encodage_des_notes.soumission.repository.i_note_etudiant import INoteEtudiantRepository
from osis_common.ddd import interface
class EncoderNotesEtudiantEnLot(interface.DomainService):
@classmethod
def execute(
cls,
cmd: 'EncoderNotesEtudiantCommand',
note_etudiant_repo: 'INoteEtudiantRepository',
periode_soumission: 'PeriodeEncodageNotesDTO',
historiser_note_service: 'IHistoriserNotesService',
inscription_examen_translator: 'IInscriptionExamenTranslator',
rapport: 'EncoderNotesRapport',
rapport_repository: 'IEncoderNotesRapportRepository'
) -> List['IdentiteNoteEtudiant']:
identite_builder = NoteEtudiantIdentityBuilder()
identites_notes_a_encoder = [
identite_builder.build_from_encoder_note_command(cmd, cmd_note)
for cmd_note in cmd.notes
]
notes_a_encoder = note_etudiant_repo.search(entity_ids=identites_notes_a_encoder)
notes_par_identite = {n.entity_id: n for n in notes_a_encoder} # type: Dict[IdentiteNoteEtudiant, NoteEtudiant]
desinscriptions = inscription_examen_translator.search_desinscrits_pour_plusieurs_unites_enseignement(
codes_unites_enseignement={n.code_unite_enseignement for n in notes_a_encoder},
numero_session=periode_soumission.session_concernee,
annee=periode_soumission.annee_concernee
)
exceptions = []
notes_a_persister = []
for note_encodee_cmd in cmd.notes:
identite = identite_builder.build_from_encoder_note_command(cmd, note_encodee_cmd)
note_a_modifier = notes_par_identite.get(identite)
if note_a_modifier:
try:
_verifier_etudiant_est_desinscrit(identite, desinscriptions)
nouvelle_note = NoteEtudiantBuilder().build_from_ancienne_note(
ancienne_note=note_a_modifier,
email_encode=note_encodee_cmd.email_etudiant,
nouvelle_note=note_encodee_cmd.note,
)
if note_a_modifier.note != nouvelle_note.note:
notes_a_persister.append(nouvelle_note)
except MultipleBusinessExceptions as e:
for business_exception in e.exceptions:
rapport.add_note_non_enregistree(
noma=note_a_modifier.noma,
numero_session=note_a_modifier.numero_session,
code_unite_enseignement=note_a_modifier.code_unite_enseignement,
annee_academique=note_a_modifier.annee,
cause=str(business_exception.message)
)
exceptions.append(
EncoderNotesEtudiantEnLotLigneBusinessExceptions(
note_id=identite,
exception=business_exception,
)
)
for note in notes_a_persister:
rapport.add_note_enregistree(
noma=note.noma,
numero_session=note.numero_session,
code_unite_enseignement=note.code_unite_enseignement,
annee_academique=note.annee,
)
note_etudiant_repo.save(note)
if notes_a_persister:
historiser_note_service.historiser_encodage(cmd.matricule_fgs_enseignant, notes_a_persister)
rapport_repository.save(rapport)
if exceptions:
raise MultipleBusinessExceptions(exceptions=exceptions)
return [n.entity_id for n in notes_a_persister]
def _verifier_etudiant_est_desinscrit(
identite_note_etudiant: IdentiteNoteEtudiant,
desinscriptions: Set[DesinscriptionExamenDTO]
) -> None:
    if any(
        desinscription.noma == identite_note_etudiant.noma
        and desinscription.code_unite_enseignement == identite_note_etudiant.code_unite_enseignement
        and desinscription.annee == identite_note_etudiant.annee_academique
        for desinscription in desinscriptions
    ):
raise MultipleBusinessExceptions(exceptions=[EtudiantNonInscritAExamenException()])
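# --- Hedged note (not part of the original module) ---
# execute() deliberately fails per line rather than per batch: invalid
# notes are collected into `exceptions` and reported through the rapport,
# valid notes are persisted first, and MultipleBusinessExceptions is only
# raised at the end, so one bad line never blocks the rest of the batch.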
|
uclouvain/osis
|
ddd/logic/encodage_des_notes/soumission/domain/service/encoder_notes_en_lot.py
|
Python
|
agpl-3.0
| 6,960
|
wsgi_app = "weasyl.wsgi:make_wsgi_app()"
proc_name = "weasyl"
# Import the application in each worker after forking, not in the master.
preload_app = False
# Treat a request as HTTPS when the upstream proxy sets this header/value pair.
secure_scheme_headers = {
    'X-FORWARDED-PROTO': 'https',
}
# Trust X-Forwarded-* headers from any upstream address.
forwarded_allow_ips = '*'
|
Weasyl/weasyl
|
gunicorn.conf.py
|
Python
|
apache-2.0
| 173
|
from collections import OrderedDict
from src.models import FlashscoreMatch
from src.settings import match_cache, Colors
@match_cache
def is_available(match: FlashscoreMatch) -> bool:
return (
match.ah_0_1_current_odds is not None and
match.ah_0_2_current_odds is not None and
match.home_draw_current_odds is not None and
match.home_away_current_odds is not None and
match.draw_away_current_odds is not None and
match.home_team_rank_across_home is not None and
match.away_team_rank_across_away is not None
)
@match_cache
def e1(match: FlashscoreMatch) -> bool:
return (
2.8 > match.home_current_odds > 1.9 and
match.away_current_odds > 2.45 and
        match.draw_current_odds >= 3 and
match.prediction_a3 < 2 and
match.prediction_a4 > 0 and
match.home_team_rank_across_home < 18 and
match.away_team_rank_across_away < 15 and
6.5 > (match.xxx or 0.) >= 3.67
)
@match_cache
def e2(match: FlashscoreMatch) -> bool:
return (
3.4 > match.home_current_odds > 2.45 and
3 > match.away_current_odds > 1.59 and
match.prediction_a4 == 0 and
match.prediction_a3 != 1 and
(match.xxx or 0.) >= 4
)
@match_cache
def a1(match: FlashscoreMatch) -> bool:
return (
match.away_current_odds < 2 and
2.5 < match.ah_0_1_current_odds < 5.25 and
match.prediction_a3 < 3 and
match.prediction_a4 < 2 and
(match.xxx or 0.) > 3.5
)
@match_cache
def h2(match: FlashscoreMatch) -> bool:
return (
match.home_current_odds < 2 and
match.prediction_a4 == 0 and
(match.xxx or 0.) > 4.16
)
@match_cache
def test(match: FlashscoreMatch) -> bool:
    return False
@match_cache
def other(match: FlashscoreMatch) -> bool:
    return not (e1(match) or e2(match) or a1(match) or h2(match) or test(match))
@match_cache
def bet(match: FlashscoreMatch) -> str:
values = OrderedDict([
('e1', e1(match)),
('e2', e2(match)),
('a1', a1(match)),
('h2', h2(match)),
('test', test(match))])
return ', '.join((key for key, value in values.items() if value))
@match_cache
def ah_0_1_color(match: FlashscoreMatch) -> Colors:
if e1(match) or a1(match):
return Colors.GREEN
return Colors.EMPTY
@match_cache
def ah_0_2_color(match: FlashscoreMatch) -> Colors:
if e2(match) or h2(match):
return Colors.GREEN
return Colors.EMPTY
@match_cache
def total_score_color(match: FlashscoreMatch) -> Colors:
if match.home_score is not None and match.away_score is not None:
if e1(match) or a1(match):
if match.home_score > match.away_score:
return Colors.GREEN
elif match.home_score < match.away_score:
return Colors.RED
elif e2(match) or h2(match):
if match.away_score > match.home_score:
return Colors.GREEN
elif match.away_score < match.home_score:
return Colors.RED
return Colors.EMPTY
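# --- Hedged note (not part of the original module) ---
# `bet` relies on the OrderedDict to report fired rules in a stable order,
# so a match satisfying e1 and a1 yields the string 'e1, a1':
#   ', '.join(k for k, v in [('e1', True), ('e2', False), ('a1', True)] if v)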
|
vapkarian/soccer-analyzer
|
src/versions/f4.py
|
Python
|
mit
| 3,131
|
import os
import multiprocessing as mp
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from logger import Logger
class LogListener(QThread):
def __init__(self, *log_paths, parent=None):
QThread.__init__(self, parent)
        self.log_paths = list(log_paths)
if __name__ == '__main__':
pass
|
mvwicky/ScotusScraper
|
log_listener.py
|
Python
|
mit
| 375
|
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import numpy as np
def chooseChunkShape(outerShape, desiredChunkSize):
'''
Choose a chunk shape that
* is less than or equal the desired chunk size
* respects the aspect ratio of the outer shape
Note that you will most likely have to handle channel and time dimension
differently (i.e. set them to 1 or max).
Each dimension will be at least 1 and at most the outer shape.
@param outerShape the shape of the volume as tuple of ints
@param desiredChunkSize the chunk size in pixels (not bytes!)
@return the 'optimal' chunk shape as tuple of ints
'''
    x = np.array(outerShape, dtype=int)  # np.int was removed from modern NumPy
assert np.all(x > 0)
size = np.prod(x)
n = len(x)
assert n > 0
if desiredChunkSize >= size:
return tuple(x)
if desiredChunkSize <= 0:
return (1,)*n
# determine the factor (f*y_1 * ... f*y_n = x_1 * ... * x_n)
# y_1 * ... * y_n = desiredChunkSize
# x_1 * ... * x_n = size
# f^n = size/desiredChunkSize
f = np.power(size/float(desiredChunkSize), 1/float(n))
y = np.floor(x/f)
    y = np.maximum(y, 1).astype(int)
return tuple(y)
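# --- Hedged usage sketch (not part of the original module) ---
# The docstring derives the scale factor from f^n = size / desiredChunkSize;
# the example shape below is an arbitrary illustration of the size bound.
if __name__ == '__main__':
    shape = chooseChunkShape((100, 200, 300), desiredChunkSize=100000)
    print(shape)                       # -> (25, 51, 76)
    assert np.prod(shape) <= 100000    # 25 * 51 * 76 == 96900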
|
stuarteberg/lazyflow
|
lazyflow/utility/chunkHelpers.py
|
Python
|
lgpl-3.0
| 2,235
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app as absl_app
from absl import flags
from six.moves import range
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.r1.mnist import dataset
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
LEARNING_RATE = 1e-4
def create_model(data_format):
"""Model to recognize digits in the MNIST dataset.
Network structure is equivalent to:
https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py
and
https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py
But uses the tf.keras API.
Args:
data_format: Either 'channels_first' or 'channels_last'. 'channels_first' is
typically faster on GPUs while 'channels_last' is typically faster on
CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
Returns:
A tf.keras.Model.
"""
if data_format == 'channels_first':
input_shape = [1, 28, 28]
else:
assert data_format == 'channels_last'
input_shape = [28, 28, 1]
l = tf.keras.layers
max_pool = l.MaxPooling2D(
(2, 2), (2, 2), padding='same', data_format=data_format)
# The model consists of a sequential chain of layers, so tf.keras.Sequential
# (a subclass of tf.keras.Model) makes for a compact description.
return tf.keras.Sequential(
[
l.Reshape(
target_shape=input_shape,
input_shape=(28 * 28,)),
l.Conv2D(
32,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu),
max_pool,
l.Conv2D(
64,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu),
max_pool,
l.Flatten(),
l.Dense(1024, activation=tf.nn.relu),
l.Dropout(0.4),
l.Dense(10)
])
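# Hedged sanity check (not in the original file): pushing one dummy batch
# through the model should yield one logit per digit class, e.g.
#   model = create_model('channels_last')
#   logits = model(tf.zeros([8, 28 * 28]))   # -> shape (8, 10)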
def define_mnist_flags():
"""Defines flags for mnist."""
flags_core.define_base(clean=True, train_epochs=True,
epochs_between_evals=True, stop_threshold=True,
num_gpu=True, hooks=True, export_dir=True,
distribution_strategy=True)
flags_core.define_performance(inter_op=True, intra_op=True,
num_parallel_calls=False,
all_reduce_alg=True)
flags_core.define_image()
flags.adopt_module_key_flags(flags_core)
flags_core.set_defaults(data_dir='/tmp/mnist_data',
model_dir='/tmp/mnist_model',
batch_size=100,
train_epochs=40)
def model_fn(features, labels, mode, params):
"""The model_fn argument for creating an Estimator."""
model = create_model(params['data_format'])
image = features
if isinstance(image, dict):
image = features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(image, training=False)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits),
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=LEARNING_RATE)
logits = model(image, training=True)
loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=labels,
logits=logits)
accuracy = tf.compat.v1.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=1))
# Name tensors to be logged with LoggingTensorHook.
tf.identity(LEARNING_RATE, 'learning_rate')
tf.identity(loss, 'cross_entropy')
tf.identity(accuracy[1], name='train_accuracy')
# Save accuracy scalar to Tensorboard output.
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=optimizer.minimize(
loss,
tf.compat.v1.train.get_or_create_global_step()))
if mode == tf.estimator.ModeKeys.EVAL:
logits = model(image, training=False)
    loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=labels,
                                                            logits=logits)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy':
              tf.compat.v1.metrics.accuracy(
                  labels=labels, predictions=tf.argmax(logits, axis=1)),
})
def run_mnist(flags_obj):
"""Run MNIST training and eval loop.
Args:
flags_obj: An object containing parsed flag values.
"""
model_helpers.apply_clean(flags_obj)
model_function = model_fn
session_config = tf.compat.v1.ConfigProto(
inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
allow_soft_placement=True)
distribution_strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=flags_core.get_num_gpus(flags_obj),
all_reduce_alg=flags_obj.all_reduce_alg)
run_config = tf.estimator.RunConfig(
train_distribute=distribution_strategy, session_config=session_config)
data_format = flags_obj.data_format
if data_format is None:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
mnist_classifier = tf.estimator.Estimator(
model_fn=model_function,
model_dir=flags_obj.model_dir,
config=run_config,
params={
'data_format': data_format,
})
# Set up training and evaluation input functions.
def train_input_fn():
"""Prepare data for training."""
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes use less memory. MNIST is a small
# enough dataset that we can easily shuffle the full epoch.
ds = dataset.train(flags_obj.data_dir)
ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)
# Iterate through the dataset a set number (`epochs_between_evals`) of times
# during each training session.
ds = ds.repeat(flags_obj.epochs_between_evals)
return ds
def eval_input_fn():
return dataset.test(flags_obj.data_dir).batch(
flags_obj.batch_size).make_one_shot_iterator().get_next()
# Set up hook that outputs training logs every 100 steps.
train_hooks = hooks_helper.get_train_hooks(
flags_obj.hooks, model_dir=flags_obj.model_dir,
batch_size=flags_obj.batch_size)
# Train and evaluate model.
for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):
mnist_classifier.train(input_fn=train_input_fn, hooks=train_hooks)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print('\nEvaluation results:\n\t%s\n' % eval_results)
if model_helpers.past_stop_threshold(flags_obj.stop_threshold,
eval_results['accuracy']):
break
# Export the model
if flags_obj.export_dir is not None:
image = tf.compat.v1.placeholder(tf.float32, [None, 28, 28])
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'image': image,
})
mnist_classifier.export_savedmodel(flags_obj.export_dir, input_fn,
strip_default_attrs=True)
def main(_):
run_mnist(flags.FLAGS)
if __name__ == '__main__':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
define_mnist_flags()
absl_app.run(main)
|
alexgorban/models
|
official/r1/mnist/mnist.py
|
Python
|
apache-2.0
| 8,798
|
"""
Dataset for images and related functionality.
This module does not have dependencies inside pyl2extra package, so you
can just copy-paste it inside your source tree.
To use this dataset prepare a .csv file with targets (integers or real numbers)
on first column and file paths on the second column:
.. code::
0,file1.png
1,file2.png
Image file paths are relative to current directory (``os.getcwd()``). The
images need not be square and can be in any format recognized by the
``Image`` module. Internally, the images are converted to RGB and are made
square for you.
Use it in a .yaml file like so:
.. code::
dataset: &trndataset !obj:pyl2extra.datasets.images.Images {
source: 'train.csv',
image_size: 128
}
The ``image_size`` can be skipped, in which case the size of the images is
derived from first image that is provided.
By default the class assumes a classification problem (targets are integers).
If you need to uset it in a regression problem create it like so:
.. code::
dataset: &trndataset !obj:pyl2extra.datasets.images.Images {
source: 'train.csv',
image_size: 128,
regression: True
}
As the dataset simply wraps the ``DenseDesignMatrix``, parameters like
``rng`` (random number generator), ``preprocessor`` and ``fit_preprocessor``
can be used and will be passed to ``DenseDesignMatrix`` superclass.
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "nicu.tofan@gmail.com"
import csv
import numpy
import os
from PIL import Image
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
import theano
class Images(DenseDesignMatrix):
"""
A pylearn2 dataset that loads the images from a list or csv file.
Please note that - if you use this dataset and your model has a
final Softmax layer you should construct it like so (YAML syntax):
.. code::
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
irange: .0,
n_classes: %(classes)d,
binary_target_dim: 1
}
where ``classes`` is the same number of classes passed to ``Images``
constructor. ``binary_target_dim`` is important and failing to set it
constructs the wrong architecture, causing errors like:
ValueError: VectorSpace(dim=1, dtype=float32) with total dimension 1
can't format a batch into VectorSpace(dim=X, dtype=float32) because
its total dimension is X.
Parameters
----------
source: OrderedDict, dict, str, tuple, list
This argument provides the input images and (optionally)
associated categories. The meaning of the argument depends
on the data type:
- if ``source`` is a string, it is interpreted to be the
path towards a csv file; the file must NOT have a header,
first column must contain the targets (classes or values) and
second column must contain the paths for the image files;
- if ``source`` is a dictionary, the keys must be the
paths for image files, ``Image`` instances or numpy arrays and
the values must be the classes or values (None or empty
string if this instance does not provide the labels);
- a tuple or list must have exactly one or two
members: first one must be a list or tuple of image paths or
Images or numpy arrays, while second one (optional)
has the targets (classes as integers or real values).
image_size: int, optional
The size of the images in the final dataset. All images
will be resized to be ``image_size`` x ``image_size``
pixels.
classes: int, optional
If this is a classification problem the parameter should be
used to indicate the total number of classes and targets are
expected to be integers in the range ``[0; classes-1]``.
If this is a regression problem the parameter should be ``None`` and
targets are expected to be real numbers.
rng: object, optional
A random number generator used for picking random \
indices into the design matrix when choosing minibatches.
preprocessor: Preprocessor, optional
Preprocessor to apply to images.
fit_preprocessor: bool, optional
Whether preprocessor can fit parameters when applied to training
data.
"""
def __init__(self, source, image_size=None, classes=None,
rng=None, preprocessor=None, fit_preprocessor=False):
#: preserve original argument for future reference
self.source = source
#: Number of classes (None for regression)
self.classes = classes
# all images are loaded in ``ind`` variable
ind = _init_input(source)
# DenseDesignMatrix expects us to provide a numpy array
# we choose to have number of examples on first axis ('b'),
# then rows and columns of the image, then the channels
# always 3 in our case
self.axes = ('b', 0, 1, 'c')
if image_size is None:
dense_x = None
else:
dense_x = numpy.zeros(shape=(len(ind), image_size, image_size, 3),
dtype='uint8')
categories = []
has_targets = False
for i, (img, ctg) in enumerate(ind):
if isinstance(img, Image.Image):
img = numpy.array(img)
width = img.shape[1]
height = img.shape[0]
largest = max(width, height)
if image_size is None:
# if the user did not specify an image size we determine
# the size using the first image that we encounter; this is
# usefull if all images are already of required size,
# for example
image_size = largest
dense_x = numpy.zeros(shape=(len(ind), image_size,
image_size, 3),
dtype='uint8')
imgin = img
# do we need to enlarge / shrink the image?
elif largest != image_size:
wpercent = image_size / float(largest)
width = int(width * wpercent)
height = int(height * wpercent)
largest = max(width, height)
# inefficient? could use scipy.ndimage.zoom.
img_tmp = Image.fromarray(img)
img_tmp = img_tmp.resize((width, height), Image.ANTIALIAS)
imgin = numpy.array(img_tmp)
else:
imgin = img
delta_x = (largest - width) / 2
delta_y = (largest - height) / 2
delta_x2 = delta_x + width
delta_y2 = delta_y + height
dense_x[i, delta_y:delta_y2, delta_x:delta_x2, :] = imgin
categories.append(ctg)
if ctg != '':
has_targets = True
dense_x = numpy.cast[theano.config.floatX](dense_x)
# if we have categories / values convert them to proper format
if has_targets:
if classes is None:
# in regression we expect real values
dense_y = numpy.empty(shape=(len(ind), 1),
dtype=theano.config.floatX)
for i, ctg in enumerate(categories):
dense_y[i, 0] = float(ctg)
else:
# in classification we expect integers
dense_y = numpy.empty(shape=(len(ind), 1), dtype=int)
for i, ctg in enumerate(categories):
dense_y[i, 0] = int(ctg)
else:
dense_y = None
if rng is None:
rng = DenseDesignMatrix._default_seed
# everything else is handled by the DenseDesignMatrix superclass
super(Images, self).__init__(topo_view=dense_x,
y=dense_y,
axes=self.axes,
view_converter=None,
preprocessor=preprocessor,
fit_preprocessor=fit_preprocessor,
X_labels=None,
y_labels=classes if has_targets else None)
def _init_input(source):
"""
Homogenize sources.
"""
if isinstance(source, basestring):
# this is a csv file that we're going to read
result = _load_list(_load_csv(source))
elif isinstance(source, dict):
# keys are file names, values are classes
result = _load_list(source.items())
elif isinstance(source, (list, tuple)):
# one item lists the files, the other lists the classes
if len(source) == 1:
result = _load_list([(src, None) for src in source[0]])
elif len(source) == 2:
if len(source[0]) == len(source[1]):
result = _load_list(zip(source[0], source[1]))
else:
raise ValueError("Lists/tuples provded to Images class "
"constructor are expected to have "
"same length (%d != %d)" %
(len(source[0]), len(source[1])))
else:
raise ValueError("Lists/tuples provided to Images class "
"constructor are expected to have one "
"(images only) or two members (images"
" and classes); the input has %d members." %
len(source))
else:
raise ValueError("Images class expects for its `source` argument "
"a file path (string), a dictionary of "
"file:class pairs, or a pair of lists (tuples); "
"%s is not supported" % str(source.__class__))
return result
def _load_csv(csv_path):
"""
Internal function for loading the content from a .csv file.
Parameters
----------
csv_path: str
The path towards the .csv file to read.
Returns
-------
result: list of tuples
The method creates a list of tuples that should be passed to
`_load_list()`.
"""
# we're going to accumulate files and categories here
result = []
# compute absolute path of the source csv file
csv_path = os.path.abspath(csv_path)
with open(csv_path, 'rt') as fhand:
# the reader is flexible, allowing delimiters
# other than comma; quotation can also be customized
csvr = csv.reader(fhand,
delimiter=',',
quotechar='"')
# the reader will give us a list for each row of
# the source file
for row in csvr:
# we're going to skip empty rows without warning
if len(row) == 0:
continue
# we could skip the header here, if present; we
# could even detect the column index from its
# name; but we try to keep the things simple
# class/value is always first, file path second
result.append((row[1], row[0]))
return result
def _load_list(srclist):
"""
Internal function for loading the content from a list.
Image files are converted to `numpy.ndarray`;
    empty classes are normalized to a string of length 0.
Parameters
----------
srclist: list of tuples
A list of tuples, with first entry in tuple being
a string, an Image or `numpy.ndarray` instances and
second being classes (None for no class).
Returns
-------
result: list of tuples
The method creates a list of tuples, with first entry in tuple being
`numpy.ndarray` instances and second being targets (None for no
target) - integer classes (classification) or real values
(regression).
"""
# we're going to accumulate Images and categories here
result = []
for img, cls in srclist:
if isinstance(img, basestring):
imgin = Image.open(img)
elif isinstance(img, numpy.ndarray):
imgin = Image.fromarray(img)
elif isinstance(img, Image.Image):
imgin = img
elif Image.isImageType(img):
imgin = img
else:
raise ValueError("Valid input for images are strings (a "
"path towards a file), pil images "
"and numpy arrays; %s is not supported" %
str(img.__class__))
if cls is None:
cls = ''
imgin = imgin.convert('RGB')
result.append((numpy.array(imgin), cls))
return result
def one_image(image, image_size=None, classes=None,
rng=None, preprocessor=None, fit_preprocessor=False):
"""
Convenience function that creates an Images dataset from a single image.
Parameters
----------
image: string, image or numpy.ndarray
The image to use as source.
See :class:`Images` for a description of other parameters.
"""
return Images(source=((image,),),
image_size=image_size, classes=classes,
rng=rng, preprocessor=preprocessor,
fit_preprocessor=fit_preprocessor)
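# --- Hedged usage sketch (not part of the original module; assumes
# pylearn2 and PIL are importable) --- The module docstring describes csv
# sources; the (images, targets) tuple form works without any files:
if __name__ == '__main__':
    fake_img = numpy.zeros((48, 64, 3), dtype='uint8')
    ds = Images(source=((fake_img,), (0,)), image_size=32, classes=1)
    print(ds.X.shape)  # one example, flattened 32 x 32 x 3 topology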
|
TNick/pyl2extra
|
pyl2extra/datasets/images.py
|
Python
|
bsd-3-clause
| 13,590
|
import argparse
import os
from joblib import Parallel, delayed
import numpy as np
import autosklearn
import autosklearn.data
import autosklearn.data.competition_data_manager
from autosklearn.pipeline.classification import SimpleClassificationPipeline
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
input = args.input
dataset = 'helena'
output = args.output
path = os.path.join(input, dataset)
D = autosklearn.data.competition_data_manager.CompetitionDataManager(path)
X = D.data['X_train']
y = D.data['Y_train']
X_valid = D.data['X_valid']
X_test = D.data['X_test']
# Replace the following array by a new ensemble
choices = \
[(0.220000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'adaboost',
'classifier:adaboost:algorithm': 'SAMME.R',
'classifier:adaboost:learning_rate': 0.12736378214916136,
'classifier:adaboost:max_depth': 2,
'classifier:adaboost:n_estimators': 102,
'imputation:strategy': 'mean',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'no_preprocessing',
'rescaling:__choice__': 'min/max'})),
(0.140000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'liblinear_svc',
'classifier:liblinear_svc:C': 34.52330718740001,
'classifier:liblinear_svc:dual': 'False',
'classifier:liblinear_svc:fit_intercept': 'True',
'classifier:liblinear_svc:intercept_scaling': 1,
'classifier:liblinear_svc:loss': 'squared_hinge',
'classifier:liblinear_svc:multi_class': 'ovr',
'classifier:liblinear_svc:penalty': 'l2',
'classifier:liblinear_svc:tol': 0.010305332230700001,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:minimum_fraction': 0.00012464201046600006,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'polynomial',
'preprocessor:polynomial:degree': 2,
'preprocessor:polynomial:include_bias': 'True',
'preprocessor:polynomial:interaction_only': 'False',
'rescaling:__choice__': 'none'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.1473936812138448,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 11,
'classifier:random_forest:min_samples_split': 10,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'fast_ica',
'preprocessor:fast_ica:algorithm': 'parallel',
'preprocessor:fast_ica:fun': 'logcosh',
'preprocessor:fast_ica:n_components': 945,
'preprocessor:fast_ica:whiten': 'True',
'rescaling:__choice__': 'none'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 1.3455409527727558,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'pca',
'preprocessor:pca:keep_variance': 0.7598172817638718,
'preprocessor:pca:whiten': 'False',
'rescaling:__choice__': 'standardize'})),
(0.060000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.873556221817867,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.007384474684230516,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.44352666713957484,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'f_classif',
'rescaling:__choice__': 'normalize'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'lda',
'classifier:lda:n_components': 12,
'classifier:lda:shrinkage': 'manual',
'classifier:lda:shrinkage_factor': 0.9016175646665451,
'classifier:lda:tol': 0.0001716207118446579,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.009728842857612658,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'fast_ica',
'preprocessor:fast_ica:algorithm': 'parallel',
'preprocessor:fast_ica:fun': 'logcosh',
'preprocessor:fast_ica:n_components': 914,
'preprocessor:fast_ica:whiten': 'True',
'rescaling:__choice__': 'standardize'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 4.213462678722325,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.020501216047798837,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'f_classif',
'rescaling:__choice__': 'standardize'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 4.367371232039595,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.01303718715506049,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'none'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.286051530772571,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.026747542179073727,
'preprocessor:select_rates:mode': 'fwe',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'min/max'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.907981363846062,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.38925641117203025,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'f_classif',
'rescaling:__choice__': 'none'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.873556221817867,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.007384474684230516,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 709.0694499917347,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 0.013228763477510586,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 3.0174234734498917,
'classifier:extra_trees:min_samples_leaf': 2,
'classifier:extra_trees:min_samples_split': 12,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.007553789957243724,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 31.20787569423215,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 1.7149340429765088e-05,
'rescaling:__choice__': 'min/max'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 4.592027482980136,
'classifier:extra_trees:min_samples_leaf': 12,
'classifier:extra_trees:min_samples_split': 12,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.003355962206220629,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 0.14162959993684351,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 0.009394425053603682,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 2.655307919311661,
'classifier:extra_trees:min_samples_leaf': 2,
'classifier:extra_trees:min_samples_split': 16,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.00019806605573813597,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 18.30206355212093,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 3.267407083806816e-05,
'rescaling:__choice__': 'normalize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 1.2325644317889806,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.01303718715506049,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.286051530772571,
'imputation:strategy': 'mean',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 1.3440864854665975,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 8,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 17,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 8.519756045823158,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.08901572125739037,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 3.842249530515841,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 13,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 10,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'none'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 3.7289920990557777,
'classifier:extra_trees:min_samples_leaf': 3,
'classifier:extra_trees:min_samples_split': 13,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.00037734441447340595,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 0.6186775496832956,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 1.710156140413348e-05,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 4.430190032276566,
'classifier:extra_trees:min_samples_leaf': 5,
'classifier:extra_trees:min_samples_split': 9,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.0027303638882864483,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 30343.867455246524,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 0.005743178077382402,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.8440666453536427,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 1,
'classifier:random_forest:min_samples_split': 14,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 28279.093774727116,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 0.0010803540483296555,
'rescaling:__choice__': 'min/max'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 1.3455409527727558,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:minimum_fraction': 0.04805977625874754,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 3.6600607240096594,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 18,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 18,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'none'})),
]
predictions_valid = []
predictions_test = []
def fit_and_predict(estimator, weight, X, y):
try:
estimator.fit(X.copy(), y.copy())
pv = estimator.predict_proba(X_valid.copy()) * weight
pt = estimator.predict_proba(X_test.copy()) * weight
except Exception as e:
print(e)
print(estimator.configuration)
pv = None
pt = None
return pv, pt
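# --- Hedged check (not in the original script) ---
# The ensemble is a weighted soft vote: each pipeline contributes
# weight * predict_proba(...) and the per-class sums become the final
# predictions, so the hand-tuned weights should sum to 1.0 to keep the
# output a probability distribution.
assert abs(sum(weight for weight, _ in choices) - 1.0) < 1e-9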
# Make predictions and weight them
all_predictions = Parallel(n_jobs=-1)(delayed(fit_and_predict) \
(estimator, weight, X, y) for
weight, estimator in choices)
for pv, pt in all_predictions:
    # Skip pipelines whose fit or predict failed (they returned None);
    # otherwise np.sum over the stacked array would choke on None rows.
    if pv is not None:
        predictions_valid.append(pv)
    if pt is not None:
        predictions_test.append(pt)
# Output the predictions
for name, predictions in [('valid', predictions_valid),
('test', predictions_test)]:
predictions = np.array(predictions)
predictions = np.sum(predictions, axis=0).astype(np.float32)
filepath = os.path.join(output, '%s_%s_000.predict' % (dataset, name))
np.savetxt(filepath, predictions, delimiter=' ', fmt='%.4e')
|
automl/ChaLearn_Automatic_Machine_Learning_Challenge_2015
|
004_helena.py
|
Python
|
bsd-2-clause
| 22,816
|
from openid.consumer import consumer
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import ax, pape, sreg
from . import util
PAPE_POLICIES = [
'AUTH_PHISHING_RESISTANT',
'AUTH_MULTI_FACTOR',
'AUTH_MULTI_FACTOR_PHYSICAL',
]
AX_REQUIRED_FIELDS = {
'firstname' : 'http://axschema.org/namePerson/first',
'lastname' : 'http://axschema.org/namePerson/last',
'fullname' : 'http://axschema.org/namePerson',
'email' : 'http://axschema.org/contact/email'
}
# List of (name, uri) for use in generating the request form.
POLICY_PAIRS = [(p, getattr(pape, p))
for p in PAPE_POLICIES]
def getOpenIDStore():
"""
Return an OpenID store object fit for the currently-chosen
database backend, if any.
"""
return util.getOpenIDStore('/tmp/djopenid_c_store', 'c_')
def get_consumer(session):
"""
Get a Consumer object to perform OpenID authentication.
"""
return consumer.Consumer(session, getOpenIDStore())
def start_openid(session, openid_url, trust_root, return_to):
"""
Start the OpenID authentication process.
* Requests some Simple Registration data using the OpenID
library's Simple Registration machinery
* Generates the appropriate trust root and return URL values for
this application (tweak where appropriate)
* Generates the appropriate redirect based on the OpenID protocol
version.
"""
# Start OpenID authentication.
c = get_consumer(session)
try:
auth_request = c.begin(openid_url)
except DiscoveryFailure as e:
# Some other protocol-level failure occurred.
raise Exception("error in openid: OpenID discovery error") from e
# Add Simple Registration request information. Some fields
# are optional, some are required. It's possible that the
# server doesn't support sreg or won't return any of the
# fields.
sreg_request = sreg.SRegRequest(required=['email'],
optional=[])
auth_request.addExtension(sreg_request)
# Add Attribute Exchange request information.
ax_request = ax.FetchRequest()
# XXX - uses myOpenID-compatible schema values, which are
# not those listed at axschema.org.
    for v in AX_REQUIRED_FIELDS.values():
        ax_request.add(ax.AttrInfo(v, required=True))
auth_request.addExtension(ax_request)
# Compute the trust root and return URL values to build the
# redirect information.
# trust_root = util.getViewURL(request, startOpenID)
# return_to = util.getViewURL(request, finishOpenID)
# Send the browser to the server either by sending a redirect
# URL or by generating a POST form.
url = auth_request.redirectURL(trust_root, return_to)
return url
def finish_openid(session, request_args, return_to):
"""
Finish the OpenID authentication process. Invoke the OpenID
library with the response from the OpenID server and render a page
detailing the result.
"""
result = {}
# Because the object containing the query parameters is a
# MultiValueDict and the OpenID library doesn't allow that, we'll
# convert it to a normal dict.
if request_args:
c = get_consumer(session)
# Get a response object indicating the result of the OpenID
# protocol.
response = c.complete(request_args, return_to)
# Get a Simple Registration response object if response
# information was included in the OpenID response.
sreg_response = {}
ax_items = {}
if response.status == consumer.SUCCESS:
sreg_response = sreg.SRegResponse.fromSuccessResponse(response)
ax_response = ax.FetchResponse.fromSuccessResponse(response)
if ax_response:
for k, v in AX_REQUIRED_FIELDS.items():
"""
the values are the URIs, they are the key into the data
the key is the shortname
"""
if v in ax_response.data:
ax_items[k] = ax_response.get(v)
# Map different consumer status codes to template contexts.
results = {
consumer.CANCEL:
{'message': 'OpenID authentication cancelled.'},
consumer.FAILURE:
{'error': 'OpenID authentication failed.'},
consumer.SUCCESS:
{'url': response.getDisplayIdentifier(),
'sreg': sreg_response and list(sreg_response.items()),
'ax': ax_items}
}
result = results[response.status]
if isinstance(response, consumer.FailureResponse):
# In a real application, this information should be
# written to a log for debugging/tracking OpenID
# authentication failures. In general, the messages are
# not user-friendly, but intended for developers.
result['failure_reason'] = response.message
return result
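# --- Hedged usage sketch (not part of the original module; the URLs and
# the plain-dict session are placeholders) ---
#   session = {}
#   url = start_openid(session, 'https://example.com/openid',
#                      'https://myapp.example/', 'https://myapp.example/finish')
#   # redirect the browser to `url`; when it comes back with query args:
#   result = finish_openid(session, request_args, 'https://myapp.example/finish')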
|
benadida/helios-server
|
helios_auth/auth_systems/openid/view_helpers.py
|
Python
|
apache-2.0
| 5,049
|
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import re
import uuid
from django.apps import apps
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import UserManager, AbstractBaseUser
from django.core import validators
from django.utils import timezone
from django_pgjson.fields import JsonField
from djorm_pgarray.fields import TextArrayField
from taiga.auth.tokens import get_token_for_user
from taiga.base.utils.slug import slugify_uniquely
from taiga.base.utils.files import get_file_path
from taiga.permissions.permissions import MEMBERS_PERMISSIONS
from taiga.projects.choices import BLOCKED_BY_OWNER_LEAVING
from taiga.projects.notifications.choices import NotifyLevel
from easy_thumbnails.files import get_thumbnailer
def generate_random_hex_color():
return "#{:06x}".format(random.randint(0,0xFFFFFF))
def get_user_file_path(instance, filename):
return get_file_path(instance, filename, "user")
class PermissionsMixin(models.Model):
"""
A mixin class that adds the fields and methods necessary to support
Django"s Permission model using the ModelBackend.
"""
is_superuser = models.BooleanField(_("superuser status"), default=False,
help_text=_("Designates that this user has all permissions without "
"explicitly assigning them."))
class Meta:
abstract = True
def has_perm(self, perm, obj=None):
"""
Returns True if the user is superadmin and is active
"""
return self.is_active and self.is_superuser
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user is superadmin and is active
"""
return self.is_active and self.is_superuser
def has_module_perms(self, app_label):
"""
Returns True if the user is superadmin and is active
"""
return self.is_active and self.is_superuser
@property
def is_staff(self):
return self.is_superuser
class User(AbstractBaseUser, PermissionsMixin):
username = models.CharField(_("username"), max_length=255, unique=True,
help_text=_("Required. 30 characters or fewer. Letters, numbers and "
"/./-/_ characters"),
validators=[
                                    validators.RegexValidator(re.compile(r"^[\w.-]+$"), _("Enter a valid username."), "invalid")
])
email = models.EmailField(_("email address"), max_length=255, blank=True, unique=True)
is_active = models.BooleanField(_("active"), default=True,
help_text=_("Designates whether this user should be treated as "
"active. Unselect this instead of deleting accounts."))
full_name = models.CharField(_("full name"), max_length=256, blank=True)
color = models.CharField(max_length=9, null=False, blank=True, default=generate_random_hex_color,
verbose_name=_("color"))
bio = models.TextField(null=False, blank=True, default="", verbose_name=_("biography"))
photo = models.FileField(upload_to=get_user_file_path,
max_length=500, null=True, blank=True,
verbose_name=_("photo"))
date_joined = models.DateTimeField(_("date joined"), default=timezone.now)
lang = models.CharField(max_length=20, null=True, blank=True, default="",
verbose_name=_("default language"))
theme = models.CharField(max_length=100, null=True, blank=True, default="",
verbose_name=_("default theme"))
timezone = models.CharField(max_length=20, null=True, blank=True, default="",
verbose_name=_("default timezone"))
colorize_tags = models.BooleanField(null=False, blank=True, default=False,
verbose_name=_("colorize tags"))
token = models.CharField(max_length=200, null=True, blank=True, default=None,
verbose_name=_("token"))
email_token = models.CharField(max_length=200, null=True, blank=True, default=None,
verbose_name=_("email token"))
new_email = models.EmailField(_("new email address"), null=True, blank=True)
is_system = models.BooleanField(null=False, blank=False, default=False)
max_private_projects = models.IntegerField(null=True, blank=True,
default=settings.MAX_PRIVATE_PROJECTS_PER_USER,
verbose_name=_("max number of private projects owned"))
max_public_projects = models.IntegerField(null=True, blank=True,
default=settings.MAX_PUBLIC_PROJECTS_PER_USER,
verbose_name=_("max number of public projects owned"))
max_members_private_projects = models.IntegerField(null=True, blank=True,
default=settings.MAX_MEMBERS_PRIVATE_PROJECTS,
verbose_name=_("max number of memberships for "
"each owned private project"))
max_members_public_projects = models.IntegerField(null=True, blank=True,
default=settings.MAX_MEMBERS_PUBLIC_PROJECTS,
verbose_name=_("max number of memberships for "
"each owned public project"))
_cached_memberships = None
_cached_liked_ids = None
_cached_watched_ids = None
_cached_notify_levels = None
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["email"]
objects = UserManager()
class Meta:
verbose_name = "user"
verbose_name_plural = "users"
ordering = ["username"]
def __str__(self):
return self.get_full_name()
def _fill_cached_memberships(self):
self._cached_memberships = {}
qs = self.memberships.prefetch_related("user", "project", "role")
for membership in qs.all():
self._cached_memberships[membership.project.id] = membership
@property
def cached_memberships(self):
if self._cached_memberships is None:
self._fill_cached_memberships()
return self._cached_memberships.values()
def cached_membership_for_project(self, project):
if self._cached_memberships is None:
self._fill_cached_memberships()
return self._cached_memberships.get(project.id, None)
def is_fan(self, obj):
if self._cached_liked_ids is None:
self._cached_liked_ids = set()
for like in self.likes.select_related("content_type").all():
like_id = "{}-{}".format(like.content_type.id, like.object_id)
self._cached_liked_ids.add(like_id)
obj_type = ContentType.objects.get_for_model(obj)
obj_id = "{}-{}".format(obj_type.id, obj.id)
return obj_id in self._cached_liked_ids
def is_watcher(self, obj):
if self._cached_watched_ids is None:
self._cached_watched_ids = set()
for watched in self.watched.select_related("content_type").all():
watched_id = "{}-{}".format(watched.content_type.id, watched.object_id)
self._cached_watched_ids.add(watched_id)
notify_policies = self.notify_policies.select_related("project")\
.exclude(notify_level=NotifyLevel.none)
for notify_policy in notify_policies:
obj_type = ContentType.objects.get_for_model(notify_policy.project)
watched_id = "{}-{}".format(obj_type.id, notify_policy.project.id)
self._cached_watched_ids.add(watched_id)
obj_type = ContentType.objects.get_for_model(obj)
obj_id = "{}-{}".format(obj_type.id, obj.id)
return obj_id in self._cached_watched_ids
def get_notify_level(self, project):
if self._cached_notify_levels is None:
self._cached_notify_levels = {}
for notify_policy in self.notify_policies.select_related("project"):
self._cached_notify_levels[notify_policy.project.id] = notify_policy.notify_level
return self._cached_notify_levels.get(project.id, None)
def get_short_name(self):
"Returns the short name for the user."
return self.username
def get_full_name(self):
return self.full_name or self.username or self.email
def save(self, *args, **kwargs):
get_token_for_user(self, "cancel_account")
super().save(*args, **kwargs)
def cancel(self):
self.username = slugify_uniquely("deleted-user", User, slugfield="username")
self.email = "{}@taiga.io".format(self.username)
self.is_active = False
self.full_name = "Deleted user"
self.color = ""
self.bio = ""
self.lang = ""
self.theme = ""
self.timezone = ""
self.colorize_tags = True
self.token = None
self.set_unusable_password()
self.photo = None
self.save()
self.auth_data.all().delete()
        # Block all owned projects
self.owned_projects.update(blocked_code=BLOCKED_BY_OWNER_LEAVING)
class Role(models.Model):
name = models.CharField(max_length=200, null=False, blank=False,
verbose_name=_("name"))
slug = models.SlugField(max_length=250, null=False, blank=True,
verbose_name=_("slug"))
permissions = TextArrayField(blank=True, null=True,
default=[],
verbose_name=_("permissions"),
choices=MEMBERS_PERMISSIONS)
order = models.IntegerField(default=10, null=False, blank=False,
verbose_name=_("order"))
    # null=True is needed to make Django 1.7 migrations work. The project
    # field causes some circular dependencies, and because of this it
    # cannot be serialized in a single transactional migration.
project = models.ForeignKey("projects.Project", null=True, blank=False,
related_name="roles", verbose_name=_("project"))
computable = models.BooleanField(default=True)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify_uniquely(self.name, self.__class__)
super().save(*args, **kwargs)
class Meta:
verbose_name = "role"
verbose_name_plural = "roles"
ordering = ["order", "slug"]
unique_together = (("slug", "project"),)
def __str__(self):
return self.name
class AuthData(models.Model):
user = models.ForeignKey("users.User", related_name="auth_data")
key = models.SlugField(max_length=50)
value = models.CharField(max_length=300)
extra = JsonField()
class Meta:
unique_together = ["key", "value"]
# When a Role object is changed, update all memberships
# related to that role.
@receiver(models.signals.post_save, sender=Role,
dispatch_uid="role_post_save")
def role_post_save(sender, instance, created, **kwargs):
# ignore if object is just created
if created:
return
instance.project.update_role_points()
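# Usage sketch (hypothetical objects, not part of the original module): the
# per-instance caches above trade one prefetched query for cheap dict lookups.
#
#   user = User.objects.get(username="alice")
#   membership = user.cached_membership_for_project(project)
#   if user.is_watcher(project):
#       level = user.get_notify_level(project)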
|
gam-phon/taiga-back
|
taiga/users/models.py
|
Python
|
agpl-3.0
| 12,446
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for generating resource names."""
import cStringIO
import random
import string
_LENGTH = 12
_BEGIN_ALPHABET = string.ascii_lowercase
_ALPHABET = _BEGIN_ALPHABET + string.digits
def GenerateRandomName():
"""Generates a random string.
  Returns:
    The returned string will be 12 characters long: a lowercase letter
    followed by 11 characters drawn from the set [a-z0-9]. (_ALPHABET
    contains no dash, so every character after the first comes from the
    same set.)
"""
buf = cStringIO.StringIO()
buf.write(random.choice(_BEGIN_ALPHABET))
for _ in xrange(_LENGTH - 1):
buf.write(random.choice(_ALPHABET))
return buf.getvalue()
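# Example (a sketch; Python 2, matching the imports above):
#   name = GenerateRandomName()
#   assert len(name) == _LENGTH
#   assert name[0] in _BEGIN_ALPHABET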
|
KaranToor/MA450
|
google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/name_generator.py
|
Python
|
apache-2.0
| 1,239
|
"""
HasBreak detects if a loop has a direct break
"""
from pythran.passmanager import NodeAnalysis
class HasBreak(NodeAnalysis):
def __init__(self):
self.result = False
super(HasBreak, self).__init__()
    def visit_For(self, _):
        # Do not recurse into nested loops: a break there is not a
        # *direct* break of the loop under analysis.
        return
def visit_Break(self, _):
self.result = True
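# Usage sketch (hedged; the pass-manager API varies between pythran versions):
# the analysis is meant to run over a loop *body*, not over the For node
# itself -- visit_For above deliberately stops recursion, so a break inside a
# nested loop does not count as a direct break of the outer one.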
|
hainm/pythran
|
pythran/analyses/has_break.py
|
Python
|
bsd-3-clause
| 328
|
# MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
import tensorflow as tf
import tensorflow_models as tf_models
from tensorflow_models.trainers import BaseTrainer
class Trainer(BaseTrainer):
def finalize_hook(self):
print('Done training for {} epochs'.format(self.epoch()))
# Create the functions that perform learning and evaluation
def learning_hooks(self):
critic_steps = self._settings['critic_steps']
discriminator_steps = self._settings['discriminator_steps']
start_avb = self._settings['start_avb']
elbo_train_op = tf_models.get_inference('elbo_like')
train_elbo_loss_op = tf_models.get_loss('train/elbo_like')
critic_train_op = tf_models.get_inference('critic')
train_critic_loss_op = tf_models.get_loss('train/critic')
discriminator_train_op = tf_models.get_inference('discriminator')
train_discriminator_loss_op = tf_models.get_loss('train/discriminator')
test_elbo_loss_op = tf_models.get_loss('test/elbo_like')
test_critic_loss_op = tf_models.get_loss('test/critic')
elbo_avb_train_op = tf_models.get_inference('elbo_avb')
train_elbo_avb_loss_op = tf_models.get_loss('train/elbo_avb')
test_elbo_avb_loss_op = tf_models.get_loss('test/elbo_avb')
test_discriminator_loss_op = tf_models.get_loss('test/discriminator')
def train(count_steps):
total_elbo = 0.
total_critic = 0.
total_elbo_avb = 0.
total_discriminator = 0.
# Decide whether to do EMVB or AVB
if start_avb is None or self.step < start_avb:
train_op = elbo_train_op
#print('Doing EMVB')
else:
train_op = elbo_avb_train_op
#print('Doing AVB')
if critic_steps == discriminator_steps:
for idx in range(count_steps):
_, this_elbo, this_elbo_avb = self.sess.run([train_op, train_elbo_loss_op, train_elbo_avb_loss_op])
for jdx in range(critic_steps):
_, this_critic, _, this_discriminator = self.sess.run([critic_train_op, train_critic_loss_op, discriminator_train_op, train_discriminator_loss_op])
total_elbo += this_elbo
total_critic += this_critic
total_elbo_avb += this_elbo_avb
total_discriminator += this_discriminator
else:
for idx in range(count_steps):
_, this_elbo, this_elbo_avb = self.sess.run([train_op, train_elbo_loss_op, train_elbo_avb_loss_op])
for jdx in range(critic_steps):
_, this_critic = self.sess.run([critic_train_op, train_critic_loss_op])
for jdx in range(discriminator_steps):
_, this_discriminator = self.sess.run([discriminator_train_op, train_discriminator_loss_op])
total_elbo += this_elbo
total_critic += this_critic
total_elbo_avb += this_elbo_avb
total_discriminator += this_discriminator
return total_elbo / count_steps, total_critic / count_steps, total_elbo_avb/count_steps, total_discriminator/count_steps
def test():
total_elbo = 0.
total_critic = 0.
total_elbo_avb = 0.
total_discriminator = 0.
for idx in range(self.test_batches):
this_critic, this_elbo, this_discriminator, this_elbo_avb = self.sess.run([test_critic_loss_op, test_elbo_loss_op, test_discriminator_loss_op, test_elbo_avb_loss_op])
total_elbo += this_elbo
total_critic += this_critic
total_elbo_avb += this_elbo_avb
total_discriminator += this_discriminator
return total_critic/self.test_batches, total_elbo/self.test_batches, total_discriminator/self.test_batches, total_elbo_avb/self.test_batches
return train, test
def initialize_hook(self):
# See where the test loss starts
if self._settings['resume_from'] is None:
# Do a test evaluation before any training happens
test_critic, test_elbo, test_discriminator, test_elbo_avb = self.test()
self.results['elbo_test'] += [test_elbo]
self.results['critic_test'] += [test_critic]
self.results['elbo_avb_test'] += [test_elbo_avb]
self.results['discriminator_test'] += [test_discriminator]
else:
test_elbo = self.results['elbo_test'][-1]
test_critic = self.results['critic_test'][-1]
            test_elbo_avb = self.results['elbo_avb_test'][-1]
test_discriminator = self.results['discriminator_test'][-1]
#print('*** DEBUG ***')
#print(test_elbo.shape)
#print(test_critic.shape)
print('epoch {:.3f}, test elbo = {:.2f}/{:.2f}, test critic = {:.2f}/{:.2f}'.format(self.epoch(), test_elbo, test_elbo_avb, test_critic, test_discriminator))
def step_hook(self):
with tf_models.timer.Timer() as train_timer:
train_elbo, train_critic, train_elbo_avb, train_discriminator = self.train(self._batches_per_step)
test_critic, test_elbo, test_discriminator, test_elbo_avb = self.test()
self.results['times_train'] += [train_timer.interval]
self.results['elbo_train'] += [train_elbo]
self.results['critic_train'] += [train_critic]
self.results['elbo_test'] += [test_elbo]
self.results['critic_test'] += [test_critic]
self.results['elbo_avb_test'] += [test_elbo_avb]
self.results['discriminator_test'] += [test_discriminator]
self.results['elbo_avb_train'] += [train_elbo_avb]
self.results['discriminator_train'] += [train_discriminator]
def before_step_hook(self):
pass
def after_step_hook(self):
train_time = self.results['times_train'][-1]
test_elbo = self.results['elbo_test'][-1]
test_critic = self.results['critic_test'][-1]
test_discriminator = self.results['discriminator_test'][-1]
train_elbo = self.results['elbo_train'][-1]
#test_discriminator = self.results['discriminator_test'][-1]
train_elbo_avb = self.results['elbo_avb_train'][-1]
test_elbo_avb = self.results['elbo_avb_test'][-1]
examples_per_sec = self._settings['batch_size'] * self._batches_per_step / train_time
sec_per_batch = train_time / self._batches_per_step
print('epoch {:.3f}, test elbo = {:.2f}/{:.2f}, test critic = {:.2f}/{:.2f}, train elbo = {:.2f}/{:.2f} ({:.1f} examples/sec)'.format(self.epoch(), test_elbo, test_elbo_avb, test_critic, test_discriminator, train_elbo, train_elbo_avb, examples_per_sec))
def initialize_results_hook(self):
results = {}
results['elbo_train'] = []
results['critic_train'] = []
results['times_train'] = []
results['elbo_test'] = []
results['critic_test'] = []
results['elbo_avb_test'] = []
results['discriminator_test'] = []
results['elbo_avb_train'] = []
results['discriminator_train'] = []
return results
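# A hypothetical driver (sketch only; the BaseTrainer constructor and run()
# entry point are assumed here, not shown in this file):
#   settings = {'critic_steps': 5, 'discriminator_steps': 1, 'start_avb': None,
#               'batch_size': 64, 'resume_from': None}
#   trainer = Trainer(settings)
#   trainer.run()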
|
stefanwebb/tensorflow-models
|
tensorflow_models/trainers/emvb_final.py
|
Python
|
mit
| 7,499
|
from setuptools import setup, find_packages
from setuptools.command.install import install
def get_version():
    with open("CHANGES", "r") as version_fh:
        first_line = version_fh.readline().strip()
    return first_line.split()[1]
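# get_version() assumes the first line of CHANGES looks like, e.g. (a
# hypothetical entry), "pride 0.4.1 (2015-01-01)": the version is the second
# whitespace-separated token.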
setup(
name='pride',
version=get_version(),
description="Framework for submitting proteomics data to the PRIDE archive at EBI",
packages=["pride"],
zip_safe=False,
install_requires=[
'anadama',
'osdf-python',
'cutlass'
],
package_data={'': ['.anadama_pride','pg-converter-1.2/*.*','pg-converter-1.2/lib/*.*']},
include_package_data=True,
entry_points= {
'anadama.pipeline': [
".pride = pride.pipeline:PRIDEPipeline"
]
}
)
|
ihmpdcc/anadama-pride
|
setup.py
|
Python
|
mit
| 793
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-05 15:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pictures', '0002_auto_20161005_1501'),
]
operations = [
migrations.RemoveField(
model_name='picture',
name='group',
),
migrations.RemoveField(
model_name='picture',
name='subject',
),
migrations.RemoveField(
model_name='picture',
name='timetable_entry',
),
migrations.RemoveField(
model_name='picture',
name='topic',
),
migrations.AddField(
model_name='picture',
name='gallery',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pictures.TimetableEntryGallery'),
),
]
|
photoboard/photoboard-django
|
pictures/migrations/0003_auto_20161005_1525.py
|
Python
|
mit
| 988
|
import requests, base64, hmac, hashlib
from Exception import CasperException
requests.packages.urllib3.disable_warnings()
class CasperAgent(object):
USER_AGENT = "Casper-API-Python/1.0.0"
URL = "https://api.casper.io"
def __init__(self, apiKey = None, apiSecret = None):
self.apiKey = apiKey
self.apiSecret = apiSecret
def get(self, endpoint, headers = None):
return self.request(endpoint = endpoint,
headers = headers)
def post(self, endpoint, headers = None, params = None):
return self.request(endpoint = endpoint,
headers = headers,
params = params,
post = True)
def request(self, endpoint, headers = None, params = None, post = False):
s = requests.Session()
if headers is None: headers = {}
headers.update({
"X-Casper-API-Key": self.apiKey,
"X-Casper-Signature": self.generateRequestSignature(params),
"User-Agent": self.USER_AGENT
})
s.headers = headers
requestURL = "{0}{1}".format(self.URL, endpoint)
if post:
res = s.post(requestURL, data = params, timeout = 10, verify = False)
else:
res = s.get(requestURL, timeout = 10, verify = False)
try:
rJSON = res.json()
except ValueError:
raise CasperException("Failed to decode response!")
if res.status_code != 200:
if "code" in rJSON and "message" in rJSON:
raise CasperException("API Response: [{0}] {1}".format(rJSON["code"], rJSON["message"]))
else:
raise CasperException("API Response: [{0}] Unknown Error Message".format(res.status_code))
return rJSON
@staticmethod
def externalRequest(url, headers = None, params = None, post = False):
s = requests.Session()
s.headers = headers
if post:
res = s.post(url, data = params, timeout = 10, verify = False)
else:
res = s.get(url, timeout = 10, verify = False)
if not res:
raise CasperException("Request failed!")
if res.status_code != 200:
raise CasperException("External request failed!")
return res.content
    def generateRequestSignature(self, params):
        # v1 scheme: concatenate "keyvalue" pairs in sorted key order and
        # HMAC-SHA256 the result with the API secret.
        if params is None: params = {}
        sortedData = "".join("{0}{1}".format(key, value) for key, value in sorted(params.items()))
        signature = hmac.new(key = self.apiSecret, msg = sortedData, digestmod = hashlib.sha256).hexdigest()
        return "v1:{0}".format(signature)
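# Usage sketch (hypothetical credentials and endpoint):
#   agent = CasperAgent(apiKey="KEY", apiSecret="SECRET")
#   info = agent.get("/some/endpoint")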
|
rxw/snapy
|
snapy/Agent.py
|
Python
|
mit
| 2,329
|
import array
import numbers
real_types = [numbers.Real]
int_types = [numbers.Integral]
iterable_types = [set, list, tuple, array.array]
try:
import numpy
except ImportError:
pass
else:
real_types.extend([numpy.float32, numpy.float64])
int_types.extend([numpy.int32, numpy.int64])
iterable_types.append(numpy.ndarray)
# use these with isinstance to test for various types that include builtins
# and numpy types (if numpy is available)
real_types = tuple(real_types)
int_types = tuple(int_types)
iterable_types = tuple(iterable_types)
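# Example (a sketch):
#   isinstance(3.14, real_types)        # True -- numbers.Real covers float
#   isinstance(7, int_types)            # True
#   isinstance((1, 2), iterable_types)  # True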
|
DailyActie/Surrogate-Model
|
01-codes/OpenMDAO-Framework-dev/openmdao.util/src/openmdao/util/typegroups.py
|
Python
|
mit
| 558
|
#***************************************************************************
#* *
#* Copyright (c) 2014 Sebastian Hoogen <github@sebastianhoogen.de> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD OpenSCAD Workbench - DRAWEXE exporter"
__author__ = "Sebastian Hoogen <github@sebastianhoogen.de>"
import FreeCAD, Part
if open.__module__ == '__builtin__':
pythonopen = open
# unsupported primitives
# Part:: Wedge, Helix, Spiral, Ellipsoid
# Draft: Rectangle, BSpline, BezCurve
def f2s(n,angle=False,axis=False):
    '''convert a numerical value to a string,
    trying to remove non-significant digits by guessing a former rounding
    '''
if abs(n) < 1e-14: return '0'
if angle and len(('%0.6e' % n).split('e')[0].rstrip('0') ) < 3:
return ('%0.5f' % n).rstrip('0').rstrip('.')
elif axis and len(('%0.13e' % n).split('e')[0].rstrip('0') ) < 6:
return ('%0.10f' % n).rstrip('0').rstrip('.')
else:
for i in range(20):
s = ('%%1.%df'% i) % n
if float(s) == n:
return s
for i in range(20):
s = ('%%0.%de'% i) % n
if float(s) == n:
return s
def polygonstr(r,pcount):
import math
v=FreeCAD.Vector(r,0,0)
m=FreeCAD.Matrix()
m.rotateZ(2*math.pi/pcount)
points=[]
for i in range(pcount):
points.append(v)
v=m.multiply(v)
points.append(v)
return ' '.join('%s %s %s'%(f2s(v.x),f2s(v.y),f2s(v.z)) \
for v in points)
def formatobjtype(ob):
objtype=ob.TypeId
if (ob.isDerivedFrom('Part::FeaturePython') or \
ob.isDerivedFrom('Part::Part2DObjectPython') or\
ob.isDerivedFrom('App::FeaturePython')) and \
hasattr(ob.Proxy,'__module__'):
return '%s::%s.%s' % (ob.TypeId,ob.Proxy.__module__,\
ob.Proxy.__class__.__name__)
else:
return ob.TypeId
def placement2draw(placement,name='object'):
"""converts a FreeCAD Placement to trotate and ttranslate commands"""
drawcommand=''
if not placement.Rotation.isNull():
import math
dx,dy,dz=placement.Rotation.Axis
an=math.degrees(placement.Rotation.Angle)
drawcommand += "trotate %s 0 0 0 %s %s %s %s\n" % \
(name,f2s(dx,axis=True),f2s(dy,axis=True),f2s(dz,axis=True),\
f2s(an,angle=True))
if placement.Base.Length > 1e-8:
x,y,z=placement.Base
drawcommand += "ttranslate %s %s %s %s\n" % \
(name,f2s(x),f2s(y),f2s(z))
return drawcommand
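# Sketch: a placement rotated 90 degrees about Z and translated to (10,0,0)
# would emit, for a hypothetical object name "obj":
#   trotate obj 0 0 0 0 0 1 90
#   ttranslate obj 10 0 0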
def saveShape(csg,filename,shape,name,hasplacement = True,cleanshape=False):
import os
spath,sname = os.path.split(filename)
sname=sname.replace('.','-')
uname='%s-%s' %(sname,name)
breppath=os.path.join(spath,'%s.brep'%uname)
csg.write("restore %s.brep %s\n" % (uname,name))
if cleanshape:
import Part
try:
shape = shape.cleaned()
except Part.OCCError:
shape = shape.copy()
if hasplacement is None: # saved with placement
hasplacement = False # saved with placement
shape.exportBrep(breppath)
elif not hasplacement: #doesn't matter
shape.exportBrep(breppath)
else: #remove placement
sh=shape.copy()
sh.Placement=FreeCAD.Placement()
        # It is not yet tested whether changing the placement recreates the
        # tessellation, but for now we simply do the cleaning once again to
        # stay on the safe side.
        if cleanshape:
            sh = sh.cleaned()
sh.exportBrep(breppath)
return hasplacement
def isDraftFeature(ob):
if (ob.isDerivedFrom('Part::FeaturePython') or \
ob.isDerivedFrom('Part::Part2DObjectPython')) and \
hasattr(ob.Proxy,'__module__') and \
ob.Proxy.__module__ == 'Draft':
return True
def isDraftClone(ob):
if ob.isDerivedFrom('Part::FeaturePython') and \
hasattr(ob.Proxy,'__module__') and \
ob.Proxy.__module__ == 'Draft':
import Draft
return isinstance(ob.Proxy,Draft._Clone)
def isDraftCircle(ob):
if isDraftFeature(ob):
import Draft
return isinstance(ob.Proxy,Draft._Circle)
def isDraftEllipse(ob):
if isDraftFeature(ob):
import Draft
return isinstance(ob.Proxy,Draft._Ellipse)
def isDraftPolygon(ob):
if isDraftFeature(ob):
import Draft
return isinstance(ob.Proxy,Draft._Polygon)
def isDraftPoint(ob):
if isDraftFeature(ob):
import Draft
return isinstance(ob.Proxy,Draft._Point)
def isDraftWire(ob):
if isDraftFeature(ob):
import Draft
if isinstance(ob.Proxy,Draft._Wire):
#only return true if we support all options
#"Closed" append last point at the end
#"MakeFace"
#"Points" data we need
# the usage of 'start' and 'end' is not clear
if ob.Base is None and ob.Tool is None and \
ob.FilletRadius.Value == 0.0 and \
ob.ChamferSize.Value == 0.0:
return True
def isOpenSCADFeature(ob):
if ob.isDerivedFrom('Part::FeaturePython') and \
hasattr(ob.Proxy,'__module__') and \
ob.Proxy.__module__ == 'OpenSCADFeatures':
return True
def isOpenSCADMultMatrixFeature(ob):
if ob.isDerivedFrom('Part::FeaturePython') and \
hasattr(ob.Proxy,'__module__') and \
ob.Proxy.__module__ == 'OpenSCADFeatures':
import OpenSCADFeatures
return isinstance(ob.Proxy,OpenSCADFeatures.MatrixTransform)
def isDeform(ob):
"""tests whether the object is a Matrix transformation
that does a non-uniform scaling"""
# the [ is important to exclude cases with additional
# rotation or mirroring.
# TBD decompose complex matrix operations
return isOpenSCADMultMatrixFeature(ob) and \
ob.Matrix.analyze().startswith('Scale [')
class Drawexporter(object):
def __init__(self, filename):
self.objectcache=set()
self.csg = pythonopen(filename,'w')
#self.csg=csg
self.filename=filename
#settings
self.alwaysexplode = True
self.cleanshape = False
def __enter__(self):
return self
def write_header(self):
import FreeCAD
self.csg.write('#generated by FreeCAD %s\n' % \
'.'.join(FreeCAD.Version()[0:3]))
self.csg.write('pload MODELING\n')
def write_displayonly(self,objlst):
self.csg.write('donly %s\n'%' '.join([obj.Name for obj in objlst]))
def saveSweep(self,ob):
import Part
spine,subshapelst=ob.Spine
#process_object(csg,spine,filename)
explodeshape = self.alwaysexplode or self.process_object(spine,True)
if explodeshape:
self.process_object(spine)
if len(subshapelst) and spine.Shape.ShapeType != 'Edge':
#raise NotImplementedError # hit the fallback
# currently all subshapes are edges
self.csg.write('explode %s E\n' % spine.Name )
edgelst = ' '.join(('%s_%s' % (spine.Name,ss[4:]) for ss \
in subshapelst))
spinename = '%s-0-spine' % ob.Name
self.csg.write('wire %s %s\n' %(spinename,edgelst))
elif spine.Shape.ShapeType == 'Wire':
spinename = spine.Name
elif spine.Shape.ShapeType == 'Edge':
spinename = '%s-0-spine' % ob.Name
self.csg.write('wire %s %s\n' %(spinename,spine.Name))
else: # extract only the used subshape
if len(subshapelst):
path=Part.Wire([spine.Shape.getElement(subshapename) for \
subshapename in subshapelst])
elif spine.Shape.ShapeType == 'Edge':
path = spine.Shape
elif spine.Shape.ShapeType == 'Wire':
path = Part.Wire(spine.Shape)
else:
                raise ValueError('Unsuitable Shape Type')
spinename = '%s-0-spine' % ob.Name
saveShape(self.csg,self.filename, path,spinename,None,\
self.cleanshape) # placement with shape
#safePlacement(ob.Placement,ob.Name)
self.csg.write('mksweep %s\n' % spinename)
#setsweep
setoptions=[]
buildoptions=[]
if ob.Frenet:
setoptions.append('-FR')
else:
setoptions.append('-CF')
if ob.Transition == 'Transformed':
buildoptions.append('-M')
elif ob.Transition == 'Right corner':
buildoptions.append('-C')
elif ob.Transition == 'Round corner':
buildoptions.append('-R')
if ob.Solid:
buildoptions.append('-S')
self.csg.write('setsweep %s\n' % (" ".join(setoptions)))
#addsweep
sections=ob.Sections
sectionnames = []
for i,subobj in enumerate(ob.Sections):
#process_object(csg,subobj,filename)
#sectionsnames.append(subobj.Name)
#d1['basename']=subobj.Name
sectionname = '%s-0-section-%02d-%s' % (ob.Name,i,subobj.Name)
addoptions=[]
explodeshape = self.alwaysexplode or \
self.process_object(subobj,True)
if explodeshape:
sh = subobj.Shape
if sh.ShapeType == 'Vertex' or sh.ShapeType == 'Wire' or \
sh.ShapeType == 'Edge' or \
sh.ShapeType == 'Face' and len(sh.Wires) == 1:
self.process_object(subobj)
if sh.ShapeType == 'Wire' or sh.ShapeType == 'Vertex':
#csg.write('tcopy %s %s\n' %(subobj.Name,sectionname))
sectionname = subobj.Name
if sh.ShapeType == 'Edge':
self.csg.write('explode %s E\n' % subobj.Name )
self.csg.write('wire %s %s_1\n' %(sectionname,subobj.Name))
if sh.ShapeType == 'Face':
                    #we should use the outer wire when it becomes available
self.csg.write('explode %s W\n' % subobj.Name )
#csg.write('tcopy %s_1 %s\n' %(subobj.Name,sectionname))
sectionname ='%s_1' % subobj.Name
else:
explodeshape = False
if not explodeshape: # extract only the used subshape
sh = subobj.Shape
if sh.ShapeType == 'Vertex':
pass
elif sh.ShapeType == 'Wire' or sh.ShapeType == 'Edge':
sh = Part.Wire(sh)
elif sh.ShapeType == 'Face':
sh = sh.OuterWire
else:
raise ValueError('Unrecognized Shape Type')
saveShape(self.csg,self.filename,sh,sectionname,None,\
self.cleanshape) # placement with shape
self.csg.write('addsweep %s %s\n' % \
(sectionname," ".join(addoptions)))
self.csg.write('buildsweep %s %s\n' % (ob.Name," ".join(buildoptions)))
def process_object(self,ob,checksupported=False,toplevel=False):
if not checksupported and ob.Name in self.objectcache:
            return # object is already present
if not checksupported:
self.objectcache.add(ob.Name)
d1 = {'name':ob.Name}
if hasattr(ob,'Placement'):
hasplacement = not ob.Placement.isNull()
else:
hasplacement = False
if ob.TypeId in ["Part::Cut","Part::Fuse","Part::Common",\
"Part::Section"]:
if checksupported: return True # The object is supported
d1.update({'part':ob.Base.Name,'tool':ob.Tool.Name,\
'command':'b%s' % ob.TypeId[6:].lower()})
self.process_object(ob.Base)
self.process_object(ob.Tool)
self.csg.write("%(command)s %(name)s %(part)s %(tool)s\n"%d1)
elif ob.TypeId == "Part::Plane" :
if checksupported: return True # The object is supported
d1.update({'uname':'%s-untrimmed' % d1['name'],\
'length': f2s(ob.Length),'width': f2s(ob.Width)})
self.csg.write("plane %s 0 0 0\n"%d1['uname'])
self.csg.write(\
"mkface %(name)s %(uname)s 0 %(length)s 0 %(width)s\n"%d1)
elif ob.TypeId == "Part::Ellipse" :
if checksupported: return True # The object is supported
d1.update({'uname':'%s-untrimmed'%d1['name'], 'maj':\
f2s(ob.MajorRadius), 'min': f2s(ob.MinorRadius),\
'pf':f2s(ob.Angle0.getValueAs('rad').Value), \
'pl':f2s(ob.Angle1.getValueAs('rad').Value)})
self.csg.write("ellipse %(uname)s 0 0 0 %(maj)s %(min)s\n"%d1)
self.csg.write('mkedge %(name)s %(uname)s %(pf)s %(pl)s\n' % d1)
elif ob.TypeId == "Part::Sphere" :
if checksupported: return True # The object is supported
d1.update({'radius':f2s(ob.Radius),'angle1':f2s(ob.Angle1),\
'angle2':f2s(ob.Angle2),'angle3':f2s(ob.Angle3)})
self.csg.write('psphere %(name)s %(radius)s %(angle1)s %(angle2)s '\
'%(angle3)s\n'%d1)
elif ob.TypeId == "Part::Box" :
if checksupported: return True # The object is supported
d1.update({'dx':f2s(ob.Length),'dy':f2s(ob.Width),'dz':f2s(ob.Height)})
self.csg.write('box %(name)s %(dx)s %(dy)s %(dz)s\n'%d1)
elif ob.TypeId == "Part::Cylinder" :
if checksupported: return True # The object is supported
d1.update({'radius':f2s(ob.Radius),'height':f2s(ob.Height),\
'angle':f2s(ob.Angle)})
self.csg.write('pcylinder %(name)s %(radius)s %(height)s %(angle)s\n'%d1)
elif ob.TypeId == "Part::Cone" :
if checksupported: return True # The object is supported
d1.update({'radius1':f2s(ob.Radius1),'radius2':f2s(ob.Radius2),\
'height':f2s(ob.Height),'angle':f2s(ob.Angle)})
self.csg.write('pcone %(name)s %(radius1)s %(radius2)s %(height)s %(angle)s\n'%d1)
elif ob.TypeId == "Part::Torus" :
if checksupported: return True # The object is supported
d1.update({'radius1':f2s(ob.Radius1),'radius2':f2s(ob.Radius2),\
'angle1': f2s(ob.Angle1),'angle2':f2s(ob.Angle2),\
'angle3': f2s(ob.Angle3)})
self.csg.write('ptorus %(name)s %(radius1)s %(radius2)s %(angle1)s '\
'%(angle2)s %(angle3)s\n' % d1)
elif ob.TypeId == "Part::Mirroring" :
if checksupported: return True # The object is supported
self.process_object(ob.Source)
self.csg.write('tcopy %s %s\n'%(ob.Source.Name,d1['name']))
b=ob.Base
d1['x']=f2s(ob.Base.x)
d1['y']=f2s(ob.Base.y)
d1['z']=f2s(ob.Base.z)
d1['dx']=f2s(ob.Normal.x)
d1['dy']=f2s(ob.Normal.y)
d1['dz']=f2s(ob.Normal.z)
self.csg.write('tmirror %(name)s %(x)s %(y)s %(z)s %(dx)s %(dy)s %(dz)s\n' \
% d1)
elif ob.TypeId == 'Part::Compound':
if len(ob.Links) == 0:
pass
elif len(ob.Links) == 1:
if checksupported:
return self.process_object(ob.Links[0],True)
self.process_object(ob.Links[0])
self.csg.write('tcopy %s %s\n'%(ob.Links[0].Name,d1['name']))
else:
if checksupported: return True # The object is supported
basenames=[]
for i,subobj in enumerate(ob.Links):
self.process_object(subobj)
basenames.append(subobj.Name)
self.csg.write('compound %s %s\n' % (' '.join(basenames),ob.Name))
elif ob.TypeId in ["Part::MultiCommon", "Part::MultiFuse"]:
if len(ob.Shapes) == 0:
pass
elif len(ob.Shapes) == 1:
if checksupported:
return self.process_object(ob.Shapes[0],True)
self.process_object(ob.Shapes[0],)
self.csg.write('tcopy %s %s\n'%(ob.Shapes[0].Name,d1['name']))
else:
if checksupported: return True # The object is supported
topname = ob.Name
command = 'b%s' % ob.TypeId[11:].lower()
lst1=ob.Shapes[:]
current=lst1.pop(0)
curname=current.Name
self.process_object(current)
i=1
while lst1:
if len(lst1) >= 2:
nxtname='to-%s-%03d-t'%(topname,i)
else:
nxtname=topname
nxt=lst1.pop(0)
self.process_object(nxt)
self.csg.write("%s %s %s %s\n"%(command,nxtname,curname,nxt.Name))
curname=nxtname
i+=1
elif (isDraftPolygon(ob) and ob.ChamferSize.Value == 0 and\
ob.FilletRadius.Value == 0 and ob.Support == None) or\
ob.TypeId == "Part::Prism" or \
ob.TypeId == "Part::RegularPolygon":
if checksupported: return True # The object is supported
draftpolygon = isDraftPolygon(ob)
if draftpolygon:
pcount = ob.FacesNumber
if ob.DrawMode =='inscribed':
r=ob.Radius.Value
elif ob.DrawMode =='circumscribed':
import math
r = ob.Radius.Value/math.cos(math.pi/pcount)
else:
raise ValueError
else:
pcount = ob.Polygon
r=ob.Circumradius.Value
justwire = ob.TypeId == "Part::RegularPolygon" or \
(draftpolygon and ob.MakeFace == False)
polyname = '%s-polyline' % d1['name']
if justwire:
wirename = d1['name']
else:
wirename = '%s-polywire' % d1['name']
if ob.TypeId == "Part::Prism":
facename = '%s-polyface' % d1['name']
else:
facename = d1['name']
self.csg.write('polyline %s %s\n' % (polyname,polygonstr(r,pcount)))
self.csg.write('wire %s %s\n' %(wirename,polyname))
if not justwire:
self.csg.write('mkplane %s %s\n' % (facename,polyname))
if ob.TypeId == "Part::Prism":
self.csg.write('prism %s %s 0 0 %s\n' % \
(d1['name'],facename, f2s(ob.Height.Value)))
elif ob.TypeId == "Part::Extrusion" and ob.TaperAngle.Value == 0:
if checksupported: return True # The object is supported
self.process_object(ob.Base)
            #Warning: does not fully resemble the functionality of
            #Part::Extrusion
#csg.write('tcopy %s %s\n'%(ob.Base.Name,d1['name']))
facename=ob.Base.Name
self.csg.write('prism %s %s %s %s %s\n' % (d1['name'],facename,\
f2s(ob.Dir.x),f2s(ob.Dir.y),f2s(ob.Dir.z)))
elif ob.TypeId == "Part::Fillet" and True: #disabled
if checksupported: return True # The object is supported
self.process_object(ob.Base)
self.csg.write('explode %s E\n' % ob.Base.Name )
self.csg.write('blend %s %s %s\n' % (d1['name'],ob.Base.Name,\
' '.join(('%s %s'%(f2s(e[1]),'%s_%d' % (ob.Base.Name,e[0])) \
for e in ob.Edges))))
elif ob.TypeId == "Part::Thickness" and not ob.SelfIntersection and \
ob.Mode == 'Skin':
if checksupported: return True # The object is supported
jointype = {'Arc':'a','Intersection':'i','Tangent':'t'} #Join
inter = {False: 'p', True: 'c'} #Intersection
baseobj, facelist = ob.Faces
self.process_object(baseobj)
faces = ' '.join([('%s_%s' %(baseobj.Name,f[4:])) \
for f in facelist])
value = f2s(ob.Value)
self.csg.write('explode %s F\n' % baseobj.Name )
self.csg.write('offsetparameter 1e-7 %s %s\n' % \
(inter[ob.Intersection],jointype[ob.Join]))
self.csg.write('offsetload %s %s %s\n'%(baseobj.Name,value,faces))
self.csg.write('offsetperform %s\n' % d1['name'] )
elif ob.TypeId == "Part::Sweep" and True:
if checksupported: return True # The object is supported
self.saveSweep(ob)
elif ob.TypeId == "Part::Loft":
if checksupported: return True # The object is supported
sectionnames=[]
for i,subobj in enumerate(ob.Sections):
explodeshape = self.alwaysexplode or \
                    self.process_object(subobj,True)
                if explodeshape and False: #disabled TBD
try:
raise NotImplementedError
sectionname = '%s-%02d-section' % (ob.Name,i)
sh = subobj.Shape
if sh.isNull():
raise ValueError # hit the fallback
tempname=spine.Name
if sh.ShapeType == 'Compound':
sh = sh.childShapes()[0]
self.csg.write('explode %s\n' % tempname )
tempname = '%s_1' % tempname
if sh.ShapeType == 'Face':
#sh = sh.OuterWire #not available
if len(sh.Wires) == 1:
sh=sh.Wires[0]
self.csg.write('explode %s\n W' % tempname )
tempname = '%s_1' % tempname
else:
raise NotImplementedError
elif sh.ShapeType == 'Edge':
self.csg.write('wire %s %s\n' %(sectionname,tempname))
tempname = sectionname
sectionname = tempname
except NotImplementedError:
explodeshape = False # fallback
else:
explodeshape = False # fallback if we hit the False before
if not explodeshape: # extract only the used subshape
sh = subobj.Shape
if not sh.isNull():
if sh.ShapeType == 'Compound':
sh = sh.childShapes()[0]
if sh.ShapeType == 'Face':
sh = sh.OuterWire
elif sh.ShapeType == 'Edge':
import Part
sh = Part.Wire([sh])
elif sh.ShapeType == 'Wire':
import Part
sh = Part.Wire(sh)
elif sh.ShapeType == 'Vertex':
pass
else:
                            raise ValueError('Unsuitable Shape Type')
sectionname = '%s-%02d-section' % (ob.Name,i)
saveShape(self.csg,self.filename, sh,sectionname,None,\
self.cleanshape) # placement with shape
sectionnames.append(sectionname)
if ob.Closed:
sectionnames.append(sectionnames[0])
self.csg.write('thrusections %s %d %d %s\n' % \
(ob.Name,int(ob.Solid),\
int(ob.Ruled), ' '.join(sectionnames)))
elif isDeform(ob): #non-uniform scaling
if checksupported: return True # The object is supported
m=ob.Matrix
self.process_object(ob.Base)
#csg.write('tcopy %s %s\n'%(ob.Base.Name,d1['name']))
d1['basename']=ob.Base.Name
d1['cx']=f2s(m.A11)
d1['cy']=f2s(m.A22)
d1['cz']=f2s(m.A33)
self.csg.write('deform %(name)s %(basename)s %(cx)s %(cy)s %(cz)s\n' % d1)
if m.A14 > 1e-8 or m.A24 > 1e-8 or m.A34 > 1e-8:
self.csg.write("ttranslate %s %s %s %s\n" % \
(ob.Name,f2s(m.A14),f2s(m.A24),f2s(m.A34)))
elif isDraftPoint(ob) or ob.TypeId == "Part::Vertex":
if checksupported: return True # The object is supported
d1['x']=f2s(ob.X)
d1['y']=f2s(ob.Y)
d1['z']=f2s(ob.Z)
self.csg.write('vertex %(name)s %(x)s %(y)s %(z)s\n' % d1)
elif isDraftCircle(ob) or ob.TypeId == "Part::Circle" or \
isDraftEllipse(ob):
if checksupported: return True # The object is supported
isdraftcircle=isDraftCircle(ob)
            isdraftellipse=isDraftEllipse(ob)
            # DRAWEXE syntax: circle name x y [z [dx dy dz]] [ux uy [uz]] radius
curvename = '%s-curve' % d1['name']
if ob.TypeId == "Part::Circle":
radius=f2s(float(ob.Radius))
pfirst=f2s(ob.Angle0.getValueAs('rad').Value)
plast=f2s(ob.Angle1.getValueAs('rad').Value)
self.csg.write('circle %s 0 0 0 %s\n' % (curvename,radius))
self.csg.write('mkedge %s %s %s %s\n' % \
(d1['name'],curvename,pfirst,plast))
else: #draft
makeface = ob.MakeFace and \
(ob.Shape.isNull() or ob.Shape.ShapeType == 'Face')
#FreeCAD ignores a failed mkplane but it may
#break the model in DRAWEXE
edgename = '%s-edge' % d1['name']
if isdraftcircle:
pfirst=f2s(ob.FirstAngle.getValueAs('rad').Value)
plast=f2s(ob.LastAngle.getValueAs('rad').Value)
radius=f2s(ob.Radius.Value)
self.csg.write('circle %s 0 0 0 %s\n' % (curvename,radius))
else: #draft ellipse
import math
majr=f2s(float(ob.MajorRadius))
minr=f2s(float(ob.MinorRadius))
pfirst=f2s(math.radians(ob.FirstAngle))
plast =f2s(math.radians(ob.LastAngle))
self.csg.write('ellipse %s 0 0 0 %s %s\n' % \
(curvename,majr,minr))
self.csg.write('mkedge %s %s %s %s\n' % \
(edgename,curvename,pfirst,plast))
if makeface:
wirename = '%s-wire' % d1['name']
self.csg.write('wire %s %s\n' %(wirename,edgename))
self.csg.write('mkplane %s %s\n' % (d1['name'],wirename))
else:
self.csg.write('wire %s %s\n' %(d1['name'],edgename))
elif ob.TypeId == "Part::Line":
if checksupported: return True # The object is supported
self.csg.write('polyline %s %s %s %s %s %s %s\n' % \
(d1['name'],f2s(ob.X1),f2s(ob.Y1),f2s(ob.Z1),\
f2s(ob.X2),f2s(ob.Y2),f2s(ob.Z2)))
elif isDraftWire(ob):
if checksupported: return True # The object is supported
points=ob.Points
if ob.Closed:
points.append(points[0])
polyname = '%s-dwireline' % d1['name']
pointstr=' '.join('%s %s %s'%(f2s(v.x),f2s(v.y),f2s(v.z)) \
for v in points)
self.csg.write('polyline %s %s\n' % (polyname,pointstr))
if ob.MakeFace:
wirename = '%s-dwirewire' % d1['name']
self.csg.write('wire %s %s\n' %(wirename,polyname))
facename = d1['name']
self.csg.write('mkplane %s %s\n' % (facename,polyname))
else:
wirename = d1['name']
self.csg.write('wire %s %s\n' %(wirename,polyname))
elif isDraftClone(ob):
if checksupported: return True # The object is supported
x,y,z=ob.Scale.x
if x == y == z: #uniform scaling
d1['scale']=f2s(x)
else:
d1['cx']=f2s(x)
d1['cy']=f2s(y)
d1['cz']=f2s(z)
if len(ob.Objects) == 1:
d1['basename']=ob.Objects[0].Name
self.process_object(ob.Objects[0])
if x == y == z: #uniform scaling
self.csg.write('tcopy %(basename)s %(name)s\n' % d1)
self.csg.write('pscale %(name)s 0 0 0 %(scale)s\n' % d1)
else:
self.csg.write('deform %(name)s %(basename)s'\
' %(cx)s %(cy)s %(cz)s\n' % d1)
else: #compound
newnames=[]
for i,subobj in enumerate(ob.Objects):
self.process_object(subobj)
d1['basename']=subobj.Name
newname='%s-%2d' % (ob.Name,i)
d1['newname']=newname
newnames.append(newname)
if x == y == z: #uniform scaling
self.csg.write('tcopy %(basename)s %(newname)s\n' % d1)
self.csg.write('pscale %(newname)s 0 0 0 %(scale)s\n' % d1)
else:
self.csg.write('deform %(newname)s %(basename)s'\
' %(cx)s %(cy)s %(cz)s\n' % d1)
self.csg.write('compound %s %s\n' % (' '.join(newnames),ob.Name))
#elif ob.isDerivedFrom('Part::FeaturePython') and \
# hasattr(ob.Proxy,'__module__'):
# pass
elif ob.isDerivedFrom('Part::Feature') :
if ob.Shape.isNull(): #would crash in exportBrep otherwise
raise ValueError('Shape of %s is Null' % ob.Name)
if checksupported: return False # The object is not supported
self.csg.write('#saved shape of unsupported %s Object\n' % \
formatobjtype(ob))
hasplacement = saveShape(self.csg,self.filename,ob.Shape,ob.Name,\
hasplacement,self.cleanshape)
elif ob.isDerivedFrom('App::Annotation') :
return False # ignored here
            #annotations need to be drawn after erase/donly
else: # not derived from Part::Feature
if not toplevel:
                raise ValueError('Cannot export a child object')
else:
if ob.Name != ob.Label:
labelstr = 'Label %s' % ob.Label.encode('unicode-escape')
else:
labelstr = ''
self.csg.write('#omitted unsupported %s Object %s%s\n' %\
(formatobjtype(ob),ob.Name,labelstr))
self.csg.write('#Properties: %s\n' % \
','.join(ob.PropertiesList))
return False
#The object is not present and can not be referenced
if hasplacement:
self.csg.write(placement2draw(ob.Placement,ob.Name))
if ob.Name != ob.Label:
self.csg.write('#Object Label: %s\n' % ob.Label.encode('unicode-escape'))
return ob.Name #The object is present and can be referenced
def export_annotations(self,objlst):
for ob in objlst:
if ob.isDerivedFrom('App::Annotation') :
if ob.Name != ob.Label:
                self.csg.write('#Annotation Name %s Label %s\n' % \
(ob.Name,ob.Label.encode('unicode-escape')))
else:
self.csg.write('#Annotation %s\n' % (ob.Name))
v=ob.Position
self.csg.write('dtext %s %s %s "%s"\n' % \
(f2s(v.x),f2s(v.y),f2s(v.z), '\\n'.join(\
ob.LabelText).encode(\
'ascii', errors='xmlcharrefreplace')))
def export_objects(self,objlst,toplevel=True):
self.write_header()
toplevelobjs = [self.process_object(ob, toplevel=toplevel)\
for ob in objlst]
names = [name for name in toplevelobjs if name is not False]
self.csg.write('donly %s\n'%(' '.join(names)))
self.export_annotations(objlst)
#for ob in objlst:
# self.process_object(ob,toplevel=toplevel)
#self.write_displayonly(objlst)
def __exit__(self,exc_type, exc_val, exc_tb ):
self.csg.close()
def export(exportList,filename):
"called when freecad exports a file"
with Drawexporter(filename) as exporter:
exporter.export_objects(exportList)
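# Usage sketch inside FreeCAD (assumes an open document; the output path is
# only an example):
#   import exportDRAWEXE
#   exportDRAWEXE.export(App.ActiveDocument.Objects, u'/tmp/model.tcl')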
|
timthelion/FreeCAD_sf_master
|
src/Mod/Sandbox/exportDRAWEXE.py
|
Python
|
lgpl-2.1
| 34,509
|
from datetime import date, timedelta, datetime
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.contrib.auth.models import User
from django.conf import settings
def next_week():
return datetime.now() + timedelta(days=7)
class JSLibraryGroup(models.Model):
"""
Main library to load - MooTools core, jQuery, Prototype, etc.
"""
name = models.CharField('Name', max_length=100, unique=True)
description = models.TextField(blank=True, null=True)
selected = models.BooleanField(blank=True, default=False)
def __unicode__(self):
return self.name
class Admin:
pass
class JSLibraryWrap(models.Model):
"""
how to wrap the code in specific library
"""
name = models.CharField(max_length=255)
code_start = models.TextField()
code_end = models.TextField()
def __unicode__(self):
return self.name
class Admin:
pass
class Meta:
verbose_name_plural = "JS Library Code Wrappers"
class JSLibrary(models.Model):
"""
Version of the library - Mootools 1.2.4, etc.
"""
library_group = models.ForeignKey(JSLibraryGroup, related_name="libs")
version = models.CharField(max_length=30, null=True, blank=True)
href = models.CharField('URL to the core library file', max_length=255, unique=True)
selected = models.BooleanField(blank=True, default=False)
wrap_d = models.ForeignKey(JSLibraryWrap, related_name='lib_for_domready')
wrap_l = models.ForeignKey(JSLibraryWrap, related_name='lib_for_load')
def __unicode__(self):
return ' '.join((self.library_group.name, self.version))
class Admin:
pass
class Meta:
verbose_name_plural = "JS Library versions"
ordering = ["version"]
class JSDependencyManager(models.Manager):
def get_active(self):
return self.get_query_set().filter(active=True)
class JSDependency(models.Model):
"""
Additional library file - MooTools more, Scriptaculous, etc.
"""
library = models.ForeignKey(JSLibrary)
name = models.CharField(max_length=150)
url = models.CharField('URL to the library file', max_length=255)
description = models.TextField(blank=True, null=True)
selected = models.BooleanField(blank=True, default=False)
ord = models.IntegerField("Order",default=0, blank=True, null=True)
active = models.BooleanField(default=True, blank=True)
objects = JSDependencyManager()
def __unicode__(self):
return self.name
class Admin:
pass
class Meta:
verbose_name_plural = "JS Dependencies"
ordering = ["-ord"]
WRAPCHOICE = (
('', 'none'),
('d', 'onDomready'),
('l', 'onLoad'),
)
class Pastie(models.Model):
"""
default metadata
"""
# slug - created automatically in the pastie_save view
title = models.CharField(max_length=255, null=True, blank=True)
slug = models.CharField(max_length=255, unique=True, blank=True)
# authoring
#author = models.ForeignKey(User, null=True, blank=True)
#private = models.BooleanField(default=False, blank=True)
# filled automatically
created_at = models.DateTimeField(default=datetime.now)
def set_slug(self):
from random import choice
allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'
check_slug = True
# repeat until the slug will be unique
while check_slug:
            self.slug = ''.join([choice(allowed_chars) for i in range(settings.MOOSHELL_SLUG_LENGTH)]) # build a random slug of the configured length
try:
check_slug = Pastie.objects.get(slug=self.slug)
except:
check_slug = False
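    # Note (a sketch): with MOOSHELL_SLUG_LENGTH = 5 this yields slugs such
    # as "aX3k9"; the alphabet above deliberately omits the easily confused
    # characters 0/O/o and 1/I/i/l.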
def __unicode__(self):
return self.slug
@models.permalink
def get_absolute_url(self):
return ('pastie',[self.slug])
class Admin:
pass
class Meta:
verbose_name_plural = "Pasties"
class Shell(models.Model):
"""
Holds shell data
"""
pastie = models.ForeignKey(Pastie)
version = models.IntegerField(default=0, blank=True)
revision = models.IntegerField(default=0, blank=True, null=True)
# authoring
author = models.ForeignKey(User, null=True, blank=True)
private = models.BooleanField(default=False, blank=True)
# meta
description = models.TextField(null=True, blank=True)
# STATISTICS (a bit)
displayed = models.PositiveIntegerField(default=1, null=True, blank=True)
# is the shell private (do not list in search)
    # how long should it be held by the system?
valid_until = models.DateTimeField('Valid until', default=None, null=True, blank=True)
# editors
code_css = models.TextField('CSS', null=True, blank=True)
code_html = models.TextField('HTML', null=True, blank=True)
code_js = models.TextField('Javascript', null=True, blank=True)
# filled automatically
created_at = models.DateTimeField(default=datetime.now)
# is it proposed to be an example
proposed_example = models.BooleanField(default=False, blank=True)
# loaded library
js_lib = models.ForeignKey(JSLibrary)
js_lib_option = models.CharField(max_length=255, null=True, blank=True)
js_dependency = models.ManyToManyField(JSDependency, null=True, blank=True)
js_wrap = models.CharField(max_length=1, choices=WRAPCHOICE, default='d', null=True, blank=True)
body_tag = models.CharField(max_length=255, null=True, blank=True, default="<body>")
def __str__(self):
past = ''
if self.code_js:
past += ': ' + self.code_js[:20]
elif self.code_html:
past += ': ' + self.code_html[:20]
elif self.code_css:
past += ': ' + self.code_css[:20]
return self.pastie.slug + '-' + str(self.version) + past
@models.permalink
def get_absolute_url(self):
if self.author:
args = [self.author.username]
rev = 'author_'
else:
args=[]
rev = ''
if not self.revision or self.revision == 0:
if not self.version or self.version == 0:
rev += 'pastie'
args.append(self.pastie.slug)
else:
rev += 'shell'
args.extend([self.pastie.slug,self.version])
else:
rev += 'revision'
args.extend([self.pastie.slug,self.version,self.revision])
return (rev, args)
@models.permalink
def get_embedded_url(self):
if self.author:
args = [self.author.username]
rev = 'author_'
else:
args=[]
rev = ''
rev += 'embedded'
if not self.revision or self.revision == 0:
if not self.version or self.version == 0:
args.append(self.pastie.slug)
else:
rev += '_with_version'
args.extend([self.pastie.slug,self.version])
else:
rev += '_revision'
args.extend([self.pastie.slug,self.version,self.revision])
return (rev, args)
@models.permalink
def get_show_url(self):
if self.author:
args = [self.author.username]
rev = 'author_'
else:
args=[]
rev = ''
rev += 'pastie_show'
if not self.revision or self.revision == 0:
if not self.version or self.version == 0:
args.append(self.pastie.slug)
else:
rev += '_with_version'
args.extend([self.pastie.slug,self.version])
else:
rev += '_revision'
args.extend([self.pastie.slug,self.version,self.revision])
return (rev, args)
def get_next_version(self):
shell_with_highest_version = Shell.objects.filter(pastie=self.pastie).order_by('-version')[0]
return shell_with_highest_version.version + 1
def set_next_version(self):
self.version = self.get_next_version()
class Meta:
ordering = ["-version", "revision"]
class Admin:
pass
def increase_version_on_save(instance, **kwargs):
    if kwargs.get('raw',False): return
    # NB: 'created' is only supplied by Django's post_save signal; as wired
    # via pre_save below, this branch will not fire.
    if kwargs.get('created'):
        # check if any shell exists for the pastie
        try:
            shells = Shell.objects.filter(pastie_id=instance.pastie_id).order_by('-version')
            version = shells[0].version + 1
        except:
            version = 0
        print version
        instance.version = version
        instance.save()
pre_save.connect(increase_version_on_save, sender=Shell)
class Example(models.Model):
"""
List of examples
"""
name = models.CharField(max_length=255)
shell = models.ForeignKey(Shell, related_name='example', unique=True)
class Meta:
ordering = ["name"]
|
chriswong/mooshell
|
models.py
|
Python
|
mit
| 7,839
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains routines to verify the correctness of UCD strings.
"""
# STDLIB
import re
# LOCAL
from astropy.utils import data
__all__ = ['parse_ucd', 'check_ucd']
class UCDWords:
"""
Manages a list of acceptable UCD words.
Works by reading in a data file exactly as provided by IVOA. This
file resides in data/ucd1p-words.txt.
"""
def __init__(self):
self._primary = set()
self._secondary = set()
self._descriptions = {}
self._capitalization = {}
with data.get_pkg_data_fileobj(
"data/ucd1p-words.txt", encoding='ascii') as fd:
for line in fd.readlines():
type, name, descr = [
x.strip() for x in line.split('|')]
name_lower = name.lower()
if type in 'QPEV':
self._primary.add(name_lower)
if type in 'QSEV':
self._secondary.add(name_lower)
self._descriptions[name_lower] = descr
self._capitalization[name_lower] = name
def is_primary(self, name):
"""
Returns True if *name* is a valid primary name.
"""
return name.lower() in self._primary
def is_secondary(self, name):
"""
Returns True if *name* is a valid secondary name.
"""
return name.lower() in self._secondary
def get_description(self, name):
"""
Returns the official English description of the given UCD
*name*.
"""
return self._descriptions[name.lower()]
def normalize_capitalization(self, name):
"""
Returns the standard capitalization form of the given name.
"""
return self._capitalization[name.lower()]
_ucd_singleton = None
def parse_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Parse the UCD into its component parts.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
        the UCD1+ controlled vocabulary (as required by the VOTable
        specification, version 1.2); otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
parts : list
The result is a list of tuples of the form:
(*namespace*, *word*)
If no namespace was explicitly specified, *namespace* will be
returned as ``'ivoa'`` (i.e., the default namespace).
Raises
------
ValueError
if *ucd* is invalid
"""
global _ucd_singleton
if _ucd_singleton is None:
_ucd_singleton = UCDWords()
if has_colon:
m = re.search(r'[^A-Za-z0-9_.:;\-]', ucd)
else:
m = re.search(r'[^A-Za-z0-9_.;\-]', ucd)
if m is not None:
raise ValueError(f"UCD has invalid character '{m.group(0)}' in '{ucd}'")
word_component_re = r'[A-Za-z0-9][A-Za-z0-9\-_]*'
word_re = fr'{word_component_re}(\.{word_component_re})*'
parts = ucd.split(';')
words = []
for i, word in enumerate(parts):
colon_count = word.count(':')
if colon_count == 1:
ns, word = word.split(':', 1)
if not re.match(word_component_re, ns):
raise ValueError(f"Invalid namespace '{ns}'")
ns = ns.lower()
elif colon_count > 1:
raise ValueError(f"Too many colons in '{word}'")
else:
ns = 'ivoa'
if not re.match(word_re, word):
raise ValueError(f"Invalid word '{word}'")
if ns == 'ivoa' and check_controlled_vocabulary:
if i == 0:
if not _ucd_singleton.is_primary(word):
if _ucd_singleton.is_secondary(word):
raise ValueError(
f"Secondary word '{word}' is not valid as a primary word")
else:
raise ValueError(f"Unknown word '{word}'")
else:
if not _ucd_singleton.is_secondary(word):
if _ucd_singleton.is_primary(word):
raise ValueError(
f"Primary word '{word}' is not valid as a secondary word")
else:
raise ValueError(f"Unknown word '{word}'")
try:
normalized_word = _ucd_singleton.normalize_capitalization(word)
except KeyError:
normalized_word = word
words.append((ns, normalized_word))
return words
def check_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Returns False if *ucd* is not a valid `unified content descriptor`_.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
        the UCD1+ controlled vocabulary (as required by the VOTable
        specification, version 1.2); otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
valid : bool
"""
if ucd is None:
return True
try:
parse_ucd(ucd,
check_controlled_vocabulary=check_controlled_vocabulary,
has_colon=has_colon)
except ValueError:
return False
return True
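# Example (a sketch; the words come from the bundled UCD1+ vocabulary file):
#   parse_ucd('pos.eq.ra;meta.main')
#   -> [('ivoa', 'pos.eq.ra'), ('ivoa', 'meta.main')]
#   check_ucd('pos.eq.ra;meta.main', check_controlled_vocabulary=True)  # True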
|
aleksandr-bakanov/astropy
|
astropy/io/votable/ucd.py
|
Python
|
bsd-3-clause
| 5,662
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
cellName = get_program_parameters()
# Store the cell class names in a dictionary.
cellMap = dict()
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_LINE)] = vtk.VTK_LINE
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_QUADRATIC_EDGE)] = vtk.VTK_QUADRATIC_EDGE
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_CUBIC_LINE)] = vtk.VTK_CUBIC_LINE
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_TRIANGLE)] = vtk.VTK_TRIANGLE
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_QUADRATIC_TRIANGLE)] = vtk.VTK_QUADRATIC_TRIANGLE
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_QUAD)] = vtk.VTK_QUAD
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_QUADRATIC_QUAD)] = vtk.VTK_QUADRATIC_QUAD
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_TETRA)] = vtk.VTK_TETRA
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_HEXAHEDRON)] = vtk.VTK_HEXAHEDRON
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_WEDGE)] = vtk.VTK_WEDGE
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_PYRAMID)] = vtk.VTK_PYRAMID
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_QUADRATIC_WEDGE)] = vtk.VTK_QUADRATIC_WEDGE
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_QUADRATIC_PYRAMID)] = vtk.VTK_QUADRATIC_PYRAMID
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_QUADRATIC_HEXAHEDRON)] = vtk.VTK_QUADRATIC_HEXAHEDRON
cellMap[vtk.vtkCellTypes.GetClassNameFromTypeId(vtk.VTK_QUADRATIC_TETRA)] = vtk.VTK_QUADRATIC_TETRA
if cellName not in cellMap:
print("Cell type ", cellName, " is not supported.")
return
source = vtk.vtkCellTypeSource()
source.SetCellType(cellMap[cellName])
source.Update()
print("Cell: ", cellName)
originalPoints = source.GetOutput().GetPoints()
points = vtk.vtkPoints()
points.SetNumberOfPoints(source.GetOutput().GetNumberOfPoints())
rng = vtk.vtkMinimalStandardRandomSequence()
rng.SetSeed(5070) # for testing
vtk.vtkMath.RandomSeed(5070) # for testing
for i in range(0, points.GetNumberOfPoints()):
perturbation = [0.0] * 3
for j in range(0, 3):
rng.Next()
perturbation[j] = rng.GetRangeValue(-0.1, 0.1)
currentPoint = [0.0] * 3
originalPoints.GetPoint(i, currentPoint)
points.SetPoint(i, currentPoint[0] + perturbation[0],
currentPoint[1] + perturbation[1],
currentPoint[2] + perturbation[2])
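    # Note (added comment): each point is jittered by a uniform random offset
    # in [-0.1, 0.1] per axis, so the generated cells are not perfectly regular.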
source.GetOutput().SetPoints(points)
numCells = source.GetOutput().GetNumberOfCells()
print("Number of cells: ", numCells)
idArray = vtk.vtkIntArray()
idArray.SetNumberOfTuples(numCells)
for i in range(0, numCells):
idArray.InsertTuple1(i, i + 1)
idArray.SetName("Ids")
source.GetOutput().GetCellData().AddArray(idArray)
source.GetOutput().GetCellData().SetActiveScalars("Ids")
shrink = vtk.vtkShrinkFilter()
shrink.SetInputConnection(source.GetOutputPort())
    shrink.SetShrinkFactor(0.8)
tessellate = vtk.vtkTessellatorFilter()
tessellate.SetInputConnection(shrink.GetOutputPort())
tessellate.SetMaximumNumberOfSubdivisions(3)
# Create a lookup table to map cell data to colors.
lut = vtk.vtkLookupTable()
colorSeries = vtk.vtkColorSeries()
seriesEnum = colorSeries.BREWER_QUALITATIVE_SET3
colorSeries.SetColorScheme(seriesEnum)
colorSeries.BuildLookupTable(lut, colorSeries.ORDINAL)
# Fill in a few known colors, the rest will be generated if needed.
colors = vtk.vtkNamedColors()
    # Create a mapper and actor. The mapper's input connection is set below,
    # depending on the cell type.
    mapper = vtk.vtkDataSetMapper()
mapper.SetScalarRange(0, numCells + 1)
mapper.SetLookupTable(lut)
mapper.SetScalarModeToUseCellData()
mapper.SetResolveCoincidentTopologyToPolygonOffset()
if (source.GetCellType() == vtk.VTK_QUADRATIC_PYRAMID or
source.GetCellType() == vtk.VTK_QUADRATIC_WEDGE):
mapper.SetInputConnection(shrink.GetOutputPort())
else:
mapper.SetInputConnection(tessellate.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().EdgeVisibilityOn()
# actor.GetProperty().SetLineWidth(3)
textProperty = vtk.vtkTextProperty()
textProperty.SetFontSize(20)
textProperty.SetJustificationToCentered()
textProperty.SetColor(colors.GetColor3d("Lamp_Black"))
textMapper = vtk.vtkTextMapper()
textMapper.SetInput(cellName)
textMapper.SetTextProperty(textProperty)
textActor = vtk.vtkActor2D()
textActor.SetMapper(textMapper)
textActor.SetPosition(320, 20)
# Create a renderer, render window, and interactor.
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Cell Type Source")
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
# Add the actors to the scene.
renderer.AddViewProp(textActor)
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("Silver"))
renderer.ResetCamera()
renderer.GetActiveCamera().Azimuth(30)
renderer.GetActiveCamera().Elevation(30)
renderer.ResetCameraClippingRange()
# Render and interact.
renderWindow.SetSize(640, 480)
renderWindow.Render()
renderWindowInteractor.Start()
def get_program_parameters():
import argparse
description = 'Cell Type Source.'
epilogue = '''
You can supply an optional argument consisting of a vtkCell name e.g: vtkTriangle.
The default is vtkTetra.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('cell_name', nargs='?', default="vtkTetra", type=str, help="The cell name.")
args = parser.parse_args()
return args.cell_name
if __name__ == '__main__':
main()
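# Example invocations (illustrative):
#   python CellTypeSource.py                      # defaults to vtkTetra
#   python CellTypeSource.py vtkQuadraticPyramid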
| lorensen/VTKExamples | src/Python/GeometricObjects/CellTypeSource.py | Python | apache-2.0 | 6,235 |
# coding=utf-8
#
# Copyright © 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from itertools import chain
from .internals import ConfigurationSettingsType, json_encode_string
from .decorators import ConfigurationSetting, Option
from .streaming_command import StreamingCommand
from .search_command import SearchCommand
from .validators import Set
class ReportingCommand(SearchCommand):
""" Processes search result records and generates a reporting data structure.
Reporting search commands run as either reduce or map/reduce operations. The reduce part runs on a search head and
is responsible for processing a single chunk of search results to produce the command's reporting data structure.
The map part is called a streaming preop. It feeds the reduce part with partial results and by default runs on the
search head and/or one or more indexers.
You must implement a :meth:`reduce` method as a generator function that iterates over a set of event records and
yields a reporting data structure. You may implement a :meth:`map` method as a generator function that iterates
over a set of event records and yields :class:`dict` or :class:`list(dict)` instances.
ReportingCommand configuration
==============================
Configure the :meth:`map` operation using a Configuration decorator on your :meth:`map` method. Configure it like
you would a :class:`StreamingCommand`. Configure the :meth:`reduce` operation using a Configuration decorator on
your :meth:`ReportingCommand` class.
You can configure your command for operation under Search Command Protocol (SCP) version 1 or 2. SCP 2 requires
Splunk 6.3 or later.
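    Example (an illustrative sketch, not taken from the original source; the
    command, option, and field names are hypothetical)::

        @Configuration(requires_preop=True)
        class SumCommand(ReportingCommand):
            fieldname = Option(require=True)

            @Configuration()
            def map(self, records):
                for record in records:
                    yield {'value': record.get(self.fieldname, 0)}

            def reduce(self, records):
                yield {'sum': sum(float(record['value']) for record in records)}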
"""
# region Special methods
def __init__(self):
SearchCommand.__init__(self)
# endregion
# region Options
phase = Option(doc='''
**Syntax:** phase=[map|reduce]
**Description:** Identifies the phase of the current map-reduce operation.
''', default='reduce', validate=Set('map', 'reduce'))
# endregion
# region Methods
def map(self, records):
""" Override this method to compute partial results.
:param records:
:type records:
        You must override this method if :code:`requires_preop=True`.
"""
return NotImplemented
def prepare(self):
phase = self.phase
if phase == 'map':
# noinspection PyUnresolvedReferences
self._configuration = self.map.ConfigurationSettings(self)
return
if phase == 'reduce':
streaming_preop = chain((self.name, 'phase="map"', str(self._options)), self.fieldnames)
self._configuration.streaming_preop = ' '.join(streaming_preop)
return
raise RuntimeError('Unrecognized reporting command phase: {}'.format(json_encode_string(unicode(phase))))
def reduce(self, records):
""" Override this method to produce a reporting data structure.
You must override this method.
"""
raise NotImplementedError('reduce(self, records)')
def _execute(self, ifile, process):
SearchCommand._execute(self, ifile, getattr(self, self.phase))
# endregion
# region Types
class ConfigurationSettings(SearchCommand.ConfigurationSettings):
""" Represents the configuration settings for a :code:`ReportingCommand`.
"""
# region SCP v1/v2 Properties
required_fields = ConfigurationSetting(doc='''
List of required fields for this search which back-propagates to the generating search.
Setting this value enables selected fields mode under SCP 2. Under SCP 1 you must also specify
:code:`clear_required_fields=True` to enable selected fields mode. To explicitly select all fields,
specify a value of :const:`['*']`. No error is generated if a specified field is missing.
Default: :const:`None`, which implicitly selects all fields.
Supported by: SCP 1, SCP 2
''')
requires_preop = ConfigurationSetting(doc='''
Indicates whether :meth:`ReportingCommand.map` is required for proper command execution.
If :const:`True`, :meth:`ReportingCommand.map` is guaranteed to be called. If :const:`False`, Splunk
considers it to be an optimization that may be skipped.
Default: :const:`False`
Supported by: SCP 1, SCP 2
''')
streaming_preop = ConfigurationSetting(doc='''
Denotes the requested streaming preop search string.
Computed.
Supported by: SCP 1, SCP 2
''')
# endregion
# region SCP v1 Properties
clear_required_fields = ConfigurationSetting(doc='''
:const:`True`, if required_fields represent the *only* fields required.
If :const:`False`, required_fields are additive to any fields that may be required by subsequent commands.
In most cases, :const:`True` is appropriate for reporting commands.
Default: :const:`True`
Supported by: SCP 1
''')
retainsevents = ConfigurationSetting(readonly=True, value=False, doc='''
Signals that :meth:`ReportingCommand.reduce` transforms _raw events to produce a reporting data structure.
Fixed: :const:`False`
Supported by: SCP 1
''')
streaming = ConfigurationSetting(readonly=True, value=False, doc='''
Signals that :meth:`ReportingCommand.reduce` runs on the search head.
Fixed: :const:`False`
Supported by: SCP 1
''')
# endregion
# region SCP v2 Properties
maxinputs = ConfigurationSetting(doc='''
Specifies the maximum number of events that can be passed to the command for each invocation.
This limit cannot exceed the value of `maxresultrows` in limits.conf_. Under SCP 1 you must specify this
value in commands.conf_.
Default: The value of `maxresultrows`.
Supported by: SCP 2
.. _limits.conf: http://docs.splunk.com/Documentation/Splunk/latest/admin/Limitsconf
''')
run_in_preview = ConfigurationSetting(doc='''
:const:`True`, if this command should be run to generate results for preview; not wait for final output.
This may be important for commands that have side effects (e.g., outputlookup).
Default: :const:`True`
Supported by: SCP 2
''')
type = ConfigurationSetting(readonly=True, value='reporting', doc='''
Command type name.
Fixed: :const:`'reporting'`.
Supported by: SCP 2
''')
# endregion
# region Methods
@classmethod
def fix_up(cls, command):
""" Verifies :code:`command` class structure and configures the :code:`command.map` method.
Verifies that :code:`command` derives from :class:`ReportingCommand` and overrides
:code:`ReportingCommand.reduce`. It then configures :code:`command.reduce`, if an overriding implementation
of :code:`ReportingCommand.reduce` has been provided.
:param command: :code:`ReportingCommand` class
Exceptions:
:code:`TypeError` :code:`command` class is not derived from :code:`ReportingCommand`
:code:`AttributeError` No :code:`ReportingCommand.reduce` override
"""
if not issubclass(command, ReportingCommand):
                raise TypeError('{} is not a ReportingCommand'.format(command))
if command.reduce == ReportingCommand.reduce:
raise AttributeError('No ReportingCommand.reduce override')
if command.map == ReportingCommand.map:
cls._requires_preop = False
return
f = vars(command)[b'map'] # Function backing the map method
# EXPLANATION OF PREVIOUS STATEMENT: There is no way to add custom attributes to methods. See [Why does
# setattr fail on a method](http://stackoverflow.com/questions/7891277/why-does-setattr-fail-on-a-bound-method) for a discussion of this issue.
try:
settings = f._settings
except AttributeError:
f.ConfigurationSettings = StreamingCommand.ConfigurationSettings
return
# Create new StreamingCommand.ConfigurationSettings class
module = command.__module__ + b'.' + command.__name__ + b'.map'
name = b'ConfigurationSettings'
bases = (StreamingCommand.ConfigurationSettings,)
f.ConfigurationSettings = ConfigurationSettingsType(module, name, bases)
ConfigurationSetting.fix_up(f.ConfigurationSettings, settings)
del f._settings
pass
# endregion
pass
# endregion
| Teisei/TaxiRobot | lib/splunklib/searchcommands/reporting_command.py | Python | apache-2.0 | 9,668 |
# -*- coding: UTF-8 -*-
__all__ = ['EMOJI_CARACTER','EMOJI_CARACTER_NEW']
EMOJI_CARACTER_NEW = {}
EMOJI_CARACTERteste = {
':heavy_black_heart:':'<3',
':simple_smile:':':)'
}
EMOJI_CARACTER = {
':soccer_ball:':' ',
':smile:':':)',
':simple_smile:':':)',
':kissing_closed_eyes:':'',
':kissing_heart:':':-*',
':flushed:':'o-o',
':grin:':':-D',
':wink:':';-)',
':stuck_out_tongue_winking_eye:':':-P',
':stuck_out_tongue_closed_eyes:':':-P',
':grinning:':':-D',
':kissing:':':*',
':stuck_out_tongue:':':-P',
':frowning:':':-(',
':open_mouth:':':-O',
':confused:':":'\'",
':hushed:':':-O',
':expressionless:':'-_-',
':disappointed_relieved:':":'-(",
':disappointed:':':-(',
':heavy_black_heart:':'<3',
':persevere:':'>.<',
':cry:':":'(",
':angry:':'>:(',
':sunglasses:':'B-)',
':innocent:':'O:)',
':yellow_heart:':'<3',
':blue_heart:':'<3',
':purple_heart:':'<3',
':heart:':'<3',
':green_heart:':'<3',
':broken_heart:':'</3',
':heartbeat:':'<3',
':heartpulse:':'<3',
':two_hearts:':'<3<3',
':angel:':'O:)',
':smiley_cat:':':3',
':smile_cat:':':3',
':tongue:':':-P'
}
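# Illustrative usage sketch (not part of the original module): replace emoji
# shortcodes in a message with their ASCII equivalents; the sample message is
# hypothetical.
if __name__ == '__main__':
    message = 'bom dia :smile: :heart:'
    for shortcode, ascii_face in EMOJI_CARACTER.items():
        message = message.replace(shortcode, ascii_face)
    print(message)  # bom dia :) <3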
| beatrizChagas/scrm-solutions | extracao/rsoservices/preprocessing_dict.py | Python | gpl-3.0 | 1,198 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class A(AutotoolsPackage):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
version('2.0', '2.0_a_hash')
variant(
'foo',
values=('bar', 'baz', 'fee'),
default='bar',
description='',
multi=True
)
variant(
'foobar',
values=('bar', 'baz', 'fee'),
default='bar',
description='',
multi=False
)
variant('bvv', default=True, description='The good old BV variant')
depends_on('b', when='foobar=bar')
def with_or_without_fee(self, activated):
if not activated:
return '--no-fee'
return '--fee-all-the-time'
def autoreconf(self, spec, prefix):
pass
def configure(self, spec, prefix):
pass
def build(self, spec, prefix):
pass
def install(self, spec, prefix):
pass
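    # Illustrative note (added; not part of the original mock package): an
    # AutotoolsPackage would typically consume the variants above through
    # self.with_or_without('foo') in configure_args(); for the value 'fee',
    # Spack dispatches to the custom activation method with_or_without_fee()
    # defined above instead of emitting generic --with-fee/--without-fee flags.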
| skosukhin/spack | var/spack/repos/builtin.mock/packages/a/package.py | Python | lgpl-2.1 | 2,262 |
# Time: O(n)
# Space: O(n)
# Given an unsorted array, find the maximum difference between
#
# the successive elements in its sorted form.
#
# Try to solve it in linear time/space.
#
# Return 0 if the array contains less than 2 elements.
#
# You may assume all elements in the array are non-negative integers
#
# and fit in the 32-bit signed integer range.
# bucket sort
# Time: O(n)
# Space: O(n)
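# Why bucketing works (pigeonhole argument, added for clarity): with n values
# spanning (max_val - min_val), the maximum gap is at least
# ceil((max_val - min_val) / (n - 1)), which is the bucket width used below.
# Therefore no gap inside a single bucket can be the answer; only the gaps
# between consecutive non-empty buckets need to be checked.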
class Solution(object):
def maximumGap(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2:
return 0
# Init bucket.
max_val, min_val = max(nums), min(nums)
        gap = max(1, (max_val - min_val) // (len(nums) - 1))
        bucket_size = (max_val - min_val) // gap + 1
        bucket = [{'min': float("inf"), 'max': float("-inf")}
                  for _ in range(bucket_size)]
# Find the bucket where the n should be put.
for n in nums:
# min_val / max_val is in the first / last bucket.
if n in (max_val, min_val):
continue
            i = (n - min_val) // gap
bucket[i]['min'] = min(bucket[i]['min'], n)
bucket[i]['max'] = max(bucket[i]['max'], n)
# Count each bucket gap between the first and the last bucket.
max_gap, pre_bucket_max = 0, min_val
        for i in range(bucket_size):
            # Skip the bucket if it is empty.
if bucket[i]['min'] == float("inf") and \
bucket[i]['max'] == float("-inf"):
continue
max_gap = max(max_gap, bucket[i]['min'] - pre_bucket_max)
pre_bucket_max = bucket[i]['max']
# Count the last bucket.
max_gap = max(max_gap, max_val - pre_bucket_max)
return max_gap
# Time: O(nlogn)
# Space: O(n)
class Solution2(object):
def maximumGap(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2:
return 0
nums.sort()
pre = nums[0]
max_gap = float("-inf")
for i in nums:
max_gap = max(max_gap, i - pre)
pre = i
return max_gap
if __name__ == "__main__":
    print(Solution().maximumGap([3, 1, 1, 1, 5, 5, 5, 5]))
| yiwen-luo/LeetCode | Python/maximum-gap.py | Python | mit | 2,326 |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Feathercoin:0.9.6.2/|/Feathercoin:0.13.(0|1|2|99)/|/Feathercoin:0.16.(0|99)/)$")
def parseline(line):
sline = line.split()
    if len(sline) < 12:  # fields up to the user agent (sline[11]) are required
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
    # Skip bad results (the fields are strings, so compare against '0').
    if sline[1] == '0':
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
    '''Filter out hosts with more than one node per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
        except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
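# Illustrative usage (assumed invocation; the script reads the seeder dump on
# stdin and prints one "address:port" per line):
#   python3 makeseeds.py < dnsseed.dump > nodes_main.txt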
| wellenreiter01/Feathercoin | contrib/seeds/makeseeds.py | Python | mit | 5,457 |
# Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr import app
from jormungandr.street_network.streetnetwork_backend_manager import StreetNetworkBackendManager
from navitiacommon.models.streetnetwork_backend import StreetNetworkBackend
from .tests_mechanism import config, NewDefaultScenarioAbstractTestFixture
from mock import MagicMock
from .journey_common_tests import *
import operator
import datetime
import math
"""
This unit runs all the common tests in journey_common_tests.py along with locals tests added in this
unit for scenario experimental
"""
@config(
{
"scenario": "distributed",
"instance_config": {
"street_network": [
{
"modes": ["bike", "bss", "car", "walking"],
"class": "jormungandr.street_network.tests.MockKraken",
}
]
},
}
)
class TestJourneysDistributedWithMock(JourneyMinBikeMinCar, NewDefaultScenarioAbstractTestFixture):
def test_first_and_last_section_multi_modes(self):
"""Test to verify optimization of direct path calls
"""
# Initialize counter value in the object MockKraken
sn_service = i_manager.instances['main_routing_test'].get_all_street_networks()[0]
sn_service.direct_path_call_count = 0
sn_service.routing_matrix_call_count = 0
query = (
"journeys?from={from_coord}&to={to_coord}&datetime={datetime}&"
"first_section_mode[]=bike&first_section_mode[]=walking&"
"last_section_mode[]=walking&min_nb_journeys=10&last_section_mode[]=bike&debug=true".format(
from_coord=s_coord, to_coord=r_coord, datetime="20120614T075500"
)
)
assert sn_service.direct_path_call_count == 0
assert sn_service.routing_matrix_call_count == 0
response = self.query_region(query)
check_best(response)
        # Without optimization (context.partial_response_is_empty = True in distributed._compute_journeys()):
# journey count = 18 / direct_path_call_count = 26 / routing_matrix_call_count = 20
# get_directpath_count_by_mode(response, 'walking') == 5
# get_directpath_count_by_mode(response, 'bike') == 5
assert len(response["journeys"]) == 8
assert sn_service.direct_path_call_count == 4
assert sn_service.routing_matrix_call_count == 4
assert get_directpath_count_by_mode(response, 'walking') == 1
assert get_directpath_count_by_mode(response, 'bike') == 1
# This will call jormun so we check our counter before
self.is_valid_journey_response(response, query)
def test_first_and_last_section_multi_modes_no_debug(self):
"""Test to verify optimization of direct path calls
"""
# Initialize counter value in the object MockKraken
sn_service = i_manager.instances['main_routing_test'].get_all_street_networks()[0]
sn_service.direct_path_call_count = 0
sn_service.routing_matrix_call_count = 0
query = (
"journeys?from={from_coord}&to={to_coord}&datetime={datetime}&"
"first_section_mode[]=bike&first_section_mode[]=walking&"
"last_section_mode[]=walking&min_nb_journeys=10&last_section_mode[]=bike".format(
from_coord=s_coord, to_coord=r_coord, datetime="20120614T075500"
)
)
assert sn_service.direct_path_call_count == 0
assert sn_service.routing_matrix_call_count == 0
response = self.query_region(query)
check_best(response)
        # Without optimization (context.partial_response_is_empty = True in distributed._compute_journeys()):
# journey count = 18 / direct_path_call_count = 26 / routing_matrix_call_count = 20
# get_directpath_count_by_mode(response, 'walking') == 5
# get_directpath_count_by_mode(response, 'bike') == 5
assert len(response["journeys"]) == 6
assert sn_service.direct_path_call_count == 4
assert sn_service.routing_matrix_call_count == 4
assert get_directpath_count_by_mode(response, 'walking') == 1
assert get_directpath_count_by_mode(response, 'bike') == 1
# This will call jormun so we check our counter before
self.is_valid_journey_response(response, query)
@config({'scenario': 'distributed'})
class TestJourneysDistributed(
JourneyCommon,
DirectPath,
JourneyMinBikeMinCar,
NewDefaultScenarioAbstractTestFixture,
JourneysDirectPathMode,
):
"""
Test the experiental scenario
All the tests are defined in "TestJourneys" class, we only change the scenario
NOTE: for the moment we cannot import all routing tests, so we only get 2, but we need to add some more
"""
def test_journey_with_different_fallback_modes(self):
"""
Test when departure/arrival fallback modes are different
"""
query = (
journey_basic_query
+ "&first_section_mode[]=walking&last_section_mode[]=car&debug=true&_car_park_duration=5"
)
response = self.query_region(query)
check_best(response)
jrnys = response['journeys']
assert jrnys
assert jrnys[0]['sections'][0]['mode'] == 'walking'
assert jrnys[0]['sections'][-1]['mode'] == 'car'
context = response['context']
assert 'car_direct_path' in context
assert 'co2_emission' in context['car_direct_path']
assert len(response["terminus"]) == 1
def test_journey_with_limited_nb_crowfly(self):
"""
        Test that when max_nb_crowfly_by_car=0, we cannot fall back with car.
"""
query = (
journey_basic_query
+ "&first_section_mode[]=walking&last_section_mode[]=car&_max_nb_crowfly_by_car=0"
)
response = self.query_region(query)
check_best(response)
jrnys = response['journeys']
assert len(jrnys) == 1
assert jrnys[0]['sections'][0]['mode'] == 'walking'
assert jrnys[0]['sections'][-1]['mode'] == 'walking'
query = journey_basic_query + "&first_section_mode[]=walking&_max_nb_crowfly_by_walking=0"
response = self.query_region(query)
check_best(response)
jrnys = response['journeys']
assert len(jrnys) == 1
assert 'walking' in jrnys[0]['tags']
assert 'non_pt' in jrnys[0]['tags']
"""
Test when max_nb_crowfly_by_walking=1
"""
query = journey_basic_query + "&first_section_mode[]=walking&_max_nb_crowfly_by_walking=1"
response = self.query_region(query)
check_best(response)
jrnys = response['journeys']
assert len(jrnys) == 2
# we should find at least one pt journey in the response
assert any('non_pt' not in j['tags'] for j in jrnys)
def test_best_filtering(self):
"""
This feature is no longer supported"""
pass
def test_journeys_wheelchair_profile(self):
"""
This feature is no longer supported
"""
pass
def test_not_existent_filtering(self):
"""
This feature is no longer supported
"""
pass
def test_other_filtering(self):
"""
This feature is no longer supported
"""
pass
def test_street_network_routing_matrix(self):
from jormungandr import i_manager
from navitiacommon import response_pb2
instance = i_manager.instances['main_routing_test']
origin = instance.georef.place("stopB", None)
assert origin
destination = instance.georef.place("stopA", None)
assert destination
max_duration = 18000
mode = 'walking'
kwargs = {
"walking": instance.walking_speed,
"bike": instance.bike_speed,
"car": instance.car_speed,
"bss": instance.bss_speed,
"ridesharing": instance.car_no_park_speed,
"taxi": instance.taxi_speed,
}
request = {
"walking_speed": instance.walking_speed,
"bike_speed": instance.bike_speed,
"car_speed": instance.car_speed,
"bss_speed": instance.bss_speed,
"car_no_park_speed": instance.car_no_park_speed,
"taxi_speed": instance.taxi_speed,
"bss_rent_duration": instance.bss_rent_duration,
"bss_rent_penalty": instance.bss_rent_penalty,
"bss_return_duration": instance.bss_return_duration,
"bss_return_penalty": instance.bss_return_penalty,
"_asgard_max_walking_duration_coeff": 1,
"_asgard_max_bike_duration_coeff": 1,
"_asgard_max_bss_duration_coeff": 1,
"_asgard_max_car_duration_coeff": 1,
}
service = instance.get_street_network(mode, request)
request_id = None
resp = service.get_street_network_routing_matrix(
instance, [origin], [destination], mode, max_duration, request, request_id, **kwargs
)
assert len(resp.rows[0].routing_response) == 1
assert resp.rows[0].routing_response[0].duration == 107
assert resp.rows[0].routing_response[0].routing_status == response_pb2.reached
max_duration = 106
resp = service.get_street_network_routing_matrix(
instance, [origin], [destination], mode, max_duration, request, request_id, **kwargs
)
assert len(resp.rows[0].routing_response) == 1
assert resp.rows[0].routing_response[0].duration == 0
assert resp.rows[0].routing_response[0].routing_status == response_pb2.unreached
def test_intersection_objects(self):
# The coordinates of arrival and the stop point are separated by 20m
r = self.query(
'/v1/coverage/main_routing_test/journeys?from=stopA&to=coord%3A8.98311981954709e-05%3A8.98311981954709e-05&datetime=20120614080000&'
)
assert len(r['journeys'][0]['sections']) == 3
# destination of crow_fly section and origin of next pt section should be the same object.
assert r['journeys'][0]['sections'][0]['type'] == 'crow_fly'
assert r['journeys'][0]['sections'][1]['type'] == 'public_transport'
assert r['journeys'][0]['sections'][0]['to'] == r['journeys'][0]['sections'][1]['from']
# destination of pt section and origin of next street_network section should be the same object.
assert r['journeys'][0]['sections'][-1]['type'] == 'street_network'
assert r['journeys'][0]['sections'][1]['to'] == r['journeys'][0]['sections'][-1]['from']
r = self.query(
'/v1/coverage/main_routing_test/journeys?from=coord%3A8.98311981954709e-05%3A8.98311981954709e-05&to=stopA&datetime=20120614080000'
)
assert len(r['journeys'][0]['sections']) == 3
# destination of crow_fly section and origin of next pt section should be the same object.
assert r['journeys'][0]['sections'][0]['type'] == 'street_network'
assert r['journeys'][0]['sections'][1]['type'] == 'public_transport'
assert r['journeys'][0]['sections'][0]['to'] == r['journeys'][0]['sections'][1]['from']
# destination of pt section and origin of next street_network section should be the same object.
assert r['journeys'][0]['sections'][-1]['type'] == 'crow_fly'
assert r['journeys'][0]['sections'][1]['to'] == r['journeys'][0]['sections'][-1]['from']
def test_walking_bss_duplicate_journey(self):
query = (
'/v1/coverage/main_routing_test/journeys?'
'from=0.0000898312;0.0000898312&to=0.00188646;0.00071865&'
'datetime=20120614T080000&'
'first_section_mode[]=walking&first_section_mode[]=bss&'
'last_section_mode[]=walking&last_section_mode[]=bss&'
'bss_speed=1&walking_speed=0.2735'
)
r = self.query(query)
# only one pt in the response
assert sum(int('non_pt' not in j['tags']) for j in r['journeys']) == 1
        # The pt_journey is computed with the BSS mode; since there is no bss section, the travel mode should be walking
pt_journey = next((j for j in r['journeys'] if 'non_pt' not in j['tags']), None)
assert pt_journey['sections'][0]['mode'] == 'walking'
assert pt_journey['sections'][-1]['mode'] == 'walking'
assert len(r["terminus"]) == 1
assert r["terminus"][0]["id"] == "stopA"
r = self.query(query + "&debug=true")
# find all pt_journeys
pt_journeys = [j for j in r['journeys'] if 'non_pt' not in j['tags']]
# should be two pt in the response
assert len(pt_journeys) == 2
pt_1 = pt_journeys[0]
pt_2 = pt_journeys[1]
for s1, s2 in zip(pt_1['sections'], pt_2['sections']):
assert s1['type'] == s2['type']
if s1['type'] == 'public_transport':
s1_vj = next(l['id'] for l in s1['links'] if l['type'] == 'vehicle_journey')
s2_vj = next(l['id'] for l in s2['links'] if l['type'] == 'vehicle_journey')
assert s1_vj == s2_vj
        # one journey should be deleted because it duplicates another
assert sum(int('deleted_because_duplicate_journey' in j['tags']) for j in r['journeys']) == 1
def test_walking_bss_override_mode_journey(self):
query = (
'/v1/coverage/main_routing_test/journeys?'
'from=0.0000898312;0.0000898312&to=0.00188646;0.00071865&'
'datetime=20120614T080000&'
'first_section_mode[]=walking&first_section_mode[]=bss&'
'last_section_mode[]=walking&last_section_mode[]=bss&'
'bss_speed=1&walking_speed=0.2735&debug=true'
)
# for the first request, the walking duration to the stop_point is equal to the bss duration
r = self.query(query)
assert sum(int('non_pt' not in j['tags']) for j in r['journeys']) == 2
debug = "true"
query = (
'/v1/coverage/main_routing_test/journeys?'
'from=0.0000898312;0.0000898312&to=0.00188646;0.00071865&'
'datetime=20120614T080000&'
'first_section_mode[]=walking&first_section_mode[]=bss&'
'last_section_mode[]=walking&last_section_mode[]=bss&'
'bss_speed=1&walking_speed=1&debug={debug}'.format(debug=debug)
)
r = self.query(query)
assert sum(int('non_pt' not in j['tags']) for j in r['journeys']) == 2
debug = "false"
query = (
'/v1/coverage/main_routing_test/journeys?'
'from=0.0000898312;0.0000898312&to=0.00188646;0.00071865&'
'datetime=20120614T080000&'
'first_section_mode[]=walking&first_section_mode[]=bss&'
'last_section_mode[]=walking&last_section_mode[]=bss&'
'bss_speed=1&walking_speed=1&debug={debug}'.format(debug=debug)
)
r = self.query(query)
assert sum(int('non_pt' not in j['tags']) for j in r['journeys']) == 1
def test_duplicate_journey_crow_fly_walking_bss(self):
query_template = (
'/v1/coverage/main_routing_test/journeys?'
'from=0.000898311981954709;0.0008084807837592382&to=0.00898311981954709;0.0001796623963909418&'
'datetime=20120614T080000&'
'first_section_mode[]=walking&first_section_mode[]=bss&'
'last_section_mode[]=walking&last_section_mode[]=bss&'
'bss_speed=1.5&walking_speed=1&_final_line_filter=true&debug=true'
)
r = self.query(query_template.format(debug='false'))
# should be two journeys in the response
assert len(r['journeys']) == 2
# first journey should finish with a crow_fly bss (fallback)
        # so it should also be tagged 'to_delete' and 'deleted_because_duplicate_journey'
assert r['journeys'][0]['sections'][-1]['type'] == 'crow_fly'
assert r['journeys'][0]['sections'][-1]['mode'] == 'bss'
assert 'deleted_because_duplicate_journey' in r['journeys'][0]['tags']
assert 'to_delete' in r['journeys'][0]['tags']
# second journey should finish with a crow_fly walking (fallback), so we keep it
assert r['journeys'][1]['sections'][-1]['type'] == 'crow_fly'
assert r['journeys'][1]['sections'][-1]['mode'] == 'walking'
assert 'deleted_because_duplicate_journey' not in r['journeys'][1]['tags']
assert 'to_delete' not in r['journeys'][1]['tags']
def test_direct_path_bss_bike(self):
query = (
journey_basic_query
+ "&direct_path_mode[]=bike"
+ "&direct_path_mode[]=bss"
+ "&max_duration_to_pt=0"
)
response = self.query_region(query)
assert len(response['journeys']) == 2
# we should find two journeys: one is bike and another is bss
assert any('bss' in j['tags'] for j in response['journeys'])
assert any('bike' in j['tags'] for j in response['journeys'])
def test_direct_path_bss_walking(self):
# In case we ask for :
# - direct_path_mode=walking
# - first_section_mode=walking&bss
# - last_section_mode=bss
# We expect to have a walking direct_path journey
query = (
journey_basic_query
+ "&direct_path_mode[]=walking"
+ "&first_section_mode[]=walking&first_section_mode[]=bss"
+ "&last_section_mode[]=bss"
)
response = self.query_region(query)
assert len(response['journeys']) == 2
# we should find at least a walking direct_path journey
assert any('non_pt_walking' in j['tags'] for j in response['journeys'])
assert any('walking' in j['tags'] for j in response['journeys'])
assert len(response['journeys'][1]['sections']) == 1
def test_journey_with_access_points(self):
query = journey_basic_query + "&_access_points=true"
response = self.query_region(query)
assert len(response['journeys']) == 2
pt_journey = next((j for j in response['journeys'] if 'non_pt' not in j['tags']), None)
assert pt_journey
assert len(pt_journey['sections'][0]['vias']) == 1
access_point = pt_journey['sections'][0]['vias'][0]['access_point']
assert access_point["id"] == "access_point:B1"
assert access_point["is_entrance"]
assert not access_point["is_exit"]
assert access_point["traversal_time"] == 2
assert access_point["length"] == 1
path = pt_journey['sections'][0]['path'][-1]
assert path['duration'] == 2
assert path['length'] == 1
assert path['via_uri'] == "access_point:B1"
assert path['instruction'] == "Then Enter stop_point:stopB (Condom) via access_point:B1."
path_sum = sum(p['duration'] for p in pt_journey['sections'][0]['path'])
assert pt_journey['sections'][0]['duration'] == pytest.approx(path_sum, 1.0)
access_point = pt_journey['sections'][2]['vias'][0]['access_point']
assert access_point["id"] == "access_point:A2"
assert not access_point["is_entrance"]
assert access_point["is_exit"]
assert access_point["traversal_time"] == 4
assert access_point["length"] == 3
path = pt_journey['sections'][2]['path'][0]
assert path['duration'] == 4
assert path['length'] == 3
assert path['via_uri'] == "access_point:A2"
assert path['instruction'] == "Exit stop_point:stopA (Condom) via access_point:A2."
path_sum = sum(p['duration'] for p in pt_journey['sections'][2]['path'])
assert pt_journey['sections'][2]['duration'] == pytest.approx(path_sum, 1.0)
@config({"scenario": "distributed"})
class TestDistributedJourneysWithPtref(JourneysWithPtref, NewDefaultScenarioAbstractTestFixture):
pass
@config({"scenario": "distributed"})
class TestDistributedOnBasicRouting(OnBasicRouting, NewDefaultScenarioAbstractTestFixture):
def test_isochrone(self):
super(TestDistributedOnBasicRouting, self).test_isochrone()
@config({"scenario": "distributed"})
class TestDistributedMinNbJourneys(JourneysMinNbJourneys, NewDefaultScenarioAbstractTestFixture):
pass
@config({"scenario": "distributed"})
class TestDistributedWithNightBusFilter(JourneysWithNightBusFilter, NewDefaultScenarioAbstractTestFixture):
pass
@config({"scenario": "distributed"})
class TestDistributedTimeFrameDuration(JourneysTimeFrameDuration, NewDefaultScenarioAbstractTestFixture):
pass
@config({"scenario": "distributed"})
class TestDistributedJourneyTickets(JourneysTickets, NewDefaultScenarioAbstractTestFixture):
pass
@config({"scenario": "distributed"})
class TestDistributedJourneyTicketsWithDebug(JourneysTicketsWithDebug, NewDefaultScenarioAbstractTestFixture):
pass
def _make_function_distance_over_upper_limit(from_coord, to_coord, mode, op):
def test_ko_crow_fly_longer_than_max_mode_direct_path_distance(self):
query = (
'journeys?'
'from={from_coord}'
'&to={to_coord}'
'&datetime={datetime}'
'&first_section_mode[]={mode}'
'&last_section_mode[]={mode}'
'&max_duration=0'
'&_min_bike=0'
'&_min_car=0'
'&_min_taxi=0'
).format(from_coord=from_coord, to_coord=to_coord, datetime="20120614T080000", mode=mode)
response = self.query_region(query)
assert len(response['journeys']) == 1
assert mode in response['journeys'][0]['tags']
assert 'non_pt' in response['journeys'][0]['tags']
direct_path_distance = response['journeys'][0]['distances'][mode]
        # the exact crow_fly distance is unknown, so we divide the direct-path distance by 10
        # to make sure we stay under the crow_fly distance
query = (query + '&max_{mode}_direct_path_distance={max_dp_distance}').format(
mode=mode, max_dp_distance=int(direct_path_distance / 10)
)
response = self.query_region(query)
# New Default -> 'journeys' in response
# Distributed -> 'journeys' not in response
assert op('journeys' not in response)
return test_ko_crow_fly_longer_than_max_mode_direct_path_distance
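# Note (added for clarity): the factory above builds one test function per
# (origin, destination, mode) tuple. `op` is operator.truth for the distributed
# scenario (the direct path is filtered out, so 'journeys' is absent from the
# response) and operator.not_ for new_default (the parameter is deactivated, so
# 'journeys' is still present).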
@dataset({"main_routing_test": {"scenario": "distributed"}})
class TestDistributedMaxDistanceForDirectPathUpperLimit(NewDefaultScenarioAbstractTestFixture):
"""
Test max_{mode}_direct_path_distance's upper limit
Direct path should be filtered if its crow_fly distance is greater than max_{mode}_direct_path_distance
"""
s = '8.98311981954709e-05;8.98311981954709e-05'
r = '0.0018864551621048887;0.0007186495855637672'
test_max_walking_direct_path_distance = _make_function_distance_over_upper_limit(
s, r, 'walking', operator.truth
)
test_max_car_direct_path_distance = _make_function_distance_over_upper_limit(s, r, 'car', operator.truth)
test_max_bike_direct_path_distance = _make_function_distance_over_upper_limit(s, r, 'bike', operator.truth)
a = '0.001077974378345651;0.0007186495855637672'
b = '8.98311981954709e-05;0.0002694935945864127'
test_max_taxi_direct_path_distance = _make_function_distance_over_upper_limit(a, b, 'taxi', operator.truth)
def _make_function_distance_under_lower_limit(from_coord, to_coord, mode):
def test_ko_crow_fly_smaller_than_max_mode_direct_path_distance(self):
query = (
'journeys?'
'from={from_coord}'
'&to={to_coord}'
'&datetime={datetime}'
'&first_section_mode[]={mode}'
'&last_section_mode[]={mode}'
'&max_duration=0'
'&max_{mode}_direct_path_distance={max_dp_distance}'
'&_min_bike=0'
'&_min_car=0'
'&_min_taxi=0'
).format(
from_coord=from_coord,
to_coord=to_coord,
datetime="20120614T080000",
mode=mode,
max_dp_distance=50000,
)
response = self.query_region(query)
assert len(response['journeys']) == 1
assert mode in response['journeys'][0]['tags']
assert 'non_pt' in response['journeys'][0]['tags']
return test_ko_crow_fly_smaller_than_max_mode_direct_path_distance
@dataset({"main_routing_test": {"scenario": "distributed"}})
class TestDistributedMaxDistanceForDirectPathLowerLimit(NewDefaultScenarioAbstractTestFixture):
"""
Test max_{mode}_direct_path_distance's lower limit
    Direct path should be found if its crow_fly distance is lower than max_{mode}_direct_path_distance.
"""
s = '8.98311981954709e-05;8.98311981954709e-05'
r = '0.0018864551621048887;0.0007186495855637672'
test_max_walking_direct_path_distance = _make_function_distance_under_lower_limit(s, r, 'walking')
test_max_car_direct_path_distance = _make_function_distance_under_lower_limit(s, r, 'car')
test_max_bike_direct_path_distance = _make_function_distance_under_lower_limit(s, r, 'bike')
a = '0.001077974378345651;0.0007186495855637672'
b = '8.98311981954709e-05;0.0002694935945864127'
test_max_taxi_direct_path_distance = _make_function_distance_under_lower_limit(a, b, 'taxi')
@dataset({"main_routing_test": {"scenario": "new_default"}})
class TestNewDefaultMaxDistanceForDirectPath(NewDefaultScenarioAbstractTestFixture):
"""
the max_{mode}_direct_path_distance should be deactivated in new_default
"""
s = '8.98311981954709e-05;8.98311981954709e-05'
r = '0.0018864551621048887;0.0007186495855637672'
test_max_walking_direct_path_duration = _make_function_distance_over_upper_limit(
s, r, 'walking', operator.not_
)
test_max_car_direct_path_duration = _make_function_distance_over_upper_limit(s, r, 'car', operator.not_)
test_max_bike_direct_path_duration = _make_function_distance_over_upper_limit(s, r, 'bike', operator.not_)
def _make_function_duration_over_upper_limit(from_coord, to_coord, mode, op):
def test_ko_direct_path_longer_than_max_mode_direct_path_duration(self):
query = (
'journeys?'
'from={from_coord}'
'&to={to_coord}'
'&datetime={datetime}'
'&first_section_mode[]={mode}'
'&last_section_mode[]={mode}'
'&max_duration=0'
'&_min_bike=0'
'&_min_car=0'
'&_min_taxi=0'
).format(from_coord=from_coord, to_coord=to_coord, datetime="20120614T080000", mode=mode)
response = self.query_region(query)
assert len(response['journeys']) == 1
assert mode in response['journeys'][0]['tags']
assert 'non_pt' in response['journeys'][0]['tags']
direct_path_duration = response['journeys'][0]['duration']
query = (query + '&max_{mode}_direct_path_duration={max_dp_duration}').format(
mode=mode, max_dp_duration=direct_path_duration - 1
)
response = self.query_region(query)
# New Default -> 'journeys' in response
# Distributed -> 'journeys' not in response
assert op('journeys' not in response)
return test_ko_direct_path_longer_than_max_mode_direct_path_duration
@dataset({"main_routing_test": {"scenario": "distributed"}})
class TestDistributedMaxDurationForDirectPathUpperLimit(NewDefaultScenarioAbstractTestFixture):
"""
Test max_{mode}_direct_path_duration's upper limit
Direct path should be filtered if its duration is greater than max_{mode}_direct_path_duration
"""
s = '8.98311981954709e-05;8.98311981954709e-05'
r = '0.0018864551621048887;0.0007186495855637672'
test_max_walking_direct_path_duration = _make_function_duration_over_upper_limit(
s, r, 'walking', operator.truth
)
test_max_car_direct_path_duration = _make_function_duration_over_upper_limit(s, r, 'car', operator.truth)
test_max_bss_direct_path_duration = _make_function_duration_over_upper_limit(s, r, 'bss', operator.truth)
test_max_bike_direct_path_duration = _make_function_duration_over_upper_limit(s, r, 'bike', operator.truth)
a = '0.001077974378345651;0.0007186495855637672'
b = '8.98311981954709e-05;0.0002694935945864127'
test_max_taxi_direct_path_duration = _make_function_duration_over_upper_limit(a, b, 'taxi', operator.truth)
def _make_function_duration_under_upper_limit(from_coord, to_coord, mode):
def test_get_direct_path_smaller_than_max_mode_direct_path_duration(self):
query = (
'journeys?'
'from={from_coord}'
'&to={to_coord}'
'&datetime={datetime}'
'&first_section_mode[]={mode}'
'&last_section_mode[]={mode}'
'&max_duration=0'
'&{mode}_speed=1'
'&max_{mode}_direct_path_duration={max_dp_duration}'
'&_min_bike=0'
'&_min_car=0'
'&_min_taxi=0'
).format(
from_coord=from_coord, to_coord=to_coord, datetime="20120614T080000", mode=mode, max_dp_duration=3600
)
response = self.query_region(query)
assert len(response['journeys']) == 1
assert mode in response['journeys'][0]['tags']
assert 'non_pt' in response['journeys'][0]['tags']
return test_get_direct_path_smaller_than_max_mode_direct_path_duration
@dataset({"main_routing_test": {"scenario": "distributed"}})
class TestDistributedMaxDurationForDirectPathLowerLimit(NewDefaultScenarioAbstractTestFixture):
"""
Test max_{mode}_direct_path_duration's lower limit
Direct path should be found if its duration is lower than max_{mode}_direct_path_duration.
    Especially when the direct path's duration is large and the street network calculator is Kraken.
"""
s = '8.98311981954709e-05;8.98311981954709e-05'
r = '0.0018864551621048887;0.0007186495855637672'
test_max_walking_direct_path_duration = _make_function_duration_under_upper_limit(s, r, 'walking')
test_max_car_direct_path_duration = _make_function_duration_under_upper_limit(s, r, 'car')
test_max_bss_direct_path_duration = _make_function_duration_under_upper_limit(s, r, 'bss')
test_max_bike_direct_path_duration = _make_function_duration_under_upper_limit(s, r, 'bike')
a = '0.001077974378345651;0.0007186495855637672'
b = '8.98311981954709e-05;0.0002694935945864127'
test_max_taxi_direct_path_duration = _make_function_duration_under_upper_limit(a, b, 'taxi')
@dataset({"main_routing_test": {"scenario": "new_default"}})
class TestNewDefaultMaxDurationForDirectPath(NewDefaultScenarioAbstractTestFixture):
"""
the max_{mode}_direct_path_duration should be deactivated in new_default
"""
s = '8.98311981954709e-05;8.98311981954709e-05'
r = '0.0018864551621048887;0.0007186495855637672'
test_max_walking_direct_path_duration = _make_function_duration_over_upper_limit(
s, r, 'walking', operator.not_
)
test_max_car_direct_path_duration = _make_function_duration_over_upper_limit(s, r, 'car', operator.not_)
test_max_bss_direct_path_duration = _make_function_duration_over_upper_limit(s, r, 'bss', operator.not_)
test_max_bike_direct_path_duration = _make_function_duration_over_upper_limit(s, r, 'bike', operator.not_)
@config(
{
"scenario": "distributed",
'instance_config': {
"ridesharing": [
{
"class": "jormungandr.scenarios.ridesharing.instant_system.InstantSystem",
"args": {
"service_url": "http://distributed_ridesharing.wtf",
"api_key": "key",
"network": "Super Covoit 3000",
"rating_scale_min": 0,
"rating_scale_max": 5,
},
}
]
},
}
)
class TestJourneysRidesharingDistributed(
JourneysRidesharing, JourneyCommon, DirectPath, JourneyMinBikeMinCar, NewDefaultScenarioAbstractTestFixture
):
def test_best_filtering(self):
"""
This feature is not supported
"""
pass
def test_journeys_wheelchair_profile(self):
"""
This feature is not supported
"""
pass
def test_not_existent_filtering(self):
"""
This feature is not supported
"""
pass
def test_other_filtering(self):
"""
This feature is not supported
"""
pass
@dataset({"main_routing_test": {"scenario": "distributed"}})
class TestTaxiDistributed(NewDefaultScenarioAbstractTestFixture):
def test_first_section_mode_taxi(self):
query = sub_query + "&datetime=20120614T075000" + "&first_section_mode[]=taxi" + "&debug=true"
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 1
taxi_direct = journeys[0]
assert taxi_direct.get('departure_date_time') == '20120614T075000'
assert taxi_direct.get('arrival_date_time') == '20120614T075027'
assert taxi_direct.get('duration') == 27
assert taxi_direct.get('durations', {}).get("car") == 0
assert taxi_direct.get('durations', {}).get("taxi") == 27
assert taxi_direct.get('durations', {}).get("total") == 27
assert taxi_direct.get('distances', {}).get("car") == 0
assert taxi_direct.get('distances', {}).get("taxi") == 304
sections = taxi_direct.get('sections')
assert len(sections) == 1
assert sections[0].get('mode') == 'taxi'
assert sections[0].get('departure_date_time') == '20120614T075000'
assert sections[0].get('arrival_date_time') == '20120614T075027'
assert sections[0].get('duration') == 27
assert sections[0].get('type') == 'street_network'
query += "&taxi_speed=0.15"
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 2
taxi_direct = next((j for j in journeys if 'non_pt' in j['tags']), None)
assert taxi_direct
assert taxi_direct.get('departure_date_time') == '20120614T075000'
assert taxi_direct.get('arrival_date_time') == '20120614T082349'
assert taxi_direct.get('duration') == 2029
assert taxi_direct.get('durations', {}).get("car") == 0
assert taxi_direct.get('durations', {}).get("taxi") == 2029
assert taxi_direct.get('durations', {}).get("total") == 2029
assert taxi_direct.get('distances', {}).get("car") == 0
assert taxi_direct.get('distances', {}).get("taxi") == 304
sections = taxi_direct.get('sections')
assert len(sections) == 1
assert sections[0].get('mode') == 'taxi'
assert sections[0].get('departure_date_time') == '20120614T075000'
assert sections[0].get('arrival_date_time') == '20120614T082349'
assert sections[0].get('duration') == 2029
assert sections[0].get('type') == 'street_network'
taxi_fallback = next((j for j in journeys if 'non_pt' not in j['tags']), None)
assert taxi_fallback
assert taxi_fallback.get('departure_date_time') == '20120614T075355'
assert taxi_fallback.get('arrival_date_time') == '20120614T080222'
assert taxi_fallback.get('durations', {}).get('taxi') == 125
assert taxi_fallback.get('durations', {}).get('walking') == 80
assert taxi_fallback.get('distances', {}).get('taxi') == 18
assert taxi_fallback.get('distances', {}).get('walking') == 89
sections = taxi_fallback.get('sections')
assert len(sections) == 4
assert sections[0].get('mode') == 'taxi'
assert sections[0].get('departure_date_time') == '20120614T075355'
assert sections[0].get('arrival_date_time') == '20120614T075600'
assert sections[0].get('duration') == 125
assert sections[0].get('type') == 'street_network'
assert sections[1].get('departure_date_time') == '20120614T075600'
assert sections[1].get('arrival_date_time') == '20120614T080100'
assert sections[1].get('duration') == 300
assert sections[1].get('type') == 'waiting'
assert sections[2].get('departure_date_time') == '20120614T080100'
assert sections[2].get('arrival_date_time') == '20120614T080102'
assert sections[2].get('duration') == 2
assert sections[2].get('type') == 'public_transport'
assert sections[3].get('mode') == 'walking'
assert sections[3].get('departure_date_time') == '20120614T080102'
assert sections[3].get('arrival_date_time') == '20120614T080222'
assert sections[3].get('duration') == 80
assert sections[3].get('type') == 'street_network'
query += "&max_duration=0"
response = self.query_region(query)
# the pt journey is eliminated
self.is_valid_journey_response(response, query)
assert len(response['journeys']) == 1
def test_last_section_mode_taxi(self):
query = journey_basic_query + "&walking_speed=0.5" + "&last_section_mode[]=taxi&_min_taxi=0"
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 2
taxi_fallback = next((j for j in journeys if 'non_pt' not in j['tags']), None)
assert taxi_fallback
assert taxi_fallback.get('departure_date_time') == '20120614T080021'
assert taxi_fallback.get('arrival_date_time') == '20120614T080610'
assert taxi_fallback.get('durations', {}).get('taxi') == 8
assert taxi_fallback.get('durations', {}).get('walking') == 39
assert taxi_fallback.get('durations', {}).get('total') == 349
assert taxi_fallback.get('distances', {}).get('taxi') == 88
assert taxi_fallback.get('distances', {}).get('walking') == 19
sections = taxi_fallback.get('sections')
assert len(sections) == 4
assert sections[0].get('mode') == 'walking'
assert sections[0].get('departure_date_time') == '20120614T080021'
assert sections[0].get('arrival_date_time') == '20120614T080100'
assert sections[0].get('duration') == 39
assert sections[0].get('type') == 'street_network'
assert sections[1].get('departure_date_time') == '20120614T080100'
assert sections[1].get('arrival_date_time') == '20120614T080102'
assert sections[1].get('duration') == 2
assert sections[1].get('type') == 'public_transport'
assert sections[2].get('departure_date_time') == '20120614T080102'
assert sections[2].get('arrival_date_time') == '20120614T080602'
assert sections[2].get('duration') == 300
assert sections[2].get('type') == 'waiting'
assert sections[3].get('mode') == 'taxi'
assert sections[3].get('departure_date_time') == '20120614T080602'
assert sections[3].get('arrival_date_time') == '20120614T080610'
assert sections[3].get('duration') == 8
assert sections[3].get('type') == 'street_network'
def test_min_taxi(self):
query = (
sub_query
+ "&datetime=20120614T075000"
+ "&first_section_mode[]=taxi"
+ "&first_section_mode[]=walking"
+ "&debug=true"
)
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
        # the taxi direct_path is deleted because its duration is less than the default _min_taxi
taxi_direct = journeys[0]
assert 'deleted_because_too_short_heavy_mode_fallback' in taxi_direct['tags']
query = (
sub_query
+ "&datetime=20120614T075000"
+ "&first_section_mode[]=taxi"
+ "&first_section_mode[]=walking"
+ "&_min_taxi=652"
+ "&debug=true"
+ "&taxi_speed=0.15"
)
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
taxi_fallback = next((j for j in response['journeys'] if "taxi" in j['tags']), None)
assert 'deleted_because_too_short_heavy_mode_fallback' in taxi_fallback['tags']
def test_max_taxi_duration_to_pt(self):
# we begin with a normal request to get the fallback duration in taxi
query = sub_query + "&datetime=20120614T075000" + "&first_section_mode[]=taxi" + "&taxi_speed=0.05"
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 2
# find the pt journey with taxi as its fallback mode
taxi_with_pt = next((j for j in journeys if 'non_pt_taxi' not in j['tags']), None)
assert taxi_with_pt
# get the fallback duration (the sum of the wait time and the taxi section's duration)
taxi_fallback_time = taxi_with_pt['sections'][0]['duration'] + taxi_with_pt['sections'][1]['duration']
query = (
sub_query
+ "&datetime=20120614T075000"
+ "&first_section_mode[]=taxi"
+ "&taxi_speed=0.05"
# Now we set the max_taxi_duration_to_pt
+ "&max_taxi_duration_to_pt={}".format(taxi_fallback_time - 1)
)
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 1
# the pt journey is gone....
assert 'non_pt_taxi' in journeys[0]['tags']
def test_additional_time(self):
# request a taxi fallback journey with additional boarding/alighting times
first_additional_time = 42
last_additional_time = 20
query = (
sub_query
+ "&datetime=20120614T075000"
+ "&first_section_mode[]=taxi"
+ "&last_section_mode[]=taxi"
+ "&taxi_speed=0.05"
+ "&additional_time_after_first_section_taxi={}".format(first_additional_time)
+ "&additional_time_before_last_section_taxi={}".format(last_additional_time)
)
response = self.query_region(query)
self.is_valid_journey_response(response, query)
pt_with_taxi = next((j for j in response['journeys'] if 'non_pt' not in j['tags']), None)
assert len(pt_with_taxi['sections']) == 5
assert pt_with_taxi['sections'][1]['type'] == 'waiting'
assert pt_with_taxi['sections'][1]['duration'] == first_additional_time
assert pt_with_taxi['sections'][3]['type'] == 'waiting'
assert pt_with_taxi['sections'][3]['duration'] == last_additional_time
@dataset({'main_routing_test': {"scenario": "distributed"}, 'min_nb_journeys_test': {"scenario": "distributed"}})
class TestKrakenDistributedWithDatabase(NewDefaultScenarioAbstractTestFixture):
def setUp(self):
self.old_db_val = app.config['DISABLE_DATABASE']
app.config['DISABLE_DATABASE'] = False
def tearDown(self):
app.config['DISABLE_DATABASE'] = self.old_db_val
def _call_and_check_journeys_on_coverage(self, coverage, query_from, query_to, datetime):
query = 'v1/coverage/{coverage}/journeys?from={query_from}&to={query_to}&datetime={datetime}&debug=true'.format(
coverage=coverage, query_from=query_from, query_to=query_to, datetime=datetime
)
response = self.query(query)
self.is_valid_journey_response(response, query)
assert response['debug']['regions_called'][0]['name'] == coverage
def sn_backends_getter(self):
kraken = StreetNetworkBackend(id='kraken')
kraken.klass = "jormungandr.street_network.tests.MockKraken"
kraken.args = {'timeout': 10}
kraken.created_at = datetime.datetime.utcnow()
return [kraken]
def test_call_with_two_krakens(self):
"""
Checks that, in distributed mode with streetnetwork_backends stored in the database,
there is no error when multiple krakens are up and we call them one after another
"""
manager = StreetNetworkBackendManager(self.sn_backends_getter)
manager._can_connect_to_database = MagicMock(return_value=True)
i_manager.instances["main_routing_test"]._streetnetwork_backend_manager = manager
i_manager.instances["min_nb_journeys_test"]._streetnetwork_backend_manager = manager
self._call_and_check_journeys_on_coverage("main_routing_test", "stopA", "stopB", "20120614T080000")
self._call_and_check_journeys_on_coverage(
"min_nb_journeys_test", "stop_point:sa1:s1", "stop_point:sa3:s1", "20180309T080000"
)
self._call_and_check_journeys_on_coverage("main_routing_test", "stopB", "stopC", "20120614T080000")
@dataset({"main_routing_test": {"scenario": "distributed"}})
class TestCarNoParkDistributed(NewDefaultScenarioAbstractTestFixture):
def test_max_car_no_park_duration_to_pt(self):
# we begin with a normal request to get the fallback duration in car_no_park
query = (
sub_query
+ "&datetime=20120614T075000"
+ "&first_section_mode[]=car_no_park"
+ "&car_no_park_speed=0.1"
)
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 2
# find the direct path journey with car_no_park
car_no_park_direct_path = next((j for j in journeys if 'non_pt' in j['tags']), None)
assert car_no_park_direct_path
assert "car_no_park" in car_no_park_direct_path['tags']
# find the pt journey with car_no_park as its fallback mode
car_no_park_with_pt = next((j for j in journeys if 'non_pt' not in j['tags']), None)
assert car_no_park_with_pt
# get the fallback duration (the duration of the first car_no_park section)
car_no_park_with_pt_fallback_time = car_no_park_with_pt['sections'][0]['duration']
query = (
sub_query
+ "&datetime=20120614T075000"
+ "&first_section_mode[]=car_no_park"
+ "&car_no_park_speed=0.1"
# Now we set the max_car_no_park_duration_to_pt
+ "&max_car_no_park_duration_to_pt={}".format(car_no_park_with_pt_fallback_time - 1)
)
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 1
# the pt journey is gone....
assert 'non_pt_car_no_park' in journeys[0]['tags']
@dataset({"main_routing_test": {"scenario": "distributed"}})
class TestCarDistributed(NewDefaultScenarioAbstractTestFixture):
def test_stop_points_nearby_duration_park_n_ride(self):
# we begin with a normal request to get the fallback duration in car
query = (
sub_query
+ "&datetime=20120614T080000"
+ "&_min_car=0"
+ "&first_section_mode[]=car"
+ "&car_speed=1"
)
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 2
# find the pt journey with car as its fallback mode
car_with_pt = next((j for j in journeys if 'non_pt' not in j['tags']), None)
assert car_with_pt
# get the duration between the car park and the stop point
park_to_stop_point_duration = car_with_pt['sections'][2]['duration']
query = (
sub_query
+ "&datetime=20120614T080000"
+ "&first_section_mode[]=car"
+ "&car_speed=1"
+ "&_min_car=0"
# now we set _stop_points_nearby_duration
+ "&_stop_points_nearby_duration={}".format(int(park_to_stop_point_duration / math.sqrt(2)) - 1)
)
response = self.query_region(query)
check_best(response)
self.is_valid_journey_response(response, query)
journeys = get_not_null(response, 'journeys')
assert len(journeys) == 1
# the pt journey is gone....
assert 'non_pt' in journeys[0]['tags']
@config({"scenario": "distributed"})
class TestDistributedJourneyNoCoverageParams(NoCoverageParams, NewDefaultScenarioAbstractTestFixture):
pass
@dataset({"routing_with_transfer_test": {"scenario": "distributed"}})
class TestRoutingWithTransfer(NewDefaultScenarioAbstractTestFixture):
def test_complete_transfer_path_bus_coach(self):
"""
We first query without requesting _transfer_path, then with _transfer_path=true
With _transfer_path enabled, we expect the transfer section to contain:
- a path
- detailed geojson instead of a simple line (crow_fly)
- same duration as with _transfer_path=false
"""
query = (
'/v1/coverage/routing_with_transfer_test/journeys?'
'from={}&to={}&'
'datetime=20120614T100000&_override_scenario=distributed'
).format("stopF", "stopA")
response = self.query(query)
assert 'journeys' in response
journeys = response['journeys']
assert len(journeys) == 1
assert len(journeys[0]['sections']) == 6
assert journeys[0]['sections'][1]['display_informations']['physical_mode'] == 'Bus'
assert journeys[0]['sections'][2]['type'] == 'transfer'
assert journeys[0]['sections'][2]['transfer_type'] == 'walking'
assert journeys[0]['sections'][3]['type'] == 'waiting'
assert journeys[0]['sections'][4]['display_informations']['physical_mode'] == 'Autocar'
assert 'path' not in journeys[0]['sections'][2]
assert 'geojson' in journeys[0]['sections'][2]
assert 'coordinates' in journeys[0]['sections'][2]['geojson']
assert len(journeys[0]['sections'][2]['geojson']['coordinates']) == 2
query = (
'/v1/coverage/routing_with_transfer_test/journeys?'
'from={}&to={}&_transfer_path=true&'
'datetime=20120614T100000&_override_scenario=distributed'
).format("stopF", "stopA")
response = self.query(query)
assert 'journeys' in response
journeys = response['journeys']
assert len(journeys) == 1
assert len(journeys[0]['sections']) == 6
assert journeys[0]['sections'][1]['display_informations']['physical_mode'] == 'Bus'
assert journeys[0]['sections'][2]['type'] == 'transfer'
assert journeys[0]['sections'][2]['transfer_type'] == 'walking'
assert journeys[0]['sections'][3]['type'] == 'waiting'
assert journeys[0]['sections'][4]['display_informations']['physical_mode'] == 'Autocar'
assert 'path' in journeys[0]['sections'][2]
assert len(journeys[0]['sections'][2]['path']) == 3
assert journeys[0]['sections'][2]['path'][0]['name'] == 'rue de'
assert journeys[0]['sections'][2]['path'][1]['name'] == 'rue cd'
assert journeys[0]['sections'][2]['path'][2]['name'] == 'rue bc'
assert 'geojson' in journeys[0]['sections'][2]
assert 'coordinates' in journeys[0]['sections'][2]['geojson']
assert len(journeys[0]['sections'][2]['geojson']['coordinates']) == 4
def test_complete_transfer_path_metro_coach(self):
"""
We first query without requesting _transfer_path, then with _transfer_path=true
In this case (transfer bus <-> metro), we expect the same response in both cases
"""
query = (
'/v1/coverage/routing_with_transfer_test/journeys?'
'from={}&to={}&forbidden_uris[]=physical_mode:Coach&'
'datetime=20120614T100000&_override_scenario=distributed'
).format("stopF", "stopA")
response = self.query(query)
assert 'journeys' in response
journeys = response['journeys']
assert len(journeys) == 1
assert len(journeys[0]['sections']) == 6
assert journeys[0]['sections'][1]['display_informations']['physical_mode'] == 'Bus'
assert journeys[0]['sections'][2]['type'] == 'transfer'
assert journeys[0]['sections'][2]['transfer_type'] == 'walking'
assert journeys[0]['sections'][3]['type'] == 'waiting'
assert journeys[0]['sections'][4]['display_informations']['physical_mode'] == 'Metro'
assert 'path' not in journeys[0]['sections'][2]
assert 'geojson' in journeys[0]['sections'][2]
assert 'coordinates' in journeys[0]['sections'][2]['geojson']
assert len(journeys[0]['sections'][2]['geojson']['coordinates']) == 2
query = (
'/v1/coverage/routing_with_transfer_test/journeys?'
'from={}&to={}&_transfer_path=true&forbidden_uris[]=physical_mode:Coach&'
'datetime=20120614T100000&_override_scenario=distributed'
).format("stopF", "stopA")
response = self.query(query)
assert 'journeys' in response
journeys = response['journeys']
assert len(journeys) == 1
assert len(journeys[0]['sections']) == 6
assert journeys[0]['sections'][1]['display_informations']['physical_mode'] == 'Bus'
assert journeys[0]['sections'][2]['type'] == 'transfer'
assert journeys[0]['sections'][2]['transfer_type'] == 'walking'
assert journeys[0]['sections'][3]['type'] == 'waiting'
assert journeys[0]['sections'][4]['display_informations']['physical_mode'] == 'Metro'
assert 'path' not in journeys[0]['sections'][2]
assert 'geojson' in journeys[0]['sections'][2]
assert 'coordinates' in journeys[0]['sections'][2]['geojson']
assert len(journeys[0]['sections'][2]['geojson']['coordinates']) == 2
|
CanalTP/navitia
|
source/jormungandr/tests/routing_tests_experimental.py
|
Python
|
agpl-3.0
| 55,937
|
from setuptools import setup
setup(name='scitools',
version='0.1',
description='The funniest joke in the world',
url='http://github.com/storborg/funniest',
author='Flying Circus',
author_email='flyingcircus@example.com',
license='MIT',
packages=['scitools'],
zip_safe=False)
|
binghongcha08/pyQMD
|
setup.py
|
Python
|
gpl-3.0
| 324
|
from __future__ import unicode_literals
__author__ = "Pymatgen Development Team"
__email__ ="pymatgen@googlegroups.com"
__maintainer__ = "Shyue Ping Ong"
__maintainer_email__ ="shyuep@gmail.com"
__date__ = "Nov 19 2015"
__version__ = "3.2.7"
# Useful aliases for commonly used objects and modules.
# Allows from pymatgen import X for quick usage.
from .core import *
from .serializers.json_coders import pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .io.smart import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
from monty.json import MontyEncoder, MontyDecoder, MSONable
|
migueldiascosta/pymatgen
|
pymatgen/__init__.py
|
Python
|
mit
| 650
|
from flask import Blueprint
from werkzeug.exceptions import NotFound
from skylines.api.schemas import ClubSchema
from skylines.model import Club
from skylines.api.views.json import jsonify
clubs_blueprint = Blueprint('clubs', 'skylines')
club_schema = ClubSchema()
@clubs_blueprint.route('/clubs/<int:club_id>')
def read(club_id):
club = Club.get(club_id)
if club is None:
raise NotFound()
result = club_schema.dump(club)
return jsonify(result.data)
|
kerel-fs/skylines
|
skylines/api/views/clubs.py
|
Python
|
agpl-3.0
| 479
|
import pytest
class TestStrace:
@pytest.mark.complete("strace -", require_cmd=True)
def test_1(self, completion):
assert completion
|
scop/bash-completion
|
test/t/test_strace.py
|
Python
|
gpl-2.0
| 150
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dannysite_web.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
manyunkai/dannysite4
|
src/manage.py
|
Python
|
apache-2.0
| 256
|
from plex.interfaces.core.base import Interface
class PluginInterface(Interface):
path = ':/plugins'
def reload_services(self, plugin_id):
response = self.http.get(plugin_id, 'services/reload')
return response.status_code == 200
def restart(self, plugin_id):
response = self.http.get(plugin_id, 'restart')
return response.status_code == 200
|
fuzeman/plex.py
|
plex/interfaces/plugin/__init__.py
|
Python
|
mit
| 389
|
# -*- coding: utf-8 -*-
# @author: vuolter
from __future__ import absolute_import, unicode_literals
import os
import re
from future import standard_library
from pyload.utils import convert, purge, web
from pyload.utils.convert import to_str
from pyload.utils.layer.legacy import hashlib
from pyload.utils.time import seconds_to_midnight
standard_library.install_aliases()
_RE_ALIAS = re.compile(r'[\d._-]+')  # '-' placed last so it is a literal, not a range
def alias(text):
chunks = _RE_ALIAS.split(purge.name(text))
return ''.join(word.capitalize() for word in chunks if word)
_BOOLEANMAP = {
'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def boolean(text):
return _BOOLEANMAP.get(text.strip().lower())
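# illustrative examples: boolean(' Yes ') -> True, boolean('maybe') -> None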
def entries(text, allow_whitespaces=False):
chars = ';,|'
if not allow_whitespaces:
chars += r'\s'
pattr = r'[{0}]+'.format(chars)
return [entry for entry in re.split(pattr, text) if entry]
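# illustrative example: entries('a;b, c|d') -> ['a', 'b', 'c', 'd']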
def hash(text):
text = text.replace('-', '').lower()
algop = '|'.join(hashlib.algorithms + ('adler32', 'crc(32)?'))
pattr = r'(?P<D1>{0}|)\s*[:=]?\s*(?P<H>[\w^_]{{8,}}?)\s*[:=]?\s*(?P<D2>{1}|)'
pattr = pattr.format(algop, algop)
m = re.search(pattr, text)
if m is None:
return None, None
checksum = m.group('H')
algorithm = m.group('D1') or m.group('D2')
if algorithm == 'crc':
algorithm = 'crc32'
return checksum, algorithm
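# illustrative behaviour: scans strings such as 'md5: <digest>' or '<digest> sha1'
# for a checksum token and an adjacent algorithm name; returns (None, None) when
# nothing matches, and 'crc' is normalized to 'crc32'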
def name(text, strict=True):
try:
name = web.parse.name(text)
except Exception:
name = os.path.basename(text).strip()
return name if strict else purge.name(name)
_ONEWORDS = (
'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
'sixteen', 'seventeen', 'eighteen', 'nineteen')
_TENWORDS = (
'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty',
'ninety')
_RE_NUMBER = re.compile(r'[\s-]+')
def number(text):
try:
text = web.misc.translate(text).lower()
except Exception:
text = text.lower()
o_tuple = [(w, i) for i, w in enumerate(_ONEWORDS)]
t_tuple = [(w, i * 10) for i, w in enumerate(_TENWORDS, 2)]
numwords = dict(o_tuple + t_tuple)
tokens = _RE_NUMBER.split(text)
numbers = [_f for _f in (numwords.get(word) for word in tokens) if _f]
return sum(numbers) if numbers else None
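# illustrative examples: number('twenty-one') -> 21, number('foo') -> None;
# note that 'zero' alone also yields None because 0 is filtered out above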
_RE_PACKS = re.compile(r'[^a-z0-9]+(?:(cd|part).*?\d+)?', flags=re.I)
def packs(nameurls):
DEFAULT_URLNAME = 'Unknown'
packs = {}
for urlname, url in nameurls:
urlname = name(urlname, strict=False)
urlname = os.path.splitext(urlname)[0].strip()
urlname = _RE_PACKS.sub('_', urlname).strip('_')
if not urlname:
urlname = DEFAULT_URLNAME
packs.setdefault(urlname, []).append(url)
return packs
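# illustrative example (assuming purge.name leaves plain filenames unchanged):
# packs([('Movie.part1.rar', url1), ('Movie.part2.rar', url2)])
# -> {'Movie': [url1, url2]}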
_RE_SIZE = re.compile(r'(?P<S>-?[\d.,]+)\s*(?P<U>[a-zA-Z]*)')
def bytesize(text, unit=None): # returns integer bytes
DEFAULT_INPUTUNIT = 'byte'
m = _RE_SIZE.match(to_str(text))
if m is None:
return None
if unit is None:
unit = m.group('U') or DEFAULT_INPUTUNIT
size = float(m.group('S').replace(',', '.'))
unit = unit[0].lower()
return int(convert.size(size, unit, 'byte'))
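# illustrative example (assuming convert.size treats 'k' as 1024 bytes):
# bytesize('1,5 KB') -> 1536; returns None when no leading number is found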
_TIMEWORDS = ('this', 'a', 'an', 'next')
_TIMEMAP = {
'day': 60 ** 2 * 12, 'hr': 60 ** 2, 'hour': 60 ** 2, 'min': 60, 'sec': 1}
_RE_TIME = re.compile(r'(\d+|[a-zA-Z-]+)\s*(day|hr|hour|min|sec)|(\d+)')
def seconds(text):
def to_int(obj):
try:
return int(obj)
except ValueError:
return None
try:
text = web.misc.translate(text).lower()
except Exception:
text = text.lower()
pattr = r'({0})\s+day|today|daily'.format('|'.join(_TIMEWORDS))
m = re.search(pattr, text)
if m is not None:
return seconds_to_midnight()
seconds = sum(
(w in _TIMEWORDS or to_int(i or w) or number(w) or 1) *
_TIMEMAP.get(u, 1) for w, u, i in _RE_TIME.findall(text))
return seconds
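# illustrative examples: seconds('2 hr 30 min') -> 9000,
# seconds('today') -> seconds_to_midnight()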
def minutes(text):
return seconds(text) / 60
def hours(text):
return seconds(text) / 60 ** 2
|
pyblub/pyload
|
pyload/utils/parse.py
|
Python
|
agpl-3.0
| 4,211
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.db import models
# from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
def __unicode__(self):
return self.username
twitter_followers = models.BigIntegerField(default=0)
instagram_followers = models.BigIntegerField(default=0)
def get_total_followers(self):
return self.twitter_followers + self.instagram_followers
|
henryfjordan/django_beginnings
|
django_beginnings/users/models.py
|
Python
|
bsd-3-clause
| 526
|
# -*- coding: utf-8 -*-
import xbmc
import guitables
from base import WindowReaderBase
from lib import util
class PVRWindowReaderBase(WindowReaderBase):
def controlIsOnView(self,controlID):
return not xbmc.getCondVisibility('ControlGroup(9000).HasFocus(0)')
def init(self):
self.mode = False
def updateMode(self,controlID):
if self.controlIsOnView(controlID):
self.mode = 'VIEW'
else:
self.mode = None
return self.mode
def getControlDescription(self,controlID):
old = self.mode
new = self.updateMode(controlID)
if new is None and old is not None:
return 'View Options'
class PVRGuideWindowReader(PVRWindowReaderBase):
ID = 'pvrguide'
timelineInfo = ( util.T(32171), #PVR
'$INFO[ListItem.ChannelNumber]',
'$INFO[ListItem.ChannelName]',
'$INFO[ListItem.StartTime]',
19160,
'$INFO[ListItem.EndTime]',
'$INFO[ListItem.Plot]'
)
nowNextInfo = ( util.T(32171),
'$INFO[ListItem.ChannelNumber]',
'$INFO[ListItem.ChannelName]',
'$INFO[ListItem.StartTime]',
'$INFO[ListItem.Plot]'
)
def getControlText(self,controlID):
if not controlID: return (u'',u'')
if self.slideoutHasFocus(): return self.getSlideoutText(controlID)
text = xbmc.getInfoLabel('System.CurrentControl')
if not text: return (u'',u'')
compare = text + xbmc.getInfoLabel('ListItem.StartTime') + xbmc.getInfoLabel('ListItem.EndTime')
return (text.decode('utf-8'),compare)
def getItemExtraTexts(self,controlID):
text = None
if self.controlIsOnView(controlID):
if controlID == 10: #EPG: Timeline
text = guitables.convertTexts(self.winID,self.timelineInfo)
elif controlID == 11 or controlID == 12 or controlID == 13: #EPG: Now/Next/Channel
info = list(self.nowNextInfo)
if xbmc.getCondVisibility('ListItem.IsRecording'):
info.append(19043)
elif xbmc.getCondVisibility('ListItem.HasTimer'):
info.append(31510)
text = guitables.convertTexts(self.winID,info)
return text
class PVRChannelsWindowReader(PVRWindowReaderBase):
ID = 'pvrchannels'
channelInfo = ( '$INFO[ListItem.StartTime]',
19160,
'$INFO[ListItem.EndTime]',
'$INFO[ListItem.Plot]'
)
def getControlText(self,controlID):
if not controlID: return (u'',u'')
if self.slideoutHasFocus(): return self.getSlideoutText(controlID)
text = '{0}... {1}... {2}'.format(xbmc.getInfoLabel('ListItem.ChannelNumber'),xbmc.getInfoLabel('ListItem.Label'),xbmc.getInfoLabel('ListItem.Title'))
if not text: return (u'',u'')
compare = text + xbmc.getInfoLabel('ListItem.StartTime') + xbmc.getInfoLabel('ListItem.EndTime')
return (text.decode('utf-8'),compare)
def getItemExtraTexts(self,controlID):
text = None
if self.controlIsOnView(controlID):
if controlID == 50: #Channel (TV or Radio)
info = list(self.channelInfo)
if xbmc.getCondVisibility('ListItem.IsRecording'):
info.insert(0,19043)
text = guitables.convertTexts(self.winID,info)
return text
class PVRRecordingsWindowReader(PVRWindowReaderBase):
ID = 'pvrrecordings'
def getControlText(self,controlID):
if not controlID: return (u'',u'')
if self.slideoutHasFocus(): return self.getSlideoutText(controlID)
text = xbmc.getInfoLabel('System.CurrentControl')
if not text: return (u'',u'')
return (text.decode('utf-8'),text)
def getItemExtraTexts(self,controlID):
text = None
if self.controlIsOnView(controlID):
text = guitables.convertTexts(self.winID,('$INFO[ListItem.Plot]',))
return text
class PVRTimersWindowReader(PVRWindowReaderBase):
ID = 'pvrtimers'
timerInfo = ( '$INFO[ListItem.ChannelName]',
'$INFO[ListItem.Label]',
'$INFO[ListItem.Date]',
'$INFO[ListItem.Comment]'
)
def getControlText(self,controlID):
if not controlID: return (u'',u'')
if self.slideoutHasFocus(): return self.getSlideoutText(controlID)
text = xbmc.getInfoLabel('System.CurrentControl')
if not text: return (u'',u'')
compare = text + xbmc.getInfoLabel('ListItem.StartTime') + xbmc.getInfoLabel('ListItem.EndTime')
return (text.decode('utf-8'),compare)
def getItemExtraTexts(self,controlID):
text = None
if self.controlIsOnView(controlID):
text = guitables.convertTexts(self.winID,self.timerInfo)
return text
class PVRSearchWindowReader(PVRWindowReaderBase):
ID = 'pvrsearch'
searchInfo = ( '$INFO[ListItem.ChannelNumber]',
'$INFO[ListItem.ChannelName]',
'$INFO[ListItem.Date]'
)
def getControlText(self,controlID):
if not controlID: return (u'',u'')
if self.slideoutHasFocus(): return self.getSlideoutText(controlID)
text = xbmc.getInfoLabel('System.CurrentControl')
if not text: return (u'',u'')
compare = text + xbmc.getInfoLabel('ListItem.Date')
return (text.decode('utf-8'),compare)
def getItemExtraTexts(self,controlID):
text = None
if self.controlIsOnView(controlID):
info = list(self.searchInfo)
if xbmc.getCondVisibility('ListItem.IsRecording'):
info.append(19043)
elif xbmc.getCondVisibility('ListItem.HasTimer'):
info.append(31510)
text = guitables.convertTexts(self.winID,info)
return text
class PVRWindowReader(PVRWindowReaderBase):
ID = 'pvr'
timelineInfo = ( util.T(32171), #PVR
'$INFO[ListItem.ChannelNumber]',
'$INFO[ListItem.ChannelName]',
'$INFO[ListItem.StartTime]',
19160,
'$INFO[ListItem.EndTime]',
'$INFO[ListItem.Plot]'
)
channelInfo = ( '$INFO[ListItem.StartTime]',
19160,
'$INFO[ListItem.EndTime]',
'$INFO[ListItem.Plot]'
)
nowNextInfo = ( util.T(32171),
'$INFO[ListItem.ChannelNumber]',
'$INFO[ListItem.ChannelName]',
'$INFO[ListItem.StartTime]',
'$INFO[ListItem.Plot]'
)
def controlIsOnView(self,controlID):
return controlID > 9 and controlID < 18
def getControlText(self,controlID):
if not controlID: return (u'',u'')
text = None
if controlID == 11 or controlID == 12: #Channel (TV or Radio)
text = '{0}... {1}... {2}'.format(xbmc.getInfoLabel('ListItem.ChannelNumber'),xbmc.getInfoLabel('ListItem.Label'),xbmc.getInfoLabel('ListItem.Title'))
else:
text = xbmc.getInfoLabel('System.CurrentControl')
if not text: return (u'',u'')
compare = text + xbmc.getInfoLabel('ListItem.StartTime') + xbmc.getInfoLabel('ListItem.EndTime')
return (text.decode('utf-8'),compare)
def getItemExtraTexts(self,controlID):
text = None
if self.controlIsOnView(controlID):
if controlID == 10: #EPG: Timeline
text = guitables.convertTexts(self.winID,self.timelineInfo)
elif controlID == 11 or controlID == 12: #Channel (TV or Radio)
text = guitables.convertTexts(self.winID,self.channelInfo)
elif controlID == 16: #EPG: Now/Next
text = guitables.convertTexts(self.winID,self.nowNextInfo)
return text
|
ruuk/service.xbmc.tts
|
lib/windows/pvr.py
|
Python
|
gpl-2.0
| 8,135
|
from django.utils.functional import wraps
from rest_framework import exceptions
from ESSArch_Core.configuration.models import Feature
def feature_enabled_or_404(name):
def decorator(view_func):
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
try:
Feature.objects.get(name=name, enabled=True)
except Feature.DoesNotExist:
raise exceptions.NotFound
else:
return view_func(request, *args, **kwargs)
return _wrapped_view  # already wrapped by @wraps above
return decorator
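# illustrative usage (hypothetical feature name and view):
# @feature_enabled_or_404('archival_descriptions')
# def my_view(request, *args, **kwargs):
#     ...
# the view raises NotFound (404) unless an enabled Feature with that name exists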
|
ESSolutions/ESSArch_Core
|
ESSArch_Core/configuration/decorators.py
|
Python
|
gpl-3.0
| 586
|
"""
Support for Homekit number ranges.
These are mostly used where a HomeKit accessory exposes additional non-standard
characteristics that don't map to a Home Assistant feature.
"""
from __future__ import annotations
from aiohomekit.model.characteristics import Characteristic, CharacteristicsTypes
from homeassistant.components.number import NumberEntity, NumberEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import KNOWN_DEVICES, CharacteristicEntity
NUMBER_ENTITIES: dict[str, NumberEntityDescription] = {
CharacteristicsTypes.Vendor.VOCOLINC_HUMIDIFIER_SPRAY_LEVEL: NumberEntityDescription(
key=CharacteristicsTypes.Vendor.VOCOLINC_HUMIDIFIER_SPRAY_LEVEL,
name="Spray Quantity",
icon="mdi:water",
entity_category=EntityCategory.CONFIG,
),
CharacteristicsTypes.Vendor.EVE_DEGREE_ELEVATION: NumberEntityDescription(
key=CharacteristicsTypes.Vendor.EVE_DEGREE_ELEVATION,
name="Elevation",
icon="mdi:elevation-rise",
entity_category=EntityCategory.CONFIG,
),
CharacteristicsTypes.Vendor.AQARA_GATEWAY_VOLUME: NumberEntityDescription(
key=CharacteristicsTypes.Vendor.AQARA_GATEWAY_VOLUME,
name="Volume",
icon="mdi:volume-high",
entity_category=EntityCategory.CONFIG,
),
CharacteristicsTypes.Vendor.AQARA_E1_GATEWAY_VOLUME: NumberEntityDescription(
key=CharacteristicsTypes.Vendor.AQARA_E1_GATEWAY_VOLUME,
name="Volume",
icon="mdi:volume-high",
entity_category=EntityCategory.CONFIG,
),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Homekit numbers."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_characteristic(char: Characteristic):
if not (description := NUMBER_ENTITIES.get(char.type)):
return False
info = {"aid": char.service.accessory.aid, "iid": char.service.iid}
async_add_entities([HomeKitNumber(conn, info, char, description)], True)
return True
conn.add_char_factory(async_add_characteristic)
class HomeKitNumber(CharacteristicEntity, NumberEntity):
"""Representation of a Number control on a homekit accessory."""
def __init__(
self,
conn,
info,
char,
description: NumberEntityDescription,
):
"""Initialise a HomeKit number control."""
self.entity_description = description
super().__init__(conn, info, char)
@property
def name(self) -> str:
"""Return the name of the device if any."""
if prefix := super().name:
return f"{prefix} {self.entity_description.name}"
return self.entity_description.name
def get_characteristic_types(self):
"""Define the homekit characteristics the entity is tracking."""
return [self._char.type]
@property
def min_value(self) -> float:
"""Return the minimum value."""
return self._char.minValue
@property
def max_value(self) -> float:
"""Return the maximum value."""
return self._char.maxValue
@property
def step(self) -> float:
"""Return the increment/decrement step."""
return self._char.minStep
@property
def value(self) -> float:
"""Return the current characteristic value."""
return self._char.value
async def async_set_value(self, value: float):
"""Set the characteristic to this value."""
await self.async_put_characteristics(
{
self._char.type: value,
}
)
|
mezz64/home-assistant
|
homeassistant/components/homekit_controller/number.py
|
Python
|
apache-2.0
| 3,940
|
from .lims_calibratorsAndMixes_postgresql_models import *
from .lims_calibratorsAndMixes_query import lims_calibratorsAndMixes_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
class lims_calibratorsAndMixes_io(lims_calibratorsAndMixes_query,sbaas_template_io):
def import_calibratorConcentrations_add(self, filename):
'''table adds'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.add_calibratorConcentrations(data.data);
data.clear_data();
def export_calibratorConcentrations_csv(self,filename,met_ids_I=[]):
'''export calibrator concentrations'''
data_O = [];
if met_ids_I:
met_ids = met_ids_I;
else:
met_ids = [];
met_ids = self.get_metIDs_calibratorConcentrations();
for met_id in met_ids:
rows = [];
rows = self.get_rows_metID_calibratorConcentrations(met_id);
data_O.extend(rows);
export = base_exportData(data_O);
export.write_dict2csv(filename);
|
dmccloskey/SBaaS_LIMS
|
SBaaS_LIMS/lims_calibratorsAndMixes_io.py
|
Python
|
mit
| 1,217
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Asumi Kamikaze Inc.
# Licensed under the MIT License.
# Author: Alejandro M. Bernardis
# Email: alejandro (dot) bernardis (at) asumikamikaze (dot) com
# Created: 08/Jun/2015 12:42
|
alejandrobernardis/tornado-heroku
|
settings_example.py
|
Python
|
mit
| 251
|
"""
Created on 27 May 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
South Coast Science PID Digital Single Interface (DSI) Type 1
Compatible with:
https://github.com/south-coast-science/scs_dsi_t1_f1
"""
import time
from scs_core.data.datum import Decode
from scs_dfe.gas.isi.dsi import DSI
from scs_host.bus.i2c import I2C
from scs_host.lock.lock import Lock
# --------------------------------------------------------------------------------------------------------------------
class PIDDSIt1(DSI):
"""
South Coast Science PID DSI Type 1 microcontroller
"""
DEFAULT_ADDR = 0x3c
CONVERSION_TIME = 0.05 # seconds
# ----------------------------------------------------------------------------------------------------------------
__RESPONSE_ACK = 1
__RESPONSE_NACK = 2
__SEND_WAIT_TIME = 0.010 # seconds
__LOCK_TIMEOUT = 2.0
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, addr):
"""
Constructor
"""
super().__init__(addr)
# ----------------------------------------------------------------------------------------------------------------
def power_sensor(self, on):
cmd = '1' if on else '0'
response = self.__cmd(ord(cmd), 1)
if response != self.__RESPONSE_ACK:
raise RuntimeError("response: %s" % response)
def start_conversion(self):
response = self.__cmd(ord('s'), 1)
if response != self.__RESPONSE_ACK:
raise RuntimeError("response: %s" % response)
def read_conversion_count(self):
response = self.__cmd(ord('c'), 2)
count = Decode.unsigned_int(response[0:2], '<')
return count
def read_conversion_voltage(self):
response = self.__cmd(ord('v'), 4)
v = Decode.float(response[0:4], '<')
return round(v, 5)
def version_ident(self):
response = self.__cmd(ord('i'), 40)
return ''.join([chr(byte) for byte in response]).strip()
def version_tag(self):
response = self.__cmd(ord('t'), 11)
return ''.join([chr(byte) for byte in response]).strip()
# ----------------------------------------------------------------------------------------------------------------
def __cmd(self, cmd, response_size):
try:
self.obtain_lock()
I2C.Sensors.start_tx(self.addr)
response = I2C.Sensors.read_cmd(cmd, response_size, self.__SEND_WAIT_TIME)
time.sleep(self.__SEND_WAIT_TIME)
return response
finally:
I2C.Sensors.end_tx()
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
def obtain_lock(self):
Lock.acquire(self.__lock_name, self.__LOCK_TIMEOUT)
def release_lock(self):
Lock.release(self.__lock_name)
@property
def __lock_name(self):
return "%s-0x%02x" % (self.__class__.__name__, self.addr)
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "PIDDSIt1:{addr:0x%0.2x}" % self.addr
|
south-coast-science/scs_dfe_eng
|
src/scs_dfe/gas/isi/pid_dsi_t1.py
|
Python
|
mit
| 3,404
|
# gimbal_macro.py
# the macro to generate a gimbal, a mechanism with the roll-pitch angles as degrees of freedom
# created by charlyoleg on 2013/12/11
#
# (C) Copyright 2013 charlyoleg
#
# This file is part of the Cnc25D Python package.
#
# Cnc25D is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cnc25D is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cnc25D. If not, see <http://www.gnu.org/licenses/>.
################################################################
# this file intends being included in the file bin/cnc25d_example_generator.py
# for this purpose, there is some syntaxe restrictions
# don't use triple single-quotes (''') and the return character ('\n') in this file
# but you can still use triple double-quote (")
################################################################
"""
this piece of code is an example of how to use the parametric design gimbal
You can also use this file as a FreeCAD macro from the GUI
You can also copy-paste this code in your own design files
If you don't know which value to set to a constraint-parameter, just comment it. Default value is used, if you don't set a constraint explicitly.
"""
################################################################
# Installation pre-request
################################################################
# This script needs freecad and Cnc25D installed on your system
# visit those sites for more information:
# http://www.freecadweb.org/
# https://pypi.python.org/pypi/Cnc25D
#
# To install FreeCAD on Ubuntu, run the following command:
# > sudo apt-get install freecad
# or to get the newest version:
# > sudo add-apt-repository ppa:freecad-maintainers/freecad-stable
# > sudo apt-get update
# > sudo apt-get install freecad
# and optionally:
# > sudo apt-get install freecad-doc freecad-dev
# To install the python package cnc25d, run the following command:
# > sudo pip install Cnc25D
# or
# > sudo pip install Cnc25D -U
################################################################
# header for Python / FreeCAD compatibility
################################################################
try: # when working with an installed Cnc25D package
from cnc25d import cnc25d_api
except: # when working on the source files
import importing_cnc25d # give access to the cnc25d package
from cnc25d import cnc25d_api
cnc25d_api.importing_freecad()
#print("FreeCAD.Version:", FreeCAD.Version())
################################################################
# import
################################################################
#
from cnc25d import cnc25d_design
#
import Part
#
import math
################################################################
# parameters value
################################################################
#
# choose the values of the parameters by editing this file
# feature request : create a GUI with PyQt4 to edit those parameter values
gimbal_constraint = {} # This python-dictionary contains all the constraint-parameters to build the gimbal
####### bell_bagel
###### bell
### bell_face
## bulk
gimbal_constraint['axle_internal_diameter'] = 20.0
gimbal_constraint['axle_external_diameter'] = 0.0
gimbal_constraint['leg_length'] = 60.0
gimbal_constraint['bell_face_height'] = 80.0
gimbal_constraint['bell_face_width'] = 80.0
### bell_base_disc
gimbal_constraint['base_diameter'] = 160.0
## wall_thickness
gimbal_constraint['face_thickness'] = 6.0
gimbal_constraint['side_thickness'] = 4.0
gimbal_constraint['base_thickness'] = 8.0
## axle_hole
gimbal_constraint['axle_hole_nb'] = 6
gimbal_constraint['axle_hole_diameter'] = 4.0
gimbal_constraint['axle_hole_position_diameter'] = 0.0
gimbal_constraint['axle_hole_angle'] = 0.0
## leg
gimbal_constraint['leg_spare_width'] = 10.0
gimbal_constraint['leg_smoothing_radius'] = 30.0
## motor_hole
gimbal_constraint['motor_hole_diameter'] = 4.0
gimbal_constraint['motor_hole_x_distance'] = 40.0
gimbal_constraint['motor_hole_z_distance'] = 50.0
gimbal_constraint['motor_hole_z_position'] = 40.0
## internal_buttress
gimbal_constraint['int_buttress_x_length'] = 10.0
gimbal_constraint['int_buttress_z_width'] = 5.0
gimbal_constraint['int_buttress_z_distance'] = 50.0
gimbal_constraint['int_buttress_x_position'] = 10.0
gimbal_constraint['int_buttress_z_position'] = 10.0
gimbal_constraint['int_buttress_int_corner_length'] = 5.0
gimbal_constraint['int_buttress_ext_corner_length'] = 5.0
gimbal_constraint['int_buttress_bump_length'] = 10.0
gimbal_constraint['int_buttress_arc_height'] = -2.0
gimbal_constraint['int_buttress_smoothing_radius'] = 10.0
## external_buttress
gimbal_constraint['ext_buttress_z_length'] = 10.0
gimbal_constraint['ext_buttress_x_width'] = 5.0
gimbal_constraint['ext_buttress_x_distance'] = 20.0
gimbal_constraint['ext_buttress_z_position'] = 40.0
gimbal_constraint['ext_buttress_y_length'] = 10.0
gimbal_constraint['ext_buttress_y_position'] = 20.0
gimbal_constraint['ext_buttress_face_int_corner_length'] = 5.0
gimbal_constraint['ext_buttress_face_ext_corner_length'] = 5.0
gimbal_constraint['ext_buttress_face_bump_length'] = 10.0
gimbal_constraint['ext_buttress_base_int_corner_length'] = 5.0
gimbal_constraint['ext_buttress_base_ext_corner_length'] = 5.0
gimbal_constraint['ext_buttress_base_bump_length'] = 10.0
gimbal_constraint['ext_buttress_arc_height'] = -5.0
gimbal_constraint['ext_buttress_smoothing_radius'] = 10.0
### bell_side
## hollow
gimbal_constraint['hollow_z_height'] = 10.0
gimbal_constraint['hollow_y_width'] = 20.0
gimbal_constraint['hollow_spare_width'] = 10.0
## base_hole
gimbal_constraint['base_hole_nb'] = 8
gimbal_constraint['base_hole_diameter'] = 4.0
gimbal_constraint['base_hole_position_diameter'] = 0.0
gimbal_constraint['base_hole_angle'] = 0.0
### xyz-axles
## y_hole
gimbal_constraint['y_hole_diameter'] = 4.0
gimbal_constraint['y_hole_z_top_position'] = 10.0 # int_buttress_z_width + y_hole_diameter/2 + delta or -1*(y_hole_diameter/2 + delta)
gimbal_constraint['y_hole_z_bottom_position'] = 10.0 # int_buttress_z_width + y_hole_diameter/2 + delta or -1*(y_hole_diameter/2 + delta)
gimbal_constraint['y_hole_x_position'] = 6.0
## x_hole
gimbal_constraint['x_hole_diameter'] = 4.0
gimbal_constraint['x_hole_z_top_position'] = -6.0 # int_buttress_z_width + x_hole_diameter/2 + delta or -1*(x_hole_diameter/2 + delta)
gimbal_constraint['x_hole_z_bottom_position'] = -6.0 # int_buttress_z_width + x_hole_diameter/2 + delta or -1*(x_hole_diameter/2 + delta)
gimbal_constraint['x_hole_y_position'] = 6.0
## z_hole
gimbal_constraint['z_hole_diameter'] = 4.0
gimbal_constraint['z_hole_external_diameter'] = 0.0
gimbal_constraint['z_hole_position_length'] = 15.0
### bell manufacturing
gimbal_constraint['bell_cnc_router_bit_radius'] = 1.0
gimbal_constraint['bell_extra_cut_thickness'] = 0.0 #0.0, 1.0
###### bagel
## bagel diameter
gimbal_constraint['bagel_axle_diameter'] = 10.0 # a bit bigger than gimbal_constraint['axle_diameter']
gimbal_constraint['bagel_axle_internal_diameter'] = 0.0
gimbal_constraint['bagel_axle_external_diameter'] = 0.0
## bagel thickness
gimbal_constraint['external_bagel_thickness'] = 2.0
gimbal_constraint['internal_bagel_thickness'] = 2.0
### bagel manufacturing
gimbal_constraint['bagel_extra_cut_thickness'] = 0.0 #0.0, 1.0
####### cross_cube
##### cross_cube bare
### face A1, A2, B1 and B2
# height
gimbal_constraint['axle_diameter'] = 10.0
gimbal_constraint['inter_axle_length'] = 15.0
gimbal_constraint['height_margin'] = 10.0
gimbal_constraint['top_thickness'] = 5.0
# width
gimbal_constraint['cube_width'] = 60.0
gimbal_constraint['face_A1_thickness'] = 9.0
gimbal_constraint['face_A2_thickness'] = 7.0
gimbal_constraint['face_B1_thickness'] = 8.0
gimbal_constraint['face_B2_thickness'] = 6.0
### threaded rod
# face
gimbal_constraint['face_rod_hole_diameter'] = 4.0
gimbal_constraint['face_rod_hole_h_position'] = 5.0
gimbal_constraint['face_rod_hole_v_distance'] = 5.0 # must be bigger than face_rod_hole_diameter
gimbal_constraint['face_rod_hole_v_position'] = 5.0 # must be bigger than face_rod_hole_radius
# top
gimbal_constraint['top_rod_hole_diameter'] = 4.0
gimbal_constraint['top_rod_hole_h_position'] = 10.0
### hollow
# face hollow
gimbal_constraint['face_hollow_leg_nb'] = 1 # possible values: 1 (filled), 4, 8
gimbal_constraint['face_hollow_border_width'] = 0.0
gimbal_constraint['face_hollow_axle_width'] = 0.0
gimbal_constraint['face_hollow_leg_width'] = 0.0
gimbal_constraint['face_hollow_smoothing_radius'] = 0.0
# top hollow
gimbal_constraint['top_hollow_leg_nb'] = 0 # possible values: 0 (empty), 1 (filled), 4, 8
gimbal_constraint['top_hollow_border_width'] = 0.0
gimbal_constraint['top_hollow_leg_width'] = 0.0
gimbal_constraint['top_hollow_smoothing_radius'] = 0.0
### axle
gimbal_constraint['axle_length'] = 0.0
gimbal_constraint['spacer_diameter'] = 0.0
gimbal_constraint['spacer_length'] = 0.0
### manufacturing
gimbal_constraint['cross_cube_cnc_router_bit_radius'] = 1.0
gimbal_constraint['cross_cube_extra_cut_thickness'] = 0.0 #0.0, 1.0
### select crest on face: (True, False, False, True) is the combination for the gimbal angle convention
gimbal_constraint['face_A1_crest'] = True
gimbal_constraint['face_A2_crest'] = False
gimbal_constraint['face_B1_crest'] = False
gimbal_constraint['face_B2_crest'] = True
####### crest option
##### parameter inheritance from gear_profile
### first gear
# general
gimbal_constraint['gear_addendum_dedendum_parity'] = 50.0
# tooth height
gimbal_constraint['gear_tooth_half_height'] = 0.0
gimbal_constraint['gear_addendum_height_pourcentage'] = 100.0
gimbal_constraint['gear_dedendum_height_pourcentage'] = 100.0
gimbal_constraint['gear_hollow_height_pourcentage'] = 25.0
gimbal_constraint['gear_router_bit_radius'] = 0.1
# positive involute
gimbal_constraint['gear_base_diameter'] = 0.0
gimbal_constraint['gear_force_angle'] = 0.0
gimbal_constraint['gear_tooth_resolution'] = 2
gimbal_constraint['gear_skin_thickness'] = 0.0
# negative involute (if zero, negative involute = positive involute)
gimbal_constraint['gear_base_diameter_n'] = 0.0
gimbal_constraint['gear_force_angle_n'] = 0.0
gimbal_constraint['gear_tooth_resolution_n'] = 0
gimbal_constraint['gear_skin_thickness_n'] = 0.0
### second gear
# general
gimbal_constraint['second_gear_type'] = 'e'
gimbal_constraint['second_gear_tooth_nb'] = 0
gimbal_constraint['second_gear_primitive_diameter'] = 0.0
gimbal_constraint['second_gear_addendum_dedendum_parity'] = 0.0
# tooth height
gimbal_constraint['second_gear_tooth_half_height'] = 0.0
gimbal_constraint['second_gear_addendum_height_pourcentage'] = 100.0
gimbal_constraint['second_gear_dedendum_height_pourcentage'] = 100.0
gimbal_constraint['second_gear_hollow_height_pourcentage'] = 25.0
gimbal_constraint['second_gear_router_bit_radius'] = 0.0
# positive involute
gimbal_constraint['second_gear_base_diameter'] = 0.0
gimbal_constraint['second_gear_tooth_resolution'] = 0
gimbal_constraint['second_gear_skin_thickness'] = 0.0
# negative involute (if zero, negative involute = positive involute)
gimbal_constraint['second_gear_base_diameter_n'] = 0.0
gimbal_constraint['second_gear_tooth_resolution_n'] = 0
gimbal_constraint['second_gear_skin_thickness_n'] = 0.0
### gearbar specific
gimbal_constraint['gearbar_slope'] = 0.0
gimbal_constraint['gearbar_slope_n'] = 0.0
### position
# second gear position
gimbal_constraint['second_gear_position_angle'] = 0.0
gimbal_constraint['second_gear_additional_axis_length'] = 0.0
##### crest specific
### outline
gimbal_constraint['gear_module'] = 3.0
gimbal_constraint['virtual_tooth_nb'] = 40
gimbal_constraint['portion_tooth_nb'] = 20
gimbal_constraint['free_mounting_width'] = 15.0 # minimal recommended value: max(face_thickness) + cross_cube_cnc_router_bit_radius
### crest_hollow
gimbal_constraint['crest_hollow_leg_nb'] = 4 # possible values: 1(filled), 2(end-legs only), 3, 4 ...
gimbal_constraint['end_leg_width'] = 10.0
gimbal_constraint['middle_leg_width'] = 0.0
gimbal_constraint['crest_hollow_external_diameter'] = 0.0
gimbal_constraint['crest_hollow_internal_diameter'] = 0.0
gimbal_constraint['floor_width'] = 0.0
gimbal_constraint['crest_hollow_smoothing_radius'] = 0.0
### gear_holes
gimbal_constraint['fastening_hole_diameter'] = 5.0
gimbal_constraint['fastening_hole_position'] = 0.0
gimbal_constraint['centring_hole_diameter'] = 1.0
gimbal_constraint['centring_hole_distance'] = 8.0
gimbal_constraint['centring_hole_position'] = 0.0
##### gimbal angles
### roll-pitch angles
gimbal_constraint['bottom_angle'] = 0.0
gimbal_constraint['top_angle'] = 0.0
### pan_tilt angles # can be set only if roll-pitch angles are left to 0.0
gimbal_constraint['pan_angle'] = -30*math.pi/180 #0.0
gimbal_constraint['tilt_angle'] = 45*math.pi/180 #0.0
################################################################
# action
################################################################
my_gimbal = cnc25d_design.gimbal(gimbal_constraint)
my_gimbal.outline_display()
my_gimbal.write_info_txt("test_output/gimbal_macro")
my_gimbal.write_figure_svg("test_output/gimbal_macro")
my_gimbal.write_figure_dxf("test_output/gimbal_macro")
my_gimbal.write_figure_brep("test_output/gimbal_macro")
my_gimbal.write_assembly_brep("test_output/gimbal_macro")
my_gimbal.write_freecad_brep("test_output/gimbal_macro")
my_gimbal.run_simulation("")
my_gimbal.view_design_configuration()
#my_gimbal.run_self_test("")
#my_gimbal.cli("--output_file_basename test_output/gm.dxf") # Warning: all constraint values are reset to their default values
if(cnc25d_api.interpretor_is_freecad()):
Part.show(my_gimbal.get_fc_obj_function('gimbal')) # Your attention please: here we use get_fc_obj_function() instead of get_fc_obj_3dconf()
|
charlyoleg/Cnc25D
|
cnc25d/tests/gimbal_macro.py
|
Python
|
gpl-3.0
| 15,143
|
"""
WSGI config for demo project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
import site
os.environ['DJANGO_SETTINGS_MODULE'] = 'demo.settings'
SITE_ROOT = os.path.dirname(os.path.dirname( __file__ ))
site.addsitedir( SITE_ROOT + '/venv/local/lib/python2.7/site-packages' )
sys.path.append( SITE_ROOT )
exc_dir = 'scripts' if os.name == 'nt' else 'bin'
venv = '%s/venv/%s/activate_this.py' % (SITE_ROOT, exc_dir )
activate_env = os.path.expanduser( venv )
execfile( activate_env, dict(__file__ = activate_env ))
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
|
Alem/django-jfu
|
demo/demo/wsgi.py
|
Python
|
bsd-3-clause
| 1,208
|
import logging
logging.basicConfig(level=logging.INFO)
# Create objects to manipulate - they hold your data
#
from ImageD11 import peakmerge, indexing, transformer
mypeakmerger = peakmerge.peakmerger()
mytransformer = transformer.transformer()
myindexer = indexing.indexer()
#
# Your work starts here:
#
mypeakmerger.readpeaks( 'eu.pks' )
mypeakmerger.harvestpeaks( numlim=(0.0, 1000.0) , omlim=(-100.0, 100.0) )
mypeakmerger.mergepeaks( )
mypeakmerger.filter( )
mypeakmerger.savepeaks( 'eu.flt' )
mytransformer.loadfiltered( 'eu.flt' )
mytransformer.loadfileparameters( 'eu2.pars' )
mytransformer.compute_tth_eta( )
mytransformer.addcellpeaks( )
mytransformer.fit( 0.0 , 14.0 )
mytransformer.saveparameters( 'eu3fitted.pars' )
mytransformer.computegv( )
mytransformer.savegv( 'eu3.gve' )
myindexer.readgvfile( 'eu3.gve' )
myindexer.updateparameters( )
p = {'ds_tol': '0.005', 'minpks': '100', 'uniqueness': '0.5', 'hkl_tol': '0.05', 'eta_range': '0.0', 'ring_1': '1', 'wavelength': '0.153684', 'ring_2': '1', 'cosine_tol': '0.002'}
myindexer.parameterobj.set_parameters( p )
myindexer.loadpars( )
myindexer.assigntorings( )
myindexer.find( )
myindexer.scorethem( )
myindexer.histogram_drlv_fit( )
myindexer.saveubis( 'eu3.ubi' )
myindexer.saveindexing( 'eu3.idx' )
|
jonwright/ImageD11
|
test/demo/test.py
|
Python
|
gpl-2.0
| 1,286
|
#!/usr/bin/python
import socket
import platform
import time
import sys
import smtplib
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
class Mail:
def __init__(self,user_defined=None):
self.user_defined = user_defined
def sendMail(self, SUBJECT, BODY, TO, FROM, ATTACHMENT):
"""With this function we send out our html email"""
# Create message container - the correct MIME type is multipart/alternative here!
MESSAGE = MIMEMultipart('alternative')
MESSAGE['subject'] = SUBJECT
MESSAGE['To'] = ", ".join(TO)
MESSAGE['From'] = FROM
MESSAGE.preamble = ""
ATTACHMENT_NAME = os.path.basename(ATTACHMENT)
# Record the MIME type text/html...
HTML_BODY = MIMEText(BODY, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
MESSAGE.attach(HTML_BODY)
part = MIMEBase('application', "octet-stream")
part.set_payload(open(ATTACHMENT,"rb").read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % ATTACHMENT_NAME)
MESSAGE.attach(part)
# The actual sending of the e-mail
server = smtplib.SMTP('outlook.td.teradata.com')
# Print debugging output when testing
if __name__ == "__main__":
server.set_debuglevel(1)
# Credentials (if needed) for sending the mail
#password = "mypassword"
#server.starttls() # TLS (transport layer security) encryption option is not working on some machines
#server.login(FROM,password)
server.sendmail(FROM, TO, MESSAGE.as_string())
server.quit()
if __name__ == "__main__":
"""Executes if the script is run as main script (for testing purposes)"""
hostname = socket.gethostname()
os_info = platform.platform()  # avoid shadowing the os module imported above
localtime = time.asctime( time.localtime(time.time()) )
email_content = """
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>html title</title>
<style type="text/css" media="screen">
table{
empty-cells:hide;
}
td.cell{
background-color: white;
}
</style>
</head>
<body>
<STYLE>
.boldtable, .boldtable TD, .boldtable TH
{
font-size:10pt;
border: 1px solid #CCCCCC;
}
</STYLE>
<table>
<table width= "430" cellpadding="1" border="0" style="border-top:1px solid; border-left:1px solid; border-right:1px solid; border-bottom:1px solid">
<TR bgcolor="#336699"><TD colspan="2"><B><FONT color = "white" size="3">Overall Summary</B></TD></tr>
<tr valign="top">
<td>
<table style="border: black 1px solid;width:100%">
<tr><td bgcolor="#f4e9d3"><B>Overall Status</B></td>
<td bgcolor="#f4e9d3"><B><FONT COLOR="red">REJECTED</FONT></B></td></tr>
<tr><td bgcolor="#f4e9d3"><B>Hostname</B></td><td bgcolor="#f4e9d3">""" + hostname + """</td></tr>
<tr><td bgcolor="#f4e9d3"><B>OS</B></td><td bgcolor="#f4e9d3">""" + os + """</td></tr>
<tr><td bgcolor="#f4e9d3"><B>Date and Time</B></td><td bgcolor="#f4e9d3">""" + localtime + """</td></tr>
</table>
</td>
</tr>
</table>
<table width= "430" cellpadding="1" border="0" style="border-top:1px solid; border-left:1px solid; border-right:1px solid; border-bottom:1px solid">
<tr bgcolor="#336699"><td><B><FONT color = "white" size="3">Test Case Status Summary</FONT></B></td></tr>
<tr valign="top">
<td>
<table style="border: black 1px solid;width:100%">
<TR bgcolor="#f4e9d3"><TD align="center"><B>Passed</B></TD><TD align="center"><B>Failed</B></TD>
<TD align="center"><B>NonAttempted</B></TD>
<TD align="center"><B>Total Test Cases</B></TD></TR>
|
tuxfux-hlp/Python-examples
|
logging/sending_mail.py
|
Python
|
gpl-2.0
| 4,045
|
ann = MLP(2, 10, 1)
%timeit -n 1 -r 1 ann.train(zip(X,y), iterations=2)
plot_decision_boundary(ann)
plt.title("Our next model with 10 hidden units")
|
leriomaggio/deep-learning-keras-tensorflow
|
solutions/sol_111.py
|
Python
|
mit
| 149
|
import numpy as np
from random import shuffle
from past.builtins import xrange
def svm_loss_naive(W, X, y, reg):
"""
Structured SVM loss function, naive implementation (with loops).
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
dW = np.zeros(W.shape) # initialize the gradient as zero
# compute the loss and the gradient
num_classes = W.shape[1]
num_train = X.shape[0]
loss = 0.0
diff_n = 0
for i in xrange(num_train):
scores = X[i].dot(W)
correct_class_score = scores[y[i]]
diff_n = 0
for j in xrange(num_classes):
if j == y[i]:
continue
margin = scores[j] - correct_class_score + 1 # note delta = 1
if margin > 0:
loss += margin
dW[:,j] += X[i,:]
diff_n += 1
dW[:,y[i]] += -diff_n*X[i,:]
# Right now the loss is a sum over all training examples, but we want it
# to be an average instead so we divide by num_train.
loss /= num_train
dW /= num_train
# Add regularization to the loss.
loss += reg * np.sum(W * W)
dW += 2*reg*W
#############################################################################
# TODO: #
# Compute the gradient of the loss function and store it dW. #
# Rather that first computing the loss and then computing the derivative, #
# it may be simpler to compute the derivative at the same time that the #
# loss is being computed. As a result you may need to modify some of the #
# code above to compute the gradient. #
#############################################################################
return loss, dW
def svm_loss_vectorized(W, X, y, reg):
"""
Structured SVM loss function, vectorized implementation.
Inputs and outputs are the same as svm_loss_naive.
"""
loss = 0.0
dW = np.zeros(W.shape) # initialize the gradient as zero
num_classes = W.shape[1]
num_train = X.shape[0]
#############################################################################
# TODO: #
# Implement a vectorized version of the structured SVM loss, storing the #
# result in loss. #
#############################################################################
scores = X.dot(W)
correct_class_score = scores[np.arange(num_train), y]
margins = np.maximum(0, scores - correct_class_score[:, np.newaxis] + 1)
margins[np.arange(num_train), y] = 0
  loss = np.sum(margins)/num_train + reg*np.sum(W * W)  # same reg term as the naive version
#############################################################################
# END OF YOUR CODE #
#############################################################################
#############################################################################
# TODO: #
# Implement a vectorized version of the gradient for the structured SVM #
# loss, storing the result in dW. #
# #
# Hint: Instead of computing the gradient from scratch, it may be easier #
# to reuse some of the intermediate values that you used to compute the #
# loss. #
#############################################################################
X_mask = np.zeros(margins.shape)
# column maps to class, row maps to sample; a value v in X_mask[i, j]
# adds a row sample i to column class j with multiple of v
X_mask[margins > 0] = 1
# for each sample, find the total number of classes where margin > 0
incorrect_counts = np.sum(X_mask, axis=1)
X_mask[np.arange(num_train), y] = -incorrect_counts
dW = X.T.dot(X_mask)
dW /= num_train # average out weights
  dW += 2*reg*W              # regularize the weights (matches svm_loss_naive)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
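# --- Usage sketch (added example, not part of the original assignment): check
# that the vectorized implementation matches the naive one on random data.
# Names ending in `_demo` are illustrative only.
if __name__ == '__main__':
  np.random.seed(0)
  W_demo = 0.01 * np.random.randn(4, 3)   # D=4 features, C=3 classes
  X_demo = np.random.randn(5, 4)          # N=5 examples
  y_demo = np.array([0, 1, 2, 1, 0])
  l_naive, g_naive = svm_loss_naive(W_demo, X_demo, y_demo, reg=0.1)
  l_vec, g_vec = svm_loss_vectorized(W_demo, X_demo, y_demo, reg=0.1)
  print('loss difference:', abs(l_naive - l_vec))                 # ~0 expected
  print('gradient difference:', np.linalg.norm(g_naive - g_vec))  # ~0 expected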
|
UltronAI/Deep-Learning
|
CS231n/assignment1/cs231n/classifiers/linear_svm.py
|
Python
|
mit
| 5,478
|
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.core import exceptions
from iprestrict import models
from iprestrict import restrictor
from iprestrict.middleware import IPRestrictMiddleware
LOCAL_IP = '192.168.1.1'
PROXY = '1.1.1.1'
class MiddlewareRestrictsTest(TestCase):
'''
    When the middleware is enabled it should restrict all IPs (except localhost) and all URLs by default.
'''
def setUp(self):
models.ReloadRulesRequest.request_reload()
def assert_url_is_restricted(self, url):
response = self.client.get(url, REMOTE_ADDR = LOCAL_IP)
self.assertEqual(response.status_code, 403)
def assert_ip_is_restricted(self, ip):
response = self.client.get('', REMOTE_ADDR = ip)
self.assertEqual(response.status_code, 403)
def test_middleware_restricts_every_url(self):
self.assert_url_is_restricted('')
self.assert_url_is_restricted('/every')
self.assert_url_is_restricted('/url')
self.assert_url_is_restricted('/is_restricted')
self.assert_url_is_restricted('/every/url/is_restricted')
def test_middleware_restricts_ips(self):
self.assert_ip_is_restricted('192.168.1.1')
self.assert_ip_is_restricted('10.10.10.1')
self.assert_ip_is_restricted('169.254.0.1')
def test_middleware_allows_localhost(self):
response = self.client.get('/some/url', REMOTE_ADDR = '127.0.0.1')
self.assertEqual(response.status_code, 404)
def create_ip_allow_rule(ip=LOCAL_IP):
localip = models.IPGroup.objects.create(name='localip')
    models.IPRange.objects.create(ip_group=localip, first_ip=ip)
models.Rule.objects.create(url_pattern='ALL', ip_group = localip, action='A')
class MiddlewareAllowsTest(TestCase):
def setUp(self):
create_ip_allow_rule()
models.ReloadRulesRequest.request_reload()
def test_middleware_allows_localhost(self):
response = self.client.get('')
self.assertEqual(response.status_code, 404)
def test_middleware_allows_ip_just_added(self):
response = self.client.get('', REMOTE_ADDR = LOCAL_IP)
self.assertEqual(response.status_code, 404)
def test_middleware_restricts_other_ip(self):
response = self.client.get('', REMOTE_ADDR = '10.1.1.1')
self.assertEqual(response.status_code, 403)
@override_settings(IPRESTRICT_TRUSTED_PROXIES=(PROXY,), ALLOW_PROXIES=False)
def test_middleware_allows_if_proxy_is_trusted(self):
response = self.client.get('', REMOTE_ADDR = PROXY, HTTP_X_FORWARDED_FOR= LOCAL_IP)
self.assertEqual(response.status_code, 404)
def test_middleware_restricts_if_proxy_is_not_trusted(self):
response = self.client.get('', REMOTE_ADDR = PROXY, HTTP_X_FORWARDED_FOR = LOCAL_IP)
self.assertEqual(response.status_code, 403)
class ReloadRulesTest(TestCase):
def setUp(self):
create_ip_allow_rule()
def test_reload_with_custom_command(self):
from django.core.management import call_command
call_command('reloadrules', verbosity=0)
response = self.client.get('', REMOTE_ADDR = LOCAL_IP)
self.assertEqual(response.status_code, 404)
class MiddlewareExtractClientIpTest(TestCase):
def setUp(self):
self.middleware = IPRestrictMiddleware()
self.factory = RequestFactory()
def test_remote_addr_only(self):
self.middleware = IPRestrictMiddleware()
request = self.factory.get('', REMOTE_ADDR=LOCAL_IP)
client_ip = self.middleware.extract_client_ip(request)
        self.assertEqual(client_ip, LOCAL_IP)
def test_remote_addr_empty(self):
self.middleware = IPRestrictMiddleware()
request = self.factory.get('', REMOTE_ADDR='')
client_ip = self.middleware.extract_client_ip(request)
        self.assertEqual(client_ip, '')
@override_settings(IPRESTRICT_TRUSTED_PROXIES=(PROXY,))
def test_single_proxy(self):
self.middleware = IPRestrictMiddleware()
request = self.factory.get('', REMOTE_ADDR=PROXY, HTTP_X_FORWARDED_FOR = LOCAL_IP)
client_ip = self.middleware.extract_client_ip(request)
        self.assertEqual(client_ip, LOCAL_IP)
@override_settings(IPRESTRICT_TRUSTED_PROXIES=(PROXY,'2.2.2.2','4.4.4.4'))
def test_multiple_proxies_one_not_trusted(self):
self.middleware = IPRestrictMiddleware()
proxies = ['2.2.2.2', '3.3.3.3', '4.4.4.4']
request = self.factory.get('', REMOTE_ADDR=PROXY,
HTTP_X_FORWARDED_FOR = ', '.join([LOCAL_IP] + proxies))
try:
client_ip = self.middleware.extract_client_ip(request)
except exceptions.PermissionDenied:
pass
else:
self.fail('Should raise PermissionDenied exception')
@override_settings(IPRESTRICT_TRUSTED_PROXIES=(PROXY,'2.2.2.2','3.3.3.3', '4.4.4.4'))
def test_multiple_proxies_all_trusted(self):
self.middleware = IPRestrictMiddleware()
proxies = ['2.2.2.2', '3.3.3.3', '4.4.4.4']
request = self.factory.get('', REMOTE_ADDR=PROXY,
HTTP_X_FORWARDED_FOR = ', '.join([LOCAL_IP] + proxies))
client_ip = self.middleware.extract_client_ip(request)
        self.assertEqual(client_ip, LOCAL_IP)
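# --- Added note (illustrative sketch only, not the middleware's actual code):
# the proxy tests above are consistent with logic along these lines, where the
# client IP is the first X-Forwarded-For entry and every later hop (including
# REMOTE_ADDR) must be a trusted proxy.
def extract_client_ip_sketch(remote_addr, x_forwarded_for, trusted_proxies):
    if not x_forwarded_for:
        return remote_addr
    chain = [ip.strip() for ip in x_forwarded_for.split(',')] + [remote_addr]
    client_ip, proxies = chain[0], chain[1:]
    if any(proxy not in trusted_proxies for proxy in proxies):
        raise exceptions.PermissionDenied('untrusted proxy in chain')
    return client_ip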
|
whyflyru/django-iprestrict
|
tests/test_middleware.py
|
Python
|
bsd-3-clause
| 5,362
|
"""
.. module:: bottle_utils.http
:synopsis: HTTP decorators
.. moduleauthor:: Outernet Inc <hello@outernet.is>
"""
from __future__ import unicode_literals
import os
import time
import functools
from bottle import (HTTPResponse, HTTPError, parse_date, parse_range_header,
request, response)
MIME_TYPES = {
# Text/Code
'txt': 'text/plain',
'html': 'text/html',
'css': 'text/css',
'js': 'text/javascript',
# Image
'gif': 'image/gif',
'jpg': 'image/jpeg',
'tiff': 'image/tiff',
'png': 'image/png',
'svg': 'image/svg+xml',
# Data/Document
'pdf': 'application/pdf',
'xml': 'text/xml',
'json': 'application/json',
# Video
'mp4': 'video/mp4',
'm4v': 'video/mp4',
'ogv': 'video/ogg',
'flv': 'video/x-flv',
'webm': 'video/webm',
'3gp': 'video/3gpp',
'mpeg': 'video/mpeg',
'mpg': 'video/mpeg',
# Audio
'mp3': 'audio/mpeg',
'ogg': 'audio/ogg',
'flac': 'audio/flac',
'm4a': 'audio/mp4',
# Other
'zip': 'application/zip',
'gz': 'application/gzip',
}
EXTENSIONS = list(MIME_TYPES.keys())
DEFAULT_TYPE = MIME_TYPES['txt']
TIMESTAMP_FMT = '%a, %d %b %Y %H:%M:%S GMT'
def no_cache(func):
"""
Disable caching on a handler. The decorated handler will have
``Cache-Control`` header set to ``private, no-cache``.
This is useful for responses that contain data that cannot be reused.
    Simply decorate a handler with it::
@app.get('/foo')
@no_cache
def not_cached():
return 'sensitive data'
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
resp = func(*args, **kwargs)
response.headers[b'Cache-Control'] = b'private, no-cache'
return resp
return wrapper
def get_mimetype(filename):
"""
Guess mime-type based on file's extension.
"""
name, ext = os.path.splitext(filename)
return MIME_TYPES.get(ext[1:], DEFAULT_TYPE)
def format_ts(seconds=None):
"""
Given a timestamp in seconds since UNIX epoch, return a string
    representation suitable for use in HTTP headers (RFC 1123 date format).
    If ``seconds`` is omitted, the current time is used.
"""
return time.strftime(TIMESTAMP_FMT, time.gmtime(seconds))
def iter_read_range(fd, offset, length, chunksize=1024*1024):
"""
Return an iterator that allows reading files in chunks. The ``fd`` should
be a file-like object that has a ``read()`` method. The ``offset`` value
sets the start offset of the read. If the ``fd`` object does not support
``seek()``, the file will be simply read up until offset, and the read data
discarded.
``length`` argument specifies the amount of data to read. The read is not
done in one go, but in chunks. The size of a chunk is specified using
``chunksize``.
This function is similar to ``bottle._file_iter_range`` but does not fail
on missing ``seek()`` attribute (e.g., ``StringIO`` objects).
"""
try:
fd.seek(offset)
except AttributeError:
        # this object does not support ``seek()``, so simply read and discard
        # the first ``offset`` bytes
        fd.read(offset)
while length > 0:
chunk = fd.read(min(length, chunksize))
if not chunk:
break
length -= len(chunk)
yield chunk
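# --- Usage sketch (added example): read bytes 2..6 of an in-memory buffer in
# chunks; ``io.BytesIO`` supports ``seek()``, so the offset is applied directly.
#
#     >>> import io
#     >>> fd = io.BytesIO(b'0123456789')
#     >>> b''.join(iter_read_range(fd, offset=2, length=5, chunksize=2))
#     b'23456'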
def send_file(content, filename, size=None, timestamp=None):
"""
Convert file data into an HTTP response.
This method is used when the file data does not exist on disk, such as when
it is dynamically generated.
Because the file does not exist on disk, the basic metadata which is
usually read from the file itself must be supplied as arguments. The
``filename`` argument is the supposed filename of the file data. It is only
used to set the Content-Type header, and you may safely pass in just the
extension with leading period.
    The ``size`` argument is the payload size in bytes. For streaming files,
    this can be particularly important as the ranges are calculated based on
    content length. If ``size`` is omitted, support for ranges is not
    advertised and ranges are never returned.
``timestamp`` is expected to be in seconds since UNIX epoch, and is used to
calculate Last-Modified HTTP headers, as well as handle If-Modified-Since
header. If omitted, current time is used, and If-Modified-Since is never
checked.
.. note::
The returned response is a completely new response object.
        Modifying the response object in the current request context is not
going to affect the object returned by this function. You should modify
the object returned by this function instead.
Example::
def some_handler():
import StringIO
f = StringIO.StringIO('foo')
return send_file(f, 'file.txt', 3, 1293281312)
The code is partly based on ``bottle.static_file``, with the main
difference being the use of file-like objects instead of files on disk.
"""
headers = {}
ctype = get_mimetype(filename)
if ctype.startswith('text/'):
        # We expect and assume all text files are encoded as UTF-8. It's the
        # user's job to ensure this is true.
ctype += '; charset=UTF-8'
# Set basic headers
headers['Content-Type'] = ctype
if size:
headers['Content-Length'] = size
headers['Last-Modified'] = format_ts(timestamp)
# Check if If-Modified-Since header is in request and respond early if so
if timestamp:
modsince = request.environ.get('HTTP_IF_MODIFIED_SINCE')
modsince = modsince and parse_date(modsince.split(';')[0].strip())
if modsince is not None and modsince >= timestamp:
headers['Date'] = format_ts()
return HTTPResponse(status=304, **headers)
if request.method == 'HEAD':
# Request is a HEAD, so remove any content body
content = ''
if size:
headers['Accept-Ranges'] = 'bytes'
ranges = request.environ.get('HTTP_RANGE')
if ranges and size:
ranges = list(parse_range_header(ranges, size))
if not ranges:
return HTTPError(416, "Request Range Not Satisfiable")
start, end = ranges[0]
headers['Content-Range'] = 'bytes %d-%d/%d' % (start, end - 1, size)
headers['Content-Length'] = str(end - start)
content = iter_read_range(content, start, end - start)
return HTTPResponse(content, **headers)
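# --- Usage sketch (added example; the route path and payload are
# illustrative): serving an in-memory payload from a bottle route, passing an
# explicit size so that range requests are supported.
#
#     from bottle import route
#
#     @route('/hello.txt')
#     def hello():
#         import io
#         body = io.BytesIO(b'hello, world')
#         return send_file(body, 'hello.txt', size=12, timestamp=1404810000)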
|
Outernet-Project/bottle-utils
|
bottle_utils/http.py
|
Python
|
bsd-2-clause
| 6,569
|
# -*- coding: utf-8 -*-
import os
import KBEngine
from KBEDebug import *
"""
loginapp进程主要处理KBEngine服务端登陆、创建账号等工作。
目前脚本支持几种功能:
1: 注册账号检查
2:登陆检查
3:自定义socket回调,参考interface中Poller实现
"""
def onLoginAppReady():
"""
KBEngine method.
    The loginapp is ready.
"""
INFO_MSG('onLoginAppReady: bootstrapGroupIndex=%s, bootstrapGlobalIndex=%s' % \
(os.getenv("KBE_BOOTIDX_GROUP"), os.getenv("KBE_BOOTIDX_GLOBAL")))
#KBEngine.addTimer(0.01, 1.0, onTick)
def onTick(timerID):
"""
"""
INFO_MSG('onTick()')
def onLoginAppShutDown():
"""
KBEngine method.
    Callback invoked before this loginapp is shut down.
"""
INFO_MSG('onLoginAppShutDown()')
def onReuqestLogin(loginName, password, clientType, datas):
"""
KBEngine method.
    Callback invoked when an account requests to log in.
    Login queuing can also be done here; store the queuing information in datas.
"""
INFO_MSG('onReuqestLogin() loginName=%s, clientType=%s' % (loginName, clientType))
errorno = KBEngine.SERVER_SUCCESS
    if len(loginName) > 64:
        errorno = KBEngine.SERVER_ERR_NAME
    if len(password) > 64:
        errorno = KBEngine.SERVER_ERR_PASSWORD
return (errorno, loginName, password, clientType, datas)
def onLoginCallbackFromDB(loginName, accountName, errorno, datas):
"""
KBEngine method.
    Callback from the dbmgr after an account's login request has been validated.
    loginName: the login name, i.e. the name the client typed when logging in.
    accountName: the account name returned by the dbmgr query.
    errorno: KBEngine.SERVER_ERR_*
    This mechanism supports systems where one account has multiple names, and
    logins through third-party account systems. Along with the baseapp address
    the client also receives this account name; the client should use it when
    logging in to the baseapp.
"""
INFO_MSG('onLoginCallbackFromDB() loginName=%s, accountName=%s, errorno=%s' % (loginName, accountName, errorno))
def onRequestCreateAccount(accountName, password, datas):
"""
KBEngine method.
    Callback invoked when account creation is requested.
"""
INFO_MSG('onRequestCreateAccount() %s' % (accountName))
errorno = KBEngine.SERVER_SUCCESS
    if len(accountName) > 64:
        errorno = KBEngine.SERVER_ERR_NAME
    if len(password) > 64:
        errorno = KBEngine.SERVER_ERR_PASSWORD
return (errorno, accountName, password, datas)
def onCreateAccountCallbackFromDB(accountName, errorno, datas):
"""
KBEngine method.
    Callback from the dbmgr after an account registration request has been validated.
errorno: KBEngine.SERVER_ERR_*
"""
INFO_MSG('onCreateAccountCallbackFromDB() accountName=%s, errorno=%s' % (accountName, errorno))
|
Orav/kbengine
|
assets/scripts/login/kbemain.py
|
Python
|
lgpl-3.0
| 2,617
|
# encoding: utf-8
# module unicodedata
# from (built-in)
# by generator 1.135
"""
This module provides access to the Unicode Character Database which
defines character properties for all Unicode characters. The data in
this database is based on the UnicodeData.txt file version
5.2.0, which is publicly available from ftp://ftp.unicode.org/.
The module uses the same names and symbols as defined by the
UnicodeData File Format 5.2.0 (see
http://www.unicode.org/reports/tr44/tr44-4.html).
"""
# no imports
# Variables with simple values
unidata_version = '5.2.0'
# functions
def bidirectional(unichr): # real signature unknown; restored from __doc__
"""
bidirectional(unichr)
Returns the bidirectional class assigned to the Unicode character
unichr as string. If no such value is defined, an empty string is
returned.
"""
pass
def category(unichr): # real signature unknown; restored from __doc__
"""
category(unichr)
Returns the general category assigned to the Unicode character
unichr as string.
"""
pass
def combining(unichr): # real signature unknown; restored from __doc__
"""
combining(unichr)
Returns the canonical combining class assigned to the Unicode
character unichr as integer. Returns 0 if no combining class is
defined.
"""
pass
def decimal(unichr, default=None): # real signature unknown; restored from __doc__
"""
decimal(unichr[, default])
Returns the decimal value assigned to the Unicode character unichr
as integer. If no such value is defined, default is returned, or, if
not given, ValueError is raised.
"""
pass
def decomposition(unichr): # real signature unknown; restored from __doc__
"""
decomposition(unichr)
Returns the character decomposition mapping assigned to the Unicode
character unichr as string. An empty string is returned in case no
such mapping is defined.
"""
pass
def digit(unichr, default=None): # real signature unknown; restored from __doc__
"""
digit(unichr[, default])
Returns the digit value assigned to the Unicode character unichr as
integer. If no such value is defined, default is returned, or, if
not given, ValueError is raised.
"""
pass
def east_asian_width(unichr): # real signature unknown; restored from __doc__
"""
east_asian_width(unichr)
Returns the east asian width assigned to the Unicode character
unichr as string.
"""
pass
def lookup(name): # real signature unknown; restored from __doc__
"""
lookup(name)
Look up character by name. If a character with the
given name is found, return the corresponding Unicode
character. If not found, KeyError is raised.
"""
pass
def mirrored(unichr): # real signature unknown; restored from __doc__
"""
mirrored(unichr)
Returns the mirrored property assigned to the Unicode character
unichr as integer. Returns 1 if the character has been identified as
a "mirrored" character in bidirectional text, 0 otherwise.
"""
pass
def name(unichr, default=None): # real signature unknown; restored from __doc__
"""
name(unichr[, default])
Returns the name assigned to the Unicode character unichr as a
string. If no name is defined, default is returned, or, if not
given, ValueError is raised.
"""
pass
def normalize(form, unistr): # real signature unknown; restored from __doc__
"""
normalize(form, unistr)
Return the normal form 'form' for the Unicode string unistr. Valid
values for form are 'NFC', 'NFKC', 'NFD', and 'NFKD'.
"""
pass
def numeric(unichr, default=None): # real signature unknown; restored from __doc__
"""
numeric(unichr[, default])
Returns the numeric value assigned to the Unicode character unichr
as float. If no such value is defined, default is returned, or, if
not given, ValueError is raised.
"""
pass
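# --- Usage sketch (added example; behaviour of the real built-in module):
#
#     >>> import unicodedata
#     >>> unicodedata.lookup('LATIN SMALL LETTER A')
#     'a'
#     >>> unicodedata.category('A')
#     'Lu'
#     >>> unicodedata.numeric('\u00bd')            # VULGAR FRACTION ONE HALF
#     0.5
#     >>> unicodedata.normalize('NFC', 'e\u0301')  # 'e' + COMBINING ACUTE
#     'é'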
# classes
class UCD(object):
# no doc
def bidirectional(self, unichr): # real signature unknown; restored from __doc__
"""
bidirectional(unichr)
Returns the bidirectional class assigned to the Unicode character
unichr as string. If no such value is defined, an empty string is
returned.
"""
pass
def category(self, unichr): # real signature unknown; restored from __doc__
"""
category(unichr)
Returns the general category assigned to the Unicode character
unichr as string.
"""
pass
def combining(self, unichr): # real signature unknown; restored from __doc__
"""
combining(unichr)
Returns the canonical combining class assigned to the Unicode
character unichr as integer. Returns 0 if no combining class is
defined.
"""
pass
def decimal(self, unichr, default=None): # real signature unknown; restored from __doc__
"""
decimal(unichr[, default])
Returns the decimal value assigned to the Unicode character unichr
as integer. If no such value is defined, default is returned, or, if
not given, ValueError is raised.
"""
pass
def decomposition(self, unichr): # real signature unknown; restored from __doc__
"""
decomposition(unichr)
Returns the character decomposition mapping assigned to the Unicode
character unichr as string. An empty string is returned in case no
such mapping is defined.
"""
pass
def digit(self, unichr, default=None): # real signature unknown; restored from __doc__
"""
digit(unichr[, default])
Returns the digit value assigned to the Unicode character unichr as
integer. If no such value is defined, default is returned, or, if
not given, ValueError is raised.
"""
pass
def east_asian_width(self, unichr): # real signature unknown; restored from __doc__
"""
east_asian_width(unichr)
Returns the east asian width assigned to the Unicode character
unichr as string.
"""
pass
def lookup(self, name): # real signature unknown; restored from __doc__
"""
lookup(name)
Look up character by name. If a character with the
given name is found, return the corresponding Unicode
character. If not found, KeyError is raised.
"""
pass
def mirrored(self, unichr): # real signature unknown; restored from __doc__
"""
mirrored(unichr)
Returns the mirrored property assigned to the Unicode character
unichr as integer. Returns 1 if the character has been identified as
a "mirrored" character in bidirectional text, 0 otherwise.
"""
pass
def name(self, unichr, default=None): # real signature unknown; restored from __doc__
"""
name(unichr[, default])
Returns the name assigned to the Unicode character unichr as a
string. If no name is defined, default is returned, or, if not
given, ValueError is raised.
"""
pass
def normalize(self, form, unistr): # real signature unknown; restored from __doc__
"""
normalize(form, unistr)
Return the normal form 'form' for the Unicode string unistr. Valid
values for form are 'NFC', 'NFKC', 'NFD', and 'NFKD'.
"""
pass
def numeric(self, unichr, default=None): # real signature unknown; restored from __doc__
"""
numeric(unichr[, default])
Returns the numeric value assigned to the Unicode character unichr
as float. If no such value is defined, default is returned, or, if
not given, ValueError is raised.
"""
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
unidata_version = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
# variables with complex values
ucd_3_2_0 = None # (!) real value is ''
ucnhash_CAPI = None # (!) real value is ''
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/unicodedata.py
|
Python
|
gpl-2.0
| 8,480
|
# Copyright (C) 2011, CloudCaptive
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.utils import simplejson
from serverside.dao import accounts_dao
from serverside.dao import badges_dao
from entities.accounts import Accounts
from entities.badges import *
from entities.users import *
from entities.logs import *
from entities.counter import *
from google.appengine.ext import db, webapp
from serverside import constants
from serverside.session import Session
from tools.utils import account_login_required
from tools.xss import XssCleaner
from serverside.fantasm.action import FSMAction, DatastoreContinuationFSMAction
from serverside.fantasm import fsm
import cgi
import logging
import os
import wsgiref.handlers
import string
import datetime
json = simplejson
def stripMilSecs(d):
return datetime.datetime(d.year, d.month, d.day, d.hour, d.minute, d.second)
def stripHours(d):
return datetime.datetime(d.year, d.month, d.day)
class RunAnalytics(webapp.RequestHandler):
def get(self):
now = datetime.datetime.now()
a_day_ago = now - datetime.timedelta(days=1)
context = {}
context['start_time'] = str(stripMilSecs(a_day_ago))
context['end_time'] = str(stripMilSecs(now))
fsm.startStateMachine('CountAwardedBadges', [context])
fsm.startStateMachine('CountAwardedPoints', [context])
fsm.startStateMachine('CountAwardedBadgePoints', [context])
fsm.startStateMachine('CountAPICalls', [context])
VALID_ANALYTICS = ["badges", "badgepoints", "points", "apicalls"]
class GetAnalytics(webapp.RequestHandler):
@account_login_required
def get(self):
current_session = Session().get_current_session(self)
acc = current_session.get_account_entity()
a_type = self.request.get("type")
if a_type not in VALID_ANALYTICS:
            self.response.out.write(json.dumps({'success': 'false'}))
            return
values = getattr(self, a_type + "_values")(acc)
self.response.out.write(json.dumps(values))
@account_login_required
def post(self):
current_session = Session().get_current_session(self)
acc = current_session.get_account_entity()
a_type = self.request.get("type")
if a_type not in VALID_ANALYTICS:
            self.response.out.write(json.dumps({'success': 'false'}))
            return
values = getattr(self, a_type + "_values")(acc)
self.response.out.write(json.dumps(values))
def badges_values(self, acc):
q = BadgeBatch.all().filter("account_key =", acc.key().name())
values = {"success": "true"}
res = q.fetch(1000)
values['total'] = q.count()
values['entry'] = []
values['badges'] = []
values['numbadges'] = 0
badges = set()
for ii in res:
ent = {'date':ii.date.strftime("%Y-%m-%d"),
'count':str(ii.counter),
'badgeid':ii.badgeid}
values['entry'].append(ent)
badges.add(ii.badgeid)
badges = list(badges)
#badges = badges_dao.get_all_badges_for_account(acc)
for ii in badges:
values['badges'].append(ii)
values['numbadges'] += 1
return values
def badgepoints_values(self, acc):
q = BadgePointsBatch.all().filter("account_key =", acc.key().name())
values = {"success": "true"}
res = q.fetch(1000)
values['total'] = q.count()
values['entry'] = []
values['badges'] = []
values['numbadges'] = 0
badges = set()
for ii in res:
ent = {'date':ii.date.strftime("%Y-%m-%d"),
'count':str(ii.counter),
'badgeid':ii.badgeid}
values['entry'].append(ent)
badges.add(ii.badgeid)
badges = list(badges)
for ii in badges:
values['badges'].append(ii)
values['numbadges'] += 1
return values
def points_values(self, acc):
q = PointBatch.all().filter("account_key =", acc.key().name())
values = {"success": "true"}
res = q.fetch(1000)
values['total'] = q.count()
values['entry'] = []
for ii in res:
ent = {'date':ii.date.strftime("%Y-%m-%d"),
'count':str(ii.counter)}
values['entry'].append(ent)
return values
def apicalls_values(self, acc):
q = APICountBatch.all().filter("account_key =", acc.key().name())
values = {"success": "true"}
res = q.fetch(1000)
values['total'] = q.count()
values['entry'] = []
for ii in res:
ent = {'date':ii.date.strftime("%Y-%m-%d"),
'count':str(ii.counter)}
values['entry'].append(ent)
return values
###############################################
# Start of State Machine
# CountAwardedBadges
###############################################
"""
Badge Award Counting State Machine
This class starts a task for each account
"""
class AllAccountsClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
return Accounts.all()
def execute(self, context, obj):
if not obj['result']:
return None
acc = obj['result']
if acc:
context['account_key'] = acc.key().name()
return "peraccount"
"""
Second state for each account's badges to count over
"""
class PerAccountClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
account_key = context['account_key']
account_ref = accounts_dao.get(account_key)
return Badges.all().filter('creator =', account_ref)
def execute(self, context, obj):
if not obj['result']:
return None
ii = obj['result']
context['badgeid'] = ii.theme + '-' + ii.name + '-' + ii.permissions
return "perbadge"
"""
Awarded Badge Counting State Machine
"""
class PerBadgeClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
start_time = datetime.datetime.strptime(context['start_time'], "%Y-%m-%d %H:%M:%S")
end_time = datetime.datetime.strptime(context['end_time'], "%Y-%m-%d %H:%M:%S")
return Logs.all().filter("account =", context['account_key']).filter("badgeid =", context['badgeid']).filter("event =", "notify_badge").filter("date >", start_time).filter("date <", end_time)
def execute(self, context, obj):
        # Create a counter initialized to 0.
        # This way we'll at least know when the sum is 0,
        # rather than having nothing to show that the job ran.
def tx():
batch_key = context['account_key'] + '-' + \
context['badgeid'] + '-' + \
context['end_time']
batch = BadgeBatch.get_by_key_name(batch_key)
if not batch:
end_time = datetime.datetime.strptime(context['end_time'], "%Y-%m-%d %H:%M:%S")
batch = BadgeBatch(key_name=batch_key,
badgeid=context['badgeid'],
account_key=context['account_key'],
date=end_time)
batch.put()
if not obj['result']:
return None
db.run_in_transaction(tx)
return "count"
"""
This class spawns a task for each log.
"""
class CountAwardedBadgesClass(FSMAction):
def execute(self, context, obj):
"""Transactionally update our batch counter"""
batch_key = context['account_key'] + '-' + \
context['badgeid'] + '-' + \
context['end_time']
def tx():
batch = BadgeBatch.get_by_key_name(batch_key)
if not batch:
# For whatever reason it was not already created in previous state
end_time = datetime.datetime.strptime(context['end_time'], "%Y-%m-%d %H:%M:%S")
batch = BadgeBatch(key_name=batch_key,
badgeid=context['badgeid'],
account_key=context['account_key'],
date=end_time)
batch.put()
batch.counter += 1
batch.put()
db.run_in_transaction(tx)
###############################################
# End of State Machine
# CountAwardedBadges
###############################################
###############################################
###############################################
# Start of State Machine
# CountAPICallsInitState
###############################################
"""
API Call Counting State Machine
This class starts a task for each account
"""
class APICallsAllAccountsClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
return Accounts.all()
def execute(self, context, obj):
if not obj['result']:
return None
acc = obj['result']
if acc:
context['account_key'] = acc.key().name()
return "apicallsperaccount"
"""
Second state for each account's api calls for counting
"""
class APICallsPerAccountClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
start_time = datetime.datetime.strptime(context['start_time'],
"%Y-%m-%d %H:%M:%S")
end_time = datetime.datetime.strptime(context['end_time'],
"%Y-%m-%d %H:%M:%S")
return Logs.all().filter("account =", context['account_key']).filter("is_api =", "yes").filter("date >", start_time).filter("date <", end_time)
def execute(self, context, obj):
if not obj['result']:
return None
batch_key = context['account_key'] + '-' + \
context['end_time']
def tx():
batch = APICountBatch.get_by_key_name(batch_key)
if not batch:
# Initialize counter
end_time = datetime.datetime.strptime(context['end_time'], "%Y-%m-%d %H:%M:%S")
batch = APICountBatch(key_name=batch_key,
account_key=context['account_key'],
date=end_time)
batch.put()
db.run_in_transaction(tx)
return "count"
"""
This class fans in multiple logs
There may be failures in the computation
but its meant only for a rough estimate
"""
class CountAPICallsClass(FSMAction):
def execute(self, contexts, obj):
"""Transactionally update our batch counter"""
allcontext = {}
context_account = {}
context_datetime = {}
batch_key = None
if len(contexts) < 1:
return
for index,ii in enumerate(contexts):
batch_key = ii['account_key'] + '-' + \
ii['end_time']
if batch_key in allcontext:
allcontext[batch_key] += 1
else:
allcontext[batch_key] = 1
context_account[batch_key] = ii['account_key']
end_time = datetime.datetime.strptime(ii['end_time'], "%Y-%m-%d %H:%M:%S")
context_datetime[batch_key] = end_time
def tx(batch_key, count):
batch = APICountBatch.get_by_key_name(batch_key)
if not batch:
batch = APICountBatch(key_name=batch_key,
account_key=context_account[batch_key],
date=context_datetime[batch_key])
batch.put()
batch.counter += count
batch.put()
# if a failure happens while in this loop, numbers will be inflated
for ii in allcontext:
db.run_in_transaction(tx, ii, allcontext[ii])
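# --- Added note (sketch only): the manual bookkeeping in execute() above is
# equivalent to counting batch keys with collections.Counter, e.g.
#
#     from collections import Counter
#     allcontext = Counter(ii['account_key'] + '-' + ii['end_time']
#                          for ii in contexts)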
###############################################
# End of State Machine
# CountAPICallsInitState
###############################################
###############################################
###############################################
# Start of State Machine
# CountPointsInitState
###############################################
"""
Points Counting State Machine
This class starts a task for each account
"""
class PointsAllAccountsClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
return Accounts.all()
def execute(self, context, obj):
if not obj['result']:
return None
acc = obj['result']
if acc:
context['account_key'] = acc.key().name()
return "pointsperaccount"
"""
Second state for each account's points awarded for counting
"""
class PointsPerAccountClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
start_time = datetime.datetime.strptime(context['start_time'],
"%Y-%m-%d %H:%M:%S")
end_time = datetime.datetime.strptime(context['end_time'],
"%Y-%m-%d %H:%M:%S")
return Logs.all().filter("account =", context['account_key']).filter("event =", "awardpoints").filter("date >", start_time).filter("date <", end_time).filter("success", "true")
def execute(self, context, obj):
if not obj['result']:
return None
batch_key = context['account_key'] + '-' + \
context['end_time']
end_time = datetime.datetime.strptime(context['end_time'], "%Y-%m-%d %H:%M:%S")
context['points'] = obj['result'].points
if not context['points']:
context['points'] = 0
def tx():
batch = PointBatch.get_by_key_name(batch_key)
if not batch:
# Initialize counter
batch = PointBatch(key_name=batch_key,
account_key=context['account_key'],
date=end_time)
batch.put()
db.run_in_transaction(tx)
return "count"
"""
This class fans in multiple logs
If an error occurs during this processing it is possible to have
inflated numbers.
"""
class CountPointsClass(FSMAction):
def execute(self, contexts, obj):
"""Transactionally update our batch counter"""
allcontext = {}
context_account = {}
context_datetime = {}
batch_key = None
if len(contexts) < 1:
return
for index,ii in enumerate(contexts):
batch_key = ii['account_key'] + '-' + \
ii['end_time']
if batch_key in allcontext:
allcontext[batch_key] += int(ii['points'])
else:
allcontext[batch_key] = int(ii['points'])
context_account[batch_key] = ii['account_key']
end_time = datetime.datetime.strptime(ii['end_time'], "%Y-%m-%d %H:%M:%S")
context_datetime[batch_key] = end_time
def tx(batch_key, count):
batch = PointBatch.get_by_key_name(batch_key)
if not batch:
batch = PointBatch(key_name=batch_key,
account_key=context_account[batch_key],
date=context_datetime[batch_key])
batch.put()
batch.counter += int(count)
batch.put()
# if a failure happens while in this loop, numbers will be inflated
for ii in allcontext:
db.run_in_transaction(tx, ii, allcontext[ii])
###############################################
# End of State Machine
# CountPointsInitState
###############################################
###############################################
# Start of State Machine
# CountAwardedBadgePoints
###############################################
"""
Badge Point Award Counting State Machine
This class starts a task for each account
"""
class BadgePointsAllAccountsClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
return Accounts.all()
def execute(self, context, obj):
if not obj['result']:
return None
acc = obj['result']
if acc:
context['account_key'] = acc.key().name()
return "peraccount"
"""
Second state for each account's badges to count over
"""
class BadgePointsPerAccountClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
account_key = context['account_key']
account_ref = accounts_dao.get(account_key)
return Badges.all().filter('creator =', account_ref)
def execute(self, context, obj):
if not obj['result']:
return None
ii = obj['result']
context['badgeid'] = ii.theme + '-' + ii.name + '-' + ii.permissions
return "perbadge"
"""
Awarded Badge Counting State Machine
"""
class PerBadgePointsClass(DatastoreContinuationFSMAction):
def getQuery(self, context, obj):
start_time = datetime.datetime.strptime(context['start_time'], "%Y-%m-%d %H:%M:%S")
end_time = datetime.datetime.strptime(context['end_time'], "%Y-%m-%d %H:%M:%S")
return Logs.all().filter("account =", context['account_key']).filter("badgeid =", context['badgeid']).filter("api =", "award_badge_points").filter("date >", start_time).filter("date <", end_time)
def execute(self, context, obj):
        # Create a counter initialized to 0.
        # This way we'll at least know when the sum is 0,
        # rather than having nothing to show that the job ran.
end_time = datetime.datetime.strptime(context['end_time'], "%Y-%m-%d %H:%M:%S")
def tx():
batch_key = context['account_key'] + '-' + \
context['badgeid'] + '-' + \
context['end_time']
batch = BadgePointsBatch.get_by_key_name(batch_key)
if not batch:
batch = BadgePointsBatch(key_name=batch_key,
badgeid=context['badgeid'],
account_key=context['account_key'],
date=end_time)
batch.put()
if not obj['result']:
return None
db.run_in_transaction(tx)
if obj['result'].success == "true":
context['points'] = str(obj['result'].points)
return "count"
else:
return None
"""
This class spawns a task for each log.
"""
class CountAwardedBadgePointsClass(FSMAction):
def execute(self, context, obj):
"""Transactionally update our batch counter"""
batch_key = context['account_key'] + '-' + \
context['badgeid'] + '-' + \
context['end_time']
def tx():
batch = BadgePointsBatch.get_by_key_name(batch_key)
if not batch:
end_time = datetime.datetime.strptime(context['end_time'], "%Y-%m-%d %H:%M:%S")
batch = BadgePointsBatch(key_name=batch_key,
badgeid=context['badgeid'],
account_key=context['account_key'],
date=end_time)
batch.put()
batch.counter += int(context['points'])
batch.put()
db.run_in_transaction(tx)
|
Yonnick/userinfuser
|
serverside/analytics.py
|
Python
|
gpl-3.0
| 18,276
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for benchmark utitilies."""
import tensorflow.compat.v2 as tf
from keras.benchmarks import benchmark_util
class BenchmarkUtilTest(tf.test.TestCase):
def test_get_benchmark_name(self):
name = "benchmark_layer_call__Conv2D_small_shape"
expected = ["Conv2D", "small", "shape"]
out = benchmark_util.get_benchmark_name(name)
self.assertAllEqual(out, expected)
def test_generate_benchmark_params_cpu_gpu(self):
adam_opt = tf.keras.optimizers.Adam()
sgd_opt = tf.keras.optimizers.SGD()
params = [
("Adam", adam_opt, 10),
("SGD", sgd_opt, 10),
]
expected = [
("Adam_CPU", adam_opt, 10),
("SGD_CPU", sgd_opt, 10),
("Adam_GPU", adam_opt, 10),
("SGD_GPU", sgd_opt, 10),
]
out = benchmark_util.generate_benchmark_params_cpu_gpu(params)
self.assertAllEqual(out, expected)
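# --- Added note (sketch only; the real helper lives in
# keras.benchmarks.benchmark_util): the expected output above is consistent
# with a generator that suffixes each named parameter set for CPU and GPU:
#
#     def generate_benchmark_params_cpu_gpu_sketch(params):
#         return [(name + suffix, *rest)
#                 for suffix in ('_CPU', '_GPU')
#                 for (name, *rest) in params]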
if __name__ == "__main__":
tf.test.main()
|
keras-team/keras
|
keras/benchmarks/benchmark_util_test.py
|
Python
|
apache-2.0
| 1,610
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SE-ResNeXt (50, 101, 152)
# Paper: https://arxiv.org/pdf/1709.01507.pdf
import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization, ReLU, Add
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Reshape, Multiply, Lambda, Concatenate
def stem(inputs):
""" Construct the Stem Convolution Group
inputs : input vector
"""
x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(inputs)
x = BatchNormalization()(x)
x = ReLU()(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
return x
def learner(x, groups, cardinality, ratio):
""" Construct the Learner
x : input to the learner
groups : list of groups: filters in, filters out, number of blocks
cardinality: width of group convolution
ratio : amount of filter reduction during squeeze
"""
# First ResNeXt Group (not strided)
filters_in, filters_out, n_blocks = groups.pop(0)
x = group(x, n_blocks, filters_in, filters_out, cardinality=cardinality, ratio=ratio, strides=(1, 1))
# Remaining ResNeXt Groups
for filters_in, filters_out, n_blocks in groups:
x = group(x, n_blocks, filters_in, filters_out, cardinality=cardinality, ratio=ratio)
return x
def group(x, n_blocks, filters_in, filters_out, cardinality, ratio, strides=(2, 2)):
""" Construct a Squeeze-Excite Group
x : input to the group
n_blocks : number of blocks in the group
filters_in : number of filters (channels) at the input convolution
        filters_out: number of filters (channels) at the output convolution
        cardinality: width of group convolution
        ratio      : amount of filter reduction during squeeze
        strides    : whether the projection block is strided
"""
# First block is a linear projection block
x = projection_block(x, filters_in, filters_out, strides=strides, cardinality=cardinality, ratio=ratio)
# Remaining blocks are identity links
for _ in range(n_blocks-1):
x = identity_block(x, filters_in, filters_out, cardinality=cardinality, ratio=ratio)
return x
def squeeze_excite_block(x, ratio=16):
""" Construct a Squeeze and Excite block
x : input to the block
ratio : amount of filter reduction during squeeze
"""
# Remember the input
shortcut = x
# Get the number of filters on the input
filters = x.shape[-1]
# Squeeze (dimensionality reduction)
# Do global average pooling across the filters, which will output a 1D vector
x = GlobalAveragePooling2D()(x)
# Reshape into 1x1 feature maps (1x1xC)
x = Reshape((1, 1, filters))(x)
# Reduce the number of filters (1x1xC/r)
x = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(x)
# Excitation (dimensionality restoration)
# Restore the number of filters (1x1xC)
x = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(x)
# Scale - multiply the squeeze/excitation output with the input (WxHxC)
x = Multiply()([shortcut, x])
return x
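# --- Usage sketch (added example): the squeeze-excite block is shape
# preserving; a quick check on a dummy tensor:
#
#     x_demo = Input(shape=(56, 56, 256))
#     y_demo = squeeze_excite_block(x_demo, ratio=16)
#     assert y_demo.shape[1:] == (56, 56, 256)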
def identity_block(x, filters_in, filters_out, cardinality=32, ratio=16):
""" Construct a ResNeXT block with identity link
x : input to block
filters_in : number of filters (channels) at the input convolution
filters_out: number of filters (channels) at the output convolution
cardinality: width of cardinality layer
ratio : amount of filter reduction during squeeze
"""
# Remember the input
shortcut = x
# Dimensionality Reduction
x = Conv2D(filters_in, kernel_size=(1, 1), strides=(1, 1), padding='same', use_bias=False,
kernel_initializer='he_normal')(shortcut)
x = BatchNormalization()(x)
x = ReLU()(x)
# Cardinality (Wide) Layer (split-transform)
filters_card = filters_in // cardinality
groups = []
for i in range(cardinality):
        # bind i at definition time (i=i) so each Lambda keeps its own slice
        group = Lambda(lambda z, i=i: z[:, :, :, i * filters_card:i * filters_card + filters_card])(x)
        groups.append(Conv2D(filters_card, kernel_size=(3, 3), strides=(1, 1), padding='same', use_bias=False,
                             kernel_initializer='he_normal')(group))
# Concatenate the outputs of the cardinality layer together (merge)
x = Concatenate()(groups)
x = BatchNormalization()(x)
x = ReLU()(x)
# Dimensionality restoration
x = Conv2D(filters_out, kernel_size=(1, 1), strides=(1, 1), padding='same', use_bias=False,
kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
# Pass the output through the squeeze and excitation block
x = squeeze_excite_block(x, ratio)
# Identity Link: Add the shortcut (input) to the output of the block
x = Add()([shortcut, x])
x = ReLU()(x)
return x
def projection_block(x, filters_in, filters_out, cardinality=32, strides=1, ratio=16):
""" Construct a ResNeXT block with projection shortcut
x : input to the block
filters_in : number of filters (channels) at the input convolution
filters_out: number of filters (channels) at the output convolution
cardinality: width of cardinality layer
strides : whether entry convolution is strided (i.e., (2, 2) vs (1, 1))
ratio : amount of filter reduction during squeeze
"""
# Construct the projection shortcut
# Increase filters by 2X to match shape when added to output of block
shortcut = Conv2D(filters_out, kernel_size=(1, 1), strides=strides,
padding='same', kernel_initializer='he_normal')(x)
shortcut = BatchNormalization()(shortcut)
# Dimensionality Reduction
x = Conv2D(filters_in, kernel_size=(1, 1), strides=(1, 1), padding='same', use_bias=False,
kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Cardinality (Wide) Layer (split-transform)
filters_card = filters_in // cardinality
groups = []
for i in range(cardinality):
        # bind i at definition time (i=i) so each Lambda keeps its own slice
        group = Lambda(lambda z, i=i: z[:, :, :, i * filters_card:i * filters_card + filters_card])(x)
        groups.append(Conv2D(filters_card, kernel_size=(3, 3), strides=strides, padding='same', use_bias=False,
                             kernel_initializer='he_normal')(group))
# Concatenate the outputs of the cardinality layer together (merge)
x = Concatenate()(groups)
x = BatchNormalization()(x)
x = ReLU()(x)
# Dimensionality restoration
x = Conv2D(filters_out, kernel_size=(1, 1), strides=(1, 1), padding='same', use_bias=False,
kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
# Pass the output through the squeeze and excitation block
x = squeeze_excite_block(x, ratio)
# Add the projection shortcut (input) to the output of the block
x = Add()([shortcut, x])
x = ReLU()(x)
return x
def classifier(x, n_classes):
""" Construct the Classifier
x : input to the classifier
n_classes : number of output classes
"""
# Final Dense Outputting Layer
x = GlobalAveragePooling2D()(x)
outputs = Dense(n_classes, activation='softmax', kernel_initializer='he_normal')(x)
return outputs
# Meta-parameter: number of filters in, out and number of blocks
groups = { 50 : [ (128, 256, 3), (256, 512, 4), (512, 1024, 6), (1024, 2048, 3)], # SE-ResNeXt 50
101: [ (128, 256, 3), (256, 512, 4), (512, 1024, 23), (1024, 2048, 3)], # SE-ResNeXt 101
152: [ (128, 256, 3), (256, 512, 8), (512, 1024, 36), (1024, 2048, 3)] # SE-ResNeXt 152
}
# Meta-parameter: width of group convolution
cardinality = 32
# Meta-parameter: Amount of filter reduction in squeeze operation
ratio = 16
# The input tensor
inputs = Input(shape=(224, 224, 3))
# The Stem Group
x = stem(inputs)
# The Learner
x = learner(x, groups[50], cardinality, ratio)
# The Classifier for 1000 classes
outputs = classifier(x, 1000)
# Instantiate the Model
model = Model(inputs, outputs)
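# --- Usage note (added): model.summary() prints the assembled SE-ResNeXt-50.
# Note that learner() consumes its group list with pop(0), so the groups[50]
# entry is mutated above; building another variant needs a fresh copy of the
# meta-parameter lists.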
|
GoogleCloudPlatform/keras-idiomatic-programmer
|
zoo/senet/se_resnext.py
|
Python
|
apache-2.0
| 8,772
|
"""
Test module for rendering funcweb
(We can remove this module once other modules are instrumented)
"""
import func_module
class EchoTest(func_module.FuncModule):
version = "0.0.1"
api_version = "0.0.1"
description = "Module that all of its methods returns back the same thing it recieves!"
def run_string(self, command):
"""
Run String
"""
return str(command)
def run_int(self,command):
"""
Run Integer
"""
return int(command)
def run_float(self,command):
"""
Run float
"""
return float(command)
def run_options(self,command):
"""
Run options
"""
return str(command)
def run_list(self,command):
"""
Run a list
"""
return command
def run_hash(self,command):
"""
Run hash
"""
return command
def run_boolean(self,command):
"""
Run boolean
"""
return command
def register_method_args(self):
"""
Implementing the argument getter
"""
return {
'run_string':{
'args':
{
'command':{
'type':'string',
'optional':False
}
},
'description':'Returns back a string'
},
'run_int':{
'args':
{
'command':{
'type':'int',
'optional':False
}
},
'description':'Returns back an integer'
},
'run_float':{
'args':
{
'command':{
'type':'float',
'optional':False
},
},
'description':'Returns back a float'
},
'run_options':{
'args':{
'command':{
'type':'string',
'optional':False,
'options':['first_option','second_option','third_option']
},
},
                'description':'Returns back one of the listed options'
},
'run_list':{
'args':
{
'command':{
'type':'list',
'optional':False
}
},
'description':'Returns back a list'
},
'run_hash':{
'args':
{
'command':{
'type':'hash',
'optional':False
}
},
'description':'Returns back a hash'
},
'run_boolean':{
'args':
{
'command':{
'type':'boolean',
'optional':False
}
},
'description':'Returns back a boolean'
}
}
|
pombredanne/func
|
func/minion/modules/echo.py
|
Python
|
gpl-2.0
| 3,646
|
#
#
# This module provides a loop delayer modeled as a FIFO list
#
import numpy as np
class LoopDelayer(object):
def __init__(self, delay, step_time, initial_value = np.array([0.0, 0.0])):
'''
        Initialize the loop delayer.
        delay: time the loop needs to run
        step_time: macro time step of the simulation
        initial_value: initial value of the loop
'''
if delay < step_time:
raise ValueError(" delay must be >= step_time")
self.delay_steps = int(delay/step_time) - 1
self.fifo_queue = [initial_value] * self.delay_steps
def push_pop(self, value):
'''
push back a value, pop front one
'''
self.fifo_queue.append(value)
return self.fifo_queue.pop(0)
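# --- Usage sketch (added example): with delay=3.0 and step_time=1.0 the
# delayer keeps int(3/1) - 1 = 2 steps of history, so a pushed value
# re-emerges two push_pop() calls later:
#
#     >>> d = LoopDelayer(delay=3.0, step_time=1.0)
#     >>> d.push_pop(np.array([1.0, 0.0]))   # -> initial value [0., 0.]
#     >>> d.push_pop(np.array([2.0, 0.0]))   # -> initial value [0., 0.]
#     >>> d.push_pop(np.array([3.0, 0.0]))   # -> array([1., 0.])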
|
Bellaz/ModuleSimulator
|
python/loop_delay.py
|
Python
|
gpl-3.0
| 809
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as T
# pylint: disable=no-self-argument
@tvm.script.ir_module
class WithInit:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [64, 64, 64])
B = T.match_buffer(b, [64])
for i0, j0 in T.grid(64, 64):
for k0 in T.serial(32, 64):
with T.block():
i, j, k = T.axis.remap("SRR", [i0, j0, k0])
with T.init():
B[i] = T.float32(0)
B[i] += A[i, j, k]
@tvm.script.ir_module
class WithBranch:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [64, 64, 64])
B = T.match_buffer(b, [64])
for i0, j0 in T.grid(64, 64):
for k0 in T.serial(32, 64):
with T.block():
i, j, k = T.axis.remap("SRR", [i0, j0, k0])
if (j == 0) and (k == 32):
B[i] = T.float32(0)
B[i] += A[i, j, k]
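# --- Added note: WithBranch is the expected result of applying
# tvm.tir.transform.LowerInitBlock to WithInit -- the T.init() block becomes
# an explicit branch taken on the first iteration of every reduction axis
# (j == 0 and k == 32, the starts of the j0 and k0 loops).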
@tvm.script.ir_module
class InitWithMatchBuffer:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [64, 64, 64])
B = T.match_buffer(b, [64])
for i0, j0 in T.grid(64, 64):
for k0 in T.serial(32, 64):
with T.block():
i, j, k = T.axis.remap("SRR", [i0, j0, k0])
BB = T.match_buffer(B[i], ())
AA = T.match_buffer(A[i, 0:64, 0:64], (64, 64))
with T.init():
BB[()] = T.float32(0)
BB[()] += AA[j, k]
@tvm.script.ir_module
class BranchWithMatchBuffer:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [64, 64, 64])
B = T.match_buffer(b, [64])
for i0, j0 in T.grid(64, 64):
for k0 in T.serial(32, 64):
with T.block():
i, j, k = T.axis.remap("SRR", [i0, j0, k0])
BB = T.match_buffer(B[i], ())
AA = T.match_buffer(A[i, 0:64, 0:64], (64, 64))
if (j == 0) and (k == 32):
BB[()] = T.float32(0)
BB[()] += AA[j, k]
def test_lower_reduction():
origin_mod = WithInit
mod = tvm.tir.transform.LowerInitBlock()(origin_mod)
print(mod.script())
tvm.ir.assert_structural_equal(mod, WithBranch, True)
def test_lower_match_buffer():
origin_mod = InitWithMatchBuffer
mod = tvm.tir.transform.LowerInitBlock()(origin_mod)
tvm.ir.assert_structural_equal(mod, BranchWithMatchBuffer, True)
def test_lower_te():
x = te.placeholder((1,))
y = te.compute((1,), lambda i: x[i] + 2)
s = te.create_schedule(y.op)
orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y])
mod = tvm.tir.transform.LowerInitBlock()(orig_mod)
tvm.ir.assert_structural_equal(mod, orig_mod) # LowerInitBlock should do nothing on TE
if __name__ == "__main__":
test_lower_reduction()
test_lower_match_buffer()
test_lower_te()
|
Laurawly/tvm-1
|
tests/python/unittest/test_tir_transform_lower_init_block.py
|
Python
|
apache-2.0
| 3,928
|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import _BaseLazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
if is_torch_available():
_import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
if is_tf_available():
_import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
if is_torch_available():
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
if is_tf_available():
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import importlib
import os
import sys
class _LazyModule(_BaseLazyModule):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
__file__ = globals()["__file__"]
__path__ = [os.path.dirname(__file__)]
def _get_module(self, module_name: str):
return importlib.import_module("." + module_name, self.__name__)
sys.modules[__name__] = _LazyModule(__name__, _import_structure)
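# --- Added note: with the lazy module installed in sys.modules, an import
# such as
#
#     from transformers.models.transfo_xl import TransfoXLConfig
#
# only loads `configuration_transfo_xl` at that moment; the torch/TF modeling
# files stay unimported until one of their names is first requested.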
|
huggingface/pytorch-transformers
|
src/transformers/models/transfo_xl/__init__.py
|
Python
|
apache-2.0
| 3,221
|
# Vortex panel method module
import numpy
from matplotlib import pyplot
# velocity component functions
def get_u( x, y, S, gamma ):
return gamma/(2*numpy.pi)*(numpy.arctan((x-S)/y)-numpy.arctan((x+S)/y))
def get_v( x, y, S, gamma ):
return gamma/(4*numpy.pi)*(numpy.log(((x+S)**2+y**2)/((x-S)**2+y**2)))
# vortex panel class
class Panel:
# save the inputs and pre-compute factors for the coordinate tranform
def __init__( self, x0, y0, x1, y1, gamma=0 ):
self.x,self.y,self.gamma = [x0,x1],[y0,y1],gamma
self.xc = 0.5*(x0+x1) # panel x-center
self.yc = 0.5*(y0+y1) # panel y-center
self.S = numpy.sqrt( # ...
(x1-self.xc)**2+(y1-self.yc)**2) # panel width
self.sx = (x1-self.xc)/self.S # unit vector in x
self.sy = (y1-self.yc)/self.S # unit vector in y
# get the velocity!
def velocity( self, x, y, gamma=None ):
if gamma is None: gamma = self.gamma # default gamma
xp,yp = self.transform_xy( x, y ) # transform
up = get_u( xp, yp, self.S, gamma ) # get u prime
vp = get_v( xp, yp, self.S, gamma ) # get v prime
return self.rotate_uv( up, vp ) # rotate back
# plot the panel
def plot(self):
return pyplot.plot(self.x,self.y,'k-',lw=2)
# transform from global to panel coordinates
def transform_xy( self, x, y ):
xt = x-self.xc # shift x
yt = y-self.yc # shift y
xp = xt*self.sx+yt*self.sy # rotate x
yp = yt*self.sx-xt*self.sy # rotate y
return [ xp, yp ]
# rotate velocity back to global coordinates
def rotate_uv( self, up, vp):
u = up*self.sx-vp*self.sy # reverse rotate u prime
v = vp*self.sx+up*self.sy # reverse rotate v prime
return [ u, v ]
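# Illustrative check (added; not from the original lesson): a unit-strength
# panel from (-0.5,0) to (0.5,0) induces u ~ -gamma/2 and v ~ 0 just above
# its center, so the tangential velocity jumps by gamma across the sheet:
#   p = Panel(-0.5, 0, 0.5, 0, gamma=1)
#   p.velocity(0, 1e-6)   # -> approximately [-0.5, 0.0]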
# plot the flow on a grid
def plot_flow(panels,alpha=0,xmax=2,N_grid=100):
# define the grid
X = numpy.linspace(-xmax, xmax, N_grid) # computes a 1D-array for x
Y = numpy.linspace(-xmax, xmax, N_grid) # computes a 1D-array for y
x, y = numpy.meshgrid(X, Y) # generates a mesh grid
# get the uniform velocity on the grid
u = numpy.cos(alpha)*numpy.ones((N_grid,N_grid))
v = numpy.sin(alpha)*numpy.ones((N_grid,N_grid))
# add the velocity contribution from each panel
for p in panels:
u0,v0 = p.velocity(x,y)
u = u+u0
v = v+v0
# plot it
pyplot.figure(figsize=(8,11)) # set size
pyplot.xlabel('x', fontsize=16) # label x
pyplot.ylabel('y', fontsize=16) # label y
m = numpy.sqrt(u**2+v**2) # compute velocity magnitude
velocity = pyplot.contourf(x, y, m) # plot magnitude contours
cbar = pyplot.colorbar(velocity, orientation='horizontal')
    cbar.set_label('Velocity magnitude', fontsize=16)
pyplot.quiver(x[::4,::4], y[::4,::4],
u[::4,::4], v[::4,::4]) # plot vector field
# pyplot.streamplot(x, y, u, v) # plots streamlines - this is slow!
for p in panels: p.plot()
# define the influence of panel_j on panel_i
def influence(panel_i,panel_j):
u,v = panel_j.velocity(panel_i.xc,panel_i.yc,gamma=1)
return u*panel_i.sx+v*panel_i.sy
# construct the linear system
def construct_A_b(panels,alpha=0):
# construct matrix
N_panels = len(panels)
A = numpy.empty((N_panels, N_panels), dtype=float) # empty matrix
numpy.fill_diagonal(A, 0.5) # fill diagonal with 1/2
for i, p_i in enumerate(panels):
for j, p_j in enumerate(panels):
if i != j: # off-diagonals
A[i,j] = influence(p_i,p_j) # find influence
# computes the RHS
b = [-numpy.cos(alpha)*p.sx-numpy.sin(alpha)*p.sy for p in panels]
return [A,b]
# determine the vortex strength on a set of panels
def solve_gamma(panels,alpha=0):
A,b = construct_A_b(panels,alpha) # construct linear system
gamma = numpy.linalg.solve(A, b) # solve for gamma!
for i,p_i in enumerate(panels):
p_i.gamma = gamma[i] # update panels
# determine the vortex panel strength with Kutta Condition
def solve_gamma_kutta(panels,alpha=0):
A,b = construct_A_b(panels,alpha) # construct linear system
    A[:, 0] += 1                          # add the Kutta condition:
    A[:,-1] += 1                          # gamma[0]+gamma[N-1] = 0 in every equation
gamma = numpy.linalg.solve(A, b) # solve for gamma!
for i,p_i in enumerate(panels):
p_i.gamma = gamma[i] # update panels
### Geometries
# make a circle
def make_circle(N,t_c=1):
# define the end-points of the panels
x_ends = numpy.cos(numpy.linspace(0, -2*numpy.pi, N+1))
y_ends = numpy.sin(numpy.linspace(0, -2*numpy.pi, N+1))
    y_ends *= t_c    # scale y by the thickness ratio (an ellipse when t_c != 1)
# define the panels
circle = numpy.empty(N, dtype=object)
    for i in range(N):
        circle[i] = Panel(x_ends[i], y_ends[i], x_ends[i+1], y_ends[i+1])
return circle
# make a jukowski foil
def make_jukowski( N, dx = 0.2, dy = 0, dr = 0 ):
# define the circle
x_ends = numpy.cos(numpy.linspace(0, -2*numpy.pi, N+1))
y_ends = numpy.sin(numpy.linspace(0, -2*numpy.pi, N+1))
# shift circle
r = numpy.sqrt((1+dx)**2+dy**2)+dr
x2_ends = r*x_ends-dx
y2_ends = r*y_ends-dy
r2_ends = x2_ends**2+y2_ends**2
# apply jukowski mapping
x3_ends = x2_ends*(1+1./r2_ends)/2
y3_ends = y2_ends*(1-1./r2_ends)/2
# define the panels
foil = numpy.empty(N, dtype=object)
    for i in range(N):
        foil[i] = Panel(x3_ends[i], y3_ends[i], x3_ends[i+1], y3_ends[i+1])
return foil
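# Hedged usage sketch (added for illustration; the panel count and angle of
# attack below are example values, not taken from the lesson itself):
if __name__ == '__main__':
    foil = make_jukowski(N=32)        # 32-panel Jukowski foil
    alpha = numpy.pi/18               # 10 degree angle of attack
    solve_gamma_kutta(foil, alpha)    # vortex strengths with Kutta condition
    plot_flow(foil, alpha)            # velocity magnitude and vector field
    pyplot.show()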
|
ultiyuan/test0
|
lessons/VortexPanel.py
|
Python
|
gpl-2.0
| 5,764
|
from lxml import etree
import http.cookiejar  # Python 3 replacement for the Python 2 cookielib
import mechanize
class XRoxy(object):
    def __init__(self):
        self._br = mechanize.Browser()
        # Cookie Jar
        cj = http.cookiejar.LWPCookieJar()
        self._br.set_cookiejar(cj)
        # Browser options
        self._br.set_handle_equiv(True)
        self._br.set_handle_gzip(True)
        self._br.set_handle_redirect(True)
        self._br.set_handle_referer(True)
        self._br.set_handle_robots(False)
        self._br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:37.0) Gecko/20100101 Firefox/37.0')]
    def fetch_proxies(self, limit_page=5, country='BR', ssl=''):
        for page in range(limit_page):
            url = ('http://www.xroxy.com/proxylist.php?port=&type=&ssl=%s'
                   '&country=%s&latency=&reliability=&sort=reliability'
                   '&desc=true&pnum=%d#table' % (ssl, country, page))
            resp = self._br.open(url)
            read = resp.read()
            # print(read)
            tree = etree.HTML(read)
            tds = tree.xpath('/html/body/div[1]/div[2]/table[1]/tr/td')
            for td in tds:
                if td.text is not None and len(td.text) > 0:
                    print(td.text)
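if __name__ == '__main__':
    # Hedged usage sketch (added for illustration): print the first two
    # result pages of Brazilian proxies. Live network access and the page
    # layout targeted by the XPath above are assumptions; the site may have
    # changed since this scraper was written.
    XRoxy().fetch_proxies(limit_page=2, country='BR')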
|
LeoCBS/xroxy-rest
|
xroxy.py
|
Python
|
mit
| 1,411
|
from nltk import word_tokenize
from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
class PseudoReferenceFeatureExtractor(FeatureExtractor):
    '''
    Extracts the pseudo-reference feature from pseudo-references
    provided in a file (an alternative to GoogleTranslateFeatureExtractor).
    '''
    def __init__(self, ref_file):
        self.pseudo_references = []
        # read the references in text mode and close the file when done
        with open(ref_file, encoding='utf-8') as ref:
            for line in ref:
                self.pseudo_references.append(word_tokenize(line.rstrip('\n')))
def get_features(self, context_obj):
if 'sentence_id' not in context_obj:
raise NoDataError('sentence_id', context_obj, 'PseudoReferenceFeatureExtractor')
out = 1 if context_obj['token'] in self.pseudo_references[context_obj['sentence_id']] else 0
return [out]
def get_feature_names(self):
return ["pseudo-reference"]
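if __name__ == '__main__':
    # Hedged usage sketch (added for illustration; not part of marmot):
    # write a one-line pseudo-reference file, then score a single token.
    # Requires NLTK's 'punkt' tokenizer data for word_tokenize.
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False,
                                     encoding='utf-8') as tmp:
        tmp.write('the quick brown fox\n')
        path = tmp.name
    extractor = PseudoReferenceFeatureExtractor(path)
    print(extractor.get_features({'token': 'fox', 'sentence_id': 0}))  # -> [1]
    os.remove(path)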
|
qe-team/marmot
|
marmot/features/pseudo_reference_feature_extractor.py
|
Python
|
isc
| 953
|